include dockerfile / docker-compose for startup

This commit is contained in:
Ra
2025-09-05 20:32:33 -07:00
parent 4b7c4b6314
commit 1056672e7c
14 changed files with 942 additions and 82 deletions

66
.dockerignore Normal file
View File

@@ -0,0 +1,66 @@
# Docker build context optimization
# Exclude files that are not needed for building the Docker image
# Build artifacts
_build/
deps/
.elixir_ls/
erl_crash.dump
# Development files
.git/
.gitignore
*.md
!README.md
docs/
examples/
# Test files
test/
cover/
.cover/
# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
# OS files
.DS_Store
Thumbs.db
# Log files
*.log
logs/
# Temporary files
tmp/
temp/
*.tmp
# Docker files (to avoid recursive copying)
Dockerfile*
.dockerignore
docker-compose*.yml
# Environment files that might contain secrets
.env*
!.env.example
# NATS data (if running locally)
nats-data/
# Node.js files (they'll be installed fresh in container)
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Coverage reports
coverage/
lcov.info
# Dialyzer PLT files
priv/plts/

1
.gitignore vendored
View File

@@ -94,3 +94,4 @@ coverage/
.claude/
/docs/LANGUAGE_IMPLEMENTATIONS.md
/asdf.txt

100
Dockerfile Normal file
View File

@@ -0,0 +1,100 @@
# Agent Coordinator - Multi-stage Docker Build
# Creates a production-ready container for the MCP server without requiring local Elixir/OTP installation
# Build stage - Use official Elixir image with OTP
FROM elixir:1.16-otp-26-alpine AS builder
# Install build dependencies
RUN apk add --no-cache \
build-base \
git \
curl \
bash
# Install Node.js and npm for MCP external servers (bunx dependency)
RUN apk add --no-cache nodejs npm
RUN npm install -g bun
# Set build environment
ENV MIX_ENV=prod
# Create app directory
WORKDIR /app
# Copy mix files
COPY mix.exs mix.lock ./
# Install mix dependencies
RUN mix local.hex --force && \
mix local.rebar --force && \
mix deps.get --only $MIX_ENV && \
mix deps.compile
# Copy source code
COPY lib lib
COPY config config
# Compile the release
RUN mix compile
# Prepare release
RUN mix release
# Runtime stage - Use smaller Alpine image
FROM alpine:3.18 AS runtime
# Install runtime dependencies
RUN apk add --no-cache \
bash \
openssl \
ncurses-libs \
libstdc++ \
nodejs \
npm
# Install Node.js packages for external MCP servers
RUN npm install -g bun
# Create non-root user for security
RUN addgroup -g 1000 appuser && \
adduser -u 1000 -G appuser -s /bin/bash -D appuser
# Create app directory and set permissions
WORKDIR /app
RUN chown -R appuser:appuser /app
# Copy the release from builder stage
COPY --from=builder --chown=appuser:appuser /app/_build/prod/rel/agent_coordinator ./
# Copy configuration files
COPY --chown=appuser:appuser mcp_servers.json ./
COPY --chown=appuser:appuser scripts/mcp_launcher.sh ./scripts/
# Make scripts executable
RUN chmod +x ./scripts/mcp_launcher.sh
# Copy Docker entrypoint script
COPY --chown=appuser:appuser docker-entrypoint.sh ./
RUN chmod +x ./docker-entrypoint.sh
# Switch to non-root user
USER appuser
# Set environment variables
ENV MIX_ENV=prod
ENV NATS_HOST=localhost
ENV NATS_PORT=4222
ENV SHELL=/bin/bash
# Expose the default port (if needed for HTTP endpoints)
EXPOSE 4000
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD /app/bin/agent_coordinator ping || exit 1
# Set the entrypoint
ENTRYPOINT ["/app/docker-entrypoint.sh"]
# Default command
CMD ["/app/scripts/mcp_launcher.sh"]
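Since the build is multi-stage, the stages can be built and inspected independently; a quick sketch (image tags are illustrative):

```bash
# Build only the builder stage (useful for debugging compile issues)
docker build --target builder -t agent-coordinator:build .

# Build the full runtime image and run it
docker build -t agent-coordinator .
docker run --rm -i agent-coordinator
```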

296
README.md
View File

@@ -4,7 +4,19 @@ A **Model Context Protocol (MCP) server** that enables multiple AI agents to coo
## 🎯 What is Agent Coordinator?
Agent Coordinator is an MCP server that solves the problem of multiple AI agents stepping on each other's toes when working on the same codebase. Instead of agents conflicting over files or duplicating work, they can register with the coordinator, receive tasks, and collaborate intelligently.
Agent Coordinator is a **unified MCP proxy server** that enables multiple AI agents to collaborate seamlessly without conflicts. As shown in the architecture diagram above, it acts as a single interface connecting multiple agents (Purple Zebra, Yellow Elephant, etc.) to a comprehensive ecosystem of tools and task management.
**The coordinator orchestrates three core components:**
- **Task Registry**: Intelligent task queuing, agent matching, and automatic progress tracking
- **Agent Manager**: Agent registration, heartbeat monitoring, and capability-based assignment
- **Codebase Registry**: Cross-repository coordination, dependency management, and workspace organization
**Plus a Unified Tool Registry** that seamlessly combines:
- Native coordination tools (register_agent, get_next_task, etc.)
- Proxied MCP tools from external servers (read_file, search_memory, etc.)
- VS Code integration tools (get_active_editor, run_command, etc.)
Instead of agents conflicting over files or duplicating work, they connect through a single MCP interface that automatically routes tool calls, tracks all operations as coordinated tasks, and maintains real-time communication via personal agent inboxes and shared task boards.
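In practice that single interface is plain MCP JSON-RPC over stdio. A minimal sketch, assuming a local checkout and that `scripts/mcp_launcher.sh` is the stdio entry point (as the VS Code configuration later in this README assumes):

```bash
# Initialize the session, then list every tool in the unified registry
# (native coordination tools + proxied MCP tools + VS Code tools).
printf '%s\n' \
  '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"smoke-test","version":"0.1.0"}}}' \
  '{"jsonrpc":"2.0","id":2,"method":"tools/list","params":{}}' \
  | ./scripts/mcp_launcher.sh
```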
**Key Features:**
@@ -42,28 +54,29 @@ Agent Coordinator is an MCP server that solves the problem of multiple AI agents
[ASCII architecture diagram, updated in this commit. Both the old and new versions center on the UNIFIED TOOL REGISTRY box, which lists Native Tools (register_agent, get_next_task, create_task_set, complete_task, ...), Proxied MCP Tools (read_file, write_file, search_memory, get_docs, ...), and VS Code Tools (get_active_editor, set_selection, get_workspace_folders, run_command, ...), routes each call to the appropriate server or handles it natively, and is configured via MCP_TOOLS_FILE. Below it sit the EXTERNAL MCP SERVERS (MCP 1 through MCP 8, each exposing tool 1..3). The new version adds a shared Task Board with per-agent queues (Agent 1-4 Q, showing completed, in-progress, and pending tasks) and personal Agent INBOXes that show each agent's current task with a [ complete task ] action.]
@@ -84,6 +97,7 @@ Agent Coordinator is an MCP server that solves the problem of multiple AI agents
6. Routes tool calls to appropriate servers
7. Automatically tracks all operations as tasks
8. Maintains heartbeat & coordination across agents
```
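Steps 6-8 are what this commit wires up end to end: every proxied tool schema gains a required `agent_id` (see the `transform_external_tool_schema/1` change further down in this commit), which the coordinator uses for task tracking and heartbeats and then strips before forwarding the call. A hedged example of such a call over stdio; the `path` argument follows the filesystem MCP server's convention and may differ for other tools:

```bash
# A proxied read_file call; agent_id is consumed by the coordinator for
# tracking, then removed before the call reaches the external server.
# (The agent is assumed to be registered already via the register_agent tool.)
echo '{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"read_file","arguments":{"agent_id":"purple-zebra","path":"README.md"}}}' \
  | ./scripts/mcp_launcher.sh
```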
## 🔧 MCP Server Management & Unified Tool Registry
@@ -201,14 +215,54 @@ Updates agent task → Sends heartbeat → Returns file content
## 🛠️ Prerequisites
You need these installed to run Agent Coordinator:
Choose one of these installation methods:
### Option 1: Docker (Recommended - No Elixir Installation Required)
- **Docker**: 20.10+ and Docker Compose
- **Node.js**: 18+ (for external MCP servers via bun)
### Option 2: Manual Installation
- **Elixir**: 1.16+ with OTP 26+
- **Mix**: Comes with Elixir installation
- **Node.js**: 18+ (for external MCP servers via bun)
## ⚡ Quick Start
### 1. Get the Code
### Option A: Docker Setup (Easiest)
#### 1. Get the Code
```bash
git clone https://github.com/your-username/agent_coordinator.git
cd agent_coordinator
```
#### 2. Run with Docker Compose
```bash
# Start the stack (MCP server + NATS; add --profile monitoring for the dashboard)
docker-compose up -d
# Or start just the MCP server
docker-compose up agent-coordinator
# Check logs
docker-compose logs -f agent-coordinator
```
#### 3. Configuration
Edit `mcp_servers.json` to configure external MCP servers, then restart:
```bash
docker-compose restart agent-coordinator
```
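The Docker entrypoint added in this commit validates `mcp_servers.json` on startup; an equivalent check can be run on the host before restarting (a sketch assuming Node.js is installed locally):

```bash
# Fail fast on malformed JSON before bouncing the container
node -e "JSON.parse(require('fs').readFileSync('mcp_servers.json', 'utf8')); console.log('mcp_servers.json: valid JSON')"
```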
### Option B: Manual Setup
#### 1. Get the Code
```bash
git clone https://github.com/your-username/agent_coordinator.git
@@ -216,7 +270,7 @@ cd agent_coordinator
mix deps.get
```
### 2. Start the MCP Server
#### 2. Start the MCP Server
```bash
# Start the MCP server directly
@@ -228,7 +282,31 @@ mix run --no-halt
### 3. Configure Your AI Tools
The agent coordinator is designed to work with VS Code and AI tools that support MCP. Add this to your VS Code `settings.json`:
#### For Docker Setup
If using Docker, the MCP server is available at the container's stdio interface. Add this to your VS Code `settings.json`:
```json
{
  "github.copilot.advanced": {
    "mcp": {
      "servers": {
        "agent-coordinator": {
          "command": "docker",
          "args": ["exec", "-i", "agent-coordinator", "/app/scripts/mcp_launcher.sh"],
          "env": {
            "MIX_ENV": "prod"
          }
        }
      }
    }
  }
}
```
#### For Manual Setup
Add this to your VS Code `settings.json`:
```json
{
@@ -245,16 +323,180 @@ The agent coordinator is designed to work with VS Code and AI tools that support
}
}
}
}
}
}
```
### 4. Test It Works
#### Docker Testing
```bash
# Test with Docker
docker-compose exec agent-coordinator /app/bin/agent_coordinator ping
# Run the example workflow (Mix is not included in the release image, so this
# needs an image or bind mount that provides Elixir/Mix)
docker-compose exec agent-coordinator mix run examples/full_workflow_demo.exs
# View logs
docker-compose logs -f agent-coordinator
```
#### Manual Testing
```bash
# Run the demo to see it in action
mix run examples/full_workflow_demo.exs
```
## 🐳 Docker Usage Guide
### Available Docker Commands
#### Basic Operations
```bash
# Build the image
docker build -t agent-coordinator .
# Run standalone container
docker run -d --name agent-coordinator -p 4000:4000 agent-coordinator
# Run with custom config
docker run -d \
-v "$(pwd)/mcp_servers.json":/app/mcp_servers.json:ro \
-p 4000:4000 \
agent-coordinator
```
#### Docker Compose Operations
```bash
# Start full stack
docker-compose up -d
# Start only agent coordinator
docker-compose up -d agent-coordinator
# View logs
docker-compose logs -f agent-coordinator
# Restart after config changes
docker-compose restart agent-coordinator
# Stop everything
docker-compose down
# Remove volumes (reset data)
docker-compose down -v
```
#### Development with Docker
```bash
# Start in development mode
docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
# Interactive shell for debugging
docker-compose exec agent-coordinator bash
# Run tests in container (requires Elixir/Mix, which the release image does not include)
docker-compose exec agent-coordinator mix test
# Watch logs during development
docker-compose logs -f
```
### Environment Variables
Configure the container using environment variables:
```bash
# docker-compose.override.yml example
version: '3.8'
services:
agent-coordinator:
environment:
- MIX_ENV=prod
- NATS_HOST=nats
- NATS_PORT=4222
- LOG_LEVEL=info
```
### Custom Configuration
#### External MCP Servers
Mount your own `mcp_servers.json`:
```bash
docker run -d \
-v "$(pwd)/my-mcp-config.json":/app/mcp_servers.json:ro \
agent-coordinator
```
#### Persistent Data
```bash
docker run -d \
-v agent_data:/app/data \
-v nats_data:/data \
agent-coordinator
```
### Monitoring & Health Checks
#### Container Health
```bash
# Check container health
docker-compose ps
# Health check details
docker inspect --format='{{json .State.Health}}' agent-coordinator
# Manual health check
docker-compose exec agent-coordinator /app/bin/agent_coordinator ping
```
#### NATS Monitoring
Access the NATS monitoring dashboard:
```bash
# Start with monitoring profile
docker-compose --profile monitoring up -d
# Access dashboard at http://localhost:8080
open http://localhost:8080
```
### Troubleshooting
#### Common Issues
```bash
# Check container logs
docker-compose logs agent-coordinator
# Check NATS connectivity
docker-compose exec agent-coordinator nc -z nats 4222
# Restart stuck container
docker-compose restart agent-coordinator
# Reset everything
docker-compose down -v && docker-compose up -d
```
#### Performance Tuning
```bash
# docker-compose up has no --memory/--cpus flags; adjust limits on the running
# container instead (or set deploy.resources in a compose override file)
docker update --memory=1g --memory-swap=1g --cpus=2 agent-coordinator
```
## 🎮 How to Use
Once your AI agents are connected via MCP, they can:

View File

@@ -1,5 +0,0 @@
[Deleted file: five lines of Unicode box-drawing, block, and symbol characters, apparently a scratch reference used while drawing the README's ASCII architecture diagram.]

27
docker-compose.dev.yml Normal file
View File

@@ -0,0 +1,27 @@
version: '3.8'

# Development override for docker-compose.yml
# Run with: docker-compose -f docker-compose.yml -f docker-compose.dev.yml up

services:
  agent-coordinator:
    environment:
      - MIX_ENV=dev
    volumes:
      # Mount source code for development
      - .:/app/src:ro
      # Mount config for easy editing
      - ./mcp_servers.json:/app/mcp_servers.json
    command: ["bash"]
    stdin_open: true
    tty: true
    profiles:
      - dev

  # Lightweight development NATS without persistence
  nats:
    command:
      - '--jetstream'
    volumes: []
    profiles:
      - dev

85
docker-compose.yml Normal file
View File

@@ -0,0 +1,85 @@
version: '3.8'

services:
  # Agent Coordinator MCP Server
  agent-coordinator:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: agent-coordinator
    environment:
      - MIX_ENV=prod
      - NATS_HOST=nats
      - NATS_PORT=4222
    volumes:
      # Mount local mcp_servers.json for easy configuration
      - ./mcp_servers.json:/app/mcp_servers.json:ro
      # Mount a directory for persistent data (optional)
      - agent_data:/app/data
    ports:
      # Expose port 4000 if the app serves HTTP endpoints
      - "4000:4000"
    depends_on:
      nats:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/app/bin/agent_coordinator", "ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s

  # NATS Message Broker (optional but recommended for production)
  nats:
    image: nats:2.10-alpine
    container_name: agent-coordinator-nats
    command:
      - '--jetstream'
      - '--store_dir=/data'
      - '--max_file_store=1G'
      - '--max_mem_store=256M'
    ports:
      # NATS client port
      - "4222:4222"
      # NATS HTTP monitoring port
      - "8222:8222"
      # NATS routing port for clustering
      - "6222:6222"
    volumes:
      - nats_data:/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8222/healthz"]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 10s

  # Optional: NATS Monitoring Dashboard
  nats-board:
    image: devforth/nats-board:latest
    container_name: agent-coordinator-nats-board
    environment:
      - NATS_HOSTS=nats:4222
    ports:
      - "8080:8080"
    depends_on:
      nats:
        condition: service_healthy
    restart: unless-stopped
    profiles:
      - monitoring

volumes:
  # Persistent storage for NATS JetStream
  nats_data:
    driver: local
  # Persistent storage for agent coordinator data
  agent_data:
    driver: local

networks:
  default:
    name: agent-coordinator-network

176
docker-entrypoint.sh Normal file
View File

@@ -0,0 +1,176 @@
#!/bin/bash
# Docker entrypoint script for Agent Coordinator MCP Server
# Handles initialization, configuration, and graceful shutdown
set -e
# Default environment variables
export MIX_ENV="${MIX_ENV:-prod}"
export NATS_HOST="${NATS_HOST:-localhost}"
export NATS_PORT="${NATS_PORT:-4222}"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1" >&2
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1" >&2
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1" >&2
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1" >&2
}
# Cleanup function for graceful shutdown
cleanup() {
log_info "Received shutdown signal, cleaning up..."
# Send termination signals to child processes
if [ ! -z "$MAIN_PID" ]; then
log_info "Stopping main process (PID: $MAIN_PID)..."
kill -TERM "$MAIN_PID" 2>/dev/null || true
wait "$MAIN_PID" 2>/dev/null || true
fi
log_success "Cleanup completed"
exit 0
}
# Set up signal handlers for graceful shutdown
trap cleanup SIGTERM SIGINT SIGQUIT
# Function to wait for NATS (if configured)
wait_for_nats() {
if [ "$NATS_HOST" != "localhost" ] || [ "$NATS_PORT" != "4222" ]; then
log_info "Waiting for NATS at $NATS_HOST:$NATS_PORT..."
local timeout=30
local count=0
while [ $count -lt $timeout ]; do
if nc -z "$NATS_HOST" "$NATS_PORT" 2>/dev/null; then
log_success "NATS is available"
return 0
fi
log_info "NATS not yet available, waiting... ($((count + 1))/$timeout)"
sleep 1
count=$((count + 1))
done
log_error "Timeout waiting for NATS at $NATS_HOST:$NATS_PORT"
exit 1
else
log_info "Using default NATS configuration (localhost:4222)"
fi
}
# Validate configuration
validate_config() {
log_info "Validating configuration..."
# Check if mcp_servers.json exists
if [ ! -f "/app/mcp_servers.json" ]; then
log_error "mcp_servers.json not found"
exit 1
fi
# Validate JSON
if ! node -e "JSON.parse(require('fs').readFileSync('/app/mcp_servers.json', 'utf8'))" >/dev/null 2>&1; then
log_error "Invalid JSON in mcp_servers.json"
exit 1
fi
log_success "Configuration validation passed"
}
# Pre-install external MCP server dependencies
preinstall_dependencies() {
log_info "Pre-installing external MCP server dependencies..."
# Check if bun is available
if ! command -v bun >/dev/null 2>&1; then
log_error "bun is not available - external MCP servers may not work"
return 1
fi
# Pre-cache common MCP packages to speed up startup
local packages=(
"@modelcontextprotocol/server-filesystem"
"@modelcontextprotocol/server-memory"
"@modelcontextprotocol/server-sequential-thinking"
"@upstash/context7-mcp"
)
for package in "${packages[@]}"; do
log_info "Caching package: $package"
bun add --global --silent "$package" || log_warn "Failed to cache $package"
done
log_success "Dependencies pre-installed"
}
# Main execution
main() {
log_info "Starting Agent Coordinator MCP Server"
log_info "Environment: $MIX_ENV"
log_info "NATS: $NATS_HOST:$NATS_PORT"
# Validate configuration
validate_config
# Wait for external services if needed
wait_for_nats
# Pre-install dependencies
preinstall_dependencies
# Change to app directory
cd /app
# Start the main application
log_info "Starting main application..."
if [ "$#" -eq 0 ] || [ "$1" = "/app/scripts/mcp_launcher.sh" ]; then
# Default: start the MCP server
log_info "Starting MCP server via launcher script..."
exec "/app/scripts/mcp_launcher.sh" &
MAIN_PID=$!
elif [ "$1" = "bash" ] || [ "$1" = "sh" ]; then
# Interactive shell mode
log_info "Starting interactive shell..."
exec "$@"
elif [ "$1" = "release" ]; then
# Direct release mode
log_info "Starting via Elixir release..."
exec "/app/bin/agent_coordinator" "start" &
MAIN_PID=$!
else
# Custom command
log_info "Starting custom command: $*"
exec "$@" &
MAIN_PID=$!
fi
# Wait for the main process if it's running in background
if [ ! -z "$MAIN_PID" ]; then
log_success "Main process started (PID: $MAIN_PID)"
wait "$MAIN_PID"
fi
}
# Execute main function with all arguments
main "$@"

View File

@@ -11,7 +11,7 @@ defmodule AgentCoordinator.MCPServer do
use GenServer
require Logger
alias AgentCoordinator.{TaskRegistry, Inbox, Agent, Task, CodebaseRegistry}
alias AgentCoordinator.{TaskRegistry, Inbox, Agent, Task, CodebaseRegistry, VSCodeToolProvider}
# State for tracking external servers and agent sessions
defstruct [
@@ -345,7 +345,10 @@ defmodule AgentCoordinator.MCPServer do
end
def get_tools do
@mcp_tools
case GenServer.call(__MODULE__, :get_all_tools, 5000) do
tools when is_list(tools) -> tools
_ -> @mcp_tools
end
end
# Server callbacks
@@ -407,11 +410,11 @@ defmodule AgentCoordinator.MCPServer do
if allowed_without_agent do
# Allow system calls and register_agent to proceed without agent_id
response = process_mcp_request(request)
response = process_mcp_request(request, state)
{:reply, response, state}
else
# Log the rejected call for debugging
Logger.warning("Rejected call without agent_id: method=#{method}, tool=#{tool_name}")
IO.puts(:stderr, "Rejected call without agent_id: method=#{method}, tool=#{tool_name}")
error_response = %{
"jsonrpc" => "2.0",
"id" => Map.get(request, "id"),
@@ -425,7 +428,7 @@ defmodule AgentCoordinator.MCPServer do
%{agent_id: nil} ->
# System call without agent context
response = process_mcp_request(request)
response = process_mcp_request(request, state)
{:reply, response, state}
agent_context ->
@@ -436,7 +439,7 @@ defmodule AgentCoordinator.MCPServer do
end
# Process the request
response = process_mcp_request(request)
response = process_mcp_request(request, state)
# Send post-operation heartbeat and update session activity
if agent_context[:agent_id] do
@@ -461,9 +464,14 @@ defmodule AgentCoordinator.MCPServer do
end
end
def handle_call(:get_all_tools, _from, state) do
all_tools = get_all_unified_tools_from_state(state)
{:reply, all_tools, state}
end
# MCP request processing
defp process_mcp_request(%{"method" => "initialize"} = request) do
defp process_mcp_request(%{"method" => "initialize"} = request, _state) do
id = Map.get(request, "id", nil)
%{
@@ -491,11 +499,11 @@ defmodule AgentCoordinator.MCPServer do
}
end
defp process_mcp_request(%{"method" => "tools/list"} = request) do
defp process_mcp_request(%{"method" => "tools/list"} = request, state) do
id = Map.get(request, "id", nil)
# Get both coordinator tools and external server tools
all_tools = get_all_unified_tools()
all_tools = get_all_unified_tools_from_state(state)
%{
"jsonrpc" => "2.0",
@@ -504,11 +512,11 @@ defmodule AgentCoordinator.MCPServer do
}
end
defp process_mcp_request(%{"method" => "notifications/initialized"} = request) do
defp process_mcp_request(%{"method" => "notifications/initialized"} = request, _state) do
# Handle the initialized notification - this is sent by clients after initialization
id = Map.get(request, "id", nil)
Logger.info("Client initialization notification received")
IO.puts(:stderr, "Client initialization notification received")
# For notifications, we typically don't send a response, but if there's an ID, respond
if id do
@@ -527,12 +535,13 @@ defmodule AgentCoordinator.MCPServer do
%{
"method" => "tools/call",
"params" => %{"name" => tool_name, "arguments" => args}
} = request
} = request,
state
) do
id = Map.get(request, "id", nil)
# Determine if this is a coordinator tool or external tool
result = route_tool_call(tool_name, args)
result = route_tool_call(tool_name, args, state)
case result do
{:ok, data} ->
@@ -551,7 +560,7 @@ defmodule AgentCoordinator.MCPServer do
end
end
defp process_mcp_request(request) do
defp process_mcp_request(request, _state) do
id = Map.get(request, "id", nil)
%{
@@ -586,7 +595,7 @@ defmodule AgentCoordinator.MCPServer do
{:ok, _pid} -> :ok
{:error, {:already_started, _pid}} -> :ok
{:error, reason} ->
Logger.warning("Failed to start inbox for agent #{agent.id}: #{inspect(reason)}")
IO.puts(:stderr, "Failed to start inbox for agent #{agent.id}: #{inspect(reason)}")
:ok
end
@@ -1118,7 +1127,7 @@ defmodule AgentCoordinator.MCPServer do
config: config
}
Logger.info("Registering HTTP server: #{name} at #{Map.get(config, :url)}")
IO.puts(:stderr, "Registering HTTP server: #{name} at #{Map.get(config, :url)}")
{:ok, server_info}
end
@@ -1303,12 +1312,56 @@ defmodule AgentCoordinator.MCPServer do
new_registry =
Enum.reduce(state.external_servers, %{}, fn {name, server_info}, acc ->
tools = Map.get(server_info, :tools, [])
Map.put(acc, name, tools)
# Transform each tool to include agent_id in the schema
transformed_tools = Enum.map(tools, &transform_external_tool_schema/1)
Map.put(acc, name, transformed_tools)
end)
%{state | tool_registry: new_registry}
end
defp transform_external_tool_schema(tool) when is_map(tool) do
try do
# Get the existing input schema
input_schema = Map.get(tool, "inputSchema", %{})
# Get existing properties and required fields
properties = Map.get(input_schema, "properties", %{})
required = Map.get(input_schema, "required", [])
# Add agent_id to properties if not already present
updated_properties = Map.put_new(properties, "agent_id", %{
"type" => "string",
"description" => "ID of the agent making the tool call"
})
# Add agent_id to required fields if not already present
updated_required = if "agent_id" in required do
required
else
["agent_id" | required]
end
# Update the input schema
updated_schema = Map.merge(input_schema, %{
"properties" => updated_properties,
"required" => updated_required
})
# Return the tool with updated schema
Map.put(tool, "inputSchema", updated_schema)
rescue
error ->
IO.puts(:stderr, "Failed to transform tool schema for #{inspect(tool)}: #{inspect(error)}")
tool # Return original tool if transformation fails
end
end
defp transform_external_tool_schema(tool) do
IO.puts(:stderr, "Received non-map tool: #{inspect(tool)}")
tool # Return as-is if not a map
end
defp create_external_pid_file(server_name, os_pid) do
pid_dir = Path.join(System.tmp_dir(), "mcp_servers")
File.mkdir_p!(pid_dir)
@@ -1348,29 +1401,49 @@ defmodule AgentCoordinator.MCPServer do
end
end
defp get_all_unified_tools do
# Combine coordinator tools with external server tools
defp get_all_unified_tools_from_state(state) do
# Combine coordinator tools with external server tools from state
coordinator_tools = @mcp_tools
# Get external tools from the current process state
external_tools =
case Process.get(:external_tool_registry) do
nil -> []
registry -> Map.values(registry) |> List.flatten()
# Get external tools from GenServer state
external_tools = Map.values(state.tool_registry) |> List.flatten()
# Get VS Code tools only if VS Code functionality is available
vscode_tools =
try do
if Code.ensure_loaded?(VSCodeToolProvider) do
VSCodeToolProvider.get_tools()
else
IO.puts(:stderr, "VS Code tools not available - module not loaded")
[]
end
rescue
error ->
IO.puts(:stderr, "VS Code tools not available - error loading: #{inspect(error)}")
[]
end
coordinator_tools ++ external_tools
coordinator_tools ++ external_tools ++ vscode_tools
end
defp route_tool_call(tool_name, args) do
defp route_tool_call(tool_name, args, state) do
# Check if it's a coordinator tool first
coordinator_tool_names = Enum.map(@mcp_tools, & &1["name"])
if tool_name in coordinator_tool_names do
handle_coordinator_tool(tool_name, args)
else
# Try to route to external server
route_to_external_server(tool_name, args)
cond do
tool_name in coordinator_tool_names ->
handle_coordinator_tool(tool_name, args)
# Check if it's a VS Code tool
String.starts_with?(tool_name, "vscode_") ->
# Route to VS Code Tool Provider with agent context
agent_id = Map.get(args, "agent_id")
context = if agent_id, do: %{agent_id: agent_id}, else: %{}
VSCodeToolProvider.handle_tool_call(tool_name, args, context)
true ->
# Try to route to external server
route_to_external_server(tool_name, args, state)
end
end
@@ -1396,10 +1469,73 @@ defmodule AgentCoordinator.MCPServer do
end
end
defp route_to_external_server(tool_name, _args) do
# For now, return error for external tools
# This will be enhanced when we fully implement external routing
{:error, "External tool routing not yet implemented: #{tool_name}"}
defp route_to_external_server(tool_name, args, state) do
# Find which external server has this tool
server_with_tool = find_server_for_tool(tool_name, state)
case server_with_tool do
nil ->
{:error, "Tool not found in any external server: #{tool_name}"}
{server_name, server_info} ->
# Strip agent_id from args before sending to external server
# External servers don't expect this parameter
external_args = Map.delete(args, "agent_id")
# Send tool call to the external server
tool_call_request = %{
"jsonrpc" => "2.0",
"id" => System.unique_integer(),
"method" => "tools/call",
"params" => %{
"name" => tool_name,
"arguments" => external_args
}
}
case send_external_server_request(server_info, tool_call_request) do
{:ok, %{"result" => result}} ->
# Extract the actual content from the MCP response
case result do
%{"content" => content} when is_list(content) ->
# Return the first text content for simplicity
text_content = Enum.find(content, fn item ->
Map.get(item, "type") == "text"
end)
if text_content do
case Jason.decode(Map.get(text_content, "text", "{}")) do
{:ok, decoded} -> {:ok, decoded}
{:error, _} -> {:ok, Map.get(text_content, "text")}
end
else
{:ok, result}
end
_ ->
{:ok, result}
end
{:ok, %{"error" => error}} ->
{:error, "External server error: #{inspect(error)}"}
{:error, reason} ->
{:error, "Failed to call external server #{server_name}: #{reason}"}
end
end
end
defp find_server_for_tool(tool_name, state) do
# Search through all external servers for the tool
Enum.find_value(state.external_servers, fn {server_name, server_info} ->
tools = Map.get(server_info, :tools, [])
if Enum.any?(tools, fn tool -> Map.get(tool, "name") == tool_name end) do
{server_name, server_info}
else
nil
end
end)
end
# Session management functions

View File

@@ -139,13 +139,13 @@ defmodule AgentCoordinator.TaskRegistry do
{Inbox, agent.id}
) do
{:ok, _pid} ->
Logger.info("Created inbox for agent #{agent.id}")
IO.puts(:stderr, "Created inbox for agent #{agent.id}")
{:error, {:already_started, _pid}} ->
Logger.info("Inbox already exists for agent #{agent.id}")
IO.puts(:stderr, "Inbox already exists for agent #{agent.id}")
{:error, reason} ->
Logger.warning("Failed to create inbox for agent #{agent.id}: #{inspect(reason)}")
IO.puts(:stderr, "Failed to create inbox for agent #{agent.id}: #{inspect(reason)}")
end
# Publish agent registration with codebase info
@@ -752,15 +752,15 @@ defmodule AgentCoordinator.TaskRegistry do
{Inbox, agent_id}
) do
{:ok, _pid} ->
Logger.info("Created inbox for agent #{agent_id}")
IO.puts(:stderr, "Created inbox for agent #{agent_id}")
:ok
{:error, {:already_started, _pid}} ->
Logger.info("Inbox already exists for agent #{agent_id}")
IO.puts(:stderr, "Inbox already exists for agent #{agent_id}")
:ok
{:error, reason} ->
Logger.warning("Failed to create inbox for agent #{agent_id}: #{inspect(reason)}")
IO.puts(:stderr, "Failed to create inbox for agent #{agent_id}: #{inspect(reason)}")
{:error, reason}
end

View File

@@ -124,7 +124,7 @@ defmodule AgentCoordinator.VSCodePermissions do
def set_agent_permission_level(agent_id, level)
when level in [:read_only, :editor, :filesystem, :terminal, :git, :admin] do
# This would persist to a database or configuration store
Logger.info("Setting permission level for agent #{agent_id} to #{level}")
IO.puts(:stderr, "Setting permission level for agent #{agent_id} to #{level}")
:ok
end

View File

@@ -317,13 +317,13 @@ defmodule AgentCoordinator.VSCodeToolProvider do
Handle a VS Code tool call with permission checking and error handling.
"""
def handle_tool_call(tool_name, args, context) do
Logger.info("VS Code tool call: #{tool_name} with args: #{inspect(args)}")
IO.puts(:stderr, "VS Code tool call: #{tool_name} with args: #{inspect(args)}")
# Extract agent_id from args (required for all VS Code tools)
agent_id = Map.get(args, "agent_id")
if is_nil(agent_id) or agent_id == "" do
Logger.warning("Missing agent_id in VS Code tool call: #{tool_name}")
IO.puts(:stderr, "Missing agent_id in VS Code tool call: #{tool_name}")
{:error,
%{
@@ -347,7 +347,7 @@ defmodule AgentCoordinator.VSCodeToolProvider do
result
{:error, reason} ->
Logger.warning("Permission denied for #{tool_name} (agent: #{agent_id}): #{reason}")
IO.puts(:stderr, "Permission denied for #{tool_name} (agent: #{agent_id}): #{reason}")
{:error, %{"error" => "Permission denied", "reason" => reason}}
end
end
@@ -363,7 +363,7 @@ defmodule AgentCoordinator.VSCodeToolProvider do
{:error, :not_found} ->
# Agent not registered, auto-register with VS Code capabilities
Logger.info("Auto-registering new agent: #{agent_id}")
IO.puts(:stderr, "Auto-registering new agent: #{agent_id}")
capabilities = [
"coding",
@@ -384,11 +384,11 @@ defmodule AgentCoordinator.VSCodeToolProvider do
}
) do
{:ok, _result} ->
Logger.info("Successfully auto-registered agent: #{agent_id}")
IO.puts(:stderr, "Successfully auto-registered agent: #{agent_id}")
Map.put(context, :agent_id, agent_id)
{:error, reason} ->
Logger.error("Failed to auto-register agent #{agent_id}: #{inspect(reason)}")
IO.puts(:stderr, "Failed to auto-register agent #{agent_id}: #{inspect(reason)}")
# Continue anyway
Map.put(context, :agent_id, agent_id)
end
@@ -545,12 +545,14 @@ defmodule AgentCoordinator.VSCodeToolProvider do
# Logging function
defp log_tool_operation(tool_name, args, context, result) do
Logger.info("VS Code tool operation completed", %{
operation_data = %{
tool: tool_name,
agent_id: context[:agent_id],
args_summary: inspect(Map.take(args, ["path", "command", "message"])),
success: match?({:ok, _}, result),
timestamp: DateTime.utc_now()
})
}
IO.puts(:stderr, "VS Code tool operation completed: #{inspect(operation_data)}")
end
end

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env elixir
# Quick test to check if VS Code tools are properly integrated
IO.puts("Testing VS Code tool integration...")
# Start the agent coordinator
{:ok, _} = AgentCoordinator.start_link()
# Give it a moment to start
:timer.sleep(2000)
# Check if VS Code tools are available
tools = AgentCoordinator.MCPServer.get_tools()
vscode_tools = Enum.filter(tools, fn tool ->
case Map.get(tool, "name") do
"vscode_" <> _ -> true
_ -> false
end
end)
IO.puts("Found #{length(vscode_tools)} VS Code tools:")
Enum.each(vscode_tools, fn tool ->
IO.puts(" - #{tool["name"]}")
end)
if length(vscode_tools) > 0 do
IO.puts("✅ VS Code tools are properly integrated!")
else
IO.puts("❌ VS Code tools are NOT integrated")
end