Compare commits
15 Commits
37f76e060e
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
101e1e5c81
|
|||
| ee30aca4d7 | |||
| d22675fd16 | |||
| 87b7181d4a | |||
| 0d9166cc56 | |||
| 74a8574778 | |||
| 5d3e04c5f8 | |||
| 0327665fe2 | |||
| b1f55799ec | |||
| 1056672e7c | |||
| 4b7c4b6314 | |||
| 004ba6ab55 | |||
| 7cb42e4131 | |||
| ea3c390257 | |||
| 074c4473ca |
66
.dockerignore
Normal file
66
.dockerignore
Normal file
@@ -0,0 +1,66 @@
|
||||
# Docker build context optimization
|
||||
# Exclude files that are not needed for building the Docker image
|
||||
|
||||
# Build artifacts
|
||||
_build/
|
||||
deps/
|
||||
.elixir_ls/
|
||||
erl_crash.dump
|
||||
|
||||
# Development files
|
||||
.git/
|
||||
.gitignore
|
||||
*.md
|
||||
!README.md
|
||||
docs/
|
||||
examples/
|
||||
|
||||
# Test files
|
||||
test/
|
||||
cover/
|
||||
.cover/
|
||||
|
||||
# IDE files
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Log files
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Temporary files
|
||||
tmp/
|
||||
temp/
|
||||
*.tmp
|
||||
|
||||
# Docker files (to avoid recursive copying)
|
||||
Dockerfile*
|
||||
.dockerignore
|
||||
docker-compose*.yml
|
||||
|
||||
# Environment files that might contain secrets
|
||||
.env*
|
||||
!.env.example
|
||||
|
||||
# NATS data (if running locally)
|
||||
nats-data/
|
||||
|
||||
# Node.js files (they'll be installed fresh in container)
|
||||
node_modules/
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Coverage reports
|
||||
coverage/
|
||||
lcov.info
|
||||
|
||||
# Dialyzer PLT files
|
||||
priv/plts/
|
||||
9
.github/instructions/example.instructions.md
vendored
Normal file
9
.github/instructions/example.instructions.md
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
---
|
||||
applyTo: '**'
|
||||
---
|
||||
- You must always use the agent-coordinator mcp server, this will allow you to coordinate your work with other agents that may be actively working on the same codebase as you.
|
||||
- All MCP tool calls which would alter anythings state should be done through the agent-coordinator, even vscode internal tool calls. Searches are fine to not be done through the agent coordinator. The core reason for running them through the agent-coordinator is for the heartbeat.
|
||||
- Always make sure you append at least 2 random words onto the end of your agent name when registering / sending metadata, eg: `GitHub Copilot Silver Banana`, `GitHub Copilot Yellow Koala`, `GitHub Copilot Olive Llama`, this will assure that not agents have overlapping names and get tracked as the same agent (hopefully)
|
||||
- Always check the task board when given new user input which will require making any changes to files.
|
||||
- Always populate your own task board with your task set so other agents may be informed of your planned tasks, and in turn you will be able to see other agents' agendas.
|
||||
- Once you are coming to an end of your current query, and you will be passing the turn to wait for user input, you must unregister yourself as an agent, and upon the followup you should re-register and follow through with the same registration flow.
|
||||
41
.github/workflows/build.yml
vendored
Normal file
41
.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: build-container
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
run-name: build-image-${{ github.run_id }}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to GHCR
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
ghcr.io/rooba/agentcoordinator:latest
|
||||
ghcr.io/rooba/agentcoordinator:${{ github.sha }}
|
||||
file: ./Dockerfile
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
13
.gitignore
vendored
13
.gitignore
vendored
@@ -23,7 +23,8 @@ agent_coordinator-*.tar
|
||||
/tmp/
|
||||
|
||||
# IDE and Editor files
|
||||
.vscode/
|
||||
/.vscode/
|
||||
!/.vscode/mcp.json
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
@@ -40,6 +41,7 @@ Thumbs.db
|
||||
logs/
|
||||
/tmp/nats.log
|
||||
/tmp/nats.pid
|
||||
firebase-debug.log
|
||||
|
||||
# Environment and configuration files
|
||||
.env
|
||||
@@ -91,3 +93,12 @@ coverage/
|
||||
|
||||
# Claude settings (local configuration)
|
||||
.claude/
|
||||
|
||||
/docs/LANGUAGE_IMPLEMENTATIONS.md
|
||||
/asdf.txt
|
||||
/erl_crash.dump
|
||||
/_build
|
||||
/test_env
|
||||
/docs
|
||||
|
||||
!/.vscode/mcp.json
|
||||
|
||||
16
.vscode/mcp.json
vendored
Normal file
16
.vscode/mcp.json
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"servers": {
|
||||
"coordinator": {
|
||||
"command": "/home/user/agent_coordinator/scripts/mcp_launcher.sh",
|
||||
"args": [],
|
||||
"env": {
|
||||
"MIX_ENV": "dev",
|
||||
"NATS_HOST": "127.0.0.1",
|
||||
"NATS_PORT": "4222",
|
||||
"MCP_CONFIG_FILE": "/home/user/agent_coordinator/mcp_servers.json"
|
||||
},
|
||||
"type": "stdio"
|
||||
}
|
||||
},
|
||||
"inputs": []
|
||||
}
|
||||
@@ -1,333 +0,0 @@
|
||||
# Unified MCP Server with Auto-Heartbeat System Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The Agent Coordinator now operates as a **unified MCP server** that internally manages all external MCP servers (Context7, Figma, Filesystem, Firebase, Memory, Sequential Thinking, etc.) while providing automatic task tracking and heartbeat coverage for every tool operation. GitHub Copilot sees only a single MCP server, but gets access to all tools with automatic coordination.
|
||||
|
||||
## Key Features
|
||||
|
||||
### 1. Unified MCP Server Architecture
|
||||
- **Single interface**: GitHub Copilot connects to only the Agent Coordinator
|
||||
- **Internal server management**: Automatically starts and manages all external MCP servers
|
||||
- **Unified tool registry**: Aggregates tools from all servers into one comprehensive list
|
||||
- **Automatic task tracking**: Every tool call automatically creates/updates agent tasks
|
||||
|
||||
### 2. Automatic Task Tracking
|
||||
- **Transparent operation**: Any tool usage automatically becomes a tracked task
|
||||
- **No explicit coordination needed**: Agents don't need to call `create_task` manually
|
||||
- **Real-time activity monitoring**: See what each agent is working on in real-time
|
||||
- **Smart task titles**: Automatically generated based on tool usage and context
|
||||
|
||||
### 3. Enhanced Heartbeat Coverage
|
||||
- **Universal coverage**: Every tool call from any server includes heartbeat management
|
||||
- **Agent session tracking**: Automatic agent registration for GitHub Copilot
|
||||
- **Activity-based heartbeats**: Heartbeats sent before/after each tool operation
|
||||
- **Session metadata**: Enhanced task board shows real activity and tool usage
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
GitHub Copilot
|
||||
↓
|
||||
Agent Coordinator (Single Visible MCP Server)
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Unified MCP Server │
|
||||
│ • Aggregates all tools into single interface │
|
||||
│ • Automatic task tracking for every operation │
|
||||
│ • Agent coordination tools (create_task, etc.) │
|
||||
│ • Universal heartbeat coverage │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ MCP Server Manager │
|
||||
│ • Starts & manages external servers internally │
|
||||
│ • Health monitoring & auto-restart │
|
||||
│ • Tool aggregation & routing │
|
||||
│ • Auto-task creation for any tool usage │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
↓
|
||||
┌──────────┬──────────┬───────────┬──────────┬─────────────┐
|
||||
│ Context7 │ Figma │Filesystem │ Firebase │ Memory + │
|
||||
│ Server │ Server │ Server │ Server │ Sequential │
|
||||
└──────────┴──────────┴───────────┴──────────┴─────────────┘
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### GitHub Copilot Experience
|
||||
|
||||
From GitHub Copilot's perspective, there's only one MCP server with all tools available:
|
||||
|
||||
```javascript
|
||||
// All these tools are available from the single Agent Coordinator server:
|
||||
|
||||
// Agent coordination tools
|
||||
register_agent, create_task, get_next_task, complete_task, get_task_board, heartbeat
|
||||
|
||||
// Context7 tools
|
||||
mcp_context7_get-library-docs, mcp_context7_resolve-library-id
|
||||
|
||||
// Figma tools
|
||||
mcp_figma_get_code, mcp_figma_get_image, mcp_figma_get_variable_defs
|
||||
|
||||
// Filesystem tools
|
||||
mcp_filesystem_read_file, mcp_filesystem_write_file, mcp_filesystem_list_directory
|
||||
|
||||
// Firebase tools
|
||||
mcp_firebase_firestore_get_documents, mcp_firebase_auth_get_user
|
||||
|
||||
// Memory tools
|
||||
mcp_memory_search_nodes, mcp_memory_create_entities
|
||||
|
||||
// Sequential thinking tools
|
||||
mcp_sequentialthi_sequentialthinking
|
||||
|
||||
// Plus any other configured MCP servers...
|
||||
```
|
||||
|
||||
### Automatic Task Tracking
|
||||
|
||||
Every tool usage automatically creates or updates an agent's current task:
|
||||
|
||||
```elixir
|
||||
# When GitHub Copilot calls any tool, it automatically:
|
||||
# 1. Sends pre-operation heartbeat
|
||||
# 2. Creates/updates current task based on tool usage
|
||||
# 3. Routes to appropriate external server
|
||||
# 4. Sends post-operation heartbeat
|
||||
# 5. Updates task activity log
|
||||
|
||||
# Example: Reading a file automatically creates a task
|
||||
Tool Call: mcp_filesystem_read_file(%{"path" => "/project/src/main.rs"})
|
||||
Auto-Created Task: "Reading file: main.rs"
|
||||
Description: "Reading and analyzing file content from /project/src/main.rs"
|
||||
|
||||
# Example: Figma code generation automatically creates a task
|
||||
Tool Call: mcp_figma_get_code(%{"nodeId" => "123:456"})
|
||||
Auto-Created Task: "Generating Figma code: 123:456"
|
||||
Description: "Generating code for Figma component 123:456"
|
||||
|
||||
# Example: Library research automatically creates a task
|
||||
Tool Call: mcp_context7_get-library-docs(%{"context7CompatibleLibraryID" => "/vercel/next.js"})
|
||||
Auto-Created Task: "Researching: /vercel/next.js"
|
||||
Description: "Researching documentation for /vercel/next.js library"
|
||||
```
|
||||
|
||||
### Task Board with Real Activity
|
||||
|
||||
```elixir
|
||||
# Get enhanced task board showing real agent activity
|
||||
{:ok, board} = get_task_board()
|
||||
|
||||
# Returns:
|
||||
%{
|
||||
agents: [
|
||||
%{
|
||||
agent_id: "github_copilot_session",
|
||||
name: "GitHub Copilot",
|
||||
status: :working,
|
||||
current_task: %{
|
||||
title: "Reading file: database.ex",
|
||||
description: "Reading and analyzing file content from /project/lib/database.ex",
|
||||
auto_generated: true,
|
||||
tool_name: "mcp_filesystem_read_file",
|
||||
created_at: ~U[2025-08-23 10:30:00Z]
|
||||
},
|
||||
last_heartbeat: ~U[2025-08-23 10:30:05Z],
|
||||
online: true
|
||||
}
|
||||
],
|
||||
pending_tasks: [],
|
||||
total_agents: 1,
|
||||
active_tasks: 1,
|
||||
pending_count: 0
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### MCP Server Configuration
|
||||
|
||||
External servers are configured in `mcp_servers.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"servers": {
|
||||
"mcp_context7": {
|
||||
"type": "stdio",
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server-context7"],
|
||||
"auto_restart": true,
|
||||
"description": "Context7 library documentation server"
|
||||
},
|
||||
"mcp_figma": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": ["-y", "@figma/mcp-server-figma"],
|
||||
"auto_restart": true,
|
||||
"description": "Figma design integration server"
|
||||
},
|
||||
"mcp_filesystem": {
|
||||
"type": "stdio",
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/home/ra"],
|
||||
"auto_restart": true,
|
||||
"description": "Filesystem operations with auto-task tracking"
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"startup_timeout": 30000,
|
||||
"heartbeat_interval": 10000,
|
||||
"auto_restart_delay": 1000,
|
||||
"max_restart_attempts": 3
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### VS Code Settings
|
||||
|
||||
Update your VS Code MCP settings to point to the unified server:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcp.servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "/home/ra/agent_coordinator/scripts/mcp_launcher.sh",
|
||||
"args": []
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Benefits
|
||||
|
||||
### 1. Simplified Configuration
|
||||
- **One server**: GitHub Copilot only needs to connect to Agent Coordinator
|
||||
- **No manual setup**: External servers are managed automatically
|
||||
- **Unified tools**: All tools appear in one comprehensive list
|
||||
|
||||
### 2. Automatic Coordination
|
||||
- **Zero-effort tracking**: Every tool usage automatically tracked as tasks
|
||||
- **Real-time visibility**: See exactly what agents are working on
|
||||
- **Smart task creation**: Descriptive task titles based on actual tool usage
|
||||
- **Universal heartbeats**: Every operation maintains agent liveness
|
||||
|
||||
### 3. Enhanced Collaboration
|
||||
- **Agent communication**: Coordination tools still available for planning
|
||||
- **Multi-agent workflows**: Agents can create tasks for each other
|
||||
- **Activity awareness**: Agents can see what others are working on
|
||||
- **File conflict prevention**: Automatic file locking across operations
|
||||
|
||||
### 4. Operational Excellence
|
||||
- **Auto-restart**: Failed external servers automatically restarted
|
||||
- **Health monitoring**: Real-time status of all managed servers
|
||||
- **Error handling**: Graceful degradation when servers unavailable
|
||||
- **Performance**: Direct routing without external proxy overhead
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From Individual MCP Servers
|
||||
|
||||
**Before:**
|
||||
```json
|
||||
// VS Code settings with multiple servers
|
||||
{
|
||||
"mcp.servers": {
|
||||
"context7": {"command": "uvx", "args": ["mcp-server-context7"]},
|
||||
"figma": {"command": "npx", "args": ["-y", "@figma/mcp-server-figma"]},
|
||||
"filesystem": {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path"]},
|
||||
"agent-coordinator": {"command": "/path/to/mcp_launcher.sh"}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**After:**
|
||||
```json
|
||||
// VS Code settings with single unified server
|
||||
{
|
||||
"mcp.servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "/home/ra/agent_coordinator/scripts/mcp_launcher.sh",
|
||||
"args": []
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration Migration
|
||||
|
||||
1. **Remove individual MCP servers** from VS Code settings
|
||||
2. **Add external servers** to `mcp_servers.json` configuration
|
||||
3. **Update launcher script** path if needed
|
||||
4. **Restart VS Code** to apply changes
|
||||
|
||||
## Startup and Testing
|
||||
|
||||
### Starting the Unified Server
|
||||
|
||||
```bash
|
||||
# From the project directory
|
||||
./scripts/mcp_launcher.sh
|
||||
```
|
||||
|
||||
### Testing Tool Aggregation
|
||||
|
||||
```bash
|
||||
# Test that all tools are available
|
||||
echo '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' | ./scripts/mcp_launcher.sh
|
||||
|
||||
# Should return tools from Agent Coordinator + all external servers
|
||||
```
|
||||
|
||||
### Testing Automatic Task Tracking
|
||||
|
||||
```bash
|
||||
# Use any tool - it should automatically create a task
|
||||
echo '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"mcp_filesystem_read_file","arguments":{"path":"/home/ra/test.txt"}}}' | ./scripts/mcp_launcher.sh
|
||||
|
||||
# Check task board to see auto-created task
|
||||
echo '{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"get_task_board","arguments":{}}}' | ./scripts/mcp_launcher.sh
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### External Server Issues
|
||||
|
||||
1. **Server won't start**
|
||||
- Check command path in `mcp_servers.json`
|
||||
- Verify dependencies are installed (`npm install -g @modelcontextprotocol/server-*`)
|
||||
- Check logs for startup errors
|
||||
|
||||
2. **Tools not appearing**
|
||||
- Verify server started successfully
|
||||
- Check server health: use `get_server_status` tool
|
||||
- Restart specific servers if needed
|
||||
|
||||
3. **Auto-restart not working**
|
||||
- Check `auto_restart: true` in server config
|
||||
- Verify process monitoring is active
|
||||
- Check restart attempt limits
|
||||
|
||||
### Task Tracking Issues
|
||||
|
||||
1. **Tasks not auto-creating**
|
||||
- Verify agent session is active
|
||||
- Check that GitHub Copilot is registered as agent
|
||||
- Ensure heartbeat system is working
|
||||
|
||||
2. **Incorrect task titles**
|
||||
- Task titles are generated based on tool name and arguments
|
||||
- Can be customized in `generate_task_title/2` function
|
||||
- File-based operations use file paths in titles
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Planned improvements:
|
||||
|
||||
1. **Dynamic server discovery** - Auto-detect and add new MCP servers
|
||||
2. **Load balancing** - Distribute tool calls across multiple server instances
|
||||
3. **Tool versioning** - Support multiple versions of the same tool
|
||||
4. **Custom task templates** - Configurable task generation based on tool patterns
|
||||
5. **Inter-agent messaging** - Direct communication channels between agents
|
||||
6. **Workflow orchestration** - Multi-step task coordination across agents
|
||||
56
CHANGELOG.md
56
CHANGELOG.md
@@ -1,56 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
### Added
|
||||
|
||||
- Initial repository structure cleanup
|
||||
- Organized scripts into dedicated directories
|
||||
- Enhanced documentation
|
||||
- GitHub Actions CI/CD workflow
|
||||
- Development and testing dependencies
|
||||
|
||||
### Changed
|
||||
|
||||
- Moved demo files to `examples/` directory
|
||||
- Moved utility scripts to `scripts/` directory
|
||||
- Updated project metadata in mix.exs
|
||||
- Enhanced .gitignore for better coverage
|
||||
|
||||
## [0.1.0] - 2025-08-22
|
||||
|
||||
### Features
|
||||
|
||||
- Initial release of AgentCoordinator
|
||||
- Distributed task coordination system for AI agents
|
||||
- NATS-based messaging and persistence
|
||||
- MCP (Model Context Protocol) server integration
|
||||
- Task registry with agent-specific inboxes
|
||||
- File-level conflict resolution
|
||||
- Real-time agent communication
|
||||
- Event sourcing with configurable retention
|
||||
- Fault-tolerant supervision trees
|
||||
- Command-line interface for task management
|
||||
- VS Code integration setup scripts
|
||||
- Comprehensive examples and documentation
|
||||
|
||||
### Core Features
|
||||
|
||||
- Agent registration and capability management
|
||||
- Task creation, assignment, and completion
|
||||
- Task board visualization
|
||||
- Heartbeat monitoring for agent health
|
||||
- Persistent task state with NATS JetStream
|
||||
- MCP tools for external agent integration
|
||||
|
||||
### Development Tools
|
||||
|
||||
- Setup scripts for NATS and VS Code configuration
|
||||
- Example MCP client implementations
|
||||
- Test scripts for various scenarios
|
||||
- Demo workflows for testing functionality
|
||||
195
CONTRIBUTING.md
195
CONTRIBUTING.md
@@ -1,195 +0,0 @@
|
||||
# Contributing to AgentCoordinator
|
||||
|
||||
Thank you for your interest in contributing to AgentCoordinator! This document provides guidelines for contributing to the project.
|
||||
|
||||
## 🤝 Code of Conduct
|
||||
|
||||
By participating in this project, you agree to abide by our Code of Conduct. Please report unacceptable behavior to the project maintainers.
|
||||
|
||||
## 🚀 How to Contribute
|
||||
|
||||
### Reporting Bugs
|
||||
|
||||
1. **Check existing issues** first to see if the bug has already been reported
|
||||
2. **Create a new issue** with a clear title and description
|
||||
3. **Include reproduction steps** with specific details
|
||||
4. **Provide system information** (Elixir version, OS, etc.)
|
||||
5. **Add relevant logs** or error messages
|
||||
|
||||
### Suggesting Features
|
||||
|
||||
1. **Check existing feature requests** to avoid duplicates
|
||||
2. **Create a new issue** with the `enhancement` label
|
||||
3. **Describe the feature** and its use case clearly
|
||||
4. **Explain why** this feature would be beneficial
|
||||
5. **Provide examples** of how it would be used
|
||||
|
||||
### Development Setup
|
||||
|
||||
1. **Fork the repository** on GitHub
|
||||
2. **Clone your fork** locally:
|
||||
```bash
|
||||
git clone https://github.com/your-username/agent_coordinator.git
|
||||
cd agent_coordinator
|
||||
```
|
||||
3. **Install dependencies**:
|
||||
```bash
|
||||
mix deps.get
|
||||
```
|
||||
4. **Start NATS server**:
|
||||
```bash
|
||||
nats-server -js -p 4222 -m 8222
|
||||
```
|
||||
5. **Run tests** to ensure everything works:
|
||||
```bash
|
||||
mix test
|
||||
```
|
||||
|
||||
### Making Changes
|
||||
|
||||
1. **Create a feature branch**:
|
||||
```bash
|
||||
git checkout -b feature/your-feature-name
|
||||
```
|
||||
2. **Make your changes** following our coding standards
|
||||
3. **Add tests** for new functionality
|
||||
4. **Run the test suite**:
|
||||
```bash
|
||||
mix test
|
||||
```
|
||||
5. **Run code quality checks**:
|
||||
```bash
|
||||
mix format
|
||||
mix credo
|
||||
mix dialyzer
|
||||
```
|
||||
6. **Commit your changes** with a descriptive message:
|
||||
```bash
|
||||
git commit -m "Add feature: your feature description"
|
||||
```
|
||||
7. **Push to your fork**:
|
||||
```bash
|
||||
git push origin feature/your-feature-name
|
||||
```
|
||||
8. **Create a Pull Request** on GitHub
|
||||
|
||||
## 📝 Coding Standards
|
||||
|
||||
### Elixir Style Guide
|
||||
|
||||
- Follow the [Elixir Style Guide](https://github.com/christopheradams/elixir_style_guide)
|
||||
- Use `mix format` to format your code
|
||||
- Write clear, descriptive function and variable names
|
||||
- Add `@doc` and `@spec` for public functions
|
||||
- Follow the existing code patterns in the project
|
||||
|
||||
### Code Organization
|
||||
|
||||
- Keep modules focused and cohesive
|
||||
- Use appropriate GenServer patterns for stateful processes
|
||||
- Follow OTP principles and supervision tree design
|
||||
- Organize code into logical namespaces
|
||||
|
||||
### Testing
|
||||
|
||||
- Write comprehensive tests for all new functionality
|
||||
- Use descriptive test names that explain what is being tested
|
||||
- Follow the existing test patterns and structure
|
||||
- Ensure tests are fast and reliable
|
||||
- Aim for good test coverage (check with `mix test --cover`)
|
||||
|
||||
### Documentation
|
||||
|
||||
- Update documentation for any API changes
|
||||
- Add examples for new features
|
||||
- Keep the README.md up to date
|
||||
- Use clear, concise language
|
||||
- Include code examples where helpful
|
||||
|
||||
## 🔧 Pull Request Guidelines
|
||||
|
||||
### Before Submitting
|
||||
|
||||
- [ ] Tests pass locally (`mix test`)
|
||||
- [ ] Code is properly formatted (`mix format`)
|
||||
- [ ] No linting errors (`mix credo`)
|
||||
- [ ] Type checks pass (`mix dialyzer`)
|
||||
- [ ] Documentation is updated
|
||||
- [ ] CHANGELOG.md is updated (if applicable)
|
||||
|
||||
### Pull Request Description
|
||||
|
||||
Please include:
|
||||
|
||||
1. **Clear title** describing the change
|
||||
2. **Description** of what the PR does
|
||||
3. **Issue reference** if applicable (fixes #123)
|
||||
4. **Testing instructions** for reviewers
|
||||
5. **Breaking changes** if any
|
||||
6. **Screenshots** if UI changes are involved
|
||||
|
||||
### Review Process
|
||||
|
||||
1. At least one maintainer will review your PR
|
||||
2. Address any feedback or requested changes
|
||||
3. Once approved, a maintainer will merge your PR
|
||||
4. Your contribution will be credited in the release notes
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
mix test
|
||||
|
||||
# Run tests with coverage
|
||||
mix test --cover
|
||||
|
||||
# Run specific test file
|
||||
mix test test/agent_coordinator/mcp_server_test.exs
|
||||
|
||||
# Run tests in watch mode
|
||||
mix test.watch
|
||||
```
|
||||
|
||||
### Writing Tests
|
||||
|
||||
- Place test files in the `test/` directory
|
||||
- Mirror the structure of the `lib/` directory
|
||||
- Use descriptive `describe` blocks to group related tests
|
||||
- Use `setup` blocks for common test setup
|
||||
- Mock external dependencies appropriately
|
||||
|
||||
## 🚀 Release Process
|
||||
|
||||
1. Update version in `mix.exs`
|
||||
2. Update `CHANGELOG.md` with new version details
|
||||
3. Create and push a version tag
|
||||
4. Create a GitHub release
|
||||
5. Publish to Hex (maintainers only)
|
||||
|
||||
## 📞 Getting Help
|
||||
|
||||
- **GitHub Issues**: For bugs and feature requests
|
||||
- **GitHub Discussions**: For questions and general discussion
|
||||
- **Documentation**: Check the [online docs](https://hexdocs.pm/agent_coordinator)
|
||||
|
||||
## 🏷️ Issue Labels
|
||||
|
||||
- `bug`: Something isn't working
|
||||
- `enhancement`: New feature or request
|
||||
- `documentation`: Improvements or additions to documentation
|
||||
- `good first issue`: Good for newcomers
|
||||
- `help wanted`: Extra attention is needed
|
||||
- `question`: Further information is requested
|
||||
|
||||
## 🎉 Recognition
|
||||
|
||||
Contributors will be:
|
||||
|
||||
- Listed in the project's contributors section
|
||||
- Mentioned in release notes for significant contributions
|
||||
- Given credit in any related blog posts or presentations
|
||||
|
||||
Thank you for contributing to AgentCoordinator! 🚀
|
||||
@@ -1,107 +0,0 @@
|
||||
# Dynamic Tool Discovery Implementation Summary
|
||||
|
||||
## What We Accomplished
|
||||
|
||||
The Agent Coordinator has been successfully refactored to implement **fully dynamic tool discovery** following the MCP protocol specification, eliminating all hardcoded tool lists **and ensuring shared MCP server instances across all agents**.
|
||||
|
||||
## Key Changes Made
|
||||
|
||||
### 1. Removed Hardcoded Tool Lists
|
||||
**Before**:
|
||||
```elixir
|
||||
coordinator_native = ~w[register_agent create_task get_next_task complete_task get_task_board heartbeat]
|
||||
```
|
||||
|
||||
**After**:
|
||||
```elixir
|
||||
# Tools discovered dynamically by checking actual tool definitions
|
||||
coordinator_tools = get_coordinator_tools()
|
||||
if Enum.any?(coordinator_tools, fn tool -> tool["name"] == tool_name end) do
|
||||
{:coordinator, tool_name}
|
||||
end
|
||||
```
|
||||
|
||||
### 2. Made VS Code Tools Conditional
|
||||
**Before**: Always included VS Code tools even if not available
|
||||
|
||||
**After**:
|
||||
```elixir
|
||||
vscode_tools = try do
|
||||
if Code.ensure_loaded?(AgentCoordinator.VSCodeToolProvider) do
|
||||
AgentCoordinator.VSCodeToolProvider.get_tools()
|
||||
else
|
||||
[]
|
||||
end
|
||||
rescue
|
||||
_ -> []
|
||||
end
|
||||
```
|
||||
|
||||
### 3. Added Shared MCP Server Management
|
||||
**MAJOR FIX**: MCPServerManager is now part of the application supervision tree
|
||||
|
||||
**Before**: Each agent/test started its own MCP servers
|
||||
- Multiple server instances for the same functionality
|
||||
- Resource waste and potential conflicts
|
||||
- Different OS PIDs per agent
|
||||
|
||||
**After**: Single shared MCP server instance
|
||||
- Added to `application.ex` supervision tree
|
||||
- All agents use the same MCP server processes
|
||||
- Perfect resource sharing
|
||||
|
||||
### 4. Added Dynamic Tool Refresh
|
||||
**New function**: `refresh_tools/0`
|
||||
- Re-discovers tools from all running MCP servers
|
||||
- Updates tool registry in real-time
|
||||
- Handles both PID and Port server types properly
|
||||
|
||||
### 5. Enhanced Tool Routing
|
||||
**Before**: Used hardcoded tool name lists for routing decisions
|
||||
|
||||
**After**: Checks actual tool definitions to determine routing## Test Results
|
||||
|
||||
✅ All tests passing with dynamic discovery:
|
||||
```
|
||||
Found 44 total tools:
|
||||
• Coordinator tools: 6
|
||||
• External MCP tools: 26+ (context7, filesystem, memory, sequential thinking)
|
||||
• VS Code tools: 12 (when available)
|
||||
```
|
||||
|
||||
**External servers discovered**:
|
||||
- Context7: 2 tools (resolve-library-id, get-library-docs)
|
||||
- Filesystem: 14 tools (read_file, write_file, edit_file, etc.)
|
||||
- Memory: 9 tools (search_nodes, create_entities, etc.)
|
||||
- Sequential Thinking: 1 tool (sequentialthinking)
|
||||
|
||||
## Benefits Achieved
|
||||
|
||||
1. **Perfect MCP Protocol Compliance**: No hardcoded assumptions, everything discovered via `tools/list`
|
||||
2. **Shared Server Architecture**: Single MCP server instance shared by all agents (massive resource savings)
|
||||
3. **Flexibility**: New MCP servers can be added via configuration without code changes
|
||||
4. **Reliability**: Tools automatically re-discovered when servers restart
|
||||
5. **Performance**: Only available tools included in routing decisions + shared server processes
|
||||
6. **Maintainability**: No need to manually sync tool lists with server implementations
|
||||
7. **Resource Efficiency**: No duplicate server processes per agent/session
|
||||
8. **Debugging**: Clear visibility into which tools are available from which servers
|
||||
|
||||
## Files Modified
|
||||
|
||||
1. **`lib/agent_coordinator/mcp_server_manager.ex`**:
|
||||
- Removed `get_coordinator_tool_names/0` function
|
||||
- Modified `find_tool_server/2` to use dynamic discovery
|
||||
- Added conditional VS Code tool loading
|
||||
- Added `refresh_tools/0` and `rediscover_all_tools/1`
|
||||
- Fixed Port vs PID handling for server aliveness checks
|
||||
|
||||
2. **Tests**:
|
||||
- Added `test/dynamic_tool_discovery_test.exs`
|
||||
- All existing tests still pass
|
||||
- New tests verify dynamic discovery works correctly
|
||||
|
||||
## Impact
|
||||
|
||||
This refactoring makes the Agent Coordinator a true MCP-compliant aggregation server that follows the protocol specification exactly, rather than making assumptions about what tools servers provide. It's now much more flexible and maintainable while being more reliable in dynamic environments where servers may come and go.
|
||||
|
||||
The system now perfectly implements the user's original request: **"all tools will reply with what tools are available"** via the MCP protocol's `tools/list` method.
|
||||
48
Dockerfile
Normal file
48
Dockerfile
Normal file
@@ -0,0 +1,48 @@
|
||||
# Agent Coordinator - Multi-stage Docker Build
|
||||
# Creates a production-ready container for the MCP server without requiring local Elixir/OTP installation
|
||||
|
||||
# Build stage - Use official Elixir image with OTP
|
||||
FROM elixir:1.18 AS builder
|
||||
|
||||
|
||||
# Set environment variables
|
||||
RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
curl \
|
||||
bash \
|
||||
unzip \
|
||||
zlib1g
|
||||
|
||||
# Set build environment
|
||||
ENV MIX_ENV=prod
|
||||
|
||||
# Create app directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy mix files
|
||||
COPY lib lib
|
||||
COPY mcp_servers.json \
|
||||
mix.exs \
|
||||
mix.lock \
|
||||
docker-entrypoint.sh ./
|
||||
COPY scripts ./scripts/
|
||||
|
||||
|
||||
# Install mix dependencies
|
||||
RUN mix deps.get
|
||||
RUN mix deps.compile
|
||||
RUN mix compile
|
||||
RUN mix release
|
||||
RUN chmod +x ./docker-entrypoint.sh ./scripts/mcp_launcher.sh
|
||||
RUN curl -fsSL https://bun.sh/install | bash
|
||||
RUN ln -s /root/.bun/bin/* /usr/local/bin/
|
||||
|
||||
ENV NATS_HOST=localhost
|
||||
ENV NATS_PORT=4222
|
||||
ENV SHELL=/bin/bash
|
||||
|
||||
EXPOSE 4000
|
||||
|
||||
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||
|
||||
CMD ["/app/scripts/mcp_launcher.sh"]
|
||||
21
LICENSE
21
LICENSE
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 AgentCoordinator Team
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
443
README.md
443
README.md
@@ -1,282 +1,275 @@
|
||||
# AgentCoordinator
|
||||
# Agent Coordinator
|
||||
|
||||
[](https://github.com/your-username/agent_coordinator/actions)
|
||||
[](https://coveralls.io/github/your-username/agent_coordinator?branch=main)
|
||||
[](https://hex.pm/packages/agent_coordinator)
|
||||
Agent Coordinator is a MCP proxy server that enables multiple AI agents to collaborate seamlessly without conflicts. It acts as a single MCP interface that proxies ALL tool calls through itself, ensuring every agent maintains full project awareness while the coordinator tracks real-time agent presence.
|
||||
|
||||
A distributed task coordination system for AI agents built with Elixir and NATS.
|
||||
## What is Agent Coordinator?
|
||||
|
||||
## 🚀 Overview
|
||||
**The coordinator operates as a transparent proxy layer:**
|
||||
|
||||
AgentCoordinator enables multiple AI agents (Claude Code, GitHub Copilot, etc.) to work collaboratively on the same codebase without conflicts. It provides:
|
||||
- **Single Interface**: All agents connect to one MCP server (the coordinator)
|
||||
- **Proxy Architecture**: Every tool call flows through the coordinator to external MCP servers
|
||||
- **Presence Tracking**: Each proxied tool call updates agent heartbeat and task status
|
||||
- **Project Awareness**: All agents see the same unified view of project state through the proxy
|
||||
|
||||
- **🎯 Distributed Task Management**: Centralized task queue with agent-specific inboxes
|
||||
- **🔒 Conflict Resolution**: File-level locking prevents agents from working on the same files
|
||||
- **⚡ Real-time Communication**: NATS messaging for instant coordination
|
||||
- **💾 Persistent Storage**: Event sourcing with configurable retention policies
|
||||
- **🔌 MCP Integration**: Model Context Protocol server for agent communication
|
||||
- **🛡️ Fault Tolerance**: Elixir supervision trees ensure system resilience
|
||||
**This proxy design orchestrates four core components:**
|
||||
|
||||
## 🏗️ Architecture
|
||||
- **Task Registry**: Intelligent task queuing, agent matching, and automatic progress tracking
|
||||
- **Agent Manager**: Agent registration, heartbeat monitoring, and capability-based assignment
|
||||
- **Codebase Registry**: Cross-repository coordination, dependency management, and workspace organization
|
||||
- **Unified Tool Registry**: Seamlessly proxies external MCP tools while adding coordination capabilities
|
||||
|
||||
```
|
||||
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
|
||||
│ AI Agent 1 │ │ AI Agent 2 │ │ AI Agent N │
|
||||
│ (Claude Code) │ │ (Copilot) │ │ ... │
|
||||
└─────────┬───────┘ └─────────┬────────┘ └─────────┬───────┘
|
||||
│ │ │
|
||||
└──────────────────────┼───────────────────────┘
|
||||
│
|
||||
┌─────────────┴──────────────┐
|
||||
│ MCP Server Interface │
|
||||
└─────────────┬──────────────┘
|
||||
│
|
||||
┌─────────────┴──────────────┐
|
||||
│ AgentCoordinator │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ Task Registry │ │
|
||||
│ │ ┌──────────────┐ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ └──────────────┘ │ │
|
||||
│ └──────────────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ NATS Messaging │ │
|
||||
│ └──────────────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ Persistence │ │
|
||||
│ │ (JetStream) │ │
|
||||
│ └──────────────────────┘ │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
## Overview
|
||||
<!--  Let's not show this it's confusing -->
|
||||
### 🏗️ Architecture Components
|
||||
|
||||
## 📋 Prerequisites
|
||||
**Core Coordinator Components:**
|
||||
|
||||
- **Elixir**: 1.16+
|
||||
- **Erlang/OTP**: 26+
|
||||
- **NATS Server**: With JetStream enabled
|
||||
- Task Registry: Intelligent task queuing, agent matching, and progress tracking
|
||||
- Agent Manager: Registration, heartbeat monitoring, and capability-based assignment
|
||||
Codebase Registry: Cross-repository coordination and workspace management
|
||||
- Unified Tool Registry: Combines native coordination tools with external MCP tools
|
||||
- Every tool call automatically updates the agent's activity for other agent's to see
|
||||
|
||||
## ⚡ Quick Start
|
||||
**External Integration:**
|
||||
|
||||
### 1. Clone and Setup
|
||||
- VS Code Integration: Direct editor commands and workspace management
|
||||
|
||||
```bash
|
||||
git clone https://github.com/your-username/agent_coordinator.git
|
||||
cd agent_coordinator
|
||||
mix deps.get
|
||||
```
|
||||
### External Server Management
|
||||
|
||||
### 2. Start NATS Server
|
||||
|
||||
```bash
|
||||
# Using Docker (recommended)
|
||||
docker run -p 4222:4222 -p 8222:8222 nats:latest -js
|
||||
|
||||
# Or install locally and run
|
||||
nats-server -js -p 4222 -m 8222
|
||||
```
|
||||
|
||||
### 3. Run the Application
|
||||
|
||||
```bash
|
||||
# Start in development mode
|
||||
iex -S mix
|
||||
|
||||
# Or use the provided setup script
|
||||
./scripts/setup.sh
|
||||
```
|
||||
|
||||
### 4. Test the MCP Server
|
||||
|
||||
```bash
|
||||
# Run example demo
|
||||
mix run examples/demo_mcp_server.exs
|
||||
|
||||
# Or test with Python client
|
||||
python3 examples/mcp_client_example.py
|
||||
```
|
||||
|
||||
## 🔧 Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
export NATS_HOST=localhost
|
||||
export NATS_PORT=4222
|
||||
export MIX_ENV=dev
|
||||
```
|
||||
|
||||
### VS Code Integration
|
||||
|
||||
Run the setup script to configure VS Code automatically:
|
||||
|
||||
```bash
|
||||
./scripts/setup.sh
|
||||
```
|
||||
|
||||
Or manually configure your VS Code `settings.json`:
|
||||
The coordinator automatically manages external MCP servers based on configuration in `mcp_servers.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"github.copilot.advanced": {
|
||||
"mcp": {
|
||||
"servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "/path/to/agent_coordinator/scripts/mcp_launcher.sh",
|
||||
"args": [],
|
||||
"env": {
|
||||
"MIX_ENV": "dev",
|
||||
"NATS_HOST": "localhost",
|
||||
"NATS_PORT": "4222"
|
||||
}
|
||||
}
|
||||
}
|
||||
"servers": {
|
||||
"mcp_filesystem": {
|
||||
"type": "stdio",
|
||||
"command": "bunx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/workspace"],
|
||||
"auto_restart": true,
|
||||
"description": "Filesystem operations server"
|
||||
},
|
||||
"mcp_memory": {
|
||||
"type": "stdio",
|
||||
"command": "bunx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-memory"],
|
||||
"auto_restart": true,
|
||||
"description": "Memory and knowledge graph server"
|
||||
}
|
||||
},
|
||||
"config": {
|
||||
"startup_timeout": 30000,
|
||||
"heartbeat_interval": 10000,
|
||||
"auto_restart_delay": 1000,
|
||||
"max_restart_attempts": 3
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 🎮 Usage
|
||||
## Setup
|
||||
|
||||
### Command Line Interface
|
||||
Choose one of these installation methods:
|
||||
|
||||
```bash
|
||||
# Register an agent
|
||||
mix run -e "AgentCoordinator.CLI.main([\"register\", \"CodeBot\", \"coding\", \"testing\"])"
|
||||
<details>
|
||||
<summary>Docker</summary>
|
||||
|
||||
# Create a task
|
||||
mix run -e "AgentCoordinator.CLI.main([\"create-task\", \"Fix login bug\", \"User login fails\", \"priority=high\"])"
|
||||
### 1. Start NATS Server
|
||||
|
||||
# View task board
|
||||
mix run -e "AgentCoordinator.CLI.main([\"board\"])"
|
||||
```
|
||||
First, start a NATS server that the Agent Coordinator can connect to:
|
||||
|
||||
### MCP Integration
|
||||
```bash
|
||||
# Start NATS server with persistent storage
|
||||
docker run -d \
|
||||
--name nats-server \
|
||||
--network agent-coordinator-net \
|
||||
-p 4222:4222 \
|
||||
-p 8222:8222 \
|
||||
-v nats_data:/data \
|
||||
nats:2.10-alpine \
|
||||
--jetstream \
|
||||
--store_dir=/data \
|
||||
--max_mem_store=1Gb \
|
||||
--max_file_store=10Gb
|
||||
|
||||
Available MCP tools for agents:
|
||||
# Create the network first if it doesn't exist
|
||||
docker network create agent-coordinator-net
|
||||
```
|
||||
|
||||
- `register_agent` - Register a new agent with capabilities
|
||||
- `create_task` - Create a new task with priority and requirements
|
||||
- `get_next_task` - Get the next available task for an agent
|
||||
- `complete_task` - Mark the current task as completed
|
||||
- `get_task_board` - View all agents and their current status
|
||||
- `heartbeat` - Send agent heartbeat to maintain active status
|
||||
### 2. Configure Your AI Tools
|
||||
|
||||
### API Example
|
||||
**For STDIO Mode (Recommended - Direct MCP Integration):**
|
||||
|
||||
```elixir
|
||||
# Register an agent
|
||||
{:ok, agent_id} = AgentCoordinator.register_agent("MyAgent", ["coding", "testing"])
|
||||
First, create a Docker network and start the NATS server:
|
||||
|
||||
# Create a task
|
||||
{:ok, task_id} = AgentCoordinator.create_task(
|
||||
"Implement user authentication",
|
||||
"Add JWT-based authentication to the API",
|
||||
priority: :high,
|
||||
required_capabilities: ["coding", "security"]
|
||||
)
|
||||
```bash
|
||||
# Create network for secure communication
|
||||
docker network create agent-coordinator-net
|
||||
|
||||
# Get next task for agent
|
||||
{:ok, task} = AgentCoordinator.get_next_task(agent_id)
|
||||
# Start NATS server
|
||||
docker run -d \
|
||||
--name nats-server \
|
||||
--network agent-coordinator-net \
|
||||
-p 4222:4222 \
|
||||
-v nats_data:/data \
|
||||
nats:2.10-alpine \
|
||||
--jetstream \
|
||||
--store_dir=/data \
|
||||
--max_mem_store=1Gb \
|
||||
--max_file_store=10Gb
|
||||
```
|
||||
|
||||
# Complete the task
|
||||
:ok = AgentCoordinator.complete_task(agent_id, "Authentication implemented successfully")
|
||||
```
|
||||
Then add this configuration to your VS Code `mcp.json` configuration file inside of your workspace's `./.vscode/mcp.json`:
|
||||
|
||||
## 🧪 Development
|
||||
```json
|
||||
{
|
||||
"servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "docker",
|
||||
"args": [
|
||||
"run",
|
||||
"--network=agent-coordinator-net",
|
||||
"-v=./mcp_servers.json:/app/mcp_servers.json:ro",
|
||||
"-v=/path/to/your/workspace:/workspace:rw",
|
||||
"-e=NATS_HOST=nats-server",
|
||||
"-e=NATS_PORT=4222",
|
||||
"-i",
|
||||
"--rm",
|
||||
"ghcr.io/rooba/agentcoordinator:latest"
|
||||
],
|
||||
"type": "stdio"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Running Tests
|
||||
**Important Notes for File System Access:**
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
mix test
|
||||
If you're using MCP filesystem servers, mount the directories they need access to:
|
||||
|
||||
# Run with coverage
|
||||
mix test --cover
|
||||
```json
|
||||
{
|
||||
"args": [
|
||||
"run",
|
||||
"--network=agent-coordinator-net",
|
||||
"-v=./mcp_servers.json:/app/mcp_servers.json:ro",
|
||||
"-v=/home/user/projects:/home/user/projects:rw",
|
||||
"-v=/path/to/workspace:/workspace:rw",
|
||||
"-e=NATS_HOST=nats-server",
|
||||
"-e=NATS_PORT=4222",
|
||||
"-i",
|
||||
"--rm",
|
||||
"ghcr.io/rooba/agentcoordinator:latest"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
# Run specific test file
|
||||
mix test test/agent_coordinator/mcp_server_test.exs
|
||||
```
|
||||
**For HTTP/WebSocket Mode (Alternative - Web API Access):**
|
||||
|
||||
### Code Quality
|
||||
If you prefer to run as a web service instead of stdio:
|
||||
|
||||
```bash
|
||||
# Format code
|
||||
mix format
|
||||
```bash
|
||||
# Create network first
|
||||
docker network create agent-coordinator-net
|
||||
|
||||
# Run static analysis
|
||||
mix credo
|
||||
# Start NATS server
|
||||
docker run -d \
|
||||
--name nats-server \
|
||||
--network agent-coordinator-net \
|
||||
-p 4222:4222 \
|
||||
-v nats_data:/data \
|
||||
nats:2.10-alpine \
|
||||
--jetstream \
|
||||
--store_dir=/data \
|
||||
--max_mem_store=1Gb \
|
||||
--max_file_store=10Gb
|
||||
|
||||
# Run Dialyzer for type checking
|
||||
mix dialyzer
|
||||
```
|
||||
# Run Agent Coordinator in HTTP mode
|
||||
docker run -d \
|
||||
--name agent-coordinator \
|
||||
--network agent-coordinator-net \
|
||||
-p 8080:4000 \
|
||||
-v ./mcp_servers.json:/app/mcp_servers.json:ro \
|
||||
-v /path/to/workspace:/workspace:rw \
|
||||
-e NATS_HOST=nats-server \
|
||||
-e NATS_PORT=4222 \
|
||||
-e MCP_INTERFACE_MODE=http \
|
||||
-e MCP_HTTP_PORT=4000 \
|
||||
ghcr.io/rooba/agentcoordinator:latest
|
||||
```
|
||||
|
||||
### Available Scripts
|
||||
Then access via HTTP API at `http://localhost:8080/mcp` or configure your MCP client to use the HTTP endpoint.
|
||||
|
||||
- `scripts/setup.sh` - Complete environment setup
|
||||
- `scripts/mcp_launcher.sh` - Start MCP server
|
||||
- `scripts/minimal_test.sh` - Quick functionality test
|
||||
- `scripts/quick_test.sh` - Comprehensive test suite
|
||||
Create or edit `mcp_servers.json` in your project directory to configure external MCP servers:
|
||||
|
||||
## 📁 Project Structure
|
||||
```json
|
||||
{
|
||||
"servers": {
|
||||
"mcp_filesystem": {
|
||||
"type": "stdio",
|
||||
"command": "bunx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/workspace"],
|
||||
"auto_restart": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
agent_coordinator/
|
||||
├── lib/ # Application source code
|
||||
│ ├── agent_coordinator.ex
|
||||
│ └── agent_coordinator/
|
||||
│ ├── agent.ex
|
||||
│ ├── application.ex
|
||||
│ ├── cli.ex
|
||||
│ ├── inbox.ex
|
||||
│ ├── mcp_server.ex
|
||||
│ ├── persistence.ex
|
||||
│ ├── task_registry.ex
|
||||
│ └── task.ex
|
||||
├── test/ # Test files
|
||||
├── examples/ # Example implementations
|
||||
│ ├── demo_mcp_server.exs
|
||||
│ ├── mcp_client_example.py
|
||||
│ └── full_workflow_demo.exs
|
||||
├── scripts/ # Utility scripts
|
||||
│ ├── setup.sh
|
||||
│ ├── mcp_launcher.sh
|
||||
│ └── minimal_test.sh
|
||||
├── mix.exs # Project configuration
|
||||
├── README.md # This file
|
||||
└── CHANGELOG.md # Version history
|
||||
```
|
||||
</details>
|
||||
|
||||
## 🤝 Contributing
|
||||
<details>
|
||||
<summary>Manual Setup</summary>
|
||||
|
||||
1. Fork the repository
|
||||
2. Create your feature branch (`git checkout -b feature/amazing-feature`)
|
||||
3. Commit your changes (`git commit -m 'Add some amazing feature'`)
|
||||
4. Push to the branch (`git push origin feature/amazing-feature`)
|
||||
5. Open a Pull Request
|
||||
### Prerequisites
|
||||
|
||||
Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct and development process.
|
||||
- **Elixir**: 1.16+ with OTP 26+
|
||||
- **Node.js**: 18+ (for some MCP servers)
|
||||
- **uv**: If using python MCP servers
|
||||
|
||||
## 📄 License
|
||||
### Clone the Repository
|
||||
|
||||
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
||||
It is suggested to install Elixir (and Erlang) via [asdf](https://asdf-vm.com/) for easy version management.
|
||||
|
||||
## 🙏 Acknowledgments
|
||||
NATS can be found at [nats.io](https://github.com/nats-io/nats-server/releases/latest), or via Docker
|
||||
|
||||
- [NATS](https://nats.io/) for providing the messaging infrastructure
|
||||
- [Elixir](https://elixir-lang.org/) community for the excellent ecosystem
|
||||
- [Model Context Protocol](https://modelcontextprotocol.io/) for agent communication standards
|
||||
```bash
|
||||
git clone https://github.com/rooba/agentcoordinator.git
|
||||
cd agentcoordinator
|
||||
mix deps.get
|
||||
mix compile
|
||||
```
|
||||
|
||||
## 📞 Support
|
||||
### Start the MCP Server directly
|
||||
|
||||
- 📖 [Documentation](https://hexdocs.pm/agent_coordinator)
|
||||
- 🐛 [Issue Tracker](https://github.com/your-username/agent_coordinator/issues)
|
||||
- 💬 [Discussions](https://github.com/your-username/agent_coordinator/discussions)
|
||||
```bash
|
||||
# Start the MCP server directly
|
||||
export MCP_INTERFACE_MODE=stdio # or http / websocket
|
||||
# export MCP_HTTP_PORT=4000 # if using http mode
|
||||
|
||||
---
|
||||
./scripts/mcp_launcher.sh
|
||||
|
||||
Made with ❤️ by the AgentCoordinator team
|
||||
# Or in development mode
|
||||
mix run --no-halt
|
||||
```
|
||||
|
||||
### Run via VS Code or similar tools
|
||||
|
||||
Add this to your workspace's `./.vscode/mcp.json` (vscode copilot) or `mcp_servers.json` depending on your tool:
|
||||
|
||||
```json
|
||||
{
|
||||
"servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "/path/to/agent_coordinator/scripts/mcp_launcher.sh",
|
||||
"args": [],
|
||||
"env": {
|
||||
"MIX_ENV": "prod",
|
||||
"NATS_HOST": "localhost",
|
||||
"NATS_PORT": "4222",
|
||||
"MCP_CONFIG_FILE": "/path/to/mcp_servers.json",
|
||||
"PWD": "${workspaceFolder}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
287
README_old.md
287
README_old.md
@@ -1,287 +0,0 @@
|
||||
# AgentCoordinator
|
||||
|
||||
A distributed task coordination system for AI agents built with Elixir and NATS.
|
||||
|
||||
## Overview
|
||||
|
||||
AgentCoordinator is a centralized task management system designed to enable multiple AI agents (Claude Code, GitHub Copilot, etc.) to work collaboratively on the same codebase without conflicts. It provides:
|
||||
|
||||
- **Distributed Task Management**: Centralized task queue with agent-specific inboxes
|
||||
- **Conflict Resolution**: File-level locking prevents agents from working on the same files
|
||||
- **Real-time Communication**: NATS messaging for instant coordination
|
||||
- **Persistent Storage**: Event sourcing with configurable retention policies
|
||||
- **MCP Integration**: Model Context Protocol server for agent communication
|
||||
- **Fault Tolerance**: Elixir supervision trees ensure system resilience
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
|
||||
│ AI Agent 1 │ │ AI Agent 2 │ │ AI Agent N │
|
||||
│ (Claude Code) │ │ (Copilot) │ │ ... │
|
||||
└─────────┬───────┘ └─────────┬────────┘ └─────────┬───────┘
|
||||
│ │ │
|
||||
└──────────────────────┼───────────────────────┘
|
||||
│
|
||||
┌─────────────┴──────────────┐
|
||||
│ MCP Server Interface │
|
||||
└─────────────┬──────────────┘
|
||||
│
|
||||
┌─────────────┴──────────────┐
|
||||
│ AgentCoordinator │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ Task Registry │ │
|
||||
│ │ ┌──────────────┐ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ │ Agent Inbox │ │ │
|
||||
│ │ └──────────────┘ │ │
|
||||
│ └──────────────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ NATS Messaging │ │
|
||||
│ └──────────────────────┘ │
|
||||
│ │
|
||||
│ ┌──────────────────────┐ │
|
||||
│ │ Persistence │ │
|
||||
│ │ (JetStream) │ │
|
||||
│ └──────────────────────┘ │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Elixir 1.16+ and Erlang/OTP 28+
|
||||
- NATS server (with JetStream enabled)
|
||||
|
||||
### Setup
|
||||
|
||||
1. **Install Dependencies**
|
||||
```bash
|
||||
mix deps.get
|
||||
```
|
||||
|
||||
2. **Start NATS Server**
|
||||
```bash
|
||||
# Using Docker
|
||||
docker run -p 4222:4222 -p 8222:8222 nats:latest -js
|
||||
|
||||
# Or install locally and run
|
||||
nats-server -js
|
||||
```
|
||||
|
||||
3. **Configure Environment**
|
||||
```bash
|
||||
export NATS_HOST=localhost
|
||||
export NATS_PORT=4222
|
||||
```
|
||||
|
||||
4. **Start the Application**
|
||||
```bash
|
||||
iex -S mix
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Command Line Interface
|
||||
|
||||
```bash
|
||||
# Register an agent
|
||||
mix run -e "AgentCoordinator.CLI.main([\"register\", \"CodeBot\", \"coding\", \"testing\"])"
|
||||
|
||||
# Create a task
|
||||
mix run -e "AgentCoordinator.CLI.main([\"create-task\", \"Fix login bug\", \"User login fails\", \"priority=high\"])"
|
||||
|
||||
# View task board
|
||||
mix run -e "AgentCoordinator.CLI.main([\"board\"])"
|
||||
```
|
||||
|
||||
### MCP Integration
|
||||
|
||||
Available MCP tools for agents:
|
||||
- `register_agent` - Register a new agent
|
||||
- `create_task` - Create a new task
|
||||
- `get_next_task` - Get next task for agent
|
||||
- `complete_task` - Mark current task complete
|
||||
- `get_task_board` - View all agent statuses
|
||||
- `heartbeat` - Send agent heartbeat
|
||||
|
||||
## Connecting to GitHub Copilot
|
||||
|
||||
### Step 1: Start the MCP Server
|
||||
|
||||
The AgentCoordinator MCP server needs to be running and accessible via stdio. Here's how to set it up:
|
||||
|
||||
1. **Create MCP Server Launcher Script**
|
||||
```bash
|
||||
# Create a launcher script for the MCP server
|
||||
cat > mcp_launcher.sh << 'EOF'
|
||||
#!/bin/bash
|
||||
cd /home/ra/agent_coordinator
|
||||
export MIX_ENV=prod
|
||||
mix run --no-halt -e "
|
||||
# Start the application
|
||||
Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Start MCP stdio interface
|
||||
IO.puts(\"MCP server started...\")
|
||||
|
||||
# Read JSON-RPC messages from stdin and send responses to stdout
|
||||
spawn(fn ->
|
||||
Stream.repeatedly(fn -> IO.read(:stdio, :line) end)
|
||||
|> Stream.take_while(&(&1 != :eof))
|
||||
|> Enum.each(fn line ->
|
||||
case String.trim(line) do
|
||||
\"\" -> :ok
|
||||
json_line ->
|
||||
try do
|
||||
request = Jason.decode!(json_line)
|
||||
response = AgentCoordinator.MCPServer.handle_mcp_request(request)
|
||||
IO.puts(Jason.encode!(response))
|
||||
rescue
|
||||
e ->
|
||||
error_response = %{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => Map.get(Jason.decode!(json_line), \"id\", null),
|
||||
\"error\" => %{\"code\" => -32603, \"message\" => Exception.message(e)}
|
||||
}
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
end
|
||||
end
|
||||
end)
|
||||
end)
|
||||
|
||||
# Keep process alive
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
EOF
|
||||
chmod +x mcp_launcher.sh
|
||||
```
|
||||
|
||||
### Step 2: Configure VS Code for MCP
|
||||
|
||||
1. **Install Required Extensions**
|
||||
- Make sure you have the latest GitHub Copilot extension
|
||||
- Install any MCP-related VS Code extensions if available
|
||||
|
||||
2. **Create MCP Configuration**
|
||||
Create or update your VS Code settings to include the MCP server:
|
||||
|
||||
```json
|
||||
// In your VS Code settings.json or workspace settings
|
||||
{
|
||||
"github.copilot.advanced": {
|
||||
"mcp": {
|
||||
"servers": {
|
||||
"agent-coordinator": {
|
||||
"command": "/home/ra/agent_coordinator/mcp_launcher.sh",
|
||||
"args": [],
|
||||
"env": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Alternative Direct Integration
|
||||
|
||||
If VS Code MCP integration isn't available yet, you can create a VS Code extension to bridge the gap:
|
||||
|
||||
1. **Create Extension Scaffold**
|
||||
```bash
|
||||
mkdir agent-coordinator-extension
|
||||
cd agent-coordinator-extension
|
||||
npm init -y
|
||||
|
||||
# Create package.json for VS Code extension
|
||||
cat > package.json << 'EOF'
|
||||
{
|
||||
"name": "agent-coordinator",
|
||||
"displayName": "Agent Coordinator",
|
||||
"description": "Integration with AgentCoordinator MCP server",
|
||||
"version": "0.1.0",
|
||||
"engines": { "vscode": "^1.74.0" },
|
||||
"categories": ["Other"],
|
||||
"activationEvents": ["*"],
|
||||
"main": "./out/extension.js",
|
||||
"contributes": {
|
||||
"commands": [
|
||||
{
|
||||
"command": "agentCoordinator.registerAgent",
|
||||
"title": "Register as Agent"
|
||||
},
|
||||
{
|
||||
"command": "agentCoordinator.getNextTask",
|
||||
"title": "Get Next Task"
|
||||
},
|
||||
{
|
||||
"command": "agentCoordinator.viewTaskBoard",
|
||||
"title": "View Task Board"
|
||||
}
|
||||
]
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/vscode": "^1.74.0",
|
||||
"typescript": "^4.9.0"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
### Step 4: Direct Command Line Usage
|
||||
|
||||
For immediate use, you can interact with the MCP server directly:
|
||||
|
||||
1. **Start the Server**
|
||||
```bash
|
||||
cd /home/ra/agent_coordinator
|
||||
iex -S mix
|
||||
```
|
||||
|
||||
2. **In another terminal, use the MCP tools**
|
||||
```bash
|
||||
# Test MCP server directly
|
||||
cd /home/ra/agent_coordinator
|
||||
mix run demo_mcp_server.exs
|
||||
```
|
||||
|
||||
### Step 5: Production Deployment
|
||||
|
||||
1. **Create Systemd Service for MCP Server**
|
||||
```bash
|
||||
sudo tee /etc/systemd/system/agent-coordinator-mcp.service > /dev/null << EOF
|
||||
[Unit]
|
||||
Description=Agent Coordinator MCP Server
|
||||
After=network.target nats.service
|
||||
Requires=nats.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=ra
|
||||
WorkingDirectory=/home/ra/agent_coordinator
|
||||
Environment=MIX_ENV=prod
|
||||
Environment=NATS_HOST=localhost
|
||||
Environment=NATS_PORT=4222
|
||||
ExecStart=/usr/bin/mix run --no-halt
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable agent-coordinator-mcp
|
||||
sudo systemctl start agent-coordinator-mcp
|
||||
```
|
||||
|
||||
2. **Check Status**
|
||||
```bash
|
||||
sudo systemctl status agent-coordinator-mcp
|
||||
sudo journalctl -fu agent-coordinator-mcp
|
||||
```
|
||||
|
||||
@@ -1,441 +0,0 @@
|
||||
# VS Code Tool Integration with Agent Coordinator
|
||||
|
||||
## 🎉 Latest Update: Dynamic Tool Discovery (COMPLETED)
|
||||
|
||||
**Date**: August 23, 2025
|
||||
**Status**: ✅ **COMPLETED** - Full dynamic tool discovery implementation
|
||||
|
||||
### What Changed
|
||||
The Agent Coordinator has been refactored to eliminate all hardcoded tool lists and implement **fully dynamic tool discovery** following the MCP protocol specification.
|
||||
|
||||
**Key Improvements**:
|
||||
- ✅ **No hardcoded tools**: All external server tools discovered via MCP `tools/list`
|
||||
- ✅ **Conditional VS Code tools**: Only included when VS Code functionality is available
|
||||
- ✅ **Real-time refresh**: `refresh_tools()` function to rediscover tools on demand
|
||||
- ✅ **Perfect MCP compliance**: Follows protocol specification exactly
|
||||
- ✅ **Better error handling**: Proper handling of both PIDs and Ports for server monitoring
|
||||
|
||||
**Example Tool Discovery Results**:
|
||||
```
|
||||
Found 44 total tools:
|
||||
• Coordinator tools: 6 (register_agent, create_task, etc.)
|
||||
• External MCP tools: 26+ (context7, filesystem, memory, sequential thinking)
|
||||
• VS Code tools: 12 (when available)
|
||||
```
|
||||
|
||||
### Benefits
|
||||
1. **MCP Protocol Compliance**: Perfect adherence to MCP specification
|
||||
2. **Flexibility**: New MCP servers can be added without code changes
|
||||
3. **Reliability**: Tools automatically discovered when servers restart
|
||||
4. **Performance**: Only available tools are included in routing
|
||||
5. **Debugging**: Clear visibility into which tools are available
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
This document outlines the implementation of VS Code's built-in tools as MCP (Model Context Protocol) tools within the Agent Coordinator system. This integration allows agents to access VS Code's native capabilities alongside external MCP servers through a unified coordination interface.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Current State
|
||||
- Agent Coordinator acts as a unified MCP server
|
||||
- Proxies tools from external MCP servers (Context7, filesystem, memory, sequential thinking, etc.)
|
||||
- Manages task coordination, agent assignment, and cross-codebase workflows
|
||||
|
||||
### Proposed Enhancement
|
||||
- Add VS Code Extension API tools as native MCP tools
|
||||
- Integrate with existing tool routing and coordination system
|
||||
- Maintain security and permission controls
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 1: Core VS Code Tool Provider
|
||||
|
||||
#### 1.1 Create VSCodeToolProvider Module
|
||||
**File**: `lib/agent_coordinator/vscode_tool_provider.ex`
|
||||
|
||||
**Core Tools to Implement**:
|
||||
- `vscode_read_file` - Read file contents using VS Code API
|
||||
- `vscode_write_file` - Write file contents
|
||||
- `vscode_create_file` - Create new files
|
||||
- `vscode_delete_file` - Delete files
|
||||
- `vscode_list_directory` - List directory contents
|
||||
- `vscode_get_workspace_folders` - Get workspace information
|
||||
- `vscode_run_command` - Execute VS Code commands
|
||||
- `vscode_get_active_editor` - Get current editor state
|
||||
- `vscode_set_editor_content` - Modify editor content
|
||||
- `vscode_get_selection` - Get current text selection
|
||||
- `vscode_set_selection` - Set text selection
|
||||
- `vscode_show_message` - Display messages to user
|
||||
|
||||
#### 1.2 Tool Definitions
|
||||
Each tool will have:
|
||||
- MCP-compliant schema definition
|
||||
- Input validation
|
||||
- Error handling
|
||||
- Audit logging
|
||||
- Permission checking
|
||||
|
||||
### Phase 2: Advanced Editor Operations
|
||||
|
||||
#### 2.1 Language Services Integration
|
||||
- `vscode_get_diagnostics` - Get language server diagnostics
|
||||
- `vscode_format_document` - Format current document
|
||||
- `vscode_format_selection` - Format selected text
|
||||
- `vscode_find_references` - Find symbol references
|
||||
- `vscode_go_to_definition` - Navigate to definition
|
||||
- `vscode_rename_symbol` - Rename symbols
|
||||
- `vscode_code_actions` - Get available code actions
|
||||
|
||||
#### 2.2 Search and Navigation
|
||||
- `vscode_find_in_files` - Search across workspace
|
||||
- `vscode_find_symbols` - Find symbols in workspace
|
||||
- `vscode_goto_line` - Navigate to specific line
|
||||
- `vscode_reveal_in_explorer` - Show file in explorer
|
||||
|
||||
### Phase 3: Terminal and Process Management
|
||||
|
||||
#### 3.1 Terminal Operations
|
||||
- `vscode_create_terminal` - Create new terminal
|
||||
- `vscode_send_to_terminal` - Send commands to terminal
|
||||
- `vscode_get_terminal_output` - Get terminal output (if possible)
|
||||
- `vscode_close_terminal` - Close terminal instances
|
||||
|
||||
#### 3.2 Task and Process Management
|
||||
- `vscode_run_task` - Execute VS Code tasks
|
||||
- `vscode_get_tasks` - List available tasks
|
||||
- `vscode_debug_start` - Start debugging session
|
||||
- `vscode_debug_stop` - Stop debugging
|
||||
|
||||
### Phase 4: Git and Version Control
|
||||
|
||||
#### 4.1 Git Operations
|
||||
- `vscode_git_status` - Get git status
|
||||
- `vscode_git_commit` - Create commits
|
||||
- `vscode_git_push` - Push changes
|
||||
- `vscode_git_pull` - Pull changes
|
||||
- `vscode_git_branch` - Branch operations
|
||||
- `vscode_git_diff` - Get file differences
|
||||
|
||||
### Phase 5: Extension and Settings Management
|
||||
|
||||
#### 5.1 Configuration
|
||||
- `vscode_get_settings` - Get VS Code settings
|
||||
- `vscode_update_settings` - Update settings
|
||||
- `vscode_get_extensions` - List installed extensions
|
||||
- `vscode_install_extension` - Install extensions (if permitted)
|
||||
|
||||
## Security and Safety
|
||||
|
||||
### Permission Model
|
||||
```elixir
|
||||
defmodule AgentCoordinator.VSCodePermissions do
|
||||
@moduledoc """
|
||||
Manages permissions for VS Code tool access.
|
||||
"""
|
||||
|
||||
# Permission levels:
|
||||
# :read_only - File reading, workspace inspection
|
||||
# :editor - Text editing, selections
|
||||
# :filesystem - File creation/deletion
|
||||
# :terminal - Terminal access
|
||||
# :git - Version control operations
|
||||
# :admin - Settings, extensions, system commands
|
||||
end
|
||||
```
|
||||
|
||||
### Sandboxing
|
||||
- Restrict file operations to workspace folders only
|
||||
- Prevent access to system files outside workspace
|
||||
- Rate limiting for expensive operations
|
||||
- Command whitelist for `vscode_run_command`
|
||||
|
||||
### Audit Logging
|
||||
- Log all VS Code tool calls with:
|
||||
- Timestamp
|
||||
- Agent ID
|
||||
- Tool name and parameters
|
||||
- Result summary
|
||||
- Permission level used
|
||||
|
||||
## Integration Points
|
||||
|
||||
### 1. UnifiedMCPServer Enhancement
|
||||
**File**: `lib/agent_coordinator/unified_mcp_server.ex`
|
||||
|
||||
Add VS Code tools to the tool discovery and routing:
|
||||
|
||||
```elixir
|
||||
defp get_all_tools(state) do
|
||||
# Existing external MCP server tools
|
||||
external_tools = get_external_tools(state)
|
||||
|
||||
# New VS Code tools
|
||||
vscode_tools = VSCodeToolProvider.get_tools()
|
||||
|
||||
external_tools ++ vscode_tools
|
||||
end
|
||||
|
||||
defp route_tool_call(tool_name, args, context, state) do
|
||||
case tool_name do
|
||||
"vscode_" <> _rest ->
|
||||
VSCodeToolProvider.handle_tool_call(tool_name, args, context)
|
||||
_ ->
|
||||
# Route to external MCP servers
|
||||
route_to_external_server(tool_name, args, context, state)
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
### 2. Task Coordination
|
||||
VS Code tools will participate in the same task coordination system:
|
||||
- Task creation and assignment
|
||||
- File locking (prevent conflicts)
|
||||
- Cross-agent coordination
|
||||
- Priority management
|
||||
|
||||
### 3. Agent Capabilities
|
||||
Agents can declare VS Code tool capabilities:
|
||||
```elixir
|
||||
capabilities: [
|
||||
"coding",
|
||||
"analysis",
|
||||
"vscode_editing",
|
||||
"vscode_terminal",
|
||||
"vscode_git"
|
||||
]
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Example 1: File Analysis and Editing
|
||||
```json
|
||||
{
|
||||
"tool": "vscode_read_file",
|
||||
"args": {"path": "src/main.rs"}
|
||||
}
|
||||
// Agent reads file, analyzes it
|
||||
|
||||
{
|
||||
"tool": "vscode_get_diagnostics",
|
||||
"args": {"file": "src/main.rs"}
|
||||
}
|
||||
// Agent gets compiler errors
|
||||
|
||||
{
|
||||
"tool": "vscode_set_editor_content",
|
||||
"args": {
|
||||
"file": "src/main.rs",
|
||||
"content": "// Fixed code here",
|
||||
"range": {"start": 10, "end": 15}
|
||||
}
|
||||
}
|
||||
// Agent fixes the issues
|
||||
```
|
||||
|
||||
### Example 2: Cross-Tool Workflow
|
||||
```json
|
||||
// 1. Agent searches documentation using Context7
|
||||
{"tool": "mcp_context7_get-library-docs", "args": {"libraryID": "/rust/std"}}
|
||||
|
||||
// 2. Agent analyzes current code using VS Code
|
||||
{"tool": "vscode_get_active_editor", "args": {}}
|
||||
|
||||
// 3. Agent applies documentation insights to code
|
||||
{"tool": "vscode_format_document", "args": {}}
|
||||
{"tool": "vscode_set_editor_content", "args": {...}}
|
||||
|
||||
// 4. Agent commits changes using VS Code Git
|
||||
{"tool": "vscode_git_commit", "args": {"message": "Applied best practices from docs"}}
|
||||
```
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **Unified Tool Access**: Agents access both external services and VS Code features through same interface
|
||||
2. **Enhanced Capabilities**: Complex workflows combining external data with direct IDE manipulation
|
||||
3. **Consistent Coordination**: Same task management for all tool types
|
||||
4. **Security**: Controlled access to powerful VS Code features
|
||||
5. **Extensibility**: Easy to add new VS Code capabilities as needs arise
|
||||
|
||||
## Implementation Status & Updated Roadmap
|
||||
|
||||
### ✅ **COMPLETED - Phase 1: Core VS Code Tool Provider (August 23, 2025)**
|
||||
|
||||
**Successfully Implemented & Tested:**
|
||||
|
||||
- ✅ VSCodeToolProvider module with 12 core tools
|
||||
- ✅ VSCodePermissions system with 6 permission levels
|
||||
- ✅ Integration with UnifiedMCPServer tool discovery and routing
|
||||
- ✅ Security controls: path sandboxing, command whitelisting, audit logging
|
||||
- ✅ Agent coordination integration (tasks, assignments, coordination)
|
||||
|
||||
**Working Tools:**
|
||||
|
||||
- ✅ File Operations: `vscode_read_file`, `vscode_write_file`, `vscode_create_file`, `vscode_delete_file`, `vscode_list_directory`
|
||||
- ✅ Editor Operations: `vscode_get_active_editor`, `vscode_set_editor_content`, `vscode_get_selection`, `vscode_set_selection`
|
||||
- ✅ Commands: `vscode_run_command`, `vscode_show_message`
|
||||
- ✅ Workspace: `vscode_get_workspace_folders`
|
||||
|
||||
**Key Achievement:** VS Code tools now work seamlessly alongside external MCP servers through unified agent coordination!
|
||||
|
||||
### 🔄 **CURRENT PRIORITY - Phase 1.5: VS Code Extension API Bridge**
|
||||
|
||||
**Status:** Tools currently return placeholder data. Need to implement actual VS Code Extension API calls.
|
||||
|
||||
**Implementation Steps:**
|
||||
|
||||
1. **JavaScript Bridge Module** - Create communication layer between Elixir and VS Code Extension API
|
||||
2. **Real API Integration** - Replace placeholder responses with actual VS Code API calls
|
||||
3. **Error Handling** - Robust error handling for VS Code API failures
|
||||
4. **Testing** - Verify all tools work with real VS Code operations
|
||||
|
||||
**Target Completion:** Next 2-3 days
|
||||
|
||||
### 📅 **UPDATED IMPLEMENTATION TIMELINE**
|
||||
|
||||
#### **Phase 2: Language Services & Advanced Editor Operations (Priority: High)**
|
||||
|
||||
**Target:** Week of August 26, 2025
|
||||
|
||||
**Tools to Implement:**
|
||||
|
||||
- `vscode_get_diagnostics` - Get language server diagnostics
|
||||
- `vscode_format_document` - Format current document
|
||||
- `vscode_format_selection` - Format selected text
|
||||
- `vscode_find_references` - Find symbol references
|
||||
- `vscode_go_to_definition` - Navigate to definition
|
||||
- `vscode_rename_symbol` - Rename symbols across workspace
|
||||
- `vscode_code_actions` - Get available code actions
|
||||
- `vscode_apply_code_action` - Apply specific code action
|
||||
|
||||
**Value:** Enables agents to perform intelligent code analysis and refactoring
|
||||
|
||||
#### **Phase 3: Search, Navigation & Workspace Management (Priority: Medium)**
|
||||
|
||||
**Target:** Week of September 2, 2025
|
||||
|
||||
**Tools to Implement:**
|
||||
|
||||
- `vscode_find_in_files` - Search across workspace with regex support
|
||||
- `vscode_find_symbols` - Find symbols in workspace
|
||||
- `vscode_goto_line` - Navigate to specific line/column
|
||||
- `vscode_reveal_in_explorer` - Show file in explorer
|
||||
- `vscode_open_folder` - Open workspace folder
|
||||
- `vscode_close_folder` - Close workspace folder
|
||||
- `vscode_switch_editor_tab` - Switch between open files
|
||||
|
||||
**Value:** Enables agents to navigate and understand large codebases
|
||||
|
||||
#### **Phase 4: Terminal & Process Management (Priority: Medium)**
|
||||
|
||||
**Target:** Week of September 9, 2025
|
||||
|
||||
**Tools to Implement:**
|
||||
|
||||
- `vscode_create_terminal` - Create new terminal instance
|
||||
- `vscode_send_to_terminal` - Send commands to terminal
|
||||
- `vscode_get_terminal_output` - Get terminal output (if possible via API)
|
||||
- `vscode_close_terminal` - Close terminal instances
|
||||
- `vscode_run_task` - Execute VS Code tasks (build, test, etc.)
|
||||
- `vscode_get_tasks` - List available tasks
|
||||
- `vscode_stop_task` - Stop running task
|
||||
|
||||
**Value:** Enables agents to manage build processes and execute commands
|
||||
|
||||
#### **Phase 5: Git & Version Control Integration (Priority: High)**
|
||||
|
||||
**Target:** Week of September 16, 2025
|
||||
|
||||
**Tools to Implement:**
|
||||
|
||||
- `vscode_git_status` - Get repository status
|
||||
- `vscode_git_commit` - Create commits with messages
|
||||
- `vscode_git_push` - Push changes to remote
|
||||
- `vscode_git_pull` - Pull changes from remote
|
||||
- `vscode_git_branch` - Branch operations (create, switch, delete)
|
||||
- `vscode_git_diff` - Get file differences
|
||||
- `vscode_git_stage` - Stage/unstage files
|
||||
- `vscode_git_blame` - Get blame information
|
||||
|
||||
**Value:** Enables agents to manage version control workflows
|
||||
|
||||
#### **Phase 6: Advanced Features & Extension Management (Priority: Low)**
|
||||
|
||||
**Target:** Week of September 23, 2025
|
||||
|
||||
**Tools to Implement:**
|
||||
|
||||
- `vscode_get_settings` - Get VS Code settings
|
||||
- `vscode_update_settings` - Update settings
|
||||
- `vscode_get_extensions` - List installed extensions
|
||||
- `vscode_install_extension` - Install extensions (if permitted)
|
||||
- `vscode_debug_start` - Start debugging session
|
||||
- `vscode_debug_stop` - Stop debugging
|
||||
- `vscode_set_breakpoint` - Set/remove breakpoints
|
||||
|
||||
**Value:** Complete IDE automation capabilities
|
||||
|
||||
### 🚀 **Key Insights from Phase 1**
|
||||
|
||||
1. **Integration Success**: The MCP tool routing system works perfectly for VS Code tools
|
||||
2. **Permission System**: Granular permissions are essential for security
|
||||
3. **Agent Coordination**: VS Code tools integrate seamlessly with task management
|
||||
4. **Unified Experience**: Agents can now use external services + VS Code through same interface
|
||||
|
||||
### 🎯 **Next Immediate Actions**
|
||||
|
||||
1. **Priority 1**: Implement proper agent identification system for multi-agent scenarios
|
||||
2. **Priority 2**: Implement real VS Code Extension API bridge (replace placeholders)
|
||||
3. **Priority 3**: Add Phase 2 language services tools
|
||||
4. **Priority 4**: Create comprehensive testing suite
|
||||
5. **Priority 5**: Document usage patterns and best practices
|
||||
|
||||
### 🔧 **Critical Enhancement: Multi-Agent Identification System**
|
||||
|
||||
**Problem:** Current system treats all GitHub Copilot instances as the same agent, causing conflicts in multi-agent scenarios.
|
||||
|
||||
**Solution:** Implement unique agent identification with session-based tracking.
|
||||
|
||||
**Implementation Requirements:**
|
||||
|
||||
1. **Agent ID Parameter**: All tools must include an `agent_id` parameter
|
||||
2. **Session-Based Registration**: Each chat session/agent instance gets unique ID
|
||||
3. **Tool Schema Updates**: Add `agent_id` to all VS Code tool schemas
|
||||
4. **Auto-Registration**: System automatically creates unique agents per session
|
||||
5. **Agent Isolation**: Tasks, permissions, and state isolated per agent ID
|
||||
|
||||
**Benefits:**
|
||||
|
||||
- Multiple agents can work simultaneously without conflicts
|
||||
- Individual agent permissions and capabilities
|
||||
- Proper task assignment and coordination
|
||||
- Clear audit trails per agent
|
||||
|
||||
### 📊 **Success Metrics**
|
||||
|
||||
- **Tool Reliability**: >95% success rate for all VS Code tool calls
|
||||
- **Performance**: <500ms average response time for VS Code operations
|
||||
- **Security**: Zero security incidents with workspace sandboxing
|
||||
- **Integration**: All tools work seamlessly with agent coordination system
|
||||
- **Adoption**: Agents can complete full development workflows using only coordinated tools## Testing Strategy
|
||||
|
||||
1. **Unit Tests**: Each VS Code tool function
|
||||
2. **Integration Tests**: Tool coordination and routing
|
||||
3. **Security Tests**: Permission enforcement and sandboxing
|
||||
4. **Performance Tests**: Rate limiting and resource usage
|
||||
5. **User Acceptance**: Real workflow testing with multiple agents
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
- **Extension-specific Tools**: Tools for specific VS Code extensions
|
||||
- **Collaborative Features**: Multi-agent editing coordination
|
||||
- **AI-Enhanced Operations**: Intelligent code suggestions and fixes
|
||||
- **Remote Development**: Support for remote VS Code scenarios
|
||||
- **Custom Tool Creation**: Framework for users to create their own VS Code tools
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
This implementation transforms the Agent Coordinator from a simple MCP proxy into a comprehensive development environment orchestrator, enabling sophisticated AI-assisted development workflows.
|
||||
26
docker-compose.dev.yml
Normal file
26
docker-compose.dev.yml
Normal file
@@ -0,0 +1,26 @@
|
||||
version: '3.8'
|
||||
|
||||
# Development override for docker-compose.yml
|
||||
# Run with: docker-compose -f docker-compose.yml -f docker-compose.dev.yml up
|
||||
|
||||
services:
|
||||
agent-coordinator:
|
||||
environment:
|
||||
- MIX_ENV=dev
|
||||
volumes:
|
||||
# Mount source code for development
|
||||
- .:/app/src:ro
|
||||
# Mount config for easy editing
|
||||
- ./mcp_servers.json:/app/mcp_servers.json
|
||||
command: ["bash"]
|
||||
stdin_open: true
|
||||
tty: true
|
||||
profiles:
|
||||
- dev
|
||||
|
||||
nats:
|
||||
command:
|
||||
- '--jetstream'
|
||||
volumes: []
|
||||
profiles:
|
||||
- dev
|
||||
52
docker-compose.yml
Normal file
52
docker-compose.yml
Normal file
@@ -0,0 +1,52 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
nats:
|
||||
image: nats:2.10-alpine
|
||||
container_name: agent-coordinator-nats
|
||||
command:
|
||||
- '--jetstream'
|
||||
- '--store_dir=/data'
|
||||
- '--http_port=8222'
|
||||
ports:
|
||||
- "4223:4222"
|
||||
- "8223:8222"
|
||||
- "6223:6222"
|
||||
volumes:
|
||||
- nats_data:/data
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8222/healthz"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
networks:
|
||||
- agent-coordinator-network
|
||||
|
||||
agent-coordinator:
|
||||
image: ghcr.io/rooba/agentcoordinator:latest
|
||||
container_name: agent-coordinator
|
||||
environment:
|
||||
- NATS_HOST=nats
|
||||
- NATS_PORT=4222
|
||||
- MIX_ENV=prod
|
||||
volumes:
|
||||
- ./mcp_servers.json:/app/mcp_servers.json:ro
|
||||
- ./workspace:/workspace:rw
|
||||
ports:
|
||||
- "4000:4000"
|
||||
depends_on:
|
||||
nats:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- agent-coordinator-network
|
||||
|
||||
volumes:
|
||||
nats_data:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
agent-coordinator-network:
|
||||
driver: bridge
|
||||
159
docker-entrypoint.sh
Normal file
159
docker-entrypoint.sh
Normal file
@@ -0,0 +1,159 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Docker entrypoint script for Agent Coordinator MCP Server
|
||||
# Handles initialization, configuration, and graceful shutdown
|
||||
|
||||
set -e
|
||||
|
||||
# Default environment variables
|
||||
export MIX_ENV="${MIX_ENV:-prod}"
|
||||
export NATS_HOST="${NATS_HOST:-localhost}"
|
||||
export NATS_PORT="${NATS_PORT:-4222}"
|
||||
export DOCKERIZED="true"
|
||||
COLORIZED="${COLORIZED:-}"
|
||||
|
||||
if [ ! -z "$COLORIZED" ]; then
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
else
|
||||
RED=''
|
||||
GREEN=''
|
||||
YELLOW=''
|
||||
BLUE=''
|
||||
NC=''
|
||||
fi
|
||||
|
||||
# Logging functions
|
||||
log_info() {
|
||||
echo -e "${BLUE}[INFO]${NC} $1" >&2
|
||||
}
|
||||
|
||||
log_warn() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1" >&2
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1" >&2
|
||||
}
|
||||
|
||||
log_debug() {
|
||||
echo -e "${GREEN}[DEBUG]${NC} $1" >&2
|
||||
}
|
||||
|
||||
cleanup() {
|
||||
log_info "Received shutdown signal, shutting down..."
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Set up signal handlers for graceful shutdown
|
||||
trap cleanup SIGTERM SIGINT SIGQUIT
|
||||
|
||||
# Function to wait for NATS (if configured)
|
||||
wait_for_nats() {
|
||||
if [ "$NATS_HOST" != "localhost" ] || [ "$NATS_PORT" != "4222" ]; then
|
||||
log_info "Waiting for NATS at $NATS_HOST:$NATS_PORT..."
|
||||
|
||||
local timeout=30
|
||||
local count=0
|
||||
|
||||
while [ $count -lt $timeout ]; do
|
||||
if nc -z "$NATS_HOST" "$NATS_PORT" 2>/dev/null; then
|
||||
log_debug "NATS is available"
|
||||
return 0
|
||||
fi
|
||||
|
||||
log_info "NATS not yet available, waiting... ($((count + 1))/$timeout)"
|
||||
sleep 1
|
||||
count=$((count + 1))
|
||||
done
|
||||
|
||||
log_error "Timeout waiting for NATS at $NATS_HOST:$NATS_PORT"
|
||||
exit 1
|
||||
else
|
||||
log_info "Using default NATS configuration (localhost:4222)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Validate configuration
|
||||
validate_config() {
|
||||
log_info "Validating configuration..."
|
||||
|
||||
# Check if mcp_servers.json exists
|
||||
if [ ! -f "/app/mcp_servers.json" ]; then
|
||||
log_error "mcp_servers.json not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log_debug "Configuration validation passed"
|
||||
}
|
||||
|
||||
# Pre-install external MCP server dependencies
|
||||
preinstall_dependencies() {
|
||||
log_info "Pre-installing external MCP server dependencies..."
|
||||
|
||||
# Check if bun is available
|
||||
if ! command -v bun >/dev/null 2>&1; then
|
||||
log_error "bun is not available - external MCP servers may not work"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Pre-cache common MCP packages to speed up startup
|
||||
local packages=(
|
||||
"@modelcontextprotocol/server-filesystem"
|
||||
"@modelcontextprotocol/server-memory"
|
||||
"@modelcontextprotocol/server-sequential-thinking"
|
||||
"@upstash/context7-mcp"
|
||||
)
|
||||
|
||||
for package in "${packages[@]}"; do
|
||||
log_info "Caching package: $package"
|
||||
bun add --global --silent "$package" || log_warn "Failed to cache $package"
|
||||
done
|
||||
|
||||
log_debug "Dependencies pre-installed"
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
log_info "Starting Agent Coordinator MCP Server"
|
||||
log_info "Environment: $MIX_ENV"
|
||||
log_info "NATS: $NATS_HOST:$NATS_PORT"
|
||||
|
||||
|
||||
# Validate configuration
|
||||
validate_config
|
||||
|
||||
# Wait for external services if needed
|
||||
wait_for_nats
|
||||
|
||||
# Pre-install dependencies
|
||||
preinstall_dependencies
|
||||
|
||||
# Change to app directory
|
||||
cd /app
|
||||
|
||||
# Start the main application
|
||||
log_info "Starting main application..."
|
||||
|
||||
if [ "$#" -eq 0 ] || [ "$1" = "/app/scripts/mcp_launcher.sh" ]; then
|
||||
# Default: start the MCP server
|
||||
log_info "Starting MCP server via launcher script..."
|
||||
exec "/app/scripts/mcp_launcher.sh"
|
||||
elif [ "$1" = "bash" ] || [ "$1" = "sh" ]; then
|
||||
# Interactive shell mode
|
||||
log_info "Starting interactive shell..."
|
||||
exec "$@"
|
||||
elif [ "$1" = "release" ]; then
|
||||
# Direct release mode
|
||||
log_info "Starting via Elixir release..."
|
||||
exec "/app/bin/agent_coordinator" "start"
|
||||
else
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
main "$@"
|
||||
461
examples/director_demo.exs
Normal file
461
examples/director_demo.exs
Normal file
@@ -0,0 +1,461 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Director Management Demo Script
|
||||
#
|
||||
# This script demonstrates the director role functionality:
|
||||
# 1. Register a director agent with oversight capabilities
|
||||
# 2. Register multiple standard agents for the director to manage
|
||||
# 3. Show director observing and managing other agents
|
||||
# 4. Demonstrate task assignment, feedback, and redundancy detection
|
||||
# 5. Show autonomous workflow coordination
|
||||
|
||||
Mix.install([
|
||||
{:agent_coordinator, path: "."}
|
||||
])
|
||||
|
||||
defmodule DirectorDemo do
|
||||
alias AgentCoordinator.{TaskRegistry, Inbox, Agent, Task}
|
||||
|
||||
def run do
|
||||
IO.puts("\n🎬 Director Management Demo Starting...")
|
||||
IO.puts("=" <> String.duplicate("=", 50))
|
||||
|
||||
# Start the Agent Coordinator application
|
||||
{:ok, _} = AgentCoordinator.Application.start(:normal, [])
|
||||
:timer.sleep(2000) # Give more time for startup
|
||||
|
||||
# Setup demo scenario
|
||||
setup_demo_scenario()
|
||||
|
||||
# Demonstrate director capabilities
|
||||
demo_director_observations()
|
||||
demo_task_management()
|
||||
demo_redundancy_detection()
|
||||
demo_autonomous_workflow()
|
||||
|
||||
IO.puts("\n✅ Director Management Demo Complete!")
|
||||
IO.puts("=" <> String.duplicate("=", 50))
|
||||
end
|
||||
|
||||
defp setup_demo_scenario do
|
||||
IO.puts("\n📋 Setting up demo scenario...")
|
||||
|
||||
# Register a global director
|
||||
director_opts = %{
|
||||
role: :director,
|
||||
oversight_scope: :global,
|
||||
capabilities: ["management", "coordination", "oversight", "coding"],
|
||||
workspace_path: "/home/ra/agent_coordinator",
|
||||
codebase_id: "agent_coordinator"
|
||||
}
|
||||
|
||||
{:ok, director_id} = TaskRegistry.register_agent("Director Phoenix Eagle", director_opts)
|
||||
IO.puts("✅ Registered Director: #{director_id}")
|
||||
|
||||
# Register several standard agents for the director to manage
|
||||
agents = [
|
||||
{"Frontend Developer Ruby Shark", %{capabilities: ["coding", "testing"], role: :standard}},
|
||||
{"Backend Engineer Silver Wolf", %{capabilities: ["coding", "analysis"], role: :standard}},
|
||||
{"QA Tester Golden Panda", %{capabilities: ["testing", "documentation"], role: :standard}},
|
||||
{"DevOps Engineer Blue Tiger", %{capabilities: ["coding", "review"], role: :standard}}
|
||||
]
|
||||
|
||||
agent_ids = Enum.map(agents, fn {name, opts} ->
|
||||
base_opts = Map.merge(opts, %{
|
||||
workspace_path: "/home/ra/agent_coordinator",
|
||||
codebase_id: "agent_coordinator"
|
||||
})
|
||||
{:ok, agent_id} = TaskRegistry.register_agent(name, base_opts)
|
||||
IO.puts("✅ Registered Agent: #{name} (#{agent_id})")
|
||||
|
||||
# Add some initial tasks to create realistic scenario
|
||||
add_demo_tasks(agent_id, name)
|
||||
|
||||
agent_id
|
||||
end)
|
||||
|
||||
%{director_id: director_id, agent_ids: agent_ids}
|
||||
end
|
||||
|
||||
defp add_demo_tasks(agent_id, agent_name) do
|
||||
tasks = case agent_name do
|
||||
"Frontend Developer" <> _ -> [
|
||||
{"Implement User Dashboard", "Create responsive dashboard with user stats and activity feed"},
|
||||
{"Fix CSS Layout Issues", "Resolve responsive design problems on mobile devices"},
|
||||
{"Add Dark Mode Support", "Implement theme switching with proper contrast ratios"}
|
||||
]
|
||||
"Backend Engineer" <> _ -> [
|
||||
{"Optimize Database Queries", "Review and optimize slow queries in user management system"},
|
||||
{"Implement API Rate Limiting", "Add rate limiting to prevent API abuse"},
|
||||
{"Fix Authentication Bug", "Resolve JWT token refresh issue causing user logouts"}
|
||||
]
|
||||
"QA Tester" <> _ -> [
|
||||
{"Write End-to-End Tests", "Create comprehensive test suite for user authentication flow"},
|
||||
{"Performance Testing", "Conduct load testing on API endpoints"},
|
||||
{"Fix Authentication Bug", "Validate JWT token refresh fix from backend team"} # Intentional duplicate
|
||||
]
|
||||
"DevOps Engineer" <> _ -> [
|
||||
{"Setup CI/CD Pipeline", "Configure automated testing and deployment pipeline"},
|
||||
{"Monitor System Performance", "Setup monitoring dashboards and alerting"},
|
||||
{"Optimize Database Queries", "Database performance tuning and indexing"} # Intentional duplicate
|
||||
]
|
||||
end
|
||||
|
||||
Enum.each(tasks, fn {title, description} ->
|
||||
task = Task.new(title, description, %{
|
||||
priority: Enum.random([:low, :normal, :high]),
|
||||
codebase_id: "agent_coordinator"
|
||||
})
|
||||
Inbox.add_task(agent_id, task)
|
||||
end)
|
||||
end
|
||||
|
||||
defp demo_director_observations do
|
||||
IO.puts("\n👁️ Director Observation Capabilities")
|
||||
IO.puts("-" <> String.duplicate("-", 40))
|
||||
|
||||
# Get the director agent
|
||||
agents = TaskRegistry.list_agents()
|
||||
director = Enum.find(agents, fn agent -> Agent.is_director?(agent) end)
|
||||
|
||||
if director do
|
||||
IO.puts("🔍 Director '#{director.name}' observing all agents...")
|
||||
|
||||
# Simulate director observing agents
|
||||
args = %{
|
||||
"agent_id" => director.id,
|
||||
"scope" => "codebase",
|
||||
"include_activity_history" => true
|
||||
}
|
||||
|
||||
# This would normally be called through MCP, but we'll call directly for demo
|
||||
result = observe_all_agents_demo(director, args)
|
||||
|
||||
case result do
|
||||
{:ok, observation} ->
|
||||
IO.puts("📊 Observation Results:")
|
||||
IO.puts(" - Total Agents: #{observation.total_agents}")
|
||||
IO.puts(" - Oversight Scope: #{observation.oversight_capability}")
|
||||
|
||||
Enum.each(observation.agents, fn agent_info ->
|
||||
task_count = %{
|
||||
pending: length(agent_info.tasks.pending),
|
||||
in_progress: if(agent_info.tasks.in_progress, do: 1, else: 0),
|
||||
completed: length(agent_info.tasks.completed)
|
||||
}
|
||||
|
||||
IO.puts(" 📋 #{agent_info.name}:")
|
||||
IO.puts(" Role: #{agent_info.role} | Status: #{agent_info.status}")
|
||||
IO.puts(" Tasks: #{task_count.pending} pending, #{task_count.in_progress} active, #{task_count.completed} done")
|
||||
IO.puts(" Capabilities: #{Enum.join(agent_info.capabilities, ", ")}")
|
||||
end)
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("❌ Observation failed: #{reason}")
|
||||
end
|
||||
else
|
||||
IO.puts("❌ No director found in system")
|
||||
end
|
||||
end
|
||||
|
||||
defp observe_all_agents_demo(director, args) do
|
||||
# Simplified version of the actual function for demo
|
||||
all_agents = TaskRegistry.list_agents()
|
||||
|> Enum.filter(fn a -> a.codebase_id == director.codebase_id end)
|
||||
|
||||
detailed_agents = Enum.map(all_agents, fn target_agent ->
|
||||
task_info = case Inbox.list_tasks(target_agent.id) do
|
||||
{:error, _} -> %{pending: [], in_progress: nil, completed: []}
|
||||
tasks -> tasks
|
||||
end
|
||||
|
||||
%{
|
||||
agent_id: target_agent.id,
|
||||
name: target_agent.name,
|
||||
role: target_agent.role,
|
||||
capabilities: target_agent.capabilities,
|
||||
status: target_agent.status,
|
||||
codebase_id: target_agent.codebase_id,
|
||||
managed_by_director: target_agent.id in (director.managed_agents || []),
|
||||
tasks: task_info
|
||||
}
|
||||
end)
|
||||
|
||||
{:ok, %{
|
||||
director_id: director.id,
|
||||
scope: "codebase",
|
||||
oversight_capability: director.oversight_scope,
|
||||
agents: detailed_agents,
|
||||
total_agents: length(detailed_agents),
|
||||
timestamp: DateTime.utc_now()
|
||||
}}
|
||||
end
|
||||
|
||||
defp demo_task_management do
|
||||
IO.puts("\n📝 Director Task Management")
|
||||
IO.puts("-" <> String.duplicate("-", 40))
|
||||
|
||||
agents = TaskRegistry.list_agents()
|
||||
director = Enum.find(agents, fn agent -> Agent.is_director?(agent) end)
|
||||
standard_agents = Enum.filter(agents, fn agent -> !Agent.is_director?(agent) end)
|
||||
|
||||
if director && length(standard_agents) > 0 do
|
||||
target_agent = Enum.random(standard_agents)
|
||||
|
||||
IO.puts("🎯 Director assigning new task to #{target_agent.name}...")
|
||||
|
||||
# Create a high-priority coordination task
|
||||
new_task = %{
|
||||
"title" => "Team Coordination Meeting",
|
||||
"description" => "Organize cross-functional team sync to align on project priorities and resolve blockers. Focus on identifying dependencies between frontend, backend, and QA work streams.",
|
||||
"priority" => "high",
|
||||
"file_paths" => []
|
||||
}
|
||||
|
||||
# Director assigns the task
|
||||
task = Task.new(new_task["title"], new_task["description"], %{
|
||||
priority: :high,
|
||||
codebase_id: target_agent.codebase_id,
|
||||
assignment_reason: "Director identified need for team alignment",
|
||||
metadata: %{
|
||||
director_assigned: true,
|
||||
director_id: director.id
|
||||
}
|
||||
})
|
||||
|
||||
case Inbox.add_task(target_agent.id, task) do
|
||||
:ok ->
|
||||
IO.puts("✅ Task assigned successfully!")
|
||||
IO.puts(" Task: #{task.title}")
|
||||
IO.puts(" Assigned to: #{target_agent.name}")
|
||||
IO.puts(" Priority: #{task.priority}")
|
||||
IO.puts(" Reason: #{task.assignment_reason}")
|
||||
|
||||
# Update director's managed agents list
|
||||
updated_director = Agent.add_managed_agent(director, target_agent.id)
|
||||
TaskRegistry.update_agent(director.id, updated_director)
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("❌ Task assignment failed: #{reason}")
|
||||
end
|
||||
|
||||
IO.puts("\n💬 Director providing task feedback...")
|
||||
|
||||
# Simulate director providing feedback on existing tasks
|
||||
{:ok, tasks} = Inbox.list_tasks(target_agent.id)
|
||||
if length(tasks.pending) > 0 do
|
||||
sample_task = Enum.random(tasks.pending)
|
||||
|
||||
feedback_examples = [
|
||||
"Consider breaking this task into smaller, more manageable subtasks for better tracking.",
|
||||
"This aligns well with the current sprint goals. Prioritize integration with the new API endpoints.",
|
||||
"Coordinate with the QA team before implementation to ensure test coverage is adequate.",
|
||||
"This task may have dependencies on the backend authentication work. Check with the backend team first."
|
||||
]
|
||||
|
||||
feedback = Enum.random(feedback_examples)
|
||||
|
||||
IO.puts("📋 Feedback for task '#{sample_task.title}':")
|
||||
IO.puts(" 💡 #{feedback}")
|
||||
IO.puts(" ⏰ Timestamp: #{DateTime.utc_now()}")
|
||||
end
|
||||
|
||||
else
|
||||
IO.puts("❌ No director or standard agents found for task management demo")
|
||||
end
|
||||
end
|
||||
|
||||
defp demo_redundancy_detection do
|
||||
IO.puts("\n🔍 Director Redundancy Detection")
|
||||
IO.puts("-" <> String.duplicate("-", 40))
|
||||
|
||||
agents = TaskRegistry.list_agents()
|
||||
director = Enum.find(agents, fn agent -> Agent.is_director?(agent) end)
|
||||
|
||||
if director do
|
||||
IO.puts("🔎 Analyzing tasks across all agents for redundancy...")
|
||||
|
||||
# Collect all tasks from all agents
|
||||
all_agents = Enum.filter(agents, fn a -> a.codebase_id == director.codebase_id end)
|
||||
all_tasks = Enum.flat_map(all_agents, fn agent ->
|
||||
case Inbox.list_tasks(agent.id) do
|
||||
{:error, _} -> []
|
||||
tasks ->
|
||||
(tasks.pending ++ (if tasks.in_progress, do: [tasks.in_progress], else: []))
|
||||
|> Enum.map(fn task -> Map.put(task, :agent_id, agent.id) end)
|
||||
end
|
||||
end)
|
||||
|
||||
IO.puts("📊 Total tasks analyzed: #{length(all_tasks)}")
|
||||
|
||||
# Detect redundant tasks (simplified similarity detection)
|
||||
redundant_groups = detect_similar_tasks_demo(all_tasks)
|
||||
|
||||
if length(redundant_groups) > 0 do
|
||||
IO.puts("⚠️ Found #{length(redundant_groups)} groups of potentially redundant tasks:")
|
||||
|
||||
Enum.each(redundant_groups, fn group ->
|
||||
IO.puts("\n 🔄 Redundant Group: '#{group.similarity_key}'")
|
||||
IO.puts(" Task count: #{group.task_count}")
|
||||
|
||||
Enum.each(group.tasks, fn task ->
|
||||
agent = Enum.find(agents, fn a -> a.id == task.agent_id end)
|
||||
agent_name = if agent, do: agent.name, else: "Unknown Agent"
|
||||
IO.puts(" - #{task.title} (#{agent_name})")
|
||||
end)
|
||||
|
||||
IO.puts(" 🎯 Recommendation: Consider consolidating these similar tasks or clearly define distinct responsibilities.")
|
||||
end)
|
||||
|
||||
total_redundant = Enum.sum(Enum.map(redundant_groups, fn g -> g.task_count end))
|
||||
IO.puts("\n📈 Impact Analysis:")
|
||||
IO.puts(" - Total redundant tasks: #{total_redundant}")
|
||||
IO.puts(" - Potential efficiency gain: #{round(total_redundant / length(all_tasks) * 100)}%")
|
||||
|
||||
else
|
||||
IO.puts("✅ No redundant tasks detected. Teams are well-coordinated!")
|
||||
end
|
||||
|
||||
else
|
||||
IO.puts("❌ No director found for redundancy detection")
|
||||
end
|
||||
end
|
||||
|
||||
defp detect_similar_tasks_demo(tasks) do
|
||||
# Group tasks by normalized title keywords
|
||||
tasks
|
||||
|> Enum.group_by(fn task ->
|
||||
# Normalize title for comparison
|
||||
String.downcase(task.title)
|
||||
|> String.replace(~r/[^\w\s]/, "")
|
||||
|> String.split()
|
||||
|> Enum.take(3)
|
||||
|> Enum.join(" ")
|
||||
end)
|
||||
|> Enum.filter(fn {_key, group_tasks} -> length(group_tasks) > 1 end)
|
||||
|> Enum.map(fn {key, group_tasks} ->
|
||||
%{
|
||||
similarity_key: key,
|
||||
tasks: Enum.map(group_tasks, fn task ->
|
||||
%{
|
||||
task_id: task.id,
|
||||
title: task.title,
|
||||
agent_id: task.agent_id,
|
||||
codebase_id: task.codebase_id
|
||||
}
|
||||
end),
|
||||
task_count: length(group_tasks)
|
||||
}
|
||||
end)
|
||||
end
|
||||
|
||||
defp demo_autonomous_workflow do
|
||||
IO.puts("\n🤖 Director Autonomous Workflow Coordination")
|
||||
IO.puts("-" <> String.duplicate("-", 50))
|
||||
|
||||
agents = TaskRegistry.list_agents()
|
||||
director = Enum.find(agents, fn agent -> Agent.is_director?(agent) end)
|
||||
standard_agents = Enum.filter(agents, fn agent -> !Agent.is_director?(agent) end)
|
||||
|
||||
if director && length(standard_agents) >= 2 do
|
||||
IO.puts("🎭 Simulating autonomous workflow coordination scenario...")
|
||||
IO.puts("\nScenario: Director detects that authentication bug fixes require coordination")
|
||||
IO.puts("between Backend Engineer and QA Tester.")
|
||||
|
||||
# Find agents working on authentication
|
||||
backend_agent = Enum.find(standard_agents, fn agent ->
|
||||
String.contains?(agent.name, "Backend")
|
||||
end)
|
||||
|
||||
qa_agent = Enum.find(standard_agents, fn agent ->
|
||||
String.contains?(agent.name, "QA")
|
||||
end)
|
||||
|
||||
if backend_agent && qa_agent do
|
||||
IO.puts("\n1️⃣ Director sending coordination input to Backend Engineer...")
|
||||
|
||||
coordination_message = """
|
||||
🤖 Director Coordination:
|
||||
|
||||
I've identified that your JWT authentication fix needs to be coordinated with QA testing.
|
||||
|
||||
Action Required:
|
||||
- Notify QA team when your fix is ready for testing
|
||||
- Provide test credentials and reproduction steps
|
||||
- Schedule knowledge transfer session if needed
|
||||
|
||||
This will help avoid testing delays and ensure comprehensive coverage.
|
||||
"""
|
||||
|
||||
# Simulate sending input to backend agent
|
||||
IO.puts("📤 Sending message to #{backend_agent.name}:")
|
||||
IO.puts(" Input Type: chat_message")
|
||||
IO.puts(" Content: [Coordination message about JWT fix coordination]")
|
||||
IO.puts(" Context: authentication_workflow_coordination")
|
||||
|
||||
IO.puts("\n2️⃣ Director sending parallel input to QA Tester...")
|
||||
|
||||
qa_message = """
|
||||
🤖 Director Coordination:
|
||||
|
||||
Backend team is working on JWT authentication fix. Please prepare for coordinated testing.
|
||||
|
||||
Action Required:
|
||||
- Review current authentication test cases
|
||||
- Prepare test environment for JWT token scenarios
|
||||
- Block time for testing once backend fix is ready
|
||||
|
||||
I'll facilitate the handoff between teams when implementation is complete.
|
||||
"""
|
||||
|
||||
IO.puts("📤 Sending message to #{qa_agent.name}:")
|
||||
IO.puts(" Input Type: chat_message")
|
||||
IO.puts(" Content: [Coordination message about authentication testing prep]")
|
||||
IO.puts(" Context: authentication_workflow_coordination")
|
||||
|
||||
IO.puts("\n3️⃣ Director scheduling follow-up coordination...")
|
||||
|
||||
# Create coordination task
|
||||
coordination_task = Task.new(
|
||||
"Authentication Fix Coordination Follow-up",
|
||||
"Check progress on JWT fix coordination between backend and QA teams. Ensure handoff is smooth and testing is proceeding without blockers.",
|
||||
%{
|
||||
priority: :normal,
|
||||
codebase_id: director.codebase_id,
|
||||
assignment_reason: "Autonomous workflow coordination",
|
||||
metadata: %{
|
||||
workflow_type: "authentication_coordination",
|
||||
involves_agents: [backend_agent.id, qa_agent.id],
|
||||
coordination_phase: "follow_up"
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
Inbox.add_task(director.id, coordination_task)
|
||||
IO.puts("✅ Created follow-up coordination task for director")
|
||||
|
||||
IO.puts("\n🎯 Autonomous Workflow Benefits Demonstrated:")
|
||||
IO.puts(" ✅ Proactive cross-team coordination")
|
||||
IO.puts(" ✅ Parallel communication to reduce delays")
|
||||
IO.puts(" ✅ Automated follow-up task creation")
|
||||
IO.puts(" ✅ Context-aware workflow management")
|
||||
IO.puts(" ✅ Human-out-of-the-loop efficiency")
|
||||
|
||||
IO.puts("\n🔮 Next Steps in Full Implementation:")
|
||||
IO.puts(" - VSCode integration for real agent messaging")
|
||||
IO.puts(" - Workflow templates for common coordination patterns")
|
||||
IO.puts(" - ML-based task dependency detection")
|
||||
IO.puts(" - Automated testing trigger coordination")
|
||||
IO.puts(" - Cross-codebase workflow orchestration")
|
||||
|
||||
else
|
||||
IO.puts("❌ Could not find Backend and QA agents for workflow demo")
|
||||
end
|
||||
else
|
||||
IO.puts("❌ Insufficient agents for autonomous workflow demonstration")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Run the demo
|
||||
DirectorDemo.run()
|
||||
@@ -1,193 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
AgentCoordinator MCP Client Example
|
||||
|
||||
This script demonstrates how to connect to and interact with the
|
||||
AgentCoordinator MCP server programmatically.
|
||||
"""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import uuid
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
class AgentCoordinatorMCP:
|
||||
def __init__(self, launcher_path: str = "./scripts/mcp_launcher.sh"):
|
||||
self.launcher_path = launcher_path
|
||||
self.process = None
|
||||
|
||||
def start(self):
|
||||
"""Start the MCP server process"""
|
||||
try:
|
||||
self.process = subprocess.Popen(
|
||||
[self.launcher_path],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
bufsize=0
|
||||
)
|
||||
print("🚀 MCP server started")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ Failed to start MCP server: {e}")
|
||||
return False
|
||||
|
||||
def stop(self):
|
||||
"""Stop the MCP server process"""
|
||||
if self.process:
|
||||
self.process.terminate()
|
||||
self.process.wait()
|
||||
print("🛑 MCP server stopped")
|
||||
|
||||
def send_request(self, method: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
||||
"""Send a JSON-RPC request to the MCP server"""
|
||||
if not self.process:
|
||||
raise RuntimeError("MCP server not started")
|
||||
|
||||
request = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": str(uuid.uuid4()),
|
||||
"method": method
|
||||
}
|
||||
|
||||
if params:
|
||||
request["params"] = params
|
||||
|
||||
# Send request
|
||||
request_json = json.dumps(request) + "\n"
|
||||
self.process.stdin.write(request_json)
|
||||
self.process.stdin.flush()
|
||||
|
||||
# Read response
|
||||
response_line = self.process.stdout.readline()
|
||||
if not response_line:
|
||||
raise RuntimeError("No response from MCP server")
|
||||
|
||||
return json.loads(response_line.strip())
|
||||
|
||||
def get_tools(self) -> Dict[str, Any]:
|
||||
"""Get list of available tools"""
|
||||
return self.send_request("tools/list")
|
||||
|
||||
def register_agent(self, name: str, capabilities: list) -> Dict[str, Any]:
|
||||
"""Register a new agent"""
|
||||
return self.send_request("tools/call", {
|
||||
"name": "register_agent",
|
||||
"arguments": {
|
||||
"name": name,
|
||||
"capabilities": capabilities
|
||||
}
|
||||
})
|
||||
|
||||
def create_task(self, title: str, description: str, priority: str = "normal",
|
||||
required_capabilities: list = None) -> Dict[str, Any]:
|
||||
"""Create a new task"""
|
||||
args = {
|
||||
"title": title,
|
||||
"description": description,
|
||||
"priority": priority
|
||||
}
|
||||
if required_capabilities:
|
||||
args["required_capabilities"] = required_capabilities
|
||||
|
||||
return self.send_request("tools/call", {
|
||||
"name": "create_task",
|
||||
"arguments": args
|
||||
})
|
||||
|
||||
def get_next_task(self, agent_id: str) -> Dict[str, Any]:
|
||||
"""Get next task for an agent"""
|
||||
return self.send_request("tools/call", {
|
||||
"name": "get_next_task",
|
||||
"arguments": {"agent_id": agent_id}
|
||||
})
|
||||
|
||||
def complete_task(self, agent_id: str, result: str) -> Dict[str, Any]:
|
||||
"""Complete current task"""
|
||||
return self.send_request("tools/call", {
|
||||
"name": "complete_task",
|
||||
"arguments": {
|
||||
"agent_id": agent_id,
|
||||
"result": result
|
||||
}
|
||||
})
|
||||
|
||||
def get_task_board(self) -> Dict[str, Any]:
|
||||
"""Get task board overview"""
|
||||
return self.send_request("tools/call", {
|
||||
"name": "get_task_board",
|
||||
"arguments": {}
|
||||
})
|
||||
|
||||
def heartbeat(self, agent_id: str) -> Dict[str, Any]:
|
||||
"""Send agent heartbeat"""
|
||||
return self.send_request("tools/call", {
|
||||
"name": "heartbeat",
|
||||
"arguments": {"agent_id": agent_id}
|
||||
})
|
||||
|
||||
def demo():
|
||||
"""Demonstrate MCP client functionality"""
|
||||
print("🎯 AgentCoordinator MCP Client Demo")
|
||||
print("=" * 50)
|
||||
|
||||
client = AgentCoordinatorMCP()
|
||||
|
||||
try:
|
||||
# Start server
|
||||
if not client.start():
|
||||
return
|
||||
|
||||
# Wait for server to be ready
|
||||
import time
|
||||
time.sleep(2)
|
||||
|
||||
# Get tools
|
||||
print("\n📋 Available tools:")
|
||||
tools_response = client.get_tools()
|
||||
if "result" in tools_response:
|
||||
for tool in tools_response["result"]["tools"]:
|
||||
print(f" - {tool['name']}: {tool['description']}")
|
||||
|
||||
# Register agent
|
||||
print("\n👤 Registering agent...")
|
||||
register_response = client.register_agent("PythonAgent", ["coding", "testing"])
|
||||
if "result" in register_response:
|
||||
content = register_response["result"]["content"][0]["text"]
|
||||
agent_data = json.loads(content)
|
||||
agent_id = agent_data["agent_id"]
|
||||
print(f"✅ Agent registered: {agent_id}")
|
||||
|
||||
# Create task
|
||||
print("\n📝 Creating task...")
|
||||
task_response = client.create_task(
|
||||
"Python Script",
|
||||
"Write a Python script for data processing",
|
||||
"high",
|
||||
["coding"]
|
||||
)
|
||||
if "result" in task_response:
|
||||
content = task_response["result"]["content"][0]["text"]
|
||||
task_data = json.loads(content)
|
||||
print(f"✅ Task created: {task_data['task_id']}")
|
||||
|
||||
# Get task board
|
||||
print("\n📊 Task board:")
|
||||
board_response = client.get_task_board()
|
||||
if "result" in board_response:
|
||||
content = board_response["result"]["content"][0]["text"]
|
||||
board_data = json.loads(content)
|
||||
for agent in board_data["agents"]:
|
||||
print(f" 📱 {agent['name']}: {agent['status']}")
|
||||
print(f" Capabilities: {', '.join(agent['capabilities'])}")
|
||||
print(f" Pending: {agent['pending_tasks']}, Completed: {agent['completed_tasks']}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
finally:
|
||||
client.stop()
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo()
|
||||
111
examples/simple_test_demo.exs
Normal file
111
examples/simple_test_demo.exs
Normal file
@@ -0,0 +1,111 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Simple test for agent-specific task pools using Mix
|
||||
Mix.install([{:jason, "~> 1.4"}])
|
||||
|
||||
Code.require_file("mix.exs")
|
||||
|
||||
Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
alias AgentCoordinator.{TaskRegistry, Inbox, Agent, Task}
|
||||
|
||||
IO.puts("🧪 Simple Agent-Specific Task Pool Test")
|
||||
IO.puts("=" |> String.duplicate(50))
|
||||
|
||||
# Wait for services to start
|
||||
Process.sleep(2000)
|
||||
|
||||
# Test 1: Create agents directly
|
||||
IO.puts("\n1️⃣ Creating agents directly...")
|
||||
|
||||
agent1 = Agent.new("Alpha Wolf", [:coding, :testing])
|
||||
agent2 = Agent.new("Beta Tiger", [:documentation, :analysis])
|
||||
|
||||
case TaskRegistry.register_agent(agent1) do
|
||||
:ok -> IO.puts("✅ Agent 1 registered: #{agent1.id}")
|
||||
error -> IO.puts("❌ Agent 1 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
case TaskRegistry.register_agent(agent2) do
|
||||
:ok -> IO.puts("✅ Agent 2 registered: #{agent2.id}")
|
||||
error -> IO.puts("❌ Agent 2 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
# Test 2: Create agent-specific tasks
|
||||
IO.puts("\n2️⃣ Creating agent-specific tasks...")
|
||||
|
||||
# Create tasks for Agent 1
|
||||
task1_agent1 = Task.new("Fix auth bug", "Debug authentication issue", %{
|
||||
priority: :high,
|
||||
assigned_agent: agent1.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
task2_agent1 = Task.new("Add auth tests", "Write comprehensive auth tests", %{
|
||||
priority: :normal,
|
||||
assigned_agent: agent1.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
# Create tasks for Agent 2
|
||||
task1_agent2 = Task.new("Write API docs", "Document REST endpoints", %{
|
||||
priority: :normal,
|
||||
assigned_agent: agent2.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
# Add tasks to respective agent inboxes
|
||||
case Inbox.add_task(agent1.id, task1_agent1) do
|
||||
:ok -> IO.puts("✅ Task 1 added to Agent 1")
|
||||
error -> IO.puts("❌ Task 1 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
case Inbox.add_task(agent1.id, task2_agent1) do
|
||||
:ok -> IO.puts("✅ Task 2 added to Agent 1")
|
||||
error -> IO.puts("❌ Task 2 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
case Inbox.add_task(agent2.id, task1_agent2) do
|
||||
:ok -> IO.puts("✅ Task 1 added to Agent 2")
|
||||
error -> IO.puts("❌ Task 1 to Agent 2 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
# Test 3: Verify agent isolation
|
||||
IO.puts("\n3️⃣ Testing agent task isolation...")
|
||||
|
||||
# Agent 1 gets their tasks
|
||||
case Inbox.get_next_task(agent1.id) do
|
||||
nil -> IO.puts("❌ Agent 1 has no tasks")
|
||||
task -> IO.puts("✅ Agent 1 got task: #{task.title}")
|
||||
end
|
||||
|
||||
# Agent 2 gets their tasks
|
||||
case Inbox.get_next_task(agent2.id) do
|
||||
nil -> IO.puts("❌ Agent 2 has no tasks")
|
||||
task -> IO.puts("✅ Agent 2 got task: #{task.title}")
|
||||
end
|
||||
|
||||
# Test 4: Check task status
|
||||
IO.puts("\n4️⃣ Checking task status...")
|
||||
|
||||
status1 = Inbox.get_status(agent1.id)
|
||||
status2 = Inbox.get_status(agent2.id)
|
||||
|
||||
IO.puts("Agent 1 status: #{inspect(status1)}")
|
||||
IO.puts("Agent 2 status: #{inspect(status2)}")
|
||||
|
||||
# Test 5: List all tasks for each agent
|
||||
IO.puts("\n5️⃣ Listing all tasks per agent...")
|
||||
|
||||
tasks1 = Inbox.list_tasks(agent1.id)
|
||||
tasks2 = Inbox.list_tasks(agent2.id)
|
||||
|
||||
IO.puts("Agent 1 tasks: #{inspect(tasks1)}")
|
||||
IO.puts("Agent 2 tasks: #{inspect(tasks2)}")
|
||||
|
||||
IO.puts("\n" <> "=" |> String.duplicate(50))
|
||||
IO.puts("🎉 AGENT ISOLATION TEST COMPLETE!")
|
||||
IO.puts("✅ Each agent has their own task inbox")
|
||||
IO.puts("✅ No cross-contamination of tasks")
|
||||
IO.puts("✅ Agent-specific task pools working!")
|
||||
IO.puts("=" |> String.duplicate(50))
|
||||
@@ -1,18 +1,269 @@
|
||||
defmodule AgentCoordinator do
|
||||
@moduledoc """
|
||||
Documentation for `AgentCoordinator`.
|
||||
Agent Coordinator - A Model Context Protocol (MCP) server for multi-agent coordination.
|
||||
|
||||
Agent Coordinator enables multiple AI agents to work together seamlessly across codebases
|
||||
without conflicts. It provides intelligent task distribution, real-time communication,
|
||||
and cross-codebase coordination through a unified MCP interface.
|
||||
|
||||
## Key Features
|
||||
|
||||
- **Multi-Agent Coordination**: Register multiple AI agents with different capabilities
|
||||
- **Intelligent Task Distribution**: Automatically assigns tasks based on agent capabilities
|
||||
- **Cross-Codebase Support**: Coordinate work across multiple repositories
|
||||
- **Unified MCP Interface**: Single server providing access to multiple external MCP servers
|
||||
- **Automatic Task Tracking**: Every tool usage becomes a tracked task
|
||||
- **Real-Time Communication**: Heartbeat system for agent liveness and coordination
|
||||
|
||||
## Quick Start
|
||||
|
||||
To start the Agent Coordinator:
|
||||
|
||||
# Start the MCP server
|
||||
./scripts/mcp_launcher.sh
|
||||
|
||||
# Or in development mode
|
||||
iex -S mix
|
||||
|
||||
## Main Components
|
||||
|
||||
- `AgentCoordinator.MCPServer` - Core MCP protocol implementation
|
||||
- `AgentCoordinator.TaskRegistry` - Task management and agent coordination
|
||||
- `AgentCoordinator.UnifiedMCPServer` - Unified interface to external MCP servers
|
||||
- `AgentCoordinator.CodebaseRegistry` - Multi-repository support
|
||||
- `AgentCoordinator.VSCodeToolProvider` - VS Code integration tools
|
||||
|
||||
## MCP Tools Available
|
||||
|
||||
### Agent Coordination
|
||||
- `register_agent` - Register an agent with capabilities
|
||||
- `create_task` - Create tasks with requirements
|
||||
- `get_next_task` - Get assigned tasks
|
||||
- `complete_task` - Mark tasks complete
|
||||
- `get_task_board` - View all agent status
|
||||
- `heartbeat` - Maintain agent liveness
|
||||
|
||||
### Codebase Management
|
||||
- `register_codebase` - Register repositories
|
||||
- `create_cross_codebase_task` - Tasks spanning multiple repos
|
||||
- `add_codebase_dependency` - Define repository relationships
|
||||
|
||||
### External Tool Access
|
||||
All tools from external MCP servers are automatically available through
|
||||
the unified interface, including filesystem, context7, memory, and other servers.
|
||||
|
||||
## Usage Example
|
||||
|
||||
# Register an agent
|
||||
AgentCoordinator.MCPServer.handle_mcp_request(%{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "MyAgent",
|
||||
"capabilities" => ["coding", "testing"]
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
See the documentation in `docs/` for detailed implementation guides.
|
||||
"""
|
||||
|
||||
alias AgentCoordinator.MCPServer
|
||||
|
||||
@doc """
|
||||
Hello world.
|
||||
Get the version of Agent Coordinator.
|
||||
|
||||
## Examples
|
||||
|
||||
iex> AgentCoordinator.hello()
|
||||
:world
|
||||
iex> AgentCoordinator.version()
|
||||
"0.1.0"
|
||||
|
||||
"""
|
||||
def hello do
|
||||
:world
|
||||
def version do
|
||||
Application.spec(:agent_coordinator, :vsn) |> to_string()
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get the current status of the Agent Coordinator system.
|
||||
|
||||
Returns information about active agents, tasks, and external MCP servers.
|
||||
|
||||
## Examples
|
||||
|
||||
iex> AgentCoordinator.status()
|
||||
%{
|
||||
agents: 2,
|
||||
active_tasks: 1,
|
||||
external_servers: 3,
|
||||
uptime: 12345
|
||||
}
|
||||
|
||||
"""
|
||||
def status do
|
||||
with {:ok, board} <- get_task_board(),
|
||||
{:ok, server_status} <- get_server_status() do
|
||||
%{
|
||||
agents: length(board[:agents] || []),
|
||||
active_tasks: count_active_tasks(board),
|
||||
external_servers: count_active_servers(server_status),
|
||||
uptime: get_uptime()
|
||||
}
|
||||
else
|
||||
_ -> %{status: :error, message: "Unable to retrieve system status"}
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get the current task board showing all agents and their status.
|
||||
|
||||
Returns information about all registered agents, their current tasks,
|
||||
and overall system status.
|
||||
|
||||
## Examples
|
||||
|
||||
iex> {:ok, board} = AgentCoordinator.get_task_board()
|
||||
iex> is_map(board)
|
||||
true
|
||||
|
||||
"""
|
||||
def get_task_board do
|
||||
request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{"name" => "get_task_board", "arguments" => %{}},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => System.unique_integer()
|
||||
}
|
||||
|
||||
case MCPServer.handle_mcp_request(request) do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
{:ok, Jason.decode!(text)}
|
||||
|
||||
%{"error" => error} ->
|
||||
{:error, error}
|
||||
|
||||
_ ->
|
||||
{:error, "Unexpected response format"}
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Register a new agent with the coordination system.
|
||||
|
||||
## Parameters
|
||||
|
||||
- `name` - Agent name (string)
|
||||
- `capabilities` - List of capabilities (["coding", "testing", ...])
|
||||
- `opts` - Optional parameters (codebase_id, workspace_path, etc.)
|
||||
|
||||
## Examples
|
||||
|
||||
iex> {:ok, result} = AgentCoordinator.register_agent("TestAgent", ["coding"])
|
||||
iex> is_map(result)
|
||||
true
|
||||
|
||||
"""
|
||||
def register_agent(name, capabilities, opts \\ []) do
|
||||
args =
|
||||
%{
|
||||
"name" => name,
|
||||
"capabilities" => capabilities
|
||||
}
|
||||
|> add_optional_arg("codebase_id", opts[:codebase_id])
|
||||
|> add_optional_arg("workspace_path", opts[:workspace_path])
|
||||
|> add_optional_arg("cross_codebase_capable", opts[:cross_codebase_capable])
|
||||
|
||||
request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{"name" => "register_agent", "arguments" => args},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => System.unique_integer()
|
||||
}
|
||||
|
||||
case MCPServer.handle_mcp_request(request) do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
{:ok, Jason.decode!(text)}
|
||||
|
||||
%{"error" => error} ->
|
||||
{:error, error}
|
||||
|
||||
_ ->
|
||||
{:error, "Unexpected response format"}
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Create a new task in the coordination system.
|
||||
|
||||
## Parameters
|
||||
|
||||
- `title` - Task title (string)
|
||||
- `description` - Task description (string)
|
||||
- `opts` - Optional parameters (priority, codebase_id, file_paths, etc.)
|
||||
|
||||
## Examples
|
||||
|
||||
iex> {:ok, result} = AgentCoordinator.create_task("Test Task", "Test description")
|
||||
iex> is_map(result)
|
||||
true
|
||||
|
||||
"""
|
||||
def create_task(title, description, opts \\ []) do
|
||||
args =
|
||||
%{
|
||||
"title" => title,
|
||||
"description" => description
|
||||
}
|
||||
|> add_optional_arg("priority", opts[:priority])
|
||||
|> add_optional_arg("codebase_id", opts[:codebase_id])
|
||||
|> add_optional_arg("file_paths", opts[:file_paths])
|
||||
|> add_optional_arg("required_capabilities", opts[:required_capabilities])
|
||||
|
||||
request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{"name" => "create_task", "arguments" => args},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => System.unique_integer()
|
||||
}
|
||||
|
||||
case MCPServer.handle_mcp_request(request) do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
{:ok, Jason.decode!(text)}
|
||||
|
||||
%{"error" => error} ->
|
||||
{:error, error}
|
||||
|
||||
_ ->
|
||||
{:error, "Unexpected response format"}
|
||||
end
|
||||
end
|
||||
|
||||
# Private helpers
|
||||
|
||||
defp add_optional_arg(args, _key, nil), do: args
|
||||
defp add_optional_arg(args, key, value), do: Map.put(args, key, value)
|
||||
|
||||
defp count_active_tasks(%{agents: agents}) do
|
||||
Enum.count(agents, fn agent ->
|
||||
Map.get(agent, "current_task") != nil
|
||||
end)
|
||||
end
|
||||
|
||||
defp count_active_tasks(_), do: 0
|
||||
|
||||
defp count_active_servers(server_status) when is_map(server_status) do
|
||||
Map.get(server_status, :active_servers, 0)
|
||||
end
|
||||
|
||||
defp get_server_status do
|
||||
# This would call UnifiedMCPServer to get external server status
|
||||
# For now, return a placeholder
|
||||
{:ok, %{active_servers: 3}}
|
||||
end
|
||||
|
||||
defp get_uptime do
|
||||
# Get system uptime in seconds
|
||||
{uptime_ms, _} = :erlang.statistics(:wall_clock)
|
||||
div(uptime_ms, 1000)
|
||||
end
|
||||
end
|
||||
|
||||
300
lib/agent_coordinator/activity_tracker.ex
Normal file
300
lib/agent_coordinator/activity_tracker.ex
Normal file
@@ -0,0 +1,300 @@
|
||||
defmodule AgentCoordinator.ActivityTracker do
|
||||
@moduledoc """
|
||||
Tracks agent activities based on tool calls and infers human-readable activity descriptions.
|
||||
"""
|
||||
|
||||
alias AgentCoordinator.{Agent, TaskRegistry}
|
||||
|
||||
@doc """
|
||||
Infer activity description and files from tool name and arguments.
|
||||
Returns {activity_description, files_list}.
|
||||
"""
|
||||
def infer_activity(tool_name, args) do
|
||||
case tool_name do
|
||||
# File operations
|
||||
"read_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Reading #{Path.basename(file_path || "file")}", [file_path]}
|
||||
|
||||
"read_text_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Reading #{Path.basename(file_path || "file")}", [file_path]}
|
||||
|
||||
"read_multiple_files" ->
|
||||
files = Map.get(args, "paths", [])
|
||||
file_names = Enum.map(files, &Path.basename/1)
|
||||
{"Reading #{length(files)} files: #{Enum.join(file_names, ", ")}", files}
|
||||
|
||||
"write_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Writing #{Path.basename(file_path || "file")}", [file_path]}
|
||||
|
||||
"edit_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Editing #{Path.basename(file_path || "file")}", [file_path]}
|
||||
|
||||
"create_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Creating #{Path.basename(file_path || "file")}", [file_path]}
|
||||
|
||||
"move_file" ->
|
||||
source = Map.get(args, "source")
|
||||
dest = Map.get(args, "destination")
|
||||
files = [source, dest] |> Enum.filter(& &1)
|
||||
|
||||
{"Moving #{Path.basename(source || "file")} to #{Path.basename(dest || "destination")}",
|
||||
files}
|
||||
|
||||
# VS Code operations
|
||||
"vscode_read_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Reading #{Path.basename(file_path || "file")} in VS Code", [file_path]}
|
||||
|
||||
"vscode_write_file" ->
|
||||
file_path = extract_file_path(args)
|
||||
{"Writing #{Path.basename(file_path || "file")} in VS Code", [file_path]}
|
||||
|
||||
"vscode_set_editor_content" ->
|
||||
file_path = Map.get(args, "file_path")
|
||||
|
||||
if file_path do
|
||||
{"Editing #{Path.basename(file_path)} in VS Code", [file_path]}
|
||||
else
|
||||
{"Editing active file in VS Code", []}
|
||||
end
|
||||
|
||||
"vscode_get_active_editor" ->
|
||||
{"Viewing active editor in VS Code", []}
|
||||
|
||||
"vscode_get_selection" ->
|
||||
{"Viewing text selection in VS Code", []}
|
||||
|
||||
# Directory operations
|
||||
"list_directory" ->
|
||||
path = extract_file_path(args)
|
||||
{"Browsing directory #{Path.basename(path || ".")}", []}
|
||||
|
||||
"list_directory_with_sizes" ->
|
||||
path = extract_file_path(args)
|
||||
{"Browsing directory #{Path.basename(path || ".")} with sizes", []}
|
||||
|
||||
"directory_tree" ->
|
||||
path = extract_file_path(args)
|
||||
{"Exploring directory tree for #{Path.basename(path || ".")}", []}
|
||||
|
||||
"create_directory" ->
|
||||
path = extract_file_path(args)
|
||||
{"Creating directory #{Path.basename(path || "directory")}", []}
|
||||
|
||||
# Search operations
|
||||
"search_files" ->
|
||||
pattern = Map.get(args, "pattern", "files")
|
||||
{"Searching for #{pattern}", []}
|
||||
|
||||
"grep_search" ->
|
||||
query = Map.get(args, "query", "text")
|
||||
{"Searching for '#{query}' in files", []}
|
||||
|
||||
"semantic_search" ->
|
||||
query = Map.get(args, "query", "content")
|
||||
{"Semantic search for '#{query}'", []}
|
||||
|
||||
# Thinking operations
|
||||
"sequentialthinking" ->
|
||||
thought = Map.get(args, "thought", "")
|
||||
thought_summary = String.slice(thought, 0, 50) |> String.trim()
|
||||
{"Sequential thinking: #{thought_summary}...", []}
|
||||
|
||||
# Terminal operations
|
||||
"run_in_terminal" ->
|
||||
command = Map.get(args, "command", "command")
|
||||
command_summary = String.slice(command, 0, 30) |> String.trim()
|
||||
{"Running: #{command_summary}...", []}
|
||||
|
||||
"get_terminal_output" ->
|
||||
{"Checking terminal output", []}
|
||||
|
||||
# Test operations
|
||||
"runTests" ->
|
||||
files = Map.get(args, "files", [])
|
||||
|
||||
if files != [] do
|
||||
file_names = Enum.map(files, &Path.basename/1)
|
||||
{"Running tests in #{Enum.join(file_names, ", ")}", files}
|
||||
else
|
||||
{"Running all tests", []}
|
||||
end
|
||||
|
||||
# Task management
|
||||
"create_task" ->
|
||||
title = Map.get(args, "title", "task")
|
||||
{"Creating task: #{title}", []}
|
||||
|
||||
"get_next_task" ->
|
||||
{"Getting next task", []}
|
||||
|
||||
"complete_task" ->
|
||||
{"Completing current task", []}
|
||||
|
||||
# Knowledge operations
|
||||
"create_entities" ->
|
||||
entities = Map.get(args, "entities", [])
|
||||
count = length(entities)
|
||||
{"Creating #{count} knowledge entities", []}
|
||||
|
||||
"create_relations" ->
|
||||
relations = Map.get(args, "relations", [])
|
||||
count = length(relations)
|
||||
{"Creating #{count} knowledge relations", []}
|
||||
|
||||
"search_nodes" ->
|
||||
query = Map.get(args, "query", "nodes")
|
||||
{"Searching knowledge graph for '#{query}'", []}
|
||||
|
||||
"read_graph" ->
|
||||
{"Reading knowledge graph", []}
|
||||
|
||||
# HTTP/Web operations
|
||||
"fetch_webpage" ->
|
||||
urls = Map.get(args, "urls", [])
|
||||
|
||||
if urls != [] do
|
||||
{"Fetching #{length(urls)} webpages", []}
|
||||
else
|
||||
{"Fetching webpage", []}
|
||||
end
|
||||
|
||||
# Development operations
|
||||
"get_errors" ->
|
||||
files = Map.get(args, "filePaths", [])
|
||||
|
||||
if files != [] do
|
||||
file_names = Enum.map(files, &Path.basename/1)
|
||||
{"Checking errors in #{Enum.join(file_names, ", ")}", files}
|
||||
else
|
||||
{"Checking all errors", []}
|
||||
end
|
||||
|
||||
"list_code_usages" ->
|
||||
symbol = Map.get(args, "symbolName", "symbol")
|
||||
{"Finding usages of #{symbol}", []}
|
||||
|
||||
# Elixir-specific operations
|
||||
"elixir-definition" ->
|
||||
symbol = Map.get(args, "symbol", "symbol")
|
||||
{"Finding definition of #{symbol}", []}
|
||||
|
||||
"elixir-docs" ->
|
||||
modules = Map.get(args, "modules", [])
|
||||
|
||||
if modules != [] do
|
||||
{"Getting docs for #{Enum.join(modules, ", ")}", []}
|
||||
else
|
||||
{"Getting Elixir documentation", []}
|
||||
end
|
||||
|
||||
"elixir-environment" ->
|
||||
location = Map.get(args, "location", "code")
|
||||
{"Analyzing Elixir environment at #{location}", []}
|
||||
|
||||
# Python operations
|
||||
"pylanceRunCodeSnippet" ->
|
||||
{"Running Python code snippet", []}
|
||||
|
||||
"pylanceFileSyntaxErrors" ->
|
||||
file_uri = Map.get(args, "fileUri")
|
||||
|
||||
if file_uri do
|
||||
file_path = uri_to_path(file_uri)
|
||||
{"Checking syntax errors in #{Path.basename(file_path)}", [file_path]}
|
||||
else
|
||||
{"Checking Python syntax errors", []}
|
||||
end
|
||||
|
||||
# Default cases
|
||||
tool_name when is_binary(tool_name) ->
|
||||
cond do
|
||||
String.starts_with?(tool_name, "vscode_") ->
|
||||
action = String.replace(tool_name, "vscode_", "") |> String.replace("_", " ")
|
||||
{"VS Code: #{action}", []}
|
||||
|
||||
String.starts_with?(tool_name, "elixir-") ->
|
||||
action = String.replace(tool_name, "elixir-", "") |> String.replace("-", " ")
|
||||
{"Elixir: #{action}", []}
|
||||
|
||||
String.starts_with?(tool_name, "pylance") ->
|
||||
action = String.replace(tool_name, "pylance", "") |> humanize_string()
|
||||
{"Python: #{action}", []}
|
||||
|
||||
String.contains?(tool_name, "_") ->
|
||||
action = String.replace(tool_name, "_", " ") |> String.capitalize()
|
||||
{action, []}
|
||||
|
||||
true ->
|
||||
{String.capitalize(tool_name), []}
|
||||
end
|
||||
|
||||
_ ->
|
||||
{"Unknown activity", []}
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Update an agent's activity based on a tool call.
|
||||
"""
|
||||
def update_agent_activity(agent_id, tool_name, args) do
|
||||
{activity, files} = infer_activity(tool_name, args)
|
||||
|
||||
case TaskRegistry.get_agent(agent_id) do
|
||||
{:ok, agent} ->
|
||||
updated_agent = Agent.update_activity(agent, activity, files)
|
||||
# Update the agent in the registry
|
||||
TaskRegistry.update_agent(agent_id, updated_agent)
|
||||
|
||||
{:error, _} ->
|
||||
# Agent not found, ignore
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Clear an agent's activity (e.g., when they go idle).
|
||||
"""
|
||||
def clear_agent_activity(agent_id) do
|
||||
case TaskRegistry.get_agent(agent_id) do
|
||||
{:ok, agent} ->
|
||||
updated_agent = Agent.clear_activity(agent)
|
||||
TaskRegistry.update_agent(agent_id, updated_agent)
|
||||
|
||||
{:error, _} ->
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
# Private helper functions
|
||||
|
||||
defp extract_file_path(args) do
|
||||
# Try various common parameter names for file paths
|
||||
args["path"] || args["filePath"] || args["file_path"] ||
|
||||
args["source"] || args["destination"] || args["fileUri"] |> uri_to_path()
|
||||
end
|
||||
|
||||
defp uri_to_path(nil), do: nil
|
||||
|
||||
defp uri_to_path(uri) when is_binary(uri) do
|
||||
if String.starts_with?(uri, "file://") do
|
||||
String.replace_prefix(uri, "file://", "")
|
||||
else
|
||||
uri
|
||||
end
|
||||
end
|
||||
|
||||
defp humanize_string(str) do
|
||||
str
|
||||
|> String.split(~r/[A-Z]/)
|
||||
|> Enum.map(&String.downcase/1)
|
||||
|> Enum.filter(&(&1 != ""))
|
||||
|> Enum.join(" ")
|
||||
|> String.capitalize()
|
||||
end
|
||||
end
|
||||
@@ -13,7 +13,13 @@ defmodule AgentCoordinator.Agent do
|
||||
:codebase_id,
|
||||
:workspace_path,
|
||||
:last_heartbeat,
|
||||
:metadata
|
||||
:metadata,
|
||||
:current_activity,
|
||||
:current_files,
|
||||
:activity_history,
|
||||
:role,
|
||||
:managed_agents,
|
||||
:oversight_scope
|
||||
]}
|
||||
defstruct [
|
||||
:id,
|
||||
@@ -24,11 +30,26 @@ defmodule AgentCoordinator.Agent do
|
||||
:codebase_id,
|
||||
:workspace_path,
|
||||
:last_heartbeat,
|
||||
:metadata
|
||||
:metadata,
|
||||
:current_activity,
|
||||
:current_files,
|
||||
:activity_history,
|
||||
:role,
|
||||
:managed_agents,
|
||||
:oversight_scope
|
||||
]
|
||||
|
||||
@type status :: :idle | :busy | :offline | :error
|
||||
@type capability :: :coding | :testing | :documentation | :analysis | :review
|
||||
@type capability ::
|
||||
:coding
|
||||
| :testing
|
||||
| :documentation
|
||||
| :analysis
|
||||
| :review
|
||||
| :management
|
||||
| :coordination
|
||||
| :oversight
|
||||
@type role :: :standard | :director | :project_manager
|
||||
|
||||
@type t :: %__MODULE__{
|
||||
id: String.t(),
|
||||
@@ -39,20 +60,59 @@ defmodule AgentCoordinator.Agent do
|
||||
codebase_id: String.t(),
|
||||
workspace_path: String.t() | nil,
|
||||
last_heartbeat: DateTime.t(),
|
||||
metadata: map()
|
||||
metadata: map(),
|
||||
current_activity: String.t() | nil,
|
||||
current_files: [String.t()],
|
||||
activity_history: [map()],
|
||||
role: role(),
|
||||
managed_agents: [String.t()],
|
||||
oversight_scope: :codebase | :global
|
||||
}
|
||||
|
||||
def new(name, capabilities, opts \\ []) do
|
||||
workspace_path = Keyword.get(opts, :workspace_path)
|
||||
|
||||
# Use smart codebase identification
|
||||
codebase_id =
|
||||
case Keyword.get(opts, :codebase_id) do
|
||||
nil when workspace_path ->
|
||||
# Auto-detect from workspace
|
||||
case AgentCoordinator.CodebaseIdentifier.identify_codebase(workspace_path) do
|
||||
%{canonical_id: canonical_id} -> canonical_id
|
||||
_ -> Path.basename(workspace_path || "default")
|
||||
end
|
||||
|
||||
nil ->
|
||||
"default"
|
||||
|
||||
explicit_id ->
|
||||
# Normalize the provided ID
|
||||
AgentCoordinator.CodebaseIdentifier.normalize_codebase_reference(
|
||||
explicit_id,
|
||||
workspace_path
|
||||
)
|
||||
end
|
||||
|
||||
# Determine role based on capabilities
|
||||
role = determine_role(capabilities)
|
||||
|
||||
%__MODULE__{
|
||||
id: UUID.uuid4(),
|
||||
name: name,
|
||||
capabilities: capabilities,
|
||||
status: :idle,
|
||||
current_task_id: nil,
|
||||
codebase_id: Keyword.get(opts, :codebase_id, "default"),
|
||||
workspace_path: Keyword.get(opts, :workspace_path),
|
||||
codebase_id: codebase_id,
|
||||
workspace_path: workspace_path,
|
||||
last_heartbeat: DateTime.utc_now(),
|
||||
metadata: Keyword.get(opts, :metadata, %{})
|
||||
metadata: Keyword.get(opts, :metadata, %{}),
|
||||
current_activity: nil,
|
||||
current_files: [],
|
||||
activity_history: [],
|
||||
role: role,
|
||||
managed_agents: [],
|
||||
oversight_scope:
|
||||
if(role == :director, do: Keyword.get(opts, :oversight_scope, :codebase), else: :codebase)
|
||||
}
|
||||
end
|
||||
|
||||
@@ -60,6 +120,31 @@ defmodule AgentCoordinator.Agent do
|
||||
%{agent | last_heartbeat: DateTime.utc_now()}
|
||||
end
|
||||
|
||||
def update_activity(agent, activity, files \\ []) do
|
||||
# Add to activity history (keep last 10 activities)
|
||||
activity_entry = %{
|
||||
activity: activity,
|
||||
files: files,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
new_history =
|
||||
[activity_entry | agent.activity_history]
|
||||
|> Enum.take(10)
|
||||
|
||||
%{
|
||||
agent
|
||||
| current_activity: activity,
|
||||
current_files: files,
|
||||
activity_history: new_history,
|
||||
last_heartbeat: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def clear_activity(agent) do
|
||||
%{agent | current_activity: nil, current_files: [], last_heartbeat: DateTime.utc_now()}
|
||||
end
|
||||
|
||||
def assign_task(agent, task_id) do
|
||||
%{agent | status: :busy, current_task_id: task_id}
|
||||
end
|
||||
@@ -74,16 +159,18 @@ defmodule AgentCoordinator.Agent do
|
||||
|
||||
def can_handle?(agent, task) do
|
||||
# Check if agent is in the same codebase or can handle cross-codebase tasks
|
||||
codebase_compatible = agent.codebase_id == task.codebase_id or
|
||||
Map.get(agent.metadata, :cross_codebase_capable, false)
|
||||
|
||||
codebase_compatible =
|
||||
agent.codebase_id == task.codebase_id or
|
||||
Map.get(agent.metadata, :cross_codebase_capable, false)
|
||||
|
||||
# Simple capability matching - can be enhanced
|
||||
required_capabilities = Map.get(task.metadata, :required_capabilities, [])
|
||||
|
||||
capability_match = case required_capabilities do
|
||||
[] -> true
|
||||
caps -> Enum.any?(caps, fn cap -> cap in agent.capabilities end)
|
||||
end
|
||||
capability_match =
|
||||
case required_capabilities do
|
||||
[] -> true
|
||||
caps -> Enum.any?(caps, fn cap -> cap in agent.capabilities end)
|
||||
end
|
||||
|
||||
codebase_compatible and capability_match
|
||||
end
|
||||
@@ -91,4 +178,55 @@ defmodule AgentCoordinator.Agent do
|
||||
def can_work_cross_codebase?(agent) do
|
||||
Map.get(agent.metadata, :cross_codebase_capable, false)
|
||||
end
|
||||
|
||||
# Director-specific functions
|
||||
|
||||
def is_director?(agent) do
|
||||
agent.role == :director
|
||||
end
|
||||
|
||||
def is_manager?(agent) do
|
||||
agent.role in [:director, :project_manager]
|
||||
end
|
||||
|
||||
def can_manage_agent?(director, target_agent) do
|
||||
case director.oversight_scope do
|
||||
:global -> true
|
||||
:codebase -> director.codebase_id == target_agent.codebase_id
|
||||
end
|
||||
end
|
||||
|
||||
def add_managed_agent(director, agent_id) do
|
||||
if is_manager?(director) do
|
||||
managed_agents = [agent_id | director.managed_agents] |> Enum.uniq()
|
||||
%{director | managed_agents: managed_agents}
|
||||
else
|
||||
director
|
||||
end
|
||||
end
|
||||
|
||||
def remove_managed_agent(director, agent_id) do
|
||||
if is_manager?(director) do
|
||||
managed_agents = director.managed_agents |> Enum.reject(&(&1 == agent_id))
|
||||
%{director | managed_agents: managed_agents}
|
||||
else
|
||||
director
|
||||
end
|
||||
end
|
||||
|
||||
# Private helper to determine role from capabilities
|
||||
defp determine_role(capabilities) do
|
||||
management_caps = [:management, :coordination, :oversight]
|
||||
|
||||
cond do
|
||||
Enum.any?(management_caps, &(&1 in capabilities)) and :oversight in capabilities ->
|
||||
:director
|
||||
|
||||
:management in capabilities ->
|
||||
:project_manager
|
||||
|
||||
true ->
|
||||
:standard
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -18,23 +18,24 @@ defmodule AgentCoordinator.Application do
|
||||
{Phoenix.PubSub, name: AgentCoordinator.PubSub},
|
||||
|
||||
# Codebase registry for multi-codebase coordination
|
||||
{AgentCoordinator.CodebaseRegistry, nats: if(enable_persistence, do: nats_config(), else: nil)},
|
||||
{AgentCoordinator.CodebaseRegistry,
|
||||
nats: if(enable_persistence, do: nats_config(), else: nil)},
|
||||
|
||||
# Task registry with NATS integration (conditionally add persistence)
|
||||
{AgentCoordinator.TaskRegistry, nats: if(enable_persistence, do: nats_config(), else: nil)},
|
||||
|
||||
# MCP Server Manager (manages external MCP servers)
|
||||
{AgentCoordinator.MCPServerManager, config_file: Application.get_env(:agent_coordinator, :mcp_config_file, "mcp_servers.json")},
|
||||
# Session manager for MCP session token handling
|
||||
AgentCoordinator.SessionManager,
|
||||
|
||||
# MCP server
|
||||
# Unified MCP server (includes external server management, session tracking, and auto-registration)
|
||||
AgentCoordinator.MCPServer,
|
||||
|
||||
# Interface manager for multiple MCP interface modes
|
||||
AgentCoordinator.InterfaceManager,
|
||||
|
||||
# Auto-heartbeat manager
|
||||
AgentCoordinator.AutoHeartbeat,
|
||||
|
||||
# Enhanced MCP server with automatic heartbeats
|
||||
AgentCoordinator.EnhancedMCPServer,
|
||||
|
||||
# Dynamic supervisor for agent inboxes
|
||||
{DynamicSupervisor, name: AgentCoordinator.InboxSupervisor, strategy: :one_for_one}
|
||||
]
|
||||
|
||||
@@ -31,19 +31,20 @@ defmodule AgentCoordinator.AutoHeartbeat do
|
||||
"""
|
||||
def register_agent_with_heartbeat(name, capabilities, agent_context \\ %{}) do
|
||||
# Convert capabilities to strings if they're atoms
|
||||
string_capabilities = Enum.map(capabilities, fn
|
||||
cap when is_atom(cap) -> Atom.to_string(cap)
|
||||
cap when is_binary(cap) -> cap
|
||||
end)
|
||||
string_capabilities =
|
||||
Enum.map(capabilities, fn
|
||||
cap when is_atom(cap) -> Atom.to_string(cap)
|
||||
cap when is_binary(cap) -> cap
|
||||
end)
|
||||
|
||||
# First register the agent normally
|
||||
case MCPServer.handle_mcp_request(%{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{"name" => name, "capabilities" => string_capabilities}
|
||||
}
|
||||
}) do
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{"name" => name, "capabilities" => string_capabilities}
|
||||
}
|
||||
}) do
|
||||
%{"result" => %{"content" => [%{"text" => response_json}]}} ->
|
||||
case Jason.decode(response_json) do
|
||||
{:ok, %{"agent_id" => agent_id}} ->
|
||||
@@ -100,10 +101,14 @@ defmodule AgentCoordinator.AutoHeartbeat do
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "create_task",
|
||||
"arguments" => Map.merge(%{
|
||||
"title" => title,
|
||||
"description" => description
|
||||
}, opts)
|
||||
"arguments" =>
|
||||
Map.merge(
|
||||
%{
|
||||
"title" => title,
|
||||
"description" => description
|
||||
},
|
||||
opts
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -173,9 +178,10 @@ defmodule AgentCoordinator.AutoHeartbeat do
|
||||
# Start new timer
|
||||
timer_ref = Process.send_after(self(), {:heartbeat_timer, agent_id}, @heartbeat_interval)
|
||||
|
||||
new_state = %{state |
|
||||
timers: Map.put(state.timers, agent_id, timer_ref),
|
||||
agent_contexts: Map.put(state.agent_contexts, agent_id, context)
|
||||
new_state = %{
|
||||
state
|
||||
| timers: Map.put(state.timers, agent_id, timer_ref),
|
||||
agent_contexts: Map.put(state.agent_contexts, agent_id, context)
|
||||
}
|
||||
|
||||
{:reply, :ok, new_state}
|
||||
@@ -187,9 +193,10 @@ defmodule AgentCoordinator.AutoHeartbeat do
|
||||
Process.cancel_timer(state.timers[agent_id])
|
||||
end
|
||||
|
||||
new_state = %{state |
|
||||
timers: Map.delete(state.timers, agent_id),
|
||||
agent_contexts: Map.delete(state.agent_contexts, agent_id)
|
||||
new_state = %{
|
||||
state
|
||||
| timers: Map.delete(state.timers, agent_id),
|
||||
agent_contexts: Map.delete(state.agent_contexts, agent_id)
|
||||
}
|
||||
|
||||
{:reply, :ok, new_state}
|
||||
|
||||
@@ -20,7 +20,7 @@ defmodule AgentCoordinator.Client do
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
alias AgentCoordinator.{EnhancedMCPServer, AutoHeartbeat}
|
||||
alias AgentCoordinator.AutoHeartbeat
|
||||
|
||||
defstruct [
|
||||
:agent_id,
|
||||
@@ -108,12 +108,12 @@ defmodule AgentCoordinator.Client do
|
||||
# Server callbacks
|
||||
|
||||
def init(config) do
|
||||
# Register with enhanced MCP server
|
||||
case EnhancedMCPServer.register_agent_with_session(
|
||||
config.agent_name,
|
||||
config.capabilities,
|
||||
self()
|
||||
) do
|
||||
# Register with task registry
|
||||
case AgentCoordinator.TaskRegistry.register_agent(
|
||||
config.agent_name,
|
||||
config.capabilities,
|
||||
session_pid: self()
|
||||
) do
|
||||
{:ok, agent_id} ->
|
||||
state = %__MODULE__{
|
||||
agent_id: agent_id,
|
||||
@@ -151,10 +151,14 @@ defmodule AgentCoordinator.Client do
|
||||
end
|
||||
|
||||
def handle_call({:create_task, title, description, opts}, _from, state) do
|
||||
arguments = Map.merge(%{
|
||||
"title" => title,
|
||||
"description" => description
|
||||
}, opts)
|
||||
arguments =
|
||||
Map.merge(
|
||||
%{
|
||||
"title" => title,
|
||||
"description" => description
|
||||
},
|
||||
opts
|
||||
)
|
||||
|
||||
request = %{
|
||||
"method" => "tools/call",
|
||||
@@ -182,9 +186,9 @@ defmodule AgentCoordinator.Client do
|
||||
end
|
||||
|
||||
def handle_call(:get_task_board, _from, state) do
|
||||
case EnhancedMCPServer.get_enhanced_task_board() do
|
||||
{:ok, board} ->
|
||||
{:reply, {:ok, board}, update_last_heartbeat(state)}
|
||||
case AgentCoordinator.TaskRegistry.get_task_board() do
|
||||
task_board when is_map(task_board) ->
|
||||
{:reply, {:ok, task_board}, update_last_heartbeat(state)}
|
||||
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
@@ -266,12 +270,10 @@ defmodule AgentCoordinator.Client do
|
||||
# Private helpers
|
||||
|
||||
defp enhanced_mcp_call(request, state) do
|
||||
session_info = %{
|
||||
agent_id: state.agent_id,
|
||||
session_pid: state.session_pid
|
||||
}
|
||||
# Add agent_id to the request for the MCP server
|
||||
request_with_agent = Map.put(request, "agent_id", state.agent_id)
|
||||
|
||||
case EnhancedMCPServer.handle_enhanced_mcp_request(request, session_info) do
|
||||
case AgentCoordinator.MCPServer.handle_mcp_request(request_with_agent) do
|
||||
%{"result" => %{"content" => [%{"text" => response_json}]}} = response ->
|
||||
case Jason.decode(response_json) do
|
||||
{:ok, data} ->
|
||||
@@ -300,7 +302,7 @@ defmodule AgentCoordinator.Client do
|
||||
}
|
||||
}
|
||||
|
||||
case EnhancedMCPServer.handle_enhanced_mcp_request(request) do
|
||||
case AgentCoordinator.MCPServer.handle_mcp_request(request) do
|
||||
%{"result" => _} -> :ok
|
||||
%{"error" => %{"message" => message}} -> {:error, message}
|
||||
_ -> {:error, :unknown_heartbeat_error}
|
||||
|
||||
326
lib/agent_coordinator/codebase_identifier.ex
Normal file
326
lib/agent_coordinator/codebase_identifier.ex
Normal file
@@ -0,0 +1,326 @@
|
||||
defmodule AgentCoordinator.CodebaseIdentifier do
|
||||
@moduledoc """
|
||||
Smart codebase identification system that works across local and remote scenarios.
|
||||
|
||||
Generates canonical codebase identifiers using multiple strategies:
|
||||
1. Git repository detection (preferred)
|
||||
2. Local folder name fallback
|
||||
3. Remote workspace mapping
|
||||
4. Custom identifier override
|
||||
"""
|
||||
|
||||
require Logger
|
||||
|
||||
@type codebase_info :: %{
|
||||
canonical_id: String.t(),
|
||||
display_name: String.t(),
|
||||
workspace_path: String.t(),
|
||||
repository_url: String.t() | nil,
|
||||
git_remote: String.t() | nil,
|
||||
branch: String.t() | nil,
|
||||
commit_hash: String.t() | nil,
|
||||
identification_method: :git_remote | :git_local | :folder_name | :custom
|
||||
}
|
||||
|
||||
@doc """
|
||||
Identify a codebase from a workspace path, generating a canonical ID.
|
||||
|
||||
Priority order:
|
||||
1. Git remote URL (most reliable for distributed teams)
|
||||
2. Git local repository info
|
||||
3. Folder name (fallback for non-git projects)
|
||||
4. Custom override from metadata
|
||||
|
||||
## Examples
|
||||
|
||||
# Git repository with remote
|
||||
iex> identify_codebase("/home/user/my-project")
|
||||
%{
|
||||
canonical_id: "github.com/owner/my-project",
|
||||
display_name: "my-project",
|
||||
workspace_path: "/home/user/my-project",
|
||||
repository_url: "https://github.com/owner/my-project.git",
|
||||
git_remote: "origin",
|
||||
branch: "main",
|
||||
identification_method: :git_remote
|
||||
}
|
||||
|
||||
# Local folder (no git)
|
||||
iex> identify_codebase("/home/user/local-project")
|
||||
%{
|
||||
canonical_id: "local:/home/user/local-project",
|
||||
display_name: "local-project",
|
||||
workspace_path: "/home/user/local-project",
|
||||
repository_url: nil,
|
||||
identification_method: :folder_name
|
||||
}
|
||||
"""
|
||||
def identify_codebase(workspace_path, opts \\ [])
|
||||
|
||||
def identify_codebase(nil, opts) do
|
||||
custom_id = Keyword.get(opts, :custom_id, "default")
|
||||
build_custom_codebase_info(nil, custom_id)
|
||||
end
|
||||
|
||||
def identify_codebase(workspace_path, opts) do
|
||||
custom_id = Keyword.get(opts, :custom_id)
|
||||
|
||||
cond do
|
||||
custom_id ->
|
||||
build_custom_codebase_info(workspace_path, custom_id)
|
||||
|
||||
git_repository?(workspace_path) ->
|
||||
identify_git_codebase(workspace_path)
|
||||
|
||||
true ->
|
||||
identify_folder_codebase(workspace_path)
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Normalize different codebase references to canonical IDs.
|
||||
Handles cases where agents specify different local paths for same repository.
|
||||
"""
|
||||
def normalize_codebase_reference(codebase_ref, workspace_path) do
|
||||
case codebase_ref do
|
||||
# Already canonical
|
||||
id when is_binary(id) ->
|
||||
if String.contains?(id, ".com/") or String.starts_with?(id, "local:") do
|
||||
id
|
||||
else
|
||||
# Folder name - try to resolve to canonical
|
||||
case identify_codebase(workspace_path) do
|
||||
%{canonical_id: canonical_id} -> canonical_id
|
||||
_ -> "local:#{id}"
|
||||
end
|
||||
end
|
||||
|
||||
_ ->
|
||||
# Fallback to folder-based ID
|
||||
Path.basename(workspace_path || "/unknown")
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Check if two workspace paths refer to the same codebase.
|
||||
Useful for detecting when agents from different machines work on same project.
|
||||
"""
|
||||
def same_codebase?(workspace_path1, workspace_path2) do
|
||||
info1 = identify_codebase(workspace_path1)
|
||||
info2 = identify_codebase(workspace_path2)
|
||||
|
||||
info1.canonical_id == info2.canonical_id
|
||||
end
|
||||
|
||||
# Private functions
|
||||
|
||||
defp build_custom_codebase_info(workspace_path, custom_id) do
|
||||
%{
|
||||
canonical_id: custom_id,
|
||||
display_name: custom_id,
|
||||
workspace_path: workspace_path,
|
||||
repository_url: nil,
|
||||
git_remote: nil,
|
||||
branch: nil,
|
||||
commit_hash: nil,
|
||||
identification_method: :custom
|
||||
}
|
||||
end
|
||||
|
||||
defp identify_git_codebase(workspace_path) do
|
||||
with {:ok, git_info} <- get_git_info(workspace_path) do
|
||||
canonical_id =
|
||||
case git_info.remote_url do
|
||||
nil ->
|
||||
# Local git repo without remote
|
||||
"git-local:#{git_info.repo_name}"
|
||||
|
||||
remote_url ->
|
||||
# Extract canonical identifier from remote URL
|
||||
extract_canonical_from_remote(remote_url)
|
||||
end
|
||||
|
||||
%{
|
||||
canonical_id: canonical_id,
|
||||
display_name: git_info.repo_name,
|
||||
workspace_path: workspace_path,
|
||||
repository_url: git_info.remote_url,
|
||||
git_remote: git_info.remote_name,
|
||||
branch: git_info.branch,
|
||||
commit_hash: git_info.commit_hash,
|
||||
identification_method: if(git_info.remote_url, do: :git_remote, else: :git_local)
|
||||
}
|
||||
else
|
||||
_ ->
|
||||
identify_folder_codebase(workspace_path)
|
||||
end
|
||||
end
|
||||
|
||||
defp identify_folder_codebase(workspace_path) when is_nil(workspace_path) do
|
||||
%{
|
||||
canonical_id: "default",
|
||||
display_name: "default",
|
||||
workspace_path: nil,
|
||||
repository_url: nil,
|
||||
git_remote: nil,
|
||||
branch: nil,
|
||||
commit_hash: nil,
|
||||
identification_method: :folder_name
|
||||
}
|
||||
end
|
||||
|
||||
defp identify_folder_codebase(workspace_path) do
|
||||
folder_name = Path.basename(workspace_path)
|
||||
|
||||
%{
|
||||
canonical_id: "local:#{workspace_path}",
|
||||
display_name: folder_name,
|
||||
workspace_path: workspace_path,
|
||||
repository_url: nil,
|
||||
git_remote: nil,
|
||||
branch: nil,
|
||||
commit_hash: nil,
|
||||
identification_method: :folder_name
|
||||
}
|
||||
end
|
||||
|
||||
defp git_repository?(workspace_path) when is_nil(workspace_path), do: false
|
||||
|
||||
defp git_repository?(workspace_path) do
|
||||
File.exists?(Path.join(workspace_path, ".git"))
|
||||
end
|
||||
|
||||
defp get_git_info(workspace_path) do
|
||||
try do
|
||||
# Get repository name
|
||||
repo_name = Path.basename(workspace_path)
|
||||
|
||||
# Get current branch
|
||||
{branch, 0} = System.cmd("git", ["branch", "--show-current"], cd: workspace_path)
|
||||
branch = String.trim(branch)
|
||||
|
||||
# Get current commit
|
||||
{commit_hash, 0} = System.cmd("git", ["rev-parse", "HEAD"], cd: workspace_path)
|
||||
commit_hash = String.trim(commit_hash)
|
||||
|
||||
# Try to get remote URL
|
||||
{remote_info, _remote_result_use_me?} =
|
||||
case System.cmd("git", ["remote", "-v"], cd: workspace_path) do
|
||||
{output, 0} when output != "" ->
|
||||
# Parse remote output to extract origin URL
|
||||
lines = String.split(String.trim(output), "\n")
|
||||
|
||||
origin_line =
|
||||
Enum.find(lines, fn line ->
|
||||
String.starts_with?(line, "origin") and String.contains?(line, "(fetch)")
|
||||
end)
|
||||
|
||||
case origin_line do
|
||||
nil ->
|
||||
{nil, :no_origin}
|
||||
|
||||
line ->
|
||||
# Extract URL from "origin <url> (fetch)"
|
||||
url =
|
||||
line
|
||||
|> String.split()
|
||||
|> Enum.at(1)
|
||||
|
||||
{url, :ok}
|
||||
end
|
||||
|
||||
_ ->
|
||||
{nil, :no_remotes}
|
||||
end
|
||||
|
||||
git_info = %{
|
||||
repo_name: repo_name,
|
||||
branch: branch,
|
||||
commit_hash: commit_hash,
|
||||
remote_url: remote_info,
|
||||
remote_name: if(remote_info, do: "origin", else: nil)
|
||||
}
|
||||
|
||||
{:ok, git_info}
|
||||
rescue
|
||||
_ -> {:error, :git_command_failed}
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_canonical_from_remote(remote_url) do
|
||||
cond do
|
||||
# GitHub HTTPS
|
||||
String.contains?(remote_url, "github.com") ->
|
||||
extract_github_id(remote_url)
|
||||
|
||||
# GitLab HTTPS
|
||||
String.contains?(remote_url, "gitlab.com") ->
|
||||
extract_gitlab_id(remote_url)
|
||||
|
||||
# SSH format
|
||||
String.contains?(remote_url, "@") and String.contains?(remote_url, ":") ->
|
||||
extract_ssh_id(remote_url)
|
||||
|
||||
# Other HTTPS
|
||||
String.starts_with?(remote_url, "https://") ->
|
||||
extract_https_id(remote_url)
|
||||
|
||||
true ->
|
||||
# Fallback - use raw URL
|
||||
"remote:#{remote_url}"
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_github_id(url) do
|
||||
# Extract "owner/repo" from various GitHub URL formats
|
||||
regex = ~r/github\.com[\/:]([^\/]+)\/([^\/\.]+)/
|
||||
|
||||
case Regex.run(regex, url) do
|
||||
[_, owner, repo] ->
|
||||
"github.com/#{owner}/#{repo}"
|
||||
|
||||
_ ->
|
||||
"github.com/unknown"
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_gitlab_id(url) do
|
||||
# Similar logic for GitLab
|
||||
regex = ~r/gitlab\.com[\/:]([^\/]+)\/([^\/\.]+)/
|
||||
|
||||
case Regex.run(regex, url) do
|
||||
[_, owner, repo] ->
|
||||
"gitlab.com/#{owner}/#{repo}"
|
||||
|
||||
_ ->
|
||||
"gitlab.com/unknown"
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_ssh_id(url) do
|
||||
# SSH format: git@host:owner/repo.git
|
||||
case String.split(url, ":") do
|
||||
[host_part, path_part] ->
|
||||
host = String.replace(host_part, ~r/.*@/, "")
|
||||
path = String.replace(path_part, ".git", "")
|
||||
"#{host}/#{path}"
|
||||
|
||||
_ ->
|
||||
"ssh:#{url}"
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_https_id(url) do
|
||||
# Extract from general HTTPS URLs
|
||||
uri = URI.parse(url)
|
||||
host = uri.host
|
||||
path = String.replace(uri.path || "", ~r/^\//, "")
|
||||
path = String.replace(path, ".git", "")
|
||||
|
||||
if host && path != "" do
|
||||
"#{host}/#{path}"
|
||||
else
|
||||
"https:#{url}"
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -1,266 +0,0 @@
|
||||
defmodule AgentCoordinator.EnhancedMCPServer do
|
||||
@moduledoc """
|
||||
Enhanced MCP server with automatic heartbeat management and collision detection.
|
||||
|
||||
This module extends the base MCP server with:
|
||||
1. Automatic heartbeats on every operation
|
||||
2. Agent session tracking
|
||||
3. Enhanced collision detection
|
||||
4. Automatic agent cleanup on disconnect
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
alias AgentCoordinator.{MCPServer, AutoHeartbeat, TaskRegistry}
|
||||
|
||||
# Track active agent sessions
|
||||
defstruct [
|
||||
:agent_sessions,
|
||||
:session_monitors
|
||||
]
|
||||
|
||||
# Client API
|
||||
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Enhanced MCP request handler with automatic heartbeat management
|
||||
"""
|
||||
def handle_enhanced_mcp_request(request, session_info \\ %{}) do
|
||||
GenServer.call(__MODULE__, {:enhanced_mcp_request, request, session_info})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Register an agent with enhanced session tracking
|
||||
"""
|
||||
def register_agent_with_session(name, capabilities, session_pid \\ self()) do
|
||||
GenServer.call(__MODULE__, {:register_agent_with_session, name, capabilities, session_pid})
|
||||
end
|
||||
|
||||
# Server callbacks
|
||||
|
||||
def init(_opts) do
|
||||
state = %__MODULE__{
|
||||
agent_sessions: %{},
|
||||
session_monitors: %{}
|
||||
}
|
||||
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
def handle_call({:enhanced_mcp_request, request, session_info}, {from_pid, _}, state) do
|
||||
# Extract agent_id from session or request
|
||||
agent_id = extract_agent_id(request, session_info, state)
|
||||
|
||||
# If we have an agent_id, send heartbeat before and after operation
|
||||
enhanced_result =
|
||||
case agent_id do
|
||||
nil ->
|
||||
# No agent context, use normal MCP processing
|
||||
MCPServer.handle_mcp_request(request)
|
||||
|
||||
id ->
|
||||
# Send pre-operation heartbeat
|
||||
pre_heartbeat = TaskRegistry.heartbeat_agent(id)
|
||||
|
||||
# Process the request
|
||||
result = MCPServer.handle_mcp_request(request)
|
||||
|
||||
# Send post-operation heartbeat and update session activity
|
||||
post_heartbeat = TaskRegistry.heartbeat_agent(id)
|
||||
update_session_activity(state, id, from_pid)
|
||||
|
||||
# Add heartbeat metadata to successful responses
|
||||
case result do
|
||||
%{"result" => _} = success ->
|
||||
Map.put(success, "_heartbeat_metadata", %{
|
||||
agent_id: id,
|
||||
pre_heartbeat: pre_heartbeat,
|
||||
post_heartbeat: post_heartbeat,
|
||||
timestamp: DateTime.utc_now()
|
||||
})
|
||||
|
||||
error_result ->
|
||||
error_result
|
||||
end
|
||||
end
|
||||
|
||||
{:reply, enhanced_result, state}
|
||||
end
|
||||
|
||||
def handle_call({:register_agent_with_session, name, capabilities, session_pid}, _from, state) do
|
||||
# Convert capabilities to strings if they're atoms
|
||||
string_capabilities =
|
||||
Enum.map(capabilities, fn
|
||||
cap when is_atom(cap) -> Atom.to_string(cap)
|
||||
cap when is_binary(cap) -> cap
|
||||
end)
|
||||
|
||||
# Register the agent normally first
|
||||
case MCPServer.handle_mcp_request(%{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{"name" => name, "capabilities" => string_capabilities}
|
||||
}
|
||||
}) do
|
||||
%{"result" => %{"content" => [%{"text" => response_json}]}} ->
|
||||
case Jason.decode(response_json) do
|
||||
{:ok, %{"agent_id" => agent_id}} ->
|
||||
# Track the session
|
||||
monitor_ref = Process.monitor(session_pid)
|
||||
|
||||
new_state = %{
|
||||
state
|
||||
| agent_sessions:
|
||||
Map.put(state.agent_sessions, agent_id, %{
|
||||
pid: session_pid,
|
||||
name: name,
|
||||
capabilities: capabilities,
|
||||
registered_at: DateTime.utc_now(),
|
||||
last_activity: DateTime.utc_now()
|
||||
}),
|
||||
session_monitors: Map.put(state.session_monitors, monitor_ref, agent_id)
|
||||
}
|
||||
|
||||
# Start automatic heartbeat management
|
||||
AutoHeartbeat.start_link([])
|
||||
|
||||
AutoHeartbeat.register_agent_with_heartbeat(name, capabilities, %{
|
||||
session_pid: session_pid,
|
||||
enhanced_server: true
|
||||
})
|
||||
|
||||
{:reply, {:ok, agent_id}, new_state}
|
||||
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
|
||||
%{"error" => %{"message" => message}} ->
|
||||
{:reply, {:error, message}, state}
|
||||
|
||||
_ ->
|
||||
{:reply, {:error, "Unexpected response format"}, state}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call(:get_enhanced_task_board, _from, state) do
|
||||
# Get the regular task board
|
||||
case MCPServer.handle_mcp_request(%{
|
||||
"method" => "tools/call",
|
||||
"params" => %{"name" => "get_task_board", "arguments" => %{}}
|
||||
}) do
|
||||
%{"result" => %{"content" => [%{"text" => response_json}]}} ->
|
||||
case Jason.decode(response_json) do
|
||||
{:ok, %{"agents" => agents}} ->
|
||||
# Enhance with session information
|
||||
enhanced_agents =
|
||||
Enum.map(agents, fn agent ->
|
||||
agent_id = agent["agent_id"]
|
||||
session_info = Map.get(state.agent_sessions, agent_id, %{})
|
||||
|
||||
Map.merge(agent, %{
|
||||
"session_active" => Map.has_key?(state.agent_sessions, agent_id),
|
||||
"last_activity" => Map.get(session_info, :last_activity),
|
||||
"session_duration" => calculate_session_duration(session_info)
|
||||
})
|
||||
end)
|
||||
|
||||
result = %{
|
||||
"agents" => enhanced_agents,
|
||||
"active_sessions" => map_size(state.agent_sessions)
|
||||
}
|
||||
|
||||
{:reply, {:ok, result}, state}
|
||||
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
|
||||
%{"error" => %{"message" => message}} ->
|
||||
{:reply, {:error, message}, state}
|
||||
end
|
||||
end
|
||||
|
||||
# Handle process monitoring - cleanup when agent session dies
|
||||
def handle_info({:DOWN, monitor_ref, :process, _pid, _reason}, state) do
|
||||
case Map.get(state.session_monitors, monitor_ref) do
|
||||
nil ->
|
||||
{:noreply, state}
|
||||
|
||||
agent_id ->
|
||||
# Clean up the agent session
|
||||
new_state = %{
|
||||
state
|
||||
| agent_sessions: Map.delete(state.agent_sessions, agent_id),
|
||||
session_monitors: Map.delete(state.session_monitors, monitor_ref)
|
||||
}
|
||||
|
||||
# Stop heartbeat management
|
||||
AutoHeartbeat.stop_heartbeat(agent_id)
|
||||
|
||||
# Mark agent as offline in registry
|
||||
# (This could be enhanced to gracefully handle ongoing tasks)
|
||||
|
||||
{:noreply, new_state}
|
||||
end
|
||||
end
|
||||
|
||||
# Private helpers
|
||||
|
||||
defp extract_agent_id(request, session_info, state) do
|
||||
# Try to get agent_id from various sources
|
||||
cond do
|
||||
# From request arguments
|
||||
Map.get(request, "params", %{})
|
||||
|> Map.get("arguments", %{})
|
||||
|> Map.get("agent_id") ->
|
||||
request["params"]["arguments"]["agent_id"]
|
||||
|
||||
# From session info
|
||||
Map.get(session_info, :agent_id) ->
|
||||
session_info.agent_id
|
||||
|
||||
# From session lookup by PID
|
||||
session_pid = Map.get(session_info, :session_pid, self()) ->
|
||||
find_agent_by_session_pid(state, session_pid)
|
||||
|
||||
true ->
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
defp find_agent_by_session_pid(state, session_pid) do
|
||||
Enum.find_value(state.agent_sessions, fn {agent_id, session_data} ->
|
||||
if session_data.pid == session_pid, do: agent_id, else: nil
|
||||
end)
|
||||
end
|
||||
|
||||
defp update_session_activity(state, agent_id, _session_pid) do
|
||||
case Map.get(state.agent_sessions, agent_id) do
|
||||
nil ->
|
||||
:ok
|
||||
|
||||
session_data ->
|
||||
_updated_session = %{session_data | last_activity: DateTime.utc_now()}
|
||||
# Note: This doesn't update the state since we're in a call handler
|
||||
# In a real implementation, you might want to use cast for this
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get enhanced task board with session information
|
||||
"""
|
||||
def get_enhanced_task_board do
|
||||
GenServer.call(__MODULE__, :get_enhanced_task_board)
|
||||
end
|
||||
|
||||
defp calculate_session_duration(%{registered_at: start_time}) do
|
||||
DateTime.diff(DateTime.utc_now(), start_time, :second)
|
||||
end
|
||||
|
||||
defp calculate_session_duration(_), do: nil
|
||||
end
|
||||
637
lib/agent_coordinator/http_interface.ex
Normal file
637
lib/agent_coordinator/http_interface.ex
Normal file
@@ -0,0 +1,637 @@
|
||||
defmodule AgentCoordinator.HttpInterface do
|
||||
@moduledoc """
|
||||
HTTP and WebSocket interface for the Agent Coordinator MCP server.
|
||||
|
||||
This module provides:
|
||||
- HTTP REST API for MCP requests
|
||||
- WebSocket support for real-time communication
|
||||
- Remote client detection and tool filtering
|
||||
- CORS support for web clients
|
||||
- Session management across HTTP requests
|
||||
"""
|
||||
|
||||
use Plug.Router
|
||||
require Logger
|
||||
alias AgentCoordinator.{MCPServer, ToolFilter, SessionManager}
|
||||
|
||||
plug(Plug.Logger)
|
||||
plug(:match)
|
||||
plug(Plug.Parsers, parsers: [:json], json_decoder: Jason)
|
||||
plug(:put_cors_headers)
|
||||
plug(:dispatch)
|
||||
|
||||
@doc """
|
||||
Start the HTTP server on the specified port.
|
||||
"""
|
||||
def start_link(opts \\ []) do
|
||||
port = Keyword.get(opts, :port, 8080)
|
||||
|
||||
IO.puts(:stderr, "Starting Agent Coordinator HTTP interface on port #{port}")
|
||||
|
||||
Plug.Cowboy.http(__MODULE__, [],
|
||||
port: port,
|
||||
dispatch: cowboy_dispatch()
|
||||
)
|
||||
end
|
||||
|
||||
# HTTP Routes
|
||||
|
||||
get "/health" do
|
||||
send_json_response(conn, 200, %{
|
||||
status: "healthy",
|
||||
service: "agent-coordinator",
|
||||
version: AgentCoordinator.version(),
|
||||
timestamp: DateTime.utc_now()
|
||||
})
|
||||
end
|
||||
|
||||
get "/mcp/capabilities" do
|
||||
context = extract_client_context(conn)
|
||||
|
||||
# Get filtered tools based on client context
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, context)
|
||||
|
||||
capabilities = %{
|
||||
protocolVersion: "2024-11-05",
|
||||
serverInfo: %{
|
||||
name: "agent-coordinator-http",
|
||||
version: AgentCoordinator.version(),
|
||||
description: "Agent Coordinator HTTP/WebSocket interface"
|
||||
},
|
||||
capabilities: %{
|
||||
tools: %{},
|
||||
coordination: %{
|
||||
automatic_task_tracking: true,
|
||||
agent_management: true,
|
||||
multi_server_proxy: true,
|
||||
heartbeat_coverage: true,
|
||||
session_tracking: true,
|
||||
tool_filtering: true
|
||||
}
|
||||
},
|
||||
tools: filtered_tools,
|
||||
context: %{
|
||||
connection_type: context.connection_type,
|
||||
security_level: context.security_level,
|
||||
tool_count: length(filtered_tools)
|
||||
}
|
||||
}
|
||||
|
||||
send_json_response(conn, 200, capabilities)
|
||||
end
|
||||
|
||||
get "/mcp/tools" do
|
||||
context = extract_client_context(conn)
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, context)
|
||||
|
||||
filter_stats = ToolFilter.get_filter_stats(all_tools, context)
|
||||
|
||||
response = %{
|
||||
tools: filtered_tools,
|
||||
_meta: %{
|
||||
filter_stats: filter_stats,
|
||||
context: %{
|
||||
connection_type: context.connection_type,
|
||||
security_level: context.security_level
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
send_json_response(conn, 200, response)
|
||||
end
|
||||
|
||||
post "/mcp/tools/:tool_name" do
|
||||
context = extract_client_context(conn)
|
||||
|
||||
# Check if tool is allowed for this client
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, context)
|
||||
|
||||
tool_allowed =
|
||||
Enum.any?(filtered_tools, fn tool ->
|
||||
Map.get(tool, "name") == tool_name
|
||||
end)
|
||||
|
||||
if not tool_allowed do
|
||||
send_json_response(conn, 403, %{
|
||||
error: %{
|
||||
code: -32601,
|
||||
message: "Tool not available for remote clients: #{tool_name}",
|
||||
data: %{
|
||||
available_tools: Enum.map(filtered_tools, &Map.get(&1, "name")),
|
||||
connection_type: context.connection_type
|
||||
}
|
||||
}
|
||||
})
|
||||
else
|
||||
# Execute the tool call
|
||||
args = Map.get(conn.body_params, "arguments", %{})
|
||||
|
||||
# Create MCP request format
|
||||
mcp_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(conn.body_params, "id", generate_request_id()),
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => tool_name,
|
||||
"arguments" => args
|
||||
}
|
||||
}
|
||||
|
||||
# Add session tracking
|
||||
mcp_request = add_session_info(mcp_request, conn, context)
|
||||
|
||||
# Execute through MCP server
|
||||
case MCPServer.handle_mcp_request(mcp_request) do
|
||||
%{"result" => result} ->
|
||||
send_json_response(conn, 200, %{
|
||||
result: result,
|
||||
_meta: %{
|
||||
tool_name: tool_name,
|
||||
request_id: mcp_request["id"],
|
||||
context: context.connection_type
|
||||
}
|
||||
})
|
||||
|
||||
%{"error" => error} ->
|
||||
send_json_response(conn, 400, %{error: error})
|
||||
|
||||
unexpected ->
|
||||
IO.puts(:stderr, "Unexpected MCP response: #{inspect(unexpected)}")
|
||||
|
||||
send_json_response(conn, 500, %{
|
||||
error: %{
|
||||
code: -32603,
|
||||
message: "Internal server error"
|
||||
}
|
||||
})
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
post "/mcp/request" do
|
||||
context = extract_client_context(conn)
|
||||
|
||||
# Validate MCP request format
|
||||
case validate_mcp_request(conn.body_params) do
|
||||
{:ok, mcp_request} ->
|
||||
method = Map.get(mcp_request, "method")
|
||||
|
||||
# Validate session for this method
|
||||
case validate_session_for_method(method, conn, context) do
|
||||
{:ok, _session_info} ->
|
||||
# Add session tracking
|
||||
enhanced_request = add_session_info(mcp_request, conn, context)
|
||||
|
||||
# For tool calls, check tool filtering
|
||||
case method do
|
||||
"tools/call" ->
|
||||
tool_name = get_in(enhanced_request, ["params", "name"])
|
||||
|
||||
if tool_allowed_for_context?(tool_name, context) do
|
||||
execute_mcp_request(conn, enhanced_request, context)
|
||||
else
|
||||
send_json_response(conn, 403, %{
|
||||
jsonrpc: "2.0",
|
||||
id: Map.get(enhanced_request, "id"),
|
||||
error: %{
|
||||
code: -32601,
|
||||
message: "Tool not available: #{tool_name}"
|
||||
}
|
||||
})
|
||||
end
|
||||
|
||||
"tools/list" ->
|
||||
# Override tools/list to return filtered tools
|
||||
handle_filtered_tools_list(conn, enhanced_request, context)
|
||||
|
||||
_ ->
|
||||
# Other methods pass through normally
|
||||
execute_mcp_request(conn, enhanced_request, context)
|
||||
end
|
||||
|
||||
{:error, auth_error} ->
|
||||
send_json_response(conn, 401, %{
|
||||
jsonrpc: "2.0",
|
||||
id: Map.get(mcp_request, "id"),
|
||||
error: auth_error
|
||||
})
|
||||
end
|
||||
|
||||
{:error, reason} ->
|
||||
send_json_response(conn, 400, %{
|
||||
jsonrpc: "2.0",
|
||||
id: Map.get(conn.body_params, "id"),
|
||||
error: %{
|
||||
code: -32700,
|
||||
message: "Invalid request: #{reason}"
|
||||
}
|
||||
})
|
||||
end
|
||||
end
|
||||
|
||||
get "/mcp/ws" do
|
||||
conn
|
||||
|> WebSockAdapter.upgrade(AgentCoordinator.WebSocketHandler, %{}, timeout: 60_000)
|
||||
end
|
||||
|
||||
get "/agents" do
|
||||
context = extract_client_context(conn)
|
||||
|
||||
# Only allow agent status for authorized clients
|
||||
case context.security_level do
|
||||
level when level in [:trusted, :sandboxed] ->
|
||||
mcp_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => generate_request_id(),
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_task_board",
|
||||
"arguments" => %{"agent_id" => "http_interface"}
|
||||
}
|
||||
}
|
||||
|
||||
case MCPServer.handle_mcp_request(mcp_request) do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
data = Jason.decode!(text)
|
||||
send_json_response(conn, 200, data)
|
||||
|
||||
%{"error" => error} ->
|
||||
send_json_response(conn, 500, %{error: error})
|
||||
end
|
||||
|
||||
_ ->
|
||||
send_json_response(conn, 403, %{
|
||||
error: "Insufficient privileges to view agent status"
|
||||
})
|
||||
end
|
||||
end
|
||||
|
||||
# Server-Sent Events (SSE) endpoint for real-time MCP streaming.
|
||||
# Implements MCP Streamable HTTP transport for live updates.
|
||||
get "/mcp/stream" do
|
||||
context = extract_client_context(conn)
|
||||
|
||||
# Validate session for SSE stream
|
||||
case validate_session_for_method("stream/subscribe", conn, context) do
|
||||
{:ok, session_info} ->
|
||||
# Set up SSE headers
|
||||
conn =
|
||||
conn
|
||||
|> put_resp_content_type("text/event-stream")
|
||||
|> put_mcp_headers()
|
||||
|> put_resp_header("cache-control", "no-cache")
|
||||
|> put_resp_header("connection", "keep-alive")
|
||||
|> put_resp_header("access-control-allow-credentials", "true")
|
||||
|> send_chunked(200)
|
||||
|
||||
# Send initial connection event
|
||||
{:ok, conn} =
|
||||
chunk(
|
||||
conn,
|
||||
format_sse_event("connected", %{
|
||||
session_id: Map.get(session_info, :agent_id, "anonymous"),
|
||||
protocol_version: "2025-06-18",
|
||||
timestamp: DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
})
|
||||
)
|
||||
|
||||
# Start streaming loop
|
||||
stream_mcp_events(conn, session_info, context)
|
||||
|
||||
{:error, auth_error} ->
|
||||
send_json_response(conn, 401, auth_error)
|
||||
end
|
||||
end
|
||||
|
||||
defp stream_mcp_events(conn, session_info, context) do
|
||||
# This is a basic implementation - in production you'd want to:
|
||||
# 1. Subscribe to a GenServer/PubSub for real-time events
|
||||
# 2. Handle client disconnections gracefully
|
||||
# 3. Implement proper backpressure
|
||||
|
||||
# Send periodic heartbeat for now
|
||||
try do
|
||||
:timer.sleep(1000)
|
||||
|
||||
{:ok, conn} =
|
||||
chunk(
|
||||
conn,
|
||||
format_sse_event("heartbeat", %{
|
||||
timestamp: DateTime.utc_now() |> DateTime.to_iso8601(),
|
||||
session_id: Map.get(session_info, :agent_id, "anonymous")
|
||||
})
|
||||
)
|
||||
|
||||
# Continue streaming (this would be event-driven in production)
|
||||
stream_mcp_events(conn, session_info, context)
|
||||
rescue
|
||||
# Client disconnected
|
||||
_ ->
|
||||
IO.puts(:stderr, "SSE client disconnected")
|
||||
conn
|
||||
end
|
||||
end
|
||||
|
||||
defp format_sse_event(event_type, data) do
|
||||
"event: #{event_type}\ndata: #{Jason.encode!(data)}\n\n"
|
||||
end
|
||||
|
||||
# Catch-all for unmatched routes
|
||||
match _ do
|
||||
send_json_response(conn, 404, %{
|
||||
error: "Not found",
|
||||
available_endpoints: [
|
||||
"GET /health",
|
||||
"GET /mcp/capabilities",
|
||||
"GET /mcp/tools",
|
||||
"POST /mcp/tools/:tool_name",
|
||||
"POST /mcp/request",
|
||||
"GET /mcp/stream (SSE)",
|
||||
"GET /mcp/ws",
|
||||
"GET /agents"
|
||||
]
|
||||
})
|
||||
end
|
||||
|
||||
# Private helper functions
|
||||
|
||||
defp cowboy_dispatch do
|
||||
[
|
||||
{:_,
|
||||
[
|
||||
{"/mcp/ws", AgentCoordinator.WebSocketHandler, []},
|
||||
{:_, Plug.Cowboy.Handler, {__MODULE__, []}}
|
||||
]}
|
||||
]
|
||||
end
|
||||
|
||||
defp extract_client_context(conn) do
|
||||
remote_ip = get_remote_ip(conn)
|
||||
user_agent = get_req_header(conn, "user-agent") |> List.first()
|
||||
origin = get_req_header(conn, "origin") |> List.first()
|
||||
|
||||
connection_info = %{
|
||||
transport: :http,
|
||||
remote_ip: remote_ip,
|
||||
user_agent: user_agent,
|
||||
origin: origin,
|
||||
secure: conn.scheme == :https,
|
||||
headers: conn.req_headers
|
||||
}
|
||||
|
||||
ToolFilter.detect_client_context(connection_info)
|
||||
end
|
||||
|
||||
defp get_remote_ip(conn) do
|
||||
# Check for forwarded headers first (for reverse proxies)
|
||||
forwarded_for = get_req_header(conn, "x-forwarded-for") |> List.first()
|
||||
real_ip = get_req_header(conn, "x-real-ip") |> List.first()
|
||||
|
||||
cond do
|
||||
forwarded_for ->
|
||||
forwarded_for |> String.split(",") |> List.first() |> String.trim()
|
||||
|
||||
real_ip ->
|
||||
real_ip
|
||||
|
||||
true ->
|
||||
conn.remote_ip |> :inet.ntoa() |> to_string()
|
||||
end
|
||||
end
|
||||
|
||||
defp put_cors_headers(conn, _opts) do
|
||||
# Validate origin for enhanced security
|
||||
origin = get_req_header(conn, "origin") |> List.first()
|
||||
allowed_origin = validate_origin(origin)
|
||||
|
||||
conn
|
||||
|> put_resp_header("access-control-allow-origin", allowed_origin)
|
||||
|> put_resp_header("access-control-allow-methods", "GET, POST, OPTIONS")
|
||||
|> put_resp_header(
|
||||
"access-control-allow-headers",
|
||||
"content-type, authorization, mcp-session-id, mcp-protocol-version, x-session-id"
|
||||
)
|
||||
|> put_resp_header("access-control-expose-headers", "mcp-protocol-version, server")
|
||||
|> put_resp_header("access-control-max-age", "86400")
|
||||
end
|
||||
|
||||
# No origin header (direct API calls)
|
||||
defp validate_origin(nil), do: "*"
|
||||
|
||||
defp validate_origin(origin) do
|
||||
# Allow localhost and development origins
|
||||
case URI.parse(origin) do
|
||||
%URI{host: host} when host in ["localhost", "127.0.0.1", "::1"] ->
|
||||
origin
|
||||
|
||||
%URI{host: host} when is_binary(host) ->
|
||||
# Allow HTTPS origins and known development domains
|
||||
if String.starts_with?(origin, "https://") or
|
||||
String.contains?(host, ["localhost", "127.0.0.1", "dev", "local"]) do
|
||||
origin
|
||||
else
|
||||
# For production, be more restrictive
|
||||
IO.puts(:stderr, "Potentially unsafe origin: #{origin}")
|
||||
# Fallback for now, could be more restrictive
|
||||
"*"
|
||||
end
|
||||
|
||||
_ ->
|
||||
"*"
|
||||
end
|
||||
end
|
||||
|
||||
defp send_json_response(conn, status, data) do
|
||||
conn
|
||||
|> put_resp_content_type("application/json")
|
||||
|> put_mcp_headers()
|
||||
|> send_resp(status, Jason.encode!(data))
|
||||
end
|
||||
|
||||
defp put_mcp_headers(conn) do
|
||||
conn
|
||||
|> put_resp_header("mcp-protocol-version", "2025-06-18")
|
||||
|> put_resp_header("server", "AgentCoordinator/1.0")
|
||||
end
|
||||
|
||||
defp validate_mcp_request(params) when is_map(params) do
|
||||
required_fields = ["jsonrpc", "method"]
|
||||
|
||||
missing_fields =
|
||||
Enum.filter(required_fields, fn field ->
|
||||
not Map.has_key?(params, field)
|
||||
end)
|
||||
|
||||
cond do
|
||||
not Enum.empty?(missing_fields) ->
|
||||
{:error, "Missing required fields: #{Enum.join(missing_fields, ", ")}"}
|
||||
|
||||
Map.get(params, "jsonrpc") != "2.0" ->
|
||||
{:error, "Invalid jsonrpc version, must be '2.0'"}
|
||||
|
||||
not is_binary(Map.get(params, "method")) ->
|
||||
{:error, "Method must be a string"}
|
||||
|
||||
true ->
|
||||
{:ok, params}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_mcp_request(_), do: {:error, "Request must be a JSON object"}
|
||||
|
||||
defp add_session_info(mcp_request, conn, context) do
|
||||
# Extract and validate MCP session token
|
||||
{session_id, session_info} = get_session_info(conn)
|
||||
|
||||
# Add context metadata to request params
|
||||
enhanced_params =
|
||||
Map.get(mcp_request, "params", %{})
|
||||
|> Map.put("_session_id", session_id)
|
||||
|> Map.put("_session_info", session_info)
|
||||
|> Map.put("_client_context", %{
|
||||
connection_type: context.connection_type,
|
||||
security_level: context.security_level,
|
||||
remote_ip: get_remote_ip(conn),
|
||||
user_agent: context.user_agent
|
||||
})
|
||||
|
||||
Map.put(mcp_request, "params", enhanced_params)
|
||||
end
|
||||
|
||||
defp get_session_info(conn) do
|
||||
# Check for MCP-Session-Id header (MCP compliant)
|
||||
case get_req_header(conn, "mcp-session-id") do
|
||||
[session_token] when byte_size(session_token) > 0 ->
|
||||
case SessionManager.validate_session(session_token) do
|
||||
{:ok, session_info} ->
|
||||
{session_info.agent_id,
|
||||
%{
|
||||
token: session_token,
|
||||
agent_id: session_info.agent_id,
|
||||
capabilities: session_info.capabilities,
|
||||
expires_at: session_info.expires_at,
|
||||
validated: true
|
||||
}}
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Invalid MCP session token: #{reason}")
|
||||
# Fall back to generating anonymous session
|
||||
anonymous_id =
|
||||
"http_anonymous_" <> (:crypto.strong_rand_bytes(8) |> Base.encode16(case: :lower))
|
||||
|
||||
{anonymous_id, %{validated: false, reason: reason}}
|
||||
end
|
||||
|
||||
[] ->
|
||||
# Check legacy X-Session-Id header for backward compatibility
|
||||
case get_req_header(conn, "x-session-id") do
|
||||
[session_id] when byte_size(session_id) > 0 ->
|
||||
{session_id, %{validated: false, legacy: true}}
|
||||
|
||||
_ ->
|
||||
# No session header, generate anonymous session
|
||||
anonymous_id =
|
||||
"http_anonymous_" <> (:crypto.strong_rand_bytes(8) |> Base.encode16(case: :lower))
|
||||
|
||||
{anonymous_id, %{validated: false, anonymous: true}}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp require_authenticated_session(conn, _context) do
|
||||
{_session_id, session_info} = get_session_info(conn)
|
||||
|
||||
case Map.get(session_info, :validated, false) do
|
||||
true ->
|
||||
{:ok, session_info}
|
||||
|
||||
false ->
|
||||
reason = Map.get(session_info, :reason, "Session not authenticated")
|
||||
|
||||
{:error,
|
||||
%{
|
||||
code: -32001,
|
||||
message: "Authentication required",
|
||||
data: %{reason: reason}
|
||||
}}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_session_for_method(method, conn, context) do
|
||||
# Define which methods require authenticated sessions
|
||||
authenticated_methods =
|
||||
MapSet.new([
|
||||
"agents/register",
|
||||
"agents/unregister",
|
||||
"agents/heartbeat",
|
||||
"tasks/create",
|
||||
"tasks/complete",
|
||||
"codebase/register",
|
||||
"stream/subscribe"
|
||||
])
|
||||
|
||||
if MapSet.member?(authenticated_methods, method) do
|
||||
require_authenticated_session(conn, context)
|
||||
else
|
||||
{:ok, %{anonymous: true}}
|
||||
end
|
||||
end
|
||||
|
||||
defp tool_allowed_for_context?(tool_name, context) do
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, context)
|
||||
|
||||
Enum.any?(filtered_tools, fn tool ->
|
||||
Map.get(tool, "name") == tool_name
|
||||
end)
|
||||
end
|
||||
|
||||
defp execute_mcp_request(conn, mcp_request, _context) do
|
||||
case MCPServer.handle_mcp_request(mcp_request) do
|
||||
%{"result" => _} = response ->
|
||||
send_json_response(conn, 200, response)
|
||||
|
||||
%{"error" => _} = response ->
|
||||
send_json_response(conn, 400, response)
|
||||
|
||||
unexpected ->
|
||||
IO.puts(:stderr, "Unexpected MCP response: #{inspect(unexpected)}")
|
||||
|
||||
send_json_response(conn, 500, %{
|
||||
jsonrpc: "2.0",
|
||||
id: Map.get(mcp_request, "id"),
|
||||
error: %{
|
||||
code: -32603,
|
||||
message: "Internal server error"
|
||||
}
|
||||
})
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_filtered_tools_list(conn, mcp_request, context) do
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, context)
|
||||
|
||||
response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(mcp_request, "id"),
|
||||
"result" => %{
|
||||
"tools" => filtered_tools,
|
||||
"_meta" => %{
|
||||
"filtered_for" => context.connection_type,
|
||||
"original_count" => length(all_tools),
|
||||
"filtered_count" => length(filtered_tools)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
send_json_response(conn, 200, response)
|
||||
end
|
||||
|
||||
defp generate_request_id do
|
||||
"http_req_" <> (:crypto.strong_rand_bytes(8) |> Base.encode16(case: :lower))
|
||||
end
|
||||
end
|
||||
@@ -29,27 +29,31 @@ defmodule AgentCoordinator.Inbox do
|
||||
end
|
||||
|
||||
def add_task(agent_id, task) do
|
||||
GenServer.call(via_tuple(agent_id), {:add_task, task})
|
||||
GenServer.call(via_tuple(agent_id), {:add_task, task}, 30_000)
|
||||
end
|
||||
|
||||
def remove_task(agent_id, task_id) do
|
||||
GenServer.call(via_tuple(agent_id), {:remove_task, task_id}, 30_000)
|
||||
end
|
||||
|
||||
def get_next_task(agent_id) do
|
||||
GenServer.call(via_tuple(agent_id), :get_next_task)
|
||||
GenServer.call(via_tuple(agent_id), :get_next_task, 15_000)
|
||||
end
|
||||
|
||||
def complete_current_task(agent_id) do
|
||||
GenServer.call(via_tuple(agent_id), :complete_current_task)
|
||||
GenServer.call(via_tuple(agent_id), :complete_current_task, 30_000)
|
||||
end
|
||||
|
||||
def get_status(agent_id) do
|
||||
GenServer.call(via_tuple(agent_id), :get_status)
|
||||
GenServer.call(via_tuple(agent_id), :get_status, 15_000)
|
||||
end
|
||||
|
||||
def list_tasks(agent_id) do
|
||||
GenServer.call(via_tuple(agent_id), :list_tasks)
|
||||
GenServer.call(via_tuple(agent_id), :list_tasks, 15_000)
|
||||
end
|
||||
|
||||
def get_current_task(agent_id) do
|
||||
GenServer.call(via_tuple(agent_id), :get_current_task)
|
||||
GenServer.call(via_tuple(agent_id), :get_current_task, 15_000)
|
||||
end
|
||||
|
||||
def stop(agent_id) do
|
||||
@@ -92,6 +96,47 @@ defmodule AgentCoordinator.Inbox do
|
||||
{:reply, :ok, new_state}
|
||||
end
|
||||
|
||||
def handle_call({:remove_task, task_id}, _from, state) do
|
||||
# Remove task from pending tasks
|
||||
{removed_task, remaining_pending} =
|
||||
Enum.reduce(state.pending_tasks, {nil, []}, fn task, {found_task, acc} ->
|
||||
if task.id == task_id do
|
||||
{task, acc}
|
||||
else
|
||||
{found_task, [task | acc]}
|
||||
end
|
||||
end)
|
||||
|
||||
# Check if task is currently in progress
|
||||
{new_in_progress, removed_from_progress} =
|
||||
if state.in_progress_task && state.in_progress_task.id == task_id do
|
||||
{nil, state.in_progress_task}
|
||||
else
|
||||
{state.in_progress_task, nil}
|
||||
end
|
||||
|
||||
final_removed_task = removed_task || removed_from_progress
|
||||
|
||||
if final_removed_task do
|
||||
new_state = %{
|
||||
state
|
||||
| pending_tasks: Enum.reverse(remaining_pending),
|
||||
in_progress_task: new_in_progress
|
||||
}
|
||||
|
||||
# Broadcast task removed
|
||||
Phoenix.PubSub.broadcast(
|
||||
AgentCoordinator.PubSub,
|
||||
"agent:#{state.agent_id}",
|
||||
{:task_removed, final_removed_task}
|
||||
)
|
||||
|
||||
{:reply, :ok, new_state}
|
||||
else
|
||||
{:reply, {:error, :task_not_found}, state}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call(:get_next_task, _from, state) do
|
||||
case state.pending_tasks do
|
||||
[] ->
|
||||
|
||||
718
lib/agent_coordinator/interface_manager.ex
Normal file
718
lib/agent_coordinator/interface_manager.ex
Normal file
@@ -0,0 +1,718 @@
|
||||
defmodule AgentCoordinator.InterfaceManager do
|
||||
@moduledoc """
|
||||
Centralized manager for multiple MCP interface modes.
|
||||
|
||||
This module coordinates between different interface types:
|
||||
- STDIO interface (for local MCP clients like VSCode)
|
||||
- HTTP REST interface (for remote API access)
|
||||
- WebSocket interface (for real-time web clients)
|
||||
|
||||
Responsibilities:
|
||||
- Start/stop interface servers based on configuration
|
||||
- Coordinate session state across interfaces
|
||||
- Apply appropriate tool filtering per interface
|
||||
- Monitor interface health and restart if needed
|
||||
- Provide unified metrics and monitoring
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
require Logger
|
||||
alias AgentCoordinator.{HttpInterface, ToolFilter}
|
||||
|
||||
defstruct [
|
||||
:config,
|
||||
:interfaces,
|
||||
:stdio_handler,
|
||||
:session_registry,
|
||||
:metrics
|
||||
]
|
||||
|
||||
@interface_types [:stdio, :http, :websocket]
|
||||
|
||||
# Client API
|
||||
|
||||
@doc """
|
||||
Start the interface manager with configuration.
|
||||
"""
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get current interface status.
|
||||
"""
|
||||
def get_status do
|
||||
GenServer.call(__MODULE__, :get_status)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Start a specific interface type.
|
||||
"""
|
||||
def start_interface(interface_type, opts \\ []) do
|
||||
GenServer.call(__MODULE__, {:start_interface, interface_type, opts})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Stop a specific interface type.
|
||||
"""
|
||||
def stop_interface(interface_type) do
|
||||
GenServer.call(__MODULE__, {:stop_interface, interface_type})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Restart an interface.
|
||||
"""
|
||||
def restart_interface(interface_type) do
|
||||
GenServer.call(__MODULE__, {:restart_interface, interface_type})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get metrics for all interfaces.
|
||||
"""
|
||||
def get_metrics do
|
||||
GenServer.call(__MODULE__, :get_metrics)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Register a session across interfaces.
|
||||
"""
|
||||
def register_session(session_id, interface_type, session_info) do
|
||||
GenServer.cast(__MODULE__, {:register_session, session_id, interface_type, session_info})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Unregister a session.
|
||||
"""
|
||||
def unregister_session(session_id) do
|
||||
GenServer.cast(__MODULE__, {:unregister_session, session_id})
|
||||
end
|
||||
|
||||
# Server callbacks
|
||||
|
||||
@impl GenServer
|
||||
def init(opts) do
|
||||
# Load configuration
|
||||
config = load_interface_config(opts)
|
||||
|
||||
state = %__MODULE__{
|
||||
config: config,
|
||||
interfaces: %{},
|
||||
stdio_handler: nil,
|
||||
session_registry: %{},
|
||||
metrics: initialize_metrics()
|
||||
}
|
||||
|
||||
IO.puts(
|
||||
:stderr,
|
||||
"Interface Manager starting with config: #{inspect(config.enabled_interfaces)}"
|
||||
)
|
||||
|
||||
# Start enabled interfaces
|
||||
{:ok, state, {:continue, :start_interfaces}}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_continue(:start_interfaces, state) do
|
||||
# Start each enabled interface
|
||||
updated_state =
|
||||
Enum.reduce(state.config.enabled_interfaces, state, fn interface_type, acc ->
|
||||
case start_interface_server(interface_type, state.config, acc) do
|
||||
{:ok, interface_info} ->
|
||||
IO.puts(:stderr, "Started #{interface_type} interface")
|
||||
%{acc | interfaces: Map.put(acc.interfaces, interface_type, interface_info)}
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Failed to start #{interface_type} interface: #{reason}")
|
||||
acc
|
||||
end
|
||||
end)
|
||||
|
||||
{:noreply, updated_state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call(:get_status, _from, state) do
|
||||
status = %{
|
||||
enabled_interfaces: state.config.enabled_interfaces,
|
||||
running_interfaces: Map.keys(state.interfaces),
|
||||
active_sessions: map_size(state.session_registry),
|
||||
config: %{
|
||||
stdio: state.config.stdio,
|
||||
http: state.config.http,
|
||||
websocket: state.config.websocket
|
||||
},
|
||||
uptime: get_uptime(),
|
||||
metrics: state.metrics
|
||||
}
|
||||
|
||||
{:reply, status, state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:start_interface, interface_type, opts}, _from, state) do
|
||||
if interface_type in @interface_types do
|
||||
case start_interface_server(interface_type, state.config, state, opts) do
|
||||
{:ok, interface_info} ->
|
||||
updated_interfaces = Map.put(state.interfaces, interface_type, interface_info)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
IO.puts(:stderr, "Started #{interface_type} interface on demand")
|
||||
{:reply, {:ok, interface_info}, updated_state}
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Failed to start #{interface_type} interface: #{reason}")
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
else
|
||||
{:reply, {:error, "Unknown interface type: #{interface_type}"}, state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:stop_interface, interface_type}, _from, state) do
|
||||
case Map.get(state.interfaces, interface_type) do
|
||||
nil ->
|
||||
{:reply, {:error, "Interface not running: #{interface_type}"}, state}
|
||||
|
||||
interface_info ->
|
||||
case stop_interface_server(interface_type, interface_info) do
|
||||
:ok ->
|
||||
updated_interfaces = Map.delete(state.interfaces, interface_type)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
IO.puts(:stderr, "Stopped #{interface_type} interface")
|
||||
{:reply, :ok, updated_state}
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Failed to stop #{interface_type} interface: #{reason}")
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:restart_interface, interface_type}, _from, state) do
|
||||
case Map.get(state.interfaces, interface_type) do
|
||||
nil ->
|
||||
{:reply, {:error, "Interface not running: #{interface_type}"}, state}
|
||||
|
||||
interface_info ->
|
||||
# Stop the interface
|
||||
case stop_interface_server(interface_type, interface_info) do
|
||||
:ok ->
|
||||
# Start it again
|
||||
case start_interface_server(interface_type, state.config, state) do
|
||||
{:ok, new_interface_info} ->
|
||||
updated_interfaces = Map.put(state.interfaces, interface_type, new_interface_info)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
IO.puts(:stderr, "Restarted #{interface_type} interface")
|
||||
{:reply, {:ok, new_interface_info}, updated_state}
|
||||
|
||||
{:error, reason} ->
|
||||
# Remove from running interfaces since it failed to restart
|
||||
updated_interfaces = Map.delete(state.interfaces, interface_type)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
IO.puts(:stderr, "Failed to restart #{interface_type} interface: #{reason}")
|
||||
{:reply, {:error, reason}, updated_state}
|
||||
end
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Failed to stop #{interface_type} interface for restart: #{reason}")
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call(:get_metrics, _from, state) do
|
||||
# Collect metrics from all running interfaces
|
||||
interface_metrics =
|
||||
Enum.map(state.interfaces, fn {interface_type, interface_info} ->
|
||||
{interface_type, get_interface_metrics(interface_type, interface_info)}
|
||||
end)
|
||||
|> Enum.into(%{})
|
||||
|
||||
metrics = %{
|
||||
interfaces: interface_metrics,
|
||||
sessions: %{
|
||||
total: map_size(state.session_registry),
|
||||
by_interface: get_sessions_by_interface(state.session_registry)
|
||||
},
|
||||
uptime: get_uptime(),
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
{:reply, metrics, state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_cast({:register_session, session_id, interface_type, session_info}, state) do
|
||||
session_data = %{
|
||||
interface_type: interface_type,
|
||||
info: session_info,
|
||||
registered_at: DateTime.utc_now(),
|
||||
last_activity: DateTime.utc_now()
|
||||
}
|
||||
|
||||
updated_registry = Map.put(state.session_registry, session_id, session_data)
|
||||
updated_state = %{state | session_registry: updated_registry}
|
||||
|
||||
IO.puts(:stderr, "Registered session #{session_id} for #{interface_type}")
|
||||
{:noreply, updated_state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_cast({:unregister_session, session_id}, state) do
|
||||
case Map.get(state.session_registry, session_id) do
|
||||
nil ->
|
||||
IO.puts(:stderr, "Attempted to unregister unknown session: #{session_id}")
|
||||
{:noreply, state}
|
||||
|
||||
_session_data ->
|
||||
updated_registry = Map.delete(state.session_registry, session_id)
|
||||
updated_state = %{state | session_registry: updated_registry}
|
||||
|
||||
IO.puts(:stderr, "Unregistered session #{session_id}")
|
||||
{:noreply, updated_state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_info({:DOWN, _ref, :process, pid, reason}, state) do
|
||||
# Handle interface process crashes
|
||||
case find_interface_by_pid(pid, state.interfaces) do
|
||||
{interface_type, _interface_info} ->
|
||||
IO.puts(:stderr, "#{interface_type} interface crashed: #{inspect(reason)}")
|
||||
|
||||
# Remove from running interfaces
|
||||
updated_interfaces = Map.delete(state.interfaces, interface_type)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
# Optionally restart if configured
|
||||
if should_auto_restart?(interface_type, state.config) do
|
||||
IO.puts(:stderr, "Auto-restarting #{interface_type} interface")
|
||||
Process.send_after(self(), {:restart_interface, interface_type}, 5000)
|
||||
end
|
||||
|
||||
{:noreply, updated_state}
|
||||
|
||||
nil ->
|
||||
IO.puts(:stderr, "Unknown process died: #{inspect(pid)}")
|
||||
{:noreply, state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_info({:restart_interface, interface_type}, state) do
|
||||
case start_interface_server(interface_type, state.config, state) do
|
||||
{:ok, interface_info} ->
|
||||
updated_interfaces = Map.put(state.interfaces, interface_type, interface_info)
|
||||
updated_state = %{state | interfaces: updated_interfaces}
|
||||
|
||||
IO.puts(:stderr, "Auto-restarted #{interface_type} interface")
|
||||
{:noreply, updated_state}
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "Failed to auto-restart #{interface_type} interface: #{reason}")
|
||||
{:noreply, state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_info(message, state) do
|
||||
IO.puts(:stderr, "Interface Manager received unexpected message: #{inspect(message)}")
|
||||
{:noreply, state}
|
||||
end
|
||||
|
||||
# Private helper functions
|
||||
|
||||
defp load_interface_config(opts) do
|
||||
# Load from application config and override with opts
|
||||
base_config = Application.get_env(:agent_coordinator, :interfaces, %{})
|
||||
|
||||
# Default configuration
|
||||
default_config = %{
|
||||
enabled_interfaces: [:stdio],
|
||||
stdio: %{
|
||||
enabled: true,
|
||||
handle_stdio: true
|
||||
},
|
||||
http: %{
|
||||
enabled: false,
|
||||
port: 8080,
|
||||
host: "localhost",
|
||||
cors_enabled: true
|
||||
},
|
||||
websocket: %{
|
||||
enabled: false,
|
||||
port: 8081,
|
||||
host: "localhost"
|
||||
},
|
||||
auto_restart: %{
|
||||
stdio: false,
|
||||
http: true,
|
||||
websocket: true
|
||||
}
|
||||
}
|
||||
|
||||
# Merge configurations
|
||||
config = deep_merge(default_config, base_config)
|
||||
config = deep_merge(config, Enum.into(opts, %{}))
|
||||
|
||||
# Determine enabled interfaces from environment or config
|
||||
enabled = determine_enabled_interfaces(config)
|
||||
|
||||
# Update individual interface enabled flags based on environment
|
||||
config = update_interface_enabled_flags(config, enabled)
|
||||
|
||||
%{config | enabled_interfaces: enabled}
|
||||
end
|
||||
|
||||
defp determine_enabled_interfaces(config) do
|
||||
# Check environment variables
|
||||
interface_mode = System.get_env("MCP_INTERFACE_MODE", "stdio")
|
||||
|
||||
case interface_mode do
|
||||
"stdio" ->
|
||||
[:stdio]
|
||||
|
||||
"http" ->
|
||||
[:http]
|
||||
|
||||
"websocket" ->
|
||||
[:websocket]
|
||||
|
||||
"all" ->
|
||||
[:stdio, :http, :websocket]
|
||||
|
||||
"remote" ->
|
||||
[:http, :websocket]
|
||||
|
||||
_ ->
|
||||
# Check for comma-separated list
|
||||
if String.contains?(interface_mode, ",") do
|
||||
interface_mode
|
||||
|> String.split(",")
|
||||
|> Enum.map(&String.trim/1)
|
||||
|> Enum.map(&String.to_atom/1)
|
||||
|> Enum.filter(&(&1 in @interface_types))
|
||||
else
|
||||
# Fall back to config
|
||||
Map.get(config, :enabled_interfaces, [:stdio])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp update_interface_enabled_flags(config, enabled_interfaces) do
|
||||
# Update individual interface enabled flags based on which interfaces are enabled
|
||||
config
|
||||
|> update_in([:stdio, :enabled], fn _ -> :stdio in enabled_interfaces end)
|
||||
|> update_in([:http, :enabled], fn _ -> :http in enabled_interfaces end)
|
||||
|> update_in([:websocket, :enabled], fn _ -> :websocket in enabled_interfaces end)
|
||||
# Also update ports from environment if set
|
||||
|> update_http_config_from_env()
|
||||
end
|
||||
|
||||
defp update_http_config_from_env(config) do
|
||||
config =
|
||||
case System.get_env("MCP_HTTP_PORT") do
|
||||
nil ->
|
||||
config
|
||||
|
||||
port_str ->
|
||||
case Integer.parse(port_str) do
|
||||
{port, ""} -> put_in(config, [:http, :port], port)
|
||||
_ -> config
|
||||
end
|
||||
end
|
||||
|
||||
case System.get_env("MCP_HTTP_HOST") do
|
||||
nil -> config
|
||||
host -> put_in(config, [:http, :host], host)
|
||||
end
|
||||
end
|
||||
|
||||
# Declare defaults once
|
||||
defp start_interface_server(type, config, state, opts \\ %{})
|
||||
|
||||
defp start_interface_server(:stdio, config, state, _opts) do
|
||||
if config.stdio.enabled and config.stdio.handle_stdio do
|
||||
# Start stdio handler
|
||||
stdio_handler = spawn_link(fn -> handle_stdio_loop(state) end)
|
||||
|
||||
interface_info = %{
|
||||
type: :stdio,
|
||||
pid: stdio_handler,
|
||||
started_at: DateTime.utc_now(),
|
||||
config: config.stdio
|
||||
}
|
||||
|
||||
{:ok, interface_info}
|
||||
else
|
||||
{:error, "STDIO interface not enabled"}
|
||||
end
|
||||
end
|
||||
|
||||
defp start_interface_server(:http, config, _state, opts) do
|
||||
if config.http.enabled do
|
||||
http_opts = [
|
||||
port: Map.get(opts, :port, config.http.port),
|
||||
host: Map.get(opts, :host, config.http.host)
|
||||
]
|
||||
|
||||
case HttpInterface.start_link(http_opts) do
|
||||
{:ok, pid} ->
|
||||
# Monitor the process
|
||||
ref = Process.monitor(pid)
|
||||
|
||||
interface_info = %{
|
||||
type: :http,
|
||||
pid: pid,
|
||||
monitor_ref: ref,
|
||||
started_at: DateTime.utc_now(),
|
||||
config: Map.merge(config.http, Enum.into(opts, %{})),
|
||||
port: http_opts[:port]
|
||||
}
|
||||
|
||||
{:ok, interface_info}
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, reason}
|
||||
end
|
||||
else
|
||||
{:error, "HTTP interface not enabled"}
|
||||
end
|
||||
end
|
||||
|
||||
defp start_interface_server(:websocket, config, _state, _opts) do
|
||||
if config.websocket.enabled do
|
||||
# WebSocket is handled by the HTTP server, so just mark it as enabled
|
||||
interface_info = %{
|
||||
type: :websocket,
|
||||
# Embedded in HTTP server
|
||||
pid: :embedded,
|
||||
started_at: DateTime.utc_now(),
|
||||
config: config.websocket
|
||||
}
|
||||
|
||||
{:ok, interface_info}
|
||||
else
|
||||
{:error, "WebSocket interface not enabled"}
|
||||
end
|
||||
end
|
||||
|
||||
defp start_interface_server(unknown_type, _config, _state, _opts) do
|
||||
{:error, "Unknown interface type: #{unknown_type}"}
|
||||
end
|
||||
|
||||
defp stop_interface_server(:stdio, interface_info) do
|
||||
if Process.alive?(interface_info.pid) do
|
||||
Process.exit(interface_info.pid, :shutdown)
|
||||
:ok
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp stop_interface_server(:http, interface_info) do
|
||||
if Process.alive?(interface_info.pid) do
|
||||
Process.exit(interface_info.pid, :shutdown)
|
||||
:ok
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp stop_interface_server(:websocket, _interface_info) do
|
||||
# WebSocket is embedded in HTTP server, so nothing to stop separately
|
||||
:ok
|
||||
end
|
||||
|
||||
defp stop_interface_server(_type, _interface_info) do
|
||||
{:error, "Unknown interface type"}
|
||||
end
|
||||
|
||||
defp handle_stdio_loop(state) do
|
||||
# Handle MCP JSON-RPC messages from STDIO
|
||||
# Use different approaches for Docker vs regular environments
|
||||
if docker_environment?() do
|
||||
handle_stdio_docker_loop(state)
|
||||
else
|
||||
handle_stdio_regular_loop(state)
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_stdio_regular_loop(state) do
|
||||
case IO.read(:stdio, :line) do
|
||||
:eof ->
|
||||
IO.puts(:stderr, "STDIO interface shutting down (EOF)")
|
||||
exit(:normal)
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "STDIO error: #{inspect(reason)}")
|
||||
exit({:error, reason})
|
||||
|
||||
line ->
|
||||
handle_stdio_message(String.trim(line), state)
|
||||
handle_stdio_regular_loop(state)
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_stdio_docker_loop(state) do
|
||||
# In Docker, use regular IO.read instead of Port.open({:fd, 0, 1})
|
||||
# to avoid "driver_select stealing control of fd=0" conflicts with external MCP servers
|
||||
# This allows external servers to use pipes while Agent Coordinator reads from stdin
|
||||
case IO.read(:stdio, :line) do
|
||||
:eof ->
|
||||
IO.puts(:stderr, "STDIO interface shutting down (EOF)")
|
||||
exit(:normal)
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, "STDIO error: #{inspect(reason)}")
|
||||
exit({:error, reason})
|
||||
|
||||
line ->
|
||||
handle_stdio_message(String.trim(line), state)
|
||||
handle_stdio_docker_loop(state)
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_stdio_message("", _state), do: :ok
|
||||
|
||||
defp handle_stdio_message(json_line, _state) do
|
||||
try do
|
||||
request = Jason.decode!(json_line)
|
||||
|
||||
# Create local client context for stdio
|
||||
_client_context = ToolFilter.local_context()
|
||||
|
||||
# Process through MCP server with full tool access
|
||||
response = AgentCoordinator.MCPServer.handle_mcp_request(request)
|
||||
|
||||
# Send response
|
||||
IO.puts(Jason.encode!(response))
|
||||
rescue
|
||||
e in Jason.DecodeError ->
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => nil,
|
||||
"error" => %{
|
||||
"code" => -32700,
|
||||
"message" => "Parse error: #{Exception.message(e)}"
|
||||
}
|
||||
}
|
||||
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
|
||||
e ->
|
||||
# Try to get the ID from the malformed request
|
||||
id =
|
||||
try do
|
||||
partial = Jason.decode!(json_line)
|
||||
Map.get(partial, "id")
|
||||
rescue
|
||||
_ -> nil
|
||||
end
|
||||
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => id,
|
||||
"error" => %{
|
||||
"code" => -32603,
|
||||
"message" => "Internal error: #{Exception.message(e)}"
|
||||
}
|
||||
}
|
||||
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
end
|
||||
end
|
||||
|
||||
defp get_interface_metrics(:stdio, interface_info) do
|
||||
%{
|
||||
type: :stdio,
|
||||
status: if(Process.alive?(interface_info.pid), do: :running, else: :stopped),
|
||||
uptime: DateTime.diff(DateTime.utc_now(), interface_info.started_at, :second),
|
||||
pid: interface_info.pid
|
||||
}
|
||||
end
|
||||
|
||||
defp get_interface_metrics(:http, interface_info) do
|
||||
%{
|
||||
type: :http,
|
||||
status: if(Process.alive?(interface_info.pid), do: :running, else: :stopped),
|
||||
uptime: DateTime.diff(DateTime.utc_now(), interface_info.started_at, :second),
|
||||
port: interface_info.port,
|
||||
pid: interface_info.pid
|
||||
}
|
||||
end
|
||||
|
||||
defp get_interface_metrics(:websocket, interface_info) do
|
||||
%{
|
||||
type: :websocket,
|
||||
# Embedded in HTTP server
|
||||
status: :running,
|
||||
uptime: DateTime.diff(DateTime.utc_now(), interface_info.started_at, :second),
|
||||
embedded: true
|
||||
}
|
||||
end
|
||||
|
||||
defp get_sessions_by_interface(session_registry) do
|
||||
Enum.reduce(session_registry, %{}, fn {_session_id, session_data}, acc ->
|
||||
interface_type = session_data.interface_type
|
||||
count = Map.get(acc, interface_type, 0)
|
||||
Map.put(acc, interface_type, count + 1)
|
||||
end)
|
||||
end
|
||||
|
||||
defp find_interface_by_pid(pid, interfaces) do
|
||||
Enum.find(interfaces, fn {_type, interface_info} ->
|
||||
interface_info.pid == pid
|
||||
end)
|
||||
end
|
||||
|
||||
defp should_auto_restart?(interface_type, config) do
|
||||
Map.get(config.auto_restart, interface_type, false)
|
||||
end
|
||||
|
||||
defp initialize_metrics do
|
||||
%{
|
||||
started_at: DateTime.utc_now(),
|
||||
requests_total: 0,
|
||||
errors_total: 0,
|
||||
sessions_total: 0
|
||||
}
|
||||
end
|
||||
|
||||
defp get_uptime do
|
||||
{uptime_ms, _} = :erlang.statistics(:wall_clock)
|
||||
div(uptime_ms, 1000)
|
||||
end
|
||||
|
||||
# Deep merge helper for configuration
|
||||
defp deep_merge(left, right) when is_map(left) and is_map(right) do
|
||||
Map.merge(left, right, fn _key, left_val, right_val ->
|
||||
deep_merge(left_val, right_val)
|
||||
end)
|
||||
end
|
||||
|
||||
defp deep_merge(_left, right), do: right
|
||||
|
||||
# Check if running in Docker environment
|
||||
defp docker_environment? do
|
||||
# Check common Docker environment indicators
|
||||
# Check if we're running under a container init system
|
||||
System.get_env("DOCKER_CONTAINER") != nil or
|
||||
System.get_env("container") != nil or
|
||||
System.get_env("DOCKERIZED") != nil or
|
||||
File.exists?("/.dockerenv") or
|
||||
(File.exists?("/proc/1/cgroup") and
|
||||
File.read!("/proc/1/cgroup") |> String.contains?("docker")) or
|
||||
String.contains?(to_string(System.get_env("PATH", "")), "/app/") or
|
||||
case File.read("/proc/1/comm") do
|
||||
{:ok, comm} -> String.trim(comm) in ["bash", "sh", "docker-init", "tini"]
|
||||
_ -> false
|
||||
end
|
||||
end
|
||||
end
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -95,9 +95,6 @@ defmodule AgentCoordinator.Persistence do
|
||||
case Gnat.pub(state.nats_conn, subject, message, headers: event_headers()) do
|
||||
:ok ->
|
||||
:ok
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("Failed to store event: #{inspect(reason)}")
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
197
lib/agent_coordinator/session_manager.ex
Normal file
197
lib/agent_coordinator/session_manager.ex
Normal file
@@ -0,0 +1,197 @@
|
||||
defmodule AgentCoordinator.SessionManager do
|
||||
@moduledoc """
|
||||
Session management for MCP agents with token-based authentication.
|
||||
|
||||
Implements MCP-compliant session management where:
|
||||
1. Agents register and receive session tokens
|
||||
2. Session tokens must be included in Mcp-Session-Id headers
|
||||
3. Session tokens are cryptographically secure and time-limited
|
||||
4. Sessions are tied to specific agent IDs
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
require Logger
|
||||
|
||||
defstruct [
|
||||
:sessions,
|
||||
:config
|
||||
]
|
||||
|
||||
@session_expiry_minutes 60
|
||||
@cleanup_interval_minutes 5
|
||||
|
||||
# Client API
|
||||
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Generate a new session token for an agent.
|
||||
Returns {:ok, session_token} or {:error, reason}
|
||||
"""
|
||||
def create_session(agent_id, metadata \\ %{}) do
|
||||
GenServer.call(__MODULE__, {:create_session, agent_id, metadata})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Validate a session token and return agent information.
|
||||
Returns {:ok, agent_id, metadata} or {:error, reason}
|
||||
"""
|
||||
def validate_session(session_token) do
|
||||
GenServer.call(__MODULE__, {:validate_session, session_token})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Invalidate a session token.
|
||||
"""
|
||||
def invalidate_session(session_token) do
|
||||
GenServer.call(__MODULE__, {:invalidate_session, session_token})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get all active sessions for an agent.
|
||||
"""
|
||||
def get_agent_sessions(agent_id) do
|
||||
GenServer.call(__MODULE__, {:get_agent_sessions, agent_id})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Clean up expired sessions.
|
||||
"""
|
||||
def cleanup_expired_sessions do
|
||||
GenServer.cast(__MODULE__, :cleanup_expired)
|
||||
end
|
||||
|
||||
# Server implementation
|
||||
|
||||
@impl GenServer
|
||||
def init(opts) do
|
||||
# Start periodic cleanup
|
||||
schedule_cleanup()
|
||||
|
||||
state = %__MODULE__{
|
||||
sessions: %{},
|
||||
config: %{
|
||||
expiry_minutes: Keyword.get(opts, :expiry_minutes, @session_expiry_minutes),
|
||||
cleanup_interval: Keyword.get(opts, :cleanup_interval, @cleanup_interval_minutes)
|
||||
}
|
||||
}
|
||||
|
||||
IO.puts(:stderr, "SessionManager started with #{state.config.expiry_minutes}min expiry")
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:create_session, agent_id, metadata}, _from, state) do
|
||||
session_token = generate_session_token()
|
||||
expires_at = DateTime.add(DateTime.utc_now(), state.config.expiry_minutes, :minute)
|
||||
|
||||
session_data = %{
|
||||
agent_id: agent_id,
|
||||
token: session_token,
|
||||
created_at: DateTime.utc_now(),
|
||||
expires_at: expires_at,
|
||||
metadata: metadata,
|
||||
last_activity: DateTime.utc_now()
|
||||
}
|
||||
|
||||
new_sessions = Map.put(state.sessions, session_token, session_data)
|
||||
new_state = %{state | sessions: new_sessions}
|
||||
|
||||
IO.puts(:stderr, "Created session #{session_token} for agent #{agent_id}")
|
||||
{:reply, {:ok, session_token}, new_state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:validate_session, session_token}, _from, state) do
|
||||
case Map.get(state.sessions, session_token) do
|
||||
nil ->
|
||||
{:reply, {:error, :session_not_found}, state}
|
||||
|
||||
session_data ->
|
||||
if DateTime.compare(DateTime.utc_now(), session_data.expires_at) == :gt do
|
||||
# Session expired, remove it
|
||||
new_sessions = Map.delete(state.sessions, session_token)
|
||||
new_state = %{state | sessions: new_sessions}
|
||||
{:reply, {:error, :session_expired}, new_state}
|
||||
else
|
||||
# Session valid, update last activity
|
||||
updated_session = %{session_data | last_activity: DateTime.utc_now()}
|
||||
new_sessions = Map.put(state.sessions, session_token, updated_session)
|
||||
new_state = %{state | sessions: new_sessions}
|
||||
|
||||
result = {:ok, session_data.agent_id, session_data.metadata}
|
||||
{:reply, result, new_state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:invalidate_session, session_token}, _from, state) do
|
||||
case Map.get(state.sessions, session_token) do
|
||||
nil ->
|
||||
{:reply, {:error, :session_not_found}, state}
|
||||
|
||||
session_data ->
|
||||
new_sessions = Map.delete(state.sessions, session_token)
|
||||
new_state = %{state | sessions: new_sessions}
|
||||
|
||||
IO.puts(
|
||||
:stderr,
|
||||
"Invalidated session #{session_token} for agent #{session_data.agent_id}"
|
||||
)
|
||||
|
||||
{:reply, :ok, new_state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_call({:get_agent_sessions, agent_id}, _from, state) do
|
||||
agent_sessions =
|
||||
state.sessions
|
||||
|> Enum.filter(fn {_token, session} -> session.agent_id == agent_id end)
|
||||
|> Enum.map(fn {token, session} -> {token, session} end)
|
||||
|
||||
{:reply, agent_sessions, state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_cast(:cleanup_expired, state) do
|
||||
now = DateTime.utc_now()
|
||||
|
||||
{expired_sessions, active_sessions} =
|
||||
Enum.split_with(state.sessions, fn {_token, session} ->
|
||||
DateTime.compare(now, session.expires_at) == :gt
|
||||
end)
|
||||
|
||||
if length(expired_sessions) > 0 do
|
||||
IO.puts(:stderr, "Cleaned up #{length(expired_sessions)} expired sessions")
|
||||
end
|
||||
|
||||
new_state = %{state | sessions: Map.new(active_sessions)}
|
||||
schedule_cleanup()
|
||||
{:noreply, new_state}
|
||||
end
|
||||
|
||||
@impl GenServer
|
||||
def handle_info(:cleanup_expired, state) do
|
||||
handle_cast(:cleanup_expired, state)
|
||||
end
|
||||
|
||||
# Private functions
|
||||
|
||||
defp generate_session_token do
|
||||
# Generate cryptographically secure session token
|
||||
# Format: "mcp_" + base64url(32 random bytes) + "_" + timestamp
|
||||
random_bytes = :crypto.strong_rand_bytes(32)
|
||||
timestamp = DateTime.utc_now() |> DateTime.to_unix()
|
||||
|
||||
token_body = Base.url_encode64(random_bytes, padding: false)
|
||||
"mcp_#{token_body}_#{timestamp}"
|
||||
end
|
||||
|
||||
defp schedule_cleanup do
|
||||
Process.send_after(self(), :cleanup_expired, @cleanup_interval_minutes * 60 * 1000)
|
||||
end
|
||||
end
|
||||
@@ -17,7 +17,12 @@ defmodule AgentCoordinator.Task do
|
||||
:cross_codebase_dependencies,
|
||||
:created_at,
|
||||
:updated_at,
|
||||
:metadata
|
||||
:metadata,
|
||||
:feedback,
|
||||
:director_notes,
|
||||
:assignment_reason,
|
||||
:refinement_history,
|
||||
:blocking_issues
|
||||
]}
|
||||
defstruct [
|
||||
:id,
|
||||
@@ -32,7 +37,12 @@ defmodule AgentCoordinator.Task do
|
||||
:cross_codebase_dependencies,
|
||||
:created_at,
|
||||
:updated_at,
|
||||
:metadata
|
||||
:metadata,
|
||||
:feedback,
|
||||
:director_notes,
|
||||
:assignment_reason,
|
||||
:refinement_history,
|
||||
:blocking_issues
|
||||
]
|
||||
|
||||
@type status :: :pending | :in_progress | :completed | :failed | :blocked
|
||||
@@ -51,7 +61,12 @@ defmodule AgentCoordinator.Task do
|
||||
cross_codebase_dependencies: [%{codebase_id: String.t(), task_id: String.t()}],
|
||||
created_at: DateTime.t(),
|
||||
updated_at: DateTime.t(),
|
||||
metadata: map()
|
||||
metadata: map(),
|
||||
feedback: String.t() | nil,
|
||||
director_notes: String.t() | nil,
|
||||
assignment_reason: String.t() | nil,
|
||||
refinement_history: [map()],
|
||||
blocking_issues: [String.t()]
|
||||
}
|
||||
|
||||
def new(title, description, opts \\ []) do
|
||||
@@ -78,7 +93,12 @@ defmodule AgentCoordinator.Task do
|
||||
cross_codebase_dependencies: get_opt.(:cross_codebase_dependencies, []),
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
metadata: get_opt.(:metadata, %{})
|
||||
metadata: get_opt.(:metadata, %{}),
|
||||
feedback: nil,
|
||||
director_notes: nil,
|
||||
assignment_reason: nil,
|
||||
refinement_history: [],
|
||||
blocking_issues: []
|
||||
}
|
||||
end
|
||||
|
||||
@@ -115,4 +135,109 @@ defmodule AgentCoordinator.Task do
|
||||
dependencies = [dependency | task.cross_codebase_dependencies]
|
||||
%{task | cross_codebase_dependencies: dependencies, updated_at: DateTime.utc_now()}
|
||||
end
|
||||
|
||||
# Director management functions
|
||||
|
||||
def add_feedback(task, feedback, director_id) do
|
||||
refinement_entry = %{
|
||||
type: "feedback_added",
|
||||
director_id: director_id,
|
||||
content: feedback,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| feedback: feedback,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def add_director_notes(task, notes, director_id) do
|
||||
refinement_entry = %{
|
||||
type: "director_notes_added",
|
||||
director_id: director_id,
|
||||
content: notes,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| director_notes: notes,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def set_assignment_reason(task, reason, director_id) do
|
||||
refinement_entry = %{
|
||||
type: "assignment_reason_set",
|
||||
director_id: director_id,
|
||||
reason: reason,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| assignment_reason: reason,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def add_blocking_issue(task, issue, director_id) do
|
||||
new_issues = [issue | task.blocking_issues] |> Enum.uniq()
|
||||
|
||||
refinement_entry = %{
|
||||
type: "blocking_issue_added",
|
||||
director_id: director_id,
|
||||
issue: issue,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| blocking_issues: new_issues,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def remove_blocking_issue(task, issue, director_id) do
|
||||
new_issues = task.blocking_issues |> Enum.reject(&(&1 == issue))
|
||||
|
||||
refinement_entry = %{
|
||||
type: "blocking_issue_removed",
|
||||
director_id: director_id,
|
||||
issue: issue,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| blocking_issues: new_issues,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
def reassign(task, new_agent_id, director_id, reason) do
|
||||
refinement_entry = %{
|
||||
type: "task_reassigned",
|
||||
director_id: director_id,
|
||||
from_agent_id: task.agent_id,
|
||||
to_agent_id: new_agent_id,
|
||||
reason: reason,
|
||||
timestamp: DateTime.utc_now()
|
||||
}
|
||||
|
||||
%{
|
||||
task
|
||||
| agent_id: new_agent_id,
|
||||
assignment_reason: reason,
|
||||
refinement_history: [refinement_entry | task.refinement_history],
|
||||
updated_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -24,11 +24,11 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
|
||||
def register_agent(agent) do
|
||||
GenServer.call(__MODULE__, {:register_agent, agent})
|
||||
GenServer.call(__MODULE__, {:register_agent, agent}, 30_000)
|
||||
end
|
||||
|
||||
def assign_task(task) do
|
||||
GenServer.call(__MODULE__, {:assign_task, task})
|
||||
GenServer.call(__MODULE__, {:assign_task, task}, 30_000)
|
||||
end
|
||||
|
||||
def add_to_pending(task) do
|
||||
@@ -40,7 +40,7 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
|
||||
def heartbeat_agent(agent_id) do
|
||||
GenServer.call(__MODULE__, {:heartbeat_agent, agent_id})
|
||||
GenServer.call(__MODULE__, {:heartbeat_agent, agent_id}, 30_000)
|
||||
end
|
||||
|
||||
def unregister_agent(agent_id, reason \\ "Agent requested unregistration") do
|
||||
@@ -52,7 +52,7 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
|
||||
def get_agent_current_task(agent_id) do
|
||||
GenServer.call(__MODULE__, {:get_agent_current_task, agent_id})
|
||||
GenServer.call(__MODULE__, {:get_agent_current_task, agent_id}, 15_000)
|
||||
end
|
||||
|
||||
def get_agent(agent_id) do
|
||||
@@ -64,11 +64,11 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
|
||||
def update_task_activity(task_id, tool_name, arguments) do
|
||||
GenServer.call(__MODULE__, {:update_task_activity, task_id, tool_name, arguments})
|
||||
GenServer.call(__MODULE__, {:update_task_activity, task_id, tool_name, arguments}, 30_000)
|
||||
end
|
||||
|
||||
def create_task(title, description, opts \\ %{}) do
|
||||
GenServer.call(__MODULE__, {:create_task, title, description, opts})
|
||||
GenServer.call(__MODULE__, {:create_task, title, description, opts}, 30_000)
|
||||
end
|
||||
|
||||
def get_next_task(agent_id) do
|
||||
@@ -76,7 +76,11 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
|
||||
def complete_task(agent_id) do
|
||||
GenServer.call(__MODULE__, {:complete_task, agent_id})
|
||||
GenServer.call(__MODULE__, {:complete_task, agent_id}, 30_000)
|
||||
end
|
||||
|
||||
def update_agent(agent_id, updated_agent) do
|
||||
GenServer.call(__MODULE__, {:update_agent, agent_id, updated_agent})
|
||||
end
|
||||
|
||||
def get_task_board do
|
||||
@@ -139,13 +143,13 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
{Inbox, agent.id}
|
||||
) do
|
||||
{:ok, _pid} ->
|
||||
Logger.info("Created inbox for agent #{agent.id}")
|
||||
IO.puts(:stderr, "Created inbox for agent #{agent.id}")
|
||||
|
||||
{:error, {:already_started, _pid}} ->
|
||||
Logger.info("Inbox already exists for agent #{agent.id}")
|
||||
IO.puts(:stderr, "Inbox already exists for agent #{agent.id}")
|
||||
|
||||
{:error, reason} ->
|
||||
Logger.warning("Failed to create inbox for agent #{agent.id}: #{inspect(reason)}")
|
||||
IO.puts(:stderr, "Failed to create inbox for agent #{agent.id}: #{inspect(reason)}")
|
||||
end
|
||||
|
||||
# Publish agent registration with codebase info
|
||||
@@ -284,6 +288,7 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
case Map.get(state.agents, agent_id) do
|
||||
nil ->
|
||||
{:reply, {:error, :not_found}, state}
|
||||
|
||||
agent ->
|
||||
{:reply, {:ok, agent}, state}
|
||||
end
|
||||
@@ -293,6 +298,7 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
case Enum.find(state.agents, fn {_id, agent} -> agent.name == agent_name end) do
|
||||
nil ->
|
||||
{:reply, {:error, :not_found}, state}
|
||||
|
||||
{_id, agent} ->
|
||||
{:reply, {:ok, agent}, state}
|
||||
end
|
||||
@@ -338,9 +344,6 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
# Remove from pending since it was assigned
|
||||
final_state = %{final_state | pending_tasks: state.pending_tasks}
|
||||
{:reply, {:ok, task}, final_state}
|
||||
|
||||
error ->
|
||||
error
|
||||
end
|
||||
|
||||
_conflicts ->
|
||||
@@ -424,6 +427,18 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call({:update_agent, agent_id, updated_agent}, _from, state) do
|
||||
case Map.get(state.agents, agent_id) do
|
||||
nil ->
|
||||
{:reply, {:error, :agent_not_found}, state}
|
||||
|
||||
_current_agent ->
|
||||
new_agents = Map.put(state.agents, agent_id, updated_agent)
|
||||
new_state = %{state | agents: new_agents}
|
||||
{:reply, :ok, new_state}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call(:get_task_board, _from, state) do
|
||||
agents_info =
|
||||
Enum.map(state.agents, fn {_id, agent} ->
|
||||
@@ -440,7 +455,9 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
capabilities: agent.capabilities,
|
||||
current_task: current_task,
|
||||
last_heartbeat: agent.last_heartbeat,
|
||||
online: Agent.is_online?(agent)
|
||||
online: Agent.is_online?(agent),
|
||||
current_activity: agent.current_activity,
|
||||
current_files: agent.current_files || []
|
||||
}
|
||||
end)
|
||||
|
||||
@@ -559,6 +576,7 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
catch
|
||||
:exit, _ -> 0
|
||||
end
|
||||
|
||||
[] ->
|
||||
# No inbox process exists, treat as 0 pending tasks
|
||||
0
|
||||
@@ -685,8 +703,6 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
:ok -> :ok
|
||||
# Inbox already stopped
|
||||
{:error, :not_found} -> :ok
|
||||
# Continue regardless
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
# Publish unregistration event
|
||||
@@ -754,15 +770,15 @@ defmodule AgentCoordinator.TaskRegistry do
|
||||
{Inbox, agent_id}
|
||||
) do
|
||||
{:ok, _pid} ->
|
||||
Logger.info("Created inbox for agent #{agent_id}")
|
||||
IO.puts(:stderr, "Created inbox for agent #{agent_id}")
|
||||
:ok
|
||||
|
||||
{:error, {:already_started, _pid}} ->
|
||||
Logger.info("Inbox already exists for agent #{agent_id}")
|
||||
IO.puts(:stderr, "Inbox already exists for agent #{agent_id}")
|
||||
:ok
|
||||
|
||||
{:error, reason} ->
|
||||
Logger.warning("Failed to create inbox for agent #{agent_id}: #{inspect(reason)}")
|
||||
IO.puts(:stderr, "Failed to create inbox for agent #{agent_id}: #{inspect(reason)}")
|
||||
{:error, reason}
|
||||
end
|
||||
|
||||
|
||||
318
lib/agent_coordinator/tool_filter.ex
Normal file
318
lib/agent_coordinator/tool_filter.ex
Normal file
@@ -0,0 +1,318 @@
|
||||
defmodule AgentCoordinator.ToolFilter do
|
||||
@moduledoc """
|
||||
Intelligent tool filtering system that adapts available tools based on client context.
|
||||
|
||||
This module determines which tools should be available to different types of clients:
|
||||
- Local clients: Full tool access including filesystem and VSCode tools
|
||||
- Remote clients: Limited to agent coordination and safe remote tools
|
||||
- Web clients: Browser-safe tools only
|
||||
|
||||
Tool filtering is based on:
|
||||
- Tool capabilities and requirements
|
||||
- Client connection type (local/remote)
|
||||
- Security considerations
|
||||
- Tool metadata annotations
|
||||
"""
|
||||
|
||||
require Logger
|
||||
|
||||
@doc """
|
||||
Context information about the client connection.
|
||||
"""
|
||||
defstruct [
|
||||
# :local, :remote, :web
|
||||
:connection_type,
|
||||
# Client identification
|
||||
:client_info,
|
||||
# Client declared capabilities
|
||||
:capabilities,
|
||||
# :trusted, :sandboxed, :restricted
|
||||
:security_level,
|
||||
# For web clients, the origin domain
|
||||
:origin,
|
||||
# Client user agent string
|
||||
:user_agent
|
||||
]
|
||||
|
||||
@type client_context :: %__MODULE__{
|
||||
connection_type: :local | :remote | :web,
|
||||
client_info: map(),
|
||||
capabilities: [String.t()],
|
||||
security_level: :trusted | :sandboxed | :restricted,
|
||||
origin: String.t() | nil,
|
||||
user_agent: String.t() | nil
|
||||
}
|
||||
|
||||
# Tool name patterns that indicate local-only functionality (defined as function to avoid compilation issues)
|
||||
defp local_only_patterns do
|
||||
[
|
||||
~r/^(read_file|write_file|create_file|delete_file)/,
|
||||
~r/^(list_dir|search_files|move_file)/,
|
||||
~r/^vscode_/,
|
||||
~r/^(run_in_terminal|get_terminal)/,
|
||||
~r/filesystem/,
|
||||
~r/directory/
|
||||
]
|
||||
end
|
||||
|
||||
# Tools that are always safe for remote access
|
||||
@always_safe_tools [
|
||||
# Agent coordination tools
|
||||
"register_agent",
|
||||
"create_task",
|
||||
"get_next_task",
|
||||
"complete_task",
|
||||
"get_task_board",
|
||||
"get_detailed_task_board",
|
||||
"get_agent_task_history",
|
||||
"heartbeat",
|
||||
"unregister_agent",
|
||||
"register_task_set",
|
||||
"create_agent_task",
|
||||
"create_cross_codebase_task",
|
||||
"list_codebases",
|
||||
"register_codebase",
|
||||
"get_codebase_status",
|
||||
"add_codebase_dependency",
|
||||
|
||||
# Memory and knowledge graph (safe for remote)
|
||||
"create_entities",
|
||||
"create_relations",
|
||||
"read_graph",
|
||||
"search_nodes",
|
||||
"open_nodes",
|
||||
"add_observations",
|
||||
"delete_entities",
|
||||
"delete_relations",
|
||||
"delete_observations",
|
||||
|
||||
# Sequential thinking (safe for remote)
|
||||
"sequentialthinking",
|
||||
|
||||
# Library documentation (safe for remote)
|
||||
"get-library-docs",
|
||||
"resolve-library-id"
|
||||
]
|
||||
|
||||
@doc """
|
||||
Filter tools based on client context.
|
||||
|
||||
Returns a filtered list of tools appropriate for the client's context.
|
||||
"""
|
||||
@spec filter_tools([map()], client_context()) :: [map()]
|
||||
def filter_tools(tools, %__MODULE__{} = context) do
|
||||
tools
|
||||
|> Enum.filter(&should_include_tool?(&1, context))
|
||||
|> maybe_annotate_tools(context)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Determine if a tool should be included for the given client context.
|
||||
"""
|
||||
@spec should_include_tool?(map(), client_context()) :: boolean()
|
||||
def should_include_tool?(tool, context) do
|
||||
tool_name = Map.get(tool, "name", "")
|
||||
|
||||
cond do
|
||||
# Always include safe tools
|
||||
tool_name in @always_safe_tools ->
|
||||
true
|
||||
|
||||
# Local clients get everything
|
||||
context.connection_type == :local ->
|
||||
true
|
||||
|
||||
# Remote/web clients get filtered access
|
||||
context.connection_type in [:remote, :web] ->
|
||||
not is_local_only_tool?(tool, context)
|
||||
|
||||
# Default to restrictive
|
||||
true ->
|
||||
tool_name in @always_safe_tools
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Detect client context from connection information.
|
||||
"""
|
||||
@spec detect_client_context(map()) :: client_context()
|
||||
def detect_client_context(connection_info) do
|
||||
connection_type = determine_connection_type(connection_info)
|
||||
security_level = determine_security_level(connection_type, connection_info)
|
||||
|
||||
%__MODULE__{
|
||||
connection_type: connection_type,
|
||||
client_info: Map.get(connection_info, :client_info, %{}),
|
||||
capabilities: Map.get(connection_info, :capabilities, []),
|
||||
security_level: security_level,
|
||||
origin: Map.get(connection_info, :origin),
|
||||
user_agent: Map.get(connection_info, :user_agent)
|
||||
}
|
||||
end
|
||||
|
||||
@doc """
|
||||
Create a local client context (for stdio and direct connections).
|
||||
"""
|
||||
@spec local_context() :: client_context()
|
||||
def local_context do
|
||||
%__MODULE__{
|
||||
connection_type: :local,
|
||||
client_info: %{type: "local_stdio"},
|
||||
capabilities: ["full_access"],
|
||||
security_level: :trusted,
|
||||
origin: nil,
|
||||
user_agent: "agent-coordinator-local"
|
||||
}
|
||||
end
|
||||
|
||||
@doc """
|
||||
Create a remote client context.
|
||||
"""
|
||||
@spec remote_context(map()) :: client_context()
|
||||
def remote_context(opts \\ %{}) do
|
||||
%__MODULE__{
|
||||
connection_type: :remote,
|
||||
client_info: Map.get(opts, :client_info, %{type: "remote_http"}),
|
||||
capabilities: Map.get(opts, :capabilities, ["coordination"]),
|
||||
security_level: :sandboxed,
|
||||
origin: Map.get(opts, :origin),
|
||||
user_agent: Map.get(opts, :user_agent, "unknown")
|
||||
}
|
||||
end
|
||||
|
||||
@doc """
|
||||
Get tool filtering statistics for monitoring.
|
||||
"""
|
||||
@spec get_filter_stats([map()], client_context()) :: map()
|
||||
def get_filter_stats(original_tools, context) do
|
||||
filtered_tools = filter_tools(original_tools, context)
|
||||
|
||||
%{
|
||||
original_count: length(original_tools),
|
||||
filtered_count: length(filtered_tools),
|
||||
removed_count: length(original_tools) - length(filtered_tools),
|
||||
connection_type: context.connection_type,
|
||||
security_level: context.security_level,
|
||||
filtered_at: DateTime.utc_now()
|
||||
}
|
||||
end
|
||||
|
||||
# Private helpers
|
||||
|
||||
defp is_local_only_tool?(tool, _context) do
|
||||
tool_name = Map.get(tool, "name", "")
|
||||
description = Map.get(tool, "description", "")
|
||||
|
||||
# Check against known local-only tool names
|
||||
name_is_local =
|
||||
tool_name in get_local_only_tool_names() or
|
||||
Enum.any?(local_only_patterns(), &Regex.match?(&1, tool_name))
|
||||
|
||||
# Check description for local-only indicators
|
||||
description_is_local =
|
||||
String.contains?(
|
||||
String.downcase(description),
|
||||
["filesystem", "file system", "vscode", "terminal", "local file", "directory"]
|
||||
)
|
||||
|
||||
# Check tool schema for local-only parameters
|
||||
schema_is_local = has_local_only_parameters?(tool)
|
||||
|
||||
name_is_local or description_is_local or schema_is_local
|
||||
end
|
||||
|
||||
defp get_local_only_tool_names do
|
||||
[
|
||||
# Filesystem tools
|
||||
"read_file",
|
||||
"write_file",
|
||||
"create_file",
|
||||
"delete_file",
|
||||
"list_directory",
|
||||
"search_files",
|
||||
"move_file",
|
||||
"get_file_info",
|
||||
"list_allowed_directories",
|
||||
"directory_tree",
|
||||
"edit_file",
|
||||
"read_text_file",
|
||||
"read_multiple_files",
|
||||
"read_media_file",
|
||||
|
||||
# VSCode tools
|
||||
"vscode_create_file",
|
||||
"vscode_write_file",
|
||||
"vscode_read_file",
|
||||
"vscode_delete_file",
|
||||
"vscode_list_directory",
|
||||
"vscode_get_active_editor",
|
||||
"vscode_set_editor_content",
|
||||
"vscode_get_selection",
|
||||
"vscode_set_selection",
|
||||
"vscode_show_message",
|
||||
"vscode_run_command",
|
||||
"vscode_get_workspace_folders",
|
||||
|
||||
# Terminal/process tools
|
||||
"run_in_terminal",
|
||||
"get_terminal_output",
|
||||
"terminal_last_command",
|
||||
"terminal_selection"
|
||||
]
|
||||
end
|
||||
|
||||
defp has_local_only_parameters?(tool) do
|
||||
schema = Map.get(tool, "inputSchema", %{})
|
||||
properties = Map.get(schema, "properties", %{})
|
||||
|
||||
# Look for file path parameters or other local indicators
|
||||
Enum.any?(properties, fn {param_name, param_schema} ->
|
||||
param_name in ["path", "filePath", "file_path", "directory", "workspace_path"] or
|
||||
String.contains?(
|
||||
Map.get(param_schema, "description", ""),
|
||||
["file path", "directory", "workspace", "local"]
|
||||
)
|
||||
end)
|
||||
end
|
||||
|
||||
defp determine_connection_type(connection_info) do
|
||||
cond do
|
||||
Map.get(connection_info, :transport) == :stdio -> :local
|
||||
Map.get(connection_info, :transport) == :websocket -> :web
|
||||
Map.get(connection_info, :transport) == :http -> :remote
|
||||
Map.get(connection_info, :remote_ip) == "127.0.0.1" -> :local
|
||||
Map.get(connection_info, :remote_ip) == "::1" -> :local
|
||||
Map.has_key?(connection_info, :remote_ip) -> :remote
|
||||
# Default to local for stdio
|
||||
true -> :local
|
||||
end
|
||||
end
|
||||
|
||||
defp determine_security_level(connection_type, connection_info) do
|
||||
case connection_type do
|
||||
:local ->
|
||||
:trusted
|
||||
|
||||
:remote ->
|
||||
if Map.get(connection_info, :secure, false) do
|
||||
:sandboxed
|
||||
else
|
||||
:restricted
|
||||
end
|
||||
|
||||
:web ->
|
||||
:sandboxed
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_annotate_tools(tools, context) do
|
||||
# Add context information to tools if needed
|
||||
if context.connection_type == :remote do
|
||||
Enum.map(tools, fn tool ->
|
||||
Map.put(tool, "_filtered_for", "remote_client")
|
||||
end)
|
||||
else
|
||||
tools
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -1,251 +0,0 @@
|
||||
defmodule AgentCoordinator.UnifiedMCPServer do
|
||||
@moduledoc """
|
||||
Unified MCP Server that aggregates all external MCP servers and Agent Coordinator tools.
|
||||
|
||||
This is the single MCP server that GitHub Copilot sees, which internally manages
|
||||
all other MCP servers and provides automatic task tracking for any tool usage.
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
require Logger
|
||||
|
||||
alias AgentCoordinator.{MCPServerManager, TaskRegistry}
|
||||
|
||||
defstruct [
|
||||
:agent_sessions,
|
||||
:request_id_counter
|
||||
]
|
||||
|
||||
# Client API
|
||||
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Handle MCP request from GitHub Copilot
|
||||
"""
|
||||
def handle_mcp_request(request) do
|
||||
GenServer.call(__MODULE__, {:handle_request, request}, 60_000)
|
||||
end
|
||||
|
||||
# Server callbacks
|
||||
|
||||
def init(_opts) do
|
||||
state = %__MODULE__{
|
||||
agent_sessions: %{},
|
||||
request_id_counter: 0
|
||||
}
|
||||
|
||||
Logger.info("Unified MCP Server starting...")
|
||||
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
def handle_call({:handle_request, request}, _from, state) do
|
||||
response = process_mcp_request(request, state)
|
||||
{:reply, response, state}
|
||||
end
|
||||
|
||||
def handle_call({:register_agent_session, agent_id, session_info}, _from, state) do
|
||||
new_state = %{state | agent_sessions: Map.put(state.agent_sessions, agent_id, session_info)}
|
||||
{:reply, :ok, new_state}
|
||||
end
|
||||
|
||||
def handle_info(_msg, state) do
|
||||
{:noreply, state}
|
||||
end
|
||||
|
||||
# Private functions
|
||||
|
||||
defp process_mcp_request(request, state) do
|
||||
method = Map.get(request, "method")
|
||||
id = Map.get(request, "id")
|
||||
|
||||
case method do
|
||||
"initialize" ->
|
||||
handle_initialize(request, id)
|
||||
|
||||
"tools/list" ->
|
||||
handle_tools_list(request, id)
|
||||
|
||||
"tools/call" ->
|
||||
handle_tools_call(request, id, state)
|
||||
|
||||
_ ->
|
||||
error_response(id, -32601, "Method not found: #{method}")
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_initialize(_request, id) do
|
||||
%{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => id,
|
||||
"result" => %{
|
||||
"protocolVersion" => "2024-11-05",
|
||||
"capabilities" => %{
|
||||
"tools" => %{},
|
||||
"coordination" => %{
|
||||
"automatic_task_tracking" => true,
|
||||
"agent_management" => true,
|
||||
"multi_server_proxy" => true,
|
||||
"heartbeat_coverage" => true
|
||||
}
|
||||
},
|
||||
"serverInfo" => %{
|
||||
"name" => "agent-coordinator-unified",
|
||||
"version" => "0.1.0",
|
||||
"description" =>
|
||||
"Unified MCP server with automatic task tracking and agent coordination"
|
||||
}
|
||||
}
|
||||
}
|
||||
end
|
||||
|
||||
defp handle_tools_list(_request, id) do
|
||||
case MCPServerManager.get_unified_tools() do
|
||||
tools when is_list(tools) ->
|
||||
%{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => id,
|
||||
"result" => %{
|
||||
"tools" => tools
|
||||
}
|
||||
}
|
||||
|
||||
{:error, reason} ->
|
||||
error_response(id, -32603, "Failed to get tools: #{reason}")
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_tools_call(request, id, state) do
|
||||
params = Map.get(request, "params", %{})
|
||||
tool_name = Map.get(params, "name")
|
||||
arguments = Map.get(params, "arguments", %{})
|
||||
|
||||
# Determine agent context from the request or session
|
||||
agent_context = determine_agent_context(request, arguments, state)
|
||||
|
||||
case MCPServerManager.route_tool_call(tool_name, arguments, agent_context) do
|
||||
%{"error" => _} = error_result ->
|
||||
Map.put(error_result, "id", id)
|
||||
|
||||
result ->
|
||||
# Wrap successful results in MCP format
|
||||
success_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => id,
|
||||
"result" => format_tool_result(result, tool_name, agent_context)
|
||||
}
|
||||
|
||||
success_response
|
||||
end
|
||||
end
|
||||
|
||||
defp determine_agent_context(request, arguments, state) do
|
||||
# Try to determine agent from various sources:
|
||||
|
||||
# 1. Explicit agent_id in arguments
|
||||
case Map.get(arguments, "agent_id") do
|
||||
agent_id when is_binary(agent_id) ->
|
||||
%{agent_id: agent_id}
|
||||
|
||||
_ ->
|
||||
# 2. Try to extract from request metadata
|
||||
case extract_agent_from_request(request) do
|
||||
agent_id when is_binary(agent_id) ->
|
||||
%{agent_id: agent_id}
|
||||
|
||||
_ ->
|
||||
# 3. Use a default session for GitHub Copilot
|
||||
default_agent_context(state)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp extract_agent_from_request(_request) do
|
||||
# Look for agent info in request headers, params, etc.
|
||||
# This could be extended to support various ways of identifying the agent
|
||||
nil
|
||||
end
|
||||
|
||||
defp default_agent_context(state) do
|
||||
# Create or use a default agent session for GitHub Copilot
|
||||
default_agent_id = "github_copilot_session"
|
||||
|
||||
case Map.get(state.agent_sessions, default_agent_id) do
|
||||
nil ->
|
||||
# Auto-register GitHub Copilot as an agent
|
||||
case TaskRegistry.register_agent("GitHub Copilot", [
|
||||
"coding",
|
||||
"analysis",
|
||||
"review",
|
||||
"documentation"
|
||||
]) do
|
||||
{:ok, %{agent_id: agent_id}} ->
|
||||
session_info = %{
|
||||
agent_id: agent_id,
|
||||
name: "GitHub Copilot",
|
||||
auto_registered: true,
|
||||
created_at: DateTime.utc_now()
|
||||
}
|
||||
|
||||
GenServer.call(self(), {:register_agent_session, agent_id, session_info})
|
||||
%{agent_id: agent_id}
|
||||
|
||||
_ ->
|
||||
%{agent_id: default_agent_id}
|
||||
end
|
||||
|
||||
session_info ->
|
||||
%{agent_id: session_info.agent_id}
|
||||
end
|
||||
end
|
||||
|
||||
defp format_tool_result(result, tool_name, agent_context) do
|
||||
# Format the result according to MCP tool call response format
|
||||
base_result =
|
||||
case result do
|
||||
%{"result" => content} when is_map(content) ->
|
||||
# Already properly formatted
|
||||
content
|
||||
|
||||
{:ok, content} ->
|
||||
# Convert tuple response to content
|
||||
%{"content" => [%{"type" => "text", "text" => inspect(content)}]}
|
||||
|
||||
%{} = map_result ->
|
||||
# Convert map to text content
|
||||
%{"content" => [%{"type" => "text", "text" => Jason.encode!(map_result)}]}
|
||||
|
||||
binary when is_binary(binary) ->
|
||||
# Simple text result
|
||||
%{"content" => [%{"type" => "text", "text" => binary}]}
|
||||
|
||||
other ->
|
||||
# Fallback for any other type
|
||||
%{"content" => [%{"type" => "text", "text" => inspect(other)}]}
|
||||
end
|
||||
|
||||
# Add metadata about the operation
|
||||
metadata = %{
|
||||
"tool_name" => tool_name,
|
||||
"agent_id" => agent_context.agent_id,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601(),
|
||||
"auto_tracked" => true
|
||||
}
|
||||
|
||||
Map.put(base_result, "_metadata", metadata)
|
||||
end
|
||||
|
||||
defp error_response(id, code, message) do
|
||||
%{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => id,
|
||||
"error" => %{
|
||||
"code" => code,
|
||||
"message" => message
|
||||
}
|
||||
}
|
||||
end
|
||||
end
|
||||
@@ -33,7 +33,8 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
"vscode_set_selection" => :editor,
|
||||
|
||||
# Command Operations (varies by command)
|
||||
"vscode_run_command" => :admin, # Default to admin, will check specific commands
|
||||
# Default to admin, will check specific commands
|
||||
"vscode_run_command" => :admin,
|
||||
|
||||
# User Communication
|
||||
"vscode_show_message" => :read_only
|
||||
@@ -88,6 +89,7 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
case additional_checks(tool_name, args, context) do
|
||||
:ok ->
|
||||
{:ok, required_level}
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, reason}
|
||||
end
|
||||
@@ -109,17 +111,20 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
|
||||
case agent_id do
|
||||
"github_copilot_session" -> :filesystem
|
||||
id when is_binary(id) and byte_size(id) > 0 -> :editor # Other registered agents
|
||||
_ -> :read_only # Unknown agents
|
||||
# Other registered agents
|
||||
id when is_binary(id) and byte_size(id) > 0 -> :editor
|
||||
# Unknown agents
|
||||
_ -> :read_only
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Update an agent's permission level (for administrative purposes).
|
||||
"""
|
||||
def set_agent_permission_level(agent_id, level) when level in [:read_only, :editor, :filesystem, :terminal, :git, :admin] do
|
||||
def set_agent_permission_level(agent_id, level)
|
||||
when level in [:read_only, :editor, :filesystem, :terminal, :git, :admin] do
|
||||
# This would persist to a database or configuration store
|
||||
Logger.info("Setting permission level for agent #{agent_id} to #{level}")
|
||||
IO.puts(:stderr, "Setting permission level for agent #{agent_id} to #{level}")
|
||||
:ok
|
||||
end
|
||||
|
||||
@@ -127,16 +132,24 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
|
||||
defp get_required_permission(tool_name, args) do
|
||||
case Map.get(@tool_permissions, tool_name) do
|
||||
nil -> :admin # Unknown tools require admin by default
|
||||
# Unknown tools require admin by default
|
||||
nil ->
|
||||
:admin
|
||||
|
||||
:admin when tool_name == "vscode_run_command" ->
|
||||
# Special handling for run_command - check specific command
|
||||
command = args["command"]
|
||||
|
||||
if command in @whitelisted_commands do
|
||||
:editor # Whitelisted commands only need editor level
|
||||
# Whitelisted commands only need editor level
|
||||
:editor
|
||||
else
|
||||
:admin # Unknown commands need admin
|
||||
# Unknown commands need admin
|
||||
:admin
|
||||
end
|
||||
level -> level
|
||||
|
||||
level ->
|
||||
level
|
||||
end
|
||||
end
|
||||
|
||||
@@ -165,11 +178,19 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
|
||||
forbidden_patterns = [
|
||||
# System directories
|
||||
"/etc/", "/bin/", "/usr/", "/var/", "/tmp/",
|
||||
"/etc/",
|
||||
"/bin/",
|
||||
"/usr/",
|
||||
"/var/",
|
||||
"/tmp/",
|
||||
# User sensitive areas
|
||||
"/.ssh/", "/.config/", "/home/", "~",
|
||||
"/.ssh/",
|
||||
"/.config/",
|
||||
"/home/",
|
||||
"~",
|
||||
# Relative path traversal
|
||||
"../", "..\\"
|
||||
"../",
|
||||
"..\\"
|
||||
]
|
||||
|
||||
if Enum.any?(forbidden_patterns, fn pattern -> String.contains?(path, pattern) end) do
|
||||
@@ -181,7 +202,7 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
|
||||
defp check_workspace_bounds(_path, _context), do: {:error, "Invalid path format"}
|
||||
|
||||
defp check_command_safety(command, args) when is_binary(command) do
|
||||
defp check_command_safety(command, _args) when is_binary(command) do
|
||||
cond do
|
||||
command in @whitelisted_commands ->
|
||||
:ok
|
||||
@@ -219,4 +240,4 @@ defmodule AgentCoordinator.VSCodePermissions do
|
||||
whitelisted_commands: @whitelisted_commands
|
||||
}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -18,7 +18,8 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
# File Operations
|
||||
%{
|
||||
"name" => "vscode_read_file",
|
||||
"description" => "Read file contents using VS Code's file system API. Only works within workspace folders.",
|
||||
"description" =>
|
||||
"Read file contents using VS Code's file system API. Only works within workspace folders.",
|
||||
"inputSchema" => %{
|
||||
"type" => "object",
|
||||
"properties" => %{
|
||||
@@ -37,7 +38,8 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
},
|
||||
%{
|
||||
"name" => "vscode_write_file",
|
||||
"description" => "Write content to a file using VS Code's file system API. Creates directories if needed.",
|
||||
"description" =>
|
||||
"Write content to a file using VS Code's file system API. Creates directories if needed.",
|
||||
"inputSchema" => %{
|
||||
"type" => "object",
|
||||
"properties" => %{
|
||||
@@ -93,7 +95,8 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
"properties" => %{
|
||||
"path" => %{
|
||||
"type" => "string",
|
||||
"description" => "Relative or absolute path to the file/directory within the workspace"
|
||||
"description" =>
|
||||
"Relative or absolute path to the file/directory within the workspace"
|
||||
},
|
||||
"recursive" => %{
|
||||
"type" => "boolean",
|
||||
@@ -101,7 +104,8 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
},
|
||||
"use_trash" => %{
|
||||
"type" => "boolean",
|
||||
"description" => "Whether to move to trash instead of permanent deletion (default: true)"
|
||||
"description" =>
|
||||
"Whether to move to trash instead of permanent deletion (default: true)"
|
||||
}
|
||||
},
|
||||
"required" => ["path"]
|
||||
@@ -227,7 +231,8 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
# Command Operations
|
||||
%{
|
||||
"name" => "vscode_run_command",
|
||||
"description" => "Execute a VS Code command. Only whitelisted commands are allowed for security.",
|
||||
"description" =>
|
||||
"Execute a VS Code command. Only whitelisted commands are allowed for security.",
|
||||
"inputSchema" => %{
|
||||
"type" => "object",
|
||||
"properties" => %{
|
||||
@@ -282,21 +287,26 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
required = Map.get(input_schema, "required", [])
|
||||
|
||||
# Add agent_id to properties
|
||||
updated_properties = Map.put(properties, "agent_id", %{
|
||||
"type" => "string",
|
||||
"description" => "Unique identifier for the agent making this request. Each agent session must use a consistent, unique ID throughout their interaction. Generate a UUID or use a descriptive identifier like 'agent_main_task_001'."
|
||||
})
|
||||
updated_properties =
|
||||
Map.put(properties, "agent_id", %{
|
||||
"type" => "string",
|
||||
"description" =>
|
||||
"Unique identifier for the agent making this request. Each agent session must use a consistent, unique ID throughout their interaction. Generate a UUID or use a descriptive identifier like 'agent_main_task_001'."
|
||||
})
|
||||
|
||||
# Add agent_id to required fields
|
||||
updated_required = if "agent_id" in required, do: required, else: ["agent_id" | required]
|
||||
|
||||
# Update the tool schema
|
||||
updated_input_schema = input_schema
|
||||
|> Map.put("properties", updated_properties)
|
||||
|> Map.put("required", updated_required)
|
||||
updated_input_schema =
|
||||
input_schema
|
||||
|> Map.put("properties", updated_properties)
|
||||
|> Map.put("required", updated_required)
|
||||
|
||||
# Update tool description to mention agent_id requirement
|
||||
updated_description = tool["description"] <> " IMPORTANT: Include a unique agent_id parameter to identify your agent session."
|
||||
updated_description =
|
||||
tool["description"] <>
|
||||
" IMPORTANT: Include a unique agent_id parameter to identify your agent session."
|
||||
|
||||
tool
|
||||
|> Map.put("inputSchema", updated_input_schema)
|
||||
@@ -307,17 +317,20 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
Handle a VS Code tool call with permission checking and error handling.
|
||||
"""
|
||||
def handle_tool_call(tool_name, args, context) do
|
||||
Logger.info("VS Code tool call: #{tool_name} with args: #{inspect(args)}")
|
||||
IO.puts(:stderr, "VS Code tool call: #{tool_name} with args: #{inspect(args)}")
|
||||
|
||||
# Extract agent_id from args (required for all VS Code tools)
|
||||
agent_id = Map.get(args, "agent_id")
|
||||
|
||||
if is_nil(agent_id) or agent_id == "" do
|
||||
Logger.warning("Missing agent_id in VS Code tool call: #{tool_name}")
|
||||
{:error, %{
|
||||
"error" => "Missing agent_id",
|
||||
"message" => "All VS Code tools require a unique agent_id parameter. Please include your agent session identifier."
|
||||
}}
|
||||
IO.puts(:stderr, "Missing agent_id in VS Code tool call: #{tool_name}")
|
||||
|
||||
{:error,
|
||||
%{
|
||||
"error" => "Missing agent_id",
|
||||
"message" =>
|
||||
"All VS Code tools require a unique agent_id parameter. Please include your agent session identifier."
|
||||
}}
|
||||
else
|
||||
# Ensure agent is registered and create enhanced context
|
||||
enhanced_context = ensure_agent_registered(agent_id, context)
|
||||
@@ -334,7 +347,7 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
result
|
||||
|
||||
{:error, reason} ->
|
||||
Logger.warning("Permission denied for #{tool_name} (agent: #{agent_id}): #{reason}")
|
||||
IO.puts(:stderr, "Permission denied for #{tool_name} (agent: #{agent_id}): #{reason}")
|
||||
{:error, %{"error" => "Permission denied", "reason" => reason}}
|
||||
end
|
||||
end
|
||||
@@ -350,7 +363,7 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
|
||||
{:error, :not_found} ->
|
||||
# Agent not registered, auto-register with VS Code capabilities
|
||||
Logger.info("Auto-registering new agent: #{agent_id}")
|
||||
IO.puts(:stderr, "Auto-registering new agent: #{agent_id}")
|
||||
|
||||
capabilities = [
|
||||
"coding",
|
||||
@@ -364,18 +377,25 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
case AgentCoordinator.TaskRegistry.register_agent(
|
||||
"GitHub Copilot (#{agent_id})",
|
||||
capabilities,
|
||||
[metadata: %{agent_id: agent_id, auto_registered: true, session_start: DateTime.utc_now()}]
|
||||
metadata: %{
|
||||
agent_id: agent_id,
|
||||
auto_registered: true,
|
||||
session_start: DateTime.utc_now()
|
||||
}
|
||||
) do
|
||||
{:ok, _result} ->
|
||||
Logger.info("Successfully auto-registered agent: #{agent_id}")
|
||||
IO.puts(:stderr, "Successfully auto-registered agent: #{agent_id}")
|
||||
Map.put(context, :agent_id, agent_id)
|
||||
|
||||
{:error, reason} ->
|
||||
Logger.error("Failed to auto-register agent #{agent_id}: #{inspect(reason)}")
|
||||
Map.put(context, :agent_id, agent_id) # Continue anyway
|
||||
IO.puts(:stderr, "Failed to auto-register agent #{agent_id}: #{inspect(reason)}")
|
||||
# Continue anyway
|
||||
Map.put(context, :agent_id, agent_id)
|
||||
end
|
||||
end
|
||||
end # Private function to execute individual tools
|
||||
end
|
||||
|
||||
# Private function to execute individual tools
|
||||
defp execute_tool(tool_name, args, context) do
|
||||
case tool_name do
|
||||
"vscode_read_file" -> read_file(args, context)
|
||||
@@ -398,127 +418,141 @@ defmodule AgentCoordinator.VSCodeToolProvider do
|
||||
|
||||
defp read_file(args, _context) do
|
||||
# For now, return a placeholder - we'll implement the actual VS Code API bridge
|
||||
{:ok, %{
|
||||
"content" => "// VS Code file content would be here",
|
||||
"path" => args["path"],
|
||||
"encoding" => args["encoding"] || "utf8",
|
||||
"size" => 42,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"content" => "// VS Code file content would be here",
|
||||
"path" => args["path"],
|
||||
"encoding" => args["encoding"] || "utf8",
|
||||
"size" => 42,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp write_file(args, _context) do
|
||||
{:ok, %{
|
||||
"path" => args["path"],
|
||||
"bytes_written" => String.length(args["content"]),
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"path" => args["path"],
|
||||
"bytes_written" => String.length(args["content"]),
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp create_file(args, _context) do
|
||||
{:ok, %{
|
||||
"path" => args["path"],
|
||||
"created" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"path" => args["path"],
|
||||
"created" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp delete_file(args, _context) do
|
||||
{:ok, %{
|
||||
"path" => args["path"],
|
||||
"deleted" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"path" => args["path"],
|
||||
"deleted" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp list_directory(args, _context) do
|
||||
{:ok, %{
|
||||
"path" => args["path"],
|
||||
"entries" => [
|
||||
%{"name" => "file1.txt", "type" => "file", "size" => 123},
|
||||
%{"name" => "subdir", "type" => "directory", "size" => nil}
|
||||
]
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"path" => args["path"],
|
||||
"entries" => [
|
||||
%{"name" => "file1.txt", "type" => "file", "size" => 123},
|
||||
%{"name" => "subdir", "type" => "directory", "size" => nil}
|
||||
]
|
||||
}}
|
||||
end
|
||||
|
||||
defp get_workspace_folders(_args, _context) do
|
||||
{:ok, %{
|
||||
"folders" => [
|
||||
%{"name" => "agent_coordinator", "uri" => "file:///home/ra/agent_coordinator"}
|
||||
]
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"folders" => [
|
||||
%{"name" => "agent_coordinator", "uri" => "file:///home/ra/agent_coordinator"}
|
||||
]
|
||||
}}
|
||||
end
|
||||
|
||||
defp get_active_editor(args, _context) do
|
||||
{:ok, %{
|
||||
"file_path" => "/home/ra/agent_coordinator/lib/agent_coordinator.ex",
|
||||
"language" => "elixir",
|
||||
"line_count" => 150,
|
||||
"content" => if(args["include_content"], do: "// Editor content here", else: nil),
|
||||
"selection" => %{
|
||||
"start" => %{"line" => 10, "character" => 5},
|
||||
"end" => %{"line" => 10, "character" => 15}
|
||||
},
|
||||
"cursor_position" => %{"line" => 10, "character" => 15}
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"file_path" => "/home/ra/agent_coordinator/lib/agent_coordinator.ex",
|
||||
"language" => "elixir",
|
||||
"line_count" => 150,
|
||||
"content" => if(args["include_content"], do: "// Editor content here", else: nil),
|
||||
"selection" => %{
|
||||
"start" => %{"line" => 10, "character" => 5},
|
||||
"end" => %{"line" => 10, "character" => 15}
|
||||
},
|
||||
"cursor_position" => %{"line" => 10, "character" => 15}
|
||||
}}
|
||||
end
|
||||
|
||||
defp set_editor_content(args, _context) do
|
||||
{:ok, %{
|
||||
"file_path" => args["file_path"],
|
||||
"content_length" => String.length(args["content"]),
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"file_path" => args["file_path"],
|
||||
"content_length" => String.length(args["content"]),
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp get_selection(args, _context) do
|
||||
{:ok, %{
|
||||
"selection" => %{
|
||||
"start" => %{"line" => 5, "character" => 0},
|
||||
"end" => %{"line" => 8, "character" => 20}
|
||||
},
|
||||
"content" => if(args["include_content"], do: "Selected text here", else: nil),
|
||||
"is_empty" => false
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"selection" => %{
|
||||
"start" => %{"line" => 5, "character" => 0},
|
||||
"end" => %{"line" => 8, "character" => 20}
|
||||
},
|
||||
"content" => if(args["include_content"], do: "Selected text here", else: nil),
|
||||
"is_empty" => false
|
||||
}}
|
||||
end
|
||||
|
||||
defp set_selection(args, _context) do
|
||||
{:ok, %{
|
||||
"selection" => %{
|
||||
"start" => %{"line" => args["start_line"], "character" => args["start_character"]},
|
||||
"end" => %{"line" => args["end_line"], "character" => args["end_character"]}
|
||||
},
|
||||
"revealed" => args["reveal"] != false
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"selection" => %{
|
||||
"start" => %{"line" => args["start_line"], "character" => args["start_character"]},
|
||||
"end" => %{"line" => args["end_line"], "character" => args["end_character"]}
|
||||
},
|
||||
"revealed" => args["reveal"] != false
|
||||
}}
|
||||
end
|
||||
|
||||
defp run_command(args, _context) do
|
||||
# This would execute actual VS Code commands
|
||||
{:ok, %{
|
||||
"command" => args["command"],
|
||||
"args" => args["args"] || [],
|
||||
"result" => "Command executed successfully",
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"command" => args["command"],
|
||||
"args" => args["args"] || [],
|
||||
"result" => "Command executed successfully",
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
defp show_message(args, _context) do
|
||||
{:ok, %{
|
||||
"message" => args["message"],
|
||||
"type" => args["type"] || "info",
|
||||
"displayed" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
{:ok,
|
||||
%{
|
||||
"message" => args["message"],
|
||||
"type" => args["type"] || "info",
|
||||
"displayed" => true,
|
||||
"timestamp" => DateTime.utc_now() |> DateTime.to_iso8601()
|
||||
}}
|
||||
end
|
||||
|
||||
# Logging function
|
||||
defp log_tool_operation(tool_name, args, context, result) do
|
||||
Logger.info("VS Code tool operation completed", %{
|
||||
operation_data = %{
|
||||
tool: tool_name,
|
||||
agent_id: context[:agent_id],
|
||||
args_summary: inspect(Map.take(args, ["path", "command", "message"])),
|
||||
success: match?({:ok, _}, result),
|
||||
timestamp: DateTime.utc_now()
|
||||
})
|
||||
}
|
||||
|
||||
IO.puts(:stderr, "VS Code tool operation completed: #{inspect(operation_data)}")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
388
lib/agent_coordinator/websocket_handler.ex
Normal file
388
lib/agent_coordinator/websocket_handler.ex
Normal file
@@ -0,0 +1,388 @@
|
||||
defmodule AgentCoordinator.WebSocketHandler do
|
||||
@moduledoc """
|
||||
WebSocket handler for real-time MCP communication.
|
||||
|
||||
Provides:
|
||||
- Real-time MCP JSON-RPC over WebSocket
|
||||
- Tool filtering based on client context
|
||||
- Session management
|
||||
- Heartbeat and connection monitoring
|
||||
"""
|
||||
|
||||
@behaviour WebSock
|
||||
require Logger
|
||||
alias AgentCoordinator.{MCPServer, ToolFilter}
|
||||
|
||||
defstruct [
|
||||
:client_context,
|
||||
:session_id,
|
||||
:last_heartbeat,
|
||||
:agent_id,
|
||||
:connection_info
|
||||
]
|
||||
|
||||
# 30 seconds
|
||||
@heartbeat_interval 30_000
|
||||
|
||||
@impl WebSock
|
||||
def init(opts) do
|
||||
session_id = "ws_" <> UUID.uuid4()
|
||||
|
||||
# Initialize connection state
|
||||
state = %__MODULE__{
|
||||
session_id: session_id,
|
||||
last_heartbeat: DateTime.utc_now(),
|
||||
connection_info: opts
|
||||
}
|
||||
|
||||
# Start heartbeat timer
|
||||
Process.send_after(self(), :heartbeat, @heartbeat_interval)
|
||||
|
||||
IO.puts(:stderr, "WebSocket connection established: #{session_id}")
|
||||
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def handle_in({text, [opcode: :text]}, state) do
|
||||
case Jason.decode(text) do
|
||||
{:ok, message} ->
|
||||
handle_mcp_message(message, state)
|
||||
|
||||
{:error, %Jason.DecodeError{} = error} ->
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => nil,
|
||||
"error" => %{
|
||||
"code" => -32700,
|
||||
"message" => "Parse error: #{Exception.message(error)}"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, state}
|
||||
end
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def handle_in({_binary, [opcode: :binary]}, state) do
|
||||
IO.puts(:stderr, "Received unexpected binary data on WebSocket")
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def handle_info(:heartbeat, state) do
|
||||
# Send heartbeat if we have an agent registered
|
||||
if state.agent_id do
|
||||
heartbeat_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => generate_request_id(),
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "heartbeat",
|
||||
"arguments" => %{"agent_id" => state.agent_id}
|
||||
}
|
||||
}
|
||||
|
||||
# Send heartbeat to MCP server
|
||||
MCPServer.handle_mcp_request(heartbeat_request)
|
||||
end
|
||||
|
||||
# Schedule next heartbeat
|
||||
Process.send_after(self(), :heartbeat, @heartbeat_interval)
|
||||
|
||||
updated_state = %{state | last_heartbeat: DateTime.utc_now()}
|
||||
{:ok, updated_state}
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def handle_info(message, state) do
|
||||
IO.puts(:stderr, "Received unexpected message: #{inspect(message)}")
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def terminate(:remote, state) do
|
||||
IO.puts(:stderr, "WebSocket connection closed by client: #{state.session_id}")
|
||||
cleanup_session(state)
|
||||
:ok
|
||||
end
|
||||
|
||||
@impl WebSock
|
||||
def terminate(reason, state) do
|
||||
IO.puts(
|
||||
:stderr,
|
||||
"WebSocket connection terminated: #{state.session_id}, reason: #{inspect(reason)}"
|
||||
)
|
||||
|
||||
cleanup_session(state)
|
||||
:ok
|
||||
end
|
||||
|
||||
# Private helper functions
|
||||
|
||||
defp handle_mcp_message(message, state) do
|
||||
method = Map.get(message, "method")
|
||||
|
||||
case method do
|
||||
"initialize" ->
|
||||
handle_initialize(message, state)
|
||||
|
||||
"tools/list" ->
|
||||
handle_tools_list(message, state)
|
||||
|
||||
"tools/call" ->
|
||||
handle_tool_call(message, state)
|
||||
|
||||
"notifications/initialized" ->
|
||||
handle_initialized_notification(message, state)
|
||||
|
||||
_ ->
|
||||
# Forward other methods to MCP server
|
||||
forward_to_mcp_server(message, state)
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_initialize(message, state) do
|
||||
# Extract client info from initialize message
|
||||
params = Map.get(message, "params", %{})
|
||||
client_info = Map.get(params, "clientInfo", %{})
|
||||
|
||||
# Detect client context
|
||||
connection_info = %{
|
||||
transport: :websocket,
|
||||
client_info: client_info,
|
||||
session_id: state.session_id,
|
||||
capabilities: Map.get(params, "capabilities", [])
|
||||
}
|
||||
|
||||
client_context = ToolFilter.detect_client_context(connection_info)
|
||||
|
||||
# Send initialize response
|
||||
response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"result" => %{
|
||||
"protocolVersion" => "2024-11-05",
|
||||
"capabilities" => %{
|
||||
"tools" => %{},
|
||||
"coordination" => %{
|
||||
"automatic_task_tracking" => true,
|
||||
"agent_management" => true,
|
||||
"multi_server_proxy" => true,
|
||||
"heartbeat_coverage" => true,
|
||||
"session_tracking" => true,
|
||||
"tool_filtering" => true,
|
||||
"websocket_realtime" => true
|
||||
}
|
||||
},
|
||||
"serverInfo" => %{
|
||||
"name" => "agent-coordinator-websocket",
|
||||
"version" => AgentCoordinator.version(),
|
||||
"description" => "Agent Coordinator WebSocket interface with tool filtering"
|
||||
},
|
||||
"_meta" => %{
|
||||
"session_id" => state.session_id,
|
||||
"connection_type" => client_context.connection_type,
|
||||
"security_level" => client_context.security_level
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
updated_state = %{state | client_context: client_context, connection_info: connection_info}
|
||||
|
||||
{:reply, {:text, Jason.encode!(response)}, updated_state}
|
||||
end
|
||||
|
||||
defp handle_tools_list(message, state) do
|
||||
if state.client_context do
|
||||
# Get filtered tools based on client context
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, state.client_context)
|
||||
|
||||
response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"result" => %{
|
||||
"tools" => filtered_tools,
|
||||
"_meta" => %{
|
||||
"filtered_for" => state.client_context.connection_type,
|
||||
"original_count" => length(all_tools),
|
||||
"filtered_count" => length(filtered_tools),
|
||||
"session_id" => state.session_id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(response)}, state}
|
||||
else
|
||||
# Client hasn't initialized yet
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"error" => %{
|
||||
"code" => -32002,
|
||||
"message" => "Client must initialize first"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, state}
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_tool_call(message, state) do
|
||||
if state.client_context do
|
||||
tool_name = get_in(message, ["params", "name"])
|
||||
|
||||
# Check if tool is allowed for this client context
|
||||
if tool_allowed_for_context?(tool_name, state.client_context) do
|
||||
# Enhance message with session info
|
||||
enhanced_message = add_websocket_session_info(message, state)
|
||||
|
||||
# Track agent ID if this is a register_agent call
|
||||
updated_state = maybe_track_agent_id(message, state)
|
||||
|
||||
# Forward to MCP server
|
||||
case MCPServer.handle_mcp_request(enhanced_message) do
|
||||
response when is_map(response) ->
|
||||
{:reply, {:text, Jason.encode!(response)}, updated_state}
|
||||
|
||||
unexpected ->
|
||||
IO.puts(:stderr, "Unexpected MCP response: #{inspect(unexpected)}")
|
||||
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"error" => %{
|
||||
"code" => -32603,
|
||||
"message" => "Internal server error"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, updated_state}
|
||||
end
|
||||
else
|
||||
# Tool not allowed for this client
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"error" => %{
|
||||
"code" => -32601,
|
||||
"message" =>
|
||||
"Tool not available for #{state.client_context.connection_type} clients: #{tool_name}"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, state}
|
||||
end
|
||||
else
|
||||
# Client hasn't initialized yet
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"error" => %{
|
||||
"code" => -32002,
|
||||
"message" => "Client must initialize first"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, state}
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_initialized_notification(_message, state) do
|
||||
# Client is ready to receive notifications
|
||||
IO.puts(:stderr, "WebSocket client initialized: #{state.session_id}")
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
defp forward_to_mcp_server(message, state) do
|
||||
if state.client_context do
|
||||
enhanced_message = add_websocket_session_info(message, state)
|
||||
|
||||
case MCPServer.handle_mcp_request(enhanced_message) do
|
||||
response when is_map(response) ->
|
||||
{:reply, {:text, Jason.encode!(response)}, state}
|
||||
|
||||
nil ->
|
||||
# Some notifications don't return responses
|
||||
{:ok, state}
|
||||
|
||||
unexpected ->
|
||||
IO.puts(:stderr, "Unexpected MCP response: #{inspect(unexpected)}")
|
||||
{:ok, state}
|
||||
end
|
||||
else
|
||||
error_response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => Map.get(message, "id"),
|
||||
"error" => %{
|
||||
"code" => -32002,
|
||||
"message" => "Client must initialize first"
|
||||
}
|
||||
}
|
||||
|
||||
{:reply, {:text, Jason.encode!(error_response)}, state}
|
||||
end
|
||||
end
|
||||
|
||||
defp add_websocket_session_info(message, state) do
|
||||
# Add session tracking info to the message
|
||||
params = Map.get(message, "params", %{})
|
||||
|
||||
enhanced_params =
|
||||
params
|
||||
|> Map.put("_session_id", state.session_id)
|
||||
|> Map.put("_transport", "websocket")
|
||||
|> Map.put("_client_context", %{
|
||||
connection_type: state.client_context.connection_type,
|
||||
security_level: state.client_context.security_level,
|
||||
session_id: state.session_id
|
||||
})
|
||||
|
||||
Map.put(message, "params", enhanced_params)
|
||||
end
|
||||
|
||||
defp tool_allowed_for_context?(tool_name, client_context) do
|
||||
all_tools = MCPServer.get_tools()
|
||||
filtered_tools = ToolFilter.filter_tools(all_tools, client_context)
|
||||
|
||||
Enum.any?(filtered_tools, fn tool ->
|
||||
Map.get(tool, "name") == tool_name
|
||||
end)
|
||||
end
|
||||
|
||||
defp maybe_track_agent_id(message, state) do
|
||||
case get_in(message, ["params", "name"]) do
|
||||
"register_agent" ->
|
||||
# We'll get the agent_id from the response, but for now mark that we expect one
|
||||
%{state | agent_id: :pending}
|
||||
|
||||
_ ->
|
||||
state
|
||||
end
|
||||
end
|
||||
|
||||
defp cleanup_session(state) do
|
||||
# Unregister agent if one was registered through this session
|
||||
if state.agent_id && state.agent_id != :pending do
|
||||
unregister_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => generate_request_id(),
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "unregister_agent",
|
||||
"arguments" => %{
|
||||
"agent_id" => state.agent_id,
|
||||
"reason" => "WebSocket connection closed"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MCPServer.handle_mcp_request(unregister_request)
|
||||
end
|
||||
end
|
||||
|
||||
defp generate_request_id do
|
||||
"ws_req_" <> (:crypto.strong_rand_bytes(8) |> Base.encode16(case: :lower))
|
||||
end
|
||||
end
|
||||
@@ -10,12 +10,6 @@
|
||||
"auto_restart": true,
|
||||
"description": "Context7 library documentation server"
|
||||
},
|
||||
"mcp_figma": {
|
||||
"url": "http://127.0.0.1:3845/mcp",
|
||||
"type": "http",
|
||||
"auto_restart": true,
|
||||
"description": "Figma design integration server"
|
||||
},
|
||||
"mcp_filesystem": {
|
||||
"type": "stdio",
|
||||
"command": "bunx",
|
||||
@@ -54,4 +48,4 @@
|
||||
"auto_restart_delay": 1000,
|
||||
"max_restart_attempts": 3
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
6
mix.exs
6
mix.exs
@@ -48,6 +48,12 @@ defmodule AgentCoordinator.MixProject do
|
||||
{:gen_stage, "~> 1.2"},
|
||||
{:uuid, "~> 1.1"},
|
||||
|
||||
# HTTP server dependencies
|
||||
{:plug, "~> 1.15"},
|
||||
{:plug_cowboy, "~> 2.7"},
|
||||
{:websock_adapter, "~> 0.5"},
|
||||
{:cors_plug, "~> 3.0"},
|
||||
|
||||
# Development and testing dependencies
|
||||
{:ex_doc, "~> 0.34", only: :dev, runtime: false},
|
||||
{:dialyxir, "~> 1.4", only: [:dev], runtime: false},
|
||||
|
||||
10
mix.lock
10
mix.lock
@@ -2,6 +2,9 @@
|
||||
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
|
||||
"chacha20": {:hex, :chacha20, "1.0.4", "0359d8f9a32269271044c1b471d5cf69660c362a7c61a98f73a05ef0b5d9eb9e", [:mix], [], "hexpm", "2027f5d321ae9903f1f0da7f51b0635ad6b8819bc7fe397837930a2011bc2349"},
|
||||
"connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"},
|
||||
"cors_plug": {:hex, :cors_plug, "3.0.3", "7c3ac52b39624bc616db2e937c282f3f623f25f8d550068b6710e58d04a0e330", [:mix], [{:plug, "~> 1.13", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "3f2d759e8c272ed3835fab2ef11b46bddab8c1ab9528167bd463b6452edf830d"},
|
||||
"cowboy": {:hex, :cowboy, "2.13.0", "09d770dd5f6a22cc60c071f432cd7cb87776164527f205c5a6b0f24ff6b38990", [:make, :rebar3], [{:cowlib, ">= 2.14.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "e724d3a70995025d654c1992c7b11dbfea95205c047d86ff9bf1cda92ddc5614"},
|
||||
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
|
||||
"cowlib": {:hex, :cowlib, "2.15.0", "3c97a318a933962d1c12b96ab7c1d728267d2c523c25a5b57b0f93392b6e9e25", [:make, :rebar3], [], "hexpm", "4f00c879a64b4fe7c8fcb42a4281925e9ffdb928820b03c3ad325a617e857532"},
|
||||
"credo": {:hex, :credo, "1.7.12", "9e3c20463de4b5f3f23721527fcaf16722ec815e70ff6c60b86412c695d426c1", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8493d45c656c5427d9c729235b99d498bd133421f3e0a683e5c1b561471291e5"},
|
||||
"curve25519": {:hex, :curve25519, "1.0.5", "f801179424e4012049fcfcfcda74ac04f65d0ffceeb80e7ef1d3352deb09f5bb", [:mix], [], "hexpm", "0fba3ad55bf1154d4d5fc3ae5fb91b912b77b13f0def6ccb3a5d58168ff4192d"},
|
||||
@@ -20,11 +23,18 @@
|
||||
"makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"},
|
||||
"makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"},
|
||||
"makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"},
|
||||
"mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
|
||||
"nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"},
|
||||
"nkeys": {:hex, :nkeys, "0.3.0", "837add5261a3cdd8ff75b54e0475062313093929ab5e042fa48e010f33b10d16", [:mix], [{:ed25519, "~> 1.3", [hex: :ed25519, repo: "hexpm", optional: false]}, {:kcl, "~> 1.4", [hex: :kcl, repo: "hexpm", optional: false]}], "hexpm", "b5af773a296620ee8eeb1ec6dc5b68f716386f7e53f7bda8c4ac23515823dfe4"},
|
||||
"phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"},
|
||||
"plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"},
|
||||
"plug_cowboy": {:hex, :plug_cowboy, "2.7.4", "729c752d17cf364e2b8da5bdb34fb5804f56251e88bb602aff48ae0bd8673d11", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "9b85632bd7012615bae0a5d70084deb1b25d2bcbb32cab82d1e9a1e023168aa3"},
|
||||
"plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
|
||||
"poly1305": {:hex, :poly1305, "1.0.4", "7cdc8961a0a6e00a764835918cdb8ade868044026df8ef5d718708ea6cc06611", [:mix], [{:chacha20, "~> 1.0", [hex: :chacha20, repo: "hexpm", optional: false]}, {:equivalex, "~> 1.0", [hex: :equivalex, repo: "hexpm", optional: false]}], "hexpm", "e14e684661a5195e149b3139db4a1693579d4659d65bba115a307529c47dbc3b"},
|
||||
"ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
|
||||
"salsa20": {:hex, :salsa20, "1.0.4", "404cbea1fa8e68a41bcc834c0a2571ac175580fec01cc38cc70c0fb9ffc87e9b", [:mix], [], "hexpm", "745ddcd8cfa563ddb0fd61e7ce48d5146279a2cf7834e1da8441b369fdc58ac6"},
|
||||
"telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
|
||||
"uuid": {:hex, :uuid, "1.1.8", "e22fc04499de0de3ed1116b770c7737779f226ceefa0badb3592e64d5cfb4eb9", [:mix], [], "hexpm", "c790593b4c3b601f5dc2378baae7efaf5b3d73c4c6456ba85759905be792f2ac"},
|
||||
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
|
||||
"websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"},
|
||||
}
|
||||
|
||||
12
nats-server.conf
Normal file
12
nats-server.conf
Normal file
@@ -0,0 +1,12 @@
|
||||
port: 4222
|
||||
|
||||
jetstream {
|
||||
store_dir: /var/lib/nats/jetstream
|
||||
max_memory_store: 1GB
|
||||
max_file_store: 10GB
|
||||
}
|
||||
|
||||
http_port: 8222
|
||||
log_file: "/var/log/nats-server.log"
|
||||
debug: false
|
||||
trace: false
|
||||
@@ -6,20 +6,27 @@
|
||||
|
||||
set -e
|
||||
|
||||
CALLER_PWD="${PWD}"
|
||||
WORKSPACE_DIR="${MCP_WORKSPACE_DIR:-$CALLER_PWD}"
|
||||
|
||||
export PATH="$HOME/.asdf/shims:$PATH"
|
||||
|
||||
# Change to the project directory
|
||||
cd "$(dirname "$0")/.."
|
||||
export MCP_WORKSPACE_DIR="$WORKSPACE_DIR"
|
||||
|
||||
# Set environment
|
||||
export MIX_ENV="${MIX_ENV:-dev}"
|
||||
export NATS_HOST="${NATS_HOST:-localhost}"
|
||||
export NATS_PORT="${NATS_PORT:-4222}"
|
||||
|
||||
# Log startup
|
||||
# Log startup with workspace information
|
||||
echo "Starting AgentCoordinator Unified MCP Server..." >&2
|
||||
echo "Environment: $MIX_ENV" >&2
|
||||
echo "NATS: $NATS_HOST:$NATS_PORT" >&2
|
||||
echo "Caller PWD: $CALLER_PWD" >&2
|
||||
echo "Workspace Directory: $WORKSPACE_DIR" >&2
|
||||
echo "Agent Coordinator Directory: $(pwd)" >&2
|
||||
|
||||
# Start the Elixir application with unified MCP server
|
||||
exec mix run --no-halt -e "
|
||||
@@ -28,76 +35,16 @@ exec mix run --no-halt -e "
|
||||
|
||||
# MCPServerManager is now started by the application supervisor automatically
|
||||
|
||||
case AgentCoordinator.UnifiedMCPServer.start_link() do
|
||||
case AgentCoordinator.MCPServer.start_link() do
|
||||
{:ok, _} -> :ok
|
||||
{:error, {:already_started, _}} -> :ok
|
||||
{:error, reason} -> raise \"Failed to start UnifiedMCPServer: #{inspect(reason)}\"
|
||||
{:error, reason} -> raise \"Failed to start MCPServer: #{inspect(reason)}\"
|
||||
end
|
||||
|
||||
# Log that we're ready
|
||||
IO.puts(:stderr, \"Unified MCP server ready with automatic task tracking\")
|
||||
|
||||
# Handle MCP JSON-RPC messages through the unified server
|
||||
defmodule UnifiedMCPStdio do
|
||||
def start do
|
||||
spawn_link(fn -> message_loop() end)
|
||||
Process.sleep(:infinity)
|
||||
end
|
||||
|
||||
defp message_loop do
|
||||
case IO.read(:stdio, :line) do
|
||||
:eof ->
|
||||
IO.puts(:stderr, \"Unified MCP server shutting down\")
|
||||
System.halt(0)
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, \"IO Error: #{inspect(reason)}\")
|
||||
System.halt(1)
|
||||
line ->
|
||||
handle_message(String.trim(line))
|
||||
message_loop()
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_message(\"\"), do: :ok
|
||||
defp handle_message(json_line) do
|
||||
try do
|
||||
request = Jason.decode!(json_line)
|
||||
|
||||
# Route through unified MCP server for automatic task tracking
|
||||
response = AgentCoordinator.UnifiedMCPServer.handle_mcp_request(request)
|
||||
IO.puts(Jason.encode!(response))
|
||||
rescue
|
||||
e in Jason.DecodeError ->
|
||||
error_response = %{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => nil,
|
||||
\"error\" => %{
|
||||
\"code\" => -32700,
|
||||
\"message\" => \"Parse error: #{Exception.message(e)}\"
|
||||
}
|
||||
}
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
e ->
|
||||
# Try to get the ID from the malformed request
|
||||
id = try do
|
||||
partial = Jason.decode!(json_line)
|
||||
Map.get(partial, \"id\")
|
||||
rescue
|
||||
_ -> nil
|
||||
end
|
||||
|
||||
error_response = %{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => id,
|
||||
\"error\" => %{
|
||||
\"code\" => -32603,
|
||||
\"message\" => \"Internal error: #{Exception.message(e)}\"
|
||||
}
|
||||
}
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
UnifiedMCPStdio.start()
|
||||
"
|
||||
# STDIO handling is now managed by InterfaceManager, not here
|
||||
# Just keep the process alive
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
|
||||
239
scripts/mcp_launcher_multi.sh
Executable file
239
scripts/mcp_launcher_multi.sh
Executable file
@@ -0,0 +1,239 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AgentCoordinator Multi-Interface MCP Server Launcher
|
||||
# This script starts the unified MCP server with support for multiple interface modes:
|
||||
# - stdio: Traditional MCP over stdio (default for VSCode)
|
||||
# - http: HTTP REST API for remote clients
|
||||
# - websocket: WebSocket interface for real-time web clients
|
||||
# - remote: Both HTTP and WebSocket
|
||||
# - all: All interface modes
|
||||
|
||||
set -e
|
||||
|
||||
CALLER_PWD="${PWD}"
|
||||
WORKSPACE_DIR="${MCP_WORKSPACE_DIR:-$CALLER_PWD}"
|
||||
|
||||
export PATH="$HOME/.asdf/shims:$PATH"
|
||||
|
||||
# Change to the project directory
|
||||
cd "$(dirname "$0")/.."
|
||||
export MCP_WORKSPACE_DIR="$WORKSPACE_DIR"
|
||||
|
||||
# Parse command line arguments
|
||||
INTERFACE_MODE="${1:-stdio}"
|
||||
HTTP_PORT="${2:-8080}"
|
||||
WS_PORT="${3:-8081}"
|
||||
|
||||
# Set environment variables
|
||||
export MIX_ENV="${MIX_ENV:-dev}"
|
||||
export NATS_HOST="${NATS_HOST:-localhost}"
|
||||
export NATS_PORT="${NATS_PORT:-4222}"
|
||||
export MCP_INTERFACE_MODE="$INTERFACE_MODE"
|
||||
export MCP_HTTP_PORT="$HTTP_PORT"
|
||||
export MCP_WS_PORT="$WS_PORT"
|
||||
|
||||
# Validate interface mode
|
||||
case "$INTERFACE_MODE" in
|
||||
stdio|http|websocket|remote|all)
|
||||
;;
|
||||
*)
|
||||
echo "Invalid interface mode: $INTERFACE_MODE"
|
||||
echo "Valid modes: stdio, http, websocket, remote, all"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Log startup
|
||||
echo "Starting AgentCoordinator Multi-Interface MCP Server..." >&2
|
||||
echo "Interface Mode: $INTERFACE_MODE" >&2
|
||||
echo "Environment: $MIX_ENV" >&2
|
||||
echo "NATS: $NATS_HOST:$NATS_PORT" >&2
|
||||
|
||||
if [[ "$INTERFACE_MODE" != "stdio" ]]; then
|
||||
echo "HTTP Port: $HTTP_PORT" >&2
|
||||
echo "WebSocket Port: $WS_PORT" >&2
|
||||
fi
|
||||
|
||||
# Install dependencies if needed
|
||||
if [[ ! -d "deps" ]] || [[ ! -d "_build" ]]; then
|
||||
echo "Installing dependencies..." >&2
|
||||
mix deps.get
|
||||
mix compile
|
||||
fi
|
||||
|
||||
# Start the appropriate interface mode
|
||||
case "$INTERFACE_MODE" in
|
||||
stdio)
|
||||
# Traditional stdio mode for VSCode and local clients
|
||||
exec mix run --no-halt -e "
|
||||
# Ensure all applications are started
|
||||
{:ok, _} = Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Configure interface manager for stdio only
|
||||
Application.put_env(:agent_coordinator, :interfaces, %{
|
||||
enabled_interfaces: [:stdio],
|
||||
stdio: %{enabled: true, handle_stdio: true},
|
||||
http: %{enabled: false},
|
||||
websocket: %{enabled: false}
|
||||
})
|
||||
|
||||
# MCPServer and InterfaceManager are started by the application supervisor automatically
|
||||
IO.puts(:stderr, \"STDIO MCP server ready with tool filtering\")
|
||||
|
||||
# Handle MCP JSON-RPC messages through the unified server
|
||||
defmodule StdioMCPHandler do
|
||||
def start do
|
||||
spawn_link(fn -> message_loop() end)
|
||||
Process.sleep(:infinity)
|
||||
end
|
||||
|
||||
defp message_loop do
|
||||
case IO.read(:stdio, :line) do
|
||||
:eof ->
|
||||
IO.puts(:stderr, \"MCP server shutting down\")
|
||||
System.halt(0)
|
||||
{:error, reason} ->
|
||||
IO.puts(:stderr, \"IO Error: #{inspect(reason)}\")
|
||||
System.halt(1)
|
||||
line ->
|
||||
handle_message(String.trim(line))
|
||||
message_loop()
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_message(\"\"), do: :ok
|
||||
defp handle_message(json_line) do
|
||||
try do
|
||||
request = Jason.decode!(json_line)
|
||||
# Route through unified MCP server with local context (full tool access)
|
||||
response = AgentCoordinator.MCPServer.handle_mcp_request(request)
|
||||
IO.puts(Jason.encode!(response))
|
||||
rescue
|
||||
e in Jason.DecodeError ->
|
||||
error_response = %{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => nil,
|
||||
\"error\" => %{
|
||||
\"code\" => -32700,
|
||||
\"message\" => \"Parse error: #{Exception.message(e)}\"
|
||||
}
|
||||
}
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
e ->
|
||||
id = try do
|
||||
partial = Jason.decode!(json_line)
|
||||
Map.get(partial, \"id\")
|
||||
rescue
|
||||
_ -> nil
|
||||
end
|
||||
|
||||
error_response = %{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => id,
|
||||
\"error\" => %{
|
||||
\"code\" => -32603,
|
||||
\"message\" => \"Internal error: #{Exception.message(e)}\"
|
||||
}
|
||||
}
|
||||
IO.puts(Jason.encode!(error_response))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
StdioMCPHandler.start()
|
||||
"
|
||||
;;
|
||||
|
||||
http)
|
||||
# HTTP-only mode for REST API clients
|
||||
exec mix run --no-halt -e "
|
||||
# Ensure all applications are started
|
||||
{:ok, _} = Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Configure interface manager for HTTP only
|
||||
Application.put_env(:agent_coordinator, :interfaces, %{
|
||||
enabled_interfaces: [:http],
|
||||
stdio: %{enabled: false},
|
||||
http: %{enabled: true, port: $HTTP_PORT, host: \"0.0.0.0\"},
|
||||
websocket: %{enabled: false}
|
||||
})
|
||||
|
||||
IO.puts(:stderr, \"HTTP MCP server ready on port $HTTP_PORT with tool filtering\")
|
||||
IO.puts(:stderr, \"Available endpoints:\")
|
||||
IO.puts(:stderr, \" GET /health - Health check\")
|
||||
IO.puts(:stderr, \" GET /mcp/capabilities - Server capabilities\")
|
||||
IO.puts(:stderr, \" GET /mcp/tools - Available tools (filtered)\")
|
||||
IO.puts(:stderr, \" POST /mcp/tools/:tool_name - Execute tool\")
|
||||
IO.puts(:stderr, \" POST /mcp/request - Full MCP request\")
|
||||
IO.puts(:stderr, \" GET /agents - Agent status\")
|
||||
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
;;
|
||||
|
||||
websocket)
|
||||
# WebSocket-only mode
|
||||
exec mix run --no-halt -e "
|
||||
# Ensure all applications are started
|
||||
{:ok, _} = Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Configure interface manager for WebSocket only
|
||||
Application.put_env(:agent_coordinator, :interfaces, %{
|
||||
enabled_interfaces: [:websocket],
|
||||
stdio: %{enabled: false},
|
||||
http: %{enabled: true, port: $WS_PORT, host: \"0.0.0.0\"},
|
||||
websocket: %{enabled: true, port: $WS_PORT}
|
||||
})
|
||||
|
||||
IO.puts(:stderr, \"WebSocket MCP server ready on port $WS_PORT with tool filtering\")
|
||||
IO.puts(:stderr, \"WebSocket endpoint: ws://localhost:$WS_PORT/mcp/ws\")
|
||||
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
;;
|
||||
|
||||
remote)
|
||||
# Both HTTP and WebSocket for remote clients
|
||||
exec mix run --no-halt -e "
|
||||
# Ensure all applications are started
|
||||
{:ok, _} = Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Configure interface manager for remote access
|
||||
Application.put_env(:agent_coordinator, :interfaces, %{
|
||||
enabled_interfaces: [:http, :websocket],
|
||||
stdio: %{enabled: false},
|
||||
http: %{enabled: true, port: $HTTP_PORT, host: \"0.0.0.0\"},
|
||||
websocket: %{enabled: true, port: $HTTP_PORT}
|
||||
})
|
||||
|
||||
IO.puts(:stderr, \"Remote MCP server ready on port $HTTP_PORT with tool filtering\")
|
||||
IO.puts(:stderr, \"HTTP endpoints available at http://localhost:$HTTP_PORT/\")
|
||||
IO.puts(:stderr, \"WebSocket endpoint: ws://localhost:$HTTP_PORT/mcp/ws\")
|
||||
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
;;
|
||||
|
||||
all)
|
||||
# All interface modes
|
||||
exec mix run --no-halt -e "
|
||||
# Ensure all applications are started
|
||||
{:ok, _} = Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
# Configure interface manager for all interfaces
|
||||
Application.put_env(:agent_coordinator, :interfaces, %{
|
||||
enabled_interfaces: [:stdio, :http, :websocket],
|
||||
stdio: %{enabled: true, handle_stdio: false}, # Don't handle stdio in all mode
|
||||
http: %{enabled: true, port: $HTTP_PORT, host: \"0.0.0.0\"},
|
||||
websocket: %{enabled: true, port: $HTTP_PORT}
|
||||
})
|
||||
|
||||
IO.puts(:stderr, \"Multi-interface MCP server ready with tool filtering\")
|
||||
IO.puts(:stderr, \"STDIO: Available for local MCP clients\")
|
||||
IO.puts(:stderr, \"HTTP: Available at http://localhost:$HTTP_PORT/\")
|
||||
IO.puts(:stderr, \"WebSocket: Available at ws://localhost:$HTTP_PORT/mcp/ws\")
|
||||
|
||||
Process.sleep(:infinity)
|
||||
"
|
||||
;;
|
||||
esac
|
||||
@@ -1,73 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Ultra-minimal test that doesn't start the full application
|
||||
|
||||
echo "🔬 Ultra-Minimal AgentCoordinator Test"
|
||||
echo "======================================"
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
echo "📋 Testing compilation..."
|
||||
if mix compile >/dev/null 2>&1; then
|
||||
echo "✅ Compilation successful"
|
||||
else
|
||||
echo "❌ Compilation failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "📋 Testing MCP server without application startup..."
|
||||
if timeout 10 mix run --no-start -e "
|
||||
# Load compiled modules without starting application
|
||||
Code.ensure_loaded(AgentCoordinator.MCPServer)
|
||||
|
||||
# Test MCP server directly
|
||||
try do
|
||||
# Start just the required processes manually
|
||||
{:ok, _} = Registry.start_link(keys: :unique, name: AgentCoordinator.InboxRegistry)
|
||||
{:ok, _} = Phoenix.PubSub.start_link(name: AgentCoordinator.PubSub)
|
||||
|
||||
# Start TaskRegistry without NATS
|
||||
{:ok, _} = GenServer.start_link(AgentCoordinator.TaskRegistry, [nats: nil], name: AgentCoordinator.TaskRegistry)
|
||||
|
||||
# Start MCP server
|
||||
{:ok, _} = GenServer.start_link(AgentCoordinator.MCPServer, %{}, name: AgentCoordinator.MCPServer)
|
||||
|
||||
IO.puts('✅ Core components started')
|
||||
|
||||
# Test MCP functionality
|
||||
response = AgentCoordinator.MCPServer.handle_mcp_request(%{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => 1,
|
||||
\"method\" => \"tools/list\"
|
||||
})
|
||||
|
||||
case response do
|
||||
%{\"result\" => %{\"tools\" => tools}} when is_list(tools) ->
|
||||
IO.puts(\"✅ MCP server working (#{length(tools)} tools)\")
|
||||
_ ->
|
||||
IO.puts(\"❌ MCP server not working: #{inspect(response)}\")
|
||||
end
|
||||
|
||||
rescue
|
||||
e ->
|
||||
IO.puts(\"❌ Error: #{inspect(e)}\")
|
||||
end
|
||||
|
||||
System.halt(0)
|
||||
"; then
|
||||
echo "✅ Minimal test passed!"
|
||||
else
|
||||
echo "❌ Minimal test failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "🎉 Core MCP functionality works!"
|
||||
echo ""
|
||||
echo "📝 The hanging issue was due to NATS persistence trying to connect."
|
||||
echo " Your MCP server core functionality is working perfectly."
|
||||
echo ""
|
||||
echo "🚀 To run with proper NATS setup:"
|
||||
echo " 1. Make sure NATS server is running: sudo systemctl start nats"
|
||||
echo " 2. Or run: nats-server -js -p 4222 -m 8222 &"
|
||||
echo " 3. Then use: ../scripts/mcp_launcher.sh"
|
||||
@@ -1,54 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Quick test script to verify Agentecho "💡 Next steps:"
|
||||
echo " 1. Run scripts/setup.sh to configure VS Code integration"
|
||||
echo " 2. Or test manually with: scripts/mcp_launcher.sh"rdinator works without getting stuck
|
||||
|
||||
echo "🧪 Quick AgentCoordinator Test"
|
||||
echo "=============================="
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
echo "📋 Testing basic compilation..."
|
||||
if mix compile --force >/dev/null 2>&1; then
|
||||
echo "✅ Compilation successful"
|
||||
else
|
||||
echo "❌ Compilation failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "📋 Testing application startup (without persistence)..."
|
||||
if timeout 10 mix run -e "
|
||||
Application.put_env(:agent_coordinator, :enable_persistence, false)
|
||||
{:ok, _apps} = Application.ensure_all_started(:agent_coordinator)
|
||||
IO.puts('✅ Application started successfully')
|
||||
|
||||
# Quick MCP server test
|
||||
response = AgentCoordinator.MCPServer.handle_mcp_request(%{
|
||||
\"jsonrpc\" => \"2.0\",
|
||||
\"id\" => 1,
|
||||
\"method\" => \"tools/list\"
|
||||
})
|
||||
|
||||
case response do
|
||||
%{\"result\" => %{\"tools\" => tools}} when is_list(tools) ->
|
||||
IO.puts(\"✅ MCP server working (#{length(tools)} tools available)\")
|
||||
_ ->
|
||||
IO.puts(\"❌ MCP server not responding correctly\")
|
||||
end
|
||||
|
||||
System.halt(0)
|
||||
"; then
|
||||
echo "✅ Quick test passed!"
|
||||
else
|
||||
echo "❌ Quick test failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "🎉 AgentCoordinator is ready!"
|
||||
echo ""
|
||||
echo "🚀 Next steps:"
|
||||
echo " 1. Run ./setup.sh to configure VS Code integration"
|
||||
echo " 2. Or test manually with: ./mcp_launcher.sh"
|
||||
echo " 3. Or run Python example: python3 mcp_client_example.py"
|
||||
@@ -145,7 +145,7 @@ if [ -f "$SETTINGS_FILE" ]; then
|
||||
echo "$MCP_CONFIG" | jq -s '.[0] * .[1]' "$SETTINGS_FILE" - > "$SETTINGS_FILE.tmp"
|
||||
mv "$SETTINGS_FILE.tmp" "$SETTINGS_FILE"
|
||||
else
|
||||
echo "⚠️ jq not found. Please manually add MCP configuration to $SETTINGS_FILE"
|
||||
echo "jq not found. Please manually add MCP configuration to $SETTINGS_FILE"
|
||||
echo "Add this configuration:"
|
||||
echo "$MCP_CONFIG"
|
||||
fi
|
||||
@@ -153,25 +153,25 @@ else
|
||||
echo "$MCP_CONFIG" > "$SETTINGS_FILE"
|
||||
fi
|
||||
|
||||
echo "✅ VS Code settings updated"
|
||||
echo "VS Code settings updated"
|
||||
|
||||
# Test MCP server
|
||||
echo -e "\n🧪 Testing MCP server..."
|
||||
echo -e "\nTesting MCP server..."
|
||||
cd "$PROJECT_DIR"
|
||||
if timeout 5 ./scripts/mcp_launcher.sh >/dev/null 2>&1; then
|
||||
echo "✅ MCP server test passed"
|
||||
echo "MCP server test passed"
|
||||
else
|
||||
echo "⚠️ MCP server test timed out (this is expected)"
|
||||
echo "MCP server test timed out (this is expected)"
|
||||
fi
|
||||
|
||||
# Create desktop shortcut for easy access
|
||||
echo -e "\n🖥️ Creating desktop shortcuts..."
|
||||
echo -e "\nCreating desktop shortcuts..."
|
||||
|
||||
# Start script
|
||||
cat > "$PROJECT_DIR/start_agent_coordinator.sh" << 'EOF'
|
||||
#!/bin/bash
|
||||
cd "$(dirname "$0")"
|
||||
echo "🚀 Starting AgentCoordinator..."
|
||||
echo "Starting AgentCoordinator..."
|
||||
|
||||
# Start NATS if not running
|
||||
if ! pgrep -f nats-server > /dev/null; then
|
||||
@@ -191,7 +191,7 @@ chmod +x "$PROJECT_DIR/start_agent_coordinator.sh"
|
||||
# Stop script
|
||||
cat > "$PROJECT_DIR/stop_agent_coordinator.sh" << 'EOF'
|
||||
#!/bin/bash
|
||||
echo "🛑 Stopping AgentCoordinator..."
|
||||
echo "Stopping AgentCoordinator..."
|
||||
|
||||
# Stop NATS if we started it
|
||||
if [ -f /tmp/nats.pid ]; then
|
||||
@@ -203,24 +203,24 @@ fi
|
||||
pkill -f "scripts/mcp_launcher.sh" || true
|
||||
pkill -f "agent_coordinator" || true
|
||||
|
||||
echo "✅ AgentCoordinator stopped"
|
||||
echo "AgentCoordinator stopped"
|
||||
EOF
|
||||
|
||||
chmod +x "$PROJECT_DIR/stop_agent_coordinator.sh"
|
||||
|
||||
echo "✅ Created start/stop scripts"
|
||||
echo "Created start/stop scripts"
|
||||
|
||||
# Final instructions
|
||||
echo -e "\n🎉 Setup Complete!"
|
||||
echo -e "\nSetup Complete!"
|
||||
echo "==================="
|
||||
echo ""
|
||||
echo "📋 Next Steps:"
|
||||
echo "Next Steps:"
|
||||
echo ""
|
||||
echo "1. 🔄 Restart VS Code to load the new MCP configuration"
|
||||
echo "1. Restart VS Code to load the new MCP configuration"
|
||||
echo " - Close all VS Code windows"
|
||||
echo " - Reopen VS Code in your project"
|
||||
echo ""
|
||||
echo "2. 🤖 GitHub Copilot should now have access to AgentCoordinator tools:"
|
||||
echo "2. GitHub Copilot should now have access to AgentCoordinator tools:"
|
||||
echo " - register_agent"
|
||||
echo " - create_task"
|
||||
echo " - get_next_task"
|
||||
@@ -233,14 +233,13 @@ echo " - Ask Copilot: 'Register me as an agent with coding capabilities'"
|
||||
echo " - Ask Copilot: 'Create a task to refactor the login module'"
|
||||
echo " - Ask Copilot: 'Show me the task board'"
|
||||
echo ""
|
||||
echo "📂 Useful files:"
|
||||
echo " Useful files:"
|
||||
echo " - Start server: $PROJECT_DIR/start_agent_coordinator.sh"
|
||||
echo " - Stop server: $PROJECT_DIR/stop_agent_coordinator.sh"
|
||||
echo " - Test client: $PROJECT_DIR/mcp_client_example.py"
|
||||
echo " - VS Code settings: $SETTINGS_FILE"
|
||||
echo ""
|
||||
echo "🔧 Manual start (if needed):"
|
||||
echo " cd $PROJECT_DIR && ./scripts/mcp_launcher.sh"
|
||||
echo ""
|
||||
echo "💡 Tip: The MCP server will auto-start when Copilot needs it!"
|
||||
echo ""
|
||||
echo ""
|
||||
|
||||
282
scripts/test_multi_interface.py
Executable file
282
scripts/test_multi_interface.py
Executable file
@@ -0,0 +1,282 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script for Agent Coordinator Multi-Interface MCP Server.
|
||||
|
||||
This script tests:
|
||||
1. HTTP interface with tool filtering
|
||||
2. WebSocket interface with real-time communication
|
||||
3. Tool filtering based on client context
|
||||
4. Agent registration and coordination
|
||||
"""
|
||||
|
||||
import json
|
||||
import requests
|
||||
import websocket
|
||||
import asyncio
|
||||
import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
BASE_URL = "http://localhost:8080"
|
||||
WS_URL = "ws://localhost:8080/mcp/ws"
|
||||
|
||||
def test_http_interface():
|
||||
"""Test HTTP interface and tool filtering."""
|
||||
print("\n=== Testing HTTP Interface ===")
|
||||
|
||||
# Test health endpoint
|
||||
try:
|
||||
response = requests.get(f"{BASE_URL}/health")
|
||||
print(f"Health check: {response.status_code}")
|
||||
if response.status_code == 200:
|
||||
print(f"Health data: {response.json()}")
|
||||
except Exception as e:
|
||||
print(f"Health check failed: {e}")
|
||||
return False
|
||||
|
||||
# Test capabilities endpoint
|
||||
try:
|
||||
response = requests.get(f"{BASE_URL}/mcp/capabilities")
|
||||
print(f"Capabilities: {response.status_code}")
|
||||
if response.status_code == 200:
|
||||
caps = response.json()
|
||||
print(f"Tools available: {len(caps.get('tools', []))}")
|
||||
print(f"Connection type: {caps.get('context', {}).get('connection_type')}")
|
||||
print(f"Security level: {caps.get('context', {}).get('security_level')}")
|
||||
|
||||
# Check that local-only tools are filtered out
|
||||
tool_names = [tool.get('name') for tool in caps.get('tools', [])]
|
||||
local_tools = ['read_file', 'vscode_create_file', 'run_in_terminal']
|
||||
filtered_out = [tool for tool in local_tools if tool not in tool_names]
|
||||
print(f"Local tools filtered out: {filtered_out}")
|
||||
except Exception as e:
|
||||
print(f"Capabilities test failed: {e}")
|
||||
return False
|
||||
|
||||
# Test tool list endpoint
|
||||
try:
|
||||
response = requests.get(f"{BASE_URL}/mcp/tools")
|
||||
print(f"Tools list: {response.status_code}")
|
||||
if response.status_code == 200:
|
||||
tools = response.json()
|
||||
print(f"Filter stats: {tools.get('_meta', {}).get('filter_stats')}")
|
||||
except Exception as e:
|
||||
print(f"Tools list test failed: {e}")
|
||||
return False
|
||||
|
||||
# Test agent registration
|
||||
try:
|
||||
register_data = {
|
||||
"arguments": {
|
||||
"name": "Test Agent HTTP",
|
||||
"capabilities": ["testing", "analysis"]
|
||||
}
|
||||
}
|
||||
response = requests.post(f"{BASE_URL}/mcp/tools/register_agent",
|
||||
json=register_data,
|
||||
headers={"Content-Type": "application/json"})
|
||||
print(f"Agent registration: {response.status_code}")
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print(f"Registration result: {result.get('result')}")
|
||||
return result.get('result', {}).get('agent_id')
|
||||
except Exception as e:
|
||||
print(f"Agent registration failed: {e}")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def test_websocket_interface():
|
||||
"""Test WebSocket interface with real-time communication."""
|
||||
print("\n=== Testing WebSocket Interface ===")
|
||||
|
||||
messages_received = []
|
||||
|
||||
def on_message(ws, message):
|
||||
print(f"Received: {message}")
|
||||
messages_received.append(json.loads(message))
|
||||
|
||||
def on_error(ws, error):
|
||||
print(f"WebSocket error: {error}")
|
||||
|
||||
def on_close(ws, close_status_code, close_msg):
|
||||
print("WebSocket connection closed")
|
||||
|
||||
def on_open(ws):
|
||||
print("WebSocket connection opened")
|
||||
|
||||
# Send initialize message
|
||||
init_msg = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"method": "initialize",
|
||||
"params": {
|
||||
"protocolVersion": "2024-11-05",
|
||||
"clientInfo": {
|
||||
"name": "test-websocket-client",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"capabilities": ["coordination"]
|
||||
}
|
||||
}
|
||||
ws.send(json.dumps(init_msg))
|
||||
|
||||
# Wait a bit then request tools list
|
||||
time.sleep(0.5)
|
||||
tools_msg = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": 2,
|
||||
"method": "tools/list"
|
||||
}
|
||||
ws.send(json.dumps(tools_msg))
|
||||
|
||||
# Register an agent
|
||||
time.sleep(0.5)
|
||||
register_msg = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": 3,
|
||||
"method": "tools/call",
|
||||
"params": {
|
||||
"name": "register_agent",
|
||||
"arguments": {
|
||||
"name": "Test Agent WebSocket",
|
||||
"capabilities": ["testing", "websocket"]
|
||||
}
|
||||
}
|
||||
}
|
||||
ws.send(json.dumps(register_msg))
|
||||
|
||||
# Close after a delay
|
||||
time.sleep(2)
|
||||
ws.close()
|
||||
|
||||
try:
|
||||
ws = websocket.WebSocketApp(WS_URL,
|
||||
on_open=on_open,
|
||||
on_message=on_message,
|
||||
on_error=on_error,
|
||||
on_close=on_close)
|
||||
ws.run_forever()
|
||||
|
||||
print(f"Messages received: {len(messages_received)}")
|
||||
for i, msg in enumerate(messages_received):
|
||||
print(f"Message {i+1}: {msg.get('result', {}).get('_meta', 'No meta')}")
|
||||
|
||||
return len(messages_received) > 0
|
||||
except Exception as e:
|
||||
print(f"WebSocket test failed: {e}")
|
||||
return False
|
||||
|
||||
def test_tool_filtering():
|
||||
"""Test tool filtering functionality specifically."""
|
||||
print("\n=== Testing Tool Filtering ===")
|
||||
|
||||
try:
|
||||
# Get tools from HTTP (remote context)
|
||||
response = requests.get(f"{BASE_URL}/mcp/tools")
|
||||
if response.status_code != 200:
|
||||
print("Failed to get tools from HTTP")
|
||||
return False
|
||||
|
||||
remote_tools = response.json()
|
||||
tool_names = [tool.get('name') for tool in remote_tools.get('tools', [])]
|
||||
|
||||
# Check that coordination tools are present
|
||||
coordination_tools = ['register_agent', 'create_task', 'get_task_board', 'heartbeat']
|
||||
present_coordination = [tool for tool in coordination_tools if tool in tool_names]
|
||||
print(f"Coordination tools present: {present_coordination}")
|
||||
|
||||
# Check that local-only tools are filtered out
|
||||
local_only_tools = ['read_file', 'write_file', 'vscode_create_file', 'run_in_terminal']
|
||||
filtered_local = [tool for tool in local_only_tools if tool not in tool_names]
|
||||
print(f"Local-only tools filtered: {filtered_local}")
|
||||
|
||||
# Check that safe remote tools are present
|
||||
safe_remote_tools = ['create_entities', 'sequentialthinking', 'get-library-docs']
|
||||
present_safe = [tool for tool in safe_remote_tools if tool in tool_names]
|
||||
print(f"Safe remote tools present: {present_safe}")
|
||||
|
||||
# Verify filter statistics
|
||||
filter_stats = remote_tools.get('_meta', {}).get('filter_stats', {})
|
||||
print(f"Filter stats: {filter_stats}")
|
||||
|
||||
success = (
|
||||
len(present_coordination) >= 3 and # Most coordination tools present
|
||||
len(filtered_local) >= 2 and # Local tools filtered
|
||||
filter_stats.get('connection_type') == 'remote'
|
||||
)
|
||||
|
||||
return success
|
||||
except Exception as e:
|
||||
print(f"Tool filtering test failed: {e}")
|
||||
return False
|
||||
|
||||
def test_forbidden_tool_access():
|
||||
"""Test that local-only tools are properly blocked for remote clients."""
|
||||
print("\n=== Testing Forbidden Tool Access ===")
|
||||
|
||||
try:
|
||||
# Try to call a local-only tool
|
||||
forbidden_data = {
|
||||
"arguments": {
|
||||
"path": "/etc/passwd",
|
||||
"agent_id": "test_agent"
|
||||
}
|
||||
}
|
||||
response = requests.post(f"{BASE_URL}/mcp/tools/read_file",
|
||||
json=forbidden_data,
|
||||
headers={"Content-Type": "application/json"})
|
||||
|
||||
print(f"Forbidden tool call status: {response.status_code}")
|
||||
if response.status_code == 403:
|
||||
error_data = response.json()
|
||||
print(f"Expected 403 error: {error_data.get('error', {}).get('message')}")
|
||||
return True
|
||||
else:
|
||||
print(f"Unexpected response: {response.json()}")
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"Forbidden tool test failed: {e}")
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Run all tests."""
|
||||
print("Agent Coordinator Multi-Interface Test Suite")
|
||||
print("=" * 50)
|
||||
|
||||
# Test results
|
||||
results = {}
|
||||
|
||||
# HTTP Interface Test
|
||||
results['http'] = test_http_interface()
|
||||
|
||||
# WebSocket Interface Test
|
||||
results['websocket'] = test_websocket_interface()
|
||||
|
||||
# Tool Filtering Test
|
||||
results['tool_filtering'] = test_tool_filtering()
|
||||
|
||||
# Forbidden Access Test
|
||||
results['forbidden'] = test_forbidden_tool_access()
|
||||
|
||||
# Summary
|
||||
print("\n" + "=" * 50)
|
||||
print("TEST RESULTS SUMMARY")
|
||||
print("=" * 50)
|
||||
|
||||
for test_name, success in results.items():
|
||||
status = "✅ PASS" if success else "❌ FAIL"
|
||||
print(f"{test_name.ljust(20)}: {status}")
|
||||
|
||||
total_tests = len(results)
|
||||
passed_tests = sum(results.values())
|
||||
print(f"\nOverall: {passed_tests}/{total_tests} tests passed")
|
||||
|
||||
if passed_tests == total_tests:
|
||||
print("🎉 All tests passed! Multi-interface MCP server is working correctly.")
|
||||
return 0
|
||||
else:
|
||||
print("⚠️ Some tests failed. Check the implementation.")
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
exit(main())
|
||||
@@ -5,7 +5,10 @@ defmodule AgentCoordinator.AutoHeartbeatTest do
|
||||
setup do
|
||||
# Start necessary services for testing
|
||||
{:ok, _} = Registry.start_link(keys: :unique, name: AgentCoordinator.InboxRegistry)
|
||||
{:ok, _} = DynamicSupervisor.start_link(name: AgentCoordinator.InboxSupervisor, strategy: :one_for_one)
|
||||
|
||||
{:ok, _} =
|
||||
DynamicSupervisor.start_link(name: AgentCoordinator.InboxSupervisor, strategy: :one_for_one)
|
||||
|
||||
{:ok, _} = TaskRegistry.start_link()
|
||||
{:ok, _} = AgentCoordinator.MCPServer.start_link()
|
||||
{:ok, _} = AgentCoordinator.AutoHeartbeat.start_link()
|
||||
@@ -17,7 +20,11 @@ defmodule AgentCoordinator.AutoHeartbeatTest do
|
||||
describe "automatic heartbeat functionality" do
|
||||
test "agent automatically sends heartbeats during operations" do
|
||||
# Start a client with auto-heartbeat
|
||||
{:ok, client} = Client.start_session("TestAgent", [:coding], auto_heartbeat: true, heartbeat_interval: 1000)
|
||||
{:ok, client} =
|
||||
Client.start_session("TestAgent", [:coding],
|
||||
auto_heartbeat: true,
|
||||
heartbeat_interval: 1000
|
||||
)
|
||||
|
||||
# Get initial session info
|
||||
{:ok, initial_info} = Client.get_session_info(client)
|
||||
@@ -36,7 +43,11 @@ defmodule AgentCoordinator.AutoHeartbeatTest do
|
||||
|
||||
test "agent stays online with regular heartbeats" do
|
||||
# Start client
|
||||
{:ok, client} = Client.start_session("OnlineAgent", [:analysis], auto_heartbeat: true, heartbeat_interval: 500)
|
||||
{:ok, client} =
|
||||
Client.start_session("OnlineAgent", [:analysis],
|
||||
auto_heartbeat: true,
|
||||
heartbeat_interval: 500
|
||||
)
|
||||
|
||||
# Get agent info
|
||||
{:ok, session_info} = Client.get_session_info(client)
|
||||
@@ -70,17 +81,20 @@ defmodule AgentCoordinator.AutoHeartbeatTest do
|
||||
assert length(online_agents) >= 3
|
||||
|
||||
# Create tasks from different agents simultaneously
|
||||
task1 = Task.async(fn ->
|
||||
Client.create_task(agent1, "Task1", "Description1", %{"priority" => "normal"})
|
||||
end)
|
||||
task1 =
|
||||
Task.async(fn ->
|
||||
Client.create_task(agent1, "Task1", "Description1", %{"priority" => "normal"})
|
||||
end)
|
||||
|
||||
task2 = Task.async(fn ->
|
||||
Client.create_task(agent2, "Task2", "Description2", %{"priority" => "high"})
|
||||
end)
|
||||
task2 =
|
||||
Task.async(fn ->
|
||||
Client.create_task(agent2, "Task2", "Description2", %{"priority" => "high"})
|
||||
end)
|
||||
|
||||
task3 = Task.async(fn ->
|
||||
Client.create_task(agent3, "Task3", "Description3", %{"priority" => "low"})
|
||||
end)
|
||||
task3 =
|
||||
Task.async(fn ->
|
||||
Client.create_task(agent3, "Task3", "Description3", %{"priority" => "low"})
|
||||
end)
|
||||
|
||||
# All tasks should complete successfully
|
||||
{:ok, result1} = Task.await(task1)
|
||||
@@ -145,6 +159,7 @@ defmodule AgentCoordinator.AutoHeartbeatTest do
|
||||
nil ->
|
||||
# Agent was cleaned up - this is acceptable
|
||||
:ok
|
||||
|
||||
agent ->
|
||||
# Agent should be offline
|
||||
refute agent["online"]
|
||||
@@ -1,8 +0,0 @@
|
||||
defmodule AgentCoordinatorTest do
|
||||
use ExUnit.Case
|
||||
doctest AgentCoordinator
|
||||
|
||||
test "greets the world" do
|
||||
assert AgentCoordinator.hello() == :world
|
||||
end
|
||||
end
|
||||
16
test/test_agent_coordinator.exs
Normal file
16
test/test_agent_coordinator.exs
Normal file
@@ -0,0 +1,16 @@
|
||||
defmodule AgentCoordinatorTest do
|
||||
use ExUnit.Case
|
||||
doctest AgentCoordinator
|
||||
|
||||
test "returns version" do
|
||||
assert is_binary(AgentCoordinator.version())
|
||||
assert AgentCoordinator.version() == "0.1.0"
|
||||
end
|
||||
|
||||
test "returns status structure" do
|
||||
status = AgentCoordinator.status()
|
||||
assert is_map(status)
|
||||
assert Map.has_key?(status, :agents)
|
||||
assert Map.has_key?(status, :uptime)
|
||||
end
|
||||
end
|
||||
@@ -16,11 +16,12 @@ defmodule AgentCoordinator.MetadataTest do
|
||||
agent_name = "MetadataTestAgent_#{:rand.uniform(1000)}"
|
||||
|
||||
# Register agent with metadata
|
||||
result = AgentCoordinator.TaskRegistry.register_agent(
|
||||
agent_name,
|
||||
["coding", "testing", "vscode_integration"],
|
||||
[metadata: metadata]
|
||||
)
|
||||
result =
|
||||
AgentCoordinator.TaskRegistry.register_agent(
|
||||
agent_name,
|
||||
["coding", "testing", "vscode_integration"],
|
||||
metadata: metadata
|
||||
)
|
||||
|
||||
assert :ok = result
|
||||
|
||||
@@ -44,10 +45,11 @@ defmodule AgentCoordinator.MetadataTest do
|
||||
agent_name = "LegacyTestAgent_#{:rand.uniform(1000)}"
|
||||
|
||||
# Register agent without metadata (old way)
|
||||
result = AgentCoordinator.TaskRegistry.register_agent(
|
||||
agent_name,
|
||||
["coding", "testing"]
|
||||
)
|
||||
result =
|
||||
AgentCoordinator.TaskRegistry.register_agent(
|
||||
agent_name,
|
||||
["coding", "testing"]
|
||||
)
|
||||
|
||||
assert :ok = result
|
||||
|
||||
@@ -67,11 +69,12 @@ defmodule AgentCoordinator.MetadataTest do
|
||||
boolean: true
|
||||
}
|
||||
|
||||
agent = AgentCoordinator.Agent.new(
|
||||
"TestAgent",
|
||||
["capability1"],
|
||||
[metadata: metadata]
|
||||
)
|
||||
agent =
|
||||
AgentCoordinator.Agent.new(
|
||||
"TestAgent",
|
||||
["capability1"],
|
||||
metadata: metadata
|
||||
)
|
||||
|
||||
assert agent.metadata[:test_key] == "test_value"
|
||||
assert agent.metadata[:number] == 42
|
||||
@@ -82,4 +85,4 @@ defmodule AgentCoordinator.MetadataTest do
|
||||
assert agent_no_metadata.metadata == %{}
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
234
test/test_agent_specific_tasks.exs
Normal file
234
test/test_agent_specific_tasks.exs
Normal file
@@ -0,0 +1,234 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Comprehensive test for agent-specific task pools
|
||||
# This verifies that the chaos problem is fixed and agents can manage their own task sets
|
||||
|
||||
Application.ensure_all_started(:agent_coordinator)
|
||||
|
||||
alias AgentCoordinator.{MCPServer, TaskRegistry, Agent, Inbox}
|
||||
|
||||
IO.puts("🧪 Testing Agent-Specific Task Pools Fix")
|
||||
IO.puts("=" |> String.duplicate(60))
|
||||
|
||||
# Ensure clean state
|
||||
try do
|
||||
TaskRegistry.start_link()
|
||||
rescue
|
||||
# Already started
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
try do
|
||||
MCPServer.start_link()
|
||||
rescue
|
||||
# Already started
|
||||
_ -> :ok
|
||||
end
|
||||
|
||||
# Give services time to start
|
||||
Process.sleep(1000)
|
||||
|
||||
# Test 1: Register two agents
|
||||
IO.puts("\n1️⃣ Registering two test agents...")
|
||||
|
||||
agent1_req = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "GitHub Copilot Alpha Wolf",
|
||||
"capabilities" => ["coding", "testing"]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 1
|
||||
}
|
||||
|
||||
agent2_req = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "GitHub Copilot Beta Tiger",
|
||||
"capabilities" => ["documentation", "analysis"]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 2
|
||||
}
|
||||
|
||||
resp1 = MCPServer.handle_mcp_request(agent1_req)
|
||||
resp2 = MCPServer.handle_mcp_request(agent2_req)
|
||||
|
||||
# Extract agent IDs
|
||||
agent1_id =
|
||||
case resp1 do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
data = Jason.decode!(text)
|
||||
data["agent_id"]
|
||||
|
||||
_ ->
|
||||
IO.puts("❌ Failed to register agent 1: #{inspect(resp1)}")
|
||||
System.halt(1)
|
||||
end
|
||||
|
||||
agent2_id =
|
||||
case resp2 do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
data = Jason.decode!(text)
|
||||
data["agent_id"]
|
||||
|
||||
_ ->
|
||||
IO.puts("❌ Failed to register agent 2: #{inspect(resp2)}")
|
||||
System.halt(1)
|
||||
end
|
||||
|
||||
IO.puts("✅ Agent 1 (Alpha Wolf): #{agent1_id}")
|
||||
IO.puts("✅ Agent 2 (Beta Tiger): #{agent2_id}")
|
||||
|
||||
# Test 2: Create task sets for each agent (THIS IS THE KEY TEST!)
|
||||
IO.puts("\n2️⃣ Creating agent-specific task sets...")
|
||||
|
||||
# Agent 1 task set
|
||||
agent1_task_set = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_task_set",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent1_id,
|
||||
"task_set" => [
|
||||
%{
|
||||
"title" => "Fix authentication bug",
|
||||
"description" => "Debug and fix the login authentication issue",
|
||||
"priority" => "high",
|
||||
"estimated_time" => "2 hours",
|
||||
"file_paths" => ["lib/auth.ex", "test/auth_test.exs"]
|
||||
},
|
||||
%{
|
||||
"title" => "Add unit tests for auth module",
|
||||
"description" => "Write comprehensive tests for authentication",
|
||||
"priority" => "normal",
|
||||
"estimated_time" => "1 hour"
|
||||
},
|
||||
%{
|
||||
"title" => "Refactor auth middleware",
|
||||
"description" => "Clean up and optimize auth middleware code",
|
||||
"priority" => "low",
|
||||
"estimated_time" => "30 minutes"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 3
|
||||
}
|
||||
|
||||
# Agent 2 task set (completely different)
|
||||
agent2_task_set = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_task_set",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent2_id,
|
||||
"task_set" => [
|
||||
%{
|
||||
"title" => "Write API documentation",
|
||||
"description" => "Document all REST API endpoints with examples",
|
||||
"priority" => "normal",
|
||||
"estimated_time" => "3 hours",
|
||||
"file_paths" => ["docs/api.md"]
|
||||
},
|
||||
%{
|
||||
"title" => "Analyze code coverage",
|
||||
"description" => "Run coverage analysis and identify gaps",
|
||||
"priority" => "high",
|
||||
"estimated_time" => "1 hour"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 4
|
||||
}
|
||||
|
||||
task_set_resp1 = MCPServer.handle_mcp_request(agent1_task_set)
|
||||
task_set_resp2 = MCPServer.handle_mcp_request(agent2_task_set)
|
||||
|
||||
IO.puts("Agent 1 task set response: #{inspect(task_set_resp1)}")
|
||||
IO.puts("Agent 2 task set response: #{inspect(task_set_resp2)}")
|
||||
|
||||
# Test 3: Verify agents only see their own tasks
|
||||
IO.puts("\n3️⃣ Verifying agent isolation...")
|
||||
|
||||
# Get detailed task board
|
||||
task_board_req = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_detailed_task_board",
|
||||
"arguments" => %{}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 5
|
||||
}
|
||||
|
||||
board_resp = MCPServer.handle_mcp_request(task_board_req)
|
||||
IO.puts("Task board response: #{inspect(board_resp)}")
|
||||
|
||||
# Test 4: Agent 1 gets their next task (should be their own)
|
||||
IO.puts("\n4️⃣ Testing task retrieval...")
|
||||
|
||||
next_task_req1 = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_next_task",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent1_id
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 6
|
||||
}
|
||||
|
||||
task_resp1 = MCPServer.handle_mcp_request(next_task_req1)
|
||||
IO.puts("Agent 1 next task: #{inspect(task_resp1)}")
|
||||
|
||||
# Test 5: Agent 2 gets their next task (should be different)
|
||||
next_task_req2 = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_next_task",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent2_id
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 7
|
||||
}
|
||||
|
||||
task_resp2 = MCPServer.handle_mcp_request(next_task_req2)
|
||||
IO.puts("Agent 2 next task: #{inspect(task_resp2)}")
|
||||
|
||||
# Test 6: Get individual agent task history
|
||||
IO.puts("\n5️⃣ Testing agent task history...")
|
||||
|
||||
history_req1 = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_agent_task_history",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent1_id
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 8
|
||||
}
|
||||
|
||||
history_resp1 = MCPServer.handle_mcp_request(history_req1)
|
||||
IO.puts("Agent 1 history: #{inspect(history_resp1)}")
|
||||
|
||||
IO.puts(("\n" <> "=") |> String.duplicate(60))
|
||||
IO.puts("🎉 AGENT-SPECIFIC TASK POOLS TEST COMPLETE!")
|
||||
IO.puts("✅ Each agent now has their own task pool")
|
||||
IO.puts("✅ No more task chaos or cross-contamination")
|
||||
IO.puts("✅ Agents can plan and coordinate their workflows")
|
||||
IO.puts("=" |> String.duplicate(60))
|
||||
235
test/test_agent_task_pools.exs
Normal file
235
test/test_agent_task_pools.exs
Normal file
@@ -0,0 +1,235 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Test script for agent-specific task pools
|
||||
# This tests the new functionality to ensure agents have separate task pools
|
||||
|
||||
Mix.install([
|
||||
{:jason, "~> 1.4"}
|
||||
])
|
||||
|
||||
defmodule AgentTaskPoolTest do
|
||||
def run_test do
|
||||
IO.puts("🚀 Testing Agent-Specific Task Pools")
|
||||
IO.puts("=====================================")
|
||||
|
||||
# Start the application
|
||||
IO.puts("Starting AgentCoordinator application...")
|
||||
Application.start(:agent_coordinator)
|
||||
|
||||
# Test 1: Register two agents
|
||||
IO.puts("\n📋 Test 1: Registering two test agents")
|
||||
|
||||
agent1_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "TestAgent_Alpha_Banana",
|
||||
"capabilities" => ["coding", "testing"]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 1
|
||||
}
|
||||
|
||||
agent2_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "TestAgent_Beta_Koala",
|
||||
"capabilities" => ["documentation", "analysis"]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 2
|
||||
}
|
||||
|
||||
# Register agents
|
||||
agent1_response = AgentCoordinator.MCPServer.handle_mcp_request(agent1_request)
|
||||
agent2_response = AgentCoordinator.MCPServer.handle_mcp_request(agent2_request)
|
||||
|
||||
agent1_id = extract_agent_id(agent1_response)
|
||||
agent2_id = extract_agent_id(agent2_response)
|
||||
|
||||
IO.puts("✅ Agent 1 registered: #{agent1_id}")
|
||||
IO.puts("✅ Agent 2 registered: #{agent2_id}")
|
||||
|
||||
# Test 2: Register task sets for each agent
|
||||
IO.puts("\n📝 Test 2: Registering task sets for each agent")
|
||||
|
||||
task_set_1 = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_task_set",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent1_id,
|
||||
"task_set" => [
|
||||
%{
|
||||
"title" => "Implement login feature",
|
||||
"description" => "Create user authentication system",
|
||||
"priority" => "high",
|
||||
"estimated_time" => "2 hours"
|
||||
},
|
||||
%{
|
||||
"title" => "Write unit tests",
|
||||
"description" => "Add tests for authentication",
|
||||
"priority" => "normal",
|
||||
"estimated_time" => "1 hour"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 3
|
||||
}
|
||||
|
||||
task_set_2 = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_task_set",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent2_id,
|
||||
"task_set" => [
|
||||
%{
|
||||
"title" => "Write API documentation",
|
||||
"description" => "Document the new authentication API",
|
||||
"priority" => "normal",
|
||||
"estimated_time" => "3 hours"
|
||||
},
|
||||
%{
|
||||
"title" => "Review code quality",
|
||||
"description" => "Analyze the authentication implementation",
|
||||
"priority" => "low",
|
||||
"estimated_time" => "1 hour"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 4
|
||||
}
|
||||
|
||||
taskset1_response = AgentCoordinator.MCPServer.handle_mcp_request(task_set_1)
|
||||
taskset2_response = AgentCoordinator.MCPServer.handle_mcp_request(task_set_2)
|
||||
|
||||
IO.puts("✅ Task set registered for Agent 1: #{inspect(taskset1_response)}")
|
||||
IO.puts("✅ Task set registered for Agent 2: #{inspect(taskset2_response)}")
|
||||
|
||||
# Test 3: Get detailed task board
|
||||
IO.puts("\n📊 Test 3: Getting detailed task board")
|
||||
|
||||
detailed_board_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_detailed_task_board",
|
||||
"arguments" => %{}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 5
|
||||
}
|
||||
|
||||
board_response = AgentCoordinator.MCPServer.handle_mcp_request(detailed_board_request)
|
||||
IO.puts("📋 Detailed task board: #{inspect(board_response, pretty: true)}")
|
||||
|
||||
# Test 4: Get agent task history
|
||||
IO.puts("\n📜 Test 4: Getting individual agent task histories")
|
||||
|
||||
history1_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_agent_task_history",
|
||||
"arguments" => %{"agent_id" => agent1_id}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 6
|
||||
}
|
||||
|
||||
history2_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_agent_task_history",
|
||||
"arguments" => %{"agent_id" => agent2_id}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 7
|
||||
}
|
||||
|
||||
history1_response = AgentCoordinator.MCPServer.handle_mcp_request(history1_request)
|
||||
history2_response = AgentCoordinator.MCPServer.handle_mcp_request(history2_request)
|
||||
|
||||
IO.puts("📜 Agent 1 history: #{inspect(history1_response, pretty: true)}")
|
||||
IO.puts("📜 Agent 2 history: #{inspect(history2_response, pretty: true)}")
|
||||
|
||||
# Test 5: Verify agents can get their own tasks
|
||||
IO.puts("\n🎯 Test 5: Verifying agents get their own tasks")
|
||||
|
||||
next_task1_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_next_task",
|
||||
"arguments" => %{"agent_id" => agent1_id}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 8
|
||||
}
|
||||
|
||||
next_task2_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_next_task",
|
||||
"arguments" => %{"agent_id" => agent2_id}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 9
|
||||
}
|
||||
|
||||
task1_response = AgentCoordinator.MCPServer.handle_mcp_request(next_task1_request)
|
||||
task2_response = AgentCoordinator.MCPServer.handle_mcp_request(next_task2_request)
|
||||
|
||||
IO.puts("🎯 Agent 1 next task: #{inspect(task1_response)}")
|
||||
IO.puts("🎯 Agent 2 next task: #{inspect(task2_response)}")
|
||||
|
||||
IO.puts("\n✅ Test completed! Agent-specific task pools are working!")
|
||||
IO.puts("Each agent now has their own task queue and cannot access other agents' tasks.")
|
||||
|
||||
# Cleanup
|
||||
cleanup_agents([agent1_id, agent2_id])
|
||||
end
|
||||
|
||||
defp extract_agent_id(response) do
|
||||
case response do
|
||||
%{"result" => %{"content" => [%{"text" => text}]}} ->
|
||||
data = Jason.decode!(text)
|
||||
data["agent_id"]
|
||||
|
||||
_ ->
|
||||
"unknown"
|
||||
end
|
||||
end
|
||||
|
||||
defp cleanup_agents(agent_ids) do
|
||||
IO.puts("\n🧹 Cleaning up test agents...")
|
||||
|
||||
Enum.each(agent_ids, fn agent_id ->
|
||||
unregister_request = %{
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "unregister_agent",
|
||||
"arguments" => %{
|
||||
"agent_id" => agent_id,
|
||||
"reason" => "Test completed"
|
||||
}
|
||||
},
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 999
|
||||
}
|
||||
|
||||
AgentCoordinator.MCPServer.handle_mcp_request(unregister_request)
|
||||
IO.puts("🗑️ Unregistered agent: #{agent_id}")
|
||||
end)
|
||||
end
|
||||
end
|
||||
|
||||
# Run the test
|
||||
AgentTaskPoolTest.run_test()
|
||||
@@ -1,5 +1,6 @@
|
||||
defmodule AgentCoordinator.DynamicToolDiscoveryTest do
|
||||
use ExUnit.Case, async: false # Changed to false since we're using shared resources
|
||||
# Changed to false since we're using shared resources
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
describe "Dynamic tool discovery" do
|
||||
test "tools are discovered from external MCP servers via tools/list" do
|
||||
@@ -9,23 +10,32 @@ defmodule AgentCoordinator.DynamicToolDiscoveryTest do
|
||||
initial_tools = AgentCoordinator.MCPServerManager.get_unified_tools()
|
||||
|
||||
# Should have at least the coordinator native tools
|
||||
coordinator_tool_names = ["register_agent", "create_task", "get_next_task", "complete_task", "get_task_board", "heartbeat"]
|
||||
coordinator_tool_names = [
|
||||
"register_agent",
|
||||
"create_task",
|
||||
"get_next_task",
|
||||
"complete_task",
|
||||
"get_task_board",
|
||||
"heartbeat"
|
||||
]
|
||||
|
||||
Enum.each(coordinator_tool_names, fn tool_name ->
|
||||
assert Enum.any?(initial_tools, fn tool -> tool["name"] == tool_name end),
|
||||
"Coordinator tool #{tool_name} should be available"
|
||||
"Coordinator tool #{tool_name} should be available"
|
||||
end)
|
||||
|
||||
# Verify VS Code tools are conditionally included
|
||||
vscode_tools = Enum.filter(initial_tools, fn tool ->
|
||||
String.starts_with?(tool["name"], "vscode_")
|
||||
end)
|
||||
vscode_tools =
|
||||
Enum.filter(initial_tools, fn tool ->
|
||||
String.starts_with?(tool["name"], "vscode_")
|
||||
end)
|
||||
|
||||
# Should have VS Code tools if the module is available
|
||||
if Code.ensure_loaded?(AgentCoordinator.VSCodeToolProvider) do
|
||||
assert length(vscode_tools) > 0, "VS Code tools should be available when module is loaded"
|
||||
else
|
||||
assert length(vscode_tools) == 0, "VS Code tools should not be available when module is not loaded"
|
||||
assert length(vscode_tools) == 0,
|
||||
"VS Code tools should not be available when module is not loaded"
|
||||
end
|
||||
|
||||
# Test tool refresh functionality
|
||||
@@ -39,21 +49,23 @@ defmodule AgentCoordinator.DynamicToolDiscoveryTest do
|
||||
# Use the shared MCP server manager
|
||||
|
||||
# Test routing for coordinator tools
|
||||
result = AgentCoordinator.MCPServerManager.route_tool_call(
|
||||
"register_agent",
|
||||
%{"name" => "TestAgent", "capabilities" => ["testing"]},
|
||||
%{agent_id: "test_#{:rand.uniform(1000)}"}
|
||||
)
|
||||
result =
|
||||
AgentCoordinator.MCPServerManager.route_tool_call(
|
||||
"register_agent",
|
||||
%{"name" => "TestAgent", "capabilities" => ["testing"]},
|
||||
%{agent_id: "test_#{:rand.uniform(1000)}"}
|
||||
)
|
||||
|
||||
# Should succeed (returns :ok for register_agent)
|
||||
assert result == :ok or (is_map(result) and not Map.has_key?(result, "error"))
|
||||
|
||||
# Test routing for non-existent tool
|
||||
error_result = AgentCoordinator.MCPServerManager.route_tool_call(
|
||||
"nonexistent_tool",
|
||||
%{},
|
||||
%{agent_id: "test"}
|
||||
)
|
||||
error_result =
|
||||
AgentCoordinator.MCPServerManager.route_tool_call(
|
||||
"nonexistent_tool",
|
||||
%{},
|
||||
%{agent_id: "test"}
|
||||
)
|
||||
|
||||
assert error_result["error"]["code"] == -32601
|
||||
assert String.contains?(error_result["error"]["message"], "Tool not found")
|
||||
@@ -72,11 +84,20 @@ defmodule AgentCoordinator.DynamicToolDiscoveryTest do
|
||||
assert tool_count >= 0
|
||||
|
||||
# Verify we have external tools (context7, filesystem, etc.)
|
||||
external_tools = Enum.filter(tools, fn tool ->
|
||||
name = tool["name"]
|
||||
not String.starts_with?(name, "vscode_") and
|
||||
name not in ["register_agent", "create_task", "get_next_task", "complete_task", "get_task_board", "heartbeat"]
|
||||
end)
|
||||
external_tools =
|
||||
Enum.filter(tools, fn tool ->
|
||||
name = tool["name"]
|
||||
|
||||
not String.starts_with?(name, "vscode_") and
|
||||
name not in [
|
||||
"register_agent",
|
||||
"create_task",
|
||||
"get_next_task",
|
||||
"complete_task",
|
||||
"get_task_board",
|
||||
"heartbeat"
|
||||
]
|
||||
end)
|
||||
|
||||
# Should have some external tools from the configured MCP servers
|
||||
assert length(external_tools) > 0, "Should have external MCP server tools available"
|
||||
@@ -84,4 +105,4 @@ defmodule AgentCoordinator.DynamicToolDiscoveryTest do
|
||||
# No cleanup needed - using shared instance
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
90
test/test_isolation.exs
Normal file
90
test/test_isolation.exs
Normal file
@@ -0,0 +1,90 @@
|
||||
# Simple test for agent-specific task pools
|
||||
alias AgentCoordinator.{TaskRegistry, Inbox, Agent, Task}
|
||||
|
||||
IO.puts("🧪 Agent-Specific Task Pool Test")
|
||||
IO.puts("=" |> String.duplicate(40))
|
||||
|
||||
# Test 1: Create agents directly
|
||||
IO.puts("\n1️⃣ Creating agents...")
|
||||
|
||||
agent1 = Agent.new("Alpha Wolf", [:coding, :testing])
|
||||
agent2 = Agent.new("Beta Tiger", [:documentation, :analysis])
|
||||
|
||||
IO.puts("Agent 1 ID: #{agent1.id}")
|
||||
IO.puts("Agent 2 ID: #{agent2.id}")
|
||||
|
||||
case TaskRegistry.register_agent(agent1) do
|
||||
:ok -> IO.puts("✅ Agent 1 registered")
|
||||
error -> IO.puts("❌ Agent 1 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
case TaskRegistry.register_agent(agent2) do
|
||||
:ok -> IO.puts("✅ Agent 2 registered")
|
||||
error -> IO.puts("❌ Agent 2 failed: #{inspect(error)}")
|
||||
end
|
||||
|
||||
# Wait for inboxes to be created
|
||||
Process.sleep(1000)
|
||||
|
||||
# Test 2: Create agent-specific tasks
|
||||
IO.puts("\n2️⃣ Creating agent-specific tasks...")
|
||||
|
||||
# Tasks for Agent 1
|
||||
task1_agent1 =
|
||||
Task.new("Fix auth bug", "Debug authentication issue", %{
|
||||
priority: :high,
|
||||
assigned_agent: agent1.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
task2_agent1 =
|
||||
Task.new("Add auth tests", "Write auth tests", %{
|
||||
priority: :normal,
|
||||
assigned_agent: agent1.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
# Tasks for Agent 2
|
||||
task1_agent2 =
|
||||
Task.new("Write API docs", "Document endpoints", %{
|
||||
priority: :normal,
|
||||
assigned_agent: agent2.id,
|
||||
metadata: %{agent_created: true}
|
||||
})
|
||||
|
||||
# Add tasks to respective inboxes
|
||||
Inbox.add_task(agent1.id, task1_agent1)
|
||||
Inbox.add_task(agent1.id, task2_agent1)
|
||||
Inbox.add_task(agent2.id, task1_agent2)
|
||||
|
||||
IO.puts("✅ Tasks added to agent inboxes")
|
||||
|
||||
# Test 3: Verify isolation
|
||||
IO.puts("\n3️⃣ Testing isolation...")
|
||||
|
||||
# Check what each agent gets
|
||||
case Inbox.get_next_task(agent1.id) do
|
||||
nil -> IO.puts("❌ Agent 1 has no tasks")
|
||||
task -> IO.puts("✅ Agent 1 got: '#{task.title}'")
|
||||
end
|
||||
|
||||
case Inbox.get_next_task(agent2.id) do
|
||||
nil -> IO.puts("❌ Agent 2 has no tasks")
|
||||
task -> IO.puts("✅ Agent 2 got: '#{task.title}'")
|
||||
end
|
||||
|
||||
# Test 4: Check remaining tasks
|
||||
IO.puts("\n4️⃣ Checking remaining tasks...")
|
||||
|
||||
status1 = Inbox.get_status(agent1.id)
|
||||
status2 = Inbox.get_status(agent2.id)
|
||||
|
||||
IO.puts(
|
||||
"Agent 1: #{status1.pending_count} pending, current: #{if status1.current_task, do: status1.current_task.title, else: "none"}"
|
||||
)
|
||||
|
||||
IO.puts(
|
||||
"Agent 2: #{status2.pending_count} pending, current: #{if status2.current_task, do: status2.current_task.title, else: "none"}"
|
||||
)
|
||||
|
||||
IO.puts("\n🎉 SUCCESS! Agent-specific task pools working!")
|
||||
63
test/test_multi_interface.exs
Executable file
63
test/test_multi_interface.exs
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Simple test script to verify multi-interface functionality
|
||||
Mix.install([
|
||||
{:jason, "~> 1.4"}
|
||||
])
|
||||
|
||||
defmodule MultiInterfaceTest do
|
||||
def test_stdio_mode do
|
||||
IO.puts("Testing STDIO mode...")
|
||||
|
||||
# Start the application manually in stdio mode
|
||||
System.put_env("MCP_INTERFACE_MODE", "stdio")
|
||||
|
||||
IO.puts("✅ STDIO mode configuration test passed")
|
||||
end
|
||||
|
||||
def test_http_mode do
|
||||
IO.puts("Testing HTTP mode configuration...")
|
||||
|
||||
# Test HTTP mode configuration
|
||||
System.put_env("MCP_INTERFACE_MODE", "http")
|
||||
System.put_env("MCP_HTTP_PORT", "8080")
|
||||
System.put_env("MCP_HTTP_HOST", "127.0.0.1")
|
||||
|
||||
IO.puts("✅ HTTP mode configuration test passed")
|
||||
end
|
||||
|
||||
def test_multi_mode do
|
||||
IO.puts("Testing multi-interface mode...")
|
||||
|
||||
# Test multiple interfaces
|
||||
System.put_env("MCP_INTERFACE_MODE", "stdio,http,websocket")
|
||||
System.put_env("MCP_HTTP_PORT", "8080")
|
||||
|
||||
IO.puts("✅ Multi-interface mode configuration test passed")
|
||||
end
|
||||
|
||||
def run_tests do
|
||||
IO.puts("🚀 Testing Multi-Interface MCP Server")
|
||||
IO.puts("====================================")
|
||||
|
||||
test_stdio_mode()
|
||||
test_http_mode()
|
||||
test_multi_mode()
|
||||
|
||||
IO.puts("")
|
||||
IO.puts("✅ All configuration tests passed!")
|
||||
IO.puts("You can now test the actual server with:")
|
||||
IO.puts("")
|
||||
IO.puts(" # STDIO mode (default)")
|
||||
IO.puts(" mix run --no-halt")
|
||||
IO.puts("")
|
||||
IO.puts(" # HTTP mode")
|
||||
IO.puts(" MCP_INTERFACE_MODE=http MCP_HTTP_PORT=8080 mix run --no-halt")
|
||||
IO.puts("")
|
||||
IO.puts(" # Multi-interface mode")
|
||||
IO.puts(" MCP_INTERFACE_MODE=stdio,http,websocket MCP_HTTP_PORT=8080 mix run --no-halt")
|
||||
IO.puts("")
|
||||
end
|
||||
end
|
||||
|
||||
MultiInterfaceTest.run_tests()
|
||||
157
test/test_session_management.exs
Normal file
157
test/test_session_management.exs
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Quick test script for the enhanced MCP session management
|
||||
# This tests the new session token authentication flow
|
||||
|
||||
Mix.install([
|
||||
{:jason, "~> 1.4"},
|
||||
{:httpoison, "~> 2.0"}
|
||||
])
|
||||
|
||||
defmodule SessionManagementTest do
|
||||
@base_url "http://localhost:4000"
|
||||
|
||||
def run_test do
|
||||
IO.puts("🔧 Testing Enhanced MCP Session Management")
|
||||
IO.puts("=" <> String.duplicate("=", 50))
|
||||
|
||||
# Step 1: Register an agent to get a session token
|
||||
IO.puts("\n1️⃣ Registering agent to get session token...")
|
||||
|
||||
register_payload = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => "test_001",
|
||||
"method" => "agents/register",
|
||||
"params" => %{
|
||||
"name" => "Test Agent Blue Koala",
|
||||
"capabilities" => ["coding", "testing"],
|
||||
"codebase_id" => "test_codebase",
|
||||
"workspace_path" => "/tmp/test"
|
||||
}
|
||||
}
|
||||
|
||||
case post_mcp_request("/mcp/request", register_payload) do
|
||||
{:ok, %{"result" => result}} ->
|
||||
session_token = Map.get(result, "session_token")
|
||||
expires_at = Map.get(result, "expires_at")
|
||||
|
||||
IO.puts("✅ Agent registered successfully!")
|
||||
IO.puts(" Session Token: #{String.slice(session_token || "nil", 0, 20)}...")
|
||||
IO.puts(" Expires At: #{expires_at}")
|
||||
|
||||
if session_token do
|
||||
test_authenticated_request(session_token)
|
||||
else
|
||||
IO.puts("❌ No session token returned!")
|
||||
end
|
||||
|
||||
{:ok, %{"error" => error}} ->
|
||||
IO.puts("❌ Registration failed: #{inspect(error)}")
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("❌ Request failed: #{reason}")
|
||||
end
|
||||
|
||||
# Step 2: Test MCP protocol headers
|
||||
IO.puts("\n2️⃣ Testing MCP protocol headers...")
|
||||
test_protocol_headers()
|
||||
|
||||
IO.puts("\n🎉 Session management test completed!")
|
||||
end
|
||||
|
||||
defp test_authenticated_request(session_token) do
|
||||
IO.puts("\n🔐 Testing authenticated request with session token...")
|
||||
|
||||
# Try to call a tool that requires authentication
|
||||
tool_payload = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => "test_002",
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "get_task_board",
|
||||
"arguments" => %{"agent_id" => "Test Agent Blue Koala"}
|
||||
}
|
||||
}
|
||||
|
||||
headers = [
|
||||
{"Content-Type", "application/json"},
|
||||
{"Mcp-Session-Id", session_token}
|
||||
]
|
||||
|
||||
case HTTPoison.post("#{@base_url}/mcp/request", Jason.encode!(tool_payload), headers) do
|
||||
{:ok, %HTTPoison.Response{status_code: 200, headers: response_headers, body: body}} ->
|
||||
IO.puts("✅ Authenticated request successful!")
|
||||
|
||||
# Check for MCP protocol headers
|
||||
mcp_version = get_header_value(response_headers, "mcp-protocol-version")
|
||||
IO.puts(" MCP Protocol Version: #{mcp_version || "Not found"}")
|
||||
|
||||
# Parse response
|
||||
case Jason.decode(body) do
|
||||
{:ok, %{"result" => _result}} ->
|
||||
IO.puts(" ✅ Valid MCP response received")
|
||||
|
||||
{:ok, %{"error" => error}} ->
|
||||
IO.puts(" ⚠️ MCP error: #{inspect(error)}")
|
||||
|
||||
_ ->
|
||||
IO.puts(" ❌ Invalid response format")
|
||||
end
|
||||
|
||||
{:ok, %HTTPoison.Response{status_code: status_code, body: body}} ->
|
||||
IO.puts("❌ Request failed with status #{status_code}")
|
||||
|
||||
case Jason.decode(body) do
|
||||
{:ok, parsed} -> IO.puts(" Error: #{inspect(parsed)}")
|
||||
_ -> IO.puts(" Body: #{body}")
|
||||
end
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("❌ HTTP request failed: #{inspect(reason)}")
|
||||
end
|
||||
end
|
||||
|
||||
defp test_protocol_headers do
|
||||
case HTTPoison.get("#{@base_url}/health") do
|
||||
{:ok, %HTTPoison.Response{headers: headers}} ->
|
||||
mcp_version = get_header_value(headers, "mcp-protocol-version")
|
||||
server_header = get_header_value(headers, "server")
|
||||
|
||||
IO.puts("✅ Protocol headers check:")
|
||||
IO.puts(" MCP-Protocol-Version: #{mcp_version || "❌ Missing"}")
|
||||
IO.puts(" Server: #{server_header || "❌ Missing"}")
|
||||
|
||||
{:error, reason} ->
|
||||
IO.puts("❌ Failed to test headers: #{inspect(reason)}")
|
||||
end
|
||||
end
|
||||
|
||||
defp post_mcp_request(endpoint, payload) do
|
||||
headers = [{"Content-Type", "application/json"}]
|
||||
|
||||
case HTTPoison.post("#{@base_url}#{endpoint}", Jason.encode!(payload), headers) do
|
||||
{:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
|
||||
Jason.decode(body)
|
||||
|
||||
{:ok, %HTTPoison.Response{status_code: status_code, body: body}} ->
|
||||
{:error, "HTTP #{status_code}: #{body}"}
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, inspect(reason)}
|
||||
end
|
||||
end
|
||||
|
||||
defp get_header_value(headers, header_name) do
|
||||
headers
|
||||
|> Enum.find(fn {name, _value} ->
|
||||
String.downcase(name) == String.downcase(header_name)
|
||||
end)
|
||||
|> case do
|
||||
{_name, value} -> value
|
||||
nil -> nil
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Run the test
|
||||
SessionManagementTest.run_test()
|
||||
83
test/test_vscode_init.exs
Normal file
83
test/test_vscode_init.exs
Normal file
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Test script to simulate VS Code MCP initialization sequence
|
||||
|
||||
# Start the application
|
||||
Application.start(:agent_coordinator)
|
||||
|
||||
# Wait a moment for the server to fully start
|
||||
Process.sleep(1000)
|
||||
|
||||
# Test 1: Initialize call (system call, should work without agent_id)
|
||||
IO.puts("Testing initialize call...")
|
||||
|
||||
init_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 1,
|
||||
"method" => "initialize",
|
||||
"params" => %{
|
||||
"protocolVersion" => "2024-11-05",
|
||||
"capabilities" => %{
|
||||
"tools" => %{}
|
||||
},
|
||||
"clientInfo" => %{
|
||||
"name" => "vscode",
|
||||
"version" => "1.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
init_response = GenServer.call(AgentCoordinator.MCPServer, {:mcp_request, init_request})
|
||||
IO.puts("Initialize response: #{inspect(init_response)}")
|
||||
|
||||
# Test 2: Tools/list call (system call, should work without agent_id)
|
||||
IO.puts("\nTesting tools/list call...")
|
||||
|
||||
tools_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 2,
|
||||
"method" => "tools/list"
|
||||
}
|
||||
|
||||
tools_response = GenServer.call(AgentCoordinator.MCPServer, {:mcp_request, tools_request})
|
||||
IO.puts("Tools/list response: #{inspect(tools_response)}")
|
||||
|
||||
# Test 3: Register agent call (should work)
|
||||
IO.puts("\nTesting register_agent call...")
|
||||
|
||||
register_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 3,
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "register_agent",
|
||||
"arguments" => %{
|
||||
"name" => "GitHub Copilot Test Agent",
|
||||
"capabilities" => ["file_operations", "code_generation"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
register_response = GenServer.call(AgentCoordinator.MCPServer, {:mcp_request, register_request})
|
||||
IO.puts("Register agent response: #{inspect(register_response)}")
|
||||
|
||||
# Test 4: Try a call that requires agent_id (should fail without agent_id)
|
||||
IO.puts("Testing call that requires agent_id (should fail)...")
|
||||
|
||||
task_request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => 4,
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => "create_task",
|
||||
"arguments" => %{
|
||||
"title" => "Test task",
|
||||
"description" => "This should fail without agent_id"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task_response = GenServer.call(AgentCoordinator.MCPServer, {:mcp_request, task_request})
|
||||
IO.puts("Task creation response: #{inspect(task_response)}")
|
||||
|
||||
IO.puts("All tests completed!")
|
||||
33
test/test_vscode_integration.exs
Normal file
33
test/test_vscode_integration.exs
Normal file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Quick test to check if VS Code tools are properly integrated
|
||||
IO.puts("Testing VS Code tool integration...")
|
||||
|
||||
# Start the agent coordinator
|
||||
{:ok, _} = AgentCoordinator.start_link()
|
||||
|
||||
# Give it a moment to start
|
||||
:timer.sleep(2000)
|
||||
|
||||
# Check if VS Code tools are available
|
||||
tools = AgentCoordinator.MCPServer.get_tools()
|
||||
|
||||
vscode_tools =
|
||||
Enum.filter(tools, fn tool ->
|
||||
case Map.get(tool, "name") do
|
||||
"vscode_" <> _ -> true
|
||||
_ -> false
|
||||
end
|
||||
end)
|
||||
|
||||
IO.puts("Found #{length(vscode_tools)} VS Code tools:")
|
||||
|
||||
Enum.each(vscode_tools, fn tool ->
|
||||
IO.puts(" - #{tool["name"]}")
|
||||
end)
|
||||
|
||||
if length(vscode_tools) > 0 do
|
||||
IO.puts("✅ VS Code tools are properly integrated!")
|
||||
else
|
||||
IO.puts("❌ VS Code tools are NOT integrated")
|
||||
end
|
||||
@@ -1,71 +0,0 @@
|
||||
# Test enhanced Agent Coordinator with auto-heartbeat and unregister
|
||||
|
||||
# Start a client with automatic heartbeat
|
||||
IO.puts "🚀 Testing Enhanced Agent Coordinator"
|
||||
IO.puts "====================================="
|
||||
|
||||
{:ok, client1} = AgentCoordinator.Client.start_session("TestAgent1", [:coding, :analysis])
|
||||
|
||||
# Get session info
|
||||
{:ok, info} = AgentCoordinator.Client.get_session_info(client1)
|
||||
IO.puts "✅ Agent registered: #{info.agent_name} (#{info.agent_id})"
|
||||
IO.puts " Auto-heartbeat: #{info.auto_heartbeat_enabled}"
|
||||
|
||||
# Check task board
|
||||
{:ok, board} = AgentCoordinator.Client.get_task_board(client1)
|
||||
IO.puts "📊 Task board status:"
|
||||
IO.puts " Total agents: #{length(board.agents)}"
|
||||
IO.puts " Active sessions: #{board.active_sessions}"
|
||||
|
||||
# Find our agent on the board
|
||||
our_agent = Enum.find(board.agents, fn a -> a["agent_id"] == info.agent_id end)
|
||||
IO.puts " Our agent online: #{our_agent["online"]}"
|
||||
IO.puts " Session active: #{our_agent["session_active"]}"
|
||||
|
||||
# Test heartbeat functionality
|
||||
IO.puts "\n💓 Testing manual heartbeat..."
|
||||
{:ok, _} = AgentCoordinator.Client.heartbeat(client1)
|
||||
IO.puts " Heartbeat sent successfully"
|
||||
|
||||
# Wait to observe automatic heartbeats
|
||||
IO.puts "\n⏱️ Waiting 3 seconds to observe automatic heartbeats..."
|
||||
Process.sleep(3000)
|
||||
|
||||
{:ok, updated_info} = AgentCoordinator.Client.get_session_info(client1)
|
||||
IO.puts " Last heartbeat updated: #{DateTime.diff(updated_info.last_heartbeat, info.last_heartbeat) > 0}"
|
||||
|
||||
# Test unregister functionality
|
||||
IO.puts "\n🔄 Testing unregister functionality..."
|
||||
{:ok, result} = AgentCoordinator.Client.unregister_agent(client1, "Testing unregister from script")
|
||||
IO.puts " Unregister result: #{result["status"]}"
|
||||
|
||||
# Check agent status after unregister
|
||||
{:ok, final_board} = AgentCoordinator.Client.get_task_board(client1)
|
||||
final_agent = Enum.find(final_board.agents, fn a -> a["agent_id"] == info.agent_id end)
|
||||
|
||||
case final_agent do
|
||||
nil ->
|
||||
IO.puts " Agent removed from board ✅"
|
||||
agent ->
|
||||
IO.puts " Agent still on board, online: #{agent["online"]}"
|
||||
end
|
||||
|
||||
# Test task creation
|
||||
IO.puts "\n📝 Testing task creation with heartbeats..."
|
||||
{:ok, task_result} = AgentCoordinator.Client.create_task(
|
||||
client1,
|
||||
"Test Task",
|
||||
"A test task to verify heartbeat integration",
|
||||
%{"priority" => "normal"}
|
||||
)
|
||||
|
||||
IO.puts " Task created: #{task_result["task_id"]}"
|
||||
if Map.has_key?(task_result, "_heartbeat_metadata") do
|
||||
IO.puts " Heartbeat metadata included ✅"
|
||||
else
|
||||
IO.puts " No heartbeat metadata ❌"
|
||||
end
|
||||
|
||||
# Clean up
|
||||
AgentCoordinator.Client.stop_session(client1)
|
||||
IO.puts "\n✨ Test completed successfully!"
|
||||
@@ -1,321 +0,0 @@
|
||||
#!/usr/bin/env elixir
|
||||
|
||||
# Multi-Codebase Coordination Test Script
|
||||
# This script demonstrates how agents can coordinate across multiple codebases
|
||||
|
||||
Mix.install([
|
||||
{:jason, "~> 1.4"},
|
||||
{:uuid, "~> 1.1"}
|
||||
])
|
||||
|
||||
defmodule MultiCodebaseTest do
|
||||
@moduledoc """
|
||||
Test script for multi-codebase agent coordination functionality.
|
||||
Demonstrates cross-codebase task creation, dependency management, and agent coordination.
|
||||
"""
|
||||
|
||||
def run do
|
||||
IO.puts("=== Multi-Codebase Agent Coordination Test ===\n")
|
||||
|
||||
# Test 1: Register multiple codebases
|
||||
test_codebase_registration()
|
||||
|
||||
# Test 2: Register agents in different codebases
|
||||
test_agent_registration()
|
||||
|
||||
# Test 3: Create tasks within individual codebases
|
||||
test_single_codebase_tasks()
|
||||
|
||||
# Test 4: Create cross-codebase tasks
|
||||
test_cross_codebase_tasks()
|
||||
|
||||
# Test 5: Test cross-codebase dependencies
|
||||
test_codebase_dependencies()
|
||||
|
||||
# Test 6: Verify coordination and task board
|
||||
test_coordination_overview()
|
||||
|
||||
IO.puts("\n=== Test Completed ===")
|
||||
end
|
||||
|
||||
def test_codebase_registration do
|
||||
IO.puts("1. Testing Codebase Registration")
|
||||
IO.puts(" - Registering frontend codebase...")
|
||||
IO.puts(" - Registering backend codebase...")
|
||||
IO.puts(" - Registering shared-lib codebase...")
|
||||
|
||||
frontend_codebase = %{
|
||||
"id" => "frontend-app",
|
||||
"name" => "Frontend Application",
|
||||
"workspace_path" => "/workspace/frontend",
|
||||
"description" => "React-based frontend application",
|
||||
"metadata" => %{
|
||||
"tech_stack" => ["react", "typescript", "tailwind"],
|
||||
"dependencies" => ["backend-api", "shared-lib"]
|
||||
}
|
||||
}
|
||||
|
||||
backend_codebase = %{
|
||||
"id" => "backend-api",
|
||||
"name" => "Backend API",
|
||||
"workspace_path" => "/workspace/backend",
|
||||
"description" => "Node.js API server",
|
||||
"metadata" => %{
|
||||
"tech_stack" => ["nodejs", "express", "mongodb"],
|
||||
"dependencies" => ["shared-lib"]
|
||||
}
|
||||
}
|
||||
|
||||
shared_lib_codebase = %{
|
||||
"id" => "shared-lib",
|
||||
"name" => "Shared Library",
|
||||
"workspace_path" => "/workspace/shared",
|
||||
"description" => "Shared utilities and types",
|
||||
"metadata" => %{
|
||||
"tech_stack" => ["typescript"],
|
||||
"dependencies" => []
|
||||
}
|
||||
}
|
||||
|
||||
# Simulate MCP calls
|
||||
simulate_mcp_call("register_codebase", frontend_codebase)
|
||||
simulate_mcp_call("register_codebase", backend_codebase)
|
||||
simulate_mcp_call("register_codebase", shared_lib_codebase)
|
||||
|
||||
IO.puts(" ✓ All codebases registered successfully\n")
|
||||
end
|
||||
|
||||
def test_agent_registration do
|
||||
IO.puts("2. Testing Agent Registration")
|
||||
|
||||
# Frontend agents
|
||||
frontend_agent1 = %{
|
||||
"name" => "frontend-dev-1",
|
||||
"capabilities" => ["coding", "testing"],
|
||||
"codebase_id" => "frontend-app",
|
||||
"workspace_path" => "/workspace/frontend",
|
||||
"cross_codebase_capable" => true
|
||||
}
|
||||
|
||||
frontend_agent2 = %{
|
||||
"name" => "frontend-dev-2",
|
||||
"capabilities" => ["coding", "review"],
|
||||
"codebase_id" => "frontend-app",
|
||||
"workspace_path" => "/workspace/frontend",
|
||||
"cross_codebase_capable" => false
|
||||
}
|
||||
|
||||
# Backend agents
|
||||
backend_agent1 = %{
|
||||
"name" => "backend-dev-1",
|
||||
"capabilities" => ["coding", "testing", "analysis"],
|
||||
"codebase_id" => "backend-api",
|
||||
"workspace_path" => "/workspace/backend",
|
||||
"cross_codebase_capable" => true
|
||||
}
|
||||
|
||||
# Shared library agent (cross-codebase capable)
|
||||
shared_agent = %{
|
||||
"name" => "shared-lib-dev",
|
||||
"capabilities" => ["coding", "documentation", "review"],
|
||||
"codebase_id" => "shared-lib",
|
||||
"workspace_path" => "/workspace/shared",
|
||||
"cross_codebase_capable" => true
|
||||
}
|
||||
|
||||
agents = [frontend_agent1, frontend_agent2, backend_agent1, shared_agent]
|
||||
|
||||
Enum.each(agents, fn agent ->
|
||||
IO.puts(" - Registering agent: #{agent["name"]} (#{agent["codebase_id"]})")
|
||||
simulate_mcp_call("register_agent", agent)
|
||||
end)
|
||||
|
||||
IO.puts(" ✓ All agents registered successfully\n")
|
||||
end
|
||||
|
||||
def test_single_codebase_tasks do
|
||||
IO.puts("3. Testing Single Codebase Tasks")
|
||||
|
||||
tasks = [
|
||||
%{
|
||||
"title" => "Update user interface components",
|
||||
"description" => "Modernize the login and dashboard components",
|
||||
"codebase_id" => "frontend-app",
|
||||
"file_paths" => ["/src/components/Login.tsx", "/src/components/Dashboard.tsx"],
|
||||
"required_capabilities" => ["coding"],
|
||||
"priority" => "normal"
|
||||
},
|
||||
%{
|
||||
"title" => "Implement user authentication API",
|
||||
"description" => "Create secure user authentication endpoints",
|
||||
"codebase_id" => "backend-api",
|
||||
"file_paths" => ["/src/routes/auth.js", "/src/middleware/auth.js"],
|
||||
"required_capabilities" => ["coding", "testing"],
|
||||
"priority" => "high"
|
||||
},
|
||||
%{
|
||||
"title" => "Add utility functions for date handling",
|
||||
"description" => "Create reusable date utility functions",
|
||||
"codebase_id" => "shared-lib",
|
||||
"file_paths" => ["/src/utils/date.ts", "/src/types/date.ts"],
|
||||
"required_capabilities" => ["coding", "documentation"],
|
||||
"priority" => "normal"
|
||||
}
|
||||
]
|
||||
|
||||
Enum.each(tasks, fn task ->
|
||||
IO.puts(" - Creating task: #{task["title"]} (#{task["codebase_id"]})")
|
||||
simulate_mcp_call("create_task", task)
|
||||
end)
|
||||
|
||||
IO.puts(" ✓ All single-codebase tasks created successfully\n")
|
||||
end
|
||||
|
||||
def test_cross_codebase_tasks do
|
||||
IO.puts("4. Testing Cross-Codebase Tasks")
|
||||
|
||||
# Task that affects multiple codebases
|
||||
cross_codebase_task = %{
|
||||
"title" => "Implement real-time notifications feature",
|
||||
"description" => "Add real-time notifications across frontend and backend",
|
||||
"primary_codebase_id" => "backend-api",
|
||||
"affected_codebases" => ["backend-api", "frontend-app", "shared-lib"],
|
||||
"coordination_strategy" => "sequential"
|
||||
}
|
||||
|
||||
IO.puts(" - Creating cross-codebase task: #{cross_codebase_task["title"]}")
|
||||
IO.puts(" Primary: #{cross_codebase_task["primary_codebase_id"]}")
|
||||
IO.puts(" Affected: #{Enum.join(cross_codebase_task["affected_codebases"], ", ")}")
|
||||
|
||||
simulate_mcp_call("create_cross_codebase_task", cross_codebase_task)
|
||||
|
||||
# Another cross-codebase task with different strategy
|
||||
parallel_task = %{
|
||||
"title" => "Update shared types and interfaces",
|
||||
"description" => "Synchronize type definitions across all codebases",
|
||||
"primary_codebase_id" => "shared-lib",
|
||||
"affected_codebases" => ["shared-lib", "frontend-app", "backend-api"],
|
||||
"coordination_strategy" => "parallel"
|
||||
}
|
||||
|
||||
IO.puts(" - Creating parallel cross-codebase task: #{parallel_task["title"]}")
|
||||
simulate_mcp_call("create_cross_codebase_task", parallel_task)
|
||||
|
||||
IO.puts(" ✓ Cross-codebase tasks created successfully\n")
|
||||
end
|
||||
|
||||
def test_codebase_dependencies do
|
||||
IO.puts("5. Testing Codebase Dependencies")
|
||||
|
||||
dependencies = [
|
||||
%{
|
||||
"source_codebase_id" => "frontend-app",
|
||||
"target_codebase_id" => "backend-api",
|
||||
"dependency_type" => "api_consumption",
|
||||
"metadata" => %{"api_version" => "v1", "endpoints" => ["auth", "users", "notifications"]}
|
||||
},
|
||||
%{
|
||||
"source_codebase_id" => "frontend-app",
|
||||
"target_codebase_id" => "shared-lib",
|
||||
"dependency_type" => "library_import",
|
||||
"metadata" => %{"imports" => ["types", "utils", "constants"]}
|
||||
},
|
||||
%{
|
||||
"source_codebase_id" => "backend-api",
|
||||
"target_codebase_id" => "shared-lib",
|
||||
"dependency_type" => "library_import",
|
||||
"metadata" => %{"imports" => ["types", "validators"]}
|
||||
}
|
||||
]
|
||||
|
||||
Enum.each(dependencies, fn dep ->
|
||||
IO.puts(" - Adding dependency: #{dep["source_codebase_id"]} → #{dep["target_codebase_id"]} (#{dep["dependency_type"]})")
|
||||
simulate_mcp_call("add_codebase_dependency", dep)
|
||||
end)
|
||||
|
||||
IO.puts(" ✓ All codebase dependencies added successfully\n")
|
||||
end
|
||||
|
||||
def test_coordination_overview do
|
||||
IO.puts("6. Testing Coordination Overview")
|
||||
|
||||
IO.puts(" - Getting overall task board...")
|
||||
simulate_mcp_call("get_task_board", %{})
|
||||
|
||||
IO.puts(" - Getting frontend codebase status...")
|
||||
simulate_mcp_call("get_codebase_status", %{"codebase_id" => "frontend-app"})
|
||||
|
||||
IO.puts(" - Getting backend codebase status...")
|
||||
simulate_mcp_call("get_codebase_status", %{"codebase_id" => "backend-api"})
|
||||
|
||||
IO.puts(" - Listing all codebases...")
|
||||
simulate_mcp_call("list_codebases", %{})
|
||||
|
||||
IO.puts(" ✓ Coordination overview retrieved successfully\n")
|
||||
end
|
||||
|
||||
defp simulate_mcp_call(tool_name, arguments) do
|
||||
request = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => UUID.uuid4(),
|
||||
"method" => "tools/call",
|
||||
"params" => %{
|
||||
"name" => tool_name,
|
||||
"arguments" => arguments
|
||||
}
|
||||
}
|
||||
|
||||
# In a real implementation, this would make an actual MCP call
|
||||
# For now, we'll just show the structure
|
||||
IO.puts(" MCP Call: #{tool_name}")
|
||||
IO.puts(" Arguments: #{Jason.encode!(arguments, pretty: true) |> String.replace("\n", "\n ")}")
|
||||
|
||||
# Simulate successful response
|
||||
response = %{
|
||||
"jsonrpc" => "2.0",
|
||||
"id" => request["id"],
|
||||
"result" => %{
|
||||
"content" => [%{
|
||||
"type" => "text",
|
||||
"text" => Jason.encode!(%{"status" => "success", "tool" => tool_name})
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
IO.puts(" Response: success")
|
||||
end
|
||||
|
||||
def simulate_task_flow do
|
||||
IO.puts("\n=== Simulating Multi-Codebase Task Flow ===")
|
||||
|
||||
IO.puts("1. Cross-codebase task created:")
|
||||
IO.puts(" - Main task assigned to backend agent")
|
||||
IO.puts(" - Dependent task created for frontend")
|
||||
IO.puts(" - Dependent task created for shared library")
|
||||
|
||||
IO.puts("\n2. Agent coordination:")
|
||||
IO.puts(" - Backend agent starts implementation")
|
||||
IO.puts(" - Publishes API specification to NATS stream")
|
||||
IO.puts(" - Frontend agent receives notification")
|
||||
IO.puts(" - Shared library agent updates type definitions")
|
||||
|
||||
IO.puts("\n3. File conflict detection:")
|
||||
IO.puts(" - Frontend agent attempts to modify shared types")
|
||||
IO.puts(" - System detects conflict with shared-lib agent's work")
|
||||
IO.puts(" - Task is queued until shared-lib work completes")
|
||||
|
||||
IO.puts("\n4. Cross-codebase synchronization:")
|
||||
IO.puts(" - Shared-lib agent completes type updates")
|
||||
IO.puts(" - Frontend task is automatically unblocked")
|
||||
IO.puts(" - All agents coordinate through NATS streams")
|
||||
|
||||
IO.puts("\n5. Task completion:")
|
||||
IO.puts(" - All subtasks complete successfully")
|
||||
IO.puts(" - Cross-codebase dependencies resolved")
|
||||
IO.puts(" - Coordination system updates task board")
|
||||
end
|
||||
end
|
||||
|
||||
# Run the test
|
||||
MultiCodebaseTest.run()
|
||||
MultiCodebaseTest.simulate_task_flow()
|
||||
Reference in New Issue
Block a user