# Directory Structure

```
├── .gitignore
├── examples
│   ├── agents.yml
│   └── tasks.yml
├── pyproject.toml
├── README.md
├── src
│   └── mcp_crew_ai
│       ├── __main__.py
│       ├── __pycache__
│       │   └── server.cpython-311.pyc
│       ├── cli.py
│       ├── server_cmd.py
│       └── server.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# Virtual environments
venv/
env/
ENV/
.venv/
.env/
env.bak/
venv.bak/
.virtualenv/
.python-virtualenv/
Pipfile.lock

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# IDE specific files
.idea/
.vscode/
*.swp
*.swo
.DS_Store
Thumbs.db
*.sublime-project
*.sublime-workspace

# Poetry
poetry.lock

# dotenv
.env
.env.*

# pytest
pytest.ini
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
<div align="center">
  <img src="https://github.com/crewAIInc/crewAI/blob/main/docs/crewai_logo.png" alt="CrewAI Logo" />
</div>

# MCP Crew AI Server

MCP Crew AI Server is a lightweight Python-based server designed to run, manage and create CrewAI workflows. This project leverages the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) to connect LLM clients such as Claude Desktop or Cursor IDE to your crews, allowing you to orchestrate multi-agent workflows with ease.

## Features

- **Automatic Configuration:** Automatically loads agent and task configurations from two YAML files (`agents.yml` and `tasks.yml`), so you don't need to write custom code for basic setups.
- **Command Line Flexibility:** Pass custom paths to your configuration files via command line arguments (`--agents` and `--tasks`).
- **Seamless Workflow Execution:** Easily run pre-configured workflows through the MCP `kickoff` tool.
- **Local Development:** Run the server locally in STDIO mode, making it ideal for development and testing.

## Installation

There are several ways to install the MCP Crew AI server:

### Option 1: Install from PyPI (Recommended)

```bash
pip install mcp-crew-ai
```

### Option 2: Install from GitHub

```bash
pip install git+https://github.com/adam-paterson/mcp-crew-ai.git
```

### Option 3: Clone and Install

```bash
git clone https://github.com/adam-paterson/mcp-crew-ai.git
cd mcp-crew-ai
pip install -e .
```
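If you prefer to confirm the install from Python rather than the shell, a quick sketch like the one below mirrors what the CLI's `--version` flag does internally (the package name `mcp-crew-ai` comes from `pyproject.toml`; the printed version is just an example):

```python
# Optional sanity check: report the installed package version.
import importlib.metadata

try:
    print("MCP Crew AI v" + importlib.metadata.version("mcp-crew-ai"))  # e.g. "MCP Crew AI v0.1.0"
except importlib.metadata.PackageNotFoundError:
    print("mcp-crew-ai is not installed in this environment")
```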

### Requirements

- Python 3.11+
- MCP SDK
- CrewAI
- PyYAML

## Configuration

- **agents.yml:** Define your agents with roles, goals, and backstories.
- **tasks.yml:** Define tasks with descriptions and expected outputs, and assign them to agents.

**Example `agents.yml`:**

```yaml
zookeeper:
  role: Zookeeper
  goal: Manage zoo operations
  backstory: >
    You are a seasoned zookeeper with a passion for wildlife conservation...
```

**Example `tasks.yml`:**

```yaml
write_stories:
  description: >
    Write an engaging zoo update capturing the day's highlights.
  expected_output: 5 engaging stories
  agent: zookeeper
  output_file: zoo_report.md
```

## Usage

Once installed, you can run the MCP CrewAI server using either of these methods:

### Standard Python Command

```bash
mcp-crew-ai --agents path/to/agents.yml --tasks path/to/tasks.yml
```

### Using UV Execution (uvx)

For a more streamlined experience, you can use the UV execution command:

```bash
uvx mcp-crew-ai --agents path/to/agents.yml --tasks path/to/tasks.yml
```

Or run just the server directly:

```bash
uvx mcp-crew-ai-server
```

This will start the server using the default configuration from environment variables.

### Command Line Options

- `--agents`: Path to the agents YAML file (required)
- `--tasks`: Path to the tasks YAML file (required)
- `--topic`: The main topic for the crew to work on (default: "Artificial Intelligence")
- `--process`: Process type to use (choices: "sequential" or "hierarchical", default: "sequential")
- `--verbose`: Enable verbose output
- `--variables`: JSON string or path to JSON file with additional variables to replace in YAML files
- `--version`: Show version information and exit

### Advanced Usage

You can also provide additional variables to be used in your YAML templates:

```bash
mcp-crew-ai --agents examples/agents.yml --tasks examples/tasks.yml --topic "Machine Learning" --variables '{"year": 2025, "focus": "deep learning"}'
```

These variables will replace placeholders in your YAML files. For example, `{topic}` will be replaced with "Machine Learning" and `{year}` with "2025".

## Contributing

Contributions are welcome! Please open issues or submit pull requests with improvements, bug fixes, or new features.

## Licence

This project is licensed under the MIT Licence. See the LICENSE file for details.

Happy workflow orchestration!
```

--------------------------------------------------------------------------------
/src/mcp_crew_ai/__main__.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
MCP Crew AI - Main module entry point

Allows running the module directly with: python -m mcp_crew_ai
"""

from mcp_crew_ai.cli import main

if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/src/mcp_crew_ai/server_cmd.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
MCP Crew AI Server - Standalone executable

This module provides a direct command-line interface to run the server via uvx.
"""

from mcp_crew_ai.server import main

if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/examples/tasks.yml:
--------------------------------------------------------------------------------

```yaml
research_task:
  description: >
    Conduct thorough research about {topic}.
    Make sure you find any interesting and relevant information,
    given that the current year is 2025.
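  # {topic} is a template placeholder; it is filled in at runtime from --topic / --variables (see cli.py).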
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
  agent: reporting_analyst
  output_file: report.md
```

--------------------------------------------------------------------------------
/examples/agents.yml:
--------------------------------------------------------------------------------

```yaml
researcher:
  role: >
    {topic} Senior Data Researcher
  goal: >
    Uncover cutting-edge developments in {topic}
  backstory: >
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  goal: >
    Create detailed reports based on {topic} data analysis and research findings
  backstory: >
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
[project]
name = "mcp-crew-ai"
version = "0.1.0"
description = "MCP Crew AI Server - Run CrewAI agents through Model Context Protocol"
readme = "README.md"
authors = [
    { name = "adam.paterson", email = "[email protected]" }
]
requires-python = ">=3.11"
dependencies = [
    "mcp[cli]>=1.3.0",
    "crewai>=0.8.0",
    "pyyaml>=6.0",
    "importlib-metadata>=6.0.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/mcp_crew_ai"]

[project.scripts]
mcp-crew-ai = "mcp_crew_ai.cli:main"
mcp-crew-ai-server = "mcp_crew_ai.server_cmd:main"

[project.entry-points.uv]
mcp-crew-ai = "mcp_crew_ai.cli:main"
mcp-crew-ai-server = "mcp_crew_ai.server_cmd:main"

[project.urls]
Homepage = "https://github.com/adam-paterson/mcp-crew-ai"
Repository = "https://github.com/adam-paterson/mcp-crew-ai"
Issues = "https://github.com/adam-paterson/mcp-crew-ai/issues"
```

--------------------------------------------------------------------------------
/src/mcp_crew_ai/cli.py:
--------------------------------------------------------------------------------

```python
import os
import argparse
import yaml
import tempfile
import json
import subprocess
import sys
import logging
from pathlib import Path
from typing import Dict, Any, Optional, List, Callable
import importlib.metadata

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("crew_ai_server.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("mcp_crew_ai")


def main():
    """
    Main entry point for the MCP Crew AI CLI.

    Parses command line arguments and starts an MCP server with the specified configuration.
    """
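    # Illustrative invocation (values are placeholders; see the README for details):
    #   mcp-crew-ai --agents examples/agents.yml --tasks examples/tasks.yml \
    #       --topic "Machine Learning" --variables '{"year": 2025, "focus": "deep learning"}'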
""" parser = argparse.ArgumentParser(description='MCP Crew AI - Run CrewAI agents through MCP') parser.add_argument('--agents', type=str, help='Path to agents YAML file') parser.add_argument('--tasks', type=str, help='Path to tasks YAML file') parser.add_argument('--topic', type=str, default='Artificial Intelligence', help='The main topic for the crew to work on') parser.add_argument('--process', type=str, default='sequential', choices=['sequential', 'hierarchical'], help='Process type: sequential or hierarchical') parser.add_argument('--verbose', action='store_true', help='Enable verbose output') parser.add_argument('--variables', type=str, help='JSON string or path to JSON file with variables to replace in YAML files') parser.add_argument('--version', action='store_true', help='Show version and exit') args = parser.parse_args() # Show version and exit if requested if args.version: try: version = importlib.metadata.version("mcp-crew-ai") print(f"MCP Crew AI v{version}") except importlib.metadata.PackageNotFoundError: print("MCP Crew AI (development version)") return # Get version for MCP_CREW_VERSION environment variable try: version = importlib.metadata.version("mcp-crew-ai") except importlib.metadata.PackageNotFoundError: version = "0.1.0" # Process YAML file paths agents_path = args.agents tasks_path = args.tasks if not agents_path or not tasks_path: logger.error("Both --agents and --tasks arguments are required. Use --help for more information.") sys.exit(1) # Validate that the files exist agents_file = Path(agents_path) tasks_file = Path(tasks_path) if not agents_file.exists(): logger.error(f"Agents file not found: {agents_path}") sys.exit(1) if not tasks_file.exists(): logger.error(f"Tasks file not found: {tasks_path}") sys.exit(1) # Process variables if provided variables = {} if args.variables: if os.path.isfile(args.variables): with open(args.variables, 'r') as f: variables = json.load(f) else: try: variables = json.loads(args.variables) except json.JSONDecodeError: logger.warning(f"Could not parse variables as JSON: {args.variables}") # Add topic to variables variables['topic'] = args.topic logger.info(f"Starting MCP Crew AI server with:") logger.info(f"- Agents file: {agents_file}") logger.info(f"- Tasks file: {tasks_file}") logger.info(f"- Topic: {args.topic}") logger.info(f"- Process type: {args.process}") # Set environment variables for the server to use os.environ["MCP_CREW_AGENTS_FILE"] = str(agents_file.absolute()) os.environ["MCP_CREW_TASKS_FILE"] = str(tasks_file.absolute()) os.environ["MCP_CREW_TOPIC"] = args.topic os.environ["MCP_CREW_PROCESS"] = args.process os.environ["MCP_CREW_VERBOSE"] = "1" if args.verbose else "0" os.environ["MCP_CREW_VERSION"] = version if variables: os.environ["MCP_CREW_VARIABLES"] = json.dumps(variables) # Build MCP command to run the server server_module = os.path.join(os.path.dirname(__file__), "server.py") cmd = ["mcp", "dev", server_module] logger.info(f"Executing: {' '.join(cmd)}") try: # Run the MCP server subprocess.run(cmd) except KeyboardInterrupt: logger.info("Server stopped by user") except Exception as e: logger.error(f"Error running MCP server: {e}") sys.exit(1) def load_yaml_with_variables(file_path: Path, variables: Dict[str, Any]) -> Dict[str, Any]: """Load YAML and replace variables in memory""" if not file_path.exists(): logger.error(f"File not found: {file_path}") return {} try: with open(file_path, 'r') as file: content = file.read() # Replace all variables in the content for key, value in variables.items(): placeholder = 
        # Parse the YAML content
        yaml_content = yaml.safe_load(content) or {}
        return yaml_content
    except Exception as e:
        logger.error(f"Error loading YAML file {file_path}: {e}")
        return {}


if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/src/mcp_crew_ai/server.py:
--------------------------------------------------------------------------------

```python
from mcp.server.fastmcp import FastMCP
from crewai import Crew, Agent, Task, Process
import yaml
import os
import sys
import io
import contextlib
import json
import argparse
import logging
from pathlib import Path

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("crew_ai_server.log"),
        logging.StreamHandler(sys.stderr)
    ]
)
logger = logging.getLogger("mcp_crew_ai_server")

# Initialize server at module level
server = None


@contextlib.contextmanager
def capture_output():
    """Capture stdout and stderr."""
    new_out, new_err = io.StringIO(), io.StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield new_out, new_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err


def format_output(output):
    """Format the output to make it more readable."""
    # Split by lines and filter out LiteLLM log lines
    lines = output.split('\n')
    filtered_lines = [line for line in lines if not line.strip().startswith('[') or 'LiteLLM' not in line]

    # Join the filtered lines back together
    return '\n'.join(filtered_lines)
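# Illustrative tool call an MCP client might issue once the server is running
# (argument values are placeholders; the referenced files live in examples/):
#   kickoff(
#       agents_file="examples/agents.yml",
#       tasks_file="examples/tasks.yml",
#       topic="Machine Learning",
#       additional_context={"year": 2025},
#   )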
def kickoff(
    agents_file: str = None,
    tasks_file: str = None,
    topic: str = None,
    additional_context: dict = None
):
    """
    Execute a CrewAI workflow using YAML configuration files.

    Args:
        agents_file: Optional path to override the default agents YAML file
        tasks_file: Optional path to override the default tasks YAML file
        topic: The main topic for the crew to work on
        additional_context: Additional context variables for template formatting

    Returns:
        The results from the crew execution
    """
    logger.info(f"Tool kickoff called with: agents_file={agents_file}, tasks_file={tasks_file}, topic={topic}")

    # Use default paths if none provided
    agents_path = agents_file if agents_file else str(agents_yaml_path)
    tasks_path = tasks_file if tasks_file else str(tasks_yaml_path)

    # Use provided topic or default from environment variable
    current_topic = topic if topic else os.environ.get("MCP_CREW_TOPIC", "Artificial Intelligence")

    logger.info(f"Using agents file: {agents_path}")
    logger.info(f"Using tasks file: {tasks_path}")
    logger.info(f"Using topic: {current_topic}")

    # Check if files exist
    if not os.path.exists(agents_path):
        logger.error(f"Agent file not found: {agents_path}")
        return {"error": f"Agent file not found: {agents_path}"}

    if not os.path.exists(tasks_path):
        logger.error(f"Task file not found: {tasks_path}")
        return {"error": f"Task file not found: {tasks_path}"}

    # Template variables
    current_variables = {"topic": current_topic}

    # Add additional context if provided
    if additional_context:
        current_variables.update(additional_context)

    # Also add variables from command line if they exist
    if variables:
        # Don't overwrite explicit variables with command line ones
        for key, value in variables.items():
            if key not in current_variables:
                current_variables[key] = value

    logger.info(f"Template variables: {current_variables}")

    # Load agent configurations
    try:
        with open(agents_path, 'r') as f:
            agents_data = yaml.safe_load(f)
        logger.info(f"Loaded agents data: {list(agents_data.keys())}")
    except Exception as e:
        logger.error(f"Error loading agents file: {str(e)}")
        return {"error": f"Error loading agents file: {str(e)}"}

    # Create agents
    agents_dict = {}
    for name, config in agents_data.items():
        try:
            # Format template strings in config
            role = config.get("role", "")
            goal = config.get("goal", "")
            backstory = config.get("backstory", "")

            # Format with variables if they contain placeholders
            if "{" in role:
                role = role.format(**current_variables)
            if "{" in goal:
                goal = goal.format(**current_variables)
            if "{" in backstory:
                backstory = backstory.format(**current_variables)

            logger.info(f"Creating agent: {name}")
            agents_dict[name] = Agent(
                name=name,
                role=role,
                goal=goal,
                backstory=backstory,
                verbose=verbose,
                allow_delegation=True
            )
        except Exception as e:
            logger.error(f"Error creating agent {name}: {str(e)}")
            return {"error": f"Error creating agent {name}: {str(e)}"}

    # Load task configurations
    try:
        with open(tasks_path, 'r') as f:
            tasks_data = yaml.safe_load(f)
        logger.info(f"Loaded tasks data: {list(tasks_data.keys())}")
    except Exception as e:
        logger.error(f"Error loading tasks file: {str(e)}")
        return {"error": f"Error loading tasks file: {str(e)}"}

    # Create tasks
    tasks_list = []
    for name, config in tasks_data.items():
        try:
            description = config.get("description", "")
            expected_output = config.get("expected_output", "")
            agent_name = config.get("agent")

            # Format with variables if they contain placeholders
            if "{" in description:
                description = description.format(**current_variables)
            if "{" in expected_output:
                expected_output = expected_output.format(**current_variables)

            if not agent_name or agent_name not in agents_dict:
                logger.error(f"Task {name} has invalid agent: {agent_name}")
                logger.error(f"Available agents: {list(agents_dict.keys())}")
                return {"error": f"Task {name} has invalid agent: {agent_name}"}
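            # A well-formed task entry therefore looks like this (cf. examples/tasks.yml):
            #   research_task:
            #     description: "Conduct thorough research about {topic} ..."
            #     expected_output: "A list with 10 bullet points ..."
            #     agent: researcher        # must match a key in agents.yml
            #     output_file: report.md   # optional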
            logger.info(f"Creating task: {name} for agent: {agent_name}")
            task = Task(
                description=description,
                expected_output=expected_output,
                agent=agents_dict[agent_name]
            )

            # Optional output file
            output_file = config.get("output_file")
            if output_file:
                task.output_file = output_file

            tasks_list.append(task)
        except Exception as e:
            logger.error(f"Error creating task {name}: {str(e)}")
            return {"error": f"Error creating task {name}: {str(e)}"}

    # Create the crew
    logger.info("Creating crew")
    logger.info(f"Number of agents: {len(agents_dict)}")
    logger.info(f"Number of tasks: {len(tasks_list)}")

    # Check if we have agents and tasks
    if not agents_dict:
        logger.error("No agents were created")
        return {"error": "No agents were created"}

    if not tasks_list:
        logger.error("No tasks were created")
        return {"error": "No tasks were created"}

    try:
        crew = Crew(
            agents=list(agents_dict.values()),
            tasks=tasks_list,
            verbose=verbose,
            process=process_type
        )
        logger.info("Crew created successfully")
    except Exception as e:
        logger.error(f"Error creating crew: {str(e)}")
        return {"error": f"Error creating crew: {str(e)}"}

    # Execute the crew with captured output
    try:
        logger.info("Starting crew kickoff with captured output")
        with capture_output() as (out, err):
            result = crew.kickoff()

        # Get the captured output
        stdout_content = out.getvalue()
        stderr_content = err.getvalue()

        # Format the output to make it more readable
        formatted_stdout = format_output(stdout_content)
        formatted_stderr = format_output(stderr_content)

        logger.info("Crew kickoff completed successfully")

        # Convert result to string if it's not a simple type
        if not isinstance(result, (str, int, float, bool, list, dict)) and result is not None:
            logger.info(f"Converting result of type {type(result)} to string")
            result = str(result)

        # Create a structured response with the agent outputs
        response = {
            "result": result,
            "agent_outputs": formatted_stdout,
            "errors": formatted_stderr if formatted_stderr.strip() else None
        }
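        # Illustrative shape of the returned payload (values are placeholders):
        #   {
        #       "result": "<final output of the crew>",
        #       "agent_outputs": "<captured stdout with LiteLLM log lines filtered out>",
        #       "errors": None,
        #   }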
        # Log a sample of the output for debugging
        if formatted_stdout:
            sample = formatted_stdout[:500] + "..." if len(formatted_stdout) > 500 else formatted_stdout
            logger.info(f"Sample of agent outputs: {sample}")

        return response
    except Exception as e:
        logger.error(f"Error in crew kickoff: {str(e)}")
        return {"error": f"Error in crew kickoff: {str(e)}"}


def initialize():
    """Initialize the server with configuration from environment variables."""
    global server, agents_yaml_path, tasks_yaml_path, topic, process_type_str, verbose, variables_json, variables, process_type

    # Log startup
    logger.info("Starting Crew AI Server")

    # Create FastMCP server
    server = FastMCP("Crew AI Server", version=os.environ.get("MCP_CREW_VERSION", "0.1.0"))

    # Get configuration from environment variables
    agents_yaml_path = os.environ.get("MCP_CREW_AGENTS_FILE", "")
    tasks_yaml_path = os.environ.get("MCP_CREW_TASKS_FILE", "")
    topic = os.environ.get("MCP_CREW_TOPIC", "Artificial Intelligence")
    process_type_str = os.environ.get("MCP_CREW_PROCESS", "sequential")
    verbose = os.environ.get("MCP_CREW_VERBOSE", "0") == "1"
    variables_json = os.environ.get("MCP_CREW_VARIABLES", "")

    # Define fallback paths
    if not agents_yaml_path or not tasks_yaml_path:
        current_dir = Path(os.path.dirname(os.path.abspath(__file__)))
        project_root = current_dir.parent.parent
        examples_dir = project_root / "examples"

        if not agents_yaml_path:
            agents_yaml_path = str(examples_dir / "agents.yml")
        if not tasks_yaml_path:
            tasks_yaml_path = str(examples_dir / "tasks.yml")

    # Convert paths to Path objects
    agents_yaml_path = Path(agents_yaml_path)
    tasks_yaml_path = Path(tasks_yaml_path)

    logger.info(f"Agents YAML path: {agents_yaml_path} (exists: {agents_yaml_path.exists()})")
    logger.info(f"Tasks YAML path: {tasks_yaml_path} (exists: {tasks_yaml_path.exists()})")
    logger.info(f"Topic: {topic}")
    logger.info(f"Process type: {process_type_str}")
    logger.info(f"Verbose: {verbose}")

    # Parse variables
    variables = {"topic": topic}
    if variables_json:
        try:
            additional_vars = json.loads(variables_json)
            variables.update(additional_vars)
            logger.info(f"Loaded additional variables: {list(additional_vars.keys())}")
        except json.JSONDecodeError:
            logger.warning(f"Could not parse variables JSON: {variables_json}")

    logger.info(f"Template variables: {variables}")

    # Set process type
    process_type = Process.sequential
    if process_type_str.lower() == 'hierarchical':
        process_type = Process.hierarchical
        logger.info("Using hierarchical process")

    # Register the kickoff tool
    server.tool()(kickoff)

    return server


def main():
    """Run the MCP server as a standalone application."""
    server = initialize()
    # Start the FastMCP server; this runs both for `python server.py` and for the
    # mcp-crew-ai-server console script, which imports this module under its own name.
    server.run()
    return server


# Initialize server when module is imported (e.g. when loaded by `mcp dev`)
initialize()
```