# arthurcolle/openai-mcp
This is page 6 of 6. Use http://codebase.md/arthurcolle/openai-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .gitignore
├── claude_code
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-312.pyc
│   │   └── mcp_server.cpython-312.pyc
│   ├── claude.py
│   ├── commands
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   ├── __init__.cpython-312.pyc
│   │   │   └── serve.cpython-312.pyc
│   │   ├── client.py
│   │   ├── multi_agent_client.py
│   │   └── serve.py
│   ├── config
│   │   └── __init__.py
│   ├── examples
│   │   ├── agents_config.json
│   │   ├── claude_mcp_config.html
│   │   ├── claude_mcp_config.json
│   │   ├── echo_server.py
│   │   └── README.md
│   ├── lib
│   │   ├── __init__.py
│   │   ├── __pycache__
│   │   │   └── __init__.cpython-312.pyc
│   │   ├── context
│   │   │   └── __init__.py
│   │   ├── monitoring
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-312.pyc
│   │   │   │   └── server_metrics.cpython-312.pyc
│   │   │   ├── cost_tracker.py
│   │   │   └── server_metrics.py
│   │   ├── providers
│   │   │   ├── __init__.py
│   │   │   ├── base.py
│   │   │   └── openai.py
│   │   ├── rl
│   │   │   ├── __init__.py
│   │   │   ├── grpo.py
│   │   │   ├── mcts.py
│   │   │   └── tool_optimizer.py
│   │   ├── tools
│   │   │   ├── __init__.py
│   │   │   ├── __pycache__
│   │   │   │   ├── __init__.cpython-312.pyc
│   │   │   │   ├── base.cpython-312.pyc
│   │   │   │   ├── file_tools.cpython-312.pyc
│   │   │   │   └── manager.cpython-312.pyc
│   │   │   ├── ai_tools.py
│   │   │   ├── base.py
│   │   │   ├── code_tools.py
│   │   │   ├── file_tools.py
│   │   │   ├── manager.py
│   │   │   └── search_tools.py
│   │   └── ui
│   │       ├── __init__.py
│   │       └── tool_visualizer.py
│   ├── mcp_server.py
│   ├── README_MCP_CLIENT.md
│   ├── README_MULTI_AGENT.md
│   └── util
│       └── __init__.py
├── claude.py
├── cli.py
├── data
│   └── prompt_templates.json
├── deploy_modal_mcp.py
├── deploy.sh
├── examples
│   ├── agents_config.json
│   └── echo_server.py
├── install.sh
├── mcp_modal_adapter.py
├── mcp_server.py
├── modal_mcp_server.py
├── README_modal_mcp.md
├── README.md
├── requirements.txt
├── setup.py
├── static
│   └── style.css
├── templates
│   └── index.html
└── web-client.html
```

# Files

--------------------------------------------------------------------------------
/cli.py:
--------------------------------------------------------------------------------

```python
   1 | #!/usr/bin/env python3
   2 | # TODO: Refactor into modular structure similar to Claude Code (lib/, commands/, tools/ directories)
   3 | # TODO: Add support for multiple LLM providers (Azure OpenAI, Anthropic, etc.)
   4 | # TODO: Implement telemetry and usage tracking (optional, with consent)
   5 | import os
   6 | import sys
   7 | import json
   8 | import typer
   9 | from rich.console import Console
  10 | from rich.markdown import Markdown
  11 | from rich.prompt import Prompt
  12 | from rich.panel import Panel
  13 | from rich.progress import Progress
  14 | from rich.syntax import Syntax
  15 | from rich.live import Live
  16 | from rich.layout import Layout
  17 | from rich.table import Table
  18 | from openai import OpenAI
  19 | from pydantic import BaseModel, Field
  20 | from typing import List, Dict, Any, Optional, Union, Callable
  21 | import asyncio
  22 | import concurrent.futures
  23 | from dotenv import load_dotenv
  24 | import time
  25 | import re
  26 | import traceback
  27 | import requests
  28 | import urllib.parse
  29 | from uuid import uuid4
  30 | import socket
  31 | import threading
  32 | import multiprocessing
  33 | import pickle
  34 | import hashlib
  35 | import logging
  36 | import fastapi
  37 | import uvicorn
  38 | from fastapi import FastAPI, HTTPException, Depends, Request, BackgroundTasks
  39 | from fastapi.responses import JSONResponse, StreamingResponse
  40 | from fastapi.middleware.cors import CORSMiddleware
  41 | 
  42 | # Jina.ai client for search, fact-checking, and web reading
  43 | class JinaClient:
  44 |     """Client for interacting with Jina.ai endpoints"""
  45 |     
  46 |     def __init__(self, token: Optional[str] = None):
  47 |         """Initialize with your Jina token"""
  48 |         self.token = token or os.getenv("JINA_API_KEY", "")
  49 |         
  50 |         self.headers = {
  51 |             "Authorization": f"Bearer {self.token}",
  52 |             "Content-Type": "application/json"
  53 |         }
  54 |     
  55 |     def search(self, query: str) -> dict:
  56 |         """
  57 |         Search using s.jina.ai endpoint
  58 |         Args:
  59 |             query: Search term
  60 |         Returns:
  61 |             API response as dict
  62 |         """
  63 |         encoded_query = urllib.parse.quote(query)
  64 |         url = f"https://s.jina.ai/{encoded_query}"
  65 |         response = requests.get(url, headers=self.headers)
  66 |         return response.json()
  67 |     
  68 |     def fact_check(self, query: str) -> dict:
  69 |         """
  70 |         Get grounding info using g.jina.ai endpoint
  71 |         Args:
  72 |             query: Query to ground
  73 |         Returns:
  74 |             API response as dict
  75 |         """
  76 |         encoded_query = urllib.parse.quote(query)
  77 |         url = f"https://g.jina.ai/{encoded_query}"
  78 |         response = requests.get(url, headers=self.headers)
  79 |         return response.json()
  80 |         
  81 |     def reader(self, url: str) -> dict:
  82 |         """
  83 |         Read a web page using the r.jina.ai reader endpoint
  84 |         Args:
  85 |             url: URL to read
  86 |         Returns:
  87 |             API response as dict
  88 |         """
  89 |         encoded_url = urllib.parse.quote(url)
  90 |         url = f"https://r.jina.ai/{encoded_url}"
  91 |         response = requests.get(url, headers=self.headers)
  92 |         return response.json()
  93 | 
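# Illustrative usage of JinaClient (hypothetical values; assumes JINA_API_KEY
# is set in the environment). The three helpers map one-to-one onto Jina's
# endpoints:
#
#     client = JinaClient()
#     hits = client.search("model context protocol")         # s.jina.ai
#     check = client.fact_check("Python 3.12 was released")  # g.jina.ai
#     page = client.reader("https://example.com")            # r.jina.ai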
  94 | # Check if RL tools are available
  95 | HAVE_RL_TOOLS = False
  96 | try:
  97 |     # This is a placeholder for the actual import that would be used
  98 |     from tool_optimizer import ToolSelectionManager
  99 |     # If the import succeeds, set HAVE_RL_TOOLS to True
 100 |     HAVE_RL_TOOLS = True
 101 | except ImportError:
 102 |     # RL tools not available
 103 |     # Define a dummy ToolSelectionManager to avoid NameError
 104 |     class ToolSelectionManager:
 105 |         def __init__(self, **kwargs):
 106 |             self.optimizer = None
 107 |             self.data_dir = kwargs.get('data_dir', '')
 108 |             
 109 |         def record_tool_usage(self, **kwargs):
 110 |             pass
 111 | 
 112 | # Load environment variables
 113 | load_dotenv()
 114 | 
 115 | # TODO: Add update checking similar to Claude Code's auto-update functionality
 116 | # TODO: Add configuration file support to store settings beyond environment variables
 117 | 
 118 | app = typer.Typer(help="OpenAI Code Assistant CLI")
 119 | console = Console()
 120 | 
 121 | # Global Constants
 122 | # TODO: Move these to a config file
 123 | DEFAULT_MODEL = "gpt-4o"
 124 | DEFAULT_TEMPERATURE = 0
 125 | MAX_TOKENS = 4096
 126 | TOKEN_LIMIT_WARNING = 0.8  # Warn when 80% of token limit is reached
 127 | 
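# Sketch of how TOKEN_LIMIT_WARNING could be applied (assumed intent; the
# constant is defined but not otherwise used in this file). context_window
# here is a hypothetical per-model limit:
#
#     if token_usage["total"] > TOKEN_LIMIT_WARNING * context_window:
#         console.print("[yellow]Warning: nearing the token limit[/yellow]")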
 128 | # Models
 129 | # TODO: Implement more sophisticated schema validation similar to Zod in the original
 130 | # TODO: Add permission system for tools that require user approval
 131 | 
 132 | class ToolParameter(BaseModel):
 133 |     name: str
 134 |     description: str
 135 |     type: str
 136 |     required: bool = False
 137 | 
 138 | class Tool(BaseModel):
 139 |     name: str
 140 |     description: str
 141 |     parameters: Dict[str, Any]
 142 |     function: Callable
 143 |     # TODO: Add needs_permission flag for sensitive operations
 144 |     # TODO: Add category for organizing tools (file, search, etc.)
 145 | 
 146 | class Message(BaseModel):
 147 |     role: str
 148 |     content: Optional[str] = None
 149 |     tool_calls: Optional[List[Dict[str, Any]]] = None
 150 |     tool_call_id: Optional[str] = None
 151 |     name: Optional[str] = None
 152 |     # TODO: Add timestamp for message tracking
 153 |     # TODO: Add token count for better context management
 154 | 
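# Illustrative message shapes for a tool-call round trip in the OpenAI chat
# format this CLI targets (ids and arguments are hypothetical):
#
#     {"role": "assistant", "tool_calls": [{"id": "call_1", "type": "function",
#      "function": {"name": "LS", "arguments": "{\"path\": \"/tmp\"}"}}]}
#     {"role": "tool", "tool_call_id": "call_1", "name": "LS", "content": "..."}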
 155 | class Conversation:
 156 |     def __init__(self):
 157 |         self.messages = []
 158 |         # TODO: Implement retry logic with exponential backoff for API calls
 159 |         # TODO: Add support for multiple LLM providers
 160 |         self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
 161 |         self.model = os.getenv("OPENAI_MODEL", DEFAULT_MODEL)
 162 |         self.temperature = float(os.getenv("OPENAI_TEMPERATURE", DEFAULT_TEMPERATURE))
 163 |         self.tools = self._register_tools()
 164 |         self.tool_map = {tool.name: tool.function for tool in self.tools}
 165 |         self.conversation_id = str(uuid4())
 166 |         self.session_start_time = time.time()
 167 |         self.token_usage = {"prompt": 0, "completion": 0, "total": 0}
 168 |         self.verbose = False
 169 |         self.max_tool_iterations = int(os.getenv("MAX_TOOL_ITERATIONS", "10"))
 170 |         
 171 |         # Initialize tool selection optimizer if available
 172 |         self.tool_optimizer = None
 173 |         if HAVE_RL_TOOLS:
 174 |             try:
 175 |                 # Create a simple tool registry adapter for the optimizer
 176 |                 class ToolRegistryAdapter:
 177 |                     def __init__(self, tools):
 178 |                         self.tools = tools
 179 |                 
 180 |                     def get_all_tools(self):
 181 |                         return self.tools
 182 |                 
 183 |                     def get_all_tool_names(self):
 184 |                         return [tool.name for tool in self.tools]
 185 |             
 186 |                 # Initialize the tool selection manager
 187 |                 self.tool_optimizer = ToolSelectionManager(
 188 |                     tool_registry=ToolRegistryAdapter(self.tools),
 189 |                     enable_optimization=os.getenv("ENABLE_TOOL_OPTIMIZATION", "1") == "1",
 190 |                     data_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/rl")
 191 |                 )
 192 |                 if self.verbose:
 193 |                     print("Tool selection optimization enabled")
 194 |             except Exception as e:
 195 |                 print(f"Warning: Failed to initialize tool optimizer: {e}")
 196 |         # TODO: Implement context window management
 197 |         
 198 |     # Jina.ai helpers: client initialization, search, fact-check, and URL reading
 199 |     def _init_jina_client(self):
 200 |         """Initialize the Jina.ai client"""
 201 |         token = os.getenv("JINA_API_KEY", "")
 202 |         return JinaClient(token)
 203 |     
 204 |     def _jina_search(self, query: str) -> str:
 205 |         """Search the web using Jina.ai"""
 206 |         try:
 207 |             client = self._init_jina_client()
 208 |             results = client.search(query)
 209 |             
 210 |             if not results or not isinstance(results, dict):
 211 |                 return f"No search results found for '{query}'"
 212 |             
 213 |             # Format the results
 214 |             formatted_results = "Search Results:\n\n"
 215 |             
 216 |             if "results" in results and isinstance(results["results"], list):
 217 |                 for i, result in enumerate(results["results"], 1):
 218 |                     title = result.get("title", "No title")
 219 |                     url = result.get("url", "No URL")
 220 |                     snippet = result.get("snippet", "No snippet")
 221 |                     
 222 |                     formatted_results += f"{i}. {title}\n"
 223 |                     formatted_results += f"   URL: {url}\n"
 224 |                     formatted_results += f"   {snippet}\n\n"
 225 |             else:
 226 |                 formatted_results += "Unexpected response format. Raw data:\n"
 227 |                 formatted_results += json.dumps(results, indent=2)[:1000]
 228 |                 
 229 |             return formatted_results
 230 |         except Exception as e:
 231 |             return f"Error performing search: {str(e)}"
 232 |     
 233 |     def _jina_fact_check(self, statement: str) -> str:
 234 |         """Fact check a statement using Jina.ai"""
 235 |         try:
 236 |             client = self._init_jina_client()
 237 |             results = client.fact_check(statement)
 238 |             
 239 |             if not results or not isinstance(results, dict):
 240 |                 return f"No fact-checking results for '{statement}'"
 241 |             
 242 |             # Format the results
 243 |             formatted_results = "Fact Check Results:\n\n"
 244 |             formatted_results += f"Statement: {statement}\n\n"
 245 |             
 246 |             if "grounding" in results:
 247 |                 grounding = results["grounding"]
 248 |                 verdict = grounding.get("verdict", "Unknown")
 249 |                 confidence = grounding.get("confidence", 0)
 250 |                 
 251 |                 formatted_results += f"Verdict: {verdict}\n"
 252 |                 formatted_results += f"Confidence: {confidence:.2f}\n\n"
 253 |                 
 254 |                 if "sources" in grounding and isinstance(grounding["sources"], list):
 255 |                     formatted_results += "Sources:\n"
 256 |                     for i, source in enumerate(grounding["sources"], 1):
 257 |                         title = source.get("title", "No title")
 258 |                         url = source.get("url", "No URL")
 259 |                         formatted_results += f"{i}. {title}\n   {url}\n\n"
 260 |             else:
 261 |                 formatted_results += "Unexpected response format. Raw data:\n"
 262 |                 formatted_results += json.dumps(results, indent=2)[:1000]
 263 |                 
 264 |             return formatted_results
 265 |         except Exception as e:
 266 |             return f"Error performing fact check: {str(e)}"
 267 |     
 268 |     def _jina_read_url(self, url: str) -> str:
 269 |         """Read and summarize a webpage using Jina.ai"""
 270 |         try:
 271 |             client = self._init_jina_client()
 272 |             results = client.reader(url)
 273 |             
 274 |             if not results or not isinstance(results, dict):
 275 |                 return f"No reading results for URL '{url}'"
 276 |             
 277 |             # Format the results
 278 |             formatted_results = f"Web Page Summary: {url}\n\n"
 279 |             
 280 |             if "content" in results:
 281 |                 content = results["content"]
 282 |                 title = content.get("title", "No title")
 283 |                 summary = content.get("summary", "No summary available")
 284 |                 
 285 |                 formatted_results += f"Title: {title}\n\n"
 286 |                 formatted_results += f"Summary:\n{summary}\n\n"
 287 |                 
 288 |                 if "keyPoints" in content and isinstance(content["keyPoints"], list):
 289 |                     formatted_results += "Key Points:\n"
 290 |                     for i, point in enumerate(content["keyPoints"], 1):
 291 |                         formatted_results += f"{i}. {point}\n"
 292 |             else:
 293 |                 formatted_results += "Unexpected response format. Raw data:\n"
 294 |                 formatted_results += json.dumps(results, indent=2)[:1000]
 295 |                 
 296 |             return formatted_results
 297 |         except Exception as e:
 298 |             return f"Error reading URL: {str(e)}"
 299 |     
 300 |     def _register_tools(self) -> List[Tool]:
 301 |         # TODO: Modularize tools into separate files
 302 |         # TODO: Implement Tool decorators for easier registration
 303 |         # TODO: Add more tools similar to Claude Code (ReadNotebook, NotebookEditCell, etc.)
 304 |         
 305 |         # Define and register all tools
 306 |         tools = [
 307 |             Tool(
 308 |                 name="Weather",
 309 |                 description="Gets the current weather for a location",
 310 |                 parameters={
 311 |                     "type": "object",
 312 |                     "properties": {
 313 |                         "location": {
 314 |                             "type": "string",
 315 |                             "description": "The city and optional state/country (e.g., 'San Francisco, CA' or 'London, UK')"
 316 |                         }
 317 |                     },
 318 |                     "required": ["location"]
 319 |                 },
 320 |                 function=self._get_weather
 321 |             ),
 322 |             Tool(
 323 |                 name="View",
 324 |                 description="Reads a file from the local filesystem. The file_path parameter must be an absolute path, not a relative path.",
 325 |                 parameters={
 326 |                     "type": "object",
 327 |                     "properties": {
 328 |                         "file_path": {
 329 |                             "type": "string",
 330 |                             "description": "The absolute path to the file to read"
 331 |                         },
 332 |                         "limit": {
 333 |                             "type": "number",
 334 |                             "description": "The number of lines to read. Only provide if the file is too large to read at once."
 335 |                         },
 336 |                         "offset": {
 337 |                             "type": "number",
 338 |                             "description": "The line number to start reading from. Only provide if the file is too large to read at once."
 339 |                         }
 340 |                     },
 341 |                     "required": ["file_path"]
 342 |                 },
 343 |                 function=self._view_file
 344 |             ),
 345 |             Tool(
 346 |                 name="Edit",
 347 |                 description="This is a tool for editing files.",
 348 |                 parameters={
 349 |                     "type": "object",
 350 |                     "properties": {
 351 |                         "file_path": {
 352 |                             "type": "string",
 353 |                             "description": "The absolute path to the file to modify"
 354 |                         },
 355 |                         "old_string": {
 356 |                             "type": "string",
 357 |                             "description": "The text to replace"
 358 |                         },
 359 |                         "new_string": {
 360 |                             "type": "string",
 361 |                             "description": "The text to replace it with"
 362 |                         }
 363 |                     },
 364 |                     "required": ["file_path", "old_string", "new_string"]
 365 |                 },
 366 |                 function=self._edit_file
 367 |             ),
 368 |             Tool(
 369 |                 name="Replace",
 370 |                 description="Write a file to the local filesystem. Overwrites the existing file if there is one.",
 371 |                 parameters={
 372 |                     "type": "object",
 373 |                     "properties": {
 374 |                         "file_path": {
 375 |                             "type": "string",
 376 |                             "description": "The absolute path to the file to write"
 377 |                         },
 378 |                         "content": {
 379 |                             "type": "string",
 380 |                             "description": "The content to write to the file"
 381 |                         }
 382 |                     },
 383 |                     "required": ["file_path", "content"]
 384 |                 },
 385 |                 function=self._replace_file
 386 |             ),
 387 |             Tool(
 388 |                 name="Bash",
 389 |                 description="Executes a given bash command in a shell subprocess (a new shell per call).",
 390 |                 parameters={
 391 |                     "type": "object",
 392 |                     "properties": {
 393 |                         "command": {
 394 |                             "type": "string",
 395 |                             "description": "The command to execute"
 396 |                         },
 397 |                         "timeout": {
 398 |                             "type": "number",
 399 |                             "description": "Optional timeout in milliseconds (max 600000)"
 400 |                         }
 401 |                     },
 402 |                     "required": ["command"]
 403 |                 },
 404 |                 function=self._execute_bash
 405 |             ),
 406 |             Tool(
 407 |                 name="GlobTool",
 408 |                 description="Fast file pattern matching tool that works with any codebase size.",
 409 |                 parameters={
 410 |                     "type": "object",
 411 |                     "properties": {
 412 |                         "path": {
 413 |                             "type": "string",
 414 |                             "description": "The directory to search in. Defaults to the current working directory."
 415 |                         },
 416 |                         "pattern": {
 417 |                             "type": "string",
 418 |                             "description": "The glob pattern to match files against"
 419 |                         }
 420 |                     },
 421 |                     "required": ["pattern"]
 422 |                 },
 423 |                 function=self._glob_tool
 424 |             ),
 425 |             Tool(
 426 |                 name="GrepTool",
 427 |                 description="Fast content search tool that works with any codebase size.",
 428 |                 parameters={
 429 |                     "type": "object",
 430 |                     "properties": {
 431 |                         "path": {
 432 |                             "type": "string",
 433 |                             "description": "The directory to search in. Defaults to the current working directory."
 434 |                         },
 435 |                         "pattern": {
 436 |                             "type": "string",
 437 |                             "description": "The regular expression pattern to search for in file contents"
 438 |                         },
 439 |                         "include": {
 440 |                             "type": "string",
 441 |                             "description": "File pattern to include in the search (e.g. \"*.js\", \"*.{ts,tsx}\")"
 442 |                         }
 443 |                     },
 444 |                     "required": ["pattern"]
 445 |                 },
 446 |                 function=self._grep_tool
 447 |             ),
 448 |             Tool(
 449 |                 name="LS",
 450 |                 description="Lists files and directories in a given path.",
 451 |                 parameters={
 452 |                     "type": "object",
 453 |                     "properties": {
 454 |                         "path": {
 455 |                             "type": "string",
 456 |                             "description": "The absolute path to the directory to list"
 457 |                         },
 458 |                         "ignore": {
 459 |                             "type": "array",
 460 |                             "items": {
 461 |                                 "type": "string"
 462 |                             },
 463 |                             "description": "List of glob patterns to ignore"
 464 |                         }
 465 |                     },
 466 |                     "required": ["path"]
 467 |                 },
 468 |                 function=self._list_directory
 469 |             ),
 470 |             Tool(
 471 |                 name="JinaSearch",
 472 |                 description="Search the web for information using Jina.ai",
 473 |                 parameters={
 474 |                     "type": "object",
 475 |                     "properties": {
 476 |                         "query": {
 477 |                             "type": "string",
 478 |                             "description": "The search query"
 479 |                         }
 480 |                     },
 481 |                     "required": ["query"]
 482 |                 },
 483 |                 function=self._jina_search
 484 |             ),
 485 |             Tool(
 486 |                 name="JinaFactCheck",
 487 |                 description="Fact check a statement using Jina.ai",
 488 |                 parameters={
 489 |                     "type": "object",
 490 |                     "properties": {
 491 |                         "statement": {
 492 |                             "type": "string",
 493 |                             "description": "The statement to fact check"
 494 |                         }
 495 |                     },
 496 |                     "required": ["statement"]
 497 |                 },
 498 |                 function=self._jina_fact_check
 499 |             ),
 500 |             Tool(
 501 |                 name="JinaReadURL",
 502 |                 description="Read and summarize a webpage using Jina.ai",
 503 |                 parameters={
 504 |                     "type": "object",
 505 |                     "properties": {
 506 |                         "url": {
 507 |                             "type": "string",
 508 |                             "description": "The URL of the webpage to read"
 509 |                         }
 510 |                     },
 511 |                     "required": ["url"]
 512 |                 },
 513 |                 function=self._jina_read_url
 514 |             )
 515 |         ]
 516 |         return tools
 517 |     
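    # Illustrative smoke test (hypothetical): registered tools can be invoked
    # directly through tool_map, bypassing the model:
    #
    #     conv = Conversation()
    #     print(conv.tool_map["LS"](path="/tmp"))
    #     print(conv.tool_map["GlobTool"](pattern="**/*.py"))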
 518 |     # Tool implementations
 519 |     # TODO: Add better error handling and user feedback
 520 |     # TODO: Implement tool usage tracking and metrics
 521 |     
 522 |     def _get_weather(self, location: str) -> str:
 523 |         """Get current weather for a location using OpenWeatherMap API"""
 524 |         try:
 525 |             # Get API key from the environment (required; no default is bundled)
 526 |             api_key = os.getenv("OPENWEATHER_API_KEY", "")
 527 |             if not api_key:
 528 |                 return "Error: OpenWeatherMap API key not found. Please set the OPENWEATHER_API_KEY environment variable."
 529 |             
 530 |             # Prepare the API request
 531 |             base_url = "https://api.openweathermap.org/data/2.5/weather"
 532 |             params = {
 533 |                 "q": location,
 534 |                 "appid": api_key,
 535 |                 "units": "metric"  # Use metric units (Celsius)
 536 |             }
 537 |             
 538 |             # Make the API request
 539 |             response = requests.get(base_url, params=params)
 540 |             
 541 |             # Check if the request was successful
 542 |             if response.status_code == 200:
 543 |                 data = response.text
 544 |                 # Try to parse as JSON
 545 |                 try:
 546 |                     data = json.loads(data)
 547 |                 except json.JSONDecodeError:
 548 |                     return f"Error: Unable to parse weather data. Raw response: {data[:200]}..."
 549 |                 
 550 |                 # Extract relevant weather information
 551 |                 weather_desc = data["weather"][0]["description"]
 552 |                 temp = data["main"]["temp"]
 553 |                 feels_like = data["main"]["feels_like"]
 554 |                 humidity = data["main"]["humidity"]
 555 |                 wind_speed = data["wind"]["speed"]
 556 |                 
 557 |                 # Format the response
 558 |                 weather_info = (
 559 |                     f"Current weather in {location}:\n"
 560 |                     f"• Condition: {weather_desc.capitalize()}\n"
 561 |                     f"• Temperature: {temp}°C ({(temp * 9/5) + 32:.1f}°F)\n"
 562 |                     f"• Feels like: {feels_like}°C ({(feels_like * 9/5) + 32:.1f}°F)\n"
 563 |                     f"• Humidity: {humidity}%\n"
 564 |                     f"• Wind speed: {wind_speed} m/s ({wind_speed * 2.237:.1f} mph)"
 565 |                 )
 566 |                 return weather_info
 567 |             else:
 568 |                 # Handle API errors
 569 |                 if response.status_code == 404:
 570 |                     return f"Error: Location '{location}' not found. Please check the spelling or try a different location."
 571 |                 elif response.status_code == 401:
 572 |                     return "Error: Invalid API key. Please check your OpenWeatherMap API key."
 573 |                 else:
 574 |                     return f"Error: Unable to fetch weather data. Status code: {response.status_code}"
 575 |         
 576 |         except requests.exceptions.RequestException as e:
 577 |             return f"Error: Network error when fetching weather data: {str(e)}"
 578 |         except Exception as e:
 579 |             return f"Error: Failed to get weather information: {str(e)}"
 580 |     
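    # Worked example for the conversions above: 20 degC -> (20 * 9/5) + 32 =
    # 68.0 degF, and a 5 m/s wind -> 5 * 2.237 = 11.2 mph.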
 581 |     def _view_file(self, file_path: str, limit: Optional[int] = None, offset: Optional[int] = 0) -> str:
 582 |         # TODO: Add special handling for binary files and images
 583 |         # TODO: Add syntax highlighting for code files
 584 |         try:
 585 |             if not os.path.exists(file_path):
 586 |                 return f"Error: File not found: {file_path}"
 587 |             
 588 |             # TODO: Handle file size limits better
 589 |             
 590 |             with open(file_path, 'r') as f:
 591 |                 if limit is not None and offset is not None:
 592 |                     # Skip to offset
 593 |                     for _ in range(offset):
 594 |                         next(f, None)
 595 |                     
 596 |                     # Read limited lines
 597 |                     lines = []
 598 |                     for _ in range(limit):
 599 |                         line = next(f, None)
 600 |                         if line is None:
 601 |                             break
 602 |                         lines.append(line)
 603 |                     content = ''.join(lines)
 604 |                 else:
 605 |                     content = f.read()
 606 |             
 607 |             # TODO: Add file metadata like size, permissions, etc.
 608 |             return content
 609 |         except Exception as e:
 610 |             return f"Error reading file: {str(e)}"
 611 |     
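    # Example: _view_file("/etc/hosts", limit=5, offset=10) skips the first 10
    # lines and returns lines 11-15. Note that offset without limit is ignored
    # and the whole file is read.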
 612 |     def _edit_file(self, file_path: str, old_string: str, new_string: str) -> str:
 613 |         try:
 614 |             # Create directory if creating new file
 615 |             if old_string == "" and os.path.dirname(file_path) and not os.path.exists(os.path.dirname(file_path)):
 616 |                 os.makedirs(os.path.dirname(file_path), exist_ok=True)
 617 |                 
 618 |             if old_string == "" and not os.path.exists(file_path):
 619 |                 # Creating new file
 620 |                 with open(file_path, 'w') as f:
 621 |                     f.write(new_string)
 622 |                 return f"Created new file: {file_path}"
 623 |             
 624 |             # Reading existing file
 625 |             if not os.path.exists(file_path):
 626 |                 return f"Error: File not found: {file_path}"
 627 |             
 628 |             with open(file_path, 'r') as f:
 629 |                 content = f.read()
 630 |             
 631 |             # Replace string
 632 |             if old_string not in content:
 633 |                 return f"Error: Could not find the specified text in {file_path}"
 634 |             
 635 |             # Count occurrences to ensure uniqueness
 636 |             occurrences = content.count(old_string)
 637 |             if occurrences > 1:
 638 |                 return f"Error: Found {occurrences} occurrences of the specified text in {file_path}. Please provide more context to uniquely identify the text to replace."
 639 |             
 640 |             new_content = content.replace(old_string, new_string)
 641 |             
 642 |             # Write back to file
 643 |             with open(file_path, 'w') as f:
 644 |                 f.write(new_content)
 645 |             
 646 |             return f"Successfully edited {file_path}"
 647 |         
 648 |         except Exception as e:
 649 |             return f"Error editing file: {str(e)}"
 650 |     
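    # Example (hypothetical paths/values): old_string must occur exactly once,
    # so _edit_file("/tmp/app.py", "PORT = 8000", "PORT = 9000") fails with a
    # count error if the text appears twice; old_string="" on a missing path
    # creates the file with new_string as its content.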
 651 |     def _replace_file(self, file_path: str, content: str) -> str:
 652 |         try:
 653 |             # Create directory if it doesn't exist
 654 |             directory = os.path.dirname(file_path)
 655 |             if directory and not os.path.exists(directory):
 656 |                 os.makedirs(directory, exist_ok=True)
 657 |             
 658 |             # Write content to file
 659 |             with open(file_path, 'w') as f:
 660 |                 f.write(content)
 661 |             
 662 |             return f"Successfully wrote to {file_path}"
 663 |         
 664 |         except Exception as e:
 665 |             return f"Error writing file: {str(e)}"
 666 |     
 667 |     def _execute_bash(self, command: str, timeout: Optional[int] = None) -> str:
 668 |         try:
 669 |             import subprocess
 670 |             import shlex
 671 |             
 672 |             # Security check for banned commands
 673 |             banned_commands = [
 674 |                 'alias', 'curl', 'curlie', 'wget', 'axel', 'aria2c', 'nc', 
 675 |                 'telnet', 'lynx', 'w3m', 'links', 'httpie', 'xh', 'http-prompt', 
 676 |                 'chrome', 'firefox', 'safari'
 677 |             ]
 678 |             
 679 |             for banned in banned_commands:
 680 |                 if banned in command.split():
 681 |                     return f"Error: The command '{banned}' is not allowed for security reasons."
 682 |             
 683 |             # Execute command
 684 |             if timeout:
 685 |                 timeout_seconds = timeout / 1000  # Convert to seconds
 686 |             else:
 687 |                 timeout_seconds = 1800  # 30 minutes default
 688 |             
 689 |             result = subprocess.run(
 690 |                 command,
 691 |                 shell=True,
 692 |                 capture_output=True,
 693 |                 text=True,
 694 |                 timeout=timeout_seconds
 695 |             )
 696 |             
 697 |             output = result.stdout
 698 |             if result.stderr:
 699 |                 output += f"\nErrors:\n{result.stderr}"
 700 |             
 701 |             # Truncate if too long
 702 |             if len(output) > 30000:
 703 |                 output = output[:30000] + "\n... (output truncated)"
 704 |             
 705 |             return output
 706 |         
 707 |         except subprocess.TimeoutExpired:
 708 |             return f"Error: Command timed out after {timeout_seconds} seconds"
 709 |         except Exception as e:
 710 |             return f"Error executing command: {str(e)}"
 711 |     
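    # Example: _execute_bash("sleep 2 && echo done", timeout=5000) converts
    # 5000 ms -> 5.0 s before TimeoutExpired can fire; with no timeout the
    # 1800 s (30 minute) default above applies.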
 712 |     def _glob_tool(self, pattern: str, path: Optional[str] = None) -> str:
 713 |         try:
 714 |             import glob
 715 |             import os
 716 |             
 717 |             if path is None:
 718 |                 path = os.getcwd()
 719 |             
 720 |             # Build the full pattern path
 721 |             if not os.path.isabs(path):
 722 |                 path = os.path.abspath(path)
 723 |             
 724 |             full_pattern = os.path.join(path, pattern)
 725 |             
 726 |             # Get matching files
 727 |             matches = glob.glob(full_pattern, recursive=True)
 728 |             
 729 |             # Sort by modification time (newest first)
 730 |             matches.sort(key=os.path.getmtime, reverse=True)
 731 |             
 732 |             if not matches:
 733 |                 return f"No files matching pattern '{pattern}' in {path}"
 734 |             
 735 |             return "\n".join(matches)
 736 |         
 737 |         except Exception as e:
 738 |             return f"Error in glob search: {str(e)}"
 739 |     
 740 |     def _grep_tool(self, pattern: str, path: Optional[str] = None, include: Optional[str] = None) -> str:
 741 |         try:
 742 |             import re
 743 |             import os
 744 |             import fnmatch
 745 |             from concurrent.futures import ThreadPoolExecutor
 746 |             
 747 |             if path is None:
 748 |                 path = os.getcwd()
 749 |             
 750 |             if not os.path.isabs(path):
 751 |                 path = os.path.abspath(path)
 752 |             
 753 |             # Compile regex pattern
 754 |             regex = re.compile(pattern)
 755 |             
 756 |             # Get all files
 757 |             all_files = []
 758 |             for root, _, files in os.walk(path):
 759 |                 for file in files:
 760 |                     file_path = os.path.join(root, file)
 761 |                     
 762 |                     # Apply include filter if provided
 763 |                     if include:
 764 |                         if not fnmatch.fnmatch(file, include):
 765 |                             continue
 766 |                     
 767 |                     all_files.append(file_path)
 768 |             
 769 |             # Sort by modification time (newest first)
 770 |             all_files.sort(key=os.path.getmtime, reverse=True)
 771 |             
 772 |             matches = []
 773 |             
 774 |             def search_file(file_path):
 775 |                 try:
 776 |                     with open(file_path, 'r', errors='ignore') as f:
 777 |                         content = f.read()
 778 |                         if regex.search(content):
 779 |                             return file_path
 780 |                 except Exception:
 781 |                     # Skip files that can't be read
 782 |                     pass
 783 |                 return None
 784 |             
 785 |             # Search files in parallel
 786 |             with ThreadPoolExecutor(max_workers=10) as executor:
 787 |                 results = executor.map(search_file, all_files)
 788 |                 
 789 |                 for result in results:
 790 |                     if result:
 791 |                         matches.append(result)
 792 |             
 793 |             if not matches:
 794 |                 return f"No matches found for pattern '{pattern}' in {path}"
 795 |             
 796 |             return "\n".join(matches)
 797 |         
 798 |         except Exception as e:
 799 |             return f"Error in grep search: {str(e)}"
 800 |     
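    # Example: _grep_tool(pattern=r"def \w+", include="*.py") returns matching
    # file paths, newest first. The include glob is matched against file names
    # only, not full paths.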
 801 |     def _list_directory(self, path: str, ignore: Optional[List[str]] = None) -> str:
 802 |         try:
 803 |             import os
 804 |             import fnmatch
 805 |             
 806 |             # If path is not absolute, make it absolute from current directory
 807 |             if not os.path.isabs(path):
 808 |                 path = os.path.abspath(os.path.join(os.getcwd(), path))
 809 |             
 810 |             if not os.path.exists(path):
 811 |                 return f"Error: Directory not found: {path}"
 812 |             
 813 |             if not os.path.isdir(path):
 814 |                 return f"Error: Path is not a directory: {path}"
 815 |             
 816 |             # List directory contents
 817 |             items = os.listdir(path)
 818 |             
 819 |             # Apply ignore patterns
 820 |             if ignore:
 821 |                 for pattern in ignore:
 822 |                     items = [item for item in items if not fnmatch.fnmatch(item, pattern)]
 823 |             
 824 |             # Sort items
 825 |             items.sort()
 826 |             
 827 |             # Format output
 828 |             result = []
 829 |             for item in items:
 830 |                 item_path = os.path.join(path, item)
 831 |                 if os.path.isdir(item_path):
 832 |                     result.append(f"{item}/")
 833 |                 else:
 834 |                     result.append(item)
 835 |             
 836 |             if not result:
 837 |                 return f"Directory {path} is empty"
 838 |             
 839 |             return "\n".join(result)
 840 |         
 841 |         except Exception as e:
 842 |             return f"Error listing directory: {str(e)}"
 843 |     
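    # Example: _list_directory("/tmp", ignore=["*.log", ".*"]) lists /tmp with
    # log files and dotfiles filtered out; directories get a trailing "/".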
 844 |     def add_message(self, role: str, content: str):
 845 |         """Legacy method to add messages - use direct append now"""
 846 |         self.messages.append({"role": role, "content": content})
 847 |     
 848 |     def process_tool_calls(self, tool_calls, query=None):
 849 |         # TODO: Add tool call validation
 850 |         # TODO: Add permission system for sensitive tools
 851 |         # TODO: Add progress visualization for long-running tools
 852 |         responses = []
 853 |         
 854 |         # Process tool calls in parallel
 855 |         from concurrent.futures import ThreadPoolExecutor
 856 |         
 857 |         def process_single_tool(tool_call):
 858 |             # Handle both object-style and dict-style tool calls
 859 |             if isinstance(tool_call, dict):
 860 |                 function_name = tool_call["function"]["name"]
 861 |                 function_args = json.loads(tool_call["function"]["arguments"])
 862 |                 tool_call_id = tool_call["id"]
 863 |             else:
 864 |                 function_name = tool_call.function.name
 865 |                 function_args = json.loads(tool_call.function.arguments)
 866 |                 tool_call_id = tool_call.id
 867 |             
 868 |             # Get the tool function
 869 |             if function_name in self.tool_map:
 870 |                 # TODO: Add pre-execution validation
 871 |                 # TODO: Add permission check here
 872 |                 
 873 |                 # Track start time for metrics
 874 |                 start_time = time.time()
 875 |                 
 876 |                 try:
 877 |                     function = self.tool_map[function_name]
 878 |                     result = function(**function_args)
 879 |                     success = True
 880 |                 except Exception as e:
 881 |                     result = f"Error executing tool {function_name}: {str(e)}\n{traceback.format_exc()}"
 882 |                     success = False
 883 |                 
 884 |                 # Calculate execution time
 885 |                 execution_time = time.time() - start_time
 886 |                 
 887 |                 # Record tool usage for optimization if optimizer is available
 888 |                 if self.tool_optimizer is not None and query is not None:
 889 |                     try:
 890 |                         # Create current context snapshot
 891 |                         context = {
 892 |                             "messages": self.messages.copy(),
 893 |                             "conversation_id": self.conversation_id,
 894 |                         }
 895 |                         
 896 |                         # Record tool usage
 897 |                         self.tool_optimizer.record_tool_usage(
 898 |                             query=query,
 899 |                             tool_name=function_name,
 900 |                             execution_time=execution_time,
 901 |                             token_usage=self.token_usage.copy(),
 902 |                             success=success,
 903 |                             context=context,
 904 |                             result=result
 905 |                         )
 906 |                     except Exception as e:
 907 |                         if self.verbose:
 908 |                             print(f"Warning: Failed to record tool usage: {e}")
 909 |                 
 910 |                 return {
 911 |                     "tool_call_id": tool_call_id,
 912 |                     "function_name": function_name,
 913 |                     "result": result,
 914 |                     "name": function_name,
 915 |                     "execution_time": execution_time,  # For metrics
 916 |                     "success": success
 917 |                 }
 918 |             return None
 919 |         
 920 |         # Process all tool calls in parallel
 921 |         with ThreadPoolExecutor(max_workers=max(1, min(10, len(tool_calls)))) as executor:
 922 |             futures = [executor.submit(process_single_tool, tool_call) for tool_call in tool_calls]
 923 |             for future in futures:
 924 |                 result = future.result()
 925 |                 if result:
 926 |                     # Add tool response to messages
 927 |                     self.messages.append({
 928 |                         "tool_call_id": result["tool_call_id"],
 929 |                         "role": "tool",
 930 |                         "name": result["name"],
 931 |                         "content": result["result"]
 932 |                     })
 933 |                     
 934 |                     responses.append({
 935 |                         "tool_call_id": result["tool_call_id"],
 936 |                         "function_name": result["function_name"],
 937 |                         "result": result["result"]
 938 |                     })
 939 |                     
 940 |                     # Log tool execution metrics if verbose
 941 |                     if self.verbose:
 942 |                         print(f"Tool {result['function_name']} executed in {result['execution_time']:.2f}s (success: {result['success']})")
 943 |         
 944 |         # Return tool responses
 945 |         return responses
 946 |     
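    # Illustrative dict-style tool call accepted above (the object style
    # mirrors the OpenAI SDK's tool-call objects; id/arguments hypothetical):
    #
    #     {"id": "call_1", "type": "function",
    #      "function": {"name": "Weather",
    #                   "arguments": "{\"location\": \"London, UK\"}"}}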
 947 |     def compact(self):
 948 |         # TODO: Add more sophisticated compaction with token counting
 949 |         # TODO: Implement selective retention of critical information
 950 |         # TODO: Add option to save conversation history before compacting
 951 |         
 952 |         system_prompt = next((m for m in self.messages if m["role"] == "system"), None)
 953 |         user_messages = [m for m in self.messages if m["role"] == "user"]
 954 |         
 955 |         if not user_messages:
 956 |             return "No user messages to compact."
 957 |         
 958 |         last_user_message = user_messages[-1]
 959 |         
 960 |         # Create a compaction prompt
 961 |         # TODO: Improve the compaction prompt with more guidance on what to retain
 962 |         compact_prompt = (
 963 |             "Summarize the conversation so far, focusing on the key points, decisions, and context. "
 964 |             "Keep important details about the code and tasks. Retain critical file paths, commands, "
 965 |             "and code snippets. The summary should be concise but complete enough to continue the "
 966 |             "conversation effectively."
 967 |         )
 968 |         
 969 |         # Add compaction message
 970 |         self.messages.append({"role": "user", "content": compact_prompt})
 971 |         
 972 |         # Get compaction summary
 973 |         # TODO: Add error handling for compaction API call
 974 |         response = self.client.chat.completions.create(
 975 |             model=self.model,
 976 |             messages=self.messages,
 977 |             stream=False
 978 |         )
 979 |         
 980 |         summary = response.choices[0].message.content
 981 |         
 982 |         # Reset conversation with summary
 983 |         if system_prompt:
 984 |             self.messages = [system_prompt]
 985 |         else:
 986 |             self.messages = []
 987 |         
 988 |         self.messages.append({"role": "system", "content": f"This is a compacted conversation. Previous context: {summary}"})
 989 |         self.messages.append({"role": "user", "content": last_user_message["content"]})
 990 |         
 991 |         # TODO: Add metrics for compaction (tokens before/after)
 992 |         
 993 |         return "Conversation compacted successfully."
 994 |     
 995 |     def get_response(self, user_input: str, stream: bool = True):
 996 |         # TODO: Add more special commands similar to Claude Code (e.g., /version, /status)
 997 |         # TODO: Implement binary feedback mechanism for comparing responses
 998 |         
 999 |         # Special commands
1000 |         if user_input.strip() == "/compact":
1001 |             return self.compact()
1002 |         
1003 |         # Add a debug command to help diagnose issues
1004 |         if user_input.strip() == "/debug":
1005 |             debug_info = {
1006 |                 "model": self.model,
1007 |                 "temperature": self.temperature,
1008 |                 "message_count": len(self.messages),
1009 |                 "token_usage": self.token_usage,
1010 |                 "conversation_id": self.conversation_id,
1011 |                 "session_duration": time.time() - self.session_start_time,
1012 |                 "tools_count": len(self.tools),
1013 |                 "python_version": sys.version,
1014 |                 "openai_version": OpenAI.__version__ if hasattr(OpenAI, "__version__") else "Unknown"
1015 |             }
1016 |             return "Debug Information:\n" + json.dumps(debug_info, indent=2)
1017 |         
1018 |         if user_input.strip() == "/help":
1019 |             # Standard commands
1020 |             commands = [
1021 |                 "/help - Show this help message",
1022 |                 "/compact - Compact the conversation to reduce token usage",
1023 |                 "/status - Show token usage and session information",
1024 |                 "/config - Show current configuration settings",
1025 |             ]
1026 |             
1027 |             # RL-specific commands if available
1028 |             if self.tool_optimizer is not None:
1029 |                 commands.extend([
1030 |                     "/rl-status - Show RL tool optimizer status",
1031 |                     "/rl-update - Update the RL model manually",
1032 |                     "/rl-stats - Show tool usage statistics",
1033 |                 ])
1034 |             
1035 |             return "Available commands:\n" + "\n".join(commands)
1036 |         
1037 |         # Token usage and session stats
1038 |         if user_input.strip() == "/status":
1039 |             # Calculate session duration
1040 |             session_duration = time.time() - self.session_start_time
1041 |             hours, remainder = divmod(session_duration, 3600)
1042 |             minutes, seconds = divmod(remainder, 60)
1043 |             
1044 |             # Format message
1045 |             status = (
1046 |                 f"Session ID: {self.conversation_id}\n"
1047 |                 f"Model: {self.model} (Temperature: {self.temperature})\n"
1048 |                 f"Session duration: {int(hours)}h {int(minutes)}m {int(seconds)}s\n\n"
1049 |                 f"Token usage:\n"
1050 |                 f"  Prompt tokens: {self.token_usage['prompt']}\n"
1051 |                 f"  Completion tokens: {self.token_usage['completion']}\n"
1052 |                 f"  Total tokens: {self.token_usage['total']}\n"
1053 |             )
1054 |             return status
1055 |             
1056 |         # Configuration settings
1057 |         if user_input.strip() == "/config":
1058 |             config_info = (
1059 |                 f"Current Configuration:\n"
1060 |                 f"  Model: {self.model}\n"
1061 |                 f"  Temperature: {self.temperature}\n"
1062 |                 f"  Max tool iterations: {self.max_tool_iterations}\n"
1063 |                 f"  Verbose mode: {self.verbose}\n"
1064 |                 f"  RL optimization: {self.tool_optimizer is not None}\n"
1065 |             )
1066 |             
1067 |             # Provide instructions for changing settings
1068 |             config_info += "\nTo change settings, use:\n"
1069 |             config_info += "  /config set <setting> <value>\n"
1070 |             config_info += "Example: /config set max_tool_iterations 15"
1071 |             
1072 |             return config_info
1073 |             
1074 |         # Handle configuration changes
1075 |         if user_input.strip().startswith("/config set "):
1076 |             parts = user_input.strip().split(" ", 3)
1077 |             if len(parts) != 4:
1078 |                 return "Invalid format. Use: /config set <setting> <value>"
1079 |                 
1080 |             setting = parts[2]
1081 |             value = parts[3]
1082 |             
1083 |             if setting == "max_tool_iterations":
1084 |                 try:
1085 |                     self.max_tool_iterations = int(value)
1086 |                     return f"Max tool iterations set to {self.max_tool_iterations}"
1087 |                 except ValueError:
1088 |                     return "Invalid value. Please provide a number."
1089 |             elif setting == "temperature":
1090 |                 try:
1091 |                     self.temperature = float(value)
1092 |                     return f"Temperature set to {self.temperature}"
1093 |                 except ValueError:
1094 |                     return "Invalid value. Please provide a number."
1095 |             elif setting == "verbose":
1096 |                 if value.lower() in ("true", "yes", "1", "on"):
1097 |                     self.verbose = True
1098 |                     return "Verbose mode enabled"
1099 |                 elif value.lower() in ("false", "no", "0", "off"):
1100 |                     self.verbose = False
1101 |                     return "Verbose mode disabled"
1102 |                 else:
1103 |                     return "Invalid value. Use 'true' or 'false'."
1104 |             elif setting == "model":
1105 |                 self.model = value
1106 |                 return f"Model set to {self.model}"
1107 |             else:
1108 |                 return f"Unknown setting: {setting}"
1109 |         
1110 |         # RL-specific commands
1111 |         if self.tool_optimizer is not None:
1112 |             # RL status command
1113 |             if user_input.strip() == "/rl-status":
1114 |                 return (
1115 |                     f"RL tool optimization is active\n"
1116 |                     f"Optimizer type: {type(self.tool_optimizer).__name__}\n"
1117 |                     f"Number of tools: {len(self.tools)}\n"
1118 |                     f"Data directory: {self.tool_optimizer.optimizer.data_dir if hasattr(self.tool_optimizer, 'optimizer') else 'N/A'}\n"
1119 |                 )
1120 |             
1121 |             # RL update command
1122 |             if user_input.strip() == "/rl-update":
1123 |                 try:
1124 |                     result = self.tool_optimizer.optimizer.update_model()
1125 |                     status = f"RL model update status: {result['status']}\n"
1126 |                     if 'metrics' in result:
1127 |                         status += "Metrics:\n" + "\n".join([f"  {k}: {v}" for k, v in result['metrics'].items()])
1128 |                     return status
1129 |                 except Exception as e:
1130 |                     return f"Error updating RL model: {str(e)}"
1131 |             
1132 |             # RL stats command
1133 |             if user_input.strip() == "/rl-stats":
1134 |                 try:
1135 |                     if hasattr(self.tool_optimizer, 'optimizer') and hasattr(self.tool_optimizer.optimizer, 'tracker'):
1136 |                         stats = self.tool_optimizer.optimizer.tracker.get_tool_stats()
1137 |                         if not stats:
1138 |                             return "No tool usage data available yet."
1139 |                         
1140 |                         result = "Tool Usage Statistics:\n\n"
1141 |                         for tool_name, tool_stats in stats.items():
1142 |                             result += f"{tool_name}:\n"
1143 |                             result += f"  Count: {tool_stats['count']}\n"
1144 |                             result += f"  Success rate: {tool_stats['success_rate']:.2f}\n"
1145 |                             result += f"  Avg time: {tool_stats['avg_time']:.2f}s\n"
1146 |                             result += f"  Avg tokens: {tool_stats['avg_total_tokens']:.1f}\n"
1147 |                             result += "\n"
1148 |                         return result
1149 |                     return "Tool usage statistics not available."
1150 |                 except Exception as e:
1151 |                     return f"Error getting tool statistics: {str(e)}"
1152 |         
1153 |         # TODO: Add /version command to show version information
1154 |         
1155 |         # Add user message
1156 |         self.messages.append({"role": "user", "content": user_input})
1157 |         
1158 |         # Initialize empty response
1159 |         response_text = ""
1160 |         
1161 |         # Create tools list for API
1162 |         # TODO: Add dynamic tool availability based on context
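        # Each entry below follows the OpenAI function-calling tool schema, where
        # "parameters" is a JSON Schema object describing the tool's arguments.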
1163 |         api_tools = []
1164 |         for tool in self.tools:
1165 |             api_tools.append({
1166 |                 "type": "function",
1167 |                 "function": {
1168 |                     "name": tool.name,
1169 |                     "description": tool.description,
1170 |                     "parameters": tool.parameters
1171 |                 }
1172 |             })
1173 |         
1174 |         if stream:
1175 |             # TODO: Add retry mechanism for API failures
1176 |             # TODO: Add token tracking for response
1177 |             # TODO: Implement cancellation support
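            # A possible approach for the token-tracking TODO (a sketch, not wired
            # in here): pass stream_options={"include_usage": True} to
            # chat.completions.create and read chunk.usage from the final chunk,
            # updating self.token_usage the same way the non-streaming branch does.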
1178 |             
1179 |             # Stream response
1180 |             try:
1181 |                 # Add retry logic for API calls
1182 |                 max_retries = 3
1183 |                 retry_count = 0
1184 |                 while retry_count < max_retries:
1185 |                     try:
                        # Use a distinct name to avoid shadowing the `stream` flag argument
                        response_stream = self.client.chat.completions.create(
1187 |                             model=self.model,
1188 |                             messages=self.messages,
1189 |                             tools=api_tools,
1190 |                             temperature=self.temperature,
1191 |                             stream=True
1192 |                         )
1193 |                         break  # Success, exit retry loop
1194 |                     except Exception as e:
1195 |                         retry_count += 1
1196 |                         if retry_count >= max_retries:
1197 |                             raise  # Re-raise if we've exhausted retries
1198 |                         
1199 |                         # Exponential backoff
1200 |                         wait_time = 2 ** retry_count
1201 |                         if self.verbose:
1202 |                             console.print(f"[yellow]API call failed, retrying in {wait_time}s... ({retry_count}/{max_retries})[/yellow]")
1203 |                         time.sleep(wait_time)
1204 |                 
1205 |                 current_tool_calls = []
1206 |                 tool_call_chunks = {}
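                # Streamed tool calls arrive as deltas keyed by index, and the
                # "arguments" field is a JSON string emitted in fragments
                # (e.g. '{"pa' then 'th": "a.py"}'), so fragments must be
                # concatenated before they can be parsed.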
1207 |                 
1208 |                 # Process streaming response outside of the status context
1209 |                 with Live("", refresh_per_second=10) as live:
                    for chunk in response_stream:
1211 |                         # If there's content, print it
1212 |                         if chunk.choices[0].delta.content:
1213 |                             content_piece = chunk.choices[0].delta.content
1214 |                             response_text += content_piece
1215 |                             # Update the live display with the accumulated response
1216 |                             live.update(response_text)
1217 |                         
1218 |                         # Process tool calls
1219 |                         delta = chunk.choices[0].delta
1220 |                         if delta.tool_calls:
1221 |                             for tool_call_delta in delta.tool_calls:
1222 |                                 # Initialize tool call in chunks dictionary if new
1223 |                                 if tool_call_delta.index not in tool_call_chunks:
1224 |                                     tool_call_chunks[tool_call_delta.index] = {
1225 |                                         "id": "",
1226 |                                         "function": {"name": "", "arguments": ""}
1227 |                                     }
1228 |                                 
1229 |                                 # Update tool call data
1230 |                                 if tool_call_delta.id:
1231 |                                     tool_call_chunks[tool_call_delta.index]["id"] = tool_call_delta.id
1232 |                                 
1233 |                                 if tool_call_delta.function:
1234 |                                     if tool_call_delta.function.name:
1235 |                                         tool_call_chunks[tool_call_delta.index]["function"]["name"] = tool_call_delta.function.name
1236 |                                     
1237 |                                     if tool_call_delta.function.arguments:
1238 |                                         tool_call_chunks[tool_call_delta.index]["function"]["arguments"] += tool_call_delta.function.arguments
1239 |                 
1240 |                 # No need to print the response again as it was already streamed in the Live context
1241 |                 
1242 |             except Exception as e:
1243 |                 # TODO: Add better error handling and user feedback
1244 |                 console.print(f"[bold red]Error during API call:[/bold red] {str(e)}")
1245 |                 return f"Error during API call: {str(e)}"
1246 |             
1247 |             # Convert tool call chunks to actual tool calls
1248 |             for index, tool_call_data in tool_call_chunks.items():
1249 |                 current_tool_calls.append({
1250 |                     "id": tool_call_data["id"],
1251 |                     "function": {
1252 |                         "name": tool_call_data["function"]["name"],
1253 |                         "arguments": tool_call_data["function"]["arguments"]
1254 |                     }
1255 |                 })
1256 |             
1257 |             # Process tool calls if any
1258 |             if current_tool_calls:
1259 |                 try:
1260 |                     # Add assistant message with tool_calls to messages first
1261 |                     # Ensure each tool call has a "type" field set to "function"
1262 |                     processed_tool_calls = []
1263 |                     for tool_call in current_tool_calls:
1264 |                         processed_tool_call = tool_call.copy()
1265 |                         processed_tool_call["type"] = "function"
1266 |                         processed_tool_calls.append(processed_tool_call)
1267 |                         
1268 |                     # Make sure we add the assistant message with tool calls before processing them
1269 |                     self.messages.append({
1270 |                         "role": "assistant", 
1271 |                         "content": response_text,
1272 |                         "tool_calls": processed_tool_calls
1273 |                     })
1274 |                         
1275 |                     # Now process the tool calls
1276 |                     with console.status("[bold green]Running tools..."):
1277 |                         tool_responses = self.process_tool_calls(current_tool_calls, query=user_input)
1278 |                 except Exception as e:
1279 |                     console.print(f"[bold red]Error:[/bold red] {str(e)}")
1280 |                     console.print(traceback.format_exc())
1281 |                     return f"Error processing tool calls: {str(e)}"
1282 |                     
1283 |                 # Continue the conversation with tool responses
                # Loop so the model can chain further tool calls based on earlier results
1285 |                 max_loop_iterations = self.max_tool_iterations  # Use configurable setting
1286 |                 current_iteration = 0
1287 |                 
1288 |                 while current_iteration < max_loop_iterations:
1289 |                     # Add retry logic for follow-up API calls
1290 |                     max_retries = 3
1291 |                     retry_count = 0
1292 |                     follow_up = None
1293 |                     
1294 |                     while retry_count < max_retries:
1295 |                         try:
1296 |                             follow_up = self.client.chat.completions.create(
1297 |                                 model=self.model,
1298 |                                 messages=self.messages,
1299 |                                 tools=api_tools,  # Pass tools to enable recursive function calling
1300 |                                 stream=False
1301 |                             )
1302 |                             break  # Success, exit retry loop
1303 |                         except Exception as e:
1304 |                             retry_count += 1
1305 |                             if retry_count >= max_retries:
1306 |                                 raise  # Re-raise if we've exhausted retries
1307 |                                 
1308 |                             # Exponential backoff
1309 |                             wait_time = 2 ** retry_count
1310 |                             if self.verbose:
1311 |                                 console.print(f"[yellow]Follow-up API call failed, retrying in {wait_time}s... ({retry_count}/{max_retries})[/yellow]")
1312 |                             time.sleep(wait_time)
1313 |                     
1314 |                     # Check if the follow-up response contains more tool calls
1315 |                     assistant_message = follow_up.choices[0].message
1316 |                     follow_up_text = assistant_message.content or ""
1317 |                     
1318 |                     # If there are no more tool calls, we're done with the loop
1319 |                     if not hasattr(assistant_message, 'tool_calls') or not assistant_message.tool_calls:
1320 |                         if follow_up_text:
1321 |                             console.print(Markdown(follow_up_text))
1322 |                             response_text += "\n" + follow_up_text
1323 |                         
1324 |                         # Add the final assistant message to the conversation
1325 |                         self.messages.append({"role": "assistant", "content": follow_up_text})
1326 |                         break
1327 |                     
1328 |                     # Process the new tool calls
1329 |                     current_tool_calls = []
1330 |                     for tool_call in assistant_message.tool_calls:
1331 |                         # Handle both object-style and dict-style tool calls
1332 |                         if isinstance(tool_call, dict):
1333 |                             processed_tool_call = tool_call.copy()
1334 |                         else:
1335 |                             # Convert object to dict
1336 |                             processed_tool_call = {
1337 |                                 "id": tool_call.id,
1338 |                                 "function": {
1339 |                                     "name": tool_call.function.name,
1340 |                                     "arguments": tool_call.function.arguments
1341 |                                 }
1342 |                             }
1343 |                         
1344 |                         # Ensure type field is present
1345 |                         processed_tool_call["type"] = "function"
1346 |                         current_tool_calls.append(processed_tool_call)
1347 |                     
1348 |                     # Add the assistant message with tool calls
1349 |                     self.messages.append({
1350 |                         "role": "assistant",
1351 |                         "content": follow_up_text,
1352 |                         "tool_calls": current_tool_calls
1353 |                     })
1354 |                     
1355 |                     # Process the new tool calls
1356 |                     with console.status(f"[bold green]Running tools (iteration {current_iteration + 1})...[/bold green]"):
                        tool_responses = self.process_tool_calls(current_tool_calls, query=user_input)
1358 |                     
1359 |                     # Increment the iteration counter
1360 |                     current_iteration += 1
1361 |                 
1362 |                 # If we've reached the maximum number of iterations, add a warning
1363 |                 if current_iteration >= max_loop_iterations:
                    # Keep the stored text free of Rich markup; style only the console output
                    warning_message = f"Warning: Reached maximum number of tool call iterations ({max_loop_iterations}). Some operations may be incomplete."
                    console.print(f"[yellow]{warning_message}[/yellow]")
                    response_text += f"\n\n{warning_message}"
1367 |             
1368 |             # Add assistant response to messages if there were no tool calls
1369 |             # (we already added it above if there were tool calls)
1370 |             if not current_tool_calls:
1371 |                 self.messages.append({"role": "assistant", "content": response_text})
1372 |             
1373 |             return response_text
1374 |         else:
1375 |             # Non-streaming response
1376 |             response = self.client.chat.completions.create(
1377 |                 model=self.model,
1378 |                 messages=self.messages,
1379 |                 tools=api_tools,
1380 |                 temperature=self.temperature,
1381 |                 stream=False
1382 |             )
1383 |             
1384 |             # Track token usage
1385 |             if hasattr(response, 'usage'):
1386 |                 self.token_usage["prompt"] += response.usage.prompt_tokens
1387 |                 self.token_usage["completion"] += response.usage.completion_tokens
1388 |                 self.token_usage["total"] += response.usage.total_tokens
1389 |             
1390 |             assistant_message = response.choices[0].message
1391 |             response_text = assistant_message.content or ""
1392 |             
1393 |             # Process tool calls if any
1394 |             if assistant_message.tool_calls:
1395 |                 # Add assistant message with tool_calls to messages
1396 |                 # Convert tool_calls to a list of dictionaries with "type" field
1397 |                 processed_tool_calls = []
1398 |                 for tool_call in assistant_message.tool_calls:
1399 |                     # Handle both object-style and dict-style tool calls
1400 |                     if isinstance(tool_call, dict):
1401 |                         processed_tool_call = tool_call.copy()
1402 |                     else:
1403 |                         # Convert object to dict
1404 |                         processed_tool_call = {
1405 |                             "id": tool_call.id,
1406 |                             "function": {
1407 |                                 "name": tool_call.function.name,
1408 |                                 "arguments": tool_call.function.arguments
1409 |                             }
1410 |                         }
1411 |                     
1412 |                     # Ensure type field is present
1413 |                     processed_tool_call["type"] = "function"
1414 |                     processed_tool_calls.append(processed_tool_call)
1415 |                 
1416 |                 self.messages.append({
1417 |                     "role": "assistant",
1418 |                     "content": response_text,
1419 |                     "tool_calls": processed_tool_calls
1420 |                 })
1421 |                 
1422 |                 with console.status("[bold green]Running tools..."):
1423 |                     tool_responses = self.process_tool_calls(assistant_message.tool_calls, query=user_input)
1424 |                 
1425 |                 # Continue the conversation with tool responses
                # Loop so the model can chain further tool calls based on earlier results
1427 |                 max_loop_iterations = self.max_tool_iterations  # Use configurable setting
1428 |                 current_iteration = 0
1429 |                 
1430 |                 while current_iteration < max_loop_iterations:
1431 |                     # Add retry logic for follow-up API calls
1432 |                     max_retries = 3
1433 |                     retry_count = 0
1434 |                     follow_up = None
1435 |                     
1436 |                     while retry_count < max_retries:
1437 |                         try:
1438 |                             follow_up = self.client.chat.completions.create(
1439 |                                 model=self.model,
1440 |                                 messages=self.messages,
1441 |                                 tools=api_tools,  # Pass tools to enable recursive function calling
1442 |                                 stream=False
1443 |                             )
1444 |                             break  # Success, exit retry loop
1445 |                         except Exception as e:
1446 |                             retry_count += 1
1447 |                             if retry_count >= max_retries:
1448 |                                 raise  # Re-raise if we've exhausted retries
1449 |                                 
1450 |                             # Exponential backoff
1451 |                             wait_time = 2 ** retry_count
1452 |                             if self.verbose:
1453 |                                 console.print(f"[yellow]Follow-up API call failed, retrying in {wait_time}s... ({retry_count}/{max_retries})[/yellow]")
1454 |                             time.sleep(wait_time)
1455 |                     
1456 |                     # Check if the follow-up response contains more tool calls
1457 |                     assistant_message = follow_up.choices[0].message
1458 |                     follow_up_text = assistant_message.content or ""
1459 |                     
1460 |                     # If there are no more tool calls, we're done with the loop
1461 |                     if not hasattr(assistant_message, 'tool_calls') or not assistant_message.tool_calls:
1462 |                         if follow_up_text:
1463 |                             console.print(Markdown(follow_up_text))
1464 |                             response_text += "\n" + follow_up_text
1465 |                         
1466 |                         # Add the final assistant message to the conversation
1467 |                         self.messages.append({"role": "assistant", "content": follow_up_text})
1468 |                         break
1469 |                     
1470 |                     # Process the new tool calls
1471 |                     current_tool_calls = []
1472 |                     for tool_call in assistant_message.tool_calls:
1473 |                         # Handle both object-style and dict-style tool calls
1474 |                         if isinstance(tool_call, dict):
1475 |                             processed_tool_call = tool_call.copy()
1476 |                         else:
1477 |                             # Convert object to dict
1478 |                             processed_tool_call = {
1479 |                                 "id": tool_call.id,
1480 |                                 "function": {
1481 |                                     "name": tool_call.function.name,
1482 |                                     "arguments": tool_call.function.arguments
1483 |                                 }
1484 |                             }
1485 |                         
1486 |                         # Ensure type field is present
1487 |                         processed_tool_call["type"] = "function"
1488 |                         current_tool_calls.append(processed_tool_call)
1489 |                     
1490 |                     # Add the assistant message with tool calls
1491 |                     self.messages.append({
1492 |                         "role": "assistant",
1493 |                         "content": follow_up_text,
1494 |                         "tool_calls": current_tool_calls
1495 |                     })
1496 |                     
1497 |                     # Process the new tool calls
1498 |                     with console.status(f"[bold green]Running tools (iteration {current_iteration + 1})...[/bold green]"):
                        tool_responses = self.process_tool_calls(current_tool_calls, query=user_input)
1500 |                     
1501 |                     # Increment the iteration counter
1502 |                     current_iteration += 1
1503 |                 
1504 |                 # If we've reached the maximum number of iterations, add a warning
1505 |                 if current_iteration >= max_loop_iterations:
                    # Keep the stored text free of Rich markup; style only the console output
                    warning_message = f"Warning: Reached maximum number of tool call iterations ({max_loop_iterations}). Some operations may be incomplete."
                    console.print(f"[yellow]{warning_message}[/yellow]")
                    response_text += f"\n\n{warning_message}"
1509 |             else:
1510 |                 console.print(Markdown(response_text))
1511 |             
1512 |             # Add assistant response to messages if not already added
1513 |             # (we already added it above if there were tool calls)
1514 |             if not assistant_message.tool_calls:
1515 |                 self.messages.append({"role": "assistant", "content": response_text})
1516 |             
1517 |             return response_text
1518 | 
1519 | # TODO: Create a more flexible system prompt mechanism with customizable templates
1520 | def get_system_prompt():
1521 |     return """You are OpenAI Code Assistant, a CLI tool that helps users with software engineering tasks and general information.
1522 | Use the available tools to assist the user with their requests.
1523 | 
1524 | # Tone and style
1525 | You should be concise, direct, and to the point. When you run a non-trivial bash command, 
1526 | you should explain what the command does and why you are running it.
1527 | Output text to communicate with the user; all text you output outside of tool use is displayed to the user.
1528 | Remember that your output will be displayed on a command line interface.
1529 | 
1530 | # Tool usage policy
- When searching files or code, use the dedicated search tools for efficiency.
1532 | - Always use the appropriate tool for the task.
1533 | - Use parallel tool calls when appropriate to improve performance.
1534 | - NEVER commit changes unless the user explicitly asks you to.
1535 | - For weather queries, use the Weather tool to provide real-time information.
1536 | 
1537 | # Tasks
1538 | The user will primarily request you perform software engineering tasks:
1539 | 1. Solving bugs
1540 | 2. Adding new functionality 
1541 | 3. Refactoring code
1542 | 4. Explaining code
1543 | 5. Writing tests
1544 | 
1545 | For these tasks:
1546 | 1. Use search tools to understand the codebase
1547 | 2. Implement solutions using the available tools
1548 | 3. Verify solutions with tests if possible
1549 | 4. Run lint and typecheck commands when appropriate
1550 | 
1551 | The user may also ask for general information:
1552 | 1. Weather conditions
1553 | 2. Simple calculations
1554 | 3. General knowledge questions
1555 | 
1556 | # Code style
1557 | - Follow the existing code style of the project
1558 | - Maintain consistent naming conventions
1559 | - Use appropriate libraries that are already in the project
1560 | - Add comments when code is complex or non-obvious
1561 | 
1562 | IMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, 
1563 | quality, and accuracy. Answer concisely with short lines of text unless the user asks for detail.
1564 | """
1565 | 
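# A minimal sketch for the template TODO above: render a stored prompt template
# with $placeholders via the stdlib string.Template. The template text and
# variable names a caller would pass are hypothetical.
def get_system_prompt_from_template(template_text: str, **variables) -> str:
    """Render a system prompt template, leaving unknown placeholders intact."""
    from string import Template
    return Template(template_text).safe_substitute(**variables)
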
1566 | # TODO: Add version information and CLI arguments
1567 | # TODO: Add logging configuration
1568 | # TODO: Create a proper CLI command structure with subcommands
1569 | 
1570 | # Hosting and replication capabilities
1571 | class HostingManager:
1572 |     """Manages hosting and replication of the assistant"""
1573 |     
    def __init__(self, host="127.0.0.1", port=8000):
        self.host = host
        self.port = port
        self.app = FastAPI(title="OpenAI Code Assistant API")
        self.conversation_pool = {}
        # Set here so the /health endpoint works even when the app is built via
        # create_app() and start()/start_background() is never called.
        self.start_time = time.time()
        self.setup_api()
1580 |         
1581 |     def setup_api(self):
1582 |         """Configure the FastAPI application"""
1583 |         # Add CORS middleware
1584 |         self.app.add_middleware(
1585 |             CORSMiddleware,
1586 |             allow_origins=["*"],  # In production, restrict this to specific domains
1587 |             allow_credentials=True,
1588 |             allow_methods=["*"],
1589 |             allow_headers=["*"],
1590 |         )
1591 |         
1592 |         # Define API routes
1593 |         @self.app.get("/")
1594 |         async def root():
1595 |             return {"message": "OpenAI Code Assistant API", "status": "running"}
1596 |         
1597 |         @self.app.post("/conversation")
1598 |         async def create_conversation(
1599 |             request: Request,
1600 |             background_tasks: BackgroundTasks,
1601 |             model: str = DEFAULT_MODEL,
1602 |             temperature: float = DEFAULT_TEMPERATURE
1603 |         ):
1604 |             """Create a new conversation instance"""
1605 |             conversation_id = str(uuid4())
1606 |             
1607 |             # Initialize conversation in background
1608 |             background_tasks.add_task(self._init_conversation, conversation_id, model, temperature)
1609 |             
1610 |             return {
1611 |                 "conversation_id": conversation_id,
1612 |                 "status": "initializing",
1613 |                 "model": model
1614 |             }
1615 |         
1616 |         @self.app.post("/conversation/{conversation_id}/message")
1617 |         async def send_message(
1618 |             conversation_id: str,
1619 |             request: Request
1620 |         ):
1621 |             """Send a message to a conversation"""
1622 |             if conversation_id not in self.conversation_pool:
1623 |                 raise HTTPException(status_code=404, detail="Conversation not found")
1624 |                 
1625 |             data = await request.json()
1626 |             user_input = data.get("message", "")
1627 |             
1628 |             # Get conversation instance
1629 |             conversation = self.conversation_pool[conversation_id]
1630 |             
1631 |             # Process message
1632 |             try:
1633 |                 response = conversation.get_response(user_input, stream=False)
1634 |                 return {
1635 |                     "conversation_id": conversation_id,
1636 |                     "response": response
1637 |                 }
1638 |             except Exception as e:
1639 |                 raise HTTPException(status_code=500, detail=f"Error processing message: {str(e)}")
1640 |         
1641 |         @self.app.post("/conversation/{conversation_id}/message/stream")
1642 |         async def stream_message(
1643 |             conversation_id: str,
1644 |             request: Request
1645 |         ):
1646 |             """Stream a message response from a conversation"""
1647 |             if conversation_id not in self.conversation_pool:
1648 |                 raise HTTPException(status_code=404, detail="Conversation not found")
1649 |                 
1650 |             data = await request.json()
1651 |             user_input = data.get("message", "")
1652 |             
1653 |             # Get conversation instance
1654 |             conversation = self.conversation_pool[conversation_id]
1655 |             
1656 |             # Create async generator for streaming
1657 |             async def response_generator():
1658 |                 # Add user message
1659 |                 conversation.messages.append({"role": "user", "content": user_input})
1660 |                 
1661 |                 # Create tools list for API
1662 |                 api_tools = []
1663 |                 for tool in conversation.tools:
1664 |                     api_tools.append({
1665 |                         "type": "function",
1666 |                         "function": {
1667 |                             "name": tool.name,
1668 |                             "description": tool.description,
1669 |                             "parameters": tool.parameters
1670 |                         }
1671 |                     })
1672 |                 
1673 |                 # Stream response
1674 |                 try:
1675 |                     stream = conversation.client.chat.completions.create(
1676 |                         model=conversation.model,
1677 |                         messages=conversation.messages,
1678 |                         tools=api_tools,
1679 |                         temperature=conversation.temperature,
1680 |                         stream=True
1681 |                     )
1682 |                     
1683 |                     current_tool_calls = []
1684 |                     tool_call_chunks = {}
1685 |                     response_text = ""
1686 |                     
1687 |                     for chunk in stream:
1688 |                         # If there's content, yield it
1689 |                         if chunk.choices[0].delta.content:
1690 |                             content_piece = chunk.choices[0].delta.content
1691 |                             response_text += content_piece
1692 |                             yield json.dumps({"type": "content", "content": content_piece}) + "\n"
1693 |                         
1694 |                         # Process tool calls
1695 |                         delta = chunk.choices[0].delta
1696 |                         if delta.tool_calls:
1697 |                             for tool_call_delta in delta.tool_calls:
1698 |                                 # Initialize tool call in chunks dictionary if new
1699 |                                 if tool_call_delta.index not in tool_call_chunks:
1700 |                                     tool_call_chunks[tool_call_delta.index] = {
1701 |                                         "id": "",
1702 |                                         "function": {"name": "", "arguments": ""}
1703 |                                     }
1704 |                                 
1705 |                                 # Update tool call data
1706 |                                 if tool_call_delta.id:
1707 |                                     tool_call_chunks[tool_call_delta.index]["id"] = tool_call_delta.id
1708 |                                 
1709 |                                 if tool_call_delta.function:
1710 |                                     if tool_call_delta.function.name:
1711 |                                         tool_call_chunks[tool_call_delta.index]["function"]["name"] = tool_call_delta.function.name
1712 |                                     
1713 |                                     if tool_call_delta.function.arguments:
1714 |                                         tool_call_chunks[tool_call_delta.index]["function"]["arguments"] += tool_call_delta.function.arguments
1715 |                     
1716 |                     # Convert tool call chunks to actual tool calls
1717 |                     for index, tool_call_data in tool_call_chunks.items():
1718 |                         current_tool_calls.append({
1719 |                             "id": tool_call_data["id"],
1720 |                             "function": {
1721 |                                 "name": tool_call_data["function"]["name"],
1722 |                                 "arguments": tool_call_data["function"]["arguments"]
1723 |                             }
1724 |                         })
1725 |                     
1726 |                     # Process tool calls if any
1727 |                     if current_tool_calls:
1728 |                         # Add assistant message with tool_calls to messages
1729 |                         processed_tool_calls = []
1730 |                         for tool_call in current_tool_calls:
1731 |                             processed_tool_call = tool_call.copy()
1732 |                             processed_tool_call["type"] = "function"
1733 |                             processed_tool_calls.append(processed_tool_call)
1734 |                         
1735 |                         conversation.messages.append({
1736 |                             "role": "assistant", 
1737 |                             "content": response_text,
1738 |                             "tool_calls": processed_tool_calls
1739 |                         })
1740 |                         
1741 |                         # Notify client that tools are running
1742 |                         yield json.dumps({"type": "status", "status": "running_tools"}) + "\n"
1743 |                         
1744 |                         # Process tool calls
1745 |                         tool_responses = conversation.process_tool_calls(current_tool_calls, query=user_input)
1746 |                         
1747 |                         # Notify client of tool results
1748 |                         for response in tool_responses:
1749 |                             yield json.dumps({
1750 |                                 "type": "tool_result", 
1751 |                                 "tool": response["function_name"],
1752 |                                 "result": response["result"]
1753 |                             }) + "\n"
1754 |                         
1755 |                         # Continue the conversation with tool responses
1756 |                         max_loop_iterations = conversation.max_tool_iterations
1757 |                         current_iteration = 0
1758 |                         
1759 |                         while current_iteration < max_loop_iterations:
1760 |                             follow_up = conversation.client.chat.completions.create(
1761 |                                 model=conversation.model,
1762 |                                 messages=conversation.messages,
1763 |                                 tools=api_tools,
1764 |                                 stream=False
1765 |                             )
1766 |                             
1767 |                             # Check if the follow-up response contains more tool calls
1768 |                             assistant_message = follow_up.choices[0].message
1769 |                             follow_up_text = assistant_message.content or ""
1770 |                             
1771 |                             # If there are no more tool calls, we're done with the loop
1772 |                             if not hasattr(assistant_message, 'tool_calls') or not assistant_message.tool_calls:
1773 |                                 if follow_up_text:
1774 |                                     yield json.dumps({"type": "content", "content": follow_up_text}) + "\n"
1775 |                                 
1776 |                                 # Add the final assistant message to the conversation
1777 |                                 conversation.messages.append({"role": "assistant", "content": follow_up_text})
1778 |                                 break
1779 |                             
1780 |                             # Process the new tool calls
1781 |                             current_tool_calls = []
1782 |                             for tool_call in assistant_message.tool_calls:
1783 |                                 if isinstance(tool_call, dict):
1784 |                                     processed_tool_call = tool_call.copy()
1785 |                                 else:
1786 |                                     processed_tool_call = {
1787 |                                         "id": tool_call.id,
1788 |                                         "function": {
1789 |                                             "name": tool_call.function.name,
1790 |                                             "arguments": tool_call.function.arguments
1791 |                                         }
1792 |                                     }
1793 |                                 
1794 |                                 processed_tool_call["type"] = "function"
1795 |                                 current_tool_calls.append(processed_tool_call)
1796 |                             
1797 |                             # Add the assistant message with tool calls
1798 |                             conversation.messages.append({
1799 |                                 "role": "assistant",
1800 |                                 "content": follow_up_text,
1801 |                                 "tool_calls": current_tool_calls
1802 |                             })
1803 |                             
1804 |                             # Notify client that tools are running
1805 |                             yield json.dumps({
1806 |                                 "type": "status", 
1807 |                                 "status": f"running_tools_iteration_{current_iteration + 1}"
1808 |                             }) + "\n"
1809 |                             
1810 |                             # Process the new tool calls
                            tool_responses = conversation.process_tool_calls(current_tool_calls, query=user_input)
1812 |                             
1813 |                             # Notify client of tool results
1814 |                             for response in tool_responses:
1815 |                                 yield json.dumps({
1816 |                                     "type": "tool_result", 
1817 |                                     "tool": response["function_name"],
1818 |                                     "result": response["result"]
1819 |                                 }) + "\n"
1820 |                             
1821 |                             # Increment the iteration counter
1822 |                             current_iteration += 1
1823 |                         
1824 |                         # If we've reached the maximum number of iterations, add a warning
1825 |                         if current_iteration >= max_loop_iterations:
1826 |                             warning_message = f"Warning: Reached maximum number of tool call iterations ({max_loop_iterations}). Some operations may be incomplete."
1827 |                             yield json.dumps({"type": "warning", "warning": warning_message}) + "\n"
1828 |                     else:
1829 |                         # Add assistant response to messages
1830 |                         conversation.messages.append({"role": "assistant", "content": response_text})
1831 |                     
1832 |                     # Signal completion
1833 |                     yield json.dumps({"type": "status", "status": "complete"}) + "\n"
1834 |                     
1835 |                 except Exception as e:
1836 |                     yield json.dumps({"type": "error", "error": str(e)}) + "\n"
1837 |             
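            # Note: despite the text/event-stream media type, the body is
            # newline-delimited JSON (no "data:" framing), so clients should
            # read line by line and json.loads() each line.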
1838 |             return StreamingResponse(response_generator(), media_type="text/event-stream")
1839 |         
1840 |         @self.app.get("/conversation/{conversation_id}")
1841 |         async def get_conversation(conversation_id: str):
1842 |             """Get conversation details"""
1843 |             if conversation_id not in self.conversation_pool:
1844 |                 raise HTTPException(status_code=404, detail="Conversation not found")
1845 |             
1846 |             conversation = self.conversation_pool[conversation_id]
1847 |             
1848 |             return {
1849 |                 "conversation_id": conversation_id,
1850 |                 "model": conversation.model,
1851 |                 "temperature": conversation.temperature,
1852 |                 "message_count": len(conversation.messages),
1853 |                 "token_usage": conversation.token_usage
1854 |             }
1855 |         
1856 |         @self.app.delete("/conversation/{conversation_id}")
1857 |         async def delete_conversation(conversation_id: str):
1858 |             """Delete a conversation"""
1859 |             if conversation_id not in self.conversation_pool:
1860 |                 raise HTTPException(status_code=404, detail="Conversation not found")
1861 |             
1862 |             del self.conversation_pool[conversation_id]
1863 |             
1864 |             return {"status": "deleted", "conversation_id": conversation_id}
1865 |         
1866 |         @self.app.get("/health")
1867 |         async def health_check():
1868 |             """Health check endpoint"""
1869 |             return {
1870 |                 "status": "healthy",
1871 |                 "active_conversations": len(self.conversation_pool),
1872 |                 "uptime": time.time() - self.start_time
1873 |             }
1874 |     
1875 |     async def _init_conversation(self, conversation_id, model, temperature):
1876 |         """Initialize a conversation instance"""
1877 |         conversation = Conversation()
1878 |         conversation.model = model
1879 |         conversation.temperature = temperature
1880 |         conversation.messages.append({"role": "system", "content": get_system_prompt()})
1881 |         
1882 |         self.conversation_pool[conversation_id] = conversation
1883 |     
1884 |     def start(self):
1885 |         """Start the API server"""
1886 |         self.start_time = time.time()
1887 |         uvicorn.run(self.app, host=self.host, port=self.port)
1888 |     
1889 |     def start_background(self):
1890 |         """Start the API server in a background thread"""
1891 |         self.start_time = time.time()
1892 |         thread = threading.Thread(target=uvicorn.run, args=(self.app,), 
1893 |                                  kwargs={"host": self.host, "port": self.port})
1894 |         thread.daemon = True
1895 |         thread.start()
1896 |         return thread
1897 | 
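# A hedged usage sketch (defined but never invoked): how a client could talk to
# the HostingManager API above. The endpoint paths and payload shapes mirror
# the routes registered in setup_api(); the example question is hypothetical.
def _example_api_client(host="127.0.0.1", port=8000):
    import requests
    base = f"http://{host}:{port}"
    # Create a conversation. Initialization happens in a background task, so a
    # real client may need to retry briefly before the conversation accepts
    # messages.
    conv = requests.post(f"{base}/conversation", timeout=10).json()
    conversation_id = conv["conversation_id"]
    # Send a message and return the assistant's reply.
    reply = requests.post(
        f"{base}/conversation/{conversation_id}/message",
        json={"message": "What does cli.py do?"},
        timeout=120,
    ).json()
    return reply["response"]
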
1898 | class ReplicationManager:
1899 |     """Manages replication across multiple instances"""
1900 |     
1901 |     def __init__(self, primary=True, sync_interval=60):
1902 |         self.primary = primary
1903 |         self.sync_interval = sync_interval
1904 |         self.peers = []
1905 |         self.conversation_cache = {}
1906 |         self.last_sync = time.time()
1907 |         self.sync_lock = threading.Lock()
1908 |     
1909 |     def add_peer(self, host, port):
1910 |         """Add a peer instance to replicate with"""
1911 |         peer = {"host": host, "port": port}
1912 |         if peer not in self.peers:
1913 |             self.peers.append(peer)
1914 |             return True
1915 |         return False
1916 |     
1917 |     def remove_peer(self, host, port):
1918 |         """Remove a peer instance"""
1919 |         peer = {"host": host, "port": port}
1920 |         if peer in self.peers:
1921 |             self.peers.remove(peer)
1922 |             return True
1923 |         return False
1924 |     
1925 |     def sync_conversation(self, conversation_id, conversation):
1926 |         """Sync a conversation to all peers"""
1927 |         if not self.peers:
1928 |             return
1929 |         
1930 |         # Serialize conversation
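        # Caution: pickle payloads execute arbitrary code when deserialized, so
        # this replication channel should only connect trusted peers, ideally
        # over an authenticated, encrypted transport.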
1931 |         try:
1932 |             serialized = pickle.dumps(conversation)
1933 |             
1934 |             # Calculate hash for change detection
1935 |             conversation_hash = hashlib.md5(serialized).hexdigest()
1936 |             
1937 |             # Check if conversation has changed
1938 |             if conversation_id in self.conversation_cache:
1939 |                 if self.conversation_cache[conversation_id] == conversation_hash:
1940 |                     return  # No changes, skip sync
1941 |             
1942 |             # Update cache
1943 |             self.conversation_cache[conversation_id] = conversation_hash
1944 |             
1945 |             # Sync to peers
1946 |             for peer in self.peers:
1947 |                 try:
1948 |                     url = f"http://{peer['host']}:{peer['port']}/sync/conversation/{conversation_id}"
                    requests.post(url, data=serialized, timeout=10,
                                  headers={"Content-Type": "application/octet-stream"})
1951 |                 except Exception as e:
1952 |                     logging.error(f"Failed to sync with peer {peer['host']}:{peer['port']}: {e}")
1953 |         except Exception as e:
1954 |             logging.error(f"Error serializing conversation: {e}")
1955 |     
1956 |     def start_sync_thread(self, conversation_pool):
1957 |         """Start background thread for periodic syncing"""
1958 |         def sync_worker():
1959 |             while True:
1960 |                 time.sleep(self.sync_interval)
1961 |                 
1962 |                 with self.sync_lock:
                    # Snapshot the pool so concurrent inserts/deletes from request
                    # handlers cannot raise RuntimeError mid-iteration
                    for conversation_id, conversation in list(conversation_pool.items()):
1964 |                         self.sync_conversation(conversation_id, conversation)
1965 |         
1966 |         thread = threading.Thread(target=sync_worker)
1967 |         thread.daemon = True
1968 |         thread.start()
1969 |         return thread
1970 | 
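# Note: sync_conversation() above POSTs to /sync/conversation/{id}, but no such
# route is registered in HostingManager.setup_api(). A hedged sketch of what the
# receiving side could look like inside setup_api() (names are hypothetical,
# and the same pickle trust caveat applies):
#
#     @self.app.post("/sync/conversation/{conversation_id}")
#     async def receive_sync(conversation_id: str, request: Request):
#         payload = await request.body()
#         self.conversation_pool[conversation_id] = pickle.loads(payload)
#         return {"status": "synced", "conversation_id": conversation_id}
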
1971 | @app.command()
1972 | def serve(
1973 |     host: str = typer.Option("127.0.0.1", "--host", help="Host address to bind to"),
1974 |     port: int = typer.Option(8000, "--port", "-p", help="Port to listen on"),
1975 |     workers: int = typer.Option(1, "--workers", "-w", help="Number of worker processes"),
1976 |     enable_replication: bool = typer.Option(False, "--enable-replication", help="Enable replication across instances"),
1977 |     primary: bool = typer.Option(True, "--primary/--secondary", help="Whether this is a primary or secondary instance"),
1978 |     peers: List[str] = typer.Option([], "--peer", help="Peer instances to replicate with (host:port)")
1979 | ):
1980 |     """
1981 |     Start the OpenAI Code Assistant as a web service
1982 |     """
1983 |     console.print(Panel.fit(
1984 |         f"[bold green]OpenAI Code Assistant API Server[/bold green]\n"
1985 |         f"Host: {host}\n"
1986 |         f"Port: {port}\n"
1987 |         f"Workers: {workers}\n"
1988 |         f"Replication: {'Enabled' if enable_replication else 'Disabled'}\n"
1989 |         f"Role: {'Primary' if primary else 'Secondary'}\n"
1990 |         f"Peers: {', '.join(peers) if peers else 'None'}",
1991 |         title="Server Starting",
1992 |         border_style="green"
1993 |     ))
1994 |     
1995 |     # Check API key
1996 |     if not os.getenv("OPENAI_API_KEY"):
1997 |         console.print("[bold red]Error:[/bold red] No OpenAI API key found. Please set the OPENAI_API_KEY environment variable.")
1998 |         return
1999 |     
2000 |     # Start server
2001 |     if workers > 1:
2002 |         # Use multiprocessing for multiple workers
2003 |         console.print(f"Starting server with {workers} workers...")
2004 |         uvicorn.run(
2005 |             "cli:create_app",
2006 |             host=host,
2007 |             port=port,
2008 |             workers=workers,
2009 |             factory=True
2010 |         )
2011 |     else:
2012 |         # Single process mode
2013 |         hosting_manager = HostingManager(host=host, port=port)
2014 |         
2015 |         # Setup replication if enabled
2016 |         if enable_replication:
2017 |             replication_manager = ReplicationManager(primary=primary)
2018 |             
2019 |             # Add peers
2020 |             for peer in peers:
2021 |                 try:
2022 |                     peer_host, peer_port = peer.split(":")
2023 |                     replication_manager.add_peer(peer_host, int(peer_port))
2024 |                 except ValueError:
2025 |                     console.print(f"[yellow]Warning: Invalid peer format: {peer}. Use host:port format.[/yellow]")
2026 |             
2027 |             # Start sync thread
2028 |             replication_manager.start_sync_thread(hosting_manager.conversation_pool)
2029 |             
2030 |             console.print(f"Replication enabled with {len(replication_manager.peers)} peers")
2031 |         
2032 |         # Start server
2033 |         hosting_manager.start()
2034 | 
2035 | def create_app():
2036 |     """Factory function for creating the FastAPI app (used with multiple workers)"""
2037 |     hosting_manager = HostingManager()
2038 |     return hosting_manager.app
2039 | 
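# Equivalent shell invocation (--factory tells uvicorn that cli:create_app is a
# callable returning the app rather than the app itself):
#   uvicorn cli:create_app --factory --workers 4 --host 0.0.0.0 --port 8000
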
2040 | @app.command()
2041 | def mcp_serve(
2042 |     host: str = typer.Option("127.0.0.1", "--host", help="Host address to bind to"),
2043 |     port: int = typer.Option(8000, "--port", "-p", help="Port to listen on"),
2044 |     dev_mode: bool = typer.Option(False, "--dev", help="Enable development mode with additional logging"),
2045 |     dependencies: List[str] = typer.Option([], "--dependencies", help="Additional Python dependencies to install"),
2046 |     env_file: str = typer.Option(None, "--env-file", help="Path to .env file with environment variables"),
2047 |     cache_type: str = typer.Option("memory", "--cache", help="Cache type: 'memory' or 'redis'"),
2048 |     redis_url: str = typer.Option(None, "--redis-url", help="Redis URL for cache (if cache_type is 'redis')"),
2049 |     reload: bool = typer.Option(False, "--reload", help="Enable auto-reload on code changes")
2050 | ):
2051 |     """
2052 |     Start the OpenAI Code Assistant as an MCP (Model Context Protocol) server
2053 |     
2054 |     This allows the assistant to be used as a context provider for MCP clients
2055 |     like Claude Desktop or other MCP-compatible applications.
2056 |     """
2057 |     # Load environment variables from file if specified
2058 |     if env_file:
2059 |         if os.path.exists(env_file):
2060 |             load_dotenv(env_file)
2061 |             console.print(f"[green]Loaded environment variables from {env_file}[/green]")
2062 |         else:
2063 |             console.print(f"[yellow]Warning: Environment file {env_file} not found[/yellow]")
2064 |     
2065 |     # Install additional dependencies if specified
2066 |     required_deps = ["prometheus-client", "tiktoken"]
2067 |     if cache_type == "redis":
2068 |         required_deps.append("redis")
2069 |     
2070 |     all_deps = required_deps + list(dependencies)
2071 |     
2072 |     if all_deps:
2073 |         console.print(f"[bold]Installing dependencies: {', '.join(all_deps)}[/bold]")
2074 |         try:
2075 |             import subprocess
2076 |             subprocess.check_call([sys.executable, "-m", "pip", "install", *all_deps])
2077 |             console.print("[green]Dependencies installed successfully[/green]")
2078 |         except Exception as e:
2079 |             console.print(f"[red]Error installing dependencies: {str(e)}[/red]")
2080 |             return
2081 |     
2082 |     # Configure logging for development mode
2083 |     if dev_mode:
2084 |         import logging
2085 |         logging.basicConfig(level=logging.DEBUG)
2086 |         console.print("[yellow]Development mode enabled with debug logging[/yellow]")
2087 |     
2088 |     # Print server information
2089 |     cache_info = f"Cache: {cache_type}"
2090 |     if cache_type == "redis" and redis_url:
2091 |         cache_info += f" ({redis_url})"
2092 |     
2093 |     console.print(Panel.fit(
2094 |         f"[bold green]OpenAI Code Assistant MCP Server[/bold green]\n"
2095 |         f"Host: {host}\n"
2096 |         f"Port: {port}\n"
2097 |         f"Development Mode: {'Enabled' if dev_mode else 'Disabled'}\n"
2098 |         f"Auto-reload: {'Enabled' if reload else 'Disabled'}\n"
2099 |         f"{cache_info}\n"
2100 |         f"API Key: {'Configured' if os.getenv('OPENAI_API_KEY') else 'Not Configured'}",
2101 |         title="MCP Server Starting",
2102 |         border_style="green"
2103 |     ))
2104 |     
2105 |     # Check API key
2106 |     if not os.getenv("OPENAI_API_KEY"):
2107 |         console.print("[bold red]Error:[/bold red] No OpenAI API key found. Please set the OPENAI_API_KEY environment variable.")
2108 |         return
2109 |     
2110 |     # Create required directories
2111 |     base_dir = os.path.dirname(os.path.abspath(__file__))
2112 |     os.makedirs(os.path.join(base_dir, "data"), exist_ok=True)
2113 |     os.makedirs(os.path.join(base_dir, "templates"), exist_ok=True)
2114 |     os.makedirs(os.path.join(base_dir, "static"), exist_ok=True)
2115 |     
2116 |     try:
2117 |         # Import the MCP server module
2118 |         from mcp_server import MCPServer
2119 |         
2120 |         # Start the MCP server
2121 |         server = MCPServer(cache_type=cache_type, redis_url=redis_url)
2122 |         server.start(host=host, port=port, reload=reload)
2123 |     except ImportError:
2124 |         console.print("[bold red]Error:[/bold red] MCP server module not found. Make sure mcp_server.py is in the same directory.")
2125 |     except Exception as e:
2126 |         console.print(f"[bold red]Error starting MCP server:[/bold red] {str(e)}")
2127 |         if dev_mode:
2128 |             import traceback
2129 |             console.print(traceback.format_exc())
2130 | 
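
# Example invocations (Typer exposes this command as "mcp-serve"):
#   python cli.py mcp-serve --dev
#   python cli.py mcp-serve --host 0.0.0.0 --port 8080 --cache redis --redis-url redis://localhost:6379
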
2131 | @app.command()
2132 | def mcp_client(
2133 |     server_path: str = typer.Argument(..., help="Path to the MCP server script or module"),
2134 |     model: str = typer.Option("gpt-4o", "--model", "-m", help="Model to use for reasoning"),
2135 |     host: str = typer.Option("127.0.0.1", "--host", help="Host address for the MCP server"),
2136 |     port: int = typer.Option(8000, "--port", "-p", help="Port for the MCP server")
2137 | ):
2138 |     """
2139 |     Connect to an MCP server using OpenAI Code Assistant as the reasoning engine
2140 |     
2141 |     This allows using the assistant to interact with any MCP-compatible server.
2142 |     """
2143 |     console.print(Panel.fit(
2144 |         f"[bold green]OpenAI Code Assistant MCP Client[/bold green]\n"
2145 |         f"Server: {server_path}\n"
2146 |         f"Model: {model}\n"
2147 |         f"Host: {host}\n"
2148 |         f"Port: {port}",
2149 |         title="MCP Client Starting",
2150 |         border_style="green"
2151 |     ))
2152 |     
2153 |     # Check if server path exists
2154 |     if not os.path.exists(server_path):
2155 |         console.print(f"[bold red]Error:[/bold red] Server script not found at {server_path}")
2156 |         return
2157 |     
2158 |     # Check API key
2159 |     if not os.getenv("OPENAI_API_KEY"):
2160 |         console.print("[bold red]Error:[/bold red] No OpenAI API key found. Please set the OPENAI_API_KEY environment variable.")
2161 |         return
2162 |     
2163 |     try:
2164 |         # Start the server in a subprocess
2165 |         import subprocess
2166 |         import signal
2167 |         
2168 |         # Start server process
2169 |         console.print(f"[bold]Starting MCP server from {server_path}...[/bold]")
2170 |         server_process = subprocess.Popen(
2171 |             [sys.executable, server_path],
2172 |             stdout=subprocess.PIPE,
2173 |             stderr=subprocess.PIPE,
2174 |             text=True
2175 |         )
2176 |         
2177 |         # Wait for server to start
2178 |         time.sleep(2)
2179 |         
2180 |         # Check if server started successfully
2181 |         if server_process.poll() is not None:
2182 |             console.print("[bold red]Error:[/bold red] Failed to start MCP server")
2183 |             stdout, stderr = server_process.communicate()
2184 |             console.print(f"[red]Server output:[/red]\n{stdout}\n{stderr}")
2185 |             return
2186 |         
2187 |         console.print("[green]MCP server started successfully[/green]")
2188 |         
2189 |         # Initialize conversation
2190 |         conversation = Conversation()
2191 |         conversation.model = model
2192 |         
2193 |         # Add system prompt
2194 |         conversation.messages.append({
2195 |             "role": "system", 
2196 |             "content": "You are an MCP client connecting to a Model Context Protocol server. "
2197 |                       "Use the available tools to interact with the server and help the user."
2198 |         })
2199 |         
2200 |         # Register MCP-specific tools
2201 |         mcp_tools = [
2202 |             Tool(
2203 |                 name="MCPGetContext",
2204 |                 description="Get context from the MCP server using a prompt template",
2205 |                 parameters={
2206 |                     "type": "object",
2207 |                     "properties": {
2208 |                         "prompt_id": {
2209 |                             "type": "string",
2210 |                             "description": "ID of the prompt template to use"
2211 |                         },
2212 |                         "parameters": {
2213 |                             "type": "object",
2214 |                             "description": "Parameters for the prompt template"
2215 |                         }
2216 |                     },
2217 |                     "required": ["prompt_id"]
2218 |                 },
2219 |                 function=lambda prompt_id, parameters=None: _mcp_get_context(host, port, prompt_id, parameters or {})
2220 |             ),
2221 |             Tool(
2222 |                 name="MCPListPrompts",
2223 |                 description="List available prompt templates from the MCP server",
2224 |                 parameters={
2225 |                     "type": "object",
2226 |                     "properties": {}
2227 |                 },
2228 |                 function=lambda: _mcp_list_prompts(host, port)
2229 |             ),
2230 |             Tool(
2231 |                 name="MCPGetPrompt",
2232 |                 description="Get details of a specific prompt template from the MCP server",
2233 |                 parameters={
2234 |                     "type": "object",
2235 |                     "properties": {
2236 |                         "prompt_id": {
2237 |                             "type": "string",
2238 |                             "description": "ID of the prompt template to get"
2239 |                         }
2240 |                     },
2241 |                     "required": ["prompt_id"]
2242 |                 },
2243 |                 function=lambda prompt_id: _mcp_get_prompt(host, port, prompt_id)
2244 |             )
2245 |         ]
2246 |         
2247 |         # Add MCP tools to conversation
2248 |         conversation.tools.extend(mcp_tools)
2249 |         for tool in mcp_tools:
2250 |             conversation.tool_map[tool.name] = tool.function
2251 |         
2252 |         # Main interaction loop
2253 |         console.print("[bold]MCP Client ready. Type your questions or commands.[/bold]")
2254 |         console.print("[bold]Type 'exit' to quit.[/bold]")
2255 |         
2256 |         while True:
2257 |             try:
2258 |                 user_input = Prompt.ask("\n[bold blue]>>[/bold blue]")
2259 |                 
2260 |                 # Handle exit
2261 |                 if user_input.lower() in ("exit", "quit", "/exit", "/quit"):
2262 |                     console.print("[bold yellow]Shutting down MCP client...[/bold yellow]")
2263 |                     break
2264 |                 
2265 |                 # Get response
2266 |                 conversation.get_response(user_input)
2267 |                 
2268 |             except KeyboardInterrupt:
2269 |                 console.print("\n[bold yellow]Operation cancelled by user.[/bold yellow]")
2270 |                 if Prompt.ask("[bold]Exit?[/bold]", choices=["y", "n"], default="n") == "y":
2271 |                     break
2272 |                 continue
2273 |             except Exception as e:
2274 |                 console.print(f"[bold red]Error:[/bold red] {str(e)}")
2275 |         
2276 |         # Clean up
2277 |         console.print("[bold]Stopping MCP server...[/bold]")
2278 |         server_process.terminate()
2279 |         server_process.wait(timeout=5)
2280 |         
2281 |     except Exception as e:
2282 |         console.print(f"[bold red]Error:[/bold red] {str(e)}")
2283 | 
2284 | # MCP client helper functions
2285 | def _mcp_get_context(host, port, prompt_id, parameters):
2286 |     """Get context from MCP server"""
2287 |     try:
2288 |         url = f"http://{host}:{port}/context"
2289 |         response = requests.post(
2290 |             url,
2291 |             json={
2292 |                 "prompt_id": prompt_id,
2293 |                 "parameters": parameters
2294 |             }
2295 |         )
2296 |         
2297 |         if response.status_code != 200:
2298 |             return f"Error: {response.status_code} - {response.text}"
2299 |         
2300 |         data = response.json()
2301 |         return f"Context (ID: {data['context_id']}):\n\n{data['context']}"
2302 |     except Exception as e:
2303 |         return f"Error connecting to MCP server: {str(e)}"
2304 | 
2305 | def _mcp_list_prompts(host, port):
2306 |     """List available prompt templates from MCP server"""
2307 |     try:
2308 |         url = f"http://{host}:{port}/prompts"
2309 |         response = requests.get(url)
2310 |         
2311 |         if response.status_code != 200:
2312 |             return f"Error: {response.status_code} - {response.text}"
2313 |         
2314 |         data = response.json()
2315 |         prompts = data.get("prompts", [])
2316 |         
2317 |         if not prompts:
2318 |             return "No prompt templates available"
2319 |         
2320 |         result = "Available prompt templates:\n\n"
2321 |         for prompt in prompts:
2322 |             result += f"ID: {prompt['id']}\n"
2323 |             result += f"Description: {prompt['description']}\n"
2324 |             result += f"Parameters: {', '.join(prompt.get('parameters', {}).keys())}\n\n"
2325 |         
2326 |         return result
2327 |     except Exception as e:
2328 |         return f"Error connecting to MCP server: {str(e)}"
2329 | 
2330 | def _mcp_get_prompt(host, port, prompt_id):
2331 |     """Get details of a specific prompt template"""
2332 |     try:
2333 |         url = f"http://{host}:{port}/prompts/{prompt_id}"
2334 |         response = requests.get(url)
2335 |         
2336 |         if response.status_code != 200:
2337 |             return f"Error: {response.status_code} - {response.text}"
2338 |         
2339 |         prompt = response.json()
2340 |         
2341 |         result = f"Prompt Template: {prompt['id']}\n\n"
2342 |         result += f"Description: {prompt['description']}\n\n"
2343 |         result += "Parameters:\n"
2344 |         
2345 |         for param_name, param_info in prompt.get("parameters", {}).items():
2346 |             result += f"- {param_name}: {param_info.get('description', '')}\n"
2347 |         
2348 |         result += f"\nTemplate:\n{prompt['template']}\n"
2349 |         
2350 |         return result
2351 |     except Exception as e:
2352 |         return f"Error connecting to MCP server: {str(e)}"
2353 | 
2354 | @app.command()
2355 | def mcp_multi_agent(
2356 |     server_path: str = typer.Argument(..., help="Path to the MCP server script or module"),
2357 |     config: str = typer.Option(None, "--config", "-c", help="Path to agent configuration JSON file"),
2358 |     host: str = typer.Option("127.0.0.1", "--host", help="Host address for the MCP server"),
2359 |     port: int = typer.Option(8000, "--port", "-p", help="Port for the MCP server")
2360 | ):
2361 |     """
2362 |     Start an MCP client that coordinates multiple specialized agents
2363 |     
2364 |     Agents with different roles collaborate on complex tasks
2365 |     through a shared connection to an MCP server.
2366 |     """
2367 |     # Load configuration
2368 |     if config:
2369 |         if not os.path.exists(config):
2370 |             console.print(f"[bold red]Error:[/bold red] Configuration file not found at {config}")
2371 |             return
2372 |         
2373 |         try:
2374 |             with open(config, 'r') as f:
2375 |                 config_data = json.load(f)
2376 |         except Exception as e:
2377 |             console.print(f"[bold red]Error loading configuration:[/bold red] {str(e)}")
2378 |             return
2379 |     else:
2380 |         # Default configuration
2381 |         config_data = {
2382 |             "agents": [
2383 |                 {
2384 |                     "name": "Primary",
2385 |                     "role": "primary",
2386 |                     "system_prompt": "You are a helpful assistant that uses an MCP server to provide information.",
2387 |                     "model": "gpt-4o",
2388 |                     "temperature": 0.0
2389 |                 }
2390 |             ],
2391 |             "coordination": {
2392 |                 "strategy": "single",
2393 |                 "primary_agent": "Primary"
2394 |             },
2395 |             "settings": {
2396 |                 "max_turns_per_agent": 1,
2397 |                 "enable_agent_reflection": False,
2398 |                 "enable_cross_agent_communication": False,
2399 |                 "enable_user_selection": False
2400 |             }
2401 |         }
2402 |     
2403 |     # Display configuration
2404 |     agent_names = [agent["name"] for agent in config_data["agents"]]
2405 |     console.print(Panel.fit(
2406 |         f"[bold green]OpenAI Code Assistant Multi-Agent MCP Client[/bold green]\n"
2407 |         f"Server: {server_path}\n"
2408 |         f"Host: {host}:{port}\n"
2409 |         f"Agents: {', '.join(agent_names)}\n"
2410 |         f"Coordination: {config_data['coordination']['strategy']}\n"
2411 |         f"Primary Agent: {config_data['coordination']['primary_agent']}",
2412 |         title="Multi-Agent MCP Client Starting",
2413 |         border_style="green"
2414 |     ))
2415 |     
2416 |     # Check if server path exists
2417 |     if not os.path.exists(server_path):
2418 |         console.print(f"[bold red]Error:[/bold red] Server script not found at {server_path}")
2419 |         return
2420 |     
2421 |     # Check API key
2422 |     if not os.getenv("OPENAI_API_KEY"):
2423 |         console.print("[bold red]Error:[/bold red] No OpenAI API key found. Please set the OPENAI_API_KEY environment variable.")
2424 |         return
2425 |     
2426 |     try:
2427 |         # Start the server in a subprocess
2428 |         import subprocess
2429 |         # no signal handling needed; the server is shut down via terminate() below
2430 |         
2431 |         # Start server process
2432 |         console.print(f"[bold]Starting MCP server from {server_path}...[/bold]")
2433 |         server_process = subprocess.Popen(
2434 |             [sys.executable, server_path],
2435 |             stdout=subprocess.PIPE,
2436 |             stderr=subprocess.PIPE,
2437 |             text=True
2438 |         )
2439 |         
2440 |         # Wait for server to start
2441 |         time.sleep(2)
2442 |         
2443 |         # Check if server started successfully
2444 |         if server_process.poll() is not None:
2445 |             console.print("[bold red]Error:[/bold red] Failed to start MCP server")
2446 |             stdout, stderr = server_process.communicate()
2447 |             console.print(f"[red]Server output:[/red]\n{stdout}\n{stderr}")
2448 |             return
2449 |         
2450 |         console.print("[green]MCP server started successfully[/green]")
2451 |         
2452 |         # Initialize agents
2453 |         agents = {}
2454 |         for agent_config in config_data["agents"]:
2455 |             # Create conversation for agent
2456 |             agent = Conversation()
2457 |             agent.model = agent_config.get("model", "gpt-4o")
2458 |             agent.temperature = agent_config.get("temperature", 0.0)
2459 |             
2460 |             # Add system prompt
2461 |             agent.messages.append({
2462 |                 "role": "system", 
2463 |                 "content": agent_config.get("system_prompt", "You are a helpful assistant.")
2464 |             })
2465 |             
2466 |             # Register MCP-specific tools
2467 |             mcp_tools = [
2468 |                 Tool(
2469 |                     name="MCPGetContext",
2470 |                     description="Get context from the MCP server using a prompt template",
2471 |                     parameters={
2472 |                         "type": "object",
2473 |                         "properties": {
2474 |                             "prompt_id": {
2475 |                                 "type": "string",
2476 |                                 "description": "ID of the prompt template to use"
2477 |                             },
2478 |                             "parameters": {
2479 |                                 "type": "object",
2480 |                                 "description": "Parameters for the prompt template"
2481 |                             }
2482 |                         },
2483 |                         "required": ["prompt_id"]
2484 |                     },
2485 |                     function=lambda prompt_id, parameters=None: _mcp_get_context(host, port, prompt_id, parameters or {})
2486 |                 ),
2487 |                 Tool(
2488 |                     name="MCPListPrompts",
2489 |                     description="List available prompt templates from the MCP server",
2490 |                     parameters={
2491 |                         "type": "object",
2492 |                         "properties": {}
2493 |                     },
2494 |                     function=lambda: _mcp_list_prompts(host, port)
2495 |                 ),
2496 |                 Tool(
2497 |                     name="MCPGetPrompt",
2498 |                     description="Get details of a specific prompt template from the MCP server",
2499 |                     parameters={
2500 |                         "type": "object",
2501 |                         "properties": {
2502 |                             "prompt_id": {
2503 |                                 "type": "string",
2504 |                                 "description": "ID of the prompt template to get"
2505 |                             }
2506 |                         },
2507 |                         "required": ["prompt_id"]
2508 |                     },
2509 |                     function=lambda prompt_id: _mcp_get_prompt(host, port, prompt_id)
2510 |                 )
2511 |             ]
2512 |             
2513 |             # Add MCP tools to agent
2514 |             agent.tools.extend(mcp_tools)
2515 |             for tool in mcp_tools:
2516 |                 agent.tool_map[tool.name] = tool.function
2517 |             
2518 |             # Add agent to agents dictionary
2519 |             agents[agent_config["name"]] = {
2520 |                 "config": agent_config,
2521 |                 "conversation": agent,
2522 |                 "history": []
2523 |             }
2524 |         
2525 |         # Get primary agent
2526 |         primary_agent_name = config_data["coordination"]["primary_agent"]
2527 |         if primary_agent_name not in agents:
2528 |             console.print(f"[bold red]Error:[/bold red] Primary agent '{primary_agent_name}' not found in configuration")
2529 |             return
2530 |         
2531 |         # Main interaction loop
2532 |         console.print("[bold]Multi-Agent MCP Client ready. Type your questions or commands.[/bold]")
2533 |         console.print("[bold]Special commands:[/bold]")
2534 |         console.print("  [blue]/agents[/blue] - List available agents")
2535 |         console.print("  [blue]/talk <agent_name> <message>[/blue] - Send message to specific agent")
2536 |         console.print("  [blue]/history[/blue] - Show conversation history")
2537 |         console.print("  [blue]/exit[/blue] - Exit the client")
2538 |         
2539 |         conversation_history = []
2540 |         
2541 |         while True:
2542 |             try:
2543 |                 user_input = Prompt.ask("\n[bold blue]>>[/bold blue]")
2544 |                 
2545 |                 # Handle exit
2546 |                 if user_input.lower() in ("exit", "quit", "/exit", "/quit"):
2547 |                     console.print("[bold yellow]Shutting down multi-agent MCP client...[/bold yellow]")
2548 |                     break
2549 |                 
2550 |                 # Handle special commands
2551 |                 if user_input.startswith("/agents"):
2552 |                     console.print("[bold]Available Agents:[/bold]")
2553 |                     for name, agent_data in agents.items():
2554 |                         role = agent_data["config"].get("role", "unknown")
2555 |                         model = agent_data["config"].get("model", "gpt-4o")
2556 |                         console.print(f"  [green]{name}[/green] ({role}, {model})")
2557 |                     continue
2558 |                 
2559 |                 if user_input.startswith("/history"):
2560 |                     console.print("[bold]Conversation History:[/bold]")
2561 |                     for i, entry in enumerate(conversation_history, 1):
2562 |                         if entry["role"] == "user":
2563 |                             console.print(f"[blue]{i}. User:[/blue] {entry['content']}")
2564 |                         else:
2565 |                             console.print(f"[green]{i}. {entry['agent']}:[/green] {entry['content']}")
2566 |                     continue
2567 |                 
2568 |                 if user_input.startswith("/talk "):
2569 |                     parts = user_input.split(" ", 2)
2570 |                     if len(parts) < 3:
2571 |                         console.print("[yellow]Usage: /talk <agent_name> <message>[/yellow]")
2572 |                         continue
2573 |                     
2574 |                     agent_name = parts[1]
2575 |                     message = parts[2]
2576 |                     
2577 |                     if agent_name not in agents:
2578 |                         console.print(f"[yellow]Agent '{agent_name}' not found. Use /agents to see available agents.[/yellow]")
2579 |                         continue
2580 |                     
2581 |                     # Add message to history
2582 |                     conversation_history.append({
2583 |                         "role": "user",
2584 |                         "content": message,
2585 |                         "target_agent": agent_name
2586 |                     })
2587 |                     
2588 |                     # Get response from specific agent
2589 |                     console.print(f"[bold]Asking {agent_name}...[/bold]")
2590 |                     agent = agents[agent_name]["conversation"]
2591 |                     response = agent.get_response(message)
2592 |                     
2593 |                     # Add response to history
2594 |                     conversation_history.append({
2595 |                         "role": "assistant",
2596 |                         "agent": agent_name,
2597 |                         "content": response
2598 |                     })
2599 |                     
2600 |                     # Add to agent's history
2601 |                     agents[agent_name]["history"].append({
2602 |                         "role": "user",
2603 |                         "content": message
2604 |                     })
2605 |                     agents[agent_name]["history"].append({
2606 |                         "role": "assistant",
2607 |                         "content": response
2608 |                     })
2609 |                     
2610 |                     continue
2611 |                 
2612 |                 # Regular message - use coordination strategy
2613 |                 strategy = config_data["coordination"]["strategy"]
2614 |                 
2615 |                 # Add message to history
2616 |                 conversation_history.append({
2617 |                     "role": "user",
2618 |                     "content": user_input
2619 |                 })
2620 |                 
2621 |                 if strategy in ("single", "primary"):
2622 |                     # Just use the primary agent
2623 |                     agent = agents[primary_agent_name]["conversation"]
2624 |                     response = agent.get_response(user_input)
2625 |                     
2626 |                     # Add response to history
2627 |                     conversation_history.append({
2628 |                         "role": "assistant",
2629 |                         "agent": primary_agent_name,
2630 |                         "content": response
2631 |                     })
2632 |                     
2633 |                     # Add to agent's history
2634 |                     agents[primary_agent_name]["history"].append({
2635 |                         "role": "user",
2636 |                         "content": user_input
2637 |                     })
2638 |                     agents[primary_agent_name]["history"].append({
2639 |                         "role": "assistant",
2640 |                         "content": response
2641 |                     })
2642 |                     
2643 |                 elif strategy == "round_robin":
2644 |                     # Ask each agent in turn
2645 |                     console.print("[bold]Consulting all agents...[/bold]")
2646 |                     
2647 |                     for agent_name, agent_data in agents.items():
2648 |                         console.print(f"[bold]Response from {agent_name}:[/bold]")
2649 |                         agent = agent_data["conversation"]
2650 |                         response = agent.get_response(user_input)
2651 |                         
2652 |                         # Add response to history
2653 |                         conversation_history.append({
2654 |                             "role": "assistant",
2655 |                             "agent": agent_name,
2656 |                             "content": response
2657 |                         })
2658 |                         
2659 |                         # Add to agent's history
2660 |                         agent_data["history"].append({
2661 |                             "role": "user",
2662 |                             "content": user_input
2663 |                         })
2664 |                         agent_data["history"].append({
2665 |                             "role": "assistant",
2666 |                             "content": response
2667 |                         })
2668 |                 
2669 |                 elif strategy == "voting":
2670 |                     # Ask all agents and show all responses
2671 |                     console.print("[bold]Collecting responses from all agents...[/bold]")
2672 |                     
2673 |                     responses = {}
2674 |                     for agent_name, agent_data in agents.items():
2675 |                         agent = agent_data["conversation"]
2676 |                         response = agent.get_response(user_input)
2677 |                         responses[agent_name] = response
2678 |                         
2679 |                         # Add to agent's history
2680 |                         agent_data["history"].append({
2681 |                             "role": "user",
2682 |                             "content": user_input
2683 |                         })
2684 |                         agent_data["history"].append({
2685 |                             "role": "assistant",
2686 |                             "content": response
2687 |                         })
2688 |                     
2689 |                     # Display all responses
2690 |                     for agent_name, response in responses.items():
2691 |                         console.print(f"[bold]Response from {agent_name}:[/bold]")
2692 |                         console.print(response)
2693 |                         
2694 |                         # Add response to history
2695 |                         conversation_history.append({
2696 |                             "role": "assistant",
2697 |                             "agent": agent_name,
2698 |                             "content": response
2699 |                         })
2700 |                 
2701 |                 else:
2702 |                     console.print(f"[yellow]Unknown coordination strategy: {strategy}[/yellow]")
2703 |                     # Default to primary agent
2704 |                     agent = agents[primary_agent_name]["conversation"]
2705 |                     response = agent.get_response(user_input)
2706 |                     
2707 |                     # Add response to history
2708 |                     conversation_history.append({
2709 |                         "role": "assistant",
2710 |                         "agent": primary_agent_name,
2711 |                         "content": response
2712 |                     })
2713 |                     
2714 |                     # Add to agent's history
2715 |                     agents[primary_agent_name]["history"].append({
2716 |                         "role": "user",
2717 |                         "content": user_input
2718 |                     })
2719 |                     agents[primary_agent_name]["history"].append({
2720 |                         "role": "assistant",
2721 |                         "content": response
2722 |                     })
2723 |                 
2724 |             except KeyboardInterrupt:
2725 |                 console.print("\n[bold yellow]Operation cancelled by user.[/bold yellow]")
2726 |                 if Prompt.ask("[bold]Exit?[/bold]", choices=["y", "n"], default="n") == "y":
2727 |                     break
2728 |                 continue
2729 |             except Exception as e:
2730 |                 console.print(f"[bold red]Error:[/bold red] {str(e)}")
2731 |         
2732 |         # Clean up
2733 |         console.print("[bold]Stopping MCP server...[/bold]")
2734 |         server_process.terminate()
2735 |         server_process.wait(timeout=5)
2736 |         
2737 |     except Exception as e:
2738 |         console.print(f"[bold red]Error:[/bold red] {str(e)}")
2739 | 
2740 | @app.command()
2741 | def main(
2742 |     model: str = typer.Option(DEFAULT_MODEL, "--model", "-m", help="Specify the model to use"),
2743 |     temperature: float = typer.Option(DEFAULT_TEMPERATURE, "--temperature", "-t", help="Set temperature for response generation"),
2744 |     verbose: bool = typer.Option(False, "--verbose", "-v", help="Enable verbose output with additional information"),
2745 |     enable_rl: bool = typer.Option(True, "--enable-rl/--disable-rl", help="Enable/disable reinforcement learning for tool optimization"),
2746 |     rl_update: bool = typer.Option(False, "--rl-update", help="Manually trigger an update of the RL model"),
2747 | ):
2748 |     """
2749 |     OpenAI Code Assistant - A command-line coding assistant 
2750 |     that uses OpenAI APIs with function calling and streaming
2751 |     """
2752 |     # TODO: Check for updates on startup
2753 |     # TODO: Add environment setup verification
2754 |     
2755 |     # Create welcome panel with more details
2756 |     rl_status = "enabled" if enable_rl else "disabled"
2757 |     console.print(Panel.fit(
2758 |         f"[bold green]OpenAI Code Assistant[/bold green]\n"
2759 |         f"Model: {model} (Temperature: {temperature})\n"
2760 |         f"Reinforcement Learning: {rl_status}\n"
2761 |         "Type your questions or commands. Use /help for available commands.",
2762 |         title="Welcome",
2763 |         border_style="green"
2764 |     ))
2765 |     
2766 |     # Check API key
2767 |     if not os.getenv("OPENAI_API_KEY"):
2768 |         console.print("[bold red]Error:[/bold red] No OpenAI API key found. Please set the OPENAI_API_KEY environment variable.")
2769 |         console.print("You can create a .env file with your API key or set it in your environment.")
2770 |         return
2771 |     
2772 |     # Initialize conversation
2773 |     conversation = Conversation()
2774 |     
2775 |     # Override model and temperature if specified
2776 |     if model != DEFAULT_MODEL:
2777 |         conversation.model = model
2778 |     conversation.temperature = temperature
2779 |     
2780 |     # Configure verbose mode
2781 |     conversation.verbose = verbose
2782 |     
2783 |     # Configure RL mode
2784 |     if not enable_rl and hasattr(conversation, 'tool_optimizer') and conversation.tool_optimizer is not None:
2785 |         os.environ["ENABLE_TOOL_OPTIMIZATION"] = "0"
2786 |         conversation.tool_optimizer = None
2787 |         console.print("[yellow]Reinforcement learning disabled[/yellow]")
2788 |     
2789 |     # Handle manual RL update if requested
2790 |     if rl_update and hasattr(conversation, 'tool_optimizer') and conversation.tool_optimizer is not None:
2791 |         try:
2792 |             with console.status("[bold blue]Updating RL model...[/bold blue]"):
2793 |                 result = conversation.tool_optimizer.optimizer.update_model()
2794 |             console.print(f"[green]RL model update result:[/green] {result['status']}")
2795 |             if 'metrics' in result:
2796 |                 console.print(Panel.fit(
2797 |                     "\n".join([f"{k}: {v}" for k, v in result['metrics'].items()]),
2798 |                     title="RL Metrics",
2799 |                     border_style="blue"
2800 |                 ))
2801 |         except Exception as e:
2802 |             console.print(f"[red]Error updating RL model:[/red] {e}")
2803 |     
2804 |     # Add system prompt
2805 |     conversation.messages.append({"role": "system", "content": get_system_prompt()})
2806 |     
2807 |     # TODO: Add context collection for file system and git information
2808 |     # TODO: Add session persistence to allow resuming conversations
2809 |     
2810 |     # Main interaction loop
2811 |     while True:
2812 |         try:
2813 |             user_input = Prompt.ask("\n[bold blue]>>[/bold blue]")
2814 |             
2815 |             # Handle exit
2816 |             if user_input.lower() in ("exit", "quit", "/exit", "/quit"):
2817 |                 console.print("[bold yellow]Goodbye![/bold yellow]")
2818 |                 break
2819 |             
2820 |             # Get response without wrapping it in a status indicator
2821 |             # This allows the streaming to work properly
2822 |             try:
2823 |                 conversation.get_response(user_input)
2824 |             except Exception as e:
2825 |                 console.print(f"[bold red]Error during response generation:[/bold red] {str(e)}")
2826 |                 
2827 |                 # Provide more helpful error messages for common issues
2828 |                 if "api_key" in str(e).lower():
2829 |                     console.print("[yellow]Hint: Check your OpenAI API key.[/yellow]")
2830 |                 elif "rate limit" in str(e).lower():
2831 |                     console.print("[yellow]Hint: You've hit a rate limit. Try again in a moment.[/yellow]")
2832 |                 elif "context_length_exceeded" in str(e).lower() or "maximum context length" in str(e).lower():
2833 |                     console.print("[yellow]Hint: The conversation is too long. Try using /compact to reduce its size.[/yellow]")
2834 |                 elif "Missing required parameter" in str(e):
2835 |                     console.print("[yellow]Hint: There's an API format issue. Try restarting the conversation.[/yellow]")
2836 |                 
2837 |                 # Offer recovery options
2838 |                 recovery_choice = Prompt.ask(
2839 |                     "[bold]Would you like to:[/bold]",
2840 |                     choices=["continue", "debug", "compact", "restart", "exit"],
2841 |                     default="continue"
2842 |                 )
2843 |                 
2844 |                 if recovery_choice == "debug":
2845 |                     # Show debug information
2846 |                     debug_info = {
2847 |                         "model": conversation.model,
2848 |                         "temperature": conversation.temperature,
2849 |                         "message_count": len(conversation.messages),
2850 |                         "token_usage": conversation.token_usage,
2851 |                         "conversation_id": conversation.conversation_id,
2852 |                         "session_duration": time.time() - conversation.session_start_time,
2853 |                         "tools_count": len(conversation.tools),
2854 |                         "python_version": sys.version,
2855 |                         "openai_version": getattr(OpenAI, "__version__", "Unknown")
2856 |                     }
2857 |                     console.print(Panel(json.dumps(debug_info, indent=2), title="Debug Information", border_style="yellow"))
2858 |                 elif recovery_choice == "compact":
2859 |                     # Compact the conversation
2860 |                     result = conversation.compact()
2861 |                     console.print(f"[green]{result}[/green]")
2862 |                 elif recovery_choice == "restart":
2863 |                     # Restart the conversation
2864 |                     conversation = Conversation()
2865 |                     conversation.model = model
2866 |                     conversation.temperature = temperature
2867 |                     conversation.verbose = verbose
2868 |                     conversation.messages.append({"role": "system", "content": get_system_prompt()})
2869 |                     console.print("[green]Conversation restarted.[/green]")
2870 |                 elif recovery_choice == "exit":
2871 |                     console.print("[bold yellow]Goodbye![/bold yellow]")
2872 |                     break
2873 |             
2874 |         except KeyboardInterrupt:
2875 |             console.print("\n[bold yellow]Operation cancelled by user.[/bold yellow]")
2876 |             # Offer options after cancellation
2877 |             cancel_choice = Prompt.ask(
2878 |                 "[bold]Would you like to:[/bold]",
2879 |                 choices=["continue", "exit"],
2880 |                 default="continue"
2881 |             )
2882 |             if cancel_choice == "exit":
2883 |                 console.print("[bold yellow]Goodbye![/bold yellow]")
2884 |                 break
2885 |             continue
2886 |         except Exception as e:
2887 |             console.print(f"[bold red]Unexpected error:[/bold red] {str(e)}")
2888 |             import traceback
2889 |             console.print(traceback.format_exc())
2890 |             # Ask if user wants to continue despite the error
2891 |             if Prompt.ask("[bold]Continue?[/bold]", choices=["y", "n"], default="y") == "n":
2892 |                 break
2893 | 
2894 | if __name__ == "__main__":
2895 |     app()
2896 | 
```
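
Both `mcp_client` and `mcp_multi_agent` wait a fixed `time.sleep(2)` after spawning the server before checking `poll()`, which races against slow startups and wastes time on fast ones. Below is a minimal readiness poll; `wait_for_server` is a hypothetical helper name, and the sketch assumes the server exposes the same `/prompts` endpoint the `_mcp_*` helpers already call.

```python
import time

import requests

def wait_for_server(host: str, port: int, timeout: float = 15.0) -> bool:
    """Poll the MCP server's /prompts endpoint until it responds or the deadline passes."""
    url = f"http://{host}:{port}/prompts"
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if requests.get(url, timeout=1).status_code == 200:
                return True
        except requests.RequestException:
            pass  # server not accepting connections yet; retry shortly
        time.sleep(0.25)
    return False
```

Each `time.sleep(2)` in the listing could then become `if not wait_for_server(host, port): ...` followed by the existing failure branch.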
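
On exit, both commands call `server_process.wait(timeout=5)` bare; if the server ignores `SIGTERM`, that raises `subprocess.TimeoutExpired` and surfaces in the outer `except` as a spurious error. A sketch of an escalating shutdown (`stop_server` is a hypothetical name):

```python
import subprocess

def stop_server(server_process: subprocess.Popen, grace: float = 5.0) -> None:
    """Terminate the MCP server process, escalating to kill() after a grace period."""
    server_process.terminate()
    try:
        server_process.wait(timeout=grace)
    except subprocess.TimeoutExpired:
        server_process.kill()  # the server did not exit in time; force it
        server_process.wait()  # reap the process so it does not linger as a zombie
```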
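
The three MCP tools are registered with identical schemas in both `mcp_client` and `mcp_multi_agent`. A factory would keep the schemas in one place; this refactoring sketch assumes the `Tool(name, description, parameters, function)` constructor used in the listing and reuses the module's `_mcp_*` helpers.

```python
def _make_mcp_tools(host: str, port: int) -> list:
    """Build the MCP tool set bound to one server address (refactoring sketch)."""
    return [
        Tool(
            name="MCPGetContext",
            description="Get context from the MCP server using a prompt template",
            parameters={
                "type": "object",
                "properties": {
                    "prompt_id": {"type": "string",
                                  "description": "ID of the prompt template to use"},
                    "parameters": {"type": "object",
                                   "description": "Parameters for the prompt template"},
                },
                "required": ["prompt_id"],
            },
            function=lambda prompt_id, parameters=None: _mcp_get_context(
                host, port, prompt_id, parameters or {}),
        ),
        Tool(
            name="MCPListPrompts",
            description="List available prompt templates from the MCP server",
            parameters={"type": "object", "properties": {}},
            function=lambda: _mcp_list_prompts(host, port),
        ),
        Tool(
            name="MCPGetPrompt",
            description="Get details of a specific prompt template from the MCP server",
            parameters={
                "type": "object",
                "properties": {
                    "prompt_id": {"type": "string",
                                  "description": "ID of the prompt template to get"},
                },
                "required": ["prompt_id"],
            },
            function=lambda prompt_id: _mcp_get_prompt(host, port, prompt_id),
        ),
    ]
```

Both commands would then reduce their registration blocks to `mcp_tools = _make_mcp_tools(host, port)`.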
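
The `_mcp_get_context`, `_mcp_list_prompts`, and `_mcp_get_prompt` helpers issue `requests` calls with no timeout, so a wedged server blocks the client indefinitely. A small wrapper keeping the same error-string convention (`_mcp_request` is a hypothetical name):

```python
import requests

def _mcp_request(method: str, url: str, timeout: float = 10.0, **kwargs):
    """Call the MCP server over HTTP; return (json_data, None) or (None, error_string)."""
    try:
        response = requests.request(method, url, timeout=timeout, **kwargs)
        if response.status_code != 200:
            return None, f"Error: {response.status_code} - {response.text}"
        return response.json(), None
    except requests.RequestException as e:
        return None, f"Error connecting to MCP server: {e}"
```

For example, `_mcp_list_prompts` would start with `data, err = _mcp_request("GET", f"http://{host}:{port}/prompts")` and return `err` whenever it is not `None`.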
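
`mcp_multi_agent --config` loads a JSON file and reads `config_data["agents"]`, `config_data["coordination"]["strategy"]`, and `config_data["coordination"]["primary_agent"]` directly, so all three must be present; the dispatch loop recognizes the strategies `single`/`primary`, `round_robin`, and `voting`. Below is a minimal two-agent configuration that mirrors the in-code default; the second agent and its values are purely illustrative. (The repository also ships `examples/agents_config.json`, which is likely a starting point for this format.)

```python
import json

config = {
    "agents": [
        {
            "name": "Primary",
            "role": "primary",
            "system_prompt": "You are a helpful assistant that uses an MCP server to provide information.",
            "model": "gpt-4o",
            "temperature": 0.0,
        },
        {
            # Illustrative second agent; any name, role, and prompt work.
            "name": "Critic",
            "role": "reviewer",
            "system_prompt": "You critically review the Primary agent's answers.",
            "model": "gpt-4o",
            "temperature": 0.2,
        },
    ],
    "coordination": {"strategy": "round_robin", "primary_agent": "Primary"},
    "settings": {
        "max_turns_per_agent": 1,
        "enable_agent_reflection": False,
        "enable_cross_agent_communication": False,
        "enable_user_selection": False,
    },
}

with open("agents_config.json", "w") as f:
    json.dump(config, f, indent=2)
```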