This is page 1 of 2. Use http://codebase.md/fradser/mcp-server-mas-sequential-thinking?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CLAUDE.md
├── Dockerfile
├── pyproject.toml
├── README.md
├── README.zh-CN.md
├── smithery.yaml
├── src
│   └── mcp_server_mas_sequential_thinking
│       ├── __init__.py
│       ├── config
│       │   ├── __init__.py
│       │   ├── constants.py
│       │   └── modernized_config.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── models.py
│       │   ├── session.py
│       │   └── types.py
│       ├── infrastructure
│       │   ├── __init__.py
│       │   ├── logging_config.py
│       │   └── persistent_memory.py
│       ├── main.py
│       ├── processors
│       │   ├── __init__.py
│       │   ├── multi_thinking_core.py
│       │   └── multi_thinking_processor.py
│       ├── routing
│       │   ├── __init__.py
│       │   ├── agno_workflow_router.py
│       │   ├── ai_complexity_analyzer.py
│       │   ├── complexity_types.py
│       │   └── multi_thinking_router.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_builder.py
│       │   ├── processing_orchestrator.py
│       │   ├── response_formatter.py
│       │   ├── response_processor.py
│       │   ├── retry_handler.py
│       │   ├── server_core.py
│       │   ├── thought_processor_refactored.py
│       │   └── workflow_executor.py
│       └── utils
│           ├── __init__.py
│           └── utils.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
1 | 3.13
2 | 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Python-generated files
 2 | __pycache__/
 3 | *.py[oc]
 4 | build/
 5 | dist/
 6 | wheels/
 7 | *.egg-info
 8 | 
 9 | # Virtual environments
10 | .venv
11 | .env
12 | 
13 | # Testing
14 | .hypothesis/
15 | 
```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
  1 | # --- LLM Configuration ---
  2 | # Select the LLM provider: "deepseek" (default), "groq", "openrouter", "github", "anthropic", or "ollama"
  3 | LLM_PROVIDER="deepseek"
  4 | 
  5 | # Provide the API key for the chosen provider:
  6 | # GROQ_API_KEY="your_groq_api_key"
  7 | DEEPSEEK_API_KEY="your_deepseek_api_key"
  8 | # OPENROUTER_API_KEY="your_openrouter_api_key"
  9 | # GITHUB_TOKEN="ghp_your_github_personal_access_token"
 10 | # Note: Ollama requires no API key but needs local installation
 11 | # Note: GitHub Models requires a GitHub Personal Access Token with appropriate scopes
 12 | 
 13 | # Optional: Base URL override (e.g., for custom endpoints)
 14 | LLM_BASE_URL="your_base_url_if_needed"
 15 | 
 16 | # Optional: Specify different models for Enhanced (Complex Synthesis) and Standard (Individual Processing)
 17 | # Defaults are set within the code based on the provider if these are not set.
 18 | # Example for Groq:
 19 | # GROQ_ENHANCED_MODEL_ID="openai/gpt-oss-120b"  # For complex synthesis
 20 | # GROQ_STANDARD_MODEL_ID="openai/gpt-oss-20b"  # For individual processing
 21 | # Example for DeepSeek:
 22 | # DEEPSEEK_ENHANCED_MODEL_ID="deepseek-chat"  # For complex synthesis
 23 | # DEEPSEEK_STANDARD_MODEL_ID="deepseek-chat"  # For individual processing
 24 | # Example for GitHub Models:
 25 | # GITHUB_ENHANCED_MODEL_ID="openai/gpt-5"  # For complex synthesis
 26 | # GITHUB_STANDARD_MODEL_ID="openai/gpt-5-min"  # For individual processing
 27 | # Example for OpenRouter:
 28 | # OPENROUTER_ENHANCED_MODEL_ID="deepseek/deepseek-chat-v3-0324"  # For synthesis
 29 | # OPENROUTER_STANDARD_MODEL_ID="deepseek/deepseek-r1"  # For processing
 30 | # Example for Ollama:
 31 | # OLLAMA_ENHANCED_MODEL_ID="devstral:24b"  # For complex synthesis
 32 | # OLLAMA_STANDARD_MODEL_ID="devstral:24b"  # For individual processing
 33 | 
 34 | # --- Enhanced Agno 1.8+ Features ---
 35 | # Enable enhanced agents with advanced reasoning, memory, and structured outputs
 36 | USE_ENHANCED_AGENTS="true"
 37 | 
 38 | # Team mode: "standard", "enhanced", "hybrid", or "multi_thinking"
 39 | # standard: Traditional agent setup (backward compatible)
 40 | # enhanced: Use new Agno 1.8+ features (memory, reasoning, structured outputs)
 41 | # hybrid: Mix of standard and enhanced agents for optimal performance
 42 | # multi_thinking: Use Multi-Thinking methodology for balanced thinking (recommended)
 43 | TEAM_MODE="standard"
 44 | 
 45 | # --- External Tools ---
 46 | # Required ONLY if web research capabilities are needed for thinking agents
 47 | EXA_API_KEY="your_exa_api_key"
 48 | 
 49 | # --- Adaptive Routing & Cost Optimization ---
 50 | # Enable adaptive routing (automatically selects single vs multi-agent based on complexity)
 51 | ENABLE_ADAPTIVE_ROUTING="true"
 52 | 
 53 | # Multi-Thinking intelligent routing
 54 | ENABLE_MULTI_THINKING="true"  # Enable Multi-Thinking methodology for enhanced thinking
 55 | MULTI_THINKING_COMPLEXITY_THRESHOLD="5.0"  # Minimum complexity to trigger Multi-Thinking processing
 56 | 
 57 | # Cost optimization settings
 58 | DAILY_BUDGET_LIMIT=""  # e.g., "5.0" for $5 daily limit
 59 | MONTHLY_BUDGET_LIMIT=""  # e.g., "50.0" for $50 monthly limit
 60 | PER_THOUGHT_BUDGET_LIMIT=""  # e.g., "0.10" for $0.10 per thought limit
 61 | QUALITY_THRESHOLD="0.7"  # Minimum quality score (0.0-1.0)
 62 | 
 63 | # Response optimization settings
 64 | RESPONSE_STYLE="practical"  # "practical", "academic", or "balanced"
 65 | MAX_RESPONSE_LENGTH="800"  # Maximum response length in words
 66 | ENFORCE_SIMPLICITY="true"  # Remove excessive academic complexity
 67 | 
 68 | # Provider cost overrides (cost per 1K tokens)
 69 | # DEEPSEEK_COST_PER_1K_TOKENS="0.0002"
 70 | # GROQ_COST_PER_1K_TOKENS="0.0"
 71 | # GITHUB_COST_PER_1K_TOKENS="0.0005"
 72 | # OPENROUTER_COST_PER_1K_TOKENS="0.001"
 73 | # OLLAMA_COST_PER_1K_TOKENS="0.0"
 74 | 
 75 | # --- Persistent Memory ---
 76 | # Database URL for persistent storage (defaults to local SQLite)
 77 | DATABASE_URL=""  # e.g., "postgresql://user:pass@localhost/dbname" or leave empty for SQLite
 78 | 
 79 | # Memory management
 80 | MEMORY_PRUNING_DAYS="30"  # Prune sessions older than X days
 81 | MEMORY_KEEP_RECENT="100"  # Always keep X most recent sessions
 82 | 
 83 | # Note: Logs are stored in ~/.mas_sequential_thinking/logs/ directory
 84 | # The log file is named mas_sequential_thinking.log with rotation
 85 | 
 86 | # --- Logging and Performance ---
 87 | # Core logging configuration
 88 | LOG_LEVEL="INFO"  # "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
 89 | LOG_FORMAT="text"  # "text" (readable) or "json" (structured)
 90 | LOG_TARGETS="file,console"  # Comma-separated: "file", "console"
 91 | LOG_FILE_MAX_SIZE="10MB"  # Maximum log file size (supports KB, MB, GB)
 92 | LOG_FILE_BACKUP_COUNT="5"  # Number of backup log files to keep
 93 | LOG_SAMPLING_RATE="1.0"  # Log sampling rate (0.0-1.0, 1.0 = all logs)
 94 | 
 95 | # Smart logging configuration (reduces log verbosity while maintaining insights)
 96 | SMART_LOGGING="true"  # Enable intelligent logging with adaptive verbosity
 97 | SMART_LOG_LEVEL="performance"  # "critical", "performance", "routing", "debug"
 98 | LOG_PERFORMANCE_ISSUES="true"  # Always log slow/expensive processing
 99 | LOG_RESPONSE_QUALITY="true"  # Log overly complex or academic responses
100 | 
101 | # Performance and error handling
102 | MAX_RETRIES="3"
103 | TIMEOUT="30.0"
104 | PERFORMANCE_MONITORING="true"  # Enable real-time performance monitoring
105 | PERFORMANCE_BASELINE_TIME="30.0"  # Baseline time per thought in seconds
106 | PERFORMANCE_BASELINE_EFFICIENCY="0.8"  # Target efficiency score
```
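
The budget and threshold variables above are plain strings, and an empty value means "no limit". As a minimal sketch of how a consumer might parse them (not the project's actual loader; `_float_env` is a hypothetical helper):

```python
import os

def _float_env(name: str, default: float | None = None) -> float | None:
    """Parse an optional float env var, treating empty strings as unset."""
    raw = os.environ.get(name, "").strip()
    return float(raw) if raw else default

# Budget limits stay None (unlimited) unless set above.
daily_budget = _float_env("DAILY_BUDGET_LIMIT")
quality_threshold = _float_env("QUALITY_THRESHOLD", default=0.7)
adaptive_routing = os.environ.get("ENABLE_ADAPTIVE_ROUTING", "true").lower() == "true"
```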

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Sequential Thinking Multi-Agent System (MAS) ![](https://img.shields.io/badge/A%20FRAD%20PRODUCT-WIP-yellow)
  2 | 
  3 | [![smithery badge](https://smithery.ai/badge/@FradSer/mcp-server-mas-sequential-thinking)](https://smithery.ai/server/@FradSer/mcp-server-mas-sequential-thinking) [![Twitter Follow](https://img.shields.io/twitter/follow/FradSer?style=social)](https://twitter.com/FradSer) [![Python Version](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/) [![Framework](https://img.shields.io/badge/Framework-Agno-orange.svg)](https://github.com/agno-agi/agno)
  4 | 
  5 | English | [简体中文](README.zh-CN.md)
  6 | 
  7 | This project implements an advanced sequential thinking process using a **Multi-Agent System (MAS)** built with the **Agno** framework and served via **MCP**. It represents a significant evolution from simpler state-tracking approaches by leveraging coordinated, specialized agents for deeper analysis and problem decomposition.
  8 | 
  9 | [![MseeP.ai Security Assessment Badge](https://mseep.net/pr/fradser-mcp-server-mas-sequential-thinking-badge.png)](https://mseep.ai/app/fradser-mcp-server-mas-sequential-thinking)
 10 | 
 11 | ## What is This?
 12 | 
 13 | This is an **MCP server** - not a standalone application. It runs as a background service that extends your LLM client (like Claude Desktop) with sophisticated sequential thinking capabilities. The server provides a `sequentialthinking` tool that processes thoughts through multiple specialized AI agents, each examining the problem from a different cognitive angle.
 14 | 
 15 | ## Core Architecture: Multi-Dimensional Thinking Agents
 16 | 
 17 | The system employs **6 specialized thinking agents**, each focused on a distinct cognitive perspective:
 18 | 
 19 | ### 1. **Factual Agent**
 20 | - **Focus**: Objective facts and verified data
 21 | - **Approach**: Analytical, evidence-based reasoning
 22 | - **Capabilities**:
 23 |   - Web research for current facts (via ExaTools)
 24 |   - Data verification and source citation
 25 |   - Information gap identification
 26 | - **Time allocation**: 120 seconds for thorough analysis
 27 | 
 28 | ### 2. **Emotional Agent**
 29 | - **Focus**: Intuition and emotional intelligence
 30 | - **Approach**: Gut reactions and feelings
 31 | - **Capabilities**:
 32 |   - Quick intuitive responses (30-second snapshots)
 33 |   - Visceral reactions without justification
 34 |   - Emotional pattern recognition
 35 | - **Time allocation**: 30 seconds (quick reaction mode)
 36 | 
 37 | ### 3. **Critical Agent**
 38 | - **Focus**: Risk assessment and problem identification
 39 | - **Approach**: Logical scrutiny and devil's advocate
 40 | - **Capabilities**:
 41 |   - Research counterexamples and failures (via ExaTools)
 42 |   - Identify logical flaws and risks
 43 |   - Challenge assumptions constructively
 44 | - **Time allocation**: 120 seconds for deep analysis
 45 | 
 46 | ### 4. **Optimistic Agent**
 47 | - **Focus**: Benefits, opportunities, and value
 48 | - **Approach**: Positive exploration with realistic grounding
 49 | - **Capabilities**:
 50 |   - Research success stories (via ExaTools)
 51 |   - Identify feasible opportunities
 52 |   - Explore best-case scenarios logically
 53 | - **Time allocation**: 120 seconds for balanced optimism
 54 | 
 55 | ### 5. **Creative Agent**
 56 | - **Focus**: Innovation and alternative solutions
 57 | - **Approach**: Lateral thinking and idea generation
 58 | - **Capabilities**:
 59 |   - Cross-industry innovation research (via ExaTools)
 60 |   - Divergent thinking techniques
 61 |   - Multiple solution generation
 62 | - **Time allocation**: 240 seconds (creativity needs time)
 63 | 
 64 | ### 6. **Synthesis Agent**
 65 | - **Focus**: Integration and metacognitive orchestration
 66 | - **Approach**: Holistic synthesis and final answer generation
 67 | - **Capabilities**:
 68 |   - Integrate all perspectives into coherent response
 69 |   - Answer the original question directly
 70 |   - Provide actionable, user-friendly insights
 71 | - **Time allocation**: 60 seconds for synthesis
 72 | - **Note**: Uses enhanced model, does NOT include ExaTools (focuses on integration)
 73 | 
 74 | ## AI-Powered Intelligent Routing
 75 | 
 76 | The system uses **AI-driven complexity analysis** to determine the optimal thinking sequence:
 77 | 
 78 | ### Processing Strategies:
 79 | 1. **Single Agent** (Simple questions)
 80 |    - Direct factual or emotional response
 81 |    - Fastest processing for straightforward queries
 82 | 
 83 | 2. **Double Agent** (Moderate complexity)
 84 |    - Two-step sequences (e.g., Optimistic → Critical)
 85 |    - Balanced perspectives for evaluation tasks
 86 | 
 87 | 3. **Triple Agent** (Core thinking)
 88 |    - Factual → Creative → Synthesis
 89 |    - Philosophical and analytical problems
 90 | 
 91 | 4. **Full Sequence** (Complex problems)
 92 |    - All 6 agents orchestrated together
 93 |    - Comprehensive multi-perspective analysis
 94 | 
 95 | The AI analyzer evaluates:
 96 | - Problem complexity and semantic depth
 97 | - Primary problem type (factual, emotional, creative, philosophical, etc.)
 98 | - Required thinking modes for optimal solution
 99 | - Appropriate model selection (Enhanced vs Standard)
100 | 
101 | ### AI Routing Flow Diagram
102 | 
103 | ```mermaid
104 | flowchart TD
105 |     A[Input Thought] --> B[AI Complexity Analyzer]
106 | 
107 |     B --> C{Problem Analysis}
108 |     C --> C1[Complexity Score<br/>0-100]
109 |     C --> C2[Problem Type<br/>FACTUAL/EMOTIONAL/<br/>CREATIVE/PHILOSOPHICAL]
110 |     C --> C3[Required Thinking Modes]
111 | 
112 |     C1 --> D{Routing Decision}
113 |     C2 --> D
114 |     C3 --> D
115 | 
116 |     D -->|Score: 0-25<br/>Simple| E1[Single Agent Strategy]
117 |     D -->|Score: 26-50<br/>Moderate| E2[Double Agent Strategy]
118 |     D -->|Score: 51-75<br/>Complex| E3[Triple Agent Strategy]
119 |     D -->|Score: 76-100<br/>Highly Complex| E4[Full Sequence Strategy]
120 | 
121 |     %% Single Agent Flow
122 |     E1 --> F1[Factual Agent<br/>120s + ExaTools]
123 |     F1 --> G1[Direct Response]
124 | 
125 |     %% Double Agent Flow (Full Parallel)
126 |     E2 --> DA1[Both Agents Run in Parallel]
127 |     DA1 --> DA2["Agent 1 e.g. Optimistic<br/>120s + ExaTools"]
128 |     DA1 --> DA3["Agent 2 e.g. Critical<br/>120s + ExaTools"]
129 |     DA2 --> G2[Programmatic Synthesis<br/>Combines both parallel results]
130 |     DA3 --> G2
131 | 
132 |     %% Triple Agent Flow (Full Parallel)
133 |     E3 --> TA1[All 3 Agents Run in Parallel]
134 |     TA1 --> TA2[Factual Agent<br/>120s + ExaTools]
135 |     TA1 --> TA3[Creative Agent<br/>240s + ExaTools]
136 |     TA1 --> TA4[Critical Agent<br/>120s + ExaTools]
137 |     TA2 --> G3[Programmatic Synthesis<br/>Integrates all 3 results]
138 |     TA3 --> G3
139 |     TA4 --> G3
140 | 
141 |     %% Full Sequence Flow (3-Step Process)
142 |     E4 --> FS1[Step 1: Initial Synthesis<br/>60s Enhanced Model<br/>Initial orchestration]
143 |     FS1 --> FS2[Step 2: Parallel Execution<br/>5 Agents Run Simultaneously]
144 | 
145 |     FS2 --> FS2A[Factual Agent<br/>120s + ExaTools]
146 |     FS2 --> FS2B[Emotional Agent<br/>30s Quick Response]
147 |     FS2 --> FS2C[Optimistic Agent<br/>120s + ExaTools]
148 |     FS2 --> FS2D[Critical Agent<br/>120s + ExaTools]
149 |     FS2 --> FS2E[Creative Agent<br/>240s + ExaTools]
150 | 
151 |     FS2A --> FS3[Step 3: Final Synthesis<br/>60s Enhanced Model<br/>Integrates all parallel results]
152 |     FS2B --> FS3
153 |     FS2C --> FS3
154 |     FS2D --> FS3
155 |     FS2E --> FS3
156 | 
157 |     FS3 --> G4[Final Synthesis Output<br/>Comprehensive integrated result]
158 | 
159 |     G1 --> H[Next Iteration or<br/>Final Answer]
160 |     G2 --> H
161 |     G3 --> H
162 |     G4 --> H
163 | 
164 |     style A fill:#e1f5fe
165 |     style B fill:#f3e5f5
166 |     style C fill:#fff3e0
167 |     style D fill:#e8f5e8
168 |     style TA1 fill:#ffecb3
169 |     style FS2 fill:#ffecb3
170 |     style G1 fill:#fce4ec
171 |     style G2 fill:#fce4ec
172 |     style G3 fill:#fce4ec
173 |     style G4 fill:#fce4ec
174 |     style H fill:#f1f8e9
175 | ```
176 | 
177 | **Key Insights:**
178 | - **Parallel Execution**: Non-synthesis agents run simultaneously for maximum efficiency
179 | - **Synthesis Integration**: Synthesis agents process parallel results sequentially
180 | - **Two Processing Types**:
181 |   - **Synthesis Agent**: Real AI agent using Enhanced Model for integration
182 |   - **Programmatic Synthesis**: Code-based combination when no Synthesis Agent
183 | - **Performance**: Parallel processing optimizes both speed and quality
184 | 
185 | ## Research Capabilities (ExaTools Integration)
186 | 
187 | **4 out of 6 agents** are equipped with web research capabilities via ExaTools:
188 | 
189 | - **Factual Agent**: Search for current facts, statistics, verified data
190 | - **Critical Agent**: Find counterexamples, failed cases, regulatory issues
191 | - **Optimistic Agent**: Research success stories, positive case studies
192 | - **Creative Agent**: Discover innovations across different industries
193 | - **Emotional & Synthesis Agents**: No ExaTools (focused on internal processing)
194 | 
195 | Research is **optional** - requires `EXA_API_KEY` environment variable. The system works perfectly without it, using pure reasoning capabilities.
196 | 
197 | ## Model Intelligence
198 | 
199 | ### Dual Model Strategy:
200 | - **Enhanced Model**: Used for Synthesis agent (complex integration tasks)
201 | - **Standard Model**: Used for individual thinking agents
202 | - **AI Selection**: System automatically chooses the right model based on task complexity
203 | 
204 | ### Supported Providers:
205 | - **DeepSeek** (default) - High performance, cost-effective
206 | - **Groq** - Ultra-fast inference
207 | - **OpenRouter** - Access to multiple models
208 | - **GitHub Models** - OpenAI models via GitHub API
209 | - **Anthropic** - Claude models with prompt caching
210 | - **Ollama** - Local model execution
211 | 
212 | ## Key Differences from Original Version (TypeScript)
213 | 
214 | This Python/Agno implementation marks a fundamental shift from the original TypeScript version:
215 | 
216 | | Feature/Aspect      | Python/Agno Version (Current)                                        | TypeScript Version (Original)                        |
217 | | :------------------ | :------------------------------------------------------------------- | :--------------------------------------------------- |
218 | | **Architecture**    | **Multi-Agent System (MAS)**; Active processing by a team of agents. | **Single Class State Tracker**; Simple logging/storing. |
219 | | **Intelligence**    | **Distributed Agent Logic**; Embedded in specialized agents & Coordinator. | **External LLM Only**; No internal intelligence.     |
220 | | **Processing**      | **Active Analysis & Synthesis**; Agents *act* on the thought.      | **Passive Logging**; Merely recorded the thought.    |
221 | | **Frameworks**      | **Agno (MAS) + FastMCP (Server)**; Uses dedicated MAS library.     | **MCP SDK only**.                                    |
222 | | **Coordination**    | **Explicit Team Coordination Logic** (`Team` in `coordinate` mode).  | **None**; No coordination concept.                   |
223 | | **Validation**      | **Pydantic Schema Validation**; Robust data validation.            | **Basic Type Checks**; Less reliable.              |
224 | | **External Tools**  | **Integrated (Exa via Researcher)**; Can perform research tasks.   | **None**.                                            |
225 | | **Logging**         | **Structured Python Logging (File + Console)**; Configurable.      | **Console Logging with Chalk**; Basic.             |
226 | | **Language & Ecosystem** | **Python**; Leverages Python AI/ML ecosystem.                    | **TypeScript/Node.js**.                              |
227 | 
228 | In essence, the system evolved from a passive thought *recorder* to an active thought *processor* powered by a collaborative team of AI agents.
229 | 
230 | ## How it Works (Multi-Dimensional Processing)
231 | 
232 | 1.  **Initiation:** An external LLM uses the `sequentialthinking` tool to define the problem and initiate the process.
233 | 2.  **Tool Call:** The LLM calls the `sequentialthinking` tool with the current thought, structured according to the `ThoughtData` model.
234 | 3.  **AI Complexity Analysis:** The system uses AI-powered analysis to determine the optimal thinking sequence based on problem complexity and type.
235 | 4.  **Agent Routing:** Based on the analysis, the system routes the thought to the appropriate thinking agents (single, double, triple, or full sequence).
236 | 5.  **Parallel Processing:** Multiple thinking agents process the thought simultaneously from their specialized perspectives:
237 |    - Factual agents gather objective data (with optional web research)
238 |    - Critical agents identify risks and problems
239 |    - Optimistic agents explore opportunities and benefits
240 |    - Creative agents generate innovative solutions
241 |    - Emotional agents provide intuitive insights
242 | 6.  **Research Integration:** Agents equipped with ExaTools conduct targeted web research to enhance their analysis.
243 | 7.  **Synthesis & Integration:** The Synthesis agent integrates all perspectives into a coherent, actionable response using enhanced models.
244 | 8.  **Response Generation:** The system returns a comprehensive analysis with guidance for next steps.
245 | 9.  **Iteration:** The calling LLM uses the synthesized response to formulate the next thinking step or conclude the process.
246 | 
247 | ## Token Consumption Warning
248 | 
249 | **High Token Usage:** Due to its Multi-Agent System architecture, this tool consumes significantly **more tokens** than single-agent alternatives or the previous TypeScript version. Each `sequentialthinking` call invokes multiple specialized agents in parallel.
250 | 
251 | This parallel processing can raise token usage substantially (potentially 5-10x that of simpler sequential approaches), but it yields correspondingly deeper and more comprehensive analysis.
252 | 
253 | ## MCP Tool: `sequentialthinking`
254 | 
255 | The server exposes a single MCP tool that processes sequential thoughts:
256 | 
257 | ### Parameters:
258 | ```typescript
259 | {
260 |   thought: string,              // Current thinking step content
261 |   thoughtNumber: number,         // Sequence number (≥1)
262 |   totalThoughts: number,         // Estimated total steps
263 |   nextThoughtNeeded: boolean,    // Is another step required?
264 |   isRevision: boolean,           // Revising previous thought?
265 |   branchFromThought?: number,    // Branch point (for exploration)
266 |   branchId?: string,             // Branch identifier
267 |   needsMoreThoughts: boolean     // Need to extend sequence?
268 | }
269 | ```
270 | 
271 | ### Response:
272 | Returns synthesized analysis from the multi-agent system with:
273 | - Processed thought analysis
274 | - Guidance for next steps
275 | - Branch and revision tracking
276 | - Status and metadata
277 | 
278 | ## Installation
279 | 
280 | ### Prerequisites
281 | 
282 | - Python 3.10+
283 | - LLM API access (choose one):
284 |     - **DeepSeek**: `DEEPSEEK_API_KEY` (default, recommended)
285 |     - **Groq**: `GROQ_API_KEY`
286 |     - **OpenRouter**: `OPENROUTER_API_KEY`
287 |     - **GitHub Models**: `GITHUB_TOKEN`
288 |     - **Anthropic**: `ANTHROPIC_API_KEY`
289 |     - **Ollama**: Local installation (no API key)
290 | - **Optional**: `EXA_API_KEY` for web research capabilities
291 | - `uv` package manager (recommended) or `pip`
292 | 
293 | ### Quick Start
294 | 
295 | #### 1. Install via Smithery (Recommended)
296 | 
297 | ```bash
298 | npx -y @smithery/cli install @FradSer/mcp-server-mas-sequential-thinking --client claude
299 | ```
300 | 
301 | #### 2. Manual Installation
302 | 
303 | ```bash
304 | # Clone the repository
305 | git clone https://github.com/FradSer/mcp-server-mas-sequential-thinking.git
306 | cd mcp-server-mas-sequential-thinking
307 | 
308 | # Install with uv (recommended)
309 | uv pip install .
310 | 
311 | # Or with pip
312 | pip install .
313 | ```
314 | 
315 | ### Configuration
316 | 
317 | #### For MCP Clients (Claude Desktop, etc.)
318 | 
319 | Add to your MCP client configuration:
320 | 
321 | ```json
322 | {
323 |   "mcpServers": {
324 |     "sequential-thinking": {
325 |       "command": "mcp-server-mas-sequential-thinking",
326 |       "env": {
327 |         "LLM_PROVIDER": "deepseek",
328 |         "DEEPSEEK_API_KEY": "your_api_key",
329 |         "EXA_API_KEY": "your_exa_key_optional"
330 |       }
331 |     }
332 |   }
333 | }
334 | ```
335 | 
336 | #### Environment Variables
337 | 
338 | Create a `.env` file or set these variables:
339 | 
340 | ```bash
341 | # LLM Provider (required)
342 | LLM_PROVIDER="deepseek"  # deepseek, groq, openrouter, github, anthropic, ollama
343 | DEEPSEEK_API_KEY="sk-..."
344 | 
345 | # Optional: Enhanced/Standard Model Selection
346 | # DEEPSEEK_ENHANCED_MODEL_ID="deepseek-chat"  # For synthesis
347 | # DEEPSEEK_STANDARD_MODEL_ID="deepseek-chat"  # For other agents
348 | 
349 | # Optional: Web Research (enables ExaTools)
350 | # EXA_API_KEY="your_exa_api_key"
351 | 
352 | # Optional: Custom endpoint
353 | # LLM_BASE_URL="https://custom-endpoint.com"
354 | ```
355 | 
356 | ### Model Configuration Examples
357 | 
358 | ```bash
359 | # Groq with different models
360 | GROQ_ENHANCED_MODEL_ID="openai/gpt-oss-120b"
361 | GROQ_STANDARD_MODEL_ID="openai/gpt-oss-20b"
362 | 
363 | # Anthropic with Claude models
364 | ANTHROPIC_ENHANCED_MODEL_ID="claude-3-5-sonnet-20241022"
365 | ANTHROPIC_STANDARD_MODEL_ID="claude-3-5-haiku-20241022"
366 | 
367 | # GitHub Models
368 | GITHUB_ENHANCED_MODEL_ID="gpt-4o"
369 | GITHUB_STANDARD_MODEL_ID="gpt-4o-mini"
370 | ```
371 | 
372 | ## Usage
373 | 
374 | ### As MCP Server
375 | 
376 | Once installed and configured in your MCP client:
377 | 
378 | 1. The `sequentialthinking` tool becomes available
379 | 2. Your LLM can use it to process complex thoughts
380 | 3. The system automatically routes to appropriate thinking agents
381 | 4. Results are synthesized and returned to your LLM
382 | 
383 | ### Direct Execution
384 | 
385 | Run the server manually for testing:
386 | 
387 | ```bash
388 | # Using installed script
389 | mcp-server-mas-sequential-thinking
390 | 
391 | # Using uv
392 | uv run mcp-server-mas-sequential-thinking
393 | 
394 | # Using Python
395 | python src/mcp_server_mas_sequential_thinking/main.py
396 | ```
397 | 
398 | ## Development
399 | 
400 | ### Setup
401 | 
402 | ```bash
403 | # Clone repository
404 | git clone https://github.com/FradSer/mcp-server-mas-sequential-thinking.git
405 | cd mcp-server-mas-sequential-thinking
406 | 
407 | # Create virtual environment
408 | python -m venv .venv
409 | source .venv/bin/activate  # On Windows: .venv\Scripts\activate
410 | 
411 | # Install with dev dependencies
412 | uv pip install -e ".[dev]"
413 | ```
414 | 
415 | ### Code Quality
416 | 
417 | ```bash
418 | # Format and lint
419 | uv run ruff check . --fix
420 | uv run ruff format .
421 | uv run mypy .
422 | 
423 | # Run tests (when available)
424 | uv run pytest
425 | ```
426 | 
427 | ### Testing with MCP Inspector
428 | 
429 | ```bash
430 | npx @modelcontextprotocol/inspector uv run mcp-server-mas-sequential-thinking
431 | ```
432 | 
433 | Open http://127.0.0.1:6274/ and test the `sequentialthinking` tool.
434 | 
435 | ## System Characteristics
436 | 
437 | ### Strengths:
438 | - **Multi-perspective analysis**: 6 different cognitive approaches
439 | - **AI-powered routing**: Intelligent complexity analysis
440 | - **Research capabilities**: 4 agents with web search (optional)
441 | - **Flexible processing**: Single to full sequence strategies
442 | - **Model optimization**: Enhanced/Standard model selection
443 | - **Provider agnostic**: Works with multiple LLM providers
444 | 
445 | ### Considerations:
446 | - **Token usage**: Multi-agent processing uses more tokens than single-agent
447 | - **Processing time**: Complex sequences take longer but provide deeper insights
448 | - **API costs**: Research capabilities require separate Exa API subscription
449 | - **Model selection**: Enhanced models cost more but provide better synthesis
450 | 
451 | ## Project Structure
452 | 
453 | ```
454 | mcp-server-mas-sequential-thinking/
455 | ├── src/mcp_server_mas_sequential_thinking/
456 | │   ├── main.py                          # MCP server entry point
457 | │   ├── processors/
458 | │   │   ├── multi_thinking_core.py       # 6 thinking agents definition
459 | │   │   └── multi_thinking_processor.py  # Sequential processing logic
460 | │   ├── routing/
461 | │   │   ├── ai_complexity_analyzer.py    # AI-powered analysis
462 | │   │   └── multi_thinking_router.py     # Intelligent routing
463 | │   ├── services/
464 | │   │   ├── thought_processor_refactored.py
465 | │   │   ├── workflow_executor.py
466 | │   │   └── context_builder.py
467 | │   └── config/
468 | │       ├── modernized_config.py         # Provider strategies
469 | │       └── constants.py                 # System constants
470 | ├── pyproject.toml                       # Project configuration
471 | └── README.md                            # This file
472 | ```
473 | 
474 | ## Changelog
475 | 
476 | See [CHANGELOG.md](CHANGELOG.md) for version history.
477 | 
478 | ## Contributing
479 | 
480 | Contributions are welcome! Please ensure:
481 | 
482 | 1. Code follows project style (ruff, mypy)
483 | 2. Commit messages use conventional commits format
484 | 3. All tests pass before submitting PR
485 | 4. Documentation is updated as needed
486 | 
487 | ## License
488 | 
489 | This project is licensed under the MIT License - see the LICENSE file for details.
490 | 
491 | ## Acknowledgments
492 | 
493 | - Built with [Agno](https://github.com/agno-agi/agno) v2.0+ framework
494 | - Model Context Protocol by [Anthropic](https://www.anthropic.com/)
495 | - Research capabilities powered by [Exa](https://exa.ai/) (optional)
496 | - Multi-dimensional thinking inspired by Edward de Bono's work
497 | 
498 | ## Support
499 | 
500 | - GitHub Issues: [Report bugs or request features](https://github.com/FradSer/mcp-server-mas-sequential-thinking/issues)
501 | - Documentation: Check CLAUDE.md for detailed implementation notes
502 | - MCP Protocol: [Official MCP Documentation](https://modelcontextprotocol.io/)
503 | 
504 | ---
505 | 
506 | **Note**: This is an MCP server, designed to work with MCP-compatible clients like Claude Desktop. It is not a standalone chat application.
```
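
The `sequentialthinking` parameter schema documented in the README above maps onto tool-call payloads like the following sketch (field names come from the README; the values and scenario are hypothetical):

```python
# Hypothetical first call to the `sequentialthinking` tool.
first_thought = {
    "thought": "Define the problem: how can we cut API latency in half?",
    "thoughtNumber": 1,
    "totalThoughts": 5,
    "nextThoughtNeeded": True,
    "isRevision": False,
    "needsMoreThoughts": False,
}

# A later call that branches from thought 2 to explore an alternative,
# using the optional branching fields.
branch_thought = {
    "thought": "Explore a caching-based alternative to the rewrite.",
    "thoughtNumber": 3,
    "totalThoughts": 5,
    "nextThoughtNeeded": True,
    "isRevision": False,
    "branchFromThought": 2,
    "branchId": "caching-alternative",
    "needsMoreThoughts": False,
}
```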

--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------

```markdown
  1 | # CLAUDE.md
  2 | 
  3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
  4 | 
  5 | ## Essential Commands
  6 | 
  7 | ```bash
  8 | # Setup & Installation
  9 | uv pip install -e ".[dev]"                              # Install all dependencies
 10 | uv run python -c "import agno; print('Agno imported successfully')"  # Verify setup
 11 | 
 12 | # Development Workflow
 13 | uv run mcp-server-mas-sequential-thinking               # Run server
 14 | uv run ruff check . --fix && uv run ruff format . && uv run mypy .  # Code quality
 15 | uv run pytest --cov=. --cov-report=html                # Test with coverage (no tests currently)
 16 | 
 17 | # Monitoring & Debugging
 18 | tail -f ~/.sequential_thinking/logs/sequential_thinking.log  # Live logs
 19 | grep "ERROR\|WARNING" ~/.sequential_thinking/logs/sequential_thinking.log  # Error search
 20 | ```
 21 | 
 22 | ### Additional Commands
 23 | - **Upgrade agno**: `uv pip install --upgrade agno`
 24 | - **Alternative server runs**: `uvx mcp-server-mas-sequential-thinking` or `uv run python src/mcp_server_mas_sequential_thinking/main.py`
 25 | - **MCP Inspector**: `npx @modelcontextprotocol/inspector uv run python src/mcp_server_mas_sequential_thinking/main.py`
 26 | 
 27 | ## Project Overview
 28 | 
 29 | **Pure Multi-Thinking Implementation** built with the **Agno v2.0** framework and served via MCP. Features **AI-powered intelligent routing** in a streamlined architecture (src layout, Python 3.10+). The system processes thoughts through a multi-directional thinking methodology with AI-driven complexity analysis and optimized model selection.
 30 | 
 31 | ### Core Architecture
 32 | 
 33 | **Entry Point:** `src/mcp_server_mas_sequential_thinking/main.py`
 34 | - FastMCP application with `sequentialthinking` tool
 35 | - Uses refactored service-based architecture with dependency injection
 36 | - Global state management via `ServerState` and `ThoughtProcessor`
 37 | 
 38 | **Multi-Thinking Processing Flow:**
 39 | ```
 40 | External LLM → sequentialthinking tool → ThoughtProcessor → WorkflowExecutor → MultiThinkingWorkflowRouter → MultiThinkingSequentialProcessor → Individual Thinking Agents → Synthesis
 41 | ```
 42 | 
 43 | **Core Services (Dependency Injection):**
 44 | - **ThoughtProcessor**: Main orchestrator using specialized services
 45 | - **WorkflowExecutor**: Manages Multi-Thinking workflow execution
 46 | - **ContextBuilder**: Builds context-aware prompts
 47 | - **ResponseFormatter**: Formats final responses
 48 | - **SessionMemory**: Tracks thought history and branching
 49 | 
 50 | **AI-Powered Routing System:**
 51 | - **MultiThinkingIntelligentRouter**: AI-driven complexity analysis determines thinking sequence
 52 | - **AIComplexityAnalyzer**: Uses LLM to assess thought complexity, problem type, and required thinking modes
 53 | - **MultiThinkingSequentialProcessor**: Executes chosen sequence with model optimization
 54 | - **Thinking Complexity levels**: SINGLE, DOUBLE, TRIPLE, FULL sequences
 55 | - **Model Intelligence**: Enhanced model for Blue Hat synthesis, Standard model for individual hats
 56 | 
 57 | ### Configuration & Data Flow
 58 | 
 59 | **Environment Variables:**
 60 | - `LLM_PROVIDER`: Provider selection (deepseek, groq, openrouter, ollama, github, anthropic)
 61 | - `{PROVIDER}_API_KEY`: API keys (e.g., `DEEPSEEK_API_KEY`, `GITHUB_TOKEN`)
 62 | - `{PROVIDER}_ENHANCED_MODEL_ID`: Enhanced model for complex synthesis (Blue Hat)
 63 | - `{PROVIDER}_STANDARD_MODEL_ID`: Standard model for individual hat processing
 64 | - `EXA_API_KEY`: Research capabilities (if using research agents)
 65 | 
 66 | **AI-Driven Model Strategy:**
 67 | - **Enhanced Models**: Used for Blue Hat (metacognitive) thinking - complex synthesis, integration
 68 | - **Standard Models**: Used for individual hat processing (White, Red, Black, Yellow, Green)
 69 | - **Intelligent Selection**: System automatically chooses appropriate model based on hat type and AI-assessed complexity
 70 | - **AI Analysis**: Replaces rule-based pattern matching with semantic understanding
 71 | 
 72 | **Processing Strategies (AI-Determined):**
 73 | 1. **Single Hat**: Simple focused thinking (White Hat facts, Red Hat emotions, etc.)
 74 | 2. **Double Hat**: Two-step sequences (e.g., Optimistic→Critical for idea evaluation)
 75 | 3. **Triple Hat**: Core philosophical thinking (Factual→Creative→Synthesis)
 76 | 4. **Full Sequence**: Complete Multi-Thinking methodology with Blue Hat orchestration
 77 | 
 78 | ### Streamlined Module Architecture
 79 | 
 80 | **Core Framework:**
 81 | - `core/session.py`: SessionMemory for thought history (simplified, no Team dependency)
 82 | - `core/models.py`: ThoughtData validation and core data structures
 83 | - `core/types.py`: Type definitions and protocols
 84 | - `config/modernized_config.py`: Provider strategies with Enhanced/Standard model configuration
 85 | - `config/constants.py`: All system constants and configuration values
 86 | 
 87 | **Multi-Thinking Implementation:**
 88 | - `processors/multi_thinking_processor.py`: Main Multi-Thinking sequential processor
 89 | - `processors/multi_thinking_core.py`: Hat definitions, agent factory, core logic
 90 | - `routing/multi_thinking_router.py`: AI-powered intelligent routing based on thought complexity
 91 | - `routing/ai_complexity_analyzer.py`: AI-driven complexity and problem type analysis
 92 | - `routing/agno_workflow_router.py`: Agno Workflow integration layer
 93 | - `routing/complexity_types.py`: Core complexity analysis types and enums
 94 | 
 95 | **Service Layer:**
 96 | - `services/thought_processor_refactored.py`: Main thought processor with dependency injection
 97 | - `services/workflow_executor.py`: Multi-Thinking workflow execution
 98 | - `services/context_builder.py`: Context-aware prompt building
 99 | - `services/response_formatter.py`: Response formatting and extraction
100 | - `services/server_core.py`: Server lifecycle and state management
101 | 
102 | **Infrastructure:**
103 | - `infrastructure/logging_config.py`: Structured logging with rotation
104 | - `infrastructure/persistent_memory.py`: Memory persistence capabilities
105 | - `utils/utils.py`: Logging utilities and helper functions
106 | 
107 | ### Architecture Characteristics
108 | 
109 | - **Clean Architecture**: Dependency injection, separation of concerns, service-based design
110 | - **AI-Driven Intelligence**: Pure AI-based complexity analysis replacing rule-based systems
111 | - **Multi-Thinking Focus**: Streamlined implementation without legacy multi-agent complexity
112 | - **Model Optimization**: Smart model selection (Enhanced for synthesis, Standard for processing)
113 | - **Modern Python**: Dataclasses, type hints, async/await, pattern matching
114 | - **Environment-based config**: No config files, all via environment variables
115 | - **Structured logging**: Rotation to `~/.sequential_thinking/logs/`
116 | 
117 | ## Enhanced/Standard Model Configuration
118 | 
119 | **Naming Convention:**
120 | - `{PROVIDER}_ENHANCED_MODEL_ID`: For complex synthesis tasks (Blue Hat thinking)
121 | - `{PROVIDER}_STANDARD_MODEL_ID`: For individual hat processing
122 | 
123 | **Examples:**
124 | ```bash
125 | # GitHub Models
126 | GITHUB_ENHANCED_MODEL_ID="openai/gpt-5"      # Blue Hat synthesis
127 | GITHUB_STANDARD_MODEL_ID="openai/gpt-5-min"  # Individual hats
128 | 
129 | # DeepSeek
130 | DEEPSEEK_ENHANCED_MODEL_ID="deepseek-chat"   # Both synthesis and processing
131 | DEEPSEEK_STANDARD_MODEL_ID="deepseek-chat"
132 | 
133 | # Anthropic
134 | ANTHROPIC_ENHANCED_MODEL_ID="claude-3-5-sonnet-20241022"  # Synthesis
135 | ANTHROPIC_STANDARD_MODEL_ID="claude-3-5-haiku-20241022"   # Processing
136 | ```
137 | 
138 | **Usage Strategy:**
139 | - **Enhanced Model**: Blue Hat (metacognitive orchestrator) uses enhanced model for final synthesis
140 | - **Standard Model**: Individual hats (White, Red, Black, Yellow, Green) use standard model
141 | - **AI-Driven Selection**: System intelligently chooses model based on hat type and AI-assessed complexity
142 | 
143 | ## Agno v2.0 Integration
144 | 
145 | **Framework Features:**
146 | - **Workflow Integration**: Uses Agno Workflow system for Multi-Thinking processing
147 | - **Agent Factory**: Creates specialized hat agents with ReasoningTools
148 | - **Performance**: ~10,000x faster agent creation, ~50x less memory vs LangGraph
149 | - **Version**: Requires `agno>=2.0.5`
150 | 
151 | **Key Integration Points:**
152 | - `MultiThinkingWorkflowRouter`: Bridges MCP and Agno Workflow systems
153 | - `MultiThinkingAgentFactory`: Creates individual hat agents using Agno v2.0
154 | - **StepOutput**: Workflow results converted to Agno StepOutput format
155 | 
156 | **For Agno Documentation**: Use deepwiki MCP reference with repoName: `agno-agi/agno`
157 | 
158 | ## AI-Powered Complexity Analysis
159 | 
160 | **Key Innovation**: The system uses AI instead of rule-based pattern matching for complexity analysis:
161 | 
162 | - **AIComplexityAnalyzer**: Uses LLM to assess thought complexity, semantic depth, and problem characteristics
163 | - **Problem Type Detection**: AI identifies primary problem type (FACTUAL, EMOTIONAL, CREATIVE, PHILOSOPHICAL, etc.)
164 | - **Thinking Modes Recommendation**: AI suggests required thinking modes for optimal processing
165 | - **Semantic Understanding**: Replaces keyword matching with contextual analysis across languages
166 | 
167 | **Benefits over Rule-Based Systems:**
168 | - Better handling of nuanced, philosophical, or cross-cultural content
169 | - Adaptive to new problem types without code changes
170 | - Semantic understanding vs simple pattern matching
171 | - Reduced maintenance overhead (no keyword lists to maintain)
172 | 
173 | ## Development Notes
174 | 
175 | **No Test Suite**: The project currently has no test files - all tests were removed during recent cleanup.
176 | 
177 | **Recent Architecture Changes**:
178 | - Removed legacy multi-agent systems (agents/, optimization/, analysis/ modules)
179 | - Consolidated configuration (removed processing_constants.py redundancy)
180 | - Streamlined to 8 core modules focused on AI-driven Multi-Thinking
181 | 
182 | **Code Quality**: Uses ruff for linting/formatting, mypy for type checking. Run `uv run ruff check . --fix && uv run ruff format . && uv run mypy .` before committing.
```
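
CLAUDE.md's routing description combines with the README's score bands (0-25, 26-50, 51-75, 76-100) into a simple decision shape. A minimal sketch assuming those bands; `pick_sequence` and `pick_model_id` are hypothetical names, not the router's actual API:

```python
from enum import Enum

class SequenceLevel(Enum):
    """Thinking sequence depth, per the SINGLE/DOUBLE/TRIPLE/FULL levels above."""
    SINGLE = "single"
    DOUBLE = "double"
    TRIPLE = "triple"
    FULL = "full"

def pick_sequence(complexity_score: float) -> SequenceLevel:
    """Map an AI complexity score (0-100) to a sequence, per the README bands."""
    if complexity_score <= 25:
        return SequenceLevel.SINGLE
    if complexity_score <= 50:
        return SequenceLevel.DOUBLE
    if complexity_score <= 75:
        return SequenceLevel.TRIPLE
    return SequenceLevel.FULL

def pick_model_id(is_synthesis_step: bool, enhanced_id: str, standard_id: str) -> str:
    """Enhanced model only for Blue Hat synthesis; standard for individual hats."""
    return enhanced_id if is_synthesis_step else standard_id
```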

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/utils/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Utilities module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains utility functions and helper classes.
 4 | """
 5 | 
 6 | from .utils import setup_logging
 7 | 
 8 | __all__ = [
 9 |     # From utils
10 |     "setup_logging",
11 | ]
12 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/utils/utils.py:
--------------------------------------------------------------------------------

```python
 1 | """Application utilities - completely rewritten for new architecture."""
 2 | 
 3 | from mcp_server_mas_sequential_thinking.infrastructure.logging_config import (
 4 |     get_logger,
 5 |     setup_logging,
 6 | )
 7 | 
 8 | # Re-export for convenience - no backward compatibility
 9 | __all__ = ["get_logger", "setup_logging"]
10 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/infrastructure/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Infrastructure module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains infrastructure concerns including logging configuration
 4 | and persistent memory.
 5 | """
 6 | 
 7 | from .logging_config import LogTimer, MetricsLogger, get_logger, setup_logging
 8 | from .persistent_memory import (
 9 |     PersistentMemoryManager,
10 |     create_persistent_memory,
11 | )
12 | 
13 | __all__ = [
14 |     "LogTimer",
15 |     "MetricsLogger",
16 |     # From persistent_memory
17 |     "PersistentMemoryManager",
18 |     "create_persistent_memory",
19 |     # From logging_config
20 |     "get_logger",
21 |     "setup_logging",
22 | ]
23 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/core/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Core domain module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains the core domain logic including data models,
 4 | types, and session management functionality.
 5 | """
 6 | 
 7 | from .models import ThoughtData
 8 | from .session import SessionMemory
 9 | from .types import (
10 |     ConfigurationError,
11 |     CoordinationPlan,
12 |     ExecutionMode,
13 |     ProcessingMetadata,
14 |     TeamCreationError,
15 |     ThoughtProcessingError,
16 | )
17 | 
18 | __all__ = [
19 |     "ConfigurationError",
20 |     "CoordinationPlan",
21 |     "ExecutionMode",
22 |     "ProcessingMetadata",
23 |     # From session
24 |     "SessionMemory",
25 |     "TeamCreationError",
26 |     # From models
27 |     "ThoughtData",
28 |     # From types
29 |     "ThoughtProcessingError",
30 | ]
31 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
 2 | FROM python:3.10-alpine
 3 | 
 4 | # Set environment variables to prevent Python from writing .pyc files
 5 | ENV PYTHONDONTWRITEBYTECODE=1 \
 6 |     PYTHONUNBUFFERED=1
 7 | 
 8 | # Install build dependencies
 9 | RUN apk add --no-cache gcc musl-dev libffi-dev openssl-dev
10 | 
11 | # Set working directory
12 | WORKDIR /app
13 | 
14 | # Copy project files
15 | COPY pyproject.toml ./
16 | COPY src ./src
17 | COPY README.md ./
18 | 
19 | # Upgrade pip and install hatchling build tool
20 | RUN pip install --upgrade pip && \
21 |     pip install hatchling
22 | 
23 | # Build and install the project
24 | RUN pip install .
25 | 
26 | # Command to run the MCP server
27 | CMD ["mcp-server-mas-sequential-thinking"]
28 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/processors/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Processors module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains processing logic including multi-thinking core functionality
 4 | and multi-thinking processing implementation.
 5 | """
 6 | 
 7 | from .multi_thinking_core import (
 8 |     MultiThinkingAgentFactory,
 9 |     ThinkingDirection,
10 |     create_thinking_agent,
11 |     get_all_thinking_directions,
12 |     get_thinking_timing,
13 | )
14 | from .multi_thinking_processor import (
15 |     MultiThinkingProcessingResult,
16 |     MultiThinkingSequentialProcessor,
17 |     create_multi_thinking_step_output,
18 | )
19 | 
20 | __all__ = [
21 |     "MultiThinkingAgentFactory",
22 |     # From multi_thinking_processor
23 |     "MultiThinkingProcessingResult",
24 |     "MultiThinkingSequentialProcessor",
25 |     # From multi_thinking_core
26 |     "ThinkingDirection",
27 |     "create_multi_thinking_step_output",
28 |     "create_thinking_agent",
29 |     "get_all_thinking_directions",
30 |     "get_thinking_timing",
31 | ]
32 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/config/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Configuration module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains all configuration-related functionality including
 4 | constants, processing constants, and modernized configuration management.
 5 | """
 6 | 
 7 | from .constants import (
 8 |     ComplexityThresholds,
 9 |     DefaultTimeouts,
10 |     DefaultValues,
11 |     FieldLengthLimits,
12 |     LoggingLimits,
13 |     PerformanceMetrics,
14 |     ProcessingDefaults,
15 |     QualityThresholds,
16 |     SecurityConstants,
17 |     ValidationLimits,
18 | )
19 | from .modernized_config import check_required_api_keys, get_model_config
20 | 
21 | __all__ = [
22 |     # From constants
23 |     "ComplexityThresholds",
24 |     "DefaultTimeouts",
25 |     "DefaultValues",
26 |     "FieldLengthLimits",
27 |     "LoggingLimits",
28 |     "PerformanceMetrics",
29 |     "ProcessingDefaults",
30 |     "QualityThresholds",
31 |     "SecurityConstants",
32 |     "ValidationLimits",
33 |     # From modernized_config
34 |     "check_required_api_keys",
35 |     "get_model_config",
36 | ]
37 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/routing/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Routing module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains routing and workflow logic including adaptive routing,
 4 | workflow routing, optimization, and multi-thinking routing functionality.
 5 | """
 6 | 
 7 | from .agno_workflow_router import (
 8 |     MultiThinkingWorkflowResult,
 9 |     MultiThinkingWorkflowRouter,
10 | )
11 | from .ai_complexity_analyzer import AIComplexityAnalyzer
12 | from .complexity_types import ComplexityLevel, ProcessingStrategy
13 | from .multi_thinking_router import (
14 |     MultiThinkingIntelligentRouter,
15 |     create_multi_thinking_router,
16 | )
17 | 
18 | __all__ = [
19 |     # From ai_complexity_analyzer
20 |     "AIComplexityAnalyzer",
21 |     # From complexity_types
22 |     "ComplexityLevel",
23 |     # From multi_thinking_router
24 |     "MultiThinkingIntelligentRouter",
25 |     # From agno_workflow_router
26 |     "MultiThinkingWorkflowResult",
27 |     "MultiThinkingWorkflowRouter",
28 |     "ProcessingStrategy",
29 |     "create_multi_thinking_router",
30 | ]
31 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """Services module for MCP Sequential Thinking Server.
 2 | 
 3 | This module contains business logic services including server core,
 4 | response processing, retry handling, and specialized processing services.
 5 | """
 6 | 
 7 | from .context_builder import ContextBuilder
 8 | from .processing_orchestrator import ProcessingOrchestrator
 9 | from .response_formatter import ResponseExtractor, ResponseFormatter
10 | from .response_processor import ResponseProcessor
11 | from .retry_handler import TeamProcessingRetryHandler
12 | from .server_core import (
13 |     ServerConfig,
14 |     ServerState,
15 |     ThoughtProcessor,
16 |     create_server_lifespan,
17 |     create_validated_thought_data,
18 | )
19 | from .workflow_executor import WorkflowExecutor
20 | 
21 | __all__ = [
22 |     # From context_builder
23 |     "ContextBuilder",
24 |     # From processing_orchestrator
25 |     "ProcessingOrchestrator",
26 |     # From response_formatter
27 |     "ResponseExtractor",
28 |     "ResponseFormatter",
29 |     # From response_processor
30 |     "ResponseProcessor",
31 |     # From server_core
32 |     "ServerConfig",
33 |     "ServerState",
34 |     # From retry_handler
35 |     "TeamProcessingRetryHandler",
36 |     "ThoughtProcessor",
37 |     # From workflow_executor
38 |     "WorkflowExecutor",
39 |     "create_server_lifespan",
40 |     "create_validated_thought_data",
41 | ]
42 | 
```

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
 2 | 
 3 | startCommand:
 4 |   type: stdio
 5 |   configSchema:
 6 |     # JSON Schema defining the configuration options for the MCP.
 7 |     type: object
 8 |     required:
 9 |       - llmProvider
10 |       - deepseekApiKey
11 |     properties:
12 |       llmProvider:
13 |         type: string
14 |         default: deepseek
15 |         description: "LLM Provider. Options: deepseek, groq, openrouter."
16 |       deepseekApiKey:
17 |         type: string
18 |         description: API key for DeepSeek. Required if llmProvider is 'deepseek'.
19 |       groqApiKey:
20 |         type: string
21 |         default: ""
22 |         description: API key for Groq. Required if llmProvider is 'groq'.
23 |       openrouterApiKey:
24 |         type: string
25 |         default: ""
26 |         description: API key for OpenRouter. Required if llmProvider is 'openrouter'.
27 |       exaApiKey:
28 |         type: string
29 |         default: ""
30 |         description: API key for Exa to enable web research capabilities for thinking agents.
31 |   commandFunction:
32 |     # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
33 |     |-
34 |     (config) => ({
35 |       command: 'mcp-server-mas-sequential-thinking',
36 |       args: [],
37 |       env: {
38 |         LLM_PROVIDER: config.llmProvider,
39 |         DEEPSEEK_API_KEY: config.deepseekApiKey,
40 |         GROQ_API_KEY: config.groqApiKey,
41 |         OPENROUTER_API_KEY: config.openrouterApiKey,
42 |         EXA_API_KEY: config.exaApiKey
43 |       }
44 |     })
45 |   exampleConfig:
46 |     llmProvider: deepseek
47 |     deepseekApiKey: your_deepseek_api_key
48 |     groqApiKey: ""
49 |     openrouterApiKey: ""
50 |     exaApiKey: your_exa_api_key
51 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """MCP Sequential Thinking Server with AI-Powered Routing.
 2 | 
 3 | A sophisticated Multi-Agent System (MAS) for sequential thinking with intelligent
 4 | AI-based routing using advanced multi-thinking methodology.
 5 | 
 6 | AI ROUTING FEATURES:
 7 | - AI-powered complexity analysis replacing rule-based scoring
 8 | - Intelligent thinking sequence selection (single, double, triple, full)
 9 | - Semantic understanding of philosophical and technical depth
10 | - Automatic synthesis integration for complex thought processing
11 | 
12 | CORE ARCHITECTURE:
13 | - AI-first routing with semantic complexity understanding
14 | - Multi-thinking methodology with intelligent orchestration
15 | - Unified agent factory eliminating code duplication
16 | - Separated server concerns for better maintainability
17 | - Optimized performance with async processing
18 | 
19 | Key Features:
20 | - **AI Routing**: Semantic complexity analysis and strategy selection
21 | - **Multi-Thinking Integration**: Factual, emotional, critical, optimistic, creative,
22 |   and synthesis processing
23 | - **Multi-Provider Support**: DeepSeek, Groq, OpenRouter, GitHub, Ollama
24 | - **Philosophical Understanding**: Deep analysis for existential questions
25 | 
26 | Usage:
27 |     # Server usage
28 |     uv run mcp-server-mas-sequential-thinking
29 | 
30 |     # Direct processing
31 |     from mcp_server_mas_sequential_thinking.services.server_core import ThoughtProcessor
32 |     from mcp_server_mas_sequential_thinking.core.session import SessionMemory
33 | 
34 |     processor = ThoughtProcessor(session_memory)
35 |     result = await processor.process_thought(thought_data)
36 | 
37 | AI Routing Strategies:
38 |     - Single Direction: Simple factual questions → quick focused processing
39 |     - Double/Triple Direction: Moderate complexity → focused thinking sequences
40 |     - Full Multi-Thinking: Complex philosophical questions → comprehensive analysis
41 | 
42 | Configuration:
43 |     Environment variables:
44 |     - LLM_PROVIDER: Primary provider (deepseek, groq, openrouter, github, ollama)
45 |     - {PROVIDER}_API_KEY: API keys for providers
46 |     - {PROVIDER}_{ENHANCED|STANDARD}_MODEL_ID: Model selection
47 |     - AI_CONFIDENCE_THRESHOLD: Routing confidence threshold (default: 0.7)
48 | 
49 | Performance Benefits:
50 |     - Intelligent complexity assessment using AI understanding
51 |     - Automated synthesis processing for coherent responses
52 |     - Semantic depth recognition for philosophical questions
53 |     - Efficient thinking sequence selection based on content analysis
54 | """
55 | 
56 | __version__ = "0.6.0"
57 | 
58 | 
59 | def get_version() -> str:
60 |     """Get package version."""
61 |     return __version__
62 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/routing/complexity_types.py:
--------------------------------------------------------------------------------

```python
 1 | """Core complexity analysis types and enums.
 2 | 
 3 | This file contains the essential types needed for complexity analysis,
 4 | extracted from the old adaptive_routing.py to support AI-first architecture.
 5 | """
 6 | 
 7 | from abc import ABC, abstractmethod
 8 | from dataclasses import dataclass
 9 | from enum import Enum
10 | from typing import TYPE_CHECKING
11 | 
12 | if TYPE_CHECKING:
13 |     from mcp_server_mas_sequential_thinking.core.models import ThoughtData
14 | 
15 | 
16 | class ProcessingStrategy(Enum):
17 |     """Processing strategy types."""
18 | 
19 |     SINGLE_AGENT = "single_agent"
20 |     MULTI_AGENT = "multi_agent"
21 |     HYBRID = "hybrid"
22 | 
23 | 
24 | class ComplexityLevel(Enum):
25 |     """Complexity levels for thought processing."""
26 | 
27 |     SIMPLE = "simple"
28 |     MODERATE = "moderate"
29 |     COMPLEX = "complex"
30 |     HIGHLY_COMPLEX = "highly_complex"
31 | 
32 | 
33 | @dataclass
34 | class ComplexityMetrics:
35 |     """Metrics for thought complexity analysis."""
36 | 
37 |     # Core metrics (used by both AI and fallback analyzers)
38 |     complexity_score: float  # Primary score (0-100)
39 | 
40 |     # Detailed breakdown (optional, for debugging/analysis)
41 |     word_count: int = 0
42 |     sentence_count: int = 0
43 |     question_count: int = 0
44 |     technical_terms: int = 0
45 |     branching_references: int = 0
46 |     research_indicators: int = 0
47 |     analysis_depth: int = 0
48 |     philosophical_depth_boost: int = 0
49 | 
50 |     # AI Analysis Results (critical for routing decisions)
51 |     primary_problem_type: str = "GENERAL"  # AI-determined problem type
52 |     thinking_modes_needed: list[str] | None = None  # AI-recommended thinking modes
53 | 
54 |     # Analysis metadata
55 |     analyzer_type: str = "unknown"  # "ai" or "basic"
56 |     reasoning: str = ""  # Why this score was assigned
57 | 
58 |     def __post_init__(self) -> None:
59 |         if self.thinking_modes_needed is None:
60 |             self.thinking_modes_needed = ["SYNTHESIS"]
61 | 
62 | 
63 | @dataclass
64 | class RoutingDecision:
65 |     """Decision result from routing analysis."""
66 | 
67 |     strategy: ProcessingStrategy
68 |     complexity_level: ComplexityLevel
69 |     complexity_score: float
70 |     reasoning: str
71 |     estimated_token_usage: tuple[int, int]  # (min, max)
72 |     estimated_cost: float
73 |     specialist_recommendations: list[str] | None = None
74 | 
75 |     def __post_init__(self) -> None:
76 |         if self.specialist_recommendations is None:
77 |             self.specialist_recommendations = []
78 | 
79 | 
80 | class ComplexityAnalyzer(ABC):
81 |     """Abstract base class for complexity analysis."""
82 | 
83 |     @abstractmethod
84 |     async def analyze(self, thought_data: "ThoughtData") -> ComplexityMetrics:
85 |         """Analyze thought complexity and return metrics."""
86 | 
```
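
A concrete `ComplexityAnalyzer` only has to implement `analyze`. The heuristic below is a toy sketch for illustration, not the project's AI or fallback analyzer:

```python
# Illustrative subclass; the scoring heuristic is made up for the example.
from mcp_server_mas_sequential_thinking.core.models import ThoughtData
from mcp_server_mas_sequential_thinking.routing.complexity_types import (
    ComplexityAnalyzer,
    ComplexityMetrics,
)


class WordCountAnalyzer(ComplexityAnalyzer):
    """Scores complexity purely by word count, capped at 100."""

    async def analyze(self, thought_data: ThoughtData) -> ComplexityMetrics:
        words = thought_data.thought.split()
        return ComplexityMetrics(
            complexity_score=min(100.0, len(words) * 2.0),
            word_count=len(words),
            analyzer_type="basic",
            reasoning="word-count heuristic",
        )
```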

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/core/session.py:
--------------------------------------------------------------------------------

```python
 1 | """Session management for thought history and branching."""
 2 | 
 3 | import asyncio
 4 | from dataclasses import dataclass, field
 5 | 
 6 | from mcp_server_mas_sequential_thinking.config.constants import ValidationLimits
 7 | 
 8 | from .models import ThoughtData
 9 | from .types import BranchId, ThoughtNumber
10 | 
11 | 
12 | @dataclass
13 | class SessionMemory:
14 |     """Manages thought history and branches with optimized lookups and DoS protection.
15 | 
16 |     Thread-safe implementation with async locks to prevent race conditions.
17 |     """
18 | 
19 |     thought_history: list[ThoughtData] = field(default_factory=list)
20 |     branches: dict[BranchId, list[ThoughtData]] = field(default_factory=dict)
21 |     # High-performance cache for O(1) thought lookups by number
22 |     _thought_cache: dict[ThoughtNumber, ThoughtData] = field(
23 |         default_factory=dict, init=False, repr=False
24 |     )
25 |     # Thread safety lock for concurrent access protection
26 |     _lock: asyncio.Lock = field(default_factory=asyncio.Lock, init=False, repr=False)
27 | 
28 |     # DoS protection constants as class attributes
29 |     MAX_THOUGHTS_PER_SESSION: int = ValidationLimits.MAX_THOUGHTS_PER_SESSION
30 |     MAX_BRANCHES_PER_SESSION: int = ValidationLimits.MAX_BRANCHES_PER_SESSION
31 |     MAX_THOUGHTS_PER_BRANCH: int = ValidationLimits.MAX_THOUGHTS_PER_BRANCH
32 | 
33 |     async def add_thought(self, thought: ThoughtData) -> None:
34 |         """Add thought with efficient DoS protection and optimized branch management.
35 | 
36 |         Thread-safe implementation with async lock to prevent race conditions.
37 |         """
38 |         async with self._lock:
39 |             # Early DoS protection checks with descriptive errors
40 |             self._validate_session_limits(thought)
41 | 
42 |             # Update data structures atomically under lock
43 |             self.thought_history.append(thought)
44 |             self._thought_cache[thought.thoughtNumber] = thought
45 | 
46 |             # Handle branching with optimized setdefault pattern
47 |             if thought.branchFromThought is not None and thought.branchId is not None:
48 |                 self.branches.setdefault(thought.branchId, []).append(thought)
49 | 
50 |     def _validate_session_limits(self, thought: ThoughtData) -> None:
51 |         """Validate session limits with early exit optimization."""
52 |         # Primary session limit check
53 |         if len(self.thought_history) >= self.MAX_THOUGHTS_PER_SESSION:
54 |             raise ValueError(
55 |                 f"Session exceeds maximum {self.MAX_THOUGHTS_PER_SESSION} thoughts"
56 |             )
57 | 
58 |         # Branch-specific validations only if needed
59 |         if not thought.branchId:
60 |             return
61 | 
62 |         # Check total branch limit
63 |         if len(self.branches) >= self.MAX_BRANCHES_PER_SESSION:
64 |             raise ValueError(
65 |                 f"Session exceeds maximum {self.MAX_BRANCHES_PER_SESSION} branches"
66 |             )
67 | 
68 |         # Check individual branch limit
69 |         if (
70 |             thought.branchId in self.branches
71 |             and len(self.branches[thought.branchId]) >= self.MAX_THOUGHTS_PER_BRANCH
72 |         ):
73 |             raise ValueError(
74 |                 f"Branch '{thought.branchId}' exceeds maximum "
75 |                 f"{self.MAX_THOUGHTS_PER_BRANCH} thoughts"
76 |             )
77 | 
78 |     async def find_thought_content(self, thought_number: ThoughtNumber) -> str:
79 |         """Find the content of a specific thought by number using optimized cache lookup."""
80 |         async with self._lock:
81 |             # Use cache for O(1) lookup instead of O(n) search
82 |             thought = self._thought_cache.get(thought_number)
83 |             return thought.thought if thought else "Unknown thought"
84 | 
85 |     async def get_branch_summary(self) -> dict[BranchId, int]:
86 |         """Get summary of all branches."""
87 |         async with self._lock:
88 |             return {
89 |                 branch_id: len(thoughts)
90 |                 for branch_id, thoughts in self.branches.items()
91 |             }
92 | 
93 |     def get_current_branch_id(self, thought: ThoughtData) -> str:
94 |         """Get the current branch ID for a thought with improved logic."""
95 |         return thought.branchId or "main"
96 | 
```
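
A short usage sketch of the async API above (field values are illustrative): thoughts are added under the lock, then retrieved through the O(1) cache:

```python
import asyncio

from mcp_server_mas_sequential_thinking.core.models import ThoughtData
from mcp_server_mas_sequential_thinking.core.session import SessionMemory


async def demo() -> None:
    session = SessionMemory()
    await session.add_thought(
        ThoughtData(
            thought="Initial framing of the problem",
            thoughtNumber=1,
            totalThoughts=2,
            nextThoughtNeeded=True,
            isRevision=False,
            branchFromThought=None,
            branchId=None,
            needsMoreThoughts=False,
        )
    )
    print(await session.find_thought_content(1))  # O(1) cache lookup
    print(await session.get_branch_summary())     # {} until a branch exists


asyncio.run(demo())
```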

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
  1 | [project]
  2 | name = "mcp-server-mas-sequential-thinking"
  3 | version = "0.7.0"
  4 | description = "MCP Agent Implementation for Sequential Thinking"
  5 | readme = "README.md"
  6 | requires-python = ">=3.10"
  7 | authors = [
  8 |     { name = "Frad LEE", email = "[email protected]" },
  9 |     { name = "Alain Ivars", email = "[email protected]" },
 10 | ]
 11 | dependencies = [
 12 |     "agno>=2.0.5",
 13 |     "asyncio",
 14 |     "exa-py",
 15 |     "python-dotenv",
 16 |     "mcp",
 17 |     "groq",
 18 |     "ollama",
 19 |     "openrouter",
 20 |     "httpx[socks]>=0.28.1",
 21 |     "sqlalchemy",
 22 | ]
 23 | 
 24 | [project.optional-dependencies]
 25 | dev = [
 26 |     "pytest",
 27 |     "black",
 28 |     "isort",
 29 |     "mypy",
 30 |     "ruff",
 31 | ]
 32 | 
 33 | [project.scripts]
 34 | mcp-server-mas-sequential-thinking = "mcp_server_mas_sequential_thinking.main:run"
 35 | 
 36 | [build-system]
 37 | requires = ["hatchling"]
 38 | build-backend = "hatchling.build"
 39 | 
 40 | [tool.hatch.build.targets.wheel]
 41 | packages = ["src/mcp_server_mas_sequential_thinking"]
 42 | 
 43 | [dependency-groups]
 44 | dev = [
 45 |     "pytest>=8.4.1",
 46 | ]
 47 | 
 48 | [tool.ruff]
 49 | target-version = "py310"
 50 | line-length = 88
 51 | src = ["src", "tests"]
 52 | 
 53 | [tool.ruff.lint]
 54 | select = [
 55 |     "E",      # pycodestyle errors
 56 |     "W",      # pycodestyle warnings
 57 |     "F",      # pyflakes
 58 |     "I",      # isort
 59 |     "N",      # pep8-naming
 60 |     "D",      # pydocstyle
 61 |     "UP",     # pyupgrade
 62 |     "YTT",    # flake8-2020
 63 |     "ANN",    # flake8-annotations
 64 |     "ASYNC",  # flake8-async
 65 |     "S",      # flake8-bandit
 66 |     "BLE",    # flake8-blind-except
 67 |     "FBT",    # flake8-boolean-trap
 68 |     "B",      # flake8-bugbear
 69 |     "A",      # flake8-builtins
 70 |     "COM",    # flake8-commas
 71 |     "C4",     # flake8-comprehensions
 72 |     "DTZ",    # flake8-datetimez
 73 |     "T10",    # flake8-debugger
 74 |     "DJ",     # flake8-django
 75 |     "EM",     # flake8-errmsg
 76 |     "EXE",    # flake8-executable
 77 |     "FA",     # flake8-future-annotations
 78 |     "ISC",    # flake8-implicit-str-concat
 79 |     "ICN",    # flake8-import-conventions
 80 |     "G",      # flake8-logging-format
 81 |     "INP",    # flake8-no-pep420
 82 |     "PIE",    # flake8-pie
 83 |     "T20",    # flake8-print
 84 |     "PYI",    # flake8-pyi
 85 |     "PT",     # flake8-pytest-style
 86 |     "Q",      # flake8-quotes
 87 |     "RSE",    # flake8-raise
 88 |     "RET",    # flake8-return
 89 |     "SLF",    # flake8-self
 90 |     "SLOT",   # flake8-slots
 91 |     "SIM",    # flake8-simplify
 92 |     "TID",    # flake8-tidy-imports
 93 |     "TCH",    # flake8-type-checking
 94 |     "INT",    # flake8-gettext
 95 |     "ARG",    # flake8-unused-arguments
 96 |     "PTH",    # flake8-use-pathlib
 97 |     "ERA",    # eradicate
 98 |     "PD",     # pandas-vet
 99 |     "PGH",    # pygrep-hooks
100 |     "PL",     # pylint
101 |     "TRY",    # tryceratops
102 |     "FLY",    # flynt
103 |     "NPY",    # numpy
104 |     "AIR",    # airflow
105 |     "PERF",   # perflint
106 |     "FURB",   # refurb
107 |     "LOG",    # flake8-logging
108 |     "RUF",    # ruff-specific rules
109 | ]
110 | ignore = [
111 |     "D100",    # Missing docstring in public module
112 |     "D101",    # Missing docstring in public class
113 |     "D102",    # Missing docstring in public method
114 |     "D103",    # Missing docstring in public function
115 |     "D104",    # Missing docstring in public package
116 |     "D105",    # Missing docstring in magic method
117 |     "D107",    # Missing docstring in __init__
118 |     "COM812",  # Trailing comma missing
119 |     "ISC001",  # Implicitly concatenated string literals on one line
120 |     "FBT001",  # Boolean positional arg in function definition
121 |     "FBT002",  # Boolean default positional argument in function definition
122 |     "S101",    # Use of assert detected
123 |     "PLR0913", # Too many arguments to function call
124 |     "PLR2004", # Magic value used in comparison
125 |     "TRY003",  # Avoid specifying long messages outside the exception class
126 |     "EM101",   # Exception must not use a string literal, assign to variable first
127 |     "EM102",   # Exception must not use an f-string literal, assign to variable first
128 | ]
129 | 
130 | [tool.ruff.lint.per-file-ignores]
131 | "tests/*" = ["S101", "PLR2004", "ANN", "D"]
132 | "__init__.py" = ["F401"]
133 | 
134 | [tool.ruff.lint.pydocstyle]
135 | convention = "google"
136 | 
137 | [tool.ruff.lint.isort]
138 | known-first-party = ["mcp_server_mas_sequential_thinking"]
139 | split-on-trailing-comma = true
140 | 
141 | [tool.ruff.format]
142 | quote-style = "double"
143 | indent-style = "space"
144 | skip-magic-trailing-comma = false
145 | line-ending = "auto"
146 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/core/models.py:
--------------------------------------------------------------------------------

```python
  1 | """Streamlined models with consolidated validation logic."""
  2 | 
  3 | from enum import Enum
  4 | from typing import Any
  5 | 
  6 | from pydantic import BaseModel, Field, model_validator
  7 | 
  8 | from mcp_server_mas_sequential_thinking.config.constants import (
  9 |     FieldLengthLimits,
 10 |     ValidationLimits,
 11 | )
 12 | 
 13 | from .types import BranchId, ThoughtNumber
 14 | 
 15 | 
 16 | class ThoughtType(Enum):
 17 |     """Types of thoughts in the sequential thinking process."""
 18 | 
 19 |     STANDARD = "standard"
 20 |     REVISION = "revision"
 21 |     BRANCH = "branch"
 22 | 
 23 | 
 24 | def _validate_thought_relationships(data: dict) -> None:
 25 |     """Validate thought relationships with optimized validation logic."""
 26 |     # Extract the fields involved in relationship validation
 27 |     # (isRevision has no cross-field constraints, so it is not extracted)
 28 |     branch_from_thought = data.get("branchFromThought")
 29 |     branch_id = data.get("branchId")
 30 |     current_number = data.get("thoughtNumber")
 31 | 
 32 |     # Collect validation errors efficiently
 33 |     errors = []
 34 | 
 35 |     # Relationship validation with guard clauses
 36 |     if branch_id is not None and branch_from_thought is None:
 37 |         errors.append("branchId requires branchFromThought to be set")
 38 | 
 39 |     # Numeric validation with early exit
 40 |     if current_number is None:
 41 |         if errors:
 42 |             raise ValueError("; ".join(errors))
 43 |         return
 44 | 
 45 |     # Validate numeric relationships
 46 |     if branch_from_thought is not None and branch_from_thought >= current_number:
 47 |         errors.append("branchFromThought must be less than current thoughtNumber")
 48 | 
 49 |     if errors:
 50 |         raise ValueError("; ".join(errors))
 51 | 
 52 | 
 53 | class ThoughtData(BaseModel):
 54 |     """Streamlined thought data model with consolidated validation."""
 55 | 
 56 |     model_config = {"validate_assignment": True, "frozen": True}
 57 | 
 58 |     # Core fields
 59 |     thought: str = Field(
 60 |         ...,
 61 |         min_length=FieldLengthLimits.MIN_STRING_LENGTH,
 62 |         description="Content of the thought",
 63 |     )
 64 |     # MCP API compatibility - camelCase field names required
 65 |     thoughtNumber: ThoughtNumber = Field(  # noqa: N815
 66 |         ...,
 67 |         ge=ValidationLimits.MIN_THOUGHT_NUMBER,
 68 |         description="Sequence number starting from 1",
 69 |     )
 70 |     totalThoughts: int = Field(  # noqa: N815
 71 |         ...,
 72 |         ge=1,
 73 |         description="Estimated total thoughts",
 74 |     )
 75 |     nextThoughtNeeded: bool = Field(  # noqa: N815
 76 |         ..., description="Whether another thought is needed"
 77 |     )
 78 | 
 79 |     # Required workflow fields
 80 |     isRevision: bool = Field(  # noqa: N815
 81 |         ..., description="Whether this revises a previous thought"
 82 |     )
 83 |     branchFromThought: ThoughtNumber | None = Field(  # noqa: N815
 84 |         ...,
 85 |         ge=ValidationLimits.MIN_THOUGHT_NUMBER,
 86 |         description="Thought number to branch from",
 87 |     )
 88 |     branchId: BranchId | None = Field(  # noqa: N815
 89 |         ..., description="Unique branch identifier"
 90 |     )
 91 |     needsMoreThoughts: bool = Field(  # noqa: N815
 92 |         ..., description="Whether more thoughts are needed beyond estimate"
 93 |     )
 94 | 
 95 |     @property
 96 |     def thought_type(self) -> ThoughtType:
 97 |         """Determine the type of thought based on field values."""
 98 |         if self.isRevision:
 99 |             return ThoughtType.REVISION
100 |         if self.branchFromThought is not None:
101 |             return ThoughtType.BRANCH
102 |         return ThoughtType.STANDARD
103 | 
104 |     @model_validator(mode="before")
105 |     @classmethod
106 |     def validate_thought_data(cls, data: dict[str, Any]) -> dict[str, Any]:
107 |         """Consolidated validation with simplified logic."""
108 |         if isinstance(data, dict):
109 |             _validate_thought_relationships(data)
110 |         return data
111 | 
112 |     def format_for_log(self) -> str:
113 |         """Format thought for logging with optimized type-specific formatting."""
114 |         # Use match statement for modern Python pattern matching
115 |         match self.thought_type:
116 |             case ThoughtType.REVISION:
117 |                 prefix = (
118 |                     f"Revision {self.thoughtNumber}/{self.totalThoughts} "
119 |                     f"(revising #{self.branchFromThought})"
120 |                 )
121 |             case ThoughtType.BRANCH:
122 |                 prefix = (
123 |                     f"Branch {self.thoughtNumber}/{self.totalThoughts} "
124 |                     f"(from #{self.branchFromThought}, ID: {self.branchId})"
125 |                 )
126 |             case _:  # ThoughtType.STANDARD
127 |                 prefix = f"Thought {self.thoughtNumber}/{self.totalThoughts}"
128 | 
129 |         # Use multiline string formatting for better readability
130 |         return (
131 |             f"{prefix}\n"
132 |             f"  Content: {self.thought}\n"
133 |             f"  Next: {self.nextThoughtNeeded}, More: {self.needsMoreThoughts}"
134 |         )
135 | 
```
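
Two illustrative cases of the consolidated validation: pydantic surfaces the relationship errors raised in `_validate_thought_relationships` as a `ValidationError`, while a well-formed branch resolves to `ThoughtType.BRANCH`:

```python
from pydantic import ValidationError

from mcp_server_mas_sequential_thinking.core.models import ThoughtData, ThoughtType

# branchId without branchFromThought violates the relationship rule
try:
    ThoughtData(
        thought="Explore an alternative approach",
        thoughtNumber=3,
        totalThoughts=5,
        nextThoughtNeeded=True,
        isRevision=False,
        branchFromThought=None,
        branchId="alt-1",
        needsMoreThoughts=False,
    )
except ValidationError as exc:
    print(exc)  # includes "branchId requires branchFromThought to be set"

# A valid branch: branchFromThought must be less than thoughtNumber
branch = ThoughtData(
    thought="Alternative: precompute the index",
    thoughtNumber=3,
    totalThoughts=5,
    nextThoughtNeeded=True,
    isRevision=False,
    branchFromThought=2,
    branchId="alt-1",
    needsMoreThoughts=False,
)
assert branch.thought_type is ThoughtType.BRANCH
```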

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/context_builder.py:
--------------------------------------------------------------------------------

```python
  1 | """Context building service for thought processing.
  2 | 
  3 | This service handles building context-aware prompts from thought data,
  4 | managing session history, and constructing appropriate inputs for processing.
  5 | """
  6 | 
  7 | from mcp_server_mas_sequential_thinking.core import SessionMemory, ThoughtData
  8 | from mcp_server_mas_sequential_thinking.utils import setup_logging
  9 | 
 10 | logger = setup_logging()
 11 | 
 12 | 
 13 | class ContextBuilder:
 14 |     """Service responsible for building context-aware prompts and managing thought context."""
 15 | 
 16 |     def __init__(self, session: SessionMemory) -> None:
 17 |         """Initialize the context builder with session memory.
 18 | 
 19 |         Args:
 20 |             session: The session memory instance for accessing thought history
 21 |         """
 22 |         self._session = session
 23 | 
 24 |     async def build_context_prompt(self, thought_data: ThoughtData) -> str:
 25 |         """Build context-aware input prompt with optimized string construction.
 26 | 
 27 |         This method creates contextual prompts based on thought type:
 28 |         - Revision thoughts include original content
 29 |         - Branch thoughts include origin content
 30 |         - Sequential thoughts use basic format
 31 | 
 32 |         Args:
 33 |             thought_data: The thought data to build context for
 34 | 
 35 |         Returns:
 36 |             Formatted prompt string with appropriate context
 37 |         """
 38 |         # Pre-calculate base components for efficiency
 39 |         base = f"Process Thought #{thought_data.thoughtNumber}:\n"
 40 |         content = f'\nThought Content: "{thought_data.thought}"'
 41 | 
 42 |         # Add context using pattern matching with optimized string building
 43 |         match thought_data:
 44 |             case ThoughtData(isRevision=True, branchFromThought=revision_num) if (
 45 |                 revision_num
 46 |             ):
 47 |                 original = await self._find_thought_content_safe(revision_num)
 48 |                 context = f'**REVISION of Thought #{revision_num}** (Original: "{original}")\n'
 49 |                 return f"{base}{context}{content}"
 50 | 
 51 |             case ThoughtData(branchFromThought=branch_from, branchId=branch_id) if (
 52 |                 branch_from and branch_id
 53 |             ):
 54 |                 origin = await self._find_thought_content_safe(branch_from)
 55 |                 context = f'**BRANCH (ID: {branch_id}) from Thought #{branch_from}** (Origin: "{origin}")\n'
 56 |                 return f"{base}{context}{content}"
 57 | 
 58 |             case _:
 59 |                 return f"{base}{content}"
 60 | 
 61 |     async def _find_thought_content_safe(self, thought_number: int) -> str:
 62 |         """Safely find thought content with error handling.
 63 | 
 64 |         Args:
 65 |             thought_number: The thought number to find
 66 | 
 67 |         Returns:
 68 |             The thought content or a placeholder if not found
 69 |         """
 70 |         try:
 71 |             return await self._session.find_thought_content(thought_number)
 72 |         except Exception:
 73 |             return "[not found]"
 74 | 
 75 |     async def log_context_building(
 76 |         self, thought_data: ThoughtData, input_prompt: str
 77 |     ) -> None:
 78 |         """Log context building details for debugging and monitoring.
 79 | 
 80 |         Args:
 81 |             thought_data: The thought data being processed
 82 |             input_prompt: The built prompt
 83 |         """
 84 |         logger.info("📝 CONTEXT BUILDING:")
 85 | 
 86 |         if thought_data.isRevision and thought_data.branchFromThought:
 87 |             logger.info(
 88 |                 "  Type: Revision of thought #%s", thought_data.branchFromThought
 89 |             )
 90 |             original = await self._find_thought_content_safe(
 91 |                 thought_data.branchFromThought
 92 |             )
 93 |             logger.info("  Original thought: %s", original)
 94 |         elif thought_data.branchFromThought and thought_data.branchId:
 95 |             logger.info(
 96 |                 "  Type: Branch '%s' from thought #%s",
 97 |                 thought_data.branchId,
 98 |                 thought_data.branchFromThought,
 99 |             )
100 |             origin = await self._find_thought_content_safe(
101 |                 thought_data.branchFromThought
102 |             )
103 |             logger.info("  Branch origin: %s", origin)
104 |         else:
105 |             logger.info("  Type: Sequential thought #%s", thought_data.thoughtNumber)
106 | 
107 |         logger.info("  Session thoughts: %d total", len(self._session.thought_history))
108 |         logger.info("  Input thought: %s", thought_data.thought)
109 |         logger.info("  Built prompt length: %d chars", len(input_prompt))
110 |         logger.info("  Built prompt:\n%s", input_prompt)
111 | 
112 |         # Use field length limits constant if available
113 |         separator_length = 60  # Default fallback
114 |         try:
115 |             from mcp_server_mas_sequential_thinking.config import FieldLengthLimits
116 | 
117 |             separator_length = FieldLengthLimits.SEPARATOR_LENGTH
118 |         except ImportError:
119 |             pass
120 | 
121 |         logger.info("  %s", "=" * separator_length)
122 | 
123 |     def create_simplified_prompt(self, input_prompt: str) -> str:
124 |         """Create a simplified prompt for single-agent processing.
125 | 
126 |         Args:
127 |             input_prompt: The original input prompt
128 | 
129 |         Returns:
130 |             Simplified prompt optimized for single-agent processing
131 |         """
132 |         return f"""Process this thought efficiently:
133 | 
134 | {input_prompt}
135 | 
136 | Provide a focused response with clear guidance for the next step."""
137 | 
```
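
A sketch of the prompt shapes produced above (the `make_thought` helper exists only for brevity): a sequential thought gets the base format, while a revision pulls the original content back out of the session cache:

```python
import asyncio

from mcp_server_mas_sequential_thinking.core.models import ThoughtData
from mcp_server_mas_sequential_thinking.core.session import SessionMemory
from mcp_server_mas_sequential_thinking.services.context_builder import ContextBuilder


def make_thought(num: int, text: str, **overrides) -> ThoughtData:
    """Build a ThoughtData with illustrative defaults."""
    fields = {
        "thought": text,
        "thoughtNumber": num,
        "totalThoughts": 4,
        "nextThoughtNeeded": True,
        "isRevision": False,
        "branchFromThought": None,
        "branchId": None,
        "needsMoreThoughts": False,
    }
    fields.update(overrides)
    return ThoughtData(**fields)


async def demo() -> None:
    session = SessionMemory()
    await session.add_thought(make_thought(1, "Define the problem"))
    builder = ContextBuilder(session)

    # Sequential thought: base format, no extra context line
    print(await builder.build_context_prompt(make_thought(2, "Next step")))

    # Revision of thought #1: the original content is included
    revision = make_thought(
        2, "Refine the definition", isRevision=True, branchFromThought=1
    )
    print(await builder.build_context_prompt(revision))


asyncio.run(demo())
```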

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/retry_handler.py:
--------------------------------------------------------------------------------

```python
  1 | """Retry handling utilities for robust processing."""
  2 | 
  3 | import asyncio
  4 | import time
  5 | from collections.abc import Callable
  6 | from dataclasses import dataclass
  7 | from typing import Any, TypeVar
  8 | 
  9 | from mcp_server_mas_sequential_thinking.config.constants import (
 10 |     DefaultTimeouts,
 11 |     PerformanceMetrics,
 12 | )
 13 | from mcp_server_mas_sequential_thinking.core.types import ThoughtProcessingError
 14 | from mcp_server_mas_sequential_thinking.infrastructure.logging_config import get_logger
 15 | 
 16 | logger = get_logger(__name__)
 17 | 
 18 | T = TypeVar("T")
 19 | 
 20 | 
 21 | @dataclass
 22 | class RetryConfig:
 23 |     """Configuration for retry operations."""
 24 | 
 25 |     max_attempts: int = DefaultTimeouts.MAX_RETRY_ATTEMPTS
 26 |     sleep_duration: float = PerformanceMetrics.RETRY_SLEEP_DURATION
 27 |     exponential_base: float = DefaultTimeouts.RETRY_EXPONENTIAL_BASE
 28 |     use_exponential_backoff: bool = False
 29 | 
 30 | 
 31 | class RetryHandler:
 32 |     """Handles retry logic with configurable strategies."""
 33 | 
 34 |     def __init__(self, config: RetryConfig | None = None) -> None:
 35 |         """Initialize retry handler with configuration."""
 36 |         self.config = config or RetryConfig()
 37 | 
 38 |     async def execute_with_retry(
 39 |         self,
 40 |         operation: Callable[[], Any],
 41 |         operation_name: str,
 42 |         context_info: dict | None = None,
 43 |     ) -> Any:
 44 |         """Execute operation with retry logic."""
 45 |         last_exception = None
 46 |         max_retries = self.config.max_attempts
 47 | 
 48 |         for retry_count in range(max_retries + 1):
 49 |             try:
 50 |                 self._log_attempt(
 51 |                     retry_count, max_retries, operation_name, context_info
 52 |                 )
 53 | 
 54 |                 start_time = time.time()
 55 |                 result = await operation()
 56 |                 processing_time = time.time() - start_time
 57 | 
 58 |                 self._log_success(operation_name, processing_time)
 59 |                 return result
 60 | 
 61 |             except Exception as e:
 62 |                 last_exception = e
 63 |                 self._log_error(retry_count, max_retries, operation_name, e)
 64 | 
 65 |                 if retry_count < max_retries:
 66 |                     await self._wait_before_retry(retry_count)
 67 |                 else:
 68 |                     self._log_exhaustion(max_retries, operation_name)
 69 |                     raise ThoughtProcessingError(
 70 |                         f"{operation_name} failed after {max_retries + 1} attempts: {e}"
 71 |                     ) from e
 72 | 
 73 |         # Should never reach here, but provide safety
 74 |         raise ThoughtProcessingError(
 75 |             f"Unexpected error in retry logic for {operation_name}"
 76 |         ) from last_exception
 77 | 
 78 |     def _log_attempt(
 79 |         self,
 80 |         retry_count: int,
 81 |         max_retries: int,
 82 |         operation_name: str,
 83 |         context_info: dict | None,
 84 |     ) -> None:
 85 |         """Log retry attempt information."""
 86 |         logger.info(
 87 |             f"Processing attempt {retry_count + 1}/{max_retries + 1}: {operation_name}"
 88 |         )
 89 | 
 90 |         if context_info:
 91 |             for key, value in context_info.items():
 92 |                 logger.info(f"  {key}: {value}")
 93 | 
 94 |     def _log_success(self, operation_name: str, processing_time: float) -> None:
 95 |         """Log successful operation completion."""
 96 |         logger.info(f"✅ {operation_name} completed in {processing_time:.3f}s")
 97 | 
 98 |     def _log_error(
 99 |         self, retry_count: int, max_retries: int, operation_name: str, error: Exception
100 |     ) -> None:
101 |         """Log error information."""
102 |         logger.error(f"{operation_name} error on attempt {retry_count + 1}: {error}")
103 | 
104 |         if retry_count < max_retries:
105 |             logger.info(f"Retrying... ({retry_count + 1}/{max_retries})")
106 | 
107 |     def _log_exhaustion(self, max_retries: int, operation_name: str) -> None:
108 |         """Log retry exhaustion."""
109 |         logger.error(f"All retry attempts exhausted for {operation_name}")
110 | 
111 |     async def _wait_before_retry(self, retry_count: int) -> None:
112 |         """Wait before retry with optional exponential backoff."""
113 |         if self.config.use_exponential_backoff:
114 |             wait_time = self.config.sleep_duration * (
115 |                 self.config.exponential_base**retry_count
116 |             )
117 |         else:
118 |             wait_time = self.config.sleep_duration
119 | 
120 |         await asyncio.sleep(wait_time)
121 | 
122 | 
123 | class TeamProcessingRetryHandler(RetryHandler):
124 |     """Specialized retry handler for team processing operations."""
125 | 
126 |     def __init__(self) -> None:
127 |         """Initialize with team processing specific configuration."""
128 |         config = RetryConfig(
129 |             max_attempts=DefaultTimeouts.MAX_RETRY_ATTEMPTS,
130 |             sleep_duration=PerformanceMetrics.RETRY_SLEEP_DURATION,
131 |             use_exponential_backoff=True,
132 |         )
133 |         super().__init__(config)
134 | 
135 |     async def execute_team_processing(
136 |         self, team_operation: Callable[[], Any], team_info: dict, complexity_level: str
137 |     ) -> Any:
138 |         """Execute team processing with specialized retry logic."""
139 |         context_info = {
140 |             "complexity": complexity_level,
141 |             "team": team_info.get("name", "unknown"),
142 |             "agents": team_info.get("member_count", 0),
143 |             "leader": team_info.get("leader_model", "unknown"),
144 |         }
145 | 
146 |         return await self.execute_with_retry(
147 |             team_operation, "MULTI-AGENT TEAM PROCESSING", context_info
148 |         )
149 | 
```
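
A usage sketch wrapping a deliberately flaky coroutine; the failure is contrived so that exactly one retry succeeds:

```python
import asyncio
import itertools

from mcp_server_mas_sequential_thinking.services.retry_handler import (
    RetryConfig,
    RetryHandler,
)

_calls = itertools.count(1)


async def flaky_operation() -> str:
    """Fails on the first call, succeeds on the second."""
    if next(_calls) < 2:
        raise ConnectionError("transient failure")
    return "ok"


async def demo() -> None:
    handler = RetryHandler(RetryConfig(max_attempts=2, sleep_duration=0.1))
    result = await handler.execute_with_retry(
        flaky_operation, "demo operation", {"target": "example-service"}
    )
    print(result)  # "ok" after one retry


asyncio.run(demo())
```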

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/response_processor.py:
--------------------------------------------------------------------------------

```python
  1 | """Response processing utilities for consistent response handling."""
  2 | 
  3 | from dataclasses import dataclass
  4 | from typing import Any
  5 | 
  6 | from mcp_server_mas_sequential_thinking.infrastructure.logging_config import get_logger
  7 | 
  8 | logger = get_logger(__name__)
  9 | 
 10 | 
 11 | @dataclass
 12 | class ProcessedResponse:
 13 |     """Standardized response data structure."""
 14 | 
 15 |     content: str
 16 |     raw_response: Any
 17 |     processing_time: float | None = None
 18 |     metadata: dict | None = None
 19 | 
 20 | 
 21 | class ResponseExtractor:
 22 |     """Handles extraction of content from various response types."""
 23 | 
 24 |     @staticmethod
 25 |     def extract_content(response: Any) -> str:
 26 |         """Extract string content from various response formats."""
 27 |         if response is None:
 28 |             return ""
 29 | 
 30 |         # Handle string responses directly
 31 |         if isinstance(response, str):
 32 |             return response
 33 | 
 34 |         # Handle RunOutput from Agno framework
 35 |         if hasattr(response, "content"):
 36 |             content = response.content
 37 |             if isinstance(content, str):
 38 |                 return content
 39 |             if isinstance(content, dict):
 40 |                 # Extract from dict-based content
 41 |                 return ResponseExtractor._extract_from_dict(content)
 42 |             # Every object implements __str__; fall back to its string form
 43 |             return str(content)
 44 | 
 45 |         # Handle dictionary responses
 46 |         if isinstance(response, dict):
 47 |             return ResponseExtractor._extract_from_dict(response)
 48 | 
 49 |         # Handle objects with text/message attributes
 50 |         for attr in ["text", "message", "result", "output"]:
 51 |             if hasattr(response, attr):
 52 |                 value = getattr(response, attr)
 53 |                 if isinstance(value, str):
 54 |                     return value
 55 | 
 56 |         # Fallback to string conversion
 57 |         logger.warning(f"Unknown response type {type(response)}, converting to string")
 58 |         return str(response)
 59 | 
 60 |     @staticmethod
 61 |     def _extract_from_dict(content_dict: dict) -> str:
 62 |         """Extract content from dictionary-based responses."""
 63 |         # Common content keys in order of preference
 64 |         content_keys = ["content", "text", "message", "result", "output", "response"]
 65 | 
 66 |         for key in content_keys:
 67 |             if key in content_dict:
 68 |                 value = content_dict[key]
 69 |                 if isinstance(value, str):
 70 |                     return value
 71 |                 if isinstance(value, dict) and "result" in value:
 72 |                     # Handle nested result structures
 73 |                     return str(value["result"])
 74 | 
 75 |         # If no standard key found, try to get first string value
 76 |         for value in content_dict.values():
 77 |             if isinstance(value, str) and value.strip():
 78 |                 return value
 79 | 
 80 |         # Fallback to string representation
 81 |         return str(content_dict)
 82 | 
 83 | 
 84 | class ResponseProcessor:
 85 |     """Comprehensive response processing with logging and validation."""
 86 | 
 87 |     def __init__(self) -> None:
 88 |         """Initialize response processor."""
 89 |         self.extractor = ResponseExtractor()
 90 | 
 91 |     def process_response(
 92 |         self,
 93 |         response: Any,
 94 |         processing_time: float | None = None,
 95 |         context: str = "processing",
 96 |     ) -> ProcessedResponse:
 97 |         """Process response with extraction, validation, and logging."""
 98 |         content = self.extractor.extract_content(response)
 99 | 
100 |         # Validate content
101 |         if not content or not content.strip():
102 |             logger.warning(f"Empty response content in {context}")
103 |             content = f"[Empty response from {context}]"
104 | 
105 |         # Create metadata
106 |         metadata = {
107 |             "context": context,
108 |             "response_type": type(response).__name__,
109 |             "content_length": len(content),
110 |             "has_content": bool(content.strip()),
111 |         }
112 | 
113 |         processed = ProcessedResponse(
114 |             content=content,
115 |             raw_response=response,
116 |             processing_time=processing_time,
117 |             metadata=metadata,
118 |         )
119 | 
120 |         self._log_response_details(processed, context)
121 |         return processed
122 | 
123 |     def _log_response_details(self, processed: ProcessedResponse, context: str) -> None:
124 |         """Log detailed response information."""
125 |         logger.info(f"📝 {context.upper()} RESPONSE:")
126 |         if processed.metadata:
127 |             logger.info(f"  Type: {processed.metadata.get('response_type', 'unknown')}")
128 |             logger.info(
129 |                 f"  Length: {processed.metadata.get('content_length', 0)} chars"
130 |             )
131 |         else:
132 |             logger.info("  Type: unknown")
133 |             logger.info(f"  Length: {len(processed.content)} chars")
134 | 
135 |         if processed.processing_time:
136 |             logger.info(f"  Processing time: {processed.processing_time:.3f}s")
137 | 
138 |         # Log content preview (first 100 chars)
139 |         content_preview = processed.content[:100]
140 |         if len(processed.content) > 100:
141 |             content_preview += "..."
142 | 
143 |         logger.info(f"  Preview: {content_preview}")
144 | 
145 | 
146 | class ResponseFormatter:
147 |     """Formats responses for consistent output."""
148 | 
149 |     @staticmethod
150 |     def format_for_client(processed: ProcessedResponse) -> str:
151 |         """Format processed response for client consumption."""
152 |         content = processed.content.strip()
153 | 
154 |         # Ensure content ends with appropriate punctuation
155 |         if content and not content.endswith((".", "!", "?", ":", ";")):
156 |             content += "."
157 | 
158 |         return content
159 | 
160 |     @staticmethod
161 |     def format_with_metadata(processed: ProcessedResponse) -> dict:
162 |         """Format response with metadata for debugging."""
163 |         return {
164 |             "content": processed.content,
165 |             "metadata": processed.metadata,
166 |             "processing_time": processed.processing_time,
167 |         }
168 | 
```
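
A quick sketch of the extraction fallbacks; `SimpleNamespace` stands in for an Agno `RunOutput`-style object with a `content` attribute:

```python
from types import SimpleNamespace

from mcp_server_mas_sequential_thinking.services.response_processor import (
    ResponseExtractor,
    ResponseFormatter,
    ResponseProcessor,
)

extractor = ResponseExtractor()
print(extractor.extract_content("plain string"))                   # plain string
print(extractor.extract_content({"content": "from dict"}))         # from dict
print(extractor.extract_content(SimpleNamespace(content="attr")))  # attr

processor = ResponseProcessor()
processed = processor.process_response("final answer", processing_time=0.12)
print(ResponseFormatter.format_for_client(processed))              # final answer.
```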

--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Changelog
  2 | 
  3 | All notable changes to this project will be documented in this file.
  4 | 
  5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
  6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
  7 | 
  8 | ## [Unreleased]
  9 | 
 10 | ## [0.7.0] - 2025-09-24
 11 | 
 12 | ### Added
 13 | - Parallel execution for thinking agents to improve processing performance
 14 | - Comprehensive Mermaid diagrams in documentation showing parallel processing flows
 15 | - Detailed agent descriptions in README files with multi-dimensional thinking methodology
 16 | - Comparison table with original TypeScript version highlighting architectural differences
 17 | 
 18 | ### Changed
 19 | - **PERFORMANCE**: Converted non-synthesis agents to run in parallel using asyncio.gather for significant speed improvements
 20 | - **GROQ PROVIDER**: Updated Groq provider to use OpenAI GPT-OSS models (openai/gpt-oss-120b for enhanced, openai/gpt-oss-20b for standard)
 21 | - Complete restructure of README files with cleaner formatting and better organization
 22 | - Improved documentation clarity by removing all emoji characters from codebase and documentation
 23 | 
 24 | ### Fixed
 25 | - Resolved MetricsLogger import error that was preventing server startup
 26 | - Fixed missing MetricsLogger class implementation in logging configuration
 27 | - Corrected Mermaid diagram syntax errors in README files
 28 | - Removed references to non-existent PerformanceTracker class
 29 | 
 30 | ## [0.5.0] - 2025-09-17
 31 | 
 32 | ### Added
 33 | - Comprehensive TDD test coverage for refactoring and quality improvement
 34 | - Default settings and processing strategy enum for enhanced configuration
 35 | - Adaptive architecture with cost optimization capabilities
 36 | - Comprehensive test infrastructure and unit tests
 37 | - Magic number extraction to constants for better maintainability
 38 | 
 39 | ### Changed
 40 | - **BREAKING**: Migration to Agno v2.0 with architectural updates (~10,000x faster agent creation, ~50x less memory usage)
 41 | - Upgraded Agno to version 2.0.5 with enhanced agent features
 42 | - Reorganized types module and cleaned duplicates for better structure
 43 | - Modernized codebase with enhanced type safety and annotations
 44 | - Adopted src layout for Python project structure following best practices
 45 | - Optimized code structure and performance across modules
 46 | 
 47 | ### Fixed
 48 | - Resolved mypy type checking errors across all modules
 49 | - Comprehensive security and quality improvements
 50 | - Updated minimum Agno version to 2.0.5 for compatibility
 51 | 
 52 | ### Documentation
 53 | - Updated CLAUDE.md with Agno v2.0 migration details and corrected commands
 54 | - Enhanced guidance for src layout and development requirements
 55 | - Improved test documentation and GitHub provider information
 56 | 
 57 | ## [0.4.1] - 2025-08-06
 58 | 
 59 | ### Fixed
 60 | - app_lifespan function signature for FastMCP compatibility
 61 | 
 62 | ### Changed
 63 | - Restructured main.py with modular architecture for better maintainability
 64 | 
 65 | ## [0.4.0] - 2025-08-06
 66 | 
 67 | ### Added
 68 | - Support for Kimi K2 model via OpenRouter integration
 69 | - Enhanced model provider options and configuration flexibility
 70 | 
 71 | ### Changed
 72 | - CHANGELOG.md following Keep a Changelog standards
 73 | - Moved changelog from README.md to dedicated CHANGELOG.md file
 74 | 
 75 | ## [0.3.0] - 2025-08-01
 76 | 
 77 | ### Added
 78 | - Support for Ollama FULL LOCAL (no API key needed, but requires Ollama installed and running locally)
 79 | - Local LLM inference capabilities through Ollama integration
 80 | - Enhanced model configuration options for local deployment
 81 | - MseeP.ai security assessment badge
 82 | 
 83 | ### Changed
 84 | - Restored DeepSeek as default LLM provider
 85 | - Improved package naming and configuration
 86 | - Updated dependencies to support local inference
 87 | - Enhanced agent memory management (disabled for individual agents)
 88 | 
 89 | ### Fixed
 90 | - Package naming issues in configuration
 91 | - Dependency conflicts resolved
 92 | - Merge conflicts between branches
 93 | 
 94 | ## [0.2.3] - 2025-04-22
 95 | 
 96 | ### Changed
 97 | - Updated version alignment in project configuration and lock file
 98 | 
 99 | ## [0.2.2] - 2025-04-10
100 | 
101 | ### Changed
102 | - Default agent model ID for DeepSeek changed from `deepseek-reasoner` to `deepseek-chat`
103 | - Improved model selection recommendations
104 | 
105 | ## [0.2.1] - 2025-04-10
106 | 
107 | ### Changed
108 | - Model selection recommendations updated in documentation
109 | - Enhanced guidance for coordinator vs specialist model selection
110 | 
111 | ## [0.2.0] - 2025-04-06
112 | 
113 | ### Added
114 | - Major refactoring of sequential thinking team structure
115 | - Enhanced coordination logic
116 | - Improved JSON output format
117 | - LLM configuration and model selection enhancements
118 | 
119 | ### Changed
120 | - Agent model IDs updated for better performance
121 | - Project structure improvements
122 | 
123 | ## [0.1.3] - 2025-04-06
124 | 
125 | ### Changed
126 | - Project entry point script from `main:main` to `main:run`
127 | - Updated documentation for improved user guidance
128 | - Cleaned up dependencies in lock file
129 | 
130 | ## [0.1.0] - 2025-04-06
131 | 
132 | ### Added
133 | - Initial project structure and configuration files
134 | - Multi-Agent System (MAS) architecture using Agno framework
135 | - Sequential thinking tool with coordinated specialist agents
136 | - Support for multiple LLM providers (DeepSeek, Groq, OpenRouter)
137 | - Pydantic validation for robust data integrity
138 | - Integration with external tools (Exa for research)
139 | - Structured logging with file and console output
140 | - Support for thought revisions and branching
141 | - MCP server implementation with FastMCP
142 | - Distributed intelligence across specialized agents
143 | 
144 | [Unreleased]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.7.0...HEAD
145 | [0.7.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.5.0...v0.7.0
146 | [0.5.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.4.1...v0.5.0
147 | [0.4.1]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.4.0...v0.4.1
148 | [0.4.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.3.0...v0.4.0
149 | [0.3.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.2.3...v0.3.0
150 | [0.2.3]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.2.2...v0.2.3
151 | [0.2.2]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.2.1...v0.2.2
152 | [0.2.1]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.2.0...v0.2.1
153 | [0.2.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.1.3...v0.2.0
154 | [0.1.3]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/compare/v0.1.0...v0.1.3
155 | [0.1.0]: https://github.com/FradSer/mcp-server-mas-sequential-thinking/releases/tag/v0.1.0
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/workflow_executor.py:
--------------------------------------------------------------------------------

```python
  1 | """Workflow execution service for multi-thinking processing.
  2 | 
  3 | This service handles the execution of multi-thinking workflows,
  4 | managing workflow routing and coordination of processing strategies.
  5 | """
  6 | 
  7 | import time
  8 | from typing import TYPE_CHECKING, Any
  9 | 
 10 | from mcp_server_mas_sequential_thinking.config.constants import PerformanceMetrics
 11 | from mcp_server_mas_sequential_thinking.core import SessionMemory, ThoughtData
 12 | from mcp_server_mas_sequential_thinking.utils import setup_logging
 13 | 
 14 | if TYPE_CHECKING:
 15 |     from mcp_server_mas_sequential_thinking.routing import MultiThinkingWorkflowResult
 16 | 
 17 | logger = setup_logging()
 18 | 
 19 | 
 20 | class WorkflowExecutor:
 21 |     """Service responsible for executing multi-thinking workflows."""
 22 | 
 23 |     def __init__(self, session: SessionMemory) -> None:
 24 |         """Initialize the workflow executor with session memory.
 25 | 
 26 |         Args:
 27 |             session: The session memory instance for accessing team and context
 28 |         """
 29 |         self._session = session
 30 |         self._agno_router: Any = None
 31 |         self._initialize_multi_thinking_workflow()
 32 | 
 33 |     def _initialize_multi_thinking_workflow(self) -> None:
 34 |         """Initialize multi-thinking workflow router.
 35 | 
 36 |         Uses dynamic import to avoid circular dependency issues.
 37 |         """
 38 |         logger.info("Initializing Multi-Thinking Workflow Router")
 39 |         # Dynamic import to avoid circular dependency
 40 |         from mcp_server_mas_sequential_thinking.routing import (  # noqa: PLC0415
 41 |             MultiThinkingWorkflowRouter,
 42 |         )
 43 | 
 44 |         self._agno_router = MultiThinkingWorkflowRouter()
 45 |         logger.info("✅ Multi-Thinking Workflow Router ready")
 46 | 
 47 |     async def execute_workflow(
 48 |         self, thought_data: ThoughtData, input_prompt: str, start_time: float
 49 |     ) -> tuple[str, "MultiThinkingWorkflowResult", float]:
 50 |         """Execute multi-thinking workflow for the given thought.
 51 | 
 52 |         Args:
 53 |             thought_data: The thought data to process
 54 |             input_prompt: The context-aware input prompt
 55 |             start_time: Processing start time for metrics
 56 | 
 57 |         Returns:
 58 |             Tuple of (final_response, workflow_result, total_time)
 59 |         """
 60 |         # Execute multi-thinking workflow
 61 |         workflow_result: MultiThinkingWorkflowResult = (
 62 |             await self._agno_router.process_thought_workflow(thought_data, input_prompt)
 63 |         )
 64 | 
 65 |         total_time = time.time() - start_time
 66 | 
 67 |         return workflow_result.content, workflow_result, total_time
 68 | 
 69 |     def log_workflow_completion(
 70 |         self,
 71 |         thought_data: ThoughtData,
 72 |         workflow_result: "MultiThinkingWorkflowResult",
 73 |         total_time: float,
 74 |         final_response: str,
 75 |     ) -> None:
 76 |         """Log workflow completion with multi-thinking specific metrics.
 77 | 
 78 |         Args:
 79 |             thought_data: The processed thought data
 80 |             workflow_result: The workflow execution result
 81 |             total_time: Total processing time
 82 |             final_response: The final formatted response
 83 |         """
 84 |         # Basic completion info
 85 |         completion_metrics = {
 86 |             f"Thought #{thought_data.thoughtNumber}": "processed successfully",
 87 |             "Strategy": workflow_result.strategy_used,
 88 |             "Complexity Score": f"{workflow_result.complexity_score:.1f}/100",
 89 |             "Step": workflow_result.step_name,
 90 |             "Processing time": f"{workflow_result.processing_time:.3f}s",
 91 |             "Total time": f"{total_time:.3f}s",
 92 |             "Response length": f"{len(final_response)} chars",
 93 |         }
 94 | 
 95 |         self._log_metrics_block(
 96 |             "🧠 MULTI-THINKING WORKFLOW COMPLETION:", completion_metrics
 97 |         )
 98 |         self._log_separator()
 99 | 
100 |         # Performance metrics
101 |         execution_consistency = self._calculate_execution_consistency(
102 |             workflow_result.strategy_used != "error_fallback"
103 |         )
104 |         efficiency_score = self._calculate_efficiency_score(
105 |             workflow_result.processing_time
106 |         )
107 | 
108 |         performance_metrics = {
109 |             "Execution Consistency": execution_consistency,
110 |             "Efficiency Score": efficiency_score,
111 |             "Response Length": f"{len(final_response)} chars",
112 |             "Strategy Executed": workflow_result.strategy_used,
113 |         }
114 |         self._log_metrics_block("📊 WORKFLOW PERFORMANCE METRICS:", performance_metrics)
115 | 
116 |     def _log_metrics_block(self, title: str, metrics: dict[str, Any]) -> None:
117 |         """Log a formatted metrics block.
118 | 
119 |         Args:
120 |             title: The title for the metrics block
121 |             metrics: Dictionary of metrics to log
122 |         """
123 |         logger.info("%s", title)
124 |         for key, value in metrics.items():
125 |             if isinstance(value, float):
126 |                 logger.info("  %s: %.2f", key, value)
127 |             else:
128 |                 logger.info("  %s: %s", key, value)
129 | 
130 |     def _log_separator(self) -> None:
131 |         """Log a separator line.
132 | 
133 |         Uses the standard separator length from PerformanceMetrics so all
134 |         metric blocks share a consistent width.
135 |         """
136 |         # Use performance metrics constant
137 |         length = PerformanceMetrics.SEPARATOR_LENGTH
138 |         logger.info("  %s", "=" * length)
139 | 
140 |     def _calculate_efficiency_score(self, processing_time: float) -> float:
141 |         """Calculate efficiency score using standard metrics.
142 | 
143 |         Args:
144 |             processing_time: The processing time in seconds
145 | 
146 |         Returns:
147 |             Efficiency score between 0 and 1
148 |         """
149 |         # Use constants from PerformanceMetrics
150 |         perfect_score = PerformanceMetrics.PERFECT_EFFICIENCY_SCORE
151 |         threshold = PerformanceMetrics.EFFICIENCY_TIME_THRESHOLD
152 |         minimum_score = PerformanceMetrics.MINIMUM_EFFICIENCY_SCORE
153 | 
154 |         return (
155 |             perfect_score
156 |             if processing_time < threshold
157 |             else max(minimum_score, threshold / processing_time)
158 |         )
159 | 
160 |     def _calculate_execution_consistency(self, success_indicator: bool) -> float:
161 |         """Calculate execution consistency using standard metrics.
162 | 
163 |         Args:
164 |             success_indicator: Whether execution was successful
165 | 
166 |         Returns:
167 |             Execution consistency score
168 |         """
169 |         # Use constants from PerformanceMetrics
170 |         perfect_consistency = PerformanceMetrics.PERFECT_EXECUTION_CONSISTENCY
171 |         default_consistency = PerformanceMetrics.DEFAULT_EXECUTION_CONSISTENCY
172 | 
173 |         return perfect_consistency if success_indicator else default_consistency
174 | 
```

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/infrastructure/logging_config.py:
--------------------------------------------------------------------------------

```python
  1 | """Streamlined logging configuration based on Python best practices.
  2 | 
  3 | Replaces complex 985-line implementation with focused, performance-optimized approach.
  4 | """
  5 | 
  6 | import logging
  7 | import logging.handlers
  8 | import os
  9 | import sys
 10 | from pathlib import Path
 11 | 
 12 | 
 13 | def setup_logging(level: str | None = None) -> logging.Logger:
 14 |     """Setup streamlined logging with environment-based configuration.
 15 | 
 16 |     Args:
 17 |         level: Log level override. If None, uses LOG_LEVEL env var or defaults to INFO.
 18 | 
 19 |     Returns:
 20 |         Configured logger instance for the application.
 21 |     """
 22 |     # Determine log level from environment or parameter
 23 |     log_level = level or os.getenv("LOG_LEVEL", "INFO")
 24 | 
 25 |     try:
 26 |         numeric_level = getattr(logging, log_level.upper())
 27 |     except AttributeError:
 28 |         numeric_level = logging.INFO
 29 | 
 30 |     # Create logs directory
 31 |     log_dir = Path.home() / ".sequential_thinking" / "logs"
 32 |     log_dir.mkdir(parents=True, exist_ok=True)
 33 | 
 34 |     # Configure root logger for this application
 35 |     logger = logging.getLogger("sequential_thinking")
 36 |     logger.setLevel(numeric_level)
 37 | 
 38 |     # Clear any existing handlers to avoid duplicates
 39 |     logger.handlers.clear()
 40 | 
 41 |     # Console handler for development/debugging
 42 |     if os.getenv("ENVIRONMENT") != "production":
 43 |         console_handler = logging.StreamHandler(sys.stdout)
 44 |         console_handler.setLevel(numeric_level)
 45 |         console_formatter = logging.Formatter(
 46 |             "%(asctime)s - %(levelname)s - %(message)s", datefmt="%H:%M:%S"
 47 |         )
 48 |         console_handler.setFormatter(console_formatter)
 49 |         logger.addHandler(console_handler)
 50 | 
 51 |     # File handler with rotation for persistent logging
 52 |     log_file = log_dir / "sequential_thinking.log"
 53 |     file_handler = logging.handlers.RotatingFileHandler(
 54 |         log_file,
 55 |         maxBytes=5 * 1024 * 1024,  # 5MB
 56 |         backupCount=3,
 57 |         encoding="utf-8",
 58 |     )
 59 |     file_handler.setLevel(numeric_level)
 60 |     file_formatter = logging.Formatter(
 61 |         "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s"
 62 |     )
 63 |     file_handler.setFormatter(file_formatter)
 64 |     logger.addHandler(file_handler)
 65 | 
 66 |     # Prevent propagation to root logger to avoid duplicates
 67 |     logger.propagate = False
 68 | 
 69 |     return logger
 70 | 
 71 | 
 72 | def get_logger(name: str | None = None) -> logging.Logger:
 73 |     """Get a logger instance with consistent configuration.
 74 | 
 75 |     Args:
 76 |         name: Logger name. If None, uses calling module's name.
 77 | 
 78 |     Returns:
 79 |         Logger instance.
 80 |     """
 81 |     if name is None:
 82 |         # Get caller's module name for better traceability
 83 |         import inspect
 84 | 
 85 |         frame = inspect.currentframe().f_back
 86 |         name = frame.f_globals.get("__name__", "sequential_thinking")
 87 | 
 88 |     return logging.getLogger(name)
 89 | 
 90 | 
 91 | def log_performance_metric(
 92 |     logger: logging.Logger, operation: str, duration: float, **kwargs
 93 | ) -> None:
 94 |     """Log performance metrics in consistent format.
 95 | 
 96 |     Uses lazy evaluation to avoid string formatting overhead.
 97 |     """
 98 |     if logger.isEnabledFor(logging.INFO):
 99 |         extras = ", ".join(f"{k}={v}" for k, v in kwargs.items()) if kwargs else ""
100 |         logger.info(
101 |             "Performance: %s completed in %.2fs%s",
102 |             operation,
103 |             duration,
104 |             f" ({extras})" if extras else "",
105 |         )
106 | 
107 | 
108 | def log_routing_decision(
109 |     logger: logging.Logger, strategy: str, complexity: float, reasoning: str = ""
110 | ) -> None:
111 |     """Log AI routing decisions with consistent structure."""
112 |     logger.info(
113 |         "AI Routing: strategy=%s, complexity=%.1f%s",
114 |         strategy,
115 |         complexity,
116 |         f", reason={reasoning}" if reasoning else "",
117 |     )
118 | 
119 | 
120 | def log_thought_processing(
121 |     logger: logging.Logger,
122 |     stage: str,
123 |     thought_number: int,
124 |     thought_length: int = 0,
125 |     **context,
126 | ) -> None:
127 |     """Log thought processing stages with structured data."""
128 |     if logger.isEnabledFor(logging.INFO):
129 |         ctx_str = ", ".join(f"{k}={v}" for k, v in context.items()) if context else ""
130 |         logger.info(
131 |             "Thought Processing: stage=%s, number=%d, length=%d%s",
132 |             stage,
133 |             thought_number,
134 |             thought_length,
135 |             f", {ctx_str}" if ctx_str else "",
136 |         )
137 | 
138 | 
139 | class LogTimer:
140 |     """Context manager for timing operations with automatic logging."""
141 | 
142 |     def __init__(
143 |         self, logger: logging.Logger, operation: str, level: int = logging.INFO
144 |     ) -> None:
145 |         self.logger = logger
146 |         self.operation = operation
147 |         self.level = level
148 |         self.start_time = None
149 | 
150 |     def __enter__(self):
151 |         if self.logger.isEnabledFor(logging.DEBUG):
152 |             self.logger.debug("Starting: %s", self.operation)
153 | 
154 |         import time
155 | 
156 |         self.start_time = time.time()
157 |         return self
158 | 
159 |     def __exit__(self, exc_type, exc_val, exc_tb):
160 |         import time
161 | 
162 |         duration = time.time() - self.start_time
163 | 
164 |         if exc_type is None:
165 |             if self.logger.isEnabledFor(self.level):
166 |                 self.logger.log(
167 |                     self.level, "Completed: %s (%.2fs)", self.operation, duration
168 |                 )
169 |         else:
170 |             self.logger.error(
171 |                 "Failed: %s (%.2fs) - %s", self.operation, duration, exc_val
172 |             )
173 | 
174 | 
175 | # Legacy compatibility - maintain existing function names but with simplified implementation
176 | def create_logger(name: str) -> logging.Logger:
177 |     """Legacy compatibility function."""
178 |     return get_logger(name)
179 | 
180 | 
181 | def configure_logging(level: str = "INFO") -> logging.Logger:
182 |     """Legacy compatibility function."""
183 |     return setup_logging(level)
184 | 
185 | 
186 | class MetricsLogger:
187 |     """Simple metrics logger for structured logging output."""
188 | 
189 |     def __init__(self, logger_name: str = "sequential_thinking") -> None:
190 |         """Initialize metrics logger with specified logger name."""
191 |         self.logger = logging.getLogger(logger_name)
192 | 
193 |     def log_metrics_block(self, title: str, metrics: dict) -> None:
194 |         """Log a block of metrics with a title.
195 | 
196 |         Args:
197 |             title: Block title to display
198 |             metrics: Dictionary of metrics to log
199 |         """
200 |         if not self.logger.isEnabledFor(logging.INFO):
201 |             return
202 | 
203 |         self.logger.info("%s", title)
204 |         for key, value in metrics.items():
205 |             self.logger.info("  %s: %s", key, value)
206 | 
207 |     def log_separator(self, length: int = 60) -> None:
208 |         """Log a separator line.
209 | 
210 |         Args:
211 |             length: Length of the separator line
212 |         """
213 |         if self.logger.isEnabledFor(logging.INFO):
214 |             self.logger.info("-" * length)
215 | 
```
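
Usage sketch for the helpers above, assuming the module is importable at the path implied by the repo layout; the timed work and metric values are placeholders, not real measurements.

```python
import time

from mcp_server_mas_sequential_thinking.infrastructure.logging_config import (
    LogTimer,
    MetricsLogger,
    log_performance_metric,
    setup_logging,
)

logger = setup_logging()  # honors LOG_LEVEL, falls back to INFO

# Time a block of work; success logs at INFO, failure logs the exception
with LogTimer(logger, "complexity analysis"):
    time.sleep(0.1)  # stand-in for real work

# One-off performance metric with structured extras
log_performance_metric(logger, "routing", 0.42, strategy="single_agent")

# Grouped metrics block plus a separator line
metrics = MetricsLogger()
metrics.log_metrics_block("SESSION SUMMARY:", {"thoughts": 3, "errors": 0})
metrics.log_separator()
```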

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/response_formatter.py:
--------------------------------------------------------------------------------

```python
  1 | """Response formatting service for thought processing.
  2 | 
  3 | This service handles formatting and synthesizing responses from various processing
  4 | modes, extracting content from Agno RunOutput objects, and preparing final responses.
  5 | """
  6 | 
  7 | from mcp_server_mas_sequential_thinking.core import ThoughtData
  8 | from mcp_server_mas_sequential_thinking.utils import setup_logging
  9 | 
 10 | logger = setup_logging()
 11 | 
 12 | 
 13 | class ResponseFormatter:
 14 |     """Service responsible for formatting and synthesizing responses."""
 15 | 
 16 |     def __init__(self) -> None:
 17 |         """Initialize the response formatter."""
 18 | 
 19 |     def format_response(self, content: str, thought_data: ThoughtData) -> str:
 20 |         """Format response for MCP - clean content without additional guidance.
 21 | 
 22 |         MCP servers should return clean content and let the AI decide next steps.
 23 | 
 24 |         Args:
 25 |             content: The raw response content to format
 26 |             thought_data: The thought data context
 27 | 
 28 |         Returns:
 29 |             Formatted response string
 30 |         """
 31 |         # MCP servers should return clean content, let AI decide next steps
 32 |         final_response = content
 33 | 
 34 |         # Log response formatting details
 35 |         self._log_response_formatting(content, thought_data, final_response)
 36 | 
 37 |         return final_response
 38 | 
 39 |     def extract_response_content(self, response) -> str:
 40 |         """Extract clean content from Agno RunOutput objects.
 41 | 
 42 |         Handles various response types and extracts text content properly.
 43 | 
 44 |         Args:
 45 |             response: The response object from Agno processing
 46 | 
 47 |         Returns:
 48 |             Extracted text content
 49 |         """
 50 |         # Import ResponseExtractor to handle the extraction
 51 |         from mcp_server_mas_sequential_thinking.services.response_processor import (
 52 |             ResponseExtractor,
 53 |         )
 54 | 
 55 |         return ResponseExtractor.extract_content(response)
 56 | 
 57 |     def _log_response_formatting(
 58 |         self, content: str, thought_data: ThoughtData, final_response: str
 59 |     ) -> None:
 60 |         """Log response formatting details for debugging and monitoring.
 61 | 
 62 |         Args:
 63 |             content: The original response content
 64 |             thought_data: The thought data context
 65 |             final_response: The final formatted response
 66 |         """
 67 |         logger.info("📤 RESPONSE FORMATTING:")
 68 |         logger.info(f"  Original content length: {len(content)} chars")
 69 |         logger.info(f"  Next needed: {thought_data.nextThoughtNeeded}")
 70 |         logger.info(f"  Final response length: {len(final_response)} chars")
 71 |         logger.info(f"  Final response:\n{final_response}")
 72 | 
 73 |         # Use field length limits constant if available
 74 |         separator_length = 60  # Default fallback
 75 |         try:
 76 |             from mcp_server_mas_sequential_thinking.config import FieldLengthLimits
 77 | 
 78 |             separator_length = FieldLengthLimits.SEPARATOR_LENGTH
 79 |         except ImportError:
 80 |             pass
 81 | 
 82 |         logger.info(f"  {'=' * separator_length}")
 83 | 
 84 |     def log_input_details(
 85 |         self, input_prompt: str, context_description: str = "input"
 86 |     ) -> None:
 87 |         """Log input details with consistent formatting.
 88 | 
 89 |         Args:
 90 |             input_prompt: The input prompt to log
 91 |             context_description: Description of the context
 92 |         """
 93 |         logger.info(f"  Input length: {len(input_prompt)} chars")
 94 |         logger.info(f"  Full {context_description}:\n{input_prompt}")
 95 | 
 96 |         # Use performance metrics constant if available
 97 |         separator_length = 60  # Default fallback
 98 |         try:
 99 |             from mcp_server_mas_sequential_thinking.config import PerformanceMetrics
100 | 
101 |             separator_length = PerformanceMetrics.SEPARATOR_LENGTH
102 |         except ImportError:
103 |             pass
104 | 
105 |         logger.info(f"  {'=' * separator_length}")
106 | 
107 |     def log_output_details(
108 |         self,
109 |         response_content: str,
110 |         processing_time: float,
111 |         context_description: str = "response",
112 |     ) -> None:
113 |         """Log output details with consistent formatting.
114 | 
115 |         Args:
116 |             response_content: The response content to log
117 |             processing_time: Processing time in seconds
118 |             context_description: Description of the context
119 |         """
120 |         logger.info(f"  Processing time: {processing_time:.3f}s")
121 |         logger.info(f"  Output length: {len(response_content)} chars")
122 |         logger.info(f"  Full {context_description}:\n{response_content}")
123 | 
124 |         # Use performance metrics constant if available
125 |         separator_length = 60  # Default fallback
126 |         try:
127 |             from mcp_server_mas_sequential_thinking.config import PerformanceMetrics
128 | 
129 |             separator_length = PerformanceMetrics.SEPARATOR_LENGTH
130 |         except ImportError:
131 |             pass
132 | 
133 |         logger.info(f"  {'=' * separator_length}")
134 | 
135 | 
136 | class ResponseExtractor:
137 |     """Utility class for extracting content from various response types."""
138 | 
139 |     @staticmethod
140 |     def extract_content(response) -> str:
141 |         """Extract clean content from Agno RunOutput objects.
142 | 
143 |         This method handles various response formats and extracts the actual
144 |         text content that should be returned to the user.
145 | 
146 |         Args:
147 |             response: The response object from agent processing
148 | 
149 |         Returns:
150 |             Extracted text content as string
151 |         """
152 |         # Handle string responses directly
153 |         if isinstance(response, str):
154 |             return response.strip()
155 | 
156 |         # Handle Agno RunOutput objects
157 |         if hasattr(response, "content"):
158 |             content = response.content
159 |             if isinstance(content, str):
160 |                 return content.strip()
161 |             if isinstance(content, list):
162 |                 # Handle list of content items
163 |                 text_parts = []
164 |                 for item in content:
165 |                     if isinstance(item, str):
166 |                         text_parts.append(item)
167 |                     elif hasattr(item, "text"):
168 |                         text_parts.append(item.text)
169 |                     elif hasattr(item, "content"):
170 |                         text_parts.append(str(item.content))
171 |                 return "\n".join(text_parts).strip()
172 | 
173 |         # Handle objects with text attribute
174 |         if hasattr(response, "text"):
175 |             return response.text.strip()
176 | 
177 |         # Handle objects with message attribute
178 |         if hasattr(response, "message"):
179 |             message = response.message
180 |             if isinstance(message, str):
181 |                 return message.strip()
182 |             if hasattr(message, "content"):
183 |                 return str(message.content).strip()
184 | 
185 |         # Fallback: convert to string
186 |         return str(response).strip()
187 | 
```
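
A minimal sketch of how `ResponseExtractor.extract_content` normalizes different response shapes; `FakeRunOutput` is a stand-in for Agno's RunOutput, not the real class.

```python
from dataclasses import dataclass

from mcp_server_mas_sequential_thinking.services.response_formatter import (
    ResponseExtractor,
)


@dataclass
class FakeRunOutput:
    """Stand-in exposing a `content` attribute like an Agno RunOutput."""

    content: str


print(ResponseExtractor.extract_content("  plain text  "))       # -> "plain text"
print(ResponseExtractor.extract_content(FakeRunOutput(" hi ")))  # -> "hi"
print(ResponseExtractor.extract_content(42))                     # fallback -> "42"
```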

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/thought_processor_refactored.py:
--------------------------------------------------------------------------------

```python
  1 | """Refactored ThoughtProcessor using dependency injection and single responsibility services.
  2 | 
  3 | This module provides a clean, maintainable implementation of thought processing
  4 | that delegates responsibilities to specialized services following clean architecture.
  5 | """
  6 | 
  7 | import time
  8 | 
  9 | from mcp_server_mas_sequential_thinking.config import ProcessingDefaults
 10 | from mcp_server_mas_sequential_thinking.core import (
 11 |     ProcessingMetadata,
 12 |     SessionMemory,
 13 |     ThoughtData,
 14 |     ThoughtProcessingError,
 15 | )
 16 | from mcp_server_mas_sequential_thinking.infrastructure import (
 17 |     MetricsLogger,
 18 | )
 19 | from mcp_server_mas_sequential_thinking.utils import setup_logging
 20 | 
 21 | from .context_builder import ContextBuilder
 22 | from .processing_orchestrator import ProcessingOrchestrator
 23 | from .response_formatter import ResponseFormatter
 24 | from .response_processor import ResponseProcessor
 25 | from .retry_handler import TeamProcessingRetryHandler
 26 | from .workflow_executor import WorkflowExecutor
 27 | 
 28 | logger = setup_logging()
 29 | 
 30 | 
 31 | class ThoughtProcessor:
 32 |     """Refactored thought processor using dependency injection and clean architecture.
 33 | 
 34 |     This class orchestrates thought processing by delegating specific responsibilities
 35 |     to specialized services, maintaining a clean separation of concerns.
 36 |     """
 37 | 
 38 |     __slots__ = (
 39 |         "_context_builder",
 40 |         "_metrics_logger",
 41 |         "_performance_tracker",
 42 |         "_processing_orchestrator",
 43 |         "_response_formatter",
 44 |         "_session",
 45 |         "_workflow_executor",
 46 |     )
 47 | 
 48 |     def __init__(self, session: SessionMemory) -> None:
 49 |         """Initialize the thought processor with dependency injection.
 50 | 
 51 |         Args:
 52 |             session: The session memory instance for accessing team and context
 53 |         """
 54 |         self._session = session
 55 | 
 56 |         # Initialize core services with dependency injection
 57 |         self._context_builder = ContextBuilder(session)
 58 |         self._workflow_executor = WorkflowExecutor(session)
 59 |         self._response_formatter = ResponseFormatter()
 60 | 
 61 |         # Initialize supporting services
 62 |         response_processor = ResponseProcessor()
 63 |         retry_handler = TeamProcessingRetryHandler()
 64 |         self._processing_orchestrator = ProcessingOrchestrator(
 65 |             session, response_processor, retry_handler
 66 |         )
 67 | 
 68 |         # Initialize monitoring services
 69 |         self._metrics_logger = MetricsLogger()
 70 | 
 71 |         logger.info("ThoughtProcessor initialized with specialized services")
 72 | 
 73 |     async def process_thought(self, thought_data: ThoughtData) -> str:
 74 |         """Process a thought through the appropriate workflow with comprehensive error handling.
 75 | 
 76 |         This is the main public API method that maintains backward compatibility
 77 |         while using the new service-based architecture internally.
 78 | 
 79 |         Args:
 80 |             thought_data: The thought data to process
 81 | 
 82 |         Returns:
 83 |             Processed thought response
 84 | 
 85 |         Raises:
 86 |             ThoughtProcessingError: If processing fails
 87 |         """
 88 |         try:
 89 |             return await self._process_thought_internal(thought_data)
 90 |         except Exception as e:
 91 |             error_msg = f"Failed to process {thought_data.thought_type.value} thought #{thought_data.thoughtNumber}: {e}"
 92 |             logger.error(error_msg, exc_info=True)
 93 |             metadata: ProcessingMetadata = {
 94 |                 "error_count": ProcessingDefaults.ERROR_COUNT_INITIAL,
 95 |                 "retry_count": ProcessingDefaults.RETRY_COUNT_INITIAL,
 96 |                 "processing_time": ProcessingDefaults.PROCESSING_TIME_INITIAL,
 97 |             }
 98 |             raise ThoughtProcessingError(error_msg, metadata) from e
 99 | 
100 |     async def _process_thought_internal(self, thought_data: ThoughtData) -> str:
101 |         """Internal thought processing logic using specialized services.
102 | 
103 |         Args:
104 |             thought_data: The thought data to process
105 | 
106 |         Returns:
107 |             Processed thought response
108 |         """
109 |         start_time = time.time()
110 | 
111 |         # Log thought data and add to session (now async for thread safety)
112 |         self._log_thought_data(thought_data)
113 |         await self._session.add_thought(thought_data)
114 | 
115 |         # Build context using specialized service (now async for thread safety)
116 |         input_prompt = await self._context_builder.build_context_prompt(thought_data)
117 |         await self._context_builder.log_context_building(thought_data, input_prompt)
118 | 
119 |         # Execute Multi-Thinking workflow using specialized service
120 |         (
121 |             content,
122 |             workflow_result,
123 |             total_time,
124 |         ) = await self._workflow_executor.execute_workflow(
125 |             thought_data, input_prompt, start_time
126 |         )
127 | 
128 |         # Format response using specialized service
129 |         final_response = self._response_formatter.format_response(content, thought_data)
130 | 
131 |         # Log workflow completion
132 |         self._workflow_executor.log_workflow_completion(
133 |             thought_data, workflow_result, total_time, final_response
134 |         )
135 | 
136 |         return final_response
137 | 
138 |     def _log_thought_data(self, thought_data: ThoughtData) -> None:
139 |         """Log comprehensive thought data information using centralized logger.
140 | 
141 |         Args:
142 |             thought_data: The thought data to log
143 |         """
144 |         basic_info = {
145 |             f"Thought #{thought_data.thoughtNumber}": f"{thought_data.thoughtNumber}/{thought_data.totalThoughts}",
146 |             "Type": thought_data.thought_type.value,
147 |             "Content": thought_data.thought,
148 |             "Next needed": thought_data.nextThoughtNeeded,
149 |             "Needs more": thought_data.needsMoreThoughts,
150 |         }
151 | 
152 |         # Add conditional fields
153 |         if thought_data.isRevision:
154 |             basic_info["Is revision"] = (
155 |                 f"True (revises thought #{thought_data.branchFromThought})"
156 |             )
157 |         if thought_data.branchFromThought:
158 |             basic_info["Branch from"] = (
159 |                 f"#{thought_data.branchFromThought} (ID: {thought_data.branchId})"
160 |             )
161 | 
162 |         basic_info["Raw data"] = thought_data.format_for_log()
163 | 
164 |         self._metrics_logger.log_metrics_block("🧩 THOUGHT DATA:", basic_info)
165 | 
166 |         # Use field length limits constant if available
167 |         separator_length = 60  # Default fallback
168 |         try:
169 |             from mcp_server_mas_sequential_thinking.config import FieldLengthLimits
170 | 
171 |             separator_length = FieldLengthLimits.SEPARATOR_LENGTH
172 |         except ImportError:
173 |             pass
174 | 
175 |         self._metrics_logger.log_separator(separator_length)
176 | 
177 |     # Legacy methods for backward compatibility - these delegate to orchestrator
178 |     async def _execute_single_agent_processing(
179 |         self, input_prompt: str, routing_decision=None
180 |     ) -> str:
181 |         """Legacy method - delegates to orchestrator for backward compatibility."""
182 |         return await self._processing_orchestrator.execute_single_agent_processing(
183 |             input_prompt, simplified=True
184 |         )
185 | 
186 |     async def _execute_team_processing(self, input_prompt: str) -> str:
187 |         """Legacy method - delegates to orchestrator for backward compatibility."""
188 |         return await self._processing_orchestrator.execute_team_processing(input_prompt)
189 | 
190 |     def _extract_response_content(self, response) -> str:
191 |         """Legacy method - delegates to formatter for backward compatibility."""
192 |         return self._response_formatter.extract_response_content(response)
193 | 
194 |     async def _build_context_prompt(self, thought_data: ThoughtData) -> str:
195 |         """Legacy method - delegates to context builder for backward compatibility."""
196 |         return await self._context_builder.build_context_prompt(thought_data)
197 | 
198 |     def _format_response(self, content: str, thought_data: ThoughtData) -> str:
199 |         """Legacy method - delegates to formatter for backward compatibility."""
200 |         return self._response_formatter.format_response(content, thought_data)
201 | 
```
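
A hedged wiring sketch: the `ThoughtData` field names follow the `sequentialthinking` tool signature in main.py and may differ from the model's actual constructor; building a configured `SessionMemory` is left abstract here.

```python
import asyncio

from mcp_server_mas_sequential_thinking.core import SessionMemory, ThoughtData
from mcp_server_mas_sequential_thinking.services.thought_processor_refactored import (
    ThoughtProcessor,
)


async def demo(session: SessionMemory) -> str:
    # The processor wires its own context builder, workflow executor,
    # and formatter from the session at construction time.
    processor = ThoughtProcessor(session)
    thought = ThoughtData(
        thought="Outline a migration plan for the logging subsystem",
        thoughtNumber=1,
        totalThoughts=3,
        nextThoughtNeeded=True,
        isRevision=False,
        branchFromThought=None,
        branchId=None,
        needsMoreThoughts=False,
    )
    return await processor.process_thought(thought)

# asyncio.run(demo(session)) once a configured SessionMemory is available
```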

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/core/types.py:
--------------------------------------------------------------------------------

```python
  1 | """Type definitions for better type safety."""
  2 | 
  3 | from __future__ import annotations
  4 | 
  5 | from dataclasses import dataclass
  6 | from enum import Enum
  7 | from typing import TYPE_CHECKING, Any, Protocol, TypedDict
  8 | 
  9 | if TYPE_CHECKING:
 10 |     from agno.agent import Agent
 11 |     from agno.models.base import Model
 12 |     from agno.team.team import Team
 13 | 
 14 |     from mcp_server_mas_sequential_thinking.config.modernized_config import ModelConfig
 15 |     from mcp_server_mas_sequential_thinking.core.models import ThoughtData
 16 | 
 17 | # Type aliases for better semantic meaning
 18 | ThoughtNumber = int
 19 | BranchId = str
 20 | ProviderName = str
 21 | TeamType = str
 22 | AgentType = str
 23 | ConfigDict = dict[str, Any]
 24 | InstructionsList = list[str]
 25 | SuccessCriteriaList = list[str]
 26 | 
 27 | 
 28 | class ExecutionMode(Enum):
 29 |     """Execution modes for different routing strategies."""
 30 | 
 31 |     SINGLE_AGENT = "single_agent"
 32 |     SELECTIVE_TEAM = "selective_team"  # Hybrid with specific specialists
 33 |     FULL_TEAM = "full_team"  # Complete multi-agent team
 34 | 
 35 | 
 36 | @dataclass
 37 | class CoordinationPlan:
 38 |     """Comprehensive plan combining routing and coordination decisions."""
 39 | 
 40 |     # Routing decisions - using string values for simplicity
 41 |     strategy: str  # ProcessingStrategy value
 42 |     complexity_level: str  # ComplexityLevel value
 43 |     complexity_score: float
 44 | 
 45 |     # Coordination decisions
 46 |     execution_mode: ExecutionMode
 47 |     specialist_roles: list[str]
 48 |     task_breakdown: list[str]
 49 |     coordination_strategy: str
 50 | 
 51 |     # Execution parameters
 52 |     timeout_seconds: float
 53 |     expected_interactions: int
 54 |     team_size: int
 55 | 
 56 |     # Reasoning and metadata
 57 |     reasoning: str
 58 |     confidence: float
 59 |     original_thought: str
 60 | 
 61 |     @classmethod
 62 |     def from_routing_decision(
 63 |         cls, routing_decision: Any, thought_data: ThoughtData
 64 |     ) -> CoordinationPlan:
 65 |         """Create coordination plan from adaptive routing decision."""
 66 |         # Map ProcessingStrategy to ExecutionMode
 67 |         strategy_to_mode = {
 68 |             "single_agent": ExecutionMode.SINGLE_AGENT,
 69 |             "hybrid": ExecutionMode.SELECTIVE_TEAM,
 70 |             "multi_agent": ExecutionMode.FULL_TEAM,
 71 |         }
 72 | 
 73 |         # Map complexity to specialist roles
 74 |         complexity_to_specialists: dict[str, list[str]] = {
 75 |             "simple": ["general"],
 76 |             "moderate": ["planner", "analyzer"],
 77 |             "complex": ["planner", "researcher", "analyzer", "critic"],
 78 |             "highly_complex": [
 79 |                 "planner",
 80 |                 "researcher",
 81 |                 "analyzer",
 82 |                 "critic",
 83 |                 "synthesizer",
 84 |             ],
 85 |         }
 86 | 
 87 |         execution_mode = strategy_to_mode.get(
 88 |             routing_decision.strategy.value, ExecutionMode.SINGLE_AGENT
 89 |         )
 90 |         specialists = complexity_to_specialists.get(
 91 |             routing_decision.complexity_level.value, ["general"]
 92 |         )
 93 | 
 94 |         return cls(
 95 |             strategy=routing_decision.strategy.value,
 96 |             complexity_level=routing_decision.complexity_level.value,
 97 |             complexity_score=routing_decision.complexity_score,
 98 |             execution_mode=execution_mode,
 99 |             specialist_roles=specialists,
100 |             team_size=len(specialists),
101 |             coordination_strategy="adaptive_routing_based",
102 |             task_breakdown=[
103 |                 f"Process {thought_data.thought_type.value} thought",
104 |                 "Generate guidance",
105 |             ],
106 |             expected_interactions=len(specialists),
107 |             timeout_seconds=300.0,  # Default timeout
108 |             reasoning=routing_decision.reasoning,
109 |             confidence=0.8,  # Default confidence for rule-based routing
110 |             original_thought=thought_data.thought,
111 |         )
112 | 
113 | 
114 | class ProcessingMetadata(TypedDict, total=False):
115 |     """Type-safe processing metadata structure."""
116 | 
117 |     strategy: str
118 |     complexity_score: float
119 |     estimated_cost: float
120 |     actual_cost: float
121 |     token_usage: int
122 |     processing_time: float
123 |     specialists: list[str]
124 |     provider: str
125 |     routing_reasoning: str
126 |     error_count: int
127 |     retry_count: int
128 | 
129 | 
130 | class SessionStats(TypedDict, total=False):
131 |     """Type-safe session statistics structure."""
132 | 
133 |     total_thoughts: int
134 |     total_cost: float
135 |     total_tokens: int
136 |     average_processing_time: float
137 |     error_rate: float
138 |     successful_thoughts: int
139 |     failed_thoughts: int
140 | 
141 | 
142 | class ComplexityMetrics(TypedDict):
143 |     """Type-safe complexity analysis metrics."""
144 | 
145 |     word_count: int
146 |     sentence_count: int
147 |     question_count: int
148 |     technical_terms: int
149 |     has_branching: bool
150 |     has_research_keywords: bool
151 |     has_analysis_keywords: bool
152 |     overall_score: float
153 | 
154 | 
155 | class ModelProvider(Protocol):
156 |     """Protocol for model provider implementations."""
157 | 
158 |     id: str
159 |     cost_per_token: float
160 | 
161 | 
162 | class AgentFactory(Protocol):
163 |     """Protocol for agent factory implementations."""
164 | 
165 |     def create_team_agents(self, model: Model, team_type: str) -> dict[str, Agent]:
166 |         """Create team agents with specified model and team type."""
167 |         ...
168 | 
169 | 
170 | class TeamBuilder(Protocol):
171 |     """Protocol for team builder implementations."""
172 | 
173 |     def build_team(self, config: ConfigDict, agent_factory: AgentFactory) -> Team:
174 |         """Build a team with specified configuration and agent factory."""
175 |         ...
176 | 
177 | 
178 | class CostEstimator(Protocol):
179 |     """Protocol for cost estimation with type safety."""
180 | 
181 |     def estimate_cost(
182 |         self, strategy: str, complexity_score: float, provider: str
183 |     ) -> tuple[tuple[int, int], float]:
184 |         """Estimate cost for processing strategy."""
185 |         ...
186 | 
187 | 
188 | class ComplexityAnalyzer(Protocol):
189 |     """Protocol for complexity analysis with type safety."""
190 | 
191 |     def analyze(self, thought_text: str) -> ComplexityMetrics:
192 |         """Analyze thought complexity and return metrics."""
193 |         ...
194 | 
195 | 
196 | class ThoughtProcessor(Protocol):
197 |     """Protocol for thought processing with type safety."""
198 | 
199 |     async def process_thought(self, thought_data: ThoughtData) -> str:
200 |         """Process a thought and return the result."""
201 |         ...
202 | 
203 | 
204 | class SessionManager(Protocol):
205 |     """Protocol for session management with type safety."""
206 | 
207 |     async def add_thought(self, thought_data: ThoughtData) -> None:
208 |         """Add a thought to the session."""
209 |         ...
210 | 
211 |     def find_thought_content(self, thought_number: int) -> str:
212 |         """Find thought content by number."""
213 |         ...
214 | 
215 |     def get_branch_summary(self) -> dict[str, int]:
216 |         """Get summary of all branches."""
217 |         ...
218 | 
219 | 
220 | class ConfigurationProvider(Protocol):
221 |     """Protocol for configuration management with type safety."""
222 | 
223 |     def get_model_config(self, provider_name: str | None = None) -> ModelConfig:
224 |         """Get model configuration."""
225 |         ...
226 | 
227 |     def check_required_api_keys(self, provider_name: str | None = None) -> list[str]:
228 |         """Check for required API keys."""
229 |         ...
230 | 
231 | 
232 | # Custom Exception Classes
233 | class ValidationError(ValueError):
234 |     """Exception raised when data validation fails."""
235 | 
236 | 
237 | class ConfigurationError(Exception):
238 |     """Exception raised when configuration is invalid."""
239 | 
240 | 
241 | class ThoughtProcessingError(Exception):
242 |     """Exception raised when thought processing fails."""
243 | 
244 |     def __init__(
245 |         self, message: str, metadata: ProcessingMetadata | None = None
246 |     ) -> None:
247 |         super().__init__(message)
248 |         self.metadata: ProcessingMetadata = metadata or {}
249 | 
250 | 
251 | class TeamCreationError(Exception):
252 |     """Exception raised when team creation fails."""
253 | 
254 | 
255 | class RoutingDecisionError(ThoughtProcessingError):
256 |     """Error in adaptive routing decision making."""
257 | 
258 | 
259 | class CostOptimizationError(ThoughtProcessingError):
260 |     """Error in cost optimization logic."""
261 | 
262 | 
263 | class PersistentStorageError(ThoughtProcessingError):
264 |     """Error in persistent memory storage."""
265 | 
266 | 
267 | class ModelConfigurationError(ConfigurationError):
268 |     """Error in model configuration."""
269 | 
270 | 
271 | class ProviderError(Exception):
272 |     """Error related to LLM providers."""
273 | 
274 | 
275 | class AgentCreationError(Exception):
276 |     """Error in agent creation."""
277 | 
```
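
A short sketch of the typed metadata and exception machinery above in use; the metric values are illustrative only.

```python
from mcp_server_mas_sequential_thinking.core.types import (
    ProcessingMetadata,
    ThoughtProcessingError,
)

# total=False means any subset of keys type-checks
metadata: ProcessingMetadata = {
    "strategy": "single_agent",
    "complexity_score": 18.5,
    "processing_time": 0.73,
    "error_count": 1,
}

try:
    raise ThoughtProcessingError("model timeout", metadata)
except ThoughtProcessingError as err:
    # Metadata travels with the exception for structured error reporting
    print(err.metadata.get("strategy"))  # -> "single_agent"
```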

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/main.py:
--------------------------------------------------------------------------------

```python
  1 | """Refactored MCP Sequential Thinking Server with separated concerns and reduced complexity."""
  2 | 
  3 | import asyncio
  4 | import sys
  5 | from collections.abc import AsyncIterator
  6 | from contextlib import asynccontextmanager
  7 | from html import escape
  8 | 
  9 | from dotenv import load_dotenv
 10 | from mcp.server.fastmcp import FastMCP
 11 | from pydantic import ValidationError
 12 | 
 13 | from .config import ProcessingDefaults, SecurityConstants, ValidationLimits
 14 | from .core import ThoughtProcessingError
 15 | 
 16 | # Import refactored modules
 17 | from .services import (
 18 |     ServerConfig,
 19 |     ServerState,
 20 |     ThoughtProcessor,
 21 |     create_server_lifespan,
 22 |     create_validated_thought_data,
 23 | )
 24 | from .utils import setup_logging
 25 | 
 26 | # Initialize environment and logging
 27 | load_dotenv()
 28 | logger = setup_logging()
 29 | 
 30 | # Global server state with thread safety
 31 | _server_state: ServerState | None = None
 32 | _thought_processor: ThoughtProcessor | None = None
 33 | _processor_lock = asyncio.Lock()
 34 | 
 35 | 
 36 | @asynccontextmanager
 37 | async def app_lifespan(app) -> AsyncIterator[None]:
 38 |     """Simplified application lifespan using refactored server core."""
 39 |     global _server_state, _thought_processor
 40 | 
 41 |     logger.info("Starting Sequential Thinking Server")
 42 | 
 43 |     async with create_server_lifespan() as server_state:
 44 |         _server_state = server_state
 45 |         logger.info("Server ready for requests")
 46 |         yield
 47 | 
 48 |     _server_state = None
 49 |     _thought_processor = None
 50 |     logger.info("Server stopped")
 51 | 
 52 | 
 53 | # Initialize FastMCP with lifespan
 54 | mcp = FastMCP(lifespan=app_lifespan)
 55 | 
 56 | 
 57 | def sanitize_and_validate_input(text: str, max_length: int, field_name: str) -> str:
 58 |     """Sanitize and validate input with comprehensive security checks."""
 59 |     # Early validation with guard clause
 60 |     if not text or not text.strip():
 61 |         raise ValueError(f"{field_name} cannot be empty")
 62 | 
 63 |     # Strip and normalize whitespace first
 64 |     text = text.strip()
 65 | 
 66 |     # Check for potential prompt injection patterns using centralized constants
 67 |     text_lower = text.lower()
 68 |     for pattern in SecurityConstants.INJECTION_PATTERNS:
 69 |         if pattern in text_lower:
 70 |             raise ValueError(
 71 |                 f"Potential security risk detected in {field_name}. "
 72 |                 f"Input contains suspicious pattern: '{pattern}'"
 73 |             )
 74 | 
 75 |     # Additional security checks
 76 |     if text.count('"') > 10 or text.count("'") > 10:
 77 |         raise ValueError(
 78 |             f"Excessive quotation marks detected in {field_name}. "
 79 |             "This may indicate an injection attempt."
 80 |         )
 81 | 
 82 |     # Sanitize HTML entities and special characters
 83 |     sanitized_text = escape(text)
 84 | 
 85 |     # Length validation with descriptive error
 86 |     if len(sanitized_text) > max_length:
 87 |         raise ValueError(
 88 |             f"{field_name} exceeds maximum length of {max_length} characters"
 89 |         )
 90 | 
 91 |     return sanitized_text
 92 | 
 93 | 
 94 | @mcp.prompt("sequential-thinking")
 95 | def sequential_thinking_prompt(problem: str, context: str = "") -> list[dict]:
 96 |     """Enhanced starter prompt for sequential thinking with better formatting."""
 97 |     # Sanitize and validate inputs
 98 |     try:
 99 |         problem = sanitize_and_validate_input(
100 |             problem, ValidationLimits.MAX_PROBLEM_LENGTH, "Problem statement"
101 |         )
102 |         context = (
103 |             sanitize_and_validate_input(
104 |                 context, ValidationLimits.MAX_CONTEXT_LENGTH, "Context"
105 |             )
106 |             if context
107 |             else ""
108 |         )
109 |     except ValueError as e:
110 |         raise ValueError(f"Input validation failed: {e}") from e
111 | 
112 |     user_prompt = f"""Initiate sequential thinking for: {problem}
113 | {f"Context: {context}" if context else ""}"""
114 | 
115 |     assistant_guide = f"""Starting sequential thinking process for: {problem}
116 | 
117 | Process Guidelines:
118 | 1. Estimate appropriate number of total thoughts based on problem complexity
119 | 2. Begin with: "Plan comprehensive analysis for: {problem}"
120 | 3. Use revisions (isRevision=True) to improve previous thoughts
121 | 4. Use branching (branchFromThought, branchId) for alternative approaches
122 | 5. Each thought should be detailed with clear reasoning
123 | 6. Progress systematically through analysis phases
124 | 
125 | System Architecture:
126 | - Multi-Thinking methodology with intelligent routing
127 | - Factual, Emotional, Critical, Optimistic, Creative, and Synthesis perspectives
128 | - Adaptive thinking sequence based on thought complexity and type
129 | - Comprehensive integration through Synthesis thinking
130 | 
131 | Ready to begin systematic analysis."""
132 | 
133 |     return [
134 |         {
135 |             "description": "Enhanced sequential thinking starter with comprehensive guidelines",
136 |             "messages": [
137 |                 {"role": "user", "content": {"type": "text", "text": user_prompt}},
138 |                 {
139 |                     "role": "assistant",
140 |                     "content": {"type": "text", "text": assistant_guide},
141 |                 },
142 |             ],
143 |         }
144 |     ]
145 | 
146 | 
147 | @mcp.tool()
148 | async def sequentialthinking(
149 |     thought: str,
150 |     thoughtNumber: int,
151 |     totalThoughts: int,
152 |     nextThoughtNeeded: bool,
153 |     isRevision: bool,
154 |     branchFromThought: int | None,
155 |     branchId: str | None,
156 |     needsMoreThoughts: bool,
157 | ) -> str:
158 |     """Advanced sequential thinking tool with multi-agent coordination.
159 | 
160 |     Processes thoughts through a specialized team of AI agents that coordinate
161 |     to provide comprehensive analysis, planning, research, critique, and synthesis.
162 | 
163 |     Args:
164 |         thought: Content of the thinking step (required)
165 |         thoughtNumber: Sequence number starting from 1 (≥1)
166 |         totalThoughts: Estimated total thoughts required (≥1)
167 |         nextThoughtNeeded: Whether another thought step follows this one
168 |         isRevision: Whether this thought revises a previous thought
169 |         branchFromThought: Thought number to branch from for alternative exploration
170 |         branchId: Unique identifier for the branch (required if branchFromThought set)
171 |         needsMoreThoughts: Whether more thoughts are needed beyond the initial estimate
172 | 
173 |     Returns:
174 |         Synthesized response from the multi-agent team with guidance for next steps
175 | 
176 |     Raises:
177 |         ProcessingError: When thought processing fails
178 |         ValidationError: When input validation fails
179 |         RuntimeError: When server state is invalid
180 |     """
181 |     # Capture server state locally to avoid async race conditions
182 |     current_server_state = _server_state
183 |     if current_server_state is None:
184 |         return "Server Error: Server not initialized"
185 | 
186 |     try:
187 |         # Create and validate thought data using refactored function
188 |         thought_data = create_validated_thought_data(
189 |             thought=thought,
190 |             thoughtNumber=thoughtNumber,
191 |             totalThoughts=totalThoughts,
192 |             nextThoughtNeeded=nextThoughtNeeded,
193 |             isRevision=isRevision,
194 |             branchFromThought=branchFromThought,
195 |             branchId=branchId,
196 |             needsMoreThoughts=needsMoreThoughts,
197 |         )
198 | 
199 |         # Use captured state directly to avoid race conditions
200 |         global _thought_processor
201 |         async with _processor_lock:
202 |             if _thought_processor is None:
203 |                 logger.info(
204 |                     "Initializing ThoughtProcessor with Multi-Thinking workflow"
205 |                 )
206 |                 _thought_processor = ThoughtProcessor(current_server_state.session)
207 |             processor = _thought_processor  # local ref survives lifespan teardown
208 |         result = await processor.process_thought(thought_data)
209 | 
210 |         logger.info(f"Successfully processed thought #{thoughtNumber}")
211 |         return result
212 | 
213 |     except ValidationError as e:
214 |         error_msg = f"Input validation failed for thought #{thoughtNumber}: {e}"
215 |         logger.exception(error_msg)
216 |         return f"Validation Error: {e}"
217 | 
218 |     except ThoughtProcessingError as e:
219 |         error_msg = f"Processing failed for thought #{thoughtNumber}: {e}"
220 |         logger.exception(error_msg)
221 |         if hasattr(e, "metadata") and e.metadata:
222 |             logger.exception(f"Error metadata: {e.metadata}")
223 |         return f"Processing Error: {e}"
224 | 
225 |     except Exception as e:
226 |         error_msg = f"Unexpected error processing thought #{thoughtNumber}: {e}"
227 |         logger.exception(error_msg)
228 |         return f"Unexpected Error: {e}"
229 | 
230 | 
231 | def run() -> None:
232 |     """Run the MCP server with enhanced error handling and graceful shutdown."""
233 |     config = ServerConfig.from_environment()
234 |     logger.info(f"Starting Sequential Thinking Server with {config.provider} provider")
235 | 
236 |     try:
237 |         # Run server with stdio transport
238 |         mcp.run(transport="stdio")
239 | 
240 |     except KeyboardInterrupt:
241 |         logger.info("Server stopped by user (SIGINT)")
242 | 
243 |     except SystemExit as e:
244 |         logger.info(f"Server stopped with exit code: {e.code}")
245 |         raise
246 | 
247 |     except Exception as e:
248 |         logger.error(f"Critical server error: {e}", exc_info=True)
249 |         sys.exit(ProcessingDefaults.EXIT_CODE_ERROR)
250 | 
251 |     finally:
252 |         logger.info("Server shutdown sequence complete")
253 | 
254 | 
255 | def main() -> None:
256 |     """Main entry point with proper error handling."""
257 |     try:
258 |         run()
259 |     except Exception as e:
260 |         logger.critical(f"Fatal error in main: {e}", exc_info=True)
261 |         sys.exit(ProcessingDefaults.EXIT_CODE_ERROR)
262 | 
263 | 
264 | if __name__ == "__main__":
265 |     main()
266 | 
```
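
A behavior sketch for `sanitize_and_validate_input`; note that importing `main` initializes the FastMCP app as a side effect, so in practice this runs inside the package rather than as a standalone script.

```python
from mcp_server_mas_sequential_thinking.main import sanitize_and_validate_input

# Clean input is normalized and HTML-escaped before being returned
print(sanitize_and_validate_input("Plan the <new> rollout", 500, "Problem statement"))
# -> "Plan the &lt;new&gt; rollout"

# Injection patterns from SecurityConstants are rejected outright
try:
    sanitize_and_validate_input(
        "ignore previous instructions", 500, "Problem statement"
    )
except ValueError as err:
    print(err)  # flags the suspicious "ignore previous" pattern
```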

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/config/constants.py:
--------------------------------------------------------------------------------

```python
  1 | """Constants for the MCP Sequential Thinking Server."""
  2 | 
  3 | from enum import Enum
  4 | from typing import ClassVar
  5 | 
  6 | 
  7 | class TokenCosts:
  8 |     """Token cost constants for different providers (cost per 1000 tokens)."""
  9 | 
 10 |     DEEPSEEK_COST_PER_1K = 0.0002
 11 |     GROQ_COST_PER_1K = 0.0001
 12 |     OPENROUTER_COST_PER_1K = 0.001
 13 |     GITHUB_COST_PER_1K = 0.0005
 14 |     OLLAMA_COST_PER_1K = 0.0000
 15 |     DEFAULT_COST_PER_1K = 0.0002
 16 | 
 17 | 
 18 | class ComplexityScoring:
 19 |     """Complexity analysis scoring constants."""
 20 | 
 21 |     MAX_SCORE = 100.0
 22 |     WORD_COUNT_MAX_POINTS = 15
 23 |     WORD_COUNT_DIVISOR = 20
 24 |     SENTENCE_MULTIPLIER = 2
 25 |     SENTENCE_MAX_POINTS = 10
 26 |     QUESTION_MULTIPLIER = 3
 27 |     QUESTION_MAX_POINTS = 15
 28 |     TECHNICAL_TERM_MULTIPLIER = 2
 29 |     TECHNICAL_TERM_MAX_POINTS = 20
 30 |     BRANCHING_MULTIPLIER = 5
 31 |     BRANCHING_MAX_POINTS = 15
 32 |     RESEARCH_MULTIPLIER = 3
 33 |     RESEARCH_MAX_POINTS = 15
 34 |     ANALYSIS_MULTIPLIER = 2
 35 |     ANALYSIS_MAX_POINTS = 10
 36 | 
 37 | 
 38 | class TokenEstimates:
 39 |     """Token estimation ranges by complexity and strategy."""
 40 | 
 41 |     # Single agent token estimates (min, max)
 42 |     SINGLE_AGENT_SIMPLE = (400, 800)
 43 |     SINGLE_AGENT_MODERATE = (600, 1200)
 44 |     SINGLE_AGENT_COMPLEX = (800, 1600)
 45 |     SINGLE_AGENT_HIGHLY_COMPLEX = (1000, 2000)
 46 | 
 47 |     # Multi-agent token estimates (min, max)
 48 |     MULTI_AGENT_SIMPLE = (2000, 4000)
 49 |     MULTI_AGENT_MODERATE = (3000, 6000)
 50 |     MULTI_AGENT_COMPLEX = (4000, 8000)
 51 |     MULTI_AGENT_HIGHLY_COMPLEX = (5000, 10000)
 52 | 
 53 | 
 54 | class ValidationLimits:
 55 |     """Input validation and system limits."""
 56 | 
 57 |     MAX_PROBLEM_LENGTH = 500
 58 |     MAX_CONTEXT_LENGTH = 300
 59 |     MAX_THOUGHTS_PER_SESSION = 1000
 60 |     MAX_BRANCHES_PER_SESSION = 50
 61 |     MAX_THOUGHTS_PER_BRANCH = 100
 62 |     GITHUB_TOKEN_LENGTH = 40
 63 |     MIN_THOUGHT_NUMBER = 1
 64 | 
 65 | 
 66 | class DefaultTimeouts:
 67 |     """Default timeout values in seconds."""
 68 | 
 69 |     PROCESSING_TIMEOUT = 30.0
 70 |     DEEPSEEK_PROCESSING_TIMEOUT = 120.0  # Legacy timeout for Deepseek (deprecated)
 71 |     MULTI_AGENT_TIMEOUT_MULTIPLIER = 2.0  # Multiply timeout for multi-agent
 72 | 
 73 |     # Adaptive timeout strategy (v0.5.1+)
 74 |     ADAPTIVE_BASE_DEEPSEEK = 90.0  # Base timeout for Deepseek with adaptive scaling
 75 |     ADAPTIVE_BASE_GROQ = 20.0  # Base timeout for Groq
 76 |     ADAPTIVE_BASE_OPENAI = 45.0  # Base timeout for OpenAI
 77 |     ADAPTIVE_BASE_DEFAULT = 30.0  # Default base timeout
 78 | 
 79 |     # Maximum timeouts (safety ceiling)
 80 |     MAX_TIMEOUT_DEEPSEEK = 300.0  # 5 minutes absolute maximum for Deepseek
 81 |     MAX_TIMEOUT_DEFAULT = 180.0  # 3 minutes maximum for others
 82 | 
 83 |     # Complexity multipliers for adaptive timeouts
 84 |     COMPLEXITY_SIMPLE_MULTIPLIER = 1.0
 85 |     COMPLEXITY_MODERATE_MULTIPLIER = 1.5
 86 |     COMPLEXITY_COMPLEX_MULTIPLIER = 2.0
 87 |     COMPLEXITY_HIGHLY_COMPLEX_MULTIPLIER = 3.0
 88 | 
 89 |     # Retry configuration
 90 |     RETRY_EXPONENTIAL_BASE = 1.5  # Exponential backoff base
 91 |     MAX_RETRY_ATTEMPTS = 2  # Maximum retry attempts
 92 | 
 93 |     SESSION_CLEANUP_DAYS = 30
 94 |     RECENT_SESSION_KEEP_COUNT = 100
 95 | 
 96 | 
 97 | class LoggingLimits:
 98 |     """Logging configuration constants."""
 99 | 
100 |     LOG_FILE_MAX_BYTES = 5 * 1024 * 1024  # 5MB
101 |     LOG_BACKUP_COUNT = 3
102 |     SENSITIVE_DATA_MIN_LENGTH = 8
103 | 
104 | 
105 | class QualityThresholds:
106 |     """Quality scoring and budget utilization thresholds."""
107 | 
108 |     DEFAULT_QUALITY_THRESHOLD = 0.7
109 |     HIGH_BUDGET_UTILIZATION = 0.8
110 |     VERY_HIGH_BUDGET_UTILIZATION = 0.9
111 |     MULTI_AGENT_HIGH_USAGE = 0.7
112 |     SINGLE_AGENT_HIGH_USAGE = 0.8
113 |     MINIMUM_USAGE_FOR_SUGGESTIONS = 10
114 |     SIGNIFICANT_COST_THRESHOLD = 0.01
115 | 
116 | 
117 | class ProviderDefaults:
118 |     """Default provider configurations."""
119 | 
120 |     DEFAULT_QUALITY_SCORE = 0.8
121 |     DEFAULT_RESPONSE_TIME = 2.0
122 |     DEFAULT_UPTIME_SCORE = 0.95
123 |     DEFAULT_ERROR_RATE = 0.05
124 |     DEFAULT_CONTEXT_LENGTH = 4096
125 | 
126 | 
127 | class ComplexityThresholds:
128 |     """Complexity level thresholds for scoring."""
129 | 
130 |     SIMPLE_MAX = 25.0
131 |     MODERATE_MAX = 50.0
132 |     COMPLEX_MAX = 75.0
133 |     # HIGHLY_COMPLEX is anything above COMPLEX_MAX
134 | 
135 | 
136 | class DefaultValues:
137 |     """Default configuration values."""
138 | 
139 |     DEFAULT_LLM_PROVIDER = "deepseek"
140 |     DEFAULT_TEAM_MODE = "standard"
141 |     DEFAULT_LOG_LEVEL = "INFO"
142 |     DEFAULT_MAX_RETRIES = 3
143 |     DEFAULT_TIMEOUT = 30.0
144 | 
145 | 
146 | class PerformanceMetrics:
147 |     """Performance measurement constants."""
148 | 
149 |     # Efficiency calculation thresholds
150 |     EFFICIENCY_TIME_THRESHOLD = 60.0  # seconds
151 |     PERFECT_EXECUTION_CONSISTENCY = 1.0
152 |     DEFAULT_EXECUTION_CONSISTENCY = 0.9
153 |     PERFECT_EFFICIENCY_SCORE = 1.0
154 |     MINIMUM_EFFICIENCY_SCORE = 0.5
155 | 
156 |     # Retry and sleep constants
157 |     RETRY_SLEEP_DURATION = 1.0  # seconds
158 | 
159 |     # Logging formatting
160 |     SEPARATOR_LENGTH = 50
161 | 
162 | 
163 | class ProcessingDefaults:
164 |     """Default values for thought processing."""
165 | 
166 |     ERROR_COUNT_INITIAL = 1
167 |     RETRY_COUNT_INITIAL = 0
168 |     PROCESSING_TIME_INITIAL = 0.0
169 |     TEAM_INITIALIZER_INDEX = 1
170 |     SINGLE_AGENT_TIMEOUT_MULTIPLIER = 0.5
171 |     EXIT_CODE_ERROR = 1
172 | 
173 | 
174 | class FieldLengthLimits:
175 |     """Field length limits for various inputs."""
176 | 
177 |     MIN_STRING_LENGTH = 1
178 |     MAX_STANDARD_STRING = 2000
179 |     MAX_DESCRIPTION_LENGTH = 1000
180 |     MAX_BRANCH_ID_LENGTH = 100
181 |     SEPARATOR_LENGTH = 50
182 | 
183 | 
184 | class DatabaseConstants:
185 |     """Database configuration constants."""
186 | 
187 |     SESSION_CLEANUP_BATCH_SIZE = 100
188 |     THOUGHT_QUERY_LIMIT = 1000
189 |     CONNECTION_POOL_SIZE = 5
190 |     CONNECTION_POOL_OVERFLOW = 10
191 | 
192 | 
193 | class ThoughtProcessingLimits:
194 |     """Limits for thought processing workflow."""
195 | 
196 |     MIN_THOUGHT_SEQUENCE = 1
197 |     MAX_TEAM_DELEGATION_COUNT = 2
198 |     ANALYSIS_TIME_LIMIT_SECONDS = 5
199 |     MIN_PROCESSING_STEPS = 1
200 |     MAX_PROCESSING_STEPS = 6
201 | 
202 | 
203 | class TechnicalTerms:
204 |     """Technical terms for complexity analysis."""
205 | 
206 |     KEYWORDS: ClassVar[list[str]] = [
207 |         "algorithm",
208 |         "data",
209 |         "analysis",
210 |         "system",
211 |         "process",
212 |         "design",
213 |         "implementation",
214 |         "architecture",
215 |         "framework",
216 |         "model",
217 |         "structure",
218 |         "optimization",
219 |         "performance",
220 |         "scalability",
221 |         "integration",
222 |         "api",
223 |         "database",
224 |         "security",
225 |         "authentication",
226 |         "authorization",
227 |         "testing",
228 |         "deployment",
229 |         "configuration",
230 |         "monitoring",
231 |         "logging",
232 |         "debugging",
233 |         "refactoring",
234 |         "migration",
235 |         "synchronization",
236 |         "caching",
237 |         "protocol",
238 |         "interface",
239 |         "inheritance",
240 |         "polymorphism",
241 |         "abstraction",
242 |         "encapsulation",
243 |     ]
244 | 
245 | 
246 | class DefaultSettings:
247 |     """Default application settings."""
248 | 
249 |     DEFAULT_PROVIDER = "deepseek"
250 |     DEFAULT_COMPLEXITY_THRESHOLD = 30.0
251 |     DEFAULT_TOKEN_BUFFER = 0.2
252 |     DEFAULT_SESSION_TIMEOUT = 3600
253 | 
254 | 
255 | class CostOptimizationConstants:
256 |     """Constants for cost optimization calculations."""
257 | 
258 |     # Quality scoring weights
259 |     QUALITY_WEIGHT = 0.4
260 |     COST_WEIGHT = 0.3
261 |     SPEED_WEIGHT = 0.2
262 |     RELIABILITY_WEIGHT = 0.1
263 | 
264 |     # Cost calculation factors
265 |     COST_NORMALIZATION_FACTOR = 0.0003
266 |     COST_EPSILON = 0.0001  # Prevent division by zero
267 |     DEFAULT_COST_ESTIMATE = 0.0002
268 |     SPEED_NORMALIZATION_BASE = 10
269 |     SPEED_THRESHOLD = 1
270 | 
271 |     # Quality scoring bounds
272 |     MIN_QUALITY_SCORE = 0.0
273 |     MAX_QUALITY_SCORE = 1.0
274 | 
275 |     # Budget utilization thresholds
276 |     HIGH_BUDGET_UTILIZATION = 0.8
277 |     MODERATE_BUDGET_UTILIZATION = 0.7
278 |     CRITICAL_BUDGET_UTILIZATION = 0.9
279 | 
280 |     # Complexity bonuses
281 |     QUALITY_COMPLEXITY_BONUS = 0.2
282 |     COST_COMPLEXITY_BONUS = 0.0001
283 | 
284 |     # Provider optimization
285 |     HIGH_USAGE_PENALTY = 2.0
286 |     MODERATE_USAGE_PENALTY = 0.5
287 |     QUALITY_UPDATE_WEIGHT = 0.1
288 |     OLD_QUALITY_WEIGHT = 0.9
289 | 
290 |     # Usage analysis thresholds
291 |     MIN_DATA_THRESHOLD = 10
292 |     HIGH_MULTI_AGENT_RATIO = 0.7
293 |     HIGH_SINGLE_AGENT_RATIO = 0.8
294 |     MINIMUM_COST_DIFFERENCE = 0.01
295 | 
296 |     # Provider-specific configurations
297 |     GROQ_RATE_LIMIT = 14400
298 |     GROQ_CONTEXT_LENGTH = 32768
299 |     GROQ_QUALITY_SCORE = 0.75
300 |     GROQ_RESPONSE_TIME = 0.8
301 | 
302 |     DEEPSEEK_QUALITY_SCORE = 0.85
303 |     DEEPSEEK_CONTEXT_LENGTH = 128000
304 | 
305 |     GITHUB_CONTEXT_LENGTH = 128000
306 | 
307 |     OPENROUTER_RESPONSE_TIME = 3.0
308 |     OPENROUTER_CONTEXT_LENGTH = 200000
309 | 
310 |     OLLAMA_QUALITY_SCORE = 0.70
311 |     OLLAMA_RESPONSE_TIME = 5.0
312 |     OLLAMA_CONTEXT_LENGTH = 8192
313 | 
314 | 
315 | class ComplexityAnalysisConstants:
316 |     """Constants for complexity analysis calculations."""
317 | 
318 |     # Multilingual text analysis requires different tokenization strategies
319 |     CHINESE_WORD_RATIO = 2  # Character-based scripts need different word boundaries
320 |     CHINESE_DOMINANCE_THRESHOLD = 0.3  # Script detection threshold for optimization
321 | 
322 |     # Complexity scoring weights (extracted from adaptive_routing.py)
323 |     WORD_COUNT_WEIGHT = 0.15
324 |     SENTENCE_WEIGHT = 0.10
325 |     QUESTION_WEIGHT = 0.15
326 |     TECHNICAL_TERM_WEIGHT = 0.20
327 |     BRANCHING_WEIGHT = 0.15
328 |     RESEARCH_WEIGHT = 0.15
329 |     ANALYSIS_DEPTH_WEIGHT = 0.10
330 | 
331 |     # Complexity level thresholds
332 |     SIMPLE_THRESHOLD = 25.0
333 |     MODERATE_THRESHOLD = 50.0
334 |     COMPLEX_THRESHOLD = 75.0
335 | 
336 |     # Branching bonus for actual branch context
337 |     BRANCHING_CONTEXT_BONUS = 2
338 | 
339 |     # Text analysis limits
340 |     MAX_PREVIEW_LENGTH = 200
341 |     MIN_SENTENCE_LENGTH = 3
342 | 
343 | 
344 | class SecurityConstants:
345 |     """Security-related constants for input validation."""
346 | 
347 |     # Patterns that indicate potential prompt injection attempts
348 |     INJECTION_PATTERNS: ClassVar[list[str]] = [
349 |         # System/role instruction injections
350 |         "system:",
351 |         "user:",
352 |         "assistant:",
353 |         "role:",
354 |         # Prompt escape attempts
355 |         "ignore previous",
356 |         "ignore all",
357 |         "disregard",
358 |         # Code execution attempts
359 |         "```python",
360 |         "```bash",
361 |         "exec(",
362 |         "eval(",
363 |         "__import__",
364 |         # Instruction manipulation
365 |         "new instructions",
366 |         "override",
367 |         "instead of",
368 |         # Data extraction attempts
369 |         "print(",
370 |         "console.log",
371 |         "alert(",
372 |         "document.cookie",
373 |     ]
374 | 
375 | 
376 | class ProcessingStrategy(Enum):
377 |     """Processing strategy enumeration."""
378 | 
379 |     SINGLE_AGENT = "single_agent"
380 |     MULTI_AGENT = "multi_agent"
381 |     ADAPTIVE = "adaptive"
382 | 
```
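
A sketch of how `ComplexityThresholds` might map a score to a level name; whether the boundaries are inclusive is an assumption, since the actual comparison lives elsewhere in the routing code.

```python
from mcp_server_mas_sequential_thinking.config.constants import ComplexityThresholds


def classify(score: float) -> str:
    """Map a 0-100 complexity score to a level name (boundary handling assumed)."""
    if score <= ComplexityThresholds.SIMPLE_MAX:
        return "simple"
    if score <= ComplexityThresholds.MODERATE_MAX:
        return "moderate"
    if score <= ComplexityThresholds.COMPLEX_MAX:
        return "complex"
    return "highly_complex"


print(classify(18.0))  # -> "simple"
print(classify(82.5))  # -> "highly_complex"
```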

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/routing/ai_complexity_analyzer.py:
--------------------------------------------------------------------------------

```python
  1 | """AI-Powered Complexity Analyzer.
  2 | 
  3 | Uses an AI agent to intelligently assess thought complexity, replacing the rule-based approach
  4 | with more nuanced understanding of context, semantics, and depth.
  5 | """
  6 | 
  7 | import json
  8 | import logging
  9 | from typing import TYPE_CHECKING, Any
 10 | 
 11 | from agno.agent import Agent
 12 | 
 13 | from mcp_server_mas_sequential_thinking.config.modernized_config import get_model_config
 14 | 
 15 | from .complexity_types import ComplexityAnalyzer, ComplexityMetrics
 16 | 
 17 | if TYPE_CHECKING:
 18 |     from mcp_server_mas_sequential_thinking.core.models import ThoughtData
 19 | 
 20 | logger = logging.getLogger(__name__)
 21 | 
 22 | 
 23 | COMPLEXITY_ANALYSIS_PROMPT = """
 24 | You are an expert complexity analyzer for thought processing systems. Your task is to analyze the cognitive complexity of a given thought and return a structured assessment.
 25 | 
 26 | Analyze the following thought and provide complexity metrics:
 27 | 
 28 | **Thought to Analyze:** "{thought}"
 29 | 
 30 | **Instructions:**
 31 | 1. Consider semantic depth, philosophical implications, conceptual complexity
 32 | 2. Evaluate required cognitive resources (memory, reasoning, creativity)
 33 | 3. Assess multi-dimensional thinking requirements
 34 | 4. Consider cultural and linguistic nuances across different languages
 35 | 
 36 | **Response Format:** Return ONLY a valid JSON object with these exact fields:
 37 | ```json
 38 | {{
 39 |     "complexity_score": <float 0-100>,
 40 |     "word_count": <int>,
 41 |     "sentence_count": <int>,
 42 |     "question_count": <int>,
 43 |     "technical_terms": <int>,
 44 |     "branching_references": <int>,
 45 |     "research_indicators": <int>,
 46 |     "analysis_depth": <int>,
 47 |     "philosophical_depth_boost": <int 0-15>,
 48 |     "primary_problem_type": "<FACTUAL|EMOTIONAL|CRITICAL|OPTIMISTIC|CREATIVE|SYNTHESIS|EVALUATIVE|PHILOSOPHICAL|DECISION>",
 49 |     "thinking_modes_needed": ["<list of required thinking modes>"],
 50 |     "reasoning": "<brief explanation of scoring and problem type analysis>"
 51 | }}
 52 | ```
 53 | 
 54 | **Scoring Guidelines:**
 55 | - 0-10: Simple factual questions or basic statements
 56 | - 11-25: Moderate complexity, requires some analysis
 57 | - 26-50: Complex topics requiring deep thinking
 58 | - 51-75: Highly complex, multi-faceted problems
 59 | - 76-100: Extremely complex philosophical/existential questions
 60 | 
 61 | **Problem Type Analysis:**
 62 | - FACTUAL: Information seeking, definitions, statistics (what, when, where, who)
 63 | - EMOTIONAL: Feelings, intuition, personal experiences (feel, sense, worry)
 64 | - CRITICAL: Risk assessment, problems, disadvantages (issue, risk, wrong)
 65 | - OPTIMISTIC: Benefits, opportunities, positive aspects (good, benefit, advantage)
 66 | - CREATIVE: Innovation, alternatives, new ideas (creative, brainstorm, imagine)
 67 | - SYNTHESIS: Integration, summary, holistic view (combine, overall, strategy)
 68 | - EVALUATIVE: Comparison, assessment, judgment (compare, evaluate, best)
 69 | - PHILOSOPHICAL: Meaning, existence, values (purpose, meaning, ethics)
 70 | - DECISION: Choice making, selection, recommendations (decide, choose, should)
 71 | 
 72 | **Thinking Modes Needed:**
 73 | Select appropriate modes based on problem characteristics:
 74 | - FACTUAL thinking for information gathering
 75 | - EMOTIONAL thinking for intuitive insights
 76 | - CRITICAL thinking for risk analysis
 77 | - OPTIMISTIC thinking for opportunity identification
 78 | - CREATIVE thinking for innovation
 79 | - SYNTHESIS thinking for integration
 80 | 
 81 | **Special Considerations:**
 82 | - Philosophical questions like "Why do we live if we die?" should score 40-70+
 83 | - Short but profound questions can have high complexity
 84 | - Consider emotional and existential weight, not just length
 85 | - Multilingual philosophical concepts preserve cultural context
 86 | 
 87 | Analyze now:
 88 | """
 89 | 
 90 | 
 91 | class AIComplexityAnalyzer(ComplexityAnalyzer):
 92 |     """AI-powered complexity analyzer using language models."""
 93 | 
 94 |     def __init__(self, model_config: Any | None = None) -> None:
 95 |         self.model_config = model_config or get_model_config()
 96 |         self._agent: Agent | None = None
 97 | 
 98 |     def _get_agent(self) -> Agent:
 99 |         """Lazy initialization of the analysis agent."""
100 |         if self._agent is None:
101 |             model = self.model_config.create_agent_model()
102 |             self._agent = Agent(
103 |                 name="ComplexityAnalyzer",
104 |                 model=model,
105 |                 introduction="You are an expert in cognitive complexity assessment, specializing in philosophy and deep thinking analysis.",
106 |             )
107 |         return self._agent
108 | 
109 |     async def analyze(self, thought_data: "ThoughtData") -> ComplexityMetrics:
110 |         """Analyze thought complexity using AI agent."""
111 |         logger.info("🤖 AI COMPLEXITY ANALYSIS:")
112 |         logger.info(f"  📝 Analyzing: {thought_data.thought[:100]}...")
113 | 
114 |         try:
115 |             agent = self._get_agent()
116 |             prompt = COMPLEXITY_ANALYSIS_PROMPT.format(thought=thought_data.thought)
117 | 
118 |             # Get AI analysis
119 |             result = await agent.arun(input=prompt)
120 | 
121 |             # Extract JSON response
122 |             response_text = self._extract_response_content(result)
123 |             complexity_data = self._parse_json_response(response_text)
124 | 
125 |             # Create metrics object with AI assessment
126 |             metrics = ComplexityMetrics(
127 |                 complexity_score=complexity_data.get("complexity_score", 0.0),
128 |                 word_count=complexity_data.get("word_count", 0),
129 |                 sentence_count=complexity_data.get("sentence_count", 0),
130 |                 question_count=complexity_data.get("question_count", 0),
131 |                 technical_terms=complexity_data.get("technical_terms", 0),
132 |                 branching_references=complexity_data.get("branching_references", 0),
133 |                 research_indicators=complexity_data.get("research_indicators", 0),
134 |                 analysis_depth=complexity_data.get("analysis_depth", 0),
135 |                 philosophical_depth_boost=complexity_data.get(
136 |                     "philosophical_depth_boost", 0
137 |                 ),
138 |                 # AI Analysis Results (critical for routing)
139 |                 primary_problem_type=complexity_data.get(
140 |                     "primary_problem_type", "GENERAL"
141 |                 ),
142 |                 thinking_modes_needed=complexity_data.get(
143 |                     "thinking_modes_needed", ["SYNTHESIS"]
144 |                 ),
145 |                 analyzer_type="ai",
146 |                 reasoning=complexity_data.get("reasoning", "AI analysis"),
147 |             )
148 | 
149 |             logger.info(f"  🎯 AI Complexity Score: {metrics.complexity_score:.1f}/100")
150 |             logger.info(
151 |                 f"  💭 Reasoning: {complexity_data.get('reasoning', 'No reasoning provided')[:100]}..."
152 |             )
153 | 
154 |             return metrics
155 | 
156 |         except Exception as e:
157 |             logger.exception(f"❌ AI complexity analysis failed: {e}")
158 |             # Fallback to basic analysis
159 |             return self._basic_fallback_analysis(thought_data)
160 | 
161 |     def _extract_response_content(self, result: Any) -> str:
162 |         """Extract content from agent response."""
163 |         if hasattr(result, "content"):
164 |             return str(result.content)
165 |         return str(result)
166 | 
167 |     def _parse_json_response(self, response_text: str) -> dict[str, Any]:
168 |         """Parse JSON from AI response, handling various formats."""
169 |         # Try to find JSON in the response
170 |         lines = response_text.strip().split("\n")
171 | 
172 |         for line in lines:
173 |             line = line.strip()
174 |             if line.startswith("{") and line.endswith("}"):
175 |                 try:
176 |                     return json.loads(line)
177 |                 except json.JSONDecodeError:
178 |                     continue
179 | 
180 |         # Try to extract JSON from code blocks
181 |         if "```json" in response_text:
182 |             start = response_text.find("```json") + 7
183 |             end = response_text.find("```", start)
184 |             if end > start:
185 |                 json_text = response_text[start:end].strip()
186 |                 try:
187 |                     return json.loads(json_text)
188 |                 except json.JSONDecodeError:
189 |                     pass
190 | 
191 |         # Try parsing the entire response as JSON
192 |         try:
193 |             return json.loads(response_text)
194 |         except json.JSONDecodeError:
195 |             logger.warning(
196 |                 f"Failed to parse AI response as JSON: {response_text[:200]}"
197 |             )
198 |             raise ValueError("Could not parse AI complexity analysis response")
199 | 
200 |     def _basic_fallback_analysis(
201 |         self, thought_data: "ThoughtData"
202 |     ) -> ComplexityMetrics:
203 |         """Fallback to basic analysis if AI fails."""
204 |         logger.warning("🔄 Falling back to basic complexity analysis")
205 | 
206 |         text = thought_data.thought.lower()
207 | 
208 |         # Basic metrics
209 |         words = len(text.split())
210 |         sentences = len([s for s in text.split(".") if s.strip()])
211 |         questions = text.count("?") + text.count("？")  # ASCII and full-width
212 | 
213 |         # Simple heuristics
214 |         philosophical_terms = [
215 |             "意义",
216 |             "存在",
217 |             "生命",
218 |             "死亡",
219 |             "为什么",
220 |             "why",
221 |             "meaning",
222 |             "life",
223 |             "death",
224 |         ]
225 |         philosophical_count = sum(1 for term in philosophical_terms if term in text)
226 | 
227 |         # Basic scoring
228 |         base_score = min(words * 2 + questions * 5 + philosophical_count * 10, 100)
229 | 
230 |         return ComplexityMetrics(
231 |             complexity_score=base_score,
232 |             word_count=words,
233 |             sentence_count=max(sentences, 1),
234 |             question_count=questions,
235 |             technical_terms=philosophical_count,
236 |             branching_references=0,
237 |             research_indicators=0,
238 |             analysis_depth=philosophical_count,
239 |             philosophical_depth_boost=min(philosophical_count * 5, 15),
240 |             # Basic AI analysis results for fallback
241 |             primary_problem_type="PHILOSOPHICAL"
242 |             if philosophical_count > 0
243 |             else "GENERAL",
244 |             thinking_modes_needed=["SYNTHESIS", "CREATIVE"]
245 |             if philosophical_count > 2
246 |             else ["FACTUAL"],
247 |             analyzer_type="basic_fallback",
248 |             reasoning="Fallback analysis due to AI failure",
249 |         )
250 | 
251 | 
252 | # No more monkey patching needed - complexity_score is now a direct field
253 | 
254 | 
255 | def create_ai_complexity_analyzer() -> AIComplexityAnalyzer:
256 |     """Create AI complexity analyzer instance."""
257 |     return AIComplexityAnalyzer()
258 | 
```
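
The three-stage fallback in `_parse_json_response` is the part most worth exercising in isolation: a bare single-line JSON object first, then a `json`-fenced code block, then the entire response. Below is a self-contained sketch of that logic; the `parse_model_json` name is illustrative, not part of the module.

```python
import json
from typing import Any


def parse_model_json(response_text: str) -> dict[str, Any]:
    # Stage 1: any single line that is itself a JSON object.
    for line in response_text.strip().split("\n"):
        line = line.strip()
        if line.startswith("{") and line.endswith("}"):
            try:
                return json.loads(line)
            except json.JSONDecodeError:
                continue
    # Stage 2: the first ```json fenced block in the response.
    if "```json" in response_text:
        start = response_text.find("```json") + len("```json")
        end = response_text.find("```", start)
        if end > start:
            try:
                return json.loads(response_text[start:end].strip())
            except json.JSONDecodeError:
                pass
    # Stage 3: the entire response as JSON (raises on failure).
    return json.loads(response_text)


reply = 'Scored it:\n```json\n{"complexity_score": 42.0, "reasoning": "short"}\n```'
assert parse_model_json(reply)["complexity_score"] == 42.0
```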

--------------------------------------------------------------------------------
/src/mcp_server_mas_sequential_thinking/services/processing_orchestrator.py:
--------------------------------------------------------------------------------

```python
  1 | """Processing orchestration service for thought coordination.
  2 | 
  3 | This service handles the orchestration of different processing strategies,
  4 | coordinating between single-agent and multi-agent approaches, and managing
  5 | execution flows based on complexity analysis.
  6 | """
  7 | 
  8 | import time
  9 | from typing import TYPE_CHECKING
 10 | 
 11 | from agno.agent import Agent
 12 | 
 13 | from mcp_server_mas_sequential_thinking.config import get_model_config
 14 | from mcp_server_mas_sequential_thinking.core import (
 15 |     SessionMemory,
 16 |     ThoughtProcessingError,
 17 | )
 18 | from mcp_server_mas_sequential_thinking.utils import setup_logging
 19 | 
 20 | if TYPE_CHECKING:
 21 |     from mcp_server_mas_sequential_thinking.routing import ComplexityLevel
 22 |     from mcp_server_mas_sequential_thinking.services.response_processor import (
 23 |         ResponseProcessor,
 24 |     )
 25 |     from mcp_server_mas_sequential_thinking.services.retry_handler import (
 26 |         TeamProcessingRetryHandler,
 27 |     )
 28 | 
 29 | logger = setup_logging()
 30 | 
 31 | 
 32 | class ProcessingOrchestrator:
 33 |     """Service responsible for orchestrating different processing strategies."""
 34 | 
 35 |     def __init__(
 36 |         self,
 37 |         session: SessionMemory,
 38 |         response_processor: "ResponseProcessor",
 39 |         retry_handler: "TeamProcessingRetryHandler",
 40 |     ) -> None:
 41 |         """Initialize the processing orchestrator.
 42 | 
 43 |         Args:
 44 |             session: The session memory instance
 45 |             response_processor: Response processing service
 46 |             retry_handler: Retry handling service
 47 |         """
 48 |         self._session = session
 49 |         self._response_processor = response_processor
 50 |         self._retry_handler = retry_handler
 51 | 
 52 |         # Initialize performance tracking
 53 |         from mcp_server_mas_sequential_thinking.infrastructure import (
 54 |             MetricsLogger,
 55 |         )
 56 | 
 57 |         self._metrics_logger = MetricsLogger()
 58 | 
 59 |     async def execute_single_agent_processing(
 60 |         self, input_prompt: str, simplified: bool = False
 61 |     ) -> str:
 62 |         """Execute single-agent processing for simple thoughts.
 63 | 
 64 |         Args:
 65 |             input_prompt: The input prompt to process
 66 |             simplified: Whether to use simplified prompt format
 67 | 
 68 |         Returns:
 69 |             Processed response content
 70 |         """
 71 |         try:
 72 |             # Create a lightweight agent for single processing
 73 |             simple_agent = self._create_simple_agent(
 74 |                 processing_type="simple thought" if simplified else "thought",
 75 |                 use_markdown=not simplified,
 76 |             )
 77 | 
 78 |             # Optionally simplify the prompt
 79 |             prompt_to_use = (
 80 |                 self._create_simplified_prompt(input_prompt)
 81 |                 if simplified
 82 |                 else input_prompt
 83 |             )
 84 | 
 85 |             # Log single agent call details
 86 |             self._log_single_agent_call(simple_agent, prompt_to_use)
 87 | 
 88 |             start_time = time.time()
 89 |             response = await simple_agent.arun(prompt_to_use)
 90 |             processing_time = time.time() - start_time
 91 | 
 92 |             # Extract content from Agno RunOutput
 93 |             response_content = self._extract_response_content(response)
 94 | 
 95 |             # Log single agent response details
 96 |             self._log_single_agent_response(response_content, processing_time)
 97 | 
 98 |             logger.info("Single-agent processing completed successfully")
 99 |             return response_content
100 | 
101 |         except Exception as e:
102 |             logger.warning(f"Single-agent processing failed, falling back to team: {e}")
103 |             # Fallback to team processing
104 |             return await self.execute_team_processing(input_prompt)
105 | 
106 |     async def execute_team_processing(self, input_prompt: str) -> str:
107 |         """Execute team processing without timeout restrictions.
108 | 
109 |         Args:
110 |             input_prompt: The input prompt to process
111 | 
112 |         Returns:
113 |             Processed response content
114 |         """
115 |         try:
116 |             response = await self._session.team.arun(input_prompt)
117 |             return self._extract_response_content(response)
118 |         except Exception as e:
119 |             raise ThoughtProcessingError(f"Team coordination failed: {e}") from e
120 | 
121 |     async def execute_team_processing_with_retries(
122 |         self, input_prompt: str, complexity_level: "ComplexityLevel"
123 |     ) -> str:
124 |         """Execute team processing using centralized retry handler.
125 | 
126 |         Args:
127 |             input_prompt: The input prompt to process
128 |             complexity_level: Complexity level for retry strategy
129 | 
130 |         Returns:
131 |             Processed response content
132 |         """
133 |         team_info = self._get_team_info()
134 |         self._metrics_logger.log_team_details(team_info)
135 |         self._metrics_logger.log_input_details(input_prompt)
136 | 
137 |         async def team_operation():
138 |             start_time = time.time()
139 |             response = await self._session.team.arun(input_prompt)
140 |             processing_time = time.time() - start_time
141 | 
142 |             processed_response = self._response_processor.process_response(
143 |                 response, processing_time, "MULTI-AGENT TEAM"
144 |             )
145 | 
146 |             self._metrics_logger.record_processing(processing_time, True)
147 |             return processed_response.content
148 | 
149 |         return await self._retry_handler.execute_team_processing(
150 |             team_operation, team_info, complexity_level.value
151 |         )
152 | 
153 |     def _create_simple_agent(
154 |         self, processing_type: str = "thought", use_markdown: bool = False
155 |     ) -> Agent:
156 |         """Create a simple agent for single-thought processing.
157 | 
158 |         Args:
159 |             processing_type: Type of processing for instructions
160 |             use_markdown: Whether to enable markdown formatting
161 | 
162 |         Returns:
163 |             Configured Agent instance
164 |         """
165 |         model_config = get_model_config()
166 |         single_model = model_config.create_team_model()
167 | 
168 |         return Agent(
169 |             name="SimpleProcessor",
170 |             role="Simple Thought Processor",
171 |             description=f"Processes {processing_type}s efficiently without multi-agent overhead",
172 |             model=single_model,
173 |             instructions=[
174 |                 f"You are processing a {processing_type} efficiently.",
175 |                 "Provide a focused, clear response.",
176 |                 "Include guidance for the next step.",
177 |                 "Be concise but helpful.",
178 |             ],
179 |             markdown=use_markdown,
180 |         )
181 | 
182 |     def _create_simplified_prompt(self, input_prompt: str) -> str:
183 |         """Create a simplified prompt for single-agent processing.
184 | 
185 |         Args:
186 |             input_prompt: The original input prompt
187 | 
188 |         Returns:
189 |             Simplified prompt
190 |         """
191 |         return f"""Process this thought efficiently:
192 | 
193 | {input_prompt}
194 | 
195 | Provide a focused response with clear guidance for the next step."""
196 | 
197 |     def _extract_response_content(self, response) -> str:
198 |         """Extract clean content from Agno RunOutput objects.
199 | 
200 |         Args:
201 |             response: The response object from agent processing
202 | 
203 |         Returns:
204 |             Extracted text content
205 |         """
206 |         from mcp_server_mas_sequential_thinking.services.response_formatter import (
207 |             ResponseExtractor,
208 |         )
209 | 
210 |         return ResponseExtractor.extract_content(response)
211 | 
212 |     def _get_team_info(self) -> dict:
213 |         """Extract team information for logging and retry handling.
214 | 
215 |         Returns:
216 |             Dictionary containing team information
217 |         """
218 |         team = self._session.team
219 |         return {
220 |             "name": team.name,
221 |             "member_count": len(team.members),
222 |             "leader_class": team.model.__class__.__name__,
223 |             "leader_model": getattr(team.model, "id", "unknown"),
224 |             "member_names": ", ".join([m.name for m in team.members]),
225 |         }
226 | 
227 |     def _log_single_agent_call(self, agent: Agent, prompt: str) -> None:
228 |         """Log single agent call details.
229 | 
230 |         Args:
231 |             agent: The agent being used
232 |             prompt: The prompt being processed
233 |         """
234 |         logger.info("🤖 SINGLE-AGENT CALL:")
235 |         logger.info(f"  Agent: {agent.name} ({agent.role})")
236 |         logger.info(
237 |             f"  Model: {getattr(agent.model, 'id', 'unknown')} ({agent.model.__class__.__name__})"
238 |         )
239 |         self._log_input_details(prompt)
240 | 
241 |     def _log_single_agent_response(self, content: str, processing_time: float) -> None:
242 |         """Log single agent response details.
243 | 
244 |         Args:
245 |             content: The response content
246 |             processing_time: Processing time in seconds
247 |         """
248 |         logger.info("✅ SINGLE-AGENT RESPONSE:")
249 |         self._log_output_details(content, processing_time)
250 | 
251 |     def _log_input_details(
252 |         self, input_prompt: str, context_description: str = "input"
253 |     ) -> None:
254 |         """Log input details with consistent formatting.
255 | 
256 |         Args:
257 |             input_prompt: The input prompt to log
258 |             context_description: Description of the context
259 |         """
260 |         logger.info(f"  Input length: {len(input_prompt)} chars")
261 |         logger.info(f"  Full {context_description}:\n{input_prompt}")
262 | 
263 |         # Use performance metrics constant if available
264 |         separator_length = 60  # Default fallback
265 |         try:
266 |             from mcp_server_mas_sequential_thinking.config import PerformanceMetrics
267 | 
268 |             separator_length = PerformanceMetrics.SEPARATOR_LENGTH
269 |         except ImportError:
270 |             pass
271 | 
272 |         logger.info(f"  {'=' * separator_length}")
273 | 
274 |     def _log_output_details(
275 |         self,
276 |         response_content: str,
277 |         processing_time: float,
278 |         context_description: str = "response",
279 |     ) -> None:
280 |         """Log output details with consistent formatting.
281 | 
282 |         Args:
283 |             response_content: The response content to log
284 |             processing_time: Processing time in seconds
285 |             context_description: Description of the context
286 |         """
287 |         logger.info(f"  Processing time: {processing_time:.3f}s")
288 |         logger.info(f"  Output length: {len(response_content)} chars")
289 |         logger.info(f"  Full {context_description}:\n{response_content}")
290 | 
291 |         # Use performance metrics constant if available
292 |         separator_length = 60  # Default fallback
293 |         try:
294 |             from mcp_server_mas_sequential_thinking.config import PerformanceMetrics
295 | 
296 |             separator_length = PerformanceMetrics.SEPARATOR_LENGTH
297 |         except ImportError:
298 |             pass
299 | 
300 |         logger.info(f"  {'=' * separator_length}")
301 | 
```
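
A condensed sketch of the degradation path in `execute_single_agent_processing`: try the cheap single-agent call first, and on any failure fall back to full team processing. `run_single` and `run_team` are stand-ins for the real `Agent.arun` / `Team.arun` calls, not package APIs.

```python
import asyncio


async def run_single(prompt: str) -> str:
    # Stand-in for the lightweight SimpleProcessor agent call.
    raise RuntimeError("single-agent path unavailable")


async def run_team(prompt: str) -> str:
    # Stand-in for self._session.team.arun(prompt).
    return f"team result for: {prompt}"


async def process(prompt: str) -> str:
    try:
        return await run_single(prompt)
    except Exception:
        # Mirrors the orchestrator: degrade to team processing instead of failing.
        return await run_team(prompt)


print(asyncio.run(process("analyze trade-offs")))  # team result for: analyze trade-offs
```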