This is page 8 of 12. Use http://codebase.md/minipuft/claude-prompts-mcp?page={x} to view the full context. # Directory Structure ``` ├── .actrc ├── .gitattributes ├── .github │ └── workflows │ ├── ci.yml │ ├── mcp-compliance.yml │ └── pr-validation.yml ├── .gitignore ├── agent.md ├── assets │ └── logo.png ├── CLAUDE.md ├── config │ └── framework-state.json ├── docs │ ├── architecture.md │ ├── chain-modification-examples.md │ ├── contributing.md │ ├── enhanced-gate-system.md │ ├── execution-architecture-guide.md │ ├── installation-guide.md │ ├── mcp-tool-usage-guide.md │ ├── mcp-tools-reference.md │ ├── prompt-format-guide.md │ ├── prompt-management.md │ ├── prompt-vs-template-guide.md │ ├── README.md │ ├── template-development-guide.md │ ├── TODO.md │ ├── troubleshooting.md │ └── version-history.md ├── LICENSE ├── local-test.sh ├── plans │ ├── nunjucks-dynamic-chain-orchestration.md │ ├── outputschema-realtime-progress-and-validation.md │ ├── parallel-conditional-execution-analysis.md │ ├── sqlite-storage-migration.md │ └── symbolic-command-language-implementation.md ├── README.md ├── scripts │ ├── setup-windows-testing.sh │ ├── test_server.js │ ├── test-all-platforms.sh │ └── windows-tests │ ├── test-windows-paths.js │ ├── test-windows-startup.sh │ └── windows-env.sh └── server ├── config │ ├── framework-state.json │ └── tool-descriptions.json ├── config.json ├── jest.config.cjs ├── LICENSE ├── package-lock.json ├── package.json ├── prompts │ ├── analysis │ │ ├── advanced_analysis_engine.md │ │ ├── content_analysis.md │ │ ├── deep_analysis.md │ │ ├── deep_research.md │ │ ├── markdown_notebook.md │ │ ├── note_integration.md │ │ ├── note_refinement.md │ │ ├── notes.md │ │ ├── progressive_research.md │ │ ├── prompts.json │ │ ├── query_refinement.md │ │ └── review.md │ ├── architecture │ │ ├── prompts.json │ │ └── strategic-system-alignment.md │ ├── content_processing │ │ ├── format_enhancement.md │ │ ├── noteIntegration.md │ │ ├── obsidian_metadata_optimizer.md │ │ ├── prompts.json │ │ ├── vault_related_notes_finder.md │ │ └── video_notes_enhanced.md │ ├── debugging │ │ ├── analyze_logs.md │ │ └── prompts.json │ ├── development │ │ ├── analyze_code_structure.md │ │ ├── analyze_file_structure.md │ │ ├── code_review_optimization_chain.md │ │ ├── component_flow_analysis.md │ │ ├── create_modularization_plan.md │ │ ├── detect_code_issues.md │ │ ├── detect_project_commands.md │ │ ├── expert_code_implementation.md │ │ ├── generate_comprehensive_claude_md.md │ │ ├── prompts.json │ │ ├── strategicImplement.md │ │ ├── suggest_code_improvements.md │ │ └── transform_code_to_modules.md │ ├── documentation │ │ ├── create_docs_chain.md │ │ ├── docs-content-creation.md │ │ ├── docs-content-planning.md │ │ ├── docs-final-assembly.md │ │ ├── docs-project-analysis.md │ │ ├── docs-review-refinement.md │ │ └── prompts.json │ ├── education │ │ ├── prompts.json │ │ └── vault_integrated_notes.md │ ├── general │ │ ├── diagnose.md │ │ └── prompts.json │ ├── promptsConfig.json │ └── testing │ ├── final_verification_test.md │ └── prompts.json ├── README.md ├── scripts │ └── validate-dependencies.js ├── src │ ├── api │ │ └── index.ts │ ├── chain-session │ │ └── manager.ts │ ├── config │ │ └── index.ts │ ├── Dockerfile │ ├── execution │ │ ├── context │ │ │ ├── context-resolver.ts │ │ │ ├── framework-injector.ts │ │ │ └── index.ts │ │ ├── index.ts │ │ ├── parsers │ │ │ ├── argument-parser.ts │ │ │ ├── index.ts │ │ │ └── unified-command-parser.ts │ │ └── types.ts │ ├── frameworks │ │ ├── framework-manager.ts │ │ ├── 
framework-state-manager.ts │ │ ├── index.ts │ │ ├── integration │ │ │ ├── framework-semantic-integration.ts │ │ │ └── index.ts │ │ ├── methodology │ │ │ ├── guides │ │ │ │ ├── 5w1h-guide.ts │ │ │ │ ├── cageerf-guide.ts │ │ │ │ ├── react-guide.ts │ │ │ │ └── scamper-guide.ts │ │ │ ├── index.ts │ │ │ ├── interfaces.ts │ │ │ └── registry.ts │ │ ├── prompt-guidance │ │ │ ├── index.ts │ │ │ ├── methodology-tracker.ts │ │ │ ├── service.ts │ │ │ ├── system-prompt-injector.ts │ │ │ └── template-enhancer.ts │ │ └── types │ │ ├── index.ts │ │ ├── integration-types.ts │ │ ├── methodology-types.ts │ │ └── prompt-guidance-types.ts │ ├── gates │ │ ├── constants.ts │ │ ├── core │ │ │ ├── gate-definitions.ts │ │ │ ├── gate-loader.ts │ │ │ ├── gate-validator.ts │ │ │ ├── index.ts │ │ │ └── temporary-gate-registry.ts │ │ ├── definitions │ │ │ ├── code-quality.json │ │ │ ├── content-structure.json │ │ │ ├── educational-clarity.json │ │ │ ├── framework-compliance.json │ │ │ ├── research-quality.json │ │ │ ├── security-awareness.json │ │ │ └── technical-accuracy.json │ │ ├── gate-state-manager.ts │ │ ├── guidance │ │ │ ├── FrameworkGuidanceFilter.ts │ │ │ └── GateGuidanceRenderer.ts │ │ ├── index.ts │ │ ├── intelligence │ │ │ ├── GatePerformanceAnalyzer.ts │ │ │ └── GateSelectionEngine.ts │ │ ├── templates │ │ │ ├── code_quality_validation.md │ │ │ ├── educational_clarity_validation.md │ │ │ ├── framework_compliance_validation.md │ │ │ ├── research_self_validation.md │ │ │ ├── security_validation.md │ │ │ ├── structure_validation.md │ │ │ └── technical_accuracy_validation.md │ │ └── types.ts │ ├── index.ts │ ├── logging │ │ └── index.ts │ ├── mcp-tools │ │ ├── config-utils.ts │ │ ├── constants.ts │ │ ├── index.ts │ │ ├── prompt-engine │ │ │ ├── core │ │ │ │ ├── engine.ts │ │ │ │ ├── executor.ts │ │ │ │ ├── index.ts │ │ │ │ └── types.ts │ │ │ ├── index.ts │ │ │ ├── processors │ │ │ │ ├── response-formatter.ts │ │ │ │ └── template-processor.ts │ │ │ └── utils │ │ │ ├── category-extractor.ts │ │ │ ├── classification.ts │ │ │ ├── context-builder.ts │ │ │ └── validation.ts │ │ ├── prompt-manager │ │ │ ├── analysis │ │ │ │ ├── comparison-engine.ts │ │ │ │ ├── gate-analyzer.ts │ │ │ │ └── prompt-analyzer.ts │ │ │ ├── core │ │ │ │ ├── index.ts │ │ │ │ ├── manager.ts │ │ │ │ └── types.ts │ │ │ ├── index.ts │ │ │ ├── operations │ │ │ │ └── file-operations.ts │ │ │ ├── search │ │ │ │ ├── filter-parser.ts │ │ │ │ └── prompt-matcher.ts │ │ │ └── utils │ │ │ ├── category-manager.ts │ │ │ └── validation.ts │ │ ├── shared │ │ │ └── structured-response-builder.ts │ │ ├── system-control.ts │ │ ├── tool-description-manager.ts │ │ └── types │ │ └── shared-types.ts │ ├── metrics │ │ ├── analytics-service.ts │ │ ├── index.ts │ │ └── types.ts │ ├── performance │ │ ├── index.ts │ │ └── monitor.ts │ ├── prompts │ │ ├── category-manager.ts │ │ ├── converter.ts │ │ ├── file-observer.ts │ │ ├── hot-reload-manager.ts │ │ ├── index.ts │ │ ├── loader.ts │ │ ├── promptUtils.ts │ │ ├── registry.ts │ │ └── types.ts │ ├── runtime │ │ ├── application.ts │ │ └── startup.ts │ ├── semantic │ │ ├── configurable-semantic-analyzer.ts │ │ └── integrations │ │ ├── index.ts │ │ └── llm-clients.ts │ ├── server │ │ ├── index.ts │ │ └── transport │ │ └── index.ts │ ├── smithery.yaml │ ├── text-references │ │ ├── conversation.ts │ │ └── index.ts │ ├── types │ │ └── index.ts │ ├── types.ts │ └── utils │ ├── chainUtils.ts │ ├── errorHandling.ts │ ├── global-resource-tracker.ts │ ├── index.ts │ └── jsonUtils.ts ├── tests │ ├── ci-startup-validation.js │ ├── 
enhanced-validation │ │ ├── contract-validation │ │ │ ├── contract-test-suite.js │ │ │ ├── interface-contracts.js │ │ │ └── interface-contracts.ts │ │ ├── environment-validation │ │ │ ├── environment-parity-checker.js │ │ │ └── environment-test-suite.js │ │ ├── lifecycle-validation │ │ │ ├── lifecycle-test-suite.js │ │ │ └── process-lifecycle-validator.js │ │ └── validation-orchestrator.js │ ├── helpers │ │ └── test-helpers.js │ ├── integration │ │ ├── mcp-tools.test.ts │ │ ├── server-startup.test.ts │ │ └── unified-parsing-integration.test.ts │ ├── performance │ │ ├── parsing-system-benchmark.test.ts │ │ └── server-performance.test.ts │ ├── scripts │ │ ├── consolidated-tools.js │ │ ├── establish-performance-baselines.js │ │ ├── functional-mcp-validation.js │ │ ├── integration-mcp-tools.js │ │ ├── integration-routing-system.js │ │ ├── integration-server-startup.js │ │ ├── integration-unified-parsing.js │ │ ├── methodology-guides.js │ │ ├── performance-memory.js │ │ ├── runtime-integration.js │ │ ├── unit-conversation-manager.js │ │ ├── unit-semantic-analyzer.js │ │ └── unit-unified-parsing.js │ ├── setup.ts │ ├── test-enhanced-parsing.js │ └── unit │ ├── conversation-manager.test.ts │ ├── semantic-analyzer-three-tier.test.ts │ └── unified-parsing-system.test.ts ├── tsconfig.json └── tsconfig.test.json ``` # Files -------------------------------------------------------------------------------- /docs/prompt-management.md: -------------------------------------------------------------------------------- ```markdown # Prompt Management This document describes how to manage prompts in the MCP server using the **consolidated prompt management system** through the `prompt_manager` tool and distributed prompts configuration. ## Consolidated Architecture Overview The MCP server uses **3 consolidated tools** for all prompt management operations: - **`prompt_manager`**: Complete lifecycle management with intelligent analysis and filtering - **`prompt_engine`**: Execute prompts with framework integration and gate validation - **`system_control`**: Framework switching, analytics, and system administration **Key Benefits:** - **Action-Based Interface**: Single tools with multiple actions instead of separate tools - **Intelligent Features**: Type analysis, framework integration, advanced filtering - **MCP Protocol Only**: No HTTP API - works through MCP-compatible clients ## Distributed Prompts Configuration System The server organizes prompts using a distributed configuration system where prompts are organized by category, with each category having its own configuration file. ### Key Components 1. **promptsConfig.json** - Main configuration file defining categories and imports 2. **Category-specific prompts.json files** - Each category has its own prompts.json file 3. **Prompt .md files** - Individual prompt templates using Nunjucks templating ## Main Configuration (promptsConfig.json) The main configuration file defines all available categories and specifies which category-specific prompts.json files to import: ```json { "categories": [ { "id": "general", "name": "General", "description": "General-purpose prompts for everyday tasks" }, { "id": "analysis", "name": "Analysis", "description": "Analytical and research-focused prompts" }, { "id": "development", "name": "Development", "description": "Software development and coding prompts" } // More categories... ], "imports": [ "prompts/general/prompts.json", "prompts/analysis/prompts.json", "prompts/development/prompts.json" // More imports... 
  ]
}
```

### Categories

Each category in the `categories` array has:

- `id` (string) - Unique identifier for the category
- `name` (string) - Display name for the category
- `description` (string) - Description of the category's purpose

### Imports

The `imports` array lists paths to category-specific prompts.json files, relative to the server's working directory.

## Category-Specific Prompts Files

Each category has its own prompts.json file (e.g., `prompts/general/prompts.json`):

```json
{
  "prompts": [
    {
      "id": "content_analysis",
      "name": "Content Analysis",
      "category": "analysis",
      "description": "Systematic analysis of content using structured methodology",
      "file": "content_analysis.md",
      "arguments": [
        {
          "name": "content",
          "description": "The content to analyze",
          "required": true
        },
        {
          "name": "focus",
          "description": "Specific focus area for analysis",
          "required": false
        }
      ]
    }
    // More prompts...
  ]
}
```

Each prompt has:

- `id` (string) - Unique identifier
- `name` (string) - Display name
- `category` (string) - Category this prompt belongs to
- `description` (string) - What the prompt does
- `file` (string) - Path to .md file with template
- `arguments` (array) - Arguments the prompt accepts
- `isChain` (boolean, optional) - Whether this is a chain prompt
- `chainSteps` (array, optional) - Steps for chain prompts
- `onEmptyInvocation` (string, optional) - Behavior when invoked without arguments

## Consolidated Prompt Management

### prompt_manager Tool Actions

The `prompt_manager` tool provides comprehensive prompt lifecycle management through **action-based commands**:

#### Core Actions

- `list` - List and filter prompts with intelligent search
- `create` - Auto-detect type and create appropriate prompt
- `create_prompt` - Create basic prompt (fast variable substitution)
- `create_template` - Create framework-enhanced template
- `update` - Update existing prompts
- `delete` - Delete prompts with safety checks

#### Advanced Actions

- `analyze_type` - Analyze prompt and recommend execution type
- `migrate_type` - Convert between prompt types (prompt ↔ template)
- `modify` - Precision editing of specific sections
- `reload` - Trigger hot-reload of prompt system
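Because the server speaks the MCP protocol only, these action commands are ultimately delivered as `tools/call` requests by an MCP-compatible client. As a rough illustration only (the exact argument schema may differ; the `action` and `filter` field names here are inferred from the command syntax used throughout this guide), a `prompt_manager list` call could look like:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "prompt_manager",
    "arguments": {
      "action": "list",
      "filter": "category:analysis type:template"
    }
  }
}
```

The shorthand `prompt_manager <action> key="value"` notation used in the examples below maps onto this structure, with each `key="value"` pair becoming a field in `arguments`.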
### Basic Prompt Management

#### Listing Prompts

```bash
# List all prompts
prompt_manager list

# List prompts in specific category
prompt_manager list filter="category:analysis"

# List by execution type
prompt_manager list filter="type:template"

# Combined filters
prompt_manager list filter="category:development type:chain"

# Intent-based search
prompt_manager list filter="intent:debugging"
```

#### Creating Prompts

```bash
# Auto-detect appropriate type
prompt_manager create name="Data Processor" category="analysis" \
  description="Process and analyze data systematically" \
  content="Analyze {{data}} and provide insights on {{focus_area}}"

# Create basic prompt (fast execution)
prompt_manager create_prompt name="Simple Greeting" category="general" \
  description="Basic personalized greeting" \
  content="Hello {{name}}, welcome to {{service}}!" \
  arguments='[{"name":"name","required":true},{"name":"service","required":false}]'

# Create framework-enhanced template
prompt_manager create_template name="Research Analysis" category="analysis" \
  description="Comprehensive research analysis using active methodology" \
  content="Research {{topic}} using systematic approach. Focus on {{aspects}}." \
  arguments='[{"name":"topic","required":true},{"name":"aspects","required":false}]'
```

#### Updating Prompts

```bash
# Update prompt content
prompt_manager update id="data_processor" \
  content="Enhanced analysis of {{data}} with focus on {{methodology}}"

# Update prompt metadata
prompt_manager update id="greeting_prompt" \
  name="Enhanced Greeting" \
  description="Improved greeting with personalization"

# Precision section editing
prompt_manager modify id="analysis_prompt" \
  section="User Message Template" \
  new_content="Analyze {{content}} using {{framework}} methodology"
```

#### Deleting Prompts

```bash
# Delete prompt with safety checks
prompt_manager delete id="old_prompt"

# The system will warn if prompt is referenced by chains or other prompts
```

### Advanced Features

#### Type Analysis & Migration

```bash
# Analyze existing prompt for optimization recommendations
prompt_manager analyze_type id="basic_analysis"
# Returns: execution type, framework suitability, improvement suggestions

# Convert prompt to framework-enhanced template
prompt_manager migrate_type id="simple_prompt" target_type="template"

# Convert template back to basic prompt for speed
prompt_manager migrate_type id="complex_template" target_type="prompt"
```

#### Framework Integration

```bash
# Switch to desired framework before creating templates
system_control switch_framework framework="CAGEERF" reason="Complex analysis needed"

# Create framework-aware template
prompt_manager create_template name="Strategic Analysis" category="business" \
  description="CAGEERF-enhanced strategic analysis" \
  content="Analyze {{situation}} using comprehensive structured approach"

# Templates automatically use active framework methodology
```

#### Chain Prompt Creation

```bash
# Create multi-step chain prompt
prompt_manager create_template name="Research Workflow" category="research" \
  description="Multi-step research and analysis workflow" \
  content="Research workflow for {{topic}} with comprehensive analysis" \
  chain_steps='[
    {
      "promptId": "data_collection",
      "stepName": "Data Collection",
      "inputMapping": {"topic": "research_topic"},
      "outputMapping": {"collected_data": "step1_output"}
    },
    {
      "promptId": "data_analysis",
      "stepName": "Analysis",
      "inputMapping": {"data": "step1_output"},
      "outputMapping": {"analysis_result": "final_output"}
    }
  ]'
```

### Intelligent Filtering System

The `prompt_manager list` command supports advanced filtering:

#### Filter Syntax

- **Category**: `category:analysis`, `category:development`
- **Type**: `type:prompt`, `type:template`, `type:chain`
- **Intent**: `intent:debugging`, `intent:analysis`, `intent:creation`
- **Confidence**: `confidence:>80`, `confidence:70-90`
- **Framework**: `framework:CAGEERF`, `framework:ReACT`

#### Advanced Examples

```bash
# Find high-confidence templates in analysis category
prompt_manager list filter="category:analysis type:template confidence:>85"

# Find debugging-related prompts
prompt_manager list filter="intent:debugging"

# Find prompts suitable for current framework
system_control status  # Check active framework
prompt_manager list filter="framework:CAGEERF type:template"
```
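The repository ships a dedicated filter parser (`server/src/mcp-tools/prompt-manager/search/filter-parser.ts`). The sketch below is not that implementation, only a minimal illustration of how a filter string like the ones above could be decomposed into structured criteria; all names are hypothetical.

```typescript
// Illustrative only -- not the server's actual filter-parser.ts.
// Splits "category:analysis type:template confidence:>85" into structured criteria.
interface FilterCriteria {
  category?: string;
  type?: string;
  intent?: string;
  framework?: string;
  confidence?: { min?: number; max?: number };
}

function parseFilter(filter: string): FilterCriteria {
  const criteria: FilterCriteria = {};
  for (const token of filter.trim().split(/\s+/)) {
    const [key, value] = token.split(":", 2);
    if (!value) continue;
    switch (key) {
      case "category":
      case "type":
      case "intent":
      case "framework":
        criteria[key] = value;
        break;
      case "confidence": {
        // Supports the ">80" (minimum) and "70-90" (range) forms shown above.
        const gt = value.match(/^>(\d+)$/);
        const range = value.match(/^(\d+)-(\d+)$/);
        if (gt) criteria.confidence = { min: Number(gt[1]) };
        else if (range) criteria.confidence = { min: Number(range[1]), max: Number(range[2]) };
        break;
      }
    }
  }
  return criteria;
}

// Example: yields { category: "analysis", type: "template", confidence: { min: 85 } }
console.log(parseFilter("category:analysis type:template confidence:>85"));
```

A real parser would also need to handle free-text search terms and malformed tokens gracefully.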
## Advanced Templating with Nunjucks

The prompt templating system supports **Nunjucks** for dynamic prompt construction:

### Key Features

- **Conditional Logic (`{% if %}`)**: Show/hide content based on arguments
- **Loops (`{% for %}`)**: Iterate over arrays dynamically
- **Standard Placeholders**: `{{variable}}` syntax continues to work
- **Macros (`{% macro %}`)**: Reusable template components
- **Filters (`|`)**: Transform data (upper, lower, default, etc.)

### Template Processing

1. **Nunjucks Rendering**: Process `{% %}` tags and `{{ }}` placeholders
2. **Text Reference Expansion**: Handle long text references (ref:xyz)
3. **Framework Enhancement**: Apply active methodology if template type

(A sketch of how these three stages fit together follows the examples below.)

### Examples

#### Conditional Logic

```nunjucks
{% if user_name %}
Hello, {{user_name}}! Thanks for providing your name.
{% else %}
Hello there!
{% endif %}

{% if analysis_type == "comprehensive" %}
This requires detailed CAGEERF methodology analysis.
{% elif analysis_type == "quick" %}
Using streamlined ReACT approach.
{% endif %}
```

#### Loops

```nunjucks
Please analyze the following data points:

{% for item in data_list %}
- {{ loop.index }}. {{ item }}
{% endfor %}
```

#### Macros for Reusability

```nunjucks
{% macro analysis_section(title, content, methodology) %}
## {{ title }}

**Methodology**: {{ methodology }}
**Content**: {{ content }}
{% endmacro %}

{{ analysis_section("Market Analysis", market_data, "CAGEERF") }}
{{ analysis_section("Risk Assessment", risk_data, "5W1H") }}
```

#### Filters

```nunjucks
Topic: {{ topic_name | upper }}
Priority: {{ priority_level | default("Medium") }}
Items: {{ items | length }} total
Summary: {{ long_text | truncate(100) }}
```
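The examples above cover stage 1 (Nunjucks rendering). As a minimal sketch of how all three processing stages might fit together, the snippet below uses the real `nunjucks.renderString` API, but the reference-expansion and framework-enhancement helpers are hypothetical stand-ins, not the server's actual implementation.

```typescript
import nunjucks from "nunjucks";

// Hypothetical stand-ins for stages 2 and 3 -- the real server logic differs.
const textReferences = new Map<string, string>([["ref:example", "full stored reference text"]]);

function expandTextReferences(text: string): string {
  // Replace ref:<id> tokens with their stored long-form text.
  return text.replace(/ref:\w+/g, (ref) => textReferences.get(ref) ?? ref);
}

function applyFrameworkEnhancement(text: string): string {
  // Placeholder: the real server injects guidance from the active methodology (e.g. CAGEERF).
  return `[Active framework guidance would be injected here]\n\n${text}`;
}

function processTemplate(
  template: string,
  args: Record<string, unknown>,
  executionType: "prompt" | "template"
): string {
  // Stage 1: Nunjucks rendering ({% %} tags and {{ }} placeholders)
  let rendered = nunjucks.renderString(template, args);

  // Stage 2: expand long text references (ref:xyz)
  rendered = expandTextReferences(rendered);

  // Stage 3: framework enhancement applies only to framework-enhanced templates
  if (executionType === "template") {
    rendered = applyFrameworkEnhancement(rendered);
  }
  return rendered;
}

// Example: prints "Analyze sales data and provide insights on trends"
console.log(
  processTemplate(
    "Analyze {{data}} and provide insights on {{focus_area}}",
    { data: "sales data", focus_area: "trends" },
    "prompt"
  )
);
```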
## Integration with Consolidated Architecture

### MCP Tool Coordination

```bash
# Complete workflow using all 3 tools
system_control status  # Check system state
system_control switch_framework framework="CAGEERF"  # Set methodology
prompt_manager create_template name="..." category="..."  # Create template
prompt_engine >>template_name input="data" gate_validation=true  # Execute with gates
system_control analytics  # Monitor performance
```

### Framework-Aware Operations

```bash
# Framework affects template creation and execution
system_control list_frameworks  # See available frameworks
system_control switch_framework framework="ReACT" reason="Problem-solving focus"

# Templates created after switching inherit framework
prompt_manager create_template name="Problem Solver" category="analysis"

# Execute with framework enhancement
prompt_engine >>problem_solver issue="complex problem" execution_mode="template"
```

### Performance Monitoring

```bash
# Monitor prompt management operations
system_control analytics include_history=true
# Shows: prompt creation stats, execution statistics, framework usage

# Check system health
system_control health
# Includes: prompt loading status, template processing health, framework integration
```

## File Management

### Automatic File Operations

When using `prompt_manager`, the system automatically:

1. **Creates .md files** in appropriate category directories
2. **Updates prompts.json** in category folders
3. **Maintains file consistency** across configuration and files
4. **Triggers hot-reload** to refresh the system

### File Structure

```
prompts/
├── analysis/
│   ├── prompts.json           # Category prompt registry
│   ├── content_analysis.md    # Individual prompt templates
│   └── research_workflow.md
├── development/
│   ├── prompts.json
│   ├── code_review.md
│   └── debugging_guide.md
└── promptsConfig.json         # Main configuration
```

### Generated .md File Structure

```markdown
# Prompt Name

## Description

Prompt description explaining purpose and usage

## System Message

Optional system message for framework enhancement

## User Message Template

Template content with {{variables}} and Nunjucks logic

## Arguments

- name: Description (required/optional)
- focus: Analysis focus area (optional)

## Chain Steps (for chain prompts)

1. Step 1: Data Collection
2. Step 2: Analysis
3. Step 3: Recommendations
```

## Troubleshooting

### Common Issues

#### Tool Not Found Errors

- **Issue**: `create_category tool not found`
- **Solution**: Use `prompt_manager` with action: `prompt_manager create_category`

#### Legacy Tool References

- **Issue**: Documentation mentions `update_prompt` standalone tool
- **Solution**: Use consolidated tool: `prompt_manager update id="..." content="..."`

#### HTTP API Errors

- **Issue**: HTTP fetch examples don't work
- **Solution**: MCP server uses MCP protocol only - use MCP-compatible clients

#### Framework Integration Issues

- **Issue**: Templates not getting framework enhancement
- **Solution**: Verify active framework with `system_control status` and use `create_template` action

### Debug Commands

```bash
# Check system health including prompt loading
system_control health

# Verify prompt registration
prompt_manager list

# Check framework integration
system_control status

# View comprehensive diagnostics
system_control diagnostics
```

## Best Practices

### Prompt Type Selection

- **Basic Prompts**: Use `create_prompt` for simple variable substitution (fastest)
- **Framework Templates**: Use `create_template` for analysis, reasoning, complex tasks
- **Chains**: Provide `chain_steps` array for multi-step workflows - chain status detected automatically

### Framework Integration

- Switch to appropriate framework before creating templates
- Use `analyze_type` to get recommendations for existing prompts
- Use `migrate_type` to upgrade prompts for framework enhancement

### Organization

- Group related prompts into logical categories
- Use descriptive names and comprehensive descriptions
- Leverage Nunjucks for maintainable, reusable templates
- Test prompts with various argument combinations

### Performance Optimization

- Use basic prompts for simple operations (bypasses framework overhead)
- Use templates when methodology enhancement adds value
- Monitor performance with `system_control analytics`
- Consider prompt complexity vs. execution speed trade-offs

## Advanced Workflows

### Template Development Workflow

```bash
# 1. Analyze requirements
prompt_manager analyze_type id="existing_prompt"  # If converting existing

# 2. Set appropriate framework
system_control switch_framework framework="CAGEERF"

# 3. Create framework-enhanced template
prompt_manager create_template name="Advanced Analysis" category="research"

# 4. Test execution with gates
prompt_engine >>advanced_analysis input="test data" gate_validation=true

# 5. Monitor performance
system_control analytics
```

### Chain Development Workflow

```bash
# 1.
Create individual step prompts prompt_manager create_template name="collect_data" category="research" prompt_manager create_template name="analyze_data" category="research" prompt_manager create_template name="generate_insights" category="research" # 2. Create chain prompt linking steps prompt_manager create_template name="research_pipeline" category="research" \ chain_steps='[{"promptId":"collect_data","stepName":"Collection"}, {"promptId":"analyze_data","stepName":"Analysis"}]' # 3. Execute complete chain with LLM coordination (requires semantic LLM integration) prompt_engine >>research_pipeline topic="market analysis" llm_driven_execution=true # 4. Monitor chain execution system_control status # Check execution state ``` ## Migration from Legacy Tools If you have references to old tool names: | Legacy Tool | Consolidated Usage | | ----------------------- | ----------------------------------- | | `create_category` | `prompt_manager create_category` | | `update_prompt` | `prompt_manager create` or `update` | | `delete_prompt` | `prompt_manager delete` | | `modify_prompt_section` | `prompt_manager modify` | | `reload_prompts` | `prompt_manager reload` | | `listprompts` | `prompt_manager list` | **Key Changes:** - **No HTTP API**: Use MCP protocol through compatible clients - **Action-Based**: Single tools with actions instead of separate tools - **Enhanced Features**: Type analysis, framework integration, intelligent filtering - **Consolidated**: 3 tools instead of 24+ legacy tools --- The consolidated prompt management system provides sophisticated prompt lifecycle management while maintaining simplicity and performance. The `prompt_manager` tool offers comprehensive capabilities from basic CRUD operations to advanced features like type analysis, framework integration, and intelligent filtering, all within the efficient 3-tool consolidated architecture. ``` -------------------------------------------------------------------------------- /plans/outputschema-realtime-progress-and-validation.md: -------------------------------------------------------------------------------- ```markdown # OutputSchema Real-Time Progress & Gate Validation Implementation Plan ## Overview This plan integrates real-time progress tracking with visual gate validation to create a comprehensive user experience for chain executions and quality gate monitoring. The implementation builds upon the current Option 2 (Minimal Compliance) foundation while adding progressive enhancement capabilities for modern MCP clients. ## Core Feature Integration ### 1. 
Real-Time Progress Tracking #### Chain Execution Progress - **Live Progress Bars**: Visual percentage indicators for overall chain completion - **Step-by-Step Status**: Individual step progress with state transitions (pending → running → completed/failed) - **Time Analytics**: Real-time ETA calculations based on historical execution patterns - **Performance Metrics**: Live memory usage, CPU utilization, and execution speed tracking - **Multi-Chain Monitoring**: Concurrent chain execution progress with session isolation #### Progress Data Structure ```typescript interface RealTimeChainProgress extends ChainProgress { // Enhanced progress tracking progressPercentage: number; // 0-100 overall completion estimatedTimeRemaining: number; // ETA in milliseconds currentStepProgress: number; // 0-100 current step completion // Performance metrics performanceMetrics: { memoryUsage: NodeJS.MemoryUsage; executionSpeed: number; // steps per second resourceUtilization: number; // 0-1 CPU/memory usage }; // Historical context averageStepTime: number; // milliseconds slowestStepId?: string; fastestStepId?: string; } ``` ### 2. Visual Gate Validation #### Interactive Validation Display - **Real-Time Gate Status**: Live indicators showing gate evaluation progress - **Quality Score Visualization**: Graphical representation of validation scores (0-1 range) - **Detailed Failure Analysis**: Structured explanations with actionable recommendations - **Retry Mechanism Integration**: Visual retry controls with attempt tracking - **Validation History**: Historical gate performance with trend analysis #### Gate Validation Data Structure ```typescript interface VisualGateValidation extends GateValidationResult { // Enhanced validation tracking validationProgress: number; // 0-100 validation completion currentGateIndex: number; // Which gate is being evaluated // Visual representation data gateStatusMap: Map<string, { status: 'pending' | 'running' | 'passed' | 'failed'; score?: number; progressPercentage: number; startTime: number; estimatedCompletion?: number; }>; // Retry mechanism retryAttempts: Array<{ attemptNumber: number; timestamp: number; result: 'passed' | 'failed' | 'timeout'; improvedGates: string[]; // Gates that passed after retry }>; // User actionable data recommendations: Array<{ gateId: string; priority: 'high' | 'medium' | 'low'; actionType: 'retry' | 'modify' | 'skip' | 'review'; description: string; estimatedFixTime?: number; }>; } ``` ## Technical Implementation Strategy ### Phase 1: Schema Enhancement (Week 1) #### Enhanced OutputSchema Definitions ```typescript // Extend existing chainProgressSchema export const enhancedChainProgressSchema = chainProgressSchema.extend({ progressPercentage: z.number().min(0).max(100), estimatedTimeRemaining: z.number().optional(), currentStepProgress: z.number().min(0).max(100), performanceMetrics: z.object({ memoryUsage: z.object({ heapUsed: z.number(), heapTotal: z.number(), external: z.number() }), executionSpeed: z.number(), resourceUtilization: z.number().min(0).max(1) }), averageStepTime: z.number(), slowestStepId: z.string().optional(), fastestStepId: z.string().optional() }); // Enhanced gate validation schema export const visualGateValidationSchema = gateValidationSchema.extend({ validationProgress: z.number().min(0).max(100), currentGateIndex: z.number(), gateStatusMap: z.record(z.object({ status: z.enum(['pending', 'running', 'passed', 'failed']), score: z.number().optional(), progressPercentage: z.number().min(0).max(100), startTime: z.number(), 
estimatedCompletion: z.number().optional() })), retryAttempts: z.array(z.object({ attemptNumber: z.number(), timestamp: z.number(), result: z.enum(['passed', 'failed', 'timeout']), improvedGates: z.array(z.string()) })), recommendations: z.array(z.object({ gateId: z.string(), priority: z.enum(['high', 'medium', 'low']), actionType: z.enum(['retry', 'modify', 'skip', 'review']), description: z.string(), estimatedFixTime: z.number().optional() })) }); ``` #### Integration Points - **ChainSessionManager**: Add progress tracking hooks to existing session management - **GateEvaluator**: Enhance gate evaluation with progress callbacks - **FrameworkStateManager**: Add performance metrics collection - **ResponseFormatter**: Extend formatters to handle enhanced schemas ### Phase 2: Real-Time Infrastructure (Week 2) #### SSE Event Streaming ```typescript // New progress event types export enum ProgressEventType { CHAIN_STARTED = 'chain:started', CHAIN_PROGRESS = 'chain:progress', STEP_STARTED = 'step:started', STEP_COMPLETED = 'step:completed', GATE_VALIDATION_STARTED = 'gate:validation:started', GATE_VALIDATION_PROGRESS = 'gate:validation:progress', GATE_VALIDATION_COMPLETED = 'gate:validation:completed', EXECUTION_METRICS = 'execution:metrics' } // Progress event streaming service export class ProgressEventStreamer { private sseConnections: Map<string, Response> = new Map(); broadcast(eventType: ProgressEventType, data: any): void { const event = { type: eventType, timestamp: Date.now(), data }; this.sseConnections.forEach((response, clientId) => { response.write(`data: ${JSON.stringify(event)}\n\n`); }); } addClient(clientId: string, response: Response): void { response.writeHead(200, { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive' }); this.sseConnections.set(clientId, response); } } ``` #### Chain Execution Integration ```typescript // Enhanced ChainOrchestrator with progress tracking export class EnhancedChainOrchestrator extends ChainOrchestrator { private progressStreamer: ProgressEventStreamer; async executeStep( session: ChainExecutionSession, stepId: string ): Promise<StepExecutionResult> { // Emit step start event this.progressStreamer.broadcast(ProgressEventType.STEP_STARTED, { sessionId: session.sessionId, stepId, stepName: session.chainDefinition.steps[stepId]?.name, progressPercentage: this.calculateOverallProgress(session, stepId) }); // Execute step with performance monitoring const startTime = Date.now(); const startMemory = process.memoryUsage(); const result = await super.executeStep(session, stepId); // Calculate performance metrics const executionTime = Date.now() - startTime; const endMemory = process.memoryUsage(); const memoryDelta = endMemory.heapUsed - startMemory.heapUsed; // Emit progress update this.progressStreamer.broadcast(ProgressEventType.CHAIN_PROGRESS, { sessionId: session.sessionId, currentStep: stepId, progressPercentage: this.calculateOverallProgress(session, stepId), estimatedTimeRemaining: this.calculateETA(session, stepId), performanceMetrics: { stepExecutionTime: executionTime, memoryDelta, currentMemoryUsage: endMemory } }); return result; } } ``` ### Phase 3: Gate Validation Enhancement (Week 3) #### Enhanced Gate Evaluation ```typescript export class VisualGateEvaluator extends BaseGateEvaluator { private progressStreamer: ProgressEventStreamer; async evaluateGates( content: string, gates: GateDefinition[], context: EvaluationContext ): Promise<VisualGateValidation> { const sessionId = context.sessionId || 
'anonymous'; // Initialize gate status map const gateStatusMap = new Map(); gates.forEach((gate, index) => { gateStatusMap.set(gate.id, { status: 'pending', progressPercentage: 0, startTime: 0 }); }); // Emit validation start event this.progressStreamer.broadcast(ProgressEventType.GATE_VALIDATION_STARTED, { sessionId, totalGates: gates.length, gateStatusMap: Object.fromEntries(gateStatusMap) }); const results: GateEvaluationResult[] = []; const retryAttempts: Array<any> = []; for (let i = 0; i < gates.length; i++) { const gate = gates[i]; // Update gate status to running gateStatusMap.set(gate.id, { status: 'running', progressPercentage: 0, startTime: Date.now() }); // Emit progress update this.progressStreamer.broadcast(ProgressEventType.GATE_VALIDATION_PROGRESS, { sessionId, currentGateIndex: i, currentGateId: gate.id, overallProgress: (i / gates.length) * 100, gateStatusMap: Object.fromEntries(gateStatusMap) }); // Evaluate gate with progress callbacks const result = await this.evaluateGateWithProgress(gate, content, context, (progress) => { gateStatusMap.set(gate.id, { status: 'running', progressPercentage: progress, startTime: gateStatusMap.get(gate.id)!.startTime }); this.progressStreamer.broadcast(ProgressEventType.GATE_VALIDATION_PROGRESS, { sessionId, currentGateIndex: i, currentGateId: gate.id, currentGateProgress: progress, overallProgress: ((i + progress/100) / gates.length) * 100, gateStatusMap: Object.fromEntries(gateStatusMap) }); }); // Update final status gateStatusMap.set(gate.id, { status: result.passed ? 'passed' : 'failed', progressPercentage: 100, startTime: gateStatusMap.get(gate.id)!.startTime, score: result.score }); results.push(result); // Handle retry logic if gate failed if (!result.passed && gate.allowRetry) { const retryResult = await this.handleGateRetry(gate, content, context, retryAttempts); if (retryResult) { gateStatusMap.set(gate.id, { status: 'passed', progressPercentage: 100, startTime: gateStatusMap.get(gate.id)!.startTime, score: retryResult.score }); results[results.length - 1] = retryResult; } } } // Generate recommendations const recommendations = this.generateActionableRecommendations(results, gates); // Emit final validation complete event this.progressStreamer.broadcast(ProgressEventType.GATE_VALIDATION_COMPLETED, { sessionId, overallResult: results.every(r => r.passed), totalTime: Date.now() - (gateStatusMap.values().next().value?.startTime || 0), recommendations }); return { ...this.consolidateResults(results), validationProgress: 100, currentGateIndex: gates.length - 1, gateStatusMap: Object.fromEntries(gateStatusMap), retryAttempts, recommendations }; } } ``` ### Phase 4: Client Integration (Week 4) #### WebUI Components ```typescript // React component for chain progress visualization export const ChainProgressTracker: React.FC<{ sessionId: string; onComplete?: (result: ChainExecutionResult) => void; }> = ({ sessionId, onComplete }) => { const [progress, setProgress] = useState<RealTimeChainProgress | null>(null); const [gateValidation, setGateValidation] = useState<VisualGateValidation | null>(null); useEffect(() => { // Connect to SSE stream const eventSource = new EventSource(`/api/progress/stream/${sessionId}`); eventSource.addEventListener('chain:progress', (event) => { const data = JSON.parse(event.data); setProgress(data); }); eventSource.addEventListener('gate:validation:progress', (event) => { const data = JSON.parse(event.data); setGateValidation(data); }); return () => eventSource.close(); }, [sessionId]); return ( <div 
className="chain-progress-tracker"> <ChainProgressBar progress={progress} /> <StepStatusIndicators steps={progress?.steps || []} /> <GateValidationDisplay validation={gateValidation} /> <PerformanceMetrics metrics={progress?.performanceMetrics} /> </div> ); }; ``` #### MCP Client Integration ```typescript // Enhanced MCP client with progress tracking export class ProgressAwareMCPClient extends MCPClient { private progressCallbacks: Map<string, (progress: any) => void> = new Map(); async executeChainWithProgress( chainId: string, args: any, onProgress?: (progress: RealTimeChainProgress) => void, onGateValidation?: (validation: VisualGateValidation) => void ): Promise<ToolResponse> { const sessionId = generateSessionId(); // Register progress callbacks if (onProgress) { this.progressCallbacks.set(`${sessionId}:progress`, onProgress); } if (onGateValidation) { this.progressCallbacks.set(`${sessionId}:gate`, onGateValidation); } // Execute chain with enhanced response parsing const response = await this.callTool('prompt_engine', { command: `>>${chainId}`, ...args, sessionOptions: { sessionId, trackProgress: true, enableGateVisualization: true } }); // Parse structured content for progress data if (response.structuredContent?.chainProgress) { onProgress?.(response.structuredContent.chainProgress); } if (response.structuredContent?.gateValidation) { onGateValidation?.(response.structuredContent.gateValidation); } return response; } } ``` ### Phase 5: Backward Compatibility & Progressive Enhancement #### Compatibility Strategy ```typescript // Feature detection and graceful degradation export class ProgressCompatibilityLayer { static detectClientCapabilities(request: Request): ClientCapabilities { const userAgent = request.headers['user-agent'] || ''; const acceptHeader = request.headers.accept || ''; return { supportsSSE: acceptHeader.includes('text/event-stream'), supportsStructuredOutput: request.headers['x-mcp-structured'] === 'true', supportsProgressTracking: request.headers['x-progress-tracking'] === 'true', clientType: this.identifyClientType(userAgent) }; } static adaptResponse( response: ToolResponse, capabilities: ClientCapabilities ): ToolResponse { if (!capabilities.supportsStructuredOutput) { // Return Option 2 minimal compliance response return { content: response.content, isError: response.isError }; } if (!capabilities.supportsProgressTracking) { // Remove progress-specific structured data const { chainProgress, gateValidation, ...otherData } = response.structuredContent || {}; return { ...response, structuredContent: otherData }; } return response; // Full enhanced response } } ``` ## Implementation Benefits ### User Experience Enhancement - **Visual Feedback**: Clear progress indicators reduce uncertainty during long-running operations - **Error Understanding**: Detailed gate validation results help users understand and fix issues - **Performance Insights**: Real-time metrics help users optimize their chains and prompts - **Professional Interface**: Modern UI components suitable for enterprise environments ### Developer Benefits - **Debugging Capabilities**: Detailed progress tracking aids in identifying bottlenecks - **Performance Optimization**: Real-time metrics enable performance tuning - **Quality Assurance**: Enhanced gate validation improves output quality - **Extensibility**: Modular design allows easy addition of new progress types ### System Integration - **Backward Compatible**: Maintains Option 2 minimal compliance as foundation - **Progressive Enhancement**: 
Advanced features only activate for capable clients - **Scalable Architecture**: Event-driven design supports multiple concurrent clients - **Future-Proof**: Extensible schema design accommodates future enhancements ## Success Metrics ### Technical Metrics - **Performance Impact**: < 5% overhead for progress tracking - **Memory Usage**: < 10MB additional memory for progress state - **Response Time**: < 100ms additional latency for enhanced responses - **Compatibility**: 100% backward compatibility maintained ### User Experience Metrics - **Progress Clarity**: Users report clear understanding of execution status - **Error Resolution**: Reduced time to fix gate validation failures - **Perceived Performance**: Improved user satisfaction with long-running operations - **Adoption Rate**: Percentage of clients utilizing enhanced features ## Risk Mitigation ### Performance Risks - **Mitigation**: Configurable progress granularity (can reduce update frequency) - **Monitoring**: Real-time performance impact measurement - **Fallback**: Automatic degradation to minimal compliance if performance degrades ### Compatibility Risks - **Mitigation**: Comprehensive client capability detection - **Testing**: Automated testing with various client types - **Documentation**: Clear migration guides for client implementations ### Implementation Complexity - **Mitigation**: Phased rollout with incremental feature addition - **Testing**: Extensive integration testing at each phase - **Documentation**: Detailed implementation guides and examples This implementation plan provides a comprehensive foundation for real-time progress tracking and visual gate validation while maintaining the stability and compatibility of the existing system. ``` -------------------------------------------------------------------------------- /server/tests/scripts/integration-server-startup.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node /** * Server Startup Integration Tests - Node.js Script Version * Tests extracted from GitHub Actions inline scripts */ async function runServerStartupIntegrationTests() { // Global timeout for entire test suite const globalTimeout = setTimeout(() => { console.error('❌ Integration tests timed out after 2 minutes - forcing exit'); console.log('💀 Global timeout triggered - forcing immediate process exit...'); process.exit(1); }, 120000); // 2 minutes try { console.log('🧪 Running Server Startup Integration tests...'); console.log('📋 Testing full server initialization sequence and error recovery'); // Import modules const { Application } = await import('../../dist/runtime/application.js'); const { createSimpleLogger } = await import('../../dist/logging/index.js'); const { globalResourceTracker } = await import('../../dist/utils/global-resource-tracker.js'); let logger, orchestrator; // Timeout wrapper for individual operations async function runWithTimeout(operation, timeoutMs = 10000, operationName = 'operation') { return Promise.race([ operation(), new Promise((_, reject) => setTimeout(() => reject(new Error(`${operationName} timed out after ${timeoutMs}ms`)), timeoutMs) ) ]); } // Setup for each test - reuse orchestrator to prevent multiple full startups function setupTest() { if (!logger) { logger = createSimpleLogger(); } if (!orchestrator) { orchestrator = new Application(logger); } return orchestrator; } // Simple assertion helpers function assertEqual(actual, expected, testName) { const actualStr = JSON.stringify(actual); const 
expectedStr = JSON.stringify(expected); if (actualStr === expectedStr) { console.log(`✅ ${testName}: PASSED`); return true; } else { console.error(`❌ ${testName}: FAILED`); console.error(` Expected: ${expectedStr}`); console.error(` Actual: ${actualStr}`); return false; } } function assertTruthy(value, testName) { if (value) { console.log(`✅ ${testName}: PASSED`); return true; } else { console.error(`❌ ${testName}: FAILED - Expected truthy value, got: ${value}`); return false; } } function assertType(value, expectedType, testName) { if (typeof value === expectedType) { console.log(`✅ ${testName}: PASSED`); return true; } else { console.error(`❌ ${testName}: FAILED - Expected type ${expectedType}, got: ${typeof value}`); return false; } } function assertHasProperty(obj, property, testName) { if (obj && typeof obj === 'object' && property in obj) { console.log(`✅ ${testName}: PASSED`); return true; } else { console.error(`❌ ${testName}: FAILED - Object does not have property: ${property}`); return false; } } function assertGreaterThan(actual, expected, testName) { if (actual > expected) { console.log(`✅ ${testName}: PASSED (${actual} > ${expected})`); return true; } else { console.error(`❌ ${testName}: FAILED (${actual} <= ${expected})`); return false; } } function assertGreaterThanOrEqual(actual, expected, testName) { if (actual >= expected) { console.log(`✅ ${testName}: PASSED (${actual} >= ${expected})`); return true; } else { console.error(`❌ ${testName}: FAILED (${actual} < ${expected})`); return false; } } function assertLessThan(actual, expected, testName) { if (actual < expected) { console.log(`✅ ${testName}: PASSED (${actual} < ${expected})`); return true; } else { console.error(`❌ ${testName}: FAILED (${actual} >= ${expected})`); return false; } } function assertIsArray(value, testName) { if (Array.isArray(value)) { console.log(`✅ ${testName}: PASSED`); return true; } else { console.error(`❌ ${testName}: FAILED - Expected array, got: ${typeof value}`); return false; } } let testResults = []; // Test 1: Full Server Initialization Sequence console.log('🔍 Test 1: Full Server Initialization Sequence'); setupTest(); try { // Step 1: Load Configuration await runWithTimeout(() => orchestrator.loadConfiguration(), 5000, 'Configuration loading'); testResults.push(assertTruthy(orchestrator.config, 'Configuration loaded')); testResults.push(assertTruthy(orchestrator.config !== null, 'Configuration not null')); // Step 2: Load Prompts Data await runWithTimeout(() => orchestrator.loadPromptsData(), 10000, 'Prompts data loading'); const promptsCount = orchestrator.promptsData ? 
orchestrator.promptsData.length : 0; testResults.push(assertGreaterThanOrEqual(promptsCount, 0, 'Prompts data loaded or initialized')); // Step 3: Initialize Modules await runWithTimeout(() => orchestrator.initializeModules(), 8000, 'Module initialization'); testResults.push(assertTruthy(orchestrator.mcpToolsManager, 'MCP tools manager initialized')); testResults.push(assertTruthy(orchestrator.mcpToolsManager !== null, 'MCP tools manager not null')); // Step 4: Get Diagnostic Info const healthInfo = await runWithTimeout(() => orchestrator.getDiagnosticInfo(), 3000, 'Diagnostic info retrieval'); testResults.push(assertTruthy(healthInfo, 'Health info retrieved')); testResults.push(assertType(healthInfo, 'object', 'Health info is object')); testResults.push(assertGreaterThan(Object.keys(healthInfo).length, 0, 'Health info has properties')); } catch (error) { console.error(`❌ Full initialization sequence failed: ${error.message}`); testResults.push(false); } // Test 2: Configuration Loading console.log('🔍 Test 2: Configuration Loading'); // Reuse the already configured orchestrator from Test 1 try { // Configuration already loaded in Test 1, just verify it exists testResults.push(assertTruthy(orchestrator.config, 'Configuration object exists')); testResults.push(assertHasProperty(orchestrator.config, 'server', 'Config has server property')); if (orchestrator.config && orchestrator.config.server) { testResults.push(assertHasProperty(orchestrator.config.server, 'name', 'Server config has name')); testResults.push(assertHasProperty(orchestrator.config.server, 'version', 'Server config has version')); testResults.push(assertType(orchestrator.config.server.name, 'string', 'Server name is string')); testResults.push(assertGreaterThan(orchestrator.config.server.name.length, 0, 'Server name not empty')); } else { testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); } } catch (error) { console.error(`❌ Configuration loading failed: ${error.message}`); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); } // Test 3: Prompts Data Loading console.log('🔍 Test 3: Prompts Data Loading'); // Reuse the already initialized orchestrator from Test 1 try { // Prompts data already loaded in Test 1, just verify it exists testResults.push(assertTruthy(orchestrator.promptsData !== undefined, 'Prompts data property exists')); const promptsDataIsValid = Array.isArray(orchestrator.promptsData) || orchestrator.promptsData === null; testResults.push(assertTruthy(promptsDataIsValid, 'Prompts data is array or null')); if (orchestrator.promptsData && orchestrator.promptsData.length > 0) { const firstPrompt = orchestrator.promptsData[0]; testResults.push(assertHasProperty(firstPrompt, 'id', 'First prompt has id')); testResults.push(assertHasProperty(firstPrompt, 'name', 'First prompt has name')); } else { // If no prompts, that's still valid testResults.push(assertTruthy(true, 'Empty prompts handled gracefully')); testResults.push(assertTruthy(true, 'Empty prompts handled gracefully')); } } catch (error) { console.error(`❌ Prompts data loading failed: ${error.message}`); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); } // Test 4: Module Initialization console.log('🔍 Test 4: Module Initialization'); // Reuse the already initialized orchestrator from Test 1 try { // Modules already initialized in Test 1, just verify they exist 
testResults.push(assertTruthy(orchestrator.mcpToolsManager, 'MCP tools manager initialized')); testResults.push(assertTruthy(orchestrator.mcpToolsManager !== null, 'MCP tools manager not null')); } catch (error) { console.error(`❌ Module initialization failed: ${error.message}`); testResults.push(false); testResults.push(false); } // Test 5: Health Diagnostics console.log('🔍 Test 5: Health Diagnostics'); // Reuse the already initialized orchestrator from Test 1 try { // Get fresh diagnostic info from the already initialized orchestrator const healthInfo = await runWithTimeout(() => orchestrator.getDiagnosticInfo(), 3000, 'Health diagnostics'); testResults.push(assertTruthy(healthInfo, 'Health info exists')); testResults.push(assertType(healthInfo, 'object', 'Health info is object')); const diagnosticKeys = Object.keys(healthInfo); testResults.push(assertGreaterThan(diagnosticKeys.length, 0, 'Health info has diagnostic keys')); const hasRelevantKeys = diagnosticKeys.some(key => key.includes('status') || key.includes('config') || key.includes('prompts') || key.includes('tools') ); testResults.push(assertTruthy(hasRelevantKeys, 'Health info contains relevant diagnostic keys')); // Test partial initialization health info const partialOrchestrator = new Application(logger); await partialOrchestrator.loadConfiguration(); const partialHealthInfo = await partialOrchestrator.getDiagnosticInfo(); testResults.push(assertTruthy(partialHealthInfo, 'Partial health info exists')); testResults.push(assertType(partialHealthInfo, 'object', 'Partial health info is object')); // Clean up partial orchestrator try { await partialOrchestrator.shutdown(); partialOrchestrator.cleanup(); } catch (error) { console.warn('⚠️ Warning: Error cleaning up partialOrchestrator:', error.message); } } catch (error) { console.error(`❌ Health diagnostics failed: ${error.message}`); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); } // Test 6: Error Recovery console.log('🔍 Test 6: Error Recovery'); try { // Test configuration loading errors const failingOrchestrator = new Application(logger); const originalLoadConfig = failingOrchestrator.loadConfiguration; failingOrchestrator.loadConfiguration = async () => { throw new Error('Mock config error'); }; let configErrorThrown = false; try { await failingOrchestrator.loadConfiguration(); } catch (error) { if (error.message === 'Mock config error') { configErrorThrown = true; } } testResults.push(assertTruthy(configErrorThrown, 'Configuration loading error handled')); testResults.push(assertTruthy(failingOrchestrator.config === undefined, 'Config remains undefined after error')); // Test module initialization errors const moduleFailOrchestrator = new Application(logger); await moduleFailOrchestrator.loadConfiguration(); await moduleFailOrchestrator.loadPromptsData(); moduleFailOrchestrator.initializeModules = async () => { throw new Error('Mock module error'); }; let moduleErrorThrown = false; try { await moduleFailOrchestrator.initializeModules(); } catch (error) { if (error.message === 'Mock module error') { moduleErrorThrown = true; } } testResults.push(assertTruthy(moduleErrorThrown, 'Module initialization error handled')); // Clean up error recovery test instances try { await failingOrchestrator.shutdown(); failingOrchestrator.cleanup(); } catch (error) { console.warn('⚠️ Warning: Error cleaning up failingOrchestrator:', error.message); } try { await 
moduleFailOrchestrator.shutdown(); moduleFailOrchestrator.cleanup(); } catch (error) { console.warn('⚠️ Warning: Error cleaning up moduleFailOrchestrator:', error.message); } } catch (error) { console.error(`❌ Error recovery test failed: ${error.message}`); testResults.push(false); testResults.push(false); testResults.push(false); } // Test 7: Performance Validation console.log('🔍 Test 7: Performance Validation'); setupTest(); try { const start = Date.now(); await orchestrator.loadConfiguration(); await orchestrator.loadPromptsData(); await orchestrator.initializeModules(); const duration = Date.now() - start; testResults.push(assertLessThan(duration, 10000, 'Initialization completes within 10 seconds')); // Test step timing const timings = {}; const timingOrchestrator = new Application(logger); let stepStart = Date.now(); await timingOrchestrator.loadConfiguration(); timings.config = Date.now() - stepStart; stepStart = Date.now(); await timingOrchestrator.loadPromptsData(); timings.prompts = Date.now() - stepStart; stepStart = Date.now(); await timingOrchestrator.initializeModules(); timings.modules = Date.now() - stepStart; testResults.push(assertLessThan(timings.config, 5000, 'Configuration loading under 5 seconds')); testResults.push(assertLessThan(timings.prompts, 5000, 'Prompts loading under 5 seconds')); testResults.push(assertLessThan(timings.modules, 5000, 'Module initialization under 5 seconds')); console.log(`📊 Initialization timings - Config: ${timings.config}ms, Prompts: ${timings.prompts}ms, Modules: ${timings.modules}ms`); // Clean up timing orchestrator try { await timingOrchestrator.shutdown(); timingOrchestrator.cleanup(); } catch (error) { console.warn('⚠️ Warning: Error cleaning up timingOrchestrator:', error.message); } } catch (error) { console.error(`❌ Performance validation failed: ${error.message}`); testResults.push(false); testResults.push(false); testResults.push(false); testResults.push(false); } // Results Summary const passedTests = testResults.filter(result => result).length; const totalTests = testResults.length; console.log('\n📊 Server Startup Integration Tests Summary:'); console.log(` ✅ Passed: ${passedTests}/${totalTests} tests`); console.log(` 📊 Success Rate: ${((passedTests/totalTests)*100).toFixed(1)}%`); // Cleanup: Properly shutdown Application instances to prevent hanging processes if (orchestrator) { try { await orchestrator.shutdown(); orchestrator.cleanup(); } catch (error) { console.warn('⚠️ Warning: Error during orchestrator cleanup:', error.message); } } // Check for remaining resources before exit console.log('\n🔍 Checking for remaining global resources...'); globalResourceTracker.logDiagnostics(); const cleared = globalResourceTracker.emergencyCleanup(); if (cleared > 0) { console.log(`💀 Emergency cleanup cleared ${cleared} additional resources`); } if (passedTests === totalTests) { console.log('🎉 All Server Startup Integration tests passed!'); // Emergency process exit to prevent hanging due to global Node.js resources console.log('💀 Forcing process exit to prevent hanging from global timers...'); setTimeout(() => process.exit(0), 100); // Small delay to ensure log output return true; } else { console.error('❌ Some Server Startup Integration tests failed'); // Emergency process exit for failure case as well console.log('💀 Forcing process exit to prevent hanging from global timers...'); setTimeout(() => process.exit(1), 100); // Small delay to ensure log output return false; } } catch (error) { console.error('❌ Server Startup Integration 
tests failed with error:', error.message); if (error.stack) { console.error('Stack trace:', error.stack); } // Emergency process exit for error case as well console.log('💀 Forcing process exit due to test error to prevent hanging from global timers...'); setTimeout(() => process.exit(1), 100); // Small delay to ensure log output return false; } finally { // Clear the global timeout clearTimeout(globalTimeout); // Emergency cleanup: Ensure all Application instances are properly shut down if (typeof orchestrator !== 'undefined' && orchestrator) { try { await orchestrator.shutdown(); orchestrator.cleanup(); } catch (error) { console.warn('⚠️ Emergency cleanup warning:', error.message); } } } } // Run the tests if (import.meta.url === `file://${process.argv[1]}`) { runServerStartupIntegrationTests().catch(error => { console.error('❌ Test execution failed:', error); process.exit(1); }); } export { runServerStartupIntegrationTests }; ``` -------------------------------------------------------------------------------- /server/src/frameworks/prompt-guidance/system-prompt-injector.ts: -------------------------------------------------------------------------------- ```typescript /** * System Prompt Injector - Phase 3 Implementation * * Handles intelligent injection of methodology guidance into system prompts. * Extracted from framework-manager execution logic for better separation of concerns. */ import { Logger } from "../../logging/index.js"; import { ConvertedPrompt } from "../../types/index.js"; import { IMethodologyGuide, FrameworkDefinition, SystemPromptInjectionResult, PromptGuidanceConfig } from "../types/index.js"; import { ContentAnalysisResult } from "../../semantic/configurable-semantic-analyzer.js"; /** * System prompt injection configuration */ export interface SystemPromptInjectorConfig { enableTemplateVariables: boolean; enableContextualEnhancement: boolean; enableValidationGuidance: boolean; injectionMethod: 'template' | 'append' | 'prepend' | 'smart' | 'semantic-aware'; maxPromptLength: number; // Phase 4: Semantic awareness settings enableSemanticAwareness: boolean; semanticComplexityAdaptation: boolean; semanticInjectionStrategy: 'conservative' | 'moderate' | 'aggressive'; } /** * System Prompt Injector * * Intelligently injects methodology guidance into system prompts based on * active framework and prompt characteristics. 
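 *
 * Usage sketch (illustrative only, not part of the original source): assumes `logger` is an
 * initialized Logger and that `prompt`, `framework`, and `guide` come from the framework
 * manager, with an optional ContentAnalysisResult from the semantic analyzer:
 *
 *   const injector = createSystemPromptInjector(logger, { injectionMethod: 'smart' });
 *   const result = injector.injectMethodologyGuidance(prompt, framework, guide, analysis);
 *   // result.enhancedPrompt holds the methodology-enriched system prompt;
 *   // result.metadata records the injection method, variables used, and validation outcome.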
*/ export class SystemPromptInjector { private logger: Logger; private config: SystemPromptInjectorConfig; constructor(logger: Logger, config?: Partial<SystemPromptInjectorConfig>) { this.logger = logger; this.config = { enableTemplateVariables: true, enableContextualEnhancement: true, enableValidationGuidance: true, injectionMethod: 'smart', maxPromptLength: 4000, // Phase 4: Semantic awareness defaults enableSemanticAwareness: true, semanticComplexityAdaptation: true, semanticInjectionStrategy: 'moderate', ...config }; } /** * Inject methodology guidance into system prompt * Extracted from framework-manager.generateSystemPrompt() * Phase 4: Enhanced with semantic analysis awareness */ injectMethodologyGuidance( prompt: ConvertedPrompt, framework: FrameworkDefinition, methodologyGuide: IMethodologyGuide, semanticAnalysis?: ContentAnalysisResult ): SystemPromptInjectionResult { const startTime = Date.now(); this.logger.debug(`Injecting ${framework.methodology} guidance into system prompt for ${prompt.name}`); try { // Generate base guidance from methodology guide const baseGuidance = this.generateBaseGuidance(methodologyGuide, prompt, semanticAnalysis); // Create enhanced system prompt using framework template const enhancedPrompt = this.createEnhancedPrompt( framework.systemPromptTemplate, prompt, framework, baseGuidance, semanticAnalysis ); // Apply template variable substitution const finalPrompt = this.applyTemplateVariables(enhancedPrompt, prompt, framework); // Validate prompt length and quality const validationResult = this.validateInjectedPrompt(finalPrompt, prompt, framework); const result: SystemPromptInjectionResult = { originalPrompt: prompt.userMessageTemplate || '', enhancedPrompt: finalPrompt, injectedGuidance: baseGuidance, sourceFramework: framework, metadata: { injectionTime: new Date(), injectionMethod: this.getEffectiveInjectionMethod(semanticAnalysis), variablesUsed: this.extractUsedVariables(finalPrompt), confidence: validationResult.confidence, processingTimeMs: Date.now() - startTime, validationPassed: validationResult.passed, // Phase 4: Semantic analysis metadata semanticAware: semanticAnalysis !== undefined, semanticComplexity: semanticAnalysis?.complexity, semanticConfidence: semanticAnalysis?.confidence } }; this.logger.debug(`System prompt injection completed for ${framework.methodology} in ${result.metadata.processingTimeMs}ms`); return result; } catch (error) { this.logger.error(`Failed to inject methodology guidance for ${framework.methodology}:`, error); // Return fallback result with original prompt return { originalPrompt: prompt.userMessageTemplate || '', enhancedPrompt: prompt.userMessageTemplate || '', injectedGuidance: '', sourceFramework: framework, metadata: { injectionTime: new Date(), injectionMethod: 'template', variablesUsed: [], confidence: 0, processingTimeMs: Date.now() - startTime, validationPassed: false, error: error instanceof Error ? 
error.message : 'Unknown error' } }; } } /** * Generate base methodology guidance from guide * Extracted from framework-manager.generateSystemPromptTemplate() * Phase 4: Enhanced with semantic analysis */ private generateBaseGuidance( guide: IMethodologyGuide, prompt: ConvertedPrompt, semanticAnalysis?: ContentAnalysisResult ): string { // Get methodology-specific system prompt guidance const baseGuidance = guide.getSystemPromptGuidance({ promptName: prompt.name, promptCategory: prompt.category, promptType: prompt.executionMode || 'prompt' }); // Phase 4: Enhance with semantic-aware contextual guidance if (this.config.enableContextualEnhancement) { const contextualGuidance = this.generateContextualGuidance(guide, prompt, semanticAnalysis); return `${baseGuidance}\n\n${contextualGuidance}`; } // Add semantic complexity-specific guidance if available if (this.config.enableSemanticAwareness && semanticAnalysis) { const semanticGuidance = this.generateSemanticGuidance(guide, semanticAnalysis); if (semanticGuidance) { return `${baseGuidance}\n\n${semanticGuidance}`; } } return baseGuidance; } /** * Create enhanced system prompt with methodology integration * Phase 4: Enhanced with semantic analysis awareness */ private createEnhancedPrompt( template: string, prompt: ConvertedPrompt, framework: FrameworkDefinition, guidance: string, semanticAnalysis?: ContentAnalysisResult ): string { // Use smart injection method by default switch (this.config.injectionMethod) { case 'template': return this.injectViaTemplate(template, guidance); case 'append': return `${template}\n\n${guidance}`; case 'prepend': return `${guidance}\n\n${template}`; case 'semantic-aware': return this.semanticAwareInject(template, guidance, prompt, framework, semanticAnalysis); case 'smart': default: return this.smartInject(template, guidance, prompt, framework, semanticAnalysis); } } /** * Smart injection that adapts to prompt characteristics * Phase 4: Enhanced with semantic analysis awareness */ private smartInject( template: string, guidance: string, prompt: ConvertedPrompt, framework: FrameworkDefinition, semanticAnalysis?: ContentAnalysisResult ): string { // Use semantic analysis if available and semantic awareness is enabled if (this.config.enableSemanticAwareness && semanticAnalysis) { return this.semanticAwareInject(template, guidance, prompt, framework, semanticAnalysis); } // Fallback to original smart injection logic // For complex prompts or chain types, use template integration if (prompt.executionMode === 'chain' || (prompt.arguments && prompt.arguments.length > 3)) { return this.injectViaTemplate(template, guidance); } // For simple prompts, append guidance return `${template}\n\n## ${framework.methodology} Methodology Guidance\n\n${guidance}`; } /** * Inject guidance via template replacement */ private injectViaTemplate(template: string, guidance: string): string { // Look for guidance placeholder in template if (template.includes('{METHODOLOGY_GUIDANCE}')) { return template.replace('{METHODOLOGY_GUIDANCE}', guidance); } // If no placeholder, append to template return `${template}\n\n${guidance}`; } /** * Apply template variable substitution * Extracted from framework-manager.generateSystemPrompt() */ private applyTemplateVariables( prompt: string, convertedPrompt: ConvertedPrompt, framework: FrameworkDefinition ): string { if (!this.config.enableTemplateVariables) { return prompt; } let processedPrompt = prompt; // Replace standard template variables processedPrompt = processedPrompt.replace(/\{PROMPT_NAME\}/g, 
convertedPrompt.name || 'Prompt'); processedPrompt = processedPrompt.replace(/\{PROMPT_CATEGORY\}/g, convertedPrompt.category || 'general'); processedPrompt = processedPrompt.replace(/\{FRAMEWORK_NAME\}/g, framework.name); processedPrompt = processedPrompt.replace(/\{METHODOLOGY\}/g, framework.methodology); // Replace prompt-specific variables if (convertedPrompt.executionMode) { processedPrompt = processedPrompt.replace(/\{PROMPT_TYPE\}/g, convertedPrompt.executionMode); } return processedPrompt; } /** * Generate contextual guidance based on prompt characteristics * Phase 4: Enhanced with semantic analysis */ private generateContextualGuidance( guide: IMethodologyGuide, prompt: ConvertedPrompt, semanticAnalysis?: ContentAnalysisResult ): string { const contextParts = []; // Phase 4: Use semantic analysis for enhanced contextual guidance if (semanticAnalysis && this.config.enableSemanticAwareness) { // Add semantic complexity-based guidance switch (semanticAnalysis.complexity) { case 'high': contextParts.push(`High complexity detected - apply ${guide.methodology} with extra attention to systematic breakdown and validation.`); break; case 'medium': contextParts.push(`Medium complexity detected - ensure ${guide.methodology} methodology is applied comprehensively.`); break; case 'low': contextParts.push(`Low complexity detected - apply ${guide.methodology} efficiently while maintaining quality.`); break; } // Add semantic execution characteristics guidance if (semanticAnalysis.executionCharacteristics.hasStructuredReasoning) { contextParts.push(`Structured reasoning detected - leverage ${guide.methodology} systematic approach.`); } if (semanticAnalysis.executionCharacteristics.hasComplexAnalysis) { contextParts.push(`Complex analysis patterns detected - emphasize ${guide.methodology} analytical rigor.`); } // Add semantic confidence-based guidance if (semanticAnalysis.confidence < 0.7) { contextParts.push(`Uncertain semantic analysis - apply ${guide.methodology} with additional validation steps.`); } } else { // Fallback to original logic when semantic analysis unavailable // Add complexity-based guidance if (prompt.arguments && prompt.arguments.length > 2) { contextParts.push(`This prompt has multiple parameters - apply ${guide.methodology} systematically to each component.`); } // Add type-specific guidance if (prompt.executionMode === 'chain') { contextParts.push(`Chain execution detected - maintain ${guide.methodology} consistency across all steps.`); } // Add category-specific guidance if (prompt.category === 'analysis') { contextParts.push(`Analysis prompt detected - emphasize thorough ${guide.methodology} analytical phases.`); } } return contextParts.join('\n'); } /** * Validate injected prompt quality and characteristics */ private validateInjectedPrompt( prompt: string, originalPrompt: ConvertedPrompt, framework: FrameworkDefinition ): { passed: boolean; confidence: number; issues: string[] } { const issues: string[] = []; let confidence = 1.0; // Check prompt length if (prompt.length > this.config.maxPromptLength) { issues.push(`Prompt length (${prompt.length}) exceeds maximum (${this.config.maxPromptLength})`); confidence -= 0.2; } // Check methodology integration if (!prompt.toLowerCase().includes(framework.methodology.toLowerCase())) { issues.push(`Methodology ${framework.methodology} not clearly referenced in prompt`); confidence -= 0.3; } // Check template variable resolution const unresolvedVariables = prompt.match(/\{[A-Z_]+\}/g); if (unresolvedVariables && 
unresolvedVariables.length > 0) { issues.push(`Unresolved template variables: ${unresolvedVariables.join(', ')}`); confidence -= 0.1 * unresolvedVariables.length; } // Ensure minimum confidence confidence = Math.max(confidence, 0); return { passed: issues.length === 0, confidence, issues }; } /** * Phase 4: Semantic-aware injection that adapts based on semantic analysis */ private semanticAwareInject( template: string, guidance: string, prompt: ConvertedPrompt, framework: FrameworkDefinition, semanticAnalysis?: ContentAnalysisResult ): string { if (!semanticAnalysis) { return this.injectViaTemplate(template, guidance); } // Determine injection strategy based on semantic complexity and characteristics const injectionStrategy = this.determineSemanticInjectionStrategy(semanticAnalysis); switch (injectionStrategy) { case 'minimal': // Low complexity - simple append return `${template}\n\n${guidance}`; case 'structured': // Medium complexity - organized injection return `${template}\n\n## ${framework.methodology} Methodology Guidance\n\n${guidance}`; case 'comprehensive': // High complexity - full template integration return this.injectViaTemplate(template, `## ${framework.methodology} Methodology Framework\n\n${guidance}`); default: return this.injectViaTemplate(template, guidance); } } /** * Phase 4: Generate semantic-specific guidance based on analysis results */ private generateSemanticGuidance( guide: IMethodologyGuide, semanticAnalysis: ContentAnalysisResult ): string { const guidanceParts = []; // Add analysis mode-specific guidance if (semanticAnalysis.analysisMetadata.mode === 'semantic') { guidanceParts.push(`Semantic analysis mode: Apply ${guide.methodology} with intelligent pattern recognition.`); } else if (semanticAnalysis.analysisMetadata.mode === 'structural') { guidanceParts.push(`Structural analysis mode: Apply ${guide.methodology} with systematic template analysis.`); } // Add capability-based guidance if (semanticAnalysis.capabilities.hasSemanticUnderstanding) { guidanceParts.push(`Enhanced semantic understanding available - leverage for nuanced ${guide.methodology} application.`); } // Add limitation-aware guidance if (semanticAnalysis.limitations.length > 0) { guidanceParts.push(`Analysis limitations detected - apply ${guide.methodology} with extra validation.`); } return guidanceParts.length > 0 ? 
guidanceParts.join('\n') : ''; } /** * Phase 4: Determine injection strategy based on semantic analysis */ private determineSemanticInjectionStrategy(semanticAnalysis: ContentAnalysisResult): 'minimal' | 'structured' | 'comprehensive' { // Base strategy on configured approach if (this.config.semanticInjectionStrategy === 'conservative') { return 'minimal'; } else if (this.config.semanticInjectionStrategy === 'aggressive') { return 'comprehensive'; } // Moderate strategy - adapt based on semantic characteristics const complexityScore = this.calculateSemanticComplexityScore(semanticAnalysis); if (complexityScore >= 0.8) { return 'comprehensive'; } else if (complexityScore >= 0.5) { return 'structured'; } else { return 'minimal'; } } /** * Phase 4: Calculate semantic complexity score for injection decisions */ private calculateSemanticComplexityScore(semanticAnalysis: ContentAnalysisResult): number { let score = 0; // Base complexity mapping switch (semanticAnalysis.complexity) { case 'high': score += 0.6; break; case 'medium': score += 0.4; break; case 'low': score += 0.2; break; } // Execution characteristics influence const chars = semanticAnalysis.executionCharacteristics; if (chars.hasStructuredReasoning) score += 0.1; if (chars.hasComplexAnalysis) score += 0.1; if (chars.hasChainSteps) score += 0.1; if (chars.argumentCount > 3) score += 0.1; // Confidence influence (higher confidence = more decisive injection) score += (semanticAnalysis.confidence * 0.2); return Math.min(score, 1.0); } /** * Phase 4: Get effective injection method based on semantic analysis */ private getEffectiveInjectionMethod(semanticAnalysis?: ContentAnalysisResult): string { if (!semanticAnalysis || !this.config.enableSemanticAwareness) { return this.config.injectionMethod; } if (this.config.injectionMethod === 'semantic-aware') { const strategy = this.determineSemanticInjectionStrategy(semanticAnalysis); return `semantic-aware-${strategy}`; } return this.config.injectionMethod; } /** * Extract variables that were used in template processing */ private extractUsedVariables(prompt: string): string[] { const originalVariables = [ 'PROMPT_NAME', 'PROMPT_CATEGORY', 'FRAMEWORK_NAME', 'METHODOLOGY', 'PROMPT_TYPE', 'METHODOLOGY_GUIDANCE' ]; return originalVariables.filter(variable => !prompt.includes(`{${variable}}`) ); } /** * Update injector configuration */ updateConfig(config: Partial<SystemPromptInjectorConfig>): void { this.config = { ...this.config, ...config }; this.logger.debug('SystemPromptInjector configuration updated', config); } /** * Get current injector configuration */ getConfig(): SystemPromptInjectorConfig { return { ...this.config }; } } /** * Create and configure a SystemPromptInjector instance */ export function createSystemPromptInjector( logger: Logger, config?: Partial<SystemPromptInjectorConfig> ): SystemPromptInjector { return new SystemPromptInjector(logger, config); } ``` -------------------------------------------------------------------------------- /server/src/frameworks/framework-state-manager.ts: -------------------------------------------------------------------------------- ```typescript /** * Stateful Framework State Manager * * Manages the active framework methodology state and provides framework switching capabilities. * This tracks switching mechanics (timing, success/failure, counts) and framework state. 
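 *
 * Usage sketch (illustrative only, not part of the original source; assumes `logger` is an
 * initialized Logger instance):
 *
 *   const stateManager = await createFrameworkStateManager(logger);
 *   stateManager.enableFrameworkSystem('manual activation');
 *   await stateManager.switchFramework({ targetFramework: 'ReACT', reason: 'user request' });
 *   const health = stateManager.getSystemHealth();
 *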
* This is separate from execution strategy analysis - it handles WHICH framework methodology * to apply (CAGEERF, ReACT, 5W1H, SCAMPER) while semantic analysis handles execution strategies. */ import { EventEmitter } from "events"; import { Logger } from "../logging/index.js"; import { FrameworkManager, createFrameworkManager } from "./framework-manager.js"; import { FrameworkDefinition, FrameworkExecutionContext, FrameworkSelectionCriteria } from "./types/index.js"; import * as fs from 'fs/promises'; import * as path from 'path'; /** * Persisted framework state (saved to file) */ export interface PersistedFrameworkState { version: string; frameworkSystemEnabled: boolean; activeFramework: string; lastSwitchedAt: string; switchReason: string; } /** * Framework state information */ export interface FrameworkState { activeFramework: string; previousFramework: string | null; switchedAt: Date; switchReason: string; isHealthy: boolean; frameworkSystemEnabled: boolean; // NEW: Controls whether framework system is enabled/disabled switchingMetrics: { switchCount: number; averageResponseTime: number; errorCount: number; }; } /** * Framework switch request */ export interface FrameworkSwitchRequest { targetFramework: string; reason?: string; criteria?: FrameworkSelectionCriteria; } /** * Framework system health information */ export interface FrameworkSystemHealth { status: "healthy" | "degraded" | "error"; activeFramework: string; frameworkSystemEnabled: boolean; // NEW: Whether framework system is enabled availableFrameworks: string[]; lastSwitchTime: Date | null; switchingMetrics: { totalSwitches: number; successfulSwitches: number; failedSwitches: number; averageResponseTime: number; }; issues: string[]; } /** * Stateful Framework State Manager Events */ export interface FrameworkStateManagerEvents { 'framework-switched': (previousFramework: string, newFramework: string, reason: string) => void; 'framework-error': (framework: string, error: Error) => void; 'health-changed': (health: FrameworkSystemHealth) => void; 'framework-system-toggled': (enabled: boolean, reason: string) => void; // NEW: Framework system enabled/disabled } /** * Stateful Framework State Manager * * Maintains framework state across operations and provides switching capabilities */ export class FrameworkStateManager extends EventEmitter { private logger: Logger; private frameworkManager: FrameworkManager | null = null; private currentState: FrameworkState; private switchHistory: Array<{ from: string; to: string; timestamp: Date; reason: string }> = []; private switchingMetrics = { totalSwitches: 0, successfulSwitches: 0, failedSwitches: 0, averageResponseTime: 0, errorCount: 0 }; private isInitialized: boolean = false; private runtimeStatePath: string; constructor(logger: Logger, serverRoot?: string) { super(); this.logger = logger; // Set state file path - place in config directory for better organization const rootPath = path.resolve(serverRoot || process.cwd()); this.runtimeStatePath = path.join(rootPath, 'runtime-state', 'framework-state.json'); // Initialize with default framework state (will be overridden by loadPersistedState) this.currentState = { activeFramework: 'CAGEERF', // Default to CAGEERF previousFramework: null, switchedAt: new Date(), switchReason: 'Initial framework selection', isHealthy: true, frameworkSystemEnabled: false, // NEW: Framework system disabled by default (changed from true) switchingMetrics: { switchCount: 0, averageResponseTime: 0, errorCount: 0 } }; } /** * Initialize the framework state 
manager */ async initialize(): Promise<void> { if (this.isInitialized) { this.logger.debug("FrameworkStateManager already initialized"); return; } // Load persisted state before setting up framework manager await this.loadPersistedState(); this.logger.info("Initializing Framework State Manager..."); try { // Initialize framework manager this.frameworkManager = await createFrameworkManager(this.logger); // Validate default framework exists const defaultFramework = this.frameworkManager.getFramework(this.currentState.activeFramework); if (!defaultFramework) { throw new Error(`Default framework '${this.currentState.activeFramework}' not found`); } this.isInitialized = true; this.logger.info(`Framework State Manager initialized with active framework: ${this.currentState.activeFramework}`); // Emit initial health status this.emit('health-changed', this.getSystemHealth()); } catch (error) { this.logger.error("Failed to initialize Framework State Manager:", error); throw error; } } /** * Get current framework state */ getCurrentState(): FrameworkState { this.ensureInitialized(); return { ...this.currentState }; } /** * Load persisted state from file */ private async loadPersistedState(): Promise<void> { try { const stateContent = await fs.readFile(this.runtimeStatePath, 'utf-8'); const persistedState: PersistedFrameworkState = JSON.parse(stateContent); if (this.isValidPersistedState(persistedState)) { this.currentState.frameworkSystemEnabled = persistedState.frameworkSystemEnabled; this.currentState.activeFramework = persistedState.activeFramework; this.currentState.switchedAt = new Date(persistedState.lastSwitchedAt); this.currentState.switchReason = persistedState.switchReason; this.logger.info( `✅ Loaded framework state cache: ${ persistedState.frameworkSystemEnabled ? 'enabled' : 'disabled' }, active: ${persistedState.activeFramework}` ); return; } this.logger.warn( `⚠️ Invalid framework state cache at ${this.runtimeStatePath}, falling back to defaults` ); } catch (error: any) { if (error?.code !== 'ENOENT') { this.logger.warn( `⚠️ Failed to load framework state cache ${this.runtimeStatePath}: ${ error instanceof Error ? error.message : String(error) }` ); } } this.logger.info('📁 No framework state cache found, using defaults'); await this.saveStateToFile(); } /** * Save current state to file */ private async saveStateToFile(): Promise<void> { try { const persistedState: PersistedFrameworkState = { version: '1.0.0', frameworkSystemEnabled: this.currentState.frameworkSystemEnabled, activeFramework: this.currentState.activeFramework, lastSwitchedAt: this.currentState.switchedAt.toISOString(), switchReason: this.currentState.switchReason }; const runtimeDir = path.dirname(this.runtimeStatePath); await fs.mkdir(runtimeDir, { recursive: true }); await fs.writeFile( this.runtimeStatePath, JSON.stringify(persistedState, null, 2) ); this.logger.debug(`💾 Framework state saved to ${this.runtimeStatePath}`); } catch (error) { this.logger.error(`❌ Failed to save framework state: ${error instanceof Error ? 
error.message : String(error)}`); } } /** * Validate persisted state structure */ private isValidPersistedState(state: any): state is PersistedFrameworkState { return ( state && typeof state.version === 'string' && typeof state.frameworkSystemEnabled === 'boolean' && typeof state.activeFramework === 'string' && typeof state.lastSwitchedAt === 'string' && typeof state.switchReason === 'string' && ['CAGEERF', 'ReACT', '5W1H', 'SCAMPER'].includes(state.activeFramework) ); } /** * Get active framework definition */ getActiveFramework(): FrameworkDefinition { this.ensureInitialized(); const framework = this.frameworkManager!.getFramework(this.currentState.activeFramework); if (!framework) { throw new Error(`Active framework '${this.currentState.activeFramework}' not found`); } return framework; } /** * Get all available frameworks */ getAvailableFrameworks(): FrameworkDefinition[] { this.ensureInitialized(); return this.frameworkManager!.listFrameworks(true); // Only enabled frameworks } /** * Switch to a different framework */ async switchFramework(request: FrameworkSwitchRequest): Promise<boolean> { this.ensureInitialized(); const startTime = performance.now(); this.switchingMetrics.totalSwitches++; try { this.logger.info(`Attempting to switch framework from '${this.currentState.activeFramework}' to '${request.targetFramework}'`); // Validate target framework exists const targetFramework = this.frameworkManager!.getFramework(request.targetFramework); if (!targetFramework) { const availableFrameworks = this.frameworkManager!.listFrameworks().map(f => f.id).join(', '); const errorMsg = `Target framework '${request.targetFramework}' not found. Available frameworks: [${availableFrameworks}]`; this.logger.error(errorMsg); throw new Error(errorMsg); } this.logger.debug(`Target framework found: ${targetFramework.name} (${targetFramework.id})`); if (!targetFramework.enabled) { const errorMsg = `Target framework '${request.targetFramework}' (${targetFramework.name}) is disabled`; this.logger.error(errorMsg); throw new Error(errorMsg); } // Check if already active if (this.currentState.activeFramework === request.targetFramework) { this.logger.info(`Framework '${request.targetFramework}' is already active`); return true; } // Perform the switch const previousFramework = this.currentState.activeFramework; const switchReason = request.reason || `Switched to ${request.targetFramework}`; // Update state this.currentState = { activeFramework: request.targetFramework, previousFramework: previousFramework, switchedAt: new Date(), switchReason: switchReason, isHealthy: true, frameworkSystemEnabled: this.currentState.frameworkSystemEnabled, switchingMetrics: { switchCount: this.currentState.switchingMetrics.switchCount + 1, averageResponseTime: this.currentState.switchingMetrics.averageResponseTime, errorCount: this.currentState.switchingMetrics.errorCount } }; // Record switch history this.switchHistory.push({ from: previousFramework, to: request.targetFramework, timestamp: new Date(), reason: switchReason }); // Update switching performance metrics const switchTime = performance.now() - startTime; this.updateSwitchingMetrics(switchTime, true); // Save state to file this.saveStateToFile().catch(error => { this.logger.error(`Failed to persist framework switch state: ${error instanceof Error ? 
error.message : String(error)}`); }); this.logger.info(`✅ Framework switch successful: '${previousFramework}' -> '${request.targetFramework}' (${switchTime.toFixed(1)}ms)`); this.logger.info(`New active framework: ${targetFramework.name} - ${targetFramework.description}`); // Emit events this.emit('framework-switched', previousFramework, request.targetFramework, switchReason); this.emit('health-changed', this.getSystemHealth()); return true; } catch (error) { const switchTime = performance.now() - startTime; this.updateSwitchingMetrics(switchTime, false); this.switchingMetrics.errorCount++; this.currentState.switchingMetrics.errorCount++; this.currentState.isHealthy = false; this.logger.error(`Failed to switch framework to '${request.targetFramework}':`, error); this.emit('framework-error', request.targetFramework, error instanceof Error ? error : new Error(String(error))); return false; } } /** * Generate execution context using active framework */ generateExecutionContext(prompt: any, criteria?: FrameworkSelectionCriteria): FrameworkExecutionContext | null { this.ensureInitialized(); // NEW: Return null if framework system is disabled if (!this.currentState.frameworkSystemEnabled) { return null; } // Use framework manager to generate context with active framework const mergedCriteria: FrameworkSelectionCriteria = { userPreference: this.currentState.activeFramework as any, ...criteria }; return this.frameworkManager!.generateExecutionContext(prompt, mergedCriteria); } /** * Get framework system health */ getSystemHealth(): FrameworkSystemHealth { this.ensureInitialized(); const issues: string[] = []; let status: "healthy" | "degraded" | "error" = "healthy"; // Check for health issues if (this.currentState.switchingMetrics.errorCount > 0) { issues.push(`${this.currentState.switchingMetrics.errorCount} framework switching errors detected`); status = this.currentState.switchingMetrics.errorCount > 5 ? "error" : "degraded"; } if (!this.currentState.isHealthy) { issues.push("Framework system is in unhealthy state"); status = "error"; } const activeFramework = this.frameworkManager!.getFramework(this.currentState.activeFramework); if (!activeFramework?.enabled) { issues.push(`Active framework '${this.currentState.activeFramework}' is disabled`); status = "error"; } return { status, activeFramework: this.currentState.activeFramework, frameworkSystemEnabled: this.currentState.frameworkSystemEnabled, // NEW: Include enabled state availableFrameworks: this.frameworkManager!.listFrameworks(true).map(f => f.id), lastSwitchTime: this.switchHistory.length > 0 ? this.switchHistory[this.switchHistory.length - 1].timestamp : null, switchingMetrics: { ...this.switchingMetrics }, issues }; } /** * Get framework switch history */ getSwitchHistory(limit?: number): Array<{ from: string; to: string; timestamp: Date; reason: string }> { const history = [...this.switchHistory].reverse(); // Most recent first return limit ? 
history.slice(0, limit) : history; } /** * Reset switching performance metrics */ resetMetrics(): void { this.switchingMetrics = { totalSwitches: 0, successfulSwitches: 0, failedSwitches: 0, averageResponseTime: 0, errorCount: 0 }; this.currentState.switchingMetrics = { switchCount: 0, averageResponseTime: 0, errorCount: 0 }; this.logger.info("Framework state manager switching metrics reset"); } /** * Enable the framework system */ enableFrameworkSystem(reason?: string): void { this.ensureInitialized(); if (this.currentState.frameworkSystemEnabled) { this.logger.info("Framework system is already enabled"); return; } const enableReason = reason || "Framework system enabled"; this.currentState.frameworkSystemEnabled = true; this.currentState.switchReason = enableReason; this.currentState.switchedAt = new Date(); this.logger.info(`✅ Framework system enabled: ${enableReason}`); // Save state to file this.saveStateToFile().catch(error => { this.logger.error(`Failed to persist framework enable state: ${error instanceof Error ? error.message : String(error)}`); }); // Emit events this.emit('framework-system-toggled', true, enableReason); this.emit('health-changed', this.getSystemHealth()); } /** * Disable the framework system */ disableFrameworkSystem(reason?: string): void { this.ensureInitialized(); if (!this.currentState.frameworkSystemEnabled) { this.logger.info("Framework system is already disabled"); return; } const disableReason = reason || "Framework system disabled"; this.currentState.frameworkSystemEnabled = false; this.currentState.switchReason = disableReason; this.currentState.switchedAt = new Date(); this.logger.info(`🚫 Framework system disabled: ${disableReason}`); // Save state to file this.saveStateToFile().catch(error => { this.logger.error(`Failed to persist framework disable state: ${error instanceof Error ? error.message : String(error)}`); }); // Emit events this.emit('framework-system-toggled', false, disableReason); this.emit('health-changed', this.getSystemHealth()); } /** * Check if framework system is enabled */ isFrameworkSystemEnabled(): boolean { this.ensureInitialized(); return this.currentState.frameworkSystemEnabled; } /** * Set framework system enabled state (for config loading) */ setFrameworkSystemEnabled(enabled: boolean, reason?: string): void { if (enabled) { this.enableFrameworkSystem(reason || "Loaded from configuration"); } else { this.disableFrameworkSystem(reason || "Loaded from configuration"); } } // Private helper methods private ensureInitialized(): void { if (!this.isInitialized || !this.frameworkManager) { throw new Error("FrameworkStateManager not initialized. 
Call initialize() first."); } } private updateSwitchingMetrics(responseTime: number, success: boolean): void { if (success) { this.switchingMetrics.successfulSwitches++; } else { this.switchingMetrics.failedSwitches++; } // Update average response time for switching operations const totalOperations = this.switchingMetrics.successfulSwitches + this.switchingMetrics.failedSwitches; this.switchingMetrics.averageResponseTime = (this.switchingMetrics.averageResponseTime * (totalOperations - 1) + responseTime) / totalOperations; this.currentState.switchingMetrics.averageResponseTime = this.switchingMetrics.averageResponseTime; } } /** * Create and initialize framework state manager */ export async function createFrameworkStateManager(logger: Logger, serverRoot?: string): Promise<FrameworkStateManager> { const manager = new FrameworkStateManager(logger, serverRoot); await manager.initialize(); return manager; } ``` -------------------------------------------------------------------------------- /server/src/index.ts: -------------------------------------------------------------------------------- ```typescript /** * MCP Claude Prompts Server - Main Entry Point * Minimal entry point with comprehensive error handling, health checks, and validation */ import { Logger } from "./logging/index.js"; import { startApplication } from "./runtime/application.js"; import { ConfigManager } from "./config/index.js"; /** * Health check and validation state */ interface ApplicationHealth { startup: boolean; modules: boolean; server: boolean; lastCheck: number; } /** * Application state for health monitoring and rollback */ let applicationHealth: ApplicationHealth = { startup: false, modules: false, server: false, lastCheck: Date.now(), }; let orchestrator: any = null; let logger: Logger | null = null; let isShuttingDown = false; /** * Validate application health */ async function validateApplicationHealth(): Promise<boolean> { try { if (!orchestrator) { return false; } // Use the orchestrator's comprehensive health validation const healthCheck = orchestrator.validateHealth(); // Update health state with detailed information applicationHealth = { startup: healthCheck.modules.foundation, modules: healthCheck.modules.modulesInitialized, server: healthCheck.modules.serverRunning, lastCheck: Date.now(), }; // Log health issues if any if (!healthCheck.healthy && logger && healthCheck.issues.length > 0) { logger.warn("Health validation found issues:", healthCheck.issues); } return healthCheck.healthy; } catch (error) { if (logger) { logger.error("Health validation failed:", error); } return false; } } /** * Rollback mechanism for startup failures */ async function rollbackStartup(error: Error): Promise<void> { // Use stderr for error output to avoid interfering with stdio transport console.error("Critical startup failure, attempting rollback:", error); try { if (orchestrator) { console.error( "Attempting graceful shutdown of partial initialization..." 
); await orchestrator.shutdown(); orchestrator = null; } // Reset health state applicationHealth = { startup: false, modules: false, server: false, lastCheck: Date.now(), }; console.error("Rollback completed"); } catch (rollbackError) { console.error("Error during rollback:", rollbackError); } } /** * Check if we're running in a test environment */ function isTestEnvironment(): boolean { return ( process.env.NODE_ENV === 'test' || process.argv.includes('--suppress-debug') || process.argv.includes('--test-mode') || // Detect GitHub Actions CI environment process.env.GITHUB_ACTIONS === 'true' || process.env.CI === 'true' || // Detect common test runner patterns process.argv.some(arg => arg.includes('test') || arg.includes('jest') || arg.includes('mocha')) || // Detect if called from integration test scripts process.argv[1]?.includes('tests/scripts/') ); } /** * Setup periodic health checks * SUPPRESSED in test environments to prevent hanging processes */ function setupHealthMonitoring(): void { if (!logger) return; // Skip health monitoring in test environments to prevent hanging processes if (isTestEnvironment()) { logger.debug("Health monitoring suppressed in test environment"); return; } // Health check every 30 seconds const healthInterval = setInterval(async () => { if (isShuttingDown || !logger) return; try { const isHealthy = await validateApplicationHealth(); if (!isHealthy) { logger.warn("Health check failed - application may be degraded"); // Log current status for debugging if (orchestrator) { const diagnostics = orchestrator.getDiagnosticInfo(); logger.warn("Diagnostic information:", { health: diagnostics.health, performance: diagnostics.performance, errors: diagnostics.errors, }); } } else { // Periodic performance logging (every 5th health check = 2.5 minutes) if (Date.now() % (5 * 30000) < 30000) { const performance = orchestrator.getPerformanceMetrics(); logger.info("Performance metrics:", { uptime: `${Math.floor(performance.uptime / 60)} minutes`, memoryUsage: `${Math.round( performance.memoryUsage.heapUsed / 1024 / 1024 )}MB`, prompts: performance.application.promptsLoaded, categories: performance.application.categoriesLoaded, }); } } } catch (error) { logger.error("Error during health check:", error); // Emergency diagnostic collection try { const emergency = getDetailedDiagnostics(); logger.error("Emergency diagnostics:", emergency); } catch (diagError) { logger.error("Failed to collect emergency diagnostics:", diagError); } } }, 30000); logger.info( "Health monitoring enabled (30-second intervals with performance tracking)" ); } /** * Setup comprehensive error handlers */ function setupErrorHandlers(): void { // Handle uncaught exceptions with rollback process.on("uncaughtException", async (error) => { console.error("Uncaught exception detected:", error); if (logger) { logger.error( "Uncaught exception - initiating emergency shutdown:", error ); } isShuttingDown = true; try { if (orchestrator) { await orchestrator.shutdown(); } } catch (shutdownError) { console.error("Error during emergency shutdown:", shutdownError); } process.exit(1); }); // Handle unhandled promise rejections with rollback process.on("unhandledRejection", async (reason, promise) => { console.error( "Unhandled promise rejection at:", promise, "reason:", reason ); if (logger) { logger.error( "Unhandled promise rejection - initiating emergency shutdown:", { reason, promise } ); } isShuttingDown = true; try { if (orchestrator) { await orchestrator.shutdown(); } } catch (shutdownError) { 
console.error("Error during emergency shutdown:", shutdownError); } process.exit(1); }); // Handle SIGINT (Ctrl+C) gracefully process.on("SIGINT", async () => { if (logger) { logger.info("Received SIGINT (Ctrl+C), initiating graceful shutdown..."); } else { console.error( "Received SIGINT (Ctrl+C), initiating graceful shutdown..." ); } await gracefulShutdown(0); }); // Handle SIGTERM gracefully process.on("SIGTERM", async () => { if (logger) { logger.info("Received SIGTERM, initiating graceful shutdown..."); } else { console.error("Received SIGTERM, initiating graceful shutdown..."); } await gracefulShutdown(0); }); } /** * Graceful shutdown with validation */ async function gracefulShutdown(exitCode: number = 0): Promise<void> { if (isShuttingDown) { return; // Prevent multiple shutdown attempts } isShuttingDown = true; try { if (logger) { logger.info("Starting graceful shutdown sequence..."); } // Validate current state before shutdown if (orchestrator) { const status = orchestrator.getStatus(); if (logger) { logger.info("Application status before shutdown:", status); } // Perform graceful shutdown await orchestrator.shutdown(); if (logger) { logger.info("Orchestrator shutdown completed successfully"); } } // Final health state update applicationHealth = { startup: false, modules: false, server: false, lastCheck: Date.now(), }; if (logger) { logger.info("Graceful shutdown completed successfully"); } else { console.error("Graceful shutdown completed successfully"); } } catch (error) { if (logger) { logger.error("Error during graceful shutdown:", error); } else { console.error("Error during graceful shutdown:", error); } exitCode = 1; } process.exit(exitCode); } /** * Display help information */ function showHelp(): void { console.log(` MCP Claude Prompts Server v1.3.0 - Consolidated Architecture with Systematic Framework Application USAGE: node dist/index.js [OPTIONS] OPTIONS: --transport=TYPE Transport type: stdio (default) or sse --quiet Minimal output mode (production-friendly) --verbose Detailed diagnostics and strategy information --debug-startup Alias for --verbose with extra debugging --startup-test Validate startup and exit (for testing) --help Show this help message ENVIRONMENT VARIABLES: MCP_SERVER_ROOT Override server root directory detection (recommended) MCP_PROMPTS_CONFIG_PATH Direct path to prompts configuration file OPTIMIZED STARTUP MODES: Production: node dist/index.js --quiet --transport=stdio Development: node dist/index.js --verbose --transport=sse Debugging: node dist/index.js --debug-startup Silent: node dist/index.js --quiet EXAMPLES: # Standard usage node dist/index.js # Claude Desktop (recommended configuration) node dist/index.js --transport=stdio --quiet # Development with detailed logging node dist/index.js --verbose --transport=sse # With environment override (fastest startup) MCP_SERVER_ROOT=/path/to/server node dist/index.js --quiet PERFORMANCE FEATURES: ✓ Optimized strategy ordering (fastest detection first) ✓ Early termination on first success ✓ Environment variable bypass for instant detection ✓ Conditional logging based on verbosity level ✓ Intelligent fallback with user guidance TROUBLESHOOTING: Use --verbose to see detailed server root detection strategies Set MCP_SERVER_ROOT environment variable for instant path detection Use --quiet in production for clean startup logs For more information, visit: https://github.com/minipuft/claude-prompts-mcp `); } /** * Parse and validate command line arguments */ function parseCommandLineArgs(): { shouldExit: 
boolean; exitCode: number } { const args = process.argv.slice(2); // Check for help flag if (args.includes("--help") || args.includes("-h")) { showHelp(); return { shouldExit: true, exitCode: 0 }; } // Validate transport argument const transportArg = args.find((arg) => arg.startsWith("--transport=")); if (transportArg) { const transport = transportArg.split("=")[1]; if (!["stdio", "sse"].includes(transport)) { console.error( `Error: Invalid transport '${transport}'. Supported: stdio, sse` ); console.error("Use --help for usage information"); return { shouldExit: true, exitCode: 1 }; } } // Validate that conflicting flags aren't used together const isQuiet = args.includes("--quiet"); const isVerbose = args.includes("--verbose") || args.includes("--debug-startup"); if (isQuiet && isVerbose) { console.error("Error: Cannot use --quiet and --verbose flags together"); console.error("Use --help for usage information"); return { shouldExit: true, exitCode: 1 }; } return { shouldExit: false, exitCode: 0 }; } /** * Main application entry point with comprehensive error handling and validation */ async function main(): Promise<void> { try { // Parse and validate command line arguments const { shouldExit, exitCode } = parseCommandLineArgs(); if (shouldExit) { process.exit(exitCode); } // Check for startup validation mode (for GitHub Actions) const args = process.argv.slice(2); const isStartupTest = args.includes('--startup-test'); const isCI = process.env.CI === 'true' || process.env.NODE_ENV === 'test'; const isVerbose = args.includes('--verbose') || args.includes('--debug-startup'); if (isStartupTest && isVerbose) { // In CI mode, use console.log for debug to avoid stderr issues const debugLog = isCI ? console.log : console.error; debugLog("DEBUG: Running in startup validation mode"); debugLog(`DEBUG: Platform: ${process.platform}`); debugLog(`DEBUG: Node.js version: ${process.version}`); debugLog(`DEBUG: Working directory: ${process.cwd()}`); debugLog(`DEBUG: MCP_SERVER_ROOT: ${process.env.MCP_SERVER_ROOT || 'not set'}`); debugLog(`DEBUG: MCP_PROMPTS_CONFIG_PATH: ${process.env.MCP_PROMPTS_CONFIG_PATH || 'not set'}`); } // Setup error handlers first setupErrorHandlers(); // Use appropriate output stream based on environment - only if verbose if (isVerbose) { const statusLog = isCI ? console.log : console.error; statusLog("Starting MCP Claude Prompts Server..."); } // Initialize the application using the orchestrator const debugLog = isCI ? console.log : console.error; if (isVerbose) { debugLog("DEBUG: About to call startApplication()..."); } try { orchestrator = await startApplication(); if (isVerbose) { debugLog("DEBUG: startApplication() completed successfully"); } } catch (startupError) { const error = startupError instanceof Error ? 
startupError : new Error(String(startupError)); if (isVerbose) { debugLog("DEBUG: startApplication() failed with error:", error.message); debugLog("DEBUG: Error stack:", error.stack); } // Additional diagnostics for Windows if (isVerbose && process.platform === 'win32') { debugLog("DEBUG: Windows-specific diagnostics:"); debugLog(`DEBUG: Process argv: ${JSON.stringify(process.argv)}`); debugLog(`DEBUG: Environment keys: ${Object.keys(process.env).filter(k => k.startsWith('MCP_')).join(', ')}`); // Check if paths exist const fs = await import('fs'); const path = await import('path'); const serverRoot = process.env.MCP_SERVER_ROOT || process.cwd(); debugLog(`DEBUG: Checking server root: ${serverRoot}`); debugLog(`DEBUG: Server root exists: ${fs.existsSync(serverRoot)}`); const configPath = path.join(serverRoot, 'config.json'); debugLog(`DEBUG: Config path: ${configPath}`); debugLog(`DEBUG: Config exists: ${fs.existsSync(configPath)}`); // Use ConfigManager for consistent path resolution try { const tempConfigManager = new ConfigManager(configPath); await tempConfigManager.loadConfig(); const promptsConfigPath = tempConfigManager.getPromptsFilePath(); debugLog(`DEBUG: Prompts config path: ${promptsConfigPath}`); debugLog(`DEBUG: Prompts config exists: ${fs.existsSync(promptsConfigPath)}`); } catch (tempError) { debugLog(`DEBUG: Could not load config for path debugging: ${tempError}`); } } throw error; } // Get logger reference for global error handling if (isVerbose) { debugLog("DEBUG: Getting logger reference..."); } const modules = orchestrator.getModules(); logger = modules.logger; if (isVerbose) { debugLog("DEBUG: Logger reference obtained"); } // Validate initial startup with detailed diagnostics if (isVerbose) { debugLog("DEBUG: About to validate application health..."); } const initialHealth = await validateApplicationHealth(); if (isVerbose) { debugLog("DEBUG: Health validation result:", initialHealth); } if (!initialHealth) { // Get detailed health info for debugging const healthDetails = orchestrator.validateHealth(); if (isVerbose) { debugLog("DEBUG: Detailed health check results:", JSON.stringify(healthDetails, null, 2)); } throw new Error( "Initial health validation failed - application may not be properly initialized. " + "Health details: " + JSON.stringify(healthDetails.issues) ); } // If this is a startup test, exit successfully after validation if (isStartupTest) { if (isVerbose) { const successLog = isCI ? 
console.log : console.error; successLog("✅ MCP Claude Prompts Server startup validation completed successfully"); successLog("✅ All phases completed: Foundation → Data Loading → Module Initialization → Server Setup"); successLog("✅ Health validation passed - server is ready for operation"); } await orchestrator.shutdown(); process.exit(0); } // Log successful startup with details if (logger) { logger.info("🚀 MCP Claude Prompts Server started successfully"); // Log comprehensive application status const status = orchestrator.getStatus(); logger.info("📊 Application status:", { running: status.running, transport: status.transport, promptsLoaded: status.promptsLoaded, categoriesLoaded: status.categoriesLoaded, uptime: process.uptime(), memoryUsage: process.memoryUsage(), pid: process.pid, nodeVersion: process.version, }); // Setup health monitoring setupHealthMonitoring(); // Log successful complete initialization logger.info( "✅ Application initialization completed - all systems operational" ); } } catch (error) { // Comprehensive error handling with rollback console.error("❌ Failed to start MCP Claude Prompts Server:", error); if (logger) { logger.error("Fatal startup error:", error); } // Attempt rollback await rollbackStartup( error instanceof Error ? error : new Error(String(error)) ); // Exit with error code process.exit(1); } } /** * Export health check function for external monitoring */ export function getApplicationHealth(): ApplicationHealth { return { ...applicationHealth }; } /** * Export orchestrator diagnostic information for external monitoring */ export function getDetailedDiagnostics(): any { if (!orchestrator) { return { available: false, reason: "Orchestrator not initialized", timestamp: new Date().toISOString(), }; } try { return { available: true, timestamp: new Date().toISOString(), ...orchestrator.getDiagnosticInfo(), }; } catch (error) { return { available: false, reason: `Error collecting diagnostics: ${ error instanceof Error ? error.message : String(error) }`, timestamp: new Date().toISOString(), }; } } /** * Export graceful shutdown for external management */ export { gracefulShutdown }; // Start the application with comprehensive error handling main().catch(async (error) => { console.error("💥 Fatal error during startup:", error); // Final fallback - attempt rollback and exit await rollbackStartup( error instanceof Error ? 
error : new Error(String(error)) ); process.exit(1); }); ``` -------------------------------------------------------------------------------- /server/src/frameworks/methodology/guides/react-guide.ts: -------------------------------------------------------------------------------- ```typescript /** * ReACT Methodology Guide * Provides guidance for applying ReACT (Reasoning and Acting) methodology to prompt creation, * processing, and execution without hijacking semantic analysis functionality */ import type { ConvertedPrompt } from "../../../types/index.js"; import { ContentAnalysisResult } from "../../../semantic/configurable-semantic-analyzer.js"; import { IMethodologyGuide, BaseMethodologyGuide, PromptCreationGuidance, ProcessingGuidance, StepGuidance, MethodologyEnhancement, MethodologyValidation, ProcessingStep, ExecutionStep, QualityGate, TemplateEnhancement, MethodologyToolDescriptions } from "../interfaces.js"; /** * ReACT Methodology Guide Implementation * Guides the application of ReACT (Reasoning and Acting) principles without replacing semantic analysis */ export class ReACTMethodologyGuide extends BaseMethodologyGuide { readonly frameworkId = "react"; readonly frameworkName = "ReACT Framework"; readonly methodology = "ReACT"; readonly version = "1.0.0"; /** * Guide prompt creation using ReACT structure * Helps users create prompts that follow ReACT methodology */ guidePromptCreation(intent: string, context?: Record<string, any>): PromptCreationGuidance { return { structureGuidance: { systemPromptSuggestions: [ "Begin with clear reasoning about the problem", "Define specific actions to take", "Establish observation and feedback mechanisms", "Plan reasoning adjustment based on observations", "Set criteria for cycle completion" ], userTemplateSuggestions: [ "Structure request using Reason-Act-Observe cycles", "Provide clear problem statement for reasoning", "Define specific actions that can be taken", "Specify what observations should be made", "Indicate success criteria for completion" ], argumentSuggestions: [ { name: "problem", type: "string", description: "The problem or challenge to address systematically", methodologyReason: "ReACT requires clear problem definition for effective reasoning", examples: ["performance optimization", "user experience issue", "technical implementation"] }, { name: "actions", type: "array", description: "Potential actions that can be taken", methodologyReason: "ReACT methodology emphasizes specific, purposeful actions", examples: ["analyze metrics", "test hypothesis", "implement solution"] }, { name: "success_criteria", type: "string", description: "Clear criteria for when the objective is achieved", methodologyReason: "ReACT cycles need defined completion points", examples: ["performance improves by 20%", "user satisfaction increases", "tests pass"] } ] }, methodologyElements: { requiredSections: ["Reason", "Act", "Observe"], optionalSections: ["Adjust", "Continue"], sectionDescriptions: { "Reason": "Think through the problem systematically and plan approach", "Act": "Take specific, purposeful actions based on reasoning", "Observe": "Analyze results, feedback, and outcomes of actions", "Adjust": "Modify reasoning and approach based on observations", "Continue": "Repeat cycles until objective is achieved" } }, qualityGuidance: { clarityEnhancements: [ "Make reasoning explicit and traceable", "Define actions with clear, measurable outcomes", "Specify what to observe and how to measure results" ], completenessChecks: [ "Ensure reasoning addresses the 
core problem", "Verify actions are specific and actionable", "Confirm observation mechanisms are defined" ], specificityImprovements: [ "Replace abstract reasoning with concrete analysis steps", "Define specific metrics for observation phase", "Include clear decision points for cycle continuation" ] } }; } /** * Guide template processing with ReACT methodology */ guideTemplateProcessing(template: string, executionType: string): ProcessingGuidance { const reactSteps: ProcessingStep[] = [ { id: "reasoning_phase", name: "Reasoning Phase", description: "Think through the problem systematically and develop approach", methodologyBasis: "ReACT Reasoning phase", order: 1, required: true }, { id: "action_planning", name: "Action Planning", description: "Plan specific, purposeful actions based on reasoning", methodologyBasis: "ReACT Action phase", order: 2, required: true }, { id: "action_execution", name: "Action Execution", description: "Execute planned actions systematically", methodologyBasis: "ReACT Action phase", order: 3, required: true }, { id: "observation_analysis", name: "Observation Analysis", description: "Analyze results, feedback, and outcomes", methodologyBasis: "ReACT Observe phase", order: 4, required: true }, { id: "reasoning_adjustment", name: "Reasoning Adjustment", description: "Adjust approach based on observations", methodologyBasis: "ReACT cycle continuation", order: 5, required: false }, { id: "cycle_evaluation", name: "Cycle Evaluation", description: "Evaluate if objective is achieved or cycle should continue", methodologyBasis: "ReACT cycle management", order: 6, required: false } ]; return { processingSteps: reactSteps, templateEnhancements: { systemPromptAdditions: [ "Apply ReACT methodology with reasoning-action cycles", "Think systematically before taking actions", "Observe and analyze results after each action", "Adjust approach based on observations" ], userPromptModifications: [ "Structure response using Reason-Act-Observe cycles", "Make reasoning explicit and traceable", "Include observation and adjustment phases" ], contextualHints: [ "Focus on systematic problem-solving approach", "Emphasize learning from action outcomes", "Apply iterative reasoning improvement" ] }, executionFlow: { preProcessingSteps: [ "Validate problem definition clarity", "Confirm available actions are specified", "Verify observation mechanisms are defined" ], postProcessingSteps: [ "Review reasoning-action alignment", "Assess observation completeness", "Evaluate cycle termination criteria" ], validationSteps: [ "Reasoning quality check", "Action specificity verification", "Observation mechanism validation", "Cycle completion assessment" ] } }; } /** * Guide execution steps using ReACT methodology */ guideExecutionSteps(prompt: ConvertedPrompt, semanticAnalysis: ContentAnalysisResult): StepGuidance { const executionSteps: ExecutionStep[] = [ { id: "systematic_reasoning", name: "Systematic Reasoning", action: "Think through the problem systematically and develop initial approach", methodologyPhase: "Reason", dependencies: [], expected_output: "Clear reasoning about problem and planned approach" }, { id: "purposeful_action", name: "Purposeful Action", action: "Take specific, measurable actions based on reasoning", methodologyPhase: "Act", dependencies: ["systematic_reasoning"], expected_output: "Concrete actions taken with clear objectives" }, { id: "result_observation", name: "Result Observation", action: "Observe and analyze results, feedback, and outcomes", methodologyPhase: "Observe", 
dependencies: ["purposeful_action"], expected_output: "Detailed analysis of action results and feedback" }, { id: "reasoning_adjustment", name: "Reasoning Adjustment", action: "Adjust reasoning and approach based on observations", methodologyPhase: "Adjust", dependencies: ["result_observation"], expected_output: "Updated reasoning and modified approach" }, { id: "cycle_continuation", name: "Cycle Continuation", action: "Determine if objective is achieved or if cycle should continue", methodologyPhase: "Continue", dependencies: ["reasoning_adjustment"], expected_output: "Decision on cycle completion or continuation" } ]; // Adjust steps based on execution type from semantic analyzer const stepEnhancements: Record<string, string[]> = {}; const stepValidation: Record<string, string[]> = {}; if (semanticAnalysis.executionType === "chain") { if (semanticAnalysis.executionCharacteristics.advancedChainFeatures?.requiresAdvancedExecution) { // Advanced chains with workflow-like features stepEnhancements["reasoning_adjustment"] = [ "Define decision points and branching logic", "Plan workflow state transitions based on observations", "Establish error handling for failed actions" ]; stepValidation["reasoning_adjustment"] = [ "Decision logic validation", "State transition verification", "Error handling completeness check" ]; } else { // Basic chains stepEnhancements["systematic_reasoning"] = [ "Plan reasoning steps in sequence", "Define dependencies between reasoning phases", "Establish clear handoff points between steps" ]; stepValidation["systematic_reasoning"] = [ "Step sequence validation", "Dependency chain verification", "Handoff point adequacy check" ]; } } return { stepSequence: executionSteps, stepEnhancements, stepValidation }; } /** * Enhance execution with ReACT methodology */ enhanceWithMethodology(prompt: ConvertedPrompt, context: Record<string, any>): MethodologyEnhancement { const reactGates: QualityGate[] = [ { id: "reasoning_quality", name: "Reasoning Quality", description: "Verify systematic and logical reasoning approach", methodologyArea: "Reason", validationCriteria: [ "Problem analysis is systematic and thorough", "Reasoning is explicit and traceable", "Approach is logically structured" ], priority: "high" }, { id: "action_specificity", name: "Action Specificity", description: "Ensure actions are specific, purposeful, and measurable", methodologyArea: "Act", validationCriteria: [ "Actions are concrete and specific", "Actions have clear objectives", "Actions are measurable and observable" ], priority: "high" }, { id: "observation_completeness", name: "Observation Completeness", description: "Validate comprehensive observation and analysis of results", methodologyArea: "Observe", validationCriteria: [ "Results are observed systematically", "Feedback is analyzed thoroughly", "Outcomes are measured against objectives" ], priority: "high" }, { id: "cycle_effectiveness", name: "Cycle Effectiveness", description: "Assess effectiveness of reasoning-action cycles", methodologyArea: "Continue", validationCriteria: [ "Cycles show learning and improvement", "Adjustments are based on observations", "Progress toward objective is evident" ], priority: "medium" } ]; const templateSuggestions: TemplateEnhancement[] = [ { section: "system", type: "addition", description: "Add ReACT methodology guidance", content: "Apply the ReACT methodology: Reason through the problem systematically, take specific purposeful Actions, Observe results and feedback, then adjust your reasoning based on observations. 
Continue cycles until the objective is achieved.", methodologyJustification: "Ensures systematic application of reasoning-action cycles", impact: "high" }, { section: "user", type: "structure", description: "Structure response using ReACT cycles", content: "Please structure your response using ReACT cycles: 1) Reasoning about the problem, 2) Specific actions to take, 3) Observations of results, 4) Reasoning adjustments, 5) Continuation decision.", methodologyJustification: "Guides systematic problem-solving through reasoning-action cycles", impact: "medium" } ]; return { systemPromptGuidance: this.getSystemPromptGuidance(context), processingEnhancements: this.guideTemplateProcessing("", "template").processingSteps, methodologyGates: reactGates, templateSuggestions, enhancementMetadata: this.createEnhancementMetadata( 0.85, "ReACT methodology provides systematic reasoning-action cycles for problem solving" ) }; } /** * Validate methodology compliance */ validateMethodologyCompliance(prompt: ConvertedPrompt): MethodologyValidation { const combinedText = this.getCombinedText(prompt); const text = combinedText.toLowerCase(); // Check for ReACT phase presence const phases = { reason: /reason|think|analy|consider|approach/i.test(text), act: /act|action|implement|execute|do|perform/i.test(text), observe: /observe|result|outcome|feedback|measure|assess/i.test(text), adjust: /adjust|modify|change|improve|refine/i.test(text), continue: /continue|repeat|cycle|iterate/i.test(text) }; const presentPhases = Object.values(phases).filter(Boolean).length; const compliance_score = presentPhases / 5; // 5 ReACT phases const strengths: string[] = []; const improvement_areas: string[] = []; if (phases.reason) strengths.push("Systematic reasoning approach present"); else improvement_areas.push("Add systematic reasoning and problem analysis"); if (phases.act) strengths.push("Action-oriented approach evident"); else improvement_areas.push("Include specific, purposeful actions"); if (phases.observe) strengths.push("Observation and result analysis mentioned"); else improvement_areas.push("Add observation and result analysis"); if (phases.adjust) strengths.push("Adjustment and improvement considered"); else improvement_areas.push("Include reasoning adjustment based on observations"); const specific_suggestions: TemplateEnhancement[] = []; if (!phases.reason) { specific_suggestions.push({ section: "system", type: "addition", description: "Add systematic reasoning approach", content: "Begin by reasoning through the problem systematically before taking actions.", methodologyJustification: "ReACT Reasoning phase requires systematic problem analysis", impact: "high" }); } if (!phases.observe) { specific_suggestions.push({ section: "system", type: "addition", description: "Add observation and result analysis", content: "Observe and analyze results after taking actions to inform next steps.", methodologyJustification: "ReACT Observe phase is crucial for learning and adjustment", impact: "high" }); } return { compliant: compliance_score > 0.6, compliance_score, strengths, improvement_areas, specific_suggestions, methodology_gaps: improvement_areas }; } /** * Get ReACT-specific system prompt guidance */ getSystemPromptGuidance(context: Record<string, any>): string { return `Apply the ReACT methodology systematically: **Reason**: Think through the problem systematically and develop a clear approach **Act**: Take specific, purposeful actions based on your reasoning **Observe**: Analyze results, feedback, and outcomes of your 
actions carefully **Adjust**: Modify your reasoning and approach based on observations **Continue**: Repeat the cycle until the objective is achieved Focus on explicit reasoning, measurable actions, systematic observation, and continuous improvement through reasoning-action cycles. Each cycle should build upon learnings from previous cycles.`; } /** * Get ReACT-specific tool descriptions */ getToolDescriptions(): MethodologyToolDescriptions { return { prompt_engine: { description: "🚀 PROMPT TEMPLATE ENGINE [REACT-ENHANCED]: Processes prompt templates with systematic ReACT (Reasoning-Acting) methodology for iterative problem-solving. Guides systematic Reason → Act → Observe → Adjust cycles for continuous improvement and adaptive execution. WARNING: You are responsible for interpreting and executing the returned content, which contains iterative analytical instructions.", parameters: { execution_mode: "Override intelligent auto-detection with ReACT-aware selection (default: auto, iterative reasoning-enhanced)" } }, prompt_manager: { description: "📝 INTELLIGENT PROMPT MANAGER [REACT-ENHANCED]: Complete lifecycle management with systematic ReACT methodology integration. Creates iterative analysis templates that guide reasoning-action cycles through explicit Reasoning, purposeful Acting, systematic Observation, and continuous Adjustment. Optimized for adaptive problem-solving and iterative improvement tasks.", parameters: { action: "Management action with ReACT iterative approach: 'create_template' (reasoning-action cycle templates), 'analyze_type' (reasoning pattern analysis), 'migrate_type' (adaptive conversion), etc." } }, system_control: { description: "⚙️ INTELLIGENT SYSTEM CONTROL [REACT-ENHANCED]: System administration with ReACT iterative improvement methodology. Guides systematic Reason-Act-Observe-Adjust cycles for framework management, performance monitoring, and adaptive system optimization. Supports continuous improvement through reasoning-based decision making.", parameters: { action: "System action with ReACT methodology: 'switch_framework' (reasoning-based framework selection), 'analytics' (observation-based metrics), 'health' (systematic health reasoning), etc." } } }; } } ``` -------------------------------------------------------------------------------- /docs/version-history.md: -------------------------------------------------------------------------------- ```markdown # Version History ## Claude Prompts MCP Server Version History This document tracks the evolution of the Claude Prompts MCP Server, documenting major releases, features, improvements, and breaking changes. 
--- ## Version 1.2.0 - Execution Mode Enhancement & Architectural Consolidation **Release Date**: January 2025 **Codename**: "Performance & Precision" ### 🎯 Major Features #### **Four-Tier Execution Mode System** - **New Mode**: Added `prompt` mode for direct high-speed variable substitution - **Enhanced MCP Schema**: Full execution mode support: `auto`, `prompt`, `template`, `chain` - **Performance-Aware Auto Detection**: Intelligent structural analysis for optimal mode selection - **Execution Mode Control**: Users can now specify exact execution mode or use improved auto detection #### **Structural Analysis Architecture** - **Reliable Detection Logic**: Removed unreliable "analysis intent" detection, replaced with verifiable structural indicators - **Framework Separation**: Clean separation between current structural capabilities and future LLM semantic analysis - **Future-Proof Stub**: Added `detectAnalysisIntentLLM()` stub method for future semantic integration - **Honest Capabilities**: Documentation now accurately represents what the system actually does #### **System Consolidation & Performance** - **87.5% Architecture Reduction**: Maintained 3 consolidated MCP tools while removing overlapping systems - **Dependency Validation**: Automated system consolidation validation preventing architecture drift - **Execution System Cleanup**: Removed deprecated `UnifiedPromptProcessor`, consolidated on `ExecutionCoordinator` - **Performance Optimization**: Near-instantaneous static prompt execution, optimized template processing ### 🛠️ Infrastructure Improvements #### **MCP Tool Interface Enhancement** - **Complete Mode Coverage**: All four execution modes now accessible via MCP interface - **Backward Compatibility**: Existing `auto` mode continues to work with improved detection logic - **Direct Performance Access**: Users can bypass framework overhead for simple variable substitution - **Consistent Tool Architecture**: Maintained 3-tool consolidation while adding precision control #### **Consolidated MCP Tools (3 Intelligent Tools)** - **`prompt_engine`**: Unified execution with intelligent analysis, four-tier mode system (auto/prompt/template/chain), and semantic detection - **`prompt_manager`**: Complete lifecycle management with smart filtering, advanced search syntax, and analysis-driven discovery - **`system_control`**: Framework management, analytics, health monitoring, and comprehensive system administration #### **Framework Performance Integration** - **Framework-Aware Detection**: Template mode applies methodology guidance (CAGEERF, ReACT, 5W1H, SCAMPER) - **Performance-First Default**: Simple variable substitution defaults to prompt mode (instant execution) - **Quality When Needed**: Complex analysis automatically uses template mode with framework enhancement - **Chain Mode Optimization**: LLM-driven iterative execution remains unchanged and optimized ### 🔧 Technical Enhancements #### **Execution Mode Detection Logic** ```typescript // New Detection Algorithm (Performance-Aware) if (hasChainSteps) return "chain"; else if (hasComplexTemplateLogic) return "template"; else if (hasTemplateVars && hasMultipleArgs) return "template"; else return "prompt"; // Performance-first default ``` #### **Documentation Accuracy Overhaul** - **Four Major Docs Updated**: `mcp-tools-reference.md`, `enhanced-gate-system.md`, `prompt-management.md`, `prompt-vs-template-guide.md` - **Phantom Features Removed**: Eliminated documentation of non-existent tools, APIs, and capabilities - **MCP Protocol 
Focus**: All examples now use actual MCP tool interface instead of fictional HTTP APIs - **Implementation Alignment**: Documentation now matches actual system capabilities ### 📊 Performance Impact #### **Execution Speed Improvements** - **Simple Variable Substitution**: Near-instantaneous execution (significant improvement over framework overhead) - **Static Content**: Instant execution with no processing overhead - **Template Processing**: Optimized processing speed with framework enhancement when needed - **Auto Detection Efficiency**: Substantially reduced unnecessary framework processing for simple cases #### **System Health Validation** - **CI/CD Pipeline**: All GitHub Actions workflows validated with cross-platform testing - **Dependency Validation**: Automated consolidation validation prevents system drift - **TypeScript Compliance**: Full type safety maintained across all changes - **Build Performance**: No impact on compilation or startup times ### 🐛 Architectural Fixes #### **System Consolidation** - **Removed Duplicate Systems**: Fixed dependency validation violations by removing overlapping execution systems - **Path Resolution**: Fixed double `prompts/prompts` path issue in prompt manager - **Import Cleanup**: Removed all references to deprecated systems - **Type Consistency**: Aligned internal types with MCP schema interface #### **Documentation Accuracy** - **Tool Count Correction**: Fixed "20+ tools" claims to accurate "3 consolidated tools" - **Feature Claims**: Removed false "framework effectiveness measurement" and "intelligent selection" claims - **Usage Examples**: All documentation now shows working MCP tool commands - **Architecture Representation**: Documentation reflects actual 87.5% consolidation achievement ### ⚙️ Quality Assurance #### **Testing & Validation** - **All Four Modes Tested**: Verified `auto`, `prompt`, `template`, and `chain` modes work correctly - **Performance Benchmarking**: Confirmed instant static execution and improved auto detection - **CI/CD Validation**: GitHub Actions workflows pass with architectural changes - **Integration Testing**: MCP protocol compliance maintained across all changes #### **Backward Compatibility** - **Zero Breaking Changes**: All existing functionality preserved - **API Compatibility**: MCP tool interface remains consistent for existing users - **Template Processing**: All existing prompts work without modification - **Framework System**: CAGEERF, ReACT, 5W1H, SCAMPER methodologies unchanged ### 🎯 Key Benefits #### **Performance Optimization** - **Substantial Reduction** in unnecessary framework processing for simple variable substitution - **Instant Execution** for static content and simple prompts - **Smart Performance Defaults** that choose speed when quality enhancement isn't needed - **Precision Control** for users who want specific execution behavior #### **Architectural Integrity** - **Honest Documentation** that accurately represents system capabilities - **Clean Separation** between current structural analysis and future semantic capabilities - **System Consolidation** maintained while adding user control options - **Future-Ready Architecture** with proper stubs for LLM semantic integration #### **Developer Experience** - **Complete Execution Control**: Direct access to all four execution modes - **Reliable Auto Detection**: Structural-only logic with 90% confidence rating - **Clear Performance Trade-offs**: Documentation clearly explains speed vs quality options - **Validated Architecture**: Automated 
checks prevent system architecture drift --- ## Version 1.1.0 - Framework System Enhancement & Gate Integration **Release Date**: December 2024 **Codename**: "Framework Foundation" ### 🎯 Major Features #### **Framework Methodology System** - **Framework Manager**: Stateless framework orchestration with methodology guide loading - **Framework State Manager**: Stateful framework tracking with runtime switching capabilities - **Methodology Guides**: CAGEERF, ReACT, 5W1H, SCAMPER framework implementations - **Framework Integration**: Semantic analysis coordination with framework selection #### **Enhanced Gate Validation System** - **Gate Registry**: Centralized validation rule management with framework awareness - **Framework-Aware Gates**: Validation criteria that adapt based on active framework - **Multi-Level Validation**: Support for validation, approval, condition, and quality gate types - **Integration Layer**: Framework-semantic integration for intelligent validation #### **Execution Strategy Architecture** - **Strategy Pattern**: Prompt, chain, and workflow execution strategies - **Execution Engine**: Orchestrated execution with context management and error recovery - **Template Processing Pipeline**: Framework injection with Nunjucks template processing - **Conversation State**: Framework-aware conversation enhancement and state persistence #### **System Integration & Architecture** - **Semantic Analyzer**: Configurable analysis with multiple integration modes - **Multi-Phase Startup**: Orchestrated initialization with dependency management - **Framework Switching**: Runtime framework changes with state persistence - **Integration Factory**: Analysis integration factory for flexible semantic analysis ### 🛠️ Infrastructure Improvements #### **Enhanced Type System** - **Framework Interface Types**: Complete type definitions for methodology guide contracts - **Execution Strategy Types**: Enhanced interfaces for strategy pattern implementation - **Integration Types**: Comprehensive types for framework-semantic integration #### **Methodology Guide Interface** - **IMethodologyGuide Contract**: Standardized interface for all framework implementations - **Framework Integration Points**: Clear integration patterns for framework-aware components - **Semantic Analysis Coordination**: Structured interfaces for analysis-framework cooperation #### **MCP Architecture Foundation** - **Pre-Consolidation Tools**: Early MCP tool implementations before major consolidation - **Framework Integration**: Initial framework system integration with MCP protocol - **Multi-Transport Support**: STDIO and SSE transport layer implementations ### 🔧 Technical Enhancements #### **Framework Methodology Implementation** - **CAGEERF Methodology**: Context, Analysis, Goals, Execution, Evaluation, Refinement, Framework - **ReACT Methodology**: Reasoning and Acting pattern for systematic problem-solving - **5W1H Methodology**: Who, What, When, Where, Why, How systematic analysis - **SCAMPER Methodology**: Creative problem-solving framework implementation #### **Framework State Management** - **Active Framework Tracking**: Runtime framework state with switching capabilities - **Framework History**: Framework change tracking and performance monitoring - **State Persistence**: Framework state maintained across server sessions - **Event-Driven Communication**: Framework state changes communicated through events #### **Gate Integration System** - **Framework-Aware Evaluation**: Gate validation that adapts to active framework - 
**Strategy-Based Gates**: Gate evaluation integrated with execution strategy pattern - **Methodology Validation**: Framework-specific validation criteria and quality gates - **Multi-Level Gate Support**: Validation, approval, condition, and quality gate types ### 📝 Template Updates #### **Framework-Enhanced Templates** - **Chain Execution Type**: Templates enhanced with framework methodology guidance - **Structured Output**: Multi-step analysis process with framework-specific instructions - **Quality Integration**: Built-in framework validation for template completeness #### **Methodology-Aware Processing** - **Framework Injection**: Templates enhanced with methodology-specific system prompts - **Template Pipeline**: Framework injection integrated with Nunjucks processing - **Context Enhancement**: Framework-aware template context and variable substitution ### 🐛 Bug Fixes - **TypeScript Compilation**: Fixed interface inheritance issues in type system - **Async Function Types**: Corrected Promise return types in gate validation - **Execution Mode Validation**: Fixed type checking for execution mode detection ### ⚙️ Configuration Updates - **Framework Configuration**: Support for framework selection in prompt metadata - **Analysis Integration**: Configurable semantic analysis integration modes - **Framework Switching**: Runtime framework switching configuration options ### 📊 System Architecture - **Multi-Phase Orchestration**: Enhanced startup orchestration with dependency management - **Framework Integration**: Framework system integrated with semantic analysis - **State Management**: Framework state persistence and tracking capabilities - **Performance Monitoring**: Framework switching performance and health monitoring ### 🔄 Migration Guide #### **For Users (Claude)** - **Framework System**: Access framework switching through MCP system control tools - **Methodology Selection**: Choose appropriate framework (CAGEERF, ReACT, 5W1H, SCAMPER) for tasks - **Gate Validation**: Framework-aware validation automatically adapts to active framework - **Template Enhancement**: Templates now benefit from framework methodology guidance #### **For Developers** - **Framework Integration**: Import framework managers and methodology guide interfaces - **Type Imports**: Import framework interface types and strategy pattern interfaces - **Gate Integration**: Use framework-aware gates for methodology-specific validation ### 🎯 Key Benefits #### **Framework System Advantages** - **Methodology-Driven Architecture** replaces hard-coded framework logic with flexible guides - **Runtime Framework Switching** enables dynamic methodology selection based on task needs - **Framework-Aware Validation** adapts quality gates to methodology requirements - **Systematic Approach** to prompt creation and processing through established frameworks #### **Developer Experience** - **Clear Framework Architecture** with separation between stateless and stateful components - **Extensible Guide System** allows easy addition of new methodologies - **Strategy Pattern Implementation** enables clean execution strategy separation - **Type-Safe Framework Integration** with comprehensive interfaces and contracts #### **System Architecture** - **Foundation for Consolidation** establishes architecture later consolidated in v1.2.0 - **Multi-Phase Orchestration** provides robust startup and dependency management - **Framework Integration Points** enable seamless semantic analysis coordination - **Scalable Gate System** supports 
methodology-specific validation criteria --- ## Version 1.0.0 - Initial Release **Release Date**: [Previous Release Date] **Codename**: "Foundation" ### 🎯 Initial Features - **Basic MCP Server**: Core Model Context Protocol server implementation - **Prompt Management**: Basic prompt creation, update, and deletion tools - **Template Processing**: Nunjucks-based template engine with variable substitution - **Chain Execution**: Basic support for prompt chains with sequential execution - **Hot-Reloading**: Dynamic prompt reloading without server restart - **Multiple Transports**: Support for STDIO and SSE transport protocols ### 🛠️ Core Tools (Pre-Consolidation) - **Early MCP Tools**: Initial implementation with scattered tool architecture - **Prompt Management Tools**: Basic prompt creation, update, and deletion functionality - **System Tools**: Display available prompts and usage information - **Template Tools**: Edit specific sections and reload prompts without restart - **Hot-Reload Support**: Dynamic prompt reloading system foundation - **Multi-Tool Architecture**: Foundation later consolidated into 3 intelligent tools in v1.2.0 ### 📁 Template System - **Markdown Templates**: Support for markdown-based prompt templates - **Variable Substitution**: Basic `{{variable}}` syntax for dynamic content - **Category Organization**: Logical grouping of prompts by category - **Import System**: Modular prompt organization with category-specific files ### 🔧 Infrastructure - **TypeScript Foundation**: Full TypeScript implementation with type safety - **Configuration Management**: JSON-based configuration system - **Logging System**: Comprehensive logging with multiple levels - **Error Handling**: Basic error handling and validation ### 📊 Architecture - **Orchestration Engine**: Multi-phase startup with dependency management - **Module System**: Modular architecture for extensibility - **Health Monitoring**: Basic health checks and status reporting - **Transport Layer**: Abstraction for multiple client protocols --- ## Planned Releases ### Version 1.3.0 - LLM Semantic Analysis Integration (Planned) - **Semantic Analysis Layer**: Implementation of `detectAnalysisIntentLLM()` with actual LLM integration - **Intelligent Mode Detection**: Context-aware execution mode selection based on semantic understanding - **Content Quality Assessment**: LLM-powered analysis of prompt complexity and requirements - **Smart Framework Selection**: Automatic methodology selection based on task semantics ### Version 1.4.0 - Advanced Chain Orchestration (Planned) - **Automatic Chain Execution**: Full automation of multi-step processes - **Conditional Branching**: Support for conditional logic in chains - **Parallel Execution**: Concurrent execution of independent chain steps - **Chain Templates**: Pre-built chain templates for common processes ### Version 1.5.0 - AI-Powered Enhancements (Planned) - **Smart Gate Generation**: AI-generated quality gates based on prompt content - **Adaptive Execution**: Learning system that improves execution based on usage patterns - **Intelligent Error Recovery**: AI-powered suggestions for fixing failed executions - **Content Quality Scoring**: Advanced AI-based content quality assessment ### Version 2.0.0 - Enterprise Features (Planned) - **Multi-User Support**: User authentication and permission systems - **Workspace Management**: Isolated prompt workspaces for different projects - **Advanced Analytics**: Comprehensive analytics dashboard with visualizations - **API Extensions**: REST API 
for external integrations --- ## Migration and Compatibility ### Version Compatibility Matrix | Feature | v1.0.0 | v1.1.0 | v1.2.0 | v1.3.0 (Planned) | | ------------------------- | ------ | --------------- | -------------- | ---------------- | | Basic Prompt Execution | ✅ | ✅ | ✅ Enhanced | ✅ | | Four-Tier Execution Modes | ❌ | ❌ | ✅ New | ✅ | | Performance Optimization | ❌ | ❌ | ✅ Instant Execute | ✅ Enhanced | | Structural Auto Detection | ❌ | ❌ | ✅ New | ✅ LLM-Enhanced | | Chain Execution | ✅ | ✅ Enhanced | ✅ Optimized | ✅ Automated | | Framework System | ❌ | ✅ New | ✅ Enhanced | ✅ Advanced | | Gate Validation | ❌ | ✅ New | ✅ Enhanced | ✅ Advanced | | MCP Tool Consolidation | ❌ | ❌ | ✅ 87.5% Reduction | ✅ | | Step Confirmation | ❌ | ❌ | ✅ New | ✅ | | Architecture Validation | ❌ | ❌ | ✅ New | ✅ | ### Breaking Changes - **None in v1.1.0**: Full backward compatibility maintained with deprecated tool aliases - **None in v1.2.0**: Full backward compatibility maintained, all existing functionality preserved ### Deprecation Timeline - **v1.1.0**: Framework system established, pre-consolidation tool architecture maintained - **v1.2.0**: Intelligent command routing implementation, enhanced parser with multi-strategy parsing --- ## Contributing to Releases ### Release Process 1. **Feature Development**: Implement features in feature branches 2. **Testing & Validation**: Comprehensive testing of new features 3. **Documentation Updates**: Update relevant documentation 4. **Version History**: Update this document with release details 5. **Release Notes**: Generate detailed release notes 6. **Version Tagging**: Tag release in version control ### Version Numbering We follow [Semantic Versioning (SemVer)](https://semver.org/): - **MAJOR** version for incompatible API changes - **MINOR** version for backward-compatible functionality additions - **PATCH** version for backward-compatible bug fixes ### Release Schedule - **Major Releases**: Quarterly (every 3 months) - **Minor Releases**: Monthly (new features and enhancements) - **Patch Releases**: As needed (bug fixes and security updates) --- _For detailed technical information about any release, please refer to the corresponding documentation in the `/docs` directory._ ``` -------------------------------------------------------------------------------- /docs/mcp-tools-reference.md: -------------------------------------------------------------------------------- ```markdown # MCP Tools Reference (v1.4.0 - Phase 2A Conditional Branching) This document provides a comprehensive reference for the 3 intelligent MCP (Model Context Protocol) tools that power the Claude Prompts Server. The server implements **intelligent command routing** with built-in command detection and multi-strategy parsing while maintaining full functionality. ## Architecture Overview The server uses **three consolidated tools** that provide complete prompt management and execution capabilities: - **`prompt_engine`** - Universal execution with systematic analysis and structural detection - **`prompt_manager`** - Complete lifecycle management with smart filtering and type analysis - **`system_control`** - Framework management, switching performance analytics, and system administration ## Interaction Model MCP clients execute server capabilities by sending tool requests. Each tool uses Zod schema validation for type safety and provides structured responses with comprehensive error handling. 
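As a rough illustration of this validation layer, the sketch below shows what a Zod schema for the `prompt_engine` parameters documented in this reference could look like. It is a minimal sketch only: the field names mirror the parameter list in the next section, and names such as `promptEngineArgs` are illustrative, so the schema actually registered by the server may differ in detail.

```typescript
import { z } from "zod";

// Illustrative sketch only: mirrors the documented prompt_engine parameters,
// not necessarily the exact schema defined in the server source.
const promptEngineArgs = z.object({
  command: z.string().describe("Command to execute (>>prompt_name args)"),
  execution_mode: z.enum(["auto", "template", "chain"]).optional(),
  gate_validation: z.boolean().optional(),
  step_confirmation: z.boolean().optional(),
  llm_driven_execution: z.boolean().optional(),
  force_restart: z.boolean().optional(),
  session_id: z.string().optional(),
  chain_uri: z.string().optional(),
});

type PromptEngineArgs = z.infer<typeof promptEngineArgs>;

// Invalid requests fail fast with a structured ZodError instead of reaching execution;
// valid requests yield a fully typed argument object.
const args: PromptEngineArgs = promptEngineArgs.parse({
  command: '>>content_analysis text="my data"',
  execution_mode: "auto",
});
```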
--- ## Core Consolidated Tools ### `prompt_engine` 🎯 **Universal Prompt Executor**: Systematically executes prompts, templates, and chains with structural analysis and quality gate validation. Automatically detects execution type and handles step-by-step progression. **Key Capabilities**: - **Structural Analysis**: File structure detection for execution routing (prompt/template/chain) - **Framework Integration**: Applies active framework methodology (CAGEERF, ReACT, 5W1H, SCAMPER) - **Quality Gates**: Configurable validation with systematic retry mechanisms - **LLM-Driven Chains**: Step-by-step workflow coordination with state management **Parameters**: ```typescript { command: string; // Required: Command to execute (>>prompt_name args) execution_mode?: "auto" | "template" | "chain"; // Optional: Override detection gate_validation?: boolean; // Optional: Enable quality gates step_confirmation?: boolean; // Optional: Confirm each chain step llm_driven_execution?: boolean; // Optional: Enable LLM-driven chain coordination (requires semantic LLM integration) force_restart?: boolean; // Optional: Force restart chain from beginning, clearing all state session_id?: string; // Optional: Specific session ID to use or resume chain_uri?: string; // Optional: Full chain URI for precise session control } ``` **Execution Types**: - **Prompt**: Basic variable substitution (fastest, no framework injection) - **Template**: Framework-enhanced processing with methodology injection - **Chain**: LLM-driven sequential execution with step validation **Example Usage**: ```bash # Basic execution with structural detection prompt_engine >>content_analysis text="my data" # Force template execution with framework enhancement prompt_engine >>analysis_prompt input="data" execution_mode="template" # Chain execution with LLM coordination (requires semantic LLM integration enabled) prompt_engine >>research_chain topic="AI" llm_driven_execution=true step_confirmation=true ``` #### Chain Execution Parameters For chain execution, the prompt engine supports advanced session management and URI-based control: **Session Control Parameters**: - `force_restart` - Clear all existing state and restart from beginning - `session_id` - Resume specific session ID - `chain_uri` - Use URI-based control for precise session management **Example Session Control Usage**: ```bash # Auto-resume existing session (default behavior) prompt_engine >>research_chain topic="AI" # Force restart from beginning prompt_engine >>research_chain topic="AI" force_restart=true # Resume specific session prompt_engine >>research_chain topic="AI" session_id="chain-session-1234" # URI-based control prompt_engine >>research_chain chain_uri="chain://research_chain?force_restart=true" ``` #### Chain URI Syntax Chain URIs provide precise control over chain execution with the following syntax: **URI Format**: ``` chain://chainId[/sessionId[/stepId]][?queryParams] ``` **Components**: - `chainId` - Chain identifier (required) - `sessionId` - Specific session ID (optional) - `stepId` - Specific step ID (optional, for future use) - `queryParams` - Execution options as query parameters **Query Parameters**: - `force_restart=true` - Force restart clearing all state - `framework=CAGEERF` - Specify framework methodology - `step_confirmation=true` - Enable step-by-step confirmation - `error_handling=continue` - Error handling strategy - `max_retries=5` - Maximum retry attempts per step - `conditional_mode=true` - Enable conditional branching execution (Phase 2A) - 
`conditional_debug=true` - Enable conditional execution debugging **URI Examples**: ```bash # Basic execution with auto-session resolution prompt_engine chain_uri="chain://research_pipeline" # Force restart with query parameters prompt_engine chain_uri="chain://research_pipeline?force_restart=true" # Specific session resumption prompt_engine chain_uri="chain://research_pipeline/session-abc123" # Custom framework and options prompt_engine chain_uri="chain://research_pipeline?framework=CAGEERF&step_confirmation=true" # Complex configuration prompt_engine chain_uri="chain://research_pipeline?force_restart=true&framework=ReACT&error_handling=continue" # Conditional branching mode (NEW in Phase 2A) prompt_engine chain_uri="chain://research_pipeline?conditional_mode=true" # Conditional debugging and enhanced workflow prompt_engine chain_uri="chain://research_pipeline?conditional_mode=true&conditional_debug=true&step_confirmation=true" ``` **Smart Error Recovery**: When chains get stuck in failed state, the system provides actionable guidance with specific recovery options including restart URIs, session resume URIs, and troubleshooting recommendations. #### Advanced Conditional Branching (Phase 2A) The system now supports sophisticated conditional execution with safe JavaScript expression evaluation for dynamic workflow control. **Conditional Chain Definition Example**: ```json { "id": "analysis_workflow", "name": "Conditional Analysis Workflow", "steps": [ { "id": "data_validation", "promptId": "validate_data", "name": "Data Validation", "order": 0, "dependencies": [], "conditionalExecution": { "type": "always", "description": "Always validate input data" } }, { "id": "simple_analysis", "promptId": "basic_analysis", "name": "Simple Analysis", "order": 1, "dependencies": ["data_validation"], "conditionalExecution": { "type": "conditional", "expression": "utils.length(steps.data_validation.result) < 1000", "description": "Use simple analysis for small datasets" } }, { "id": "complex_analysis", "promptId": "advanced_analysis", "name": "Complex Analysis", "order": 2, "dependencies": ["data_validation"], "conditionalExecution": { "type": "conditional", "expression": "utils.length(steps.data_validation.result) >= 1000", "description": "Use advanced analysis for large datasets" } }, { "id": "error_recovery", "promptId": "handle_errors", "name": "Error Recovery", "order": 3, "dependencies": ["simple_analysis", "complex_analysis"], "conditionalExecution": { "type": "skip_if_success", "description": "Only run if previous steps had errors" } } ] } ``` **Conditional Execution Types**: - `always` - Always execute this step - `conditional` - Execute based on JavaScript expression evaluation - `skip_if_error` - Skip if current step has errors - `skip_if_success` - Skip if current step succeeded (run only on failure) - `branch_to` - Branch to specific step based on condition - `skip_to` - Skip to specific step (future use) **Expression Evaluation Context**: Conditional expressions have access to: - `steps` - Results from previous steps (e.g., `steps.data_validation.result`) - `vars` - Chain variables (e.g., `vars.userInput`) - `utils` - Utility functions for safe operations **Available Utility Functions**: - `utils.exists(value)` - Check if value exists (not null/undefined) - `utils.contains(string, substring)` - Check if string contains substring - `utils.length(value)` - Get length of string, array, or object - `utils.toNumber(value)` - Convert to number safely - `utils.toString(value)` - Convert to string 
safely - `utils.matches(string, regex)` - Test regex pattern **Security Features**: - **Expression Validation**: Dangerous patterns (eval, require, process) are blocked - **Timeout Protection**: Expressions timeout after 5 seconds - **Sandboxed Execution**: No access to global objects or Node.js APIs - **Safe Evaluation**: Uses isolated execution context **Conditional Execution Examples**: ```bash # Enable conditional mode for advanced workflow control prompt_engine >>analysis_workflow data="large_dataset" conditional_mode=true # Enable debugging to see conditional evaluation details prompt_engine >>analysis_workflow data="test" conditional_mode=true conditional_debug=true # Combine with URI syntax for precise control prompt_engine chain_uri="chain://analysis_workflow?conditional_mode=true&conditional_debug=true&framework=CAGEERF" ``` ### `prompt_manager` 📋 **Complete Lifecycle Management**: Create, update, delete, analyze, and manage prompts with advanced filtering and type analysis capabilities. **Key Capabilities**: - **Smart Filtering**: Advanced filter syntax (category:, type:, confidence:, etc.) - **Type Analysis**: Automatic execution type detection and classification - **Lifecycle Management**: Full CRUD operations with hot-reload support - **Migration Tools**: Convert between prompt types and analyze existing prompts **Actions Available**: - `list` - List and filter prompts with advanced search - `create` - Create new prompts with type detection - `create_prompt` - Create basic prompt (fast variable substitution) - `create_template` - Create framework-enhanced template - `update` - Update existing prompts - `delete` - Remove prompts with cleanup - `analyze_type` - Analyze prompt and recommend execution type - `migrate_type` - Convert between prompt and template types - `modify` - Modify specific sections of prompts **Advanced Filtering**: ```bash # Filter by category prompt_manager list filter="category:analysis" # Filter by execution type prompt_manager list filter="type:template" # Combined filters prompt_manager list filter="category:code type:template" # Intent-based search prompt_manager list filter="intent:debugging" ``` **Example Usage**: ```bash # List all prompts with advanced filtering prompt_manager list # Create framework-enhanced template prompt_manager create_template name="code_analyzer" category="development" \ content="Analyze {{code}} for security and performance issues" # Create conditional chain with branching logic (Phase 2A) prompt_manager create_template name="conditional_analysis" category="analysis" \ content="Dynamic analysis workflow with conditional branching" \ chain_steps='[ { "promptId": "data_check", "stepName": "Data Validation", "conditionalExecution": {"type": "always"} }, { "promptId": "simple_analysis", "stepName": "Simple Analysis", "conditionalExecution": { "type": "conditional", "expression": "utils.length(steps.data_check.result) < 1000" } }, { "promptId": "complex_analysis", "stepName": "Complex Analysis", "conditionalExecution": { "type": "conditional", "expression": "utils.length(steps.data_check.result) >= 1000" } } ]' # Analyze existing prompt type prompt_manager analyze_type prompt_id="my_prompt" # Update prompt content prompt_manager update id="analysis_prompt" content="new template content" ``` ### `system_control` ⚙️ **Framework Management & System Administration**: Control framework switching, monitor switching performance, manage system health, and access comprehensive analytics. 
**Key Capabilities**: - **Framework Switching**: Runtime methodology switching (CAGEERF, ReACT, 5W1H, SCAMPER) - **Switching Performance**: Track switching mechanics (timing, success rate, error count) - **System Health**: Monitor server health, module status, and resource usage - **Usage Analytics**: Framework usage statistics and system performance metrics **Actions Available**: - `status` - Comprehensive system status and framework state - `switch_framework` - Change active framework methodology - `list_frameworks` - Show available frameworks with details - `analytics` - System performance analytics and usage metrics - `health` - Health monitoring and diagnostic information - `diagnostics` - Detailed system diagnostics and recommendations - `reset_metrics` - Reset framework switching performance metrics - `switch_history` - View framework switching history - `config` - System configuration management **Framework Management**: ```bash # Check current framework status system_control status # Switch framework methodology system_control switch_framework framework="ReACT" reason="Problem-solving focus" # View available frameworks system_control list_frameworks show_details=true # Get framework switching history system_control switch_history limit=10 ``` **Analytics & Monitoring**: ```bash # View system analytics system_control analytics include_history=true # Check system health system_control health # Run diagnostics system_control diagnostics # Reset switching performance metrics system_control reset_metrics confirm=true ``` --- ## Tool Response Format All tools return standardized responses with consistent error handling: ```typescript interface ToolResponse { content: Array<{ type: "text" | "resource"; text?: string; resource?: { uri: string; text: string; }; }>; isError?: boolean; } ``` --- ## Framework System Integration ### Available Frameworks The system provides **4 proven methodologies** for systematic thinking: - **CAGEERF**: Comprehensive structured approach (Context, Analysis, Goals, Execution, Evaluation, Refinement, Framework) - **ReACT**: Reasoning and Acting pattern for systematic problem-solving - **5W1H**: Who, What, When, Where, Why, How systematic analysis - **SCAMPER**: Creative problem-solving (Substitute, Combine, Adapt, Modify, Put to other uses, Eliminate, Reverse) ### Framework Selection Framework selection uses **rule-based logic** based on: - **User Preference**: Manual selection takes priority - **Execution Type**: Templates get framework enhancement, prompts bypass for speed - **Structural Complexity**: Basic structural analysis informs suggestions - **Current Active Framework**: Templates use the currently active methodology ### Switching Performance Metrics The system tracks **framework switching mechanics** (not effectiveness): - **Switch Count**: Total number of framework switches - **Switch Success Rate**: Percentage of successful framework switches - **Switch Time**: Average time for framework switching operations - **Error Count**: Number of failed switching attempts **Note**: Metrics track switching performance, not methodology effectiveness or output quality. --- ## Usage Examples ### Complete Workflow Example ```bash # 1. Check system status and active framework system_control status # 2. Switch to appropriate framework for your task system_control switch_framework framework="CAGEERF" reason="Complex analysis needed" # 3. List relevant prompts prompt_manager list filter="category:analysis type:template" # 4. 
Execute analysis with framework enhancement prompt_engine >>comprehensive_analysis data="my research data" execution_mode="template" # 5. Check switching performance system_control analytics ``` ### Chain Workflow Example ```bash # 1. Create analysis chain prompt_manager create_template name="research_chain" category="research" \ content="Multi-step research analysis workflow" \ chain_steps='[{"promptId":"data_collection","stepName":"Data Collection"},{"promptId":"analysis_step","stepName":"Analysis"},{"promptId":"summary_step","stepName":"Summary"}]' # 2. Execute chain with step validation and LLM coordination prompt_engine >>research_chain topic="AI trends" llm_driven_execution=true gate_validation=true # 3. Monitor chain execution through system status system_control status ``` ### Conditional Branching Workflow Example (Phase 2A) ```bash # 1. Create conditional analysis chain with branching logic prompt_manager create_template name="adaptive_analysis" category="analysis" \ content="Adaptive analysis with conditional execution paths" \ chain_steps='[ { "promptId": "input_assessment", "stepName": "Input Assessment", "conditionalExecution": {"type": "always"} }, { "promptId": "quick_analysis", "stepName": "Quick Analysis", "conditionalExecution": { "type": "conditional", "expression": "utils.contains(steps.input_assessment.result, \"simple\")" } }, { "promptId": "deep_analysis", "stepName": "Deep Analysis", "conditionalExecution": { "type": "conditional", "expression": "utils.contains(steps.input_assessment.result, \"complex\")" } }, { "promptId": "error_handler", "stepName": "Error Recovery", "conditionalExecution": {"type": "skip_if_success"} } ]' # 2. Execute conditional chain with debugging enabled prompt_engine >>adaptive_analysis input="complex data analysis task" \ conditional_mode=true conditional_debug=true step_confirmation=true # 3. Alternative execution with URI syntax for precise control prompt_engine chain_uri="chain://adaptive_analysis?conditional_mode=true&conditional_debug=true&framework=CAGEERF&step_confirmation=true" # 4. 
Monitor conditional execution and branching decisions system_control status ``` --- ## Error Handling All tools implement comprehensive error handling: - **Validation Errors**: Invalid parameters or missing required fields - **Execution Errors**: Prompt execution failures with detailed context - **System Errors**: Framework switching failures or system issues - **Recovery Suggestions**: Actionable guidance for resolving issues --- ## Performance Characteristics ### Execution Speed Comparison ``` Prompt Execution: 50-200ms (basic variable substitution) Template Execution: 200-800ms (framework enhancement + validation) Chain Execution: Variable (depends on step count and LLM response time) ``` ### Tool Consolidation Benefits **Intelligent Routing**: Enhanced command detection with automatic tool routing - **Simplified Interface**: Single tools handle multiple related functions - **Consistent Experience**: Standardized response format and error handling - **Reduced Complexity**: Easier to learn and use - **Maintained Functionality**: All original capabilities preserved --- ## Migration from Legacy Tools If you have references to old tool names, here's the mapping: | Legacy Tool | Consolidated Tool | Action | |------------|------------------|---------| | `execute_prompt` | `prompt_engine` | Direct replacement | | `listprompts` | `prompt_manager` | `list` action | | `update_prompt` | `prompt_manager` | `create` or `update` actions | | `delete_prompt` | `prompt_manager` | `delete` action | | `modify_prompt_section` | `prompt_manager` | `modify` action | | `reload_prompts` | `system_control` | `reload` action | | `execution_analytics` | `system_control` | `analytics` action | ### API Parameter Changes **Chain Creation Simplified** (v1.2.0+): | Old API (Deprecated) | New API | |---------------------|---------| | `isChain=true` | ❌ **Removed** - redundant parameter | | `chain_steps='[...]'` | ✅ **Chain detection automatic** | **Migration Example**: ```bash # ❌ OLD - Don't use isChain anymore prompt_manager create name="chain" isChain=true chain_steps='[...]' # ✅ NEW - Chain detected automatically from steps prompt_manager create name="chain" chain_steps='[{"promptId":"step1","stepName":"Step 1"}]' ``` --- ## System Requirements - **MCP Client**: Any MCP-compatible client (Claude Desktop, Cursor, Windsurf, etc.) 
- **Transport**: STDIO (primary) or SSE (web clients) - **Node.js**: Version 16 or higher - **Memory**: ~50MB base usage, scales with prompt library size --- **Documentation Version**: 1.4.0 (Phase 2A Conditional Branching) **Last Updated**: 2025-01-30 **Compatibility**: Universal MCP client support with advanced conditional workflow capabilities ``` -------------------------------------------------------------------------------- /server/src/prompts/loader.ts: -------------------------------------------------------------------------------- ```typescript /** * Prompt Loader Module * Handles loading prompts from category-specific configuration files and markdown templates */ import * as fs from "fs/promises"; import { readFile } from "fs/promises"; import path from "path"; import { Logger } from "../logging/index.js"; import { CategoryPromptsResult, PromptData, PromptsConfigFile, } from "../types/index.js"; import { safeWriteFile } from "./promptUtils.js"; import { CategoryManager, createCategoryManager } from "./category-manager.js"; /** * Prompt Loader class */ export class PromptLoader { private logger: Logger; private categoryManager: CategoryManager; constructor(logger: Logger) { this.logger = logger; this.categoryManager = createCategoryManager(logger); } /** * Load prompts from category-specific prompts.json files */ async loadCategoryPrompts( configPath: string ): Promise<CategoryPromptsResult> { try { this.logger.info( `🔍 PromptLoader: Starting to load category prompts from: ${configPath}` ); // Read the promptsConfig.json file this.logger.info("📖 Reading promptsConfig.json file..."); const configContent = await readFile(configPath, "utf8"); this.logger.info( `✓ Config file read successfully, ${configContent.length} characters` ); let promptsConfig: PromptsConfigFile; try { this.logger.info("🔧 Parsing promptsConfig.json..."); promptsConfig = JSON.parse(configContent) as PromptsConfigFile; this.logger.info("✓ Config file parsed successfully"); } catch (jsonError) { this.logger.error( `❌ Error parsing config file ${configPath}:`, jsonError ); throw new Error( `Invalid JSON in config file: ${ jsonError instanceof Error ? jsonError.message : String(jsonError) }` ); } // Log the parsed config structure this.logger.info(`📋 Config structure analysis:`); this.logger.info( ` - Categories defined: ${promptsConfig.categories?.length || 0}` ); this.logger.info( ` - Import paths defined: ${promptsConfig.imports?.length || 0}` ); if (promptsConfig.categories?.length > 0) { this.logger.info("📂 Categories found:"); promptsConfig.categories.forEach((cat) => { this.logger.info(` - ${cat.name} (${cat.id}): ${cat.description}`); }); } if (promptsConfig.imports?.length > 0) { this.logger.info("📥 Import paths to process:"); promptsConfig.imports.forEach((importPath, index) => { this.logger.info(` ${index + 1}. ${importPath}`); }); } // Ensure required properties exist if (!promptsConfig.categories) { this.logger.warn( `⚠️ Config file ${configPath} does not have a 'categories' array. Initializing it.` ); promptsConfig.categories = []; } if (!promptsConfig.imports || !Array.isArray(promptsConfig.imports)) { this.logger.warn( `⚠️ Config file ${configPath} does not have a valid 'imports' array. 
Initializing it.` ); promptsConfig.imports = []; } // Load and validate categories using CategoryManager const categoryValidation = await this.categoryManager.loadCategories(promptsConfig.categories); if (!categoryValidation.isValid) { this.logger.error("❌ Category validation failed:"); categoryValidation.issues.forEach(issue => this.logger.error(` - ${issue}`)); throw new Error(`Category validation failed: ${categoryValidation.issues.join('; ')}`); } if (categoryValidation.warnings.length > 0) { this.logger.warn("⚠️ Category validation warnings:"); categoryValidation.warnings.forEach(warning => this.logger.warn(` - ${warning}`)); } // Get validated categories const categories = this.categoryManager.getCategories(); // Initialize an array to store all prompts let allPrompts: PromptData[] = []; let totalImportProcessed = 0; let totalImportsFailed = 0; this.logger.info( `🚀 Starting to process ${promptsConfig.imports.length} import paths...` ); // Load prompts from each import path for (const importPath of promptsConfig.imports) { totalImportProcessed++; this.logger.info( `\n📦 Processing import ${totalImportProcessed}/${promptsConfig.imports.length}: ${importPath}` ); try { // Construct the full path to the import file const fullImportPath = path.join( path.dirname(configPath), importPath ); this.logger.info(` 🔍 Full path: ${fullImportPath}`); // Check if the file exists try { await fs.access(fullImportPath); this.logger.info(` ✓ Import file exists`); } catch (error) { this.logger.warn( ` ⚠️ Import file not found: ${importPath}. Creating empty file.` ); // Create the directory if it doesn't exist const dir = path.dirname(fullImportPath); await fs.mkdir(dir, { recursive: true }); // Create an empty prompts file await safeWriteFile( fullImportPath, JSON.stringify({ prompts: [] }, null, 2), "utf8" ); this.logger.info(` ✓ Created empty prompts file`); } // Read the file this.logger.info(` 📖 Reading import file...`); const fileContent = await readFile(fullImportPath, "utf8"); this.logger.info( ` ✓ File read successfully, ${fileContent.length} characters` ); let categoryPromptsFile: any; try { categoryPromptsFile = JSON.parse(fileContent); this.logger.info(` ✓ Import file parsed successfully`); } catch (jsonError) { this.logger.error( ` ❌ Error parsing import file ${importPath}:`, jsonError ); this.logger.info( ` 🔧 Creating empty prompts file for ${importPath} due to parsing error.` ); categoryPromptsFile = { prompts: [] }; await safeWriteFile( fullImportPath, JSON.stringify(categoryPromptsFile, null, 2), "utf8" ); } // Ensure prompts property exists and is an array if (!categoryPromptsFile.prompts) { this.logger.warn( ` ⚠️ Import file ${importPath} does not have a 'prompts' array. Initializing it.` ); categoryPromptsFile.prompts = []; await safeWriteFile( fullImportPath, JSON.stringify(categoryPromptsFile, null, 2), "utf8" ); } else if (!Array.isArray(categoryPromptsFile.prompts)) { this.logger.warn( ` ⚠️ Import file ${importPath} has an invalid 'prompts' property (not an array). 
Resetting it.` ); categoryPromptsFile.prompts = []; await safeWriteFile( fullImportPath, JSON.stringify(categoryPromptsFile, null, 2), "utf8" ); } this.logger.info( ` 📊 Found ${categoryPromptsFile.prompts.length} prompts in this import` ); // Update the file path to be relative to the category folder const categoryPath = path.dirname(importPath); const beforeCount = categoryPromptsFile.prompts.length; const categoryPrompts = categoryPromptsFile.prompts .map((prompt: PromptData, index: number) => { // Ensure prompt has all required properties if (!prompt.id || !prompt.name || !prompt.file) { this.logger.warn( ` ⚠️ Skipping invalid prompt ${ index + 1 } in ${importPath}: missing required properties (id: ${!!prompt.id}, name: ${!!prompt.name}, file: ${!!prompt.file})` ); return null; } // If the file path is already absolute or starts with the category folder, keep it as is if ( prompt.file.startsWith("/") || prompt.file.startsWith(categoryPath) ) { return prompt; } // Otherwise, update the file path to include the category folder return { ...prompt, file: path.join(categoryPath, prompt.file), }; }) .filter(Boolean); // Remove any null entries (invalid prompts) const afterCount = categoryPrompts.length; if (beforeCount !== afterCount) { this.logger.warn( ` ⚠️ ${ beforeCount - afterCount } prompts were filtered out due to validation issues` ); } this.logger.info( ` ✅ Successfully processed ${afterCount} valid prompts from ${importPath}` ); // Add the prompts to the array allPrompts = [...allPrompts, ...categoryPrompts]; } catch (error) { totalImportsFailed++; this.logger.error( ` ❌ Error loading prompts from ${importPath}:`, error ); } } this.logger.info(`\n🎯 IMPORT PROCESSING SUMMARY:`); this.logger.info(` Total imports processed: ${totalImportProcessed}`); this.logger.info(` Imports failed: ${totalImportsFailed}`); this.logger.info( ` Imports succeeded: ${totalImportProcessed - totalImportsFailed}` ); this.logger.info(` Total prompts collected: ${allPrompts.length}`); this.logger.info(` Categories available: ${categories.length}`); // Validate category-prompt relationships using CategoryManager this.logger.info(`🔍 Validating category-prompt relationships...`); const promptCategoryValidation = this.categoryManager.validatePromptCategories(allPrompts); if (!promptCategoryValidation.isValid) { this.logger.error("❌ Category-prompt relationship validation failed:"); promptCategoryValidation.issues.forEach(issue => this.logger.error(` - ${issue}`)); this.logger.warn("Continuing with loading but some prompts may not display correctly"); } if (promptCategoryValidation.warnings.length > 0) { this.logger.warn("⚠️ Category-prompt relationship warnings:"); promptCategoryValidation.warnings.forEach(warning => this.logger.warn(` - ${warning}`)); } // Generate category statistics for debugging const categoryStats = this.categoryManager.getCategoryStatistics(allPrompts); this.logger.info(`📊 Category Statistics:`); this.logger.info(` Categories with prompts: ${categoryStats.categoriesWithPrompts}/${categoryStats.totalCategories}`); this.logger.info(` Empty categories: ${categoryStats.emptyCategoriesCount}`); this.logger.info(` Average prompts per category: ${categoryStats.averagePromptsPerCategory.toFixed(1)}`); const result = { promptsData: allPrompts, categories }; this.logger.info( `✅ PromptLoader.loadCategoryPrompts() completed successfully` ); return result; } catch (error) { this.logger.error(`❌ PromptLoader.loadCategoryPrompts() FAILED:`, error); throw error; } } /** * Get the CategoryManager instance 
for external access */ getCategoryManager(): CategoryManager { return this.categoryManager; } /** * Load prompt content from markdown file */ async loadPromptFile( filePath: string, basePath: string ): Promise<{ systemMessage?: string; userMessageTemplate: string; isChain?: boolean; gateConfiguration?: { include?: string[]; exclude?: string[]; framework_gates?: boolean; temporary_gates?: Array<{ id?: string; name: string; type: 'validation' | 'approval' | 'condition'; scope: 'execution' | 'session' | 'chain' | 'step'; description: string; guidance: string; pass_criteria: any[]; expires_at?: number; source?: 'manual' | 'automatic' | 'analysis'; context?: Record<string, any>; }>; gate_scope?: 'execution' | 'session' | 'chain' | 'step'; inherit_chain_gates?: boolean; }; chainSteps?: Array<{ promptId: string; stepName: string; gates?: string[]; inputMapping?: Record<string, string>; outputMapping?: Record<string, string>; }>; }> { try { const fullPath = path.join(basePath, filePath); const content = await readFile(fullPath, "utf8"); // Extract system message and user message template from markdown const systemMessageMatch = content.match( /## System Message\s*\n([\s\S]*?)(?=\n##|$)/ ); const userMessageMatch = content.match( /## User Message Template\s*\n([\s\S]*)$/ ); const systemMessage = systemMessageMatch ? systemMessageMatch[1].trim() : undefined; let userMessageTemplate = userMessageMatch ? userMessageMatch[1].trim() : ""; // Extract gate configuration if present (Phase 3: Enhanced gate configuration with temporary gates) let gateConfiguration: { include?: string[]; exclude?: string[]; framework_gates?: boolean; temporary_gates?: Array<{ id?: string; name: string; type: 'validation' | 'approval' | 'condition'; scope: 'execution' | 'session' | 'chain' | 'step'; description: string; guidance: string; pass_criteria: any[]; expires_at?: number; source?: 'manual' | 'automatic' | 'analysis'; context?: Record<string, any>; }>; gate_scope?: 'execution' | 'session' | 'chain' | 'step'; inherit_chain_gates?: boolean; } | undefined; const gateConfigMatch = content.match( /## Gate Configuration\s*\n```json\s*\n([\s\S]*?)\n```/ ); if (gateConfigMatch) { try { const gateConfigContent = gateConfigMatch[1].trim(); const parsedConfig = JSON.parse(gateConfigContent); // Validate and normalize the gate configuration if (Array.isArray(parsedConfig)) { // Simple array format: ["gate1", "gate2"] gateConfiguration = { include: parsedConfig, framework_gates: true }; } else if (typeof parsedConfig === 'object' && parsedConfig !== null) { // Object format: {"include": [...], "exclude": [...], "framework_gates": true, "temporary_gates": [...]} gateConfiguration = { include: Array.isArray(parsedConfig.include) ? parsedConfig.include : undefined, exclude: Array.isArray(parsedConfig.exclude) ? parsedConfig.exclude : undefined, framework_gates: typeof parsedConfig.framework_gates === 'boolean' ? parsedConfig.framework_gates : true, temporary_gates: Array.isArray(parsedConfig.temporary_gates) ? parsedConfig.temporary_gates : undefined, gate_scope: typeof parsedConfig.gate_scope === 'string' ? parsedConfig.gate_scope : undefined, inherit_chain_gates: typeof parsedConfig.inherit_chain_gates === 'boolean' ? 
parsedConfig.inherit_chain_gates : undefined }; } this.logger.debug(`[LOADER] Gate configuration parsed for ${filePath}:`, gateConfiguration); // Phase 3 Fix: Strip Gate Configuration section from userMessageTemplate // so it doesn't appear in the output to the user if (gateConfigMatch) { const gateConfigSectionRegex = /## Gate Configuration\s*\n```json\s*\n[\s\S]*?\n```\s*/; userMessageTemplate = userMessageTemplate.replace(gateConfigSectionRegex, '').trim(); this.logger.debug(`[LOADER] Stripped Gate Configuration section from user message template for ${filePath}`); } } catch (gateConfigError) { this.logger.warn(`[LOADER] Failed to parse gate configuration in ${filePath}:`, gateConfigError); } } // Extract chain information if present const chainMatch = content.match( /## Chain Steps\s*\n([\s\S]*?)(?=\n##|$)/ ); let chainSteps: Array<{ promptId: string; stepName: string; gates?: string[]; inputMapping?: Record<string, string>; outputMapping?: Record<string, string>; }> = []; if (chainMatch) { const chainContent = chainMatch[1].trim(); // Enhanced regex to match markdown format with optional gates const stepMatches = chainContent.matchAll( /(\d+)\.\s*promptId:\s*([^\n]+)\s*\n\s*stepName:\s*([^\n]+)(?:\s*\n\s*gates:\s*([^\n]+))?(?:\s*\n\s*inputMapping:\s*([\s\S]*?)(?=\s*\n\s*(?:outputMapping|promptId|\d+\.|$)))?\s*(?:\n\s*outputMapping:\s*([\s\S]*?)(?=\s*\n\s*(?:promptId|\d+\.|$)))?\s*/g ); for (const match of stepMatches) { const [ _, stepNumber, promptId, stepName, gatesStr, inputMappingStr, outputMappingStr, ] = match; const step: { promptId: string; stepName: string; gates?: string[]; inputMapping?: Record<string, string>; outputMapping?: Record<string, string>; } = { promptId: promptId.trim(), stepName: stepName.trim(), }; // Parse gates if present if (gatesStr) { try { // Handle both JSON array format ["gate1", "gate2"] and simple list format const gatesStrTrimmed = gatesStr.trim(); if (gatesStrTrimmed.startsWith('[') && gatesStrTrimmed.endsWith(']')) { // JSON array format step.gates = JSON.parse(gatesStrTrimmed); } else { // Simple comma-separated format: "gate1, gate2" step.gates = gatesStrTrimmed.split(',').map(g => g.trim()).filter(g => g.length > 0); } this.logger.debug(`Loaded ${step.gates?.length || 0} gate(s) for step ${stepNumber}: ${step.gates?.join(', ') || ''}`); } catch (e) { this.logger.warn( `Invalid gates format in chain step ${stepNumber} of ${filePath}: ${e}` ); } } if (inputMappingStr) { try { // Parse YAML-style mapping into JSON object const inputMapping: Record<string, string> = {}; const lines = inputMappingStr.trim().split("\n"); for (const line of lines) { const [key, value] = line .trim() .split(":") .map((s) => s.trim()); if (key && value) { inputMapping[key] = value; } } step.inputMapping = inputMapping; } catch (e) { this.logger.warn( `Invalid input mapping in chain step ${stepNumber} of ${filePath}: ${e}` ); } } if (outputMappingStr) { try { // Parse YAML-style mapping into JSON object const outputMapping: Record<string, string> = {}; const lines = outputMappingStr.trim().split("\n"); for (const line of lines) { const [key, value] = line .trim() .split(":") .map((s) => s.trim()); if (key && value) { outputMapping[key] = value; } } step.outputMapping = outputMapping; } catch (e) { this.logger.warn( `Invalid output mapping in chain step ${stepNumber} of ${filePath}: ${e}` ); } } chainSteps.push(step); } this.logger.debug( `Loaded chain with ${chainSteps.length} steps from ${filePath}` ); } if (!userMessageTemplate && !(chainSteps.length > 0)) { throw new 
Error(`No user message template found in ${filePath}`); } return { systemMessage, userMessageTemplate, gateConfiguration, chainSteps }; } catch (error) { this.logger.error(`Error loading prompt file ${filePath}:`, error); throw error; } } } ``` -------------------------------------------------------------------------------- /server/src/frameworks/methodology/guides/5w1h-guide.ts: -------------------------------------------------------------------------------- ```typescript /** * 5W1H Methodology Guide * Provides guidance for applying 5W1H (Who, What, When, Where, Why, How) methodology to prompt creation, * processing, and execution without hijacking semantic analysis functionality */ import type { ConvertedPrompt } from "../../../types/index.js"; import { ContentAnalysisResult } from "../../../semantic/configurable-semantic-analyzer.js"; import { IMethodologyGuide, BaseMethodologyGuide, PromptCreationGuidance, ProcessingGuidance, StepGuidance, MethodologyEnhancement, MethodologyValidation, ProcessingStep, ExecutionStep, QualityGate, TemplateEnhancement, MethodologyToolDescriptions } from "../interfaces.js"; /** * 5W1H Methodology Guide Implementation * Guides the application of 5W1H systematic questioning without replacing semantic analysis */ export class FiveW1HMethodologyGuide extends BaseMethodologyGuide { readonly frameworkId = "5w1h"; readonly frameworkName = "5W1H Framework"; readonly methodology = "5W1H"; readonly version = "1.0.0"; /** * Guide prompt creation using 5W1H structure * Helps users create prompts that follow 5W1H methodology */ guidePromptCreation(intent: string, context?: Record<string, any>): PromptCreationGuidance { return { structureGuidance: { systemPromptSuggestions: [ "Identify all stakeholders and actors involved", "Define exactly what needs to be accomplished", "Establish timing and sequence requirements", "Determine location and context constraints", "Understand underlying purposes and motivations", "Plan specific methods and implementation approaches" ], userTemplateSuggestions: [ "Structure request using 5W1H systematic questioning", "Identify who is involved or affected", "Define what exactly needs to be done", "Specify when this should happen", "Clarify where this takes place", "Explain why this is important", "Detail how this should be accomplished" ], argumentSuggestions: [ { name: "stakeholders", type: "array", description: "People, roles, or entities involved or affected", methodologyReason: "5W1H 'Who' question requires comprehensive stakeholder identification", examples: ["users", "developers", "managers", "customers"] }, { name: "objectives", type: "string", description: "Specific goals and deliverables to accomplish", methodologyReason: "5W1H 'What' question needs clear objective definition", examples: ["improve user experience", "reduce processing time", "increase sales"] }, { name: "timeline", type: "string", description: "When this should be completed or timing requirements", methodologyReason: "5W1H 'When' question establishes timing and urgency", examples: ["by end of quarter", "within 2 weeks", "before product launch"] }, { name: "context", type: "string", description: "Where this takes place or contextual constraints", methodologyReason: "5W1H 'Where' question identifies location and context factors", examples: ["web application", "mobile platform", "enterprise environment"] }, { name: "purpose", type: "string", description: "Why this is important and underlying motivations", methodologyReason: "5W1H 'Why' question uncovers fundamental 
motivations", examples: ["improve customer satisfaction", "reduce operational costs", "meet compliance"] } ] }, methodologyElements: { requiredSections: ["Who", "What", "When", "Where", "Why", "How"], optionalSections: ["Validation", "Dependencies"], sectionDescriptions: { "Who": "Identify stakeholders, actors, and people involved or affected", "What": "Define exactly what needs to be accomplished and deliverables", "When": "Establish timing, deadlines, and sequence requirements", "Where": "Determine location, context, and environmental constraints", "Why": "Understand underlying purposes, motivations, and importance", "How": "Plan specific methods, approaches, and implementation strategies" } }, qualityGuidance: { clarityEnhancements: [ "Make stakeholder roles and responsibilities explicit", "Define specific, measurable objectives and deliverables", "Provide concrete timelines and deadlines" ], completenessChecks: [ "Ensure all 6 questions (Who, What, When, Where, Why, How) are addressed", "Verify stakeholder identification is comprehensive", "Confirm objectives are specific and measurable" ], specificityImprovements: [ "Replace generic stakeholder references with specific roles", "Add quantifiable success criteria and metrics", "Include specific implementation steps and methods" ] } }; } /** * Guide template processing with 5W1H methodology */ guideTemplateProcessing(template: string, executionType: string): ProcessingGuidance { const fiveW1HSteps: ProcessingStep[] = [ { id: "stakeholder_identification", name: "Stakeholder Identification (Who)", description: "Identify all relevant stakeholders, actors, and people involved", methodologyBasis: "5W1H Who question", order: 1, required: true }, { id: "objective_definition", name: "Objective Definition (What)", description: "Define exactly what needs to be accomplished and deliverables", methodologyBasis: "5W1H What question", order: 2, required: true }, { id: "timing_establishment", name: "Timing Establishment (When)", description: "Establish timing, deadlines, and sequence requirements", methodologyBasis: "5W1H When question", order: 3, required: true }, { id: "context_determination", name: "Context Determination (Where)", description: "Determine location, context, and environmental constraints", methodologyBasis: "5W1H Where question", order: 4, required: true }, { id: "purpose_understanding", name: "Purpose Understanding (Why)", description: "Understand underlying purposes, motivations, and importance", methodologyBasis: "5W1H Why question", order: 5, required: true }, { id: "method_planning", name: "Method Planning (How)", description: "Plan specific methods, approaches, and implementation strategies", methodologyBasis: "5W1H How question", order: 6, required: true } ]; return { processingSteps: fiveW1HSteps, templateEnhancements: { systemPromptAdditions: [ "Apply 5W1H systematic questioning methodology", "Address Who, What, When, Where, Why, How comprehensively", "Ensure complete coverage of all stakeholders and requirements", "Use systematic questioning to uncover hidden requirements" ], userPromptModifications: [ "Structure response addressing all 5W1H questions", "Provide comprehensive stakeholder analysis", "Include detailed implementation planning" ], contextualHints: [ "Focus on comprehensive requirement gathering", "Emphasize stakeholder perspective consideration", "Apply systematic analysis to ensure nothing is missed" ] }, executionFlow: { preProcessingSteps: [ "Validate 5W1H question completeness", "Confirm stakeholder 
identification scope", "Verify objective clarity and measurability" ], postProcessingSteps: [ "Review 5W1H coverage completeness", "Assess stakeholder consideration adequacy", "Evaluate implementation plan specificity" ], validationSteps: [ "Who question completeness check", "What question specificity verification", "When question timing validation", "Where question context assessment", "Why question motivation analysis", "How question method evaluation" ] } }; } /** * Guide execution steps using 5W1H methodology */ guideExecutionSteps(prompt: ConvertedPrompt, semanticAnalysis: ContentAnalysisResult): StepGuidance { const executionSteps: ExecutionStep[] = [ { id: "comprehensive_who_analysis", name: "Comprehensive Who Analysis", action: "Identify and analyze all stakeholders, actors, and affected parties", methodologyPhase: "Who", dependencies: [], expected_output: "Complete stakeholder map with roles and responsibilities" }, { id: "specific_what_definition", name: "Specific What Definition", action: "Define exactly what needs to be accomplished with measurable deliverables", methodologyPhase: "What", dependencies: ["comprehensive_who_analysis"], expected_output: "Clear, specific, measurable objectives and deliverables" }, { id: "detailed_when_planning", name: "Detailed When Planning", action: "Establish comprehensive timing, deadlines, and sequence requirements", methodologyPhase: "When", dependencies: ["specific_what_definition"], expected_output: "Detailed timeline with milestones and dependencies" }, { id: "contextual_where_assessment", name: "Contextual Where Assessment", action: "Determine location, context, environment, and situational constraints", methodologyPhase: "Where", dependencies: ["detailed_when_planning"], expected_output: "Comprehensive context analysis with constraints identified" }, { id: "fundamental_why_exploration", name: "Fundamental Why Exploration", action: "Uncover underlying purposes, motivations, and fundamental importance", methodologyPhase: "Why", dependencies: ["contextual_where_assessment"], expected_output: "Deep understanding of motivations and fundamental purposes" }, { id: "specific_how_planning", name: "Specific How Planning", action: "Develop detailed methods, approaches, and implementation strategies", methodologyPhase: "How", dependencies: ["fundamental_why_exploration"], expected_output: "Comprehensive implementation plan with specific methods" } ]; // Adjust steps based on execution type from semantic analyzer const stepEnhancements: Record<string, string[]> = {}; const stepValidation: Record<string, string[]> = {}; if (semanticAnalysis.executionType === "chain") { stepEnhancements["specific_how_planning"] = [ "Plan sequential implementation steps", "Define clear handoff points between phases", "Establish validation checkpoints for each step" ]; stepValidation["specific_how_planning"] = [ "Sequential step validation", "Handoff point verification", "Checkpoint adequacy assessment" ]; } else if (semanticAnalysis.executionType === "template") { stepEnhancements["comprehensive_who_analysis"] = [ "Create stakeholder templates and categories", "Develop reusable stakeholder analysis patterns", "Establish standard stakeholder consideration checklists" ]; stepValidation["comprehensive_who_analysis"] = [ "Template completeness validation", "Pattern reusability verification", "Checklist coverage assessment" ]; } return { stepSequence: executionSteps, stepEnhancements, stepValidation }; } /** * Enhance execution with 5W1H methodology */ 
enhanceWithMethodology(prompt: ConvertedPrompt, context: Record<string, any>): MethodologyEnhancement { const fiveW1HGates: QualityGate[] = [ { id: "stakeholder_completeness", name: "Stakeholder Completeness", description: "Verify comprehensive stakeholder identification and analysis", methodologyArea: "Who", validationCriteria: [ "All relevant stakeholders identified", "Roles and responsibilities defined", "Stakeholder interests and concerns considered" ], priority: "high" }, { id: "objective_specificity", name: "Objective Specificity", description: "Ensure objectives are specific, measurable, and well-defined", methodologyArea: "What", validationCriteria: [ "Objectives are specific and clear", "Deliverables are measurable", "Success criteria are defined" ], priority: "high" }, { id: "timing_comprehensiveness", name: "Timing Comprehensiveness", description: "Validate thorough timing and scheduling consideration", methodologyArea: "When", validationCriteria: [ "Timeline is realistic and detailed", "Dependencies and constraints considered", "Milestones and checkpoints defined" ], priority: "medium" }, { id: "context_thoroughness", name: "Context Thoroughness", description: "Assess comprehensive context and environmental analysis", methodologyArea: "Where", validationCriteria: [ "Environmental factors considered", "Contextual constraints identified", "Location and situational factors addressed" ], priority: "medium" }, { id: "purpose_depth", name: "Purpose Depth", description: "Verify deep understanding of underlying motivations", methodologyArea: "Why", validationCriteria: [ "Fundamental purposes understood", "Underlying motivations explored", "Value and importance articulated" ], priority: "high" }, { id: "method_practicality", name: "Method Practicality", description: "Ensure methods and approaches are practical and detailed", methodologyArea: "How", validationCriteria: [ "Methods are specific and actionable", "Implementation approach is practical", "Resources and capabilities considered" ], priority: "medium" } ]; const templateSuggestions: TemplateEnhancement[] = [ { section: "system", type: "addition", description: "Add 5W1H methodology guidance", content: "Apply the 5W1H methodology systematically: Who (stakeholders), What (objectives), When (timing), Where (context), Why (purpose), How (methods). 
Ensure comprehensive coverage of all questions for thorough analysis.", methodologyJustification: "Ensures systematic application of comprehensive questioning", impact: "high" }, { section: "user", type: "structure", description: "Structure response using 5W1H questions", content: "Please structure your response addressing: 1) Who is involved or affected, 2) What needs to be accomplished, 3) When this should happen, 4) Where this takes place, 5) Why this is important, 6) How this should be done.", methodologyJustification: "Guides comprehensive analysis through systematic questioning", impact: "medium" } ]; return { systemPromptGuidance: this.getSystemPromptGuidance(context), processingEnhancements: this.guideTemplateProcessing("", "template").processingSteps, methodologyGates: fiveW1HGates, templateSuggestions, enhancementMetadata: this.createEnhancementMetadata( 0.9, "5W1H methodology ensures comprehensive analysis through systematic questioning" ) }; } /** * Validate methodology compliance */ validateMethodologyCompliance(prompt: ConvertedPrompt): MethodologyValidation { const combinedText = this.getCombinedText(prompt); const text = combinedText.toLowerCase(); // Check for 5W1H question presence const questions = { who: /who|stakeholder|actor|people|person|role|user|team/i.test(text), what: /what|objective|goal|deliverable|accomplish|achieve/i.test(text), when: /when|timing|deadline|schedule|time|date|timeline/i.test(text), where: /where|location|context|environment|place|setting/i.test(text), why: /why|purpose|reason|motivation|importance|value|benefit/i.test(text), how: /how|method|approach|strategy|process|implementation/i.test(text) }; const presentQuestions = Object.values(questions).filter(Boolean).length; const compliance_score = presentQuestions / 6; // 6 questions in 5W1H const strengths: string[] = []; const improvement_areas: string[] = []; if (questions.who) strengths.push("Stakeholder consideration present"); else improvement_areas.push("Identify stakeholders and people involved"); if (questions.what) strengths.push("Objective definition evident"); else improvement_areas.push("Define specific objectives and deliverables"); if (questions.when) strengths.push("Timing consideration included"); else improvement_areas.push("Establish timing and scheduling requirements"); if (questions.where) strengths.push("Context awareness demonstrated"); else improvement_areas.push("Consider location and contextual factors"); if (questions.why) strengths.push("Purpose and motivation addressed"); else improvement_areas.push("Explore underlying purposes and motivations"); if (questions.how) strengths.push("Implementation approach considered"); else improvement_areas.push("Plan specific methods and implementation approaches"); const specific_suggestions: TemplateEnhancement[] = []; if (!questions.who) { specific_suggestions.push({ section: "system", type: "addition", description: "Add stakeholder identification", content: "Identify all stakeholders, actors, and people involved or affected by this.", methodologyJustification: "5W1H Who question requires comprehensive stakeholder analysis", impact: "high" }); } if (!questions.why) { specific_suggestions.push({ section: "system", type: "addition", description: "Add purpose exploration", content: "Explore the underlying purposes, motivations, and importance of this effort.", methodologyJustification: "5W1H Why question uncovers fundamental motivations", impact: "high" }); } return { compliant: compliance_score > 0.7, compliance_score, strengths, 
improvement_areas, specific_suggestions, methodology_gaps: improvement_areas }; } /** * Get 5W1H-specific system prompt guidance */ getSystemPromptGuidance(context: Record<string, any>): string { return `Apply the 5W1H methodology systematically: **Who**: Identify all stakeholders, actors, and people involved or affected **What**: Define exactly what needs to be accomplished and deliverables **When**: Establish timing, deadlines, and sequence requirements **Where**: Determine location, context, and environmental constraints **Why**: Understand underlying purposes, motivations, and fundamental importance **How**: Plan specific methods, approaches, and implementation strategies Use systematic questioning to ensure comprehensive coverage and uncover hidden requirements. Address each question thoroughly to build complete understanding and effective solutions.`; } /** * Get 5W1H-specific tool descriptions */ getToolDescriptions(): MethodologyToolDescriptions { return { prompt_engine: { description: "🚀 PROMPT TEMPLATE ENGINE [5W1H-ENHANCED]: Processes prompt templates with systematic 5W1H questioning methodology for comprehensive analysis. Guides thorough exploration through Who (stakeholders), What (objectives), When (timing), Where (context), Why (motivation), and How (methods). WARNING: You are responsible for interpreting and executing the returned content, which contains systematic questioning instructions.", parameters: { execution_mode: "Override intelligent auto-detection with 5W1H-aware selection (default: auto, systematic questioning-enhanced)" } }, prompt_manager: { description: "📝 INTELLIGENT PROMPT MANAGER [5W1H-ENHANCED]: Complete lifecycle management with systematic 5W1H questioning methodology integration. Creates comprehensive analysis templates that guide systematic exploration through Who, What, When, Where, Why, and How dimensions. Optimized for thorough requirement analysis and complete solution development.", parameters: { action: "Management action with 5W1H systematic approach: 'create_template' (comprehensive questioning templates), 'analyze_type' (stakeholder analysis), 'migrate_type' (systematic conversion), etc." } }, system_control: { description: "⚙️ INTELLIGENT SYSTEM CONTROL [5W1H-ENHANCED]: System administration with 5W1H systematic questioning methodology. Guides comprehensive exploration through Who (users), What (objectives), When (timing), Where (environments), Why (purposes), and How (methods) for thorough system management and decision-making.", parameters: { action: "System action with 5W1H methodology: 'switch_framework' (systematic framework selection), 'analytics' (comprehensive questioning-based metrics), 'health' (thorough system analysis), etc." } } }; } } ```
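The compliance check in `validateMethodologyCompliance` above reduces to six keyword regexes, a score of `presentQuestions / 6`, and a `> 0.7` threshold. Below is a minimal standalone sketch of that scoring step; the `scoreText` helper and the sample sentence are illustrative assumptions (they are not part of the repository), while the regexes are copied from the guide.

```typescript
// Standalone sketch of the 5W1H compliance scoring shown above.
// Regexes mirror validateMethodologyCompliance; scoreText() is a hypothetical helper.
const checks: Record<string, RegExp> = {
  who: /who|stakeholder|actor|people|person|role|user|team/i,
  what: /what|objective|goal|deliverable|accomplish|achieve/i,
  when: /when|timing|deadline|schedule|time|date|timeline/i,
  where: /where|location|context|environment|place|setting/i,
  why: /why|purpose|reason|motivation|importance|value|benefit/i,
  how: /how|method|approach|strategy|process|implementation/i,
};

function scoreText(text: string): { score: number; compliant: boolean; missing: string[] } {
  // Collect the questions whose keyword pattern never appears in the text.
  const missing = Object.entries(checks)
    .filter(([, re]) => !re.test(text))
    .map(([question]) => question);
  const total = Object.keys(checks).length;
  const score = (total - missing.length) / total;
  return { score, compliant: score > 0.7, missing };
}

console.log(
  scoreText(
    "The team's goal is to migrate the vault search by the Q3 deadline, " +
      "for the purpose of reducing costs, using a phased approach in the staging environment."
  )
);
// -> { score: 1, compliant: true, missing: [] }
```

For the `PromptLoader.loadPromptFile` implementation earlier on this page, it also helps to see how the section regexes carve up a prompt markdown file. The sketch below inlines a sample prompt and a hypothetical `parse` helper that mirrors the System Message / User Message Template / Gate Configuration extraction, including the Phase 3 step that strips the gate block from the template text; the sample content and helper are assumptions for illustration, not repository code.

```typescript
// Illustrative sample of the prompt markdown layout that loadPromptFile parses.
const samplePrompt = `# Example Prompt

## System Message
You are a careful analyst.

## User Message Template
Analyze {{topic}} and summarize the findings.

## Gate Configuration
\`\`\`json
{ "include": ["technical-accuracy"], "framework_gates": true }
\`\`\`
`;

// Hypothetical helper that applies the same extraction regexes as loadPromptFile.
function parse(content: string) {
  const systemMessage = /## System Message\s*\n([\s\S]*?)(?=\n##|$)/
    .exec(content)?.[1]
    ?.trim();

  let userMessageTemplate =
    /## User Message Template\s*\n([\s\S]*)$/.exec(content)?.[1]?.trim() ?? "";

  // Pull the JSON body out of the "## Gate Configuration" fenced block, if present.
  const gateJson = /## Gate Configuration\s*\n```json\s*\n([\s\S]*?)\n```/.exec(content)?.[1];
  const gateConfiguration = gateJson ? JSON.parse(gateJson.trim()) : undefined;

  // Mirror the Phase 3 fix: remove the Gate Configuration section from the template text.
  userMessageTemplate = userMessageTemplate
    .replace(/## Gate Configuration\s*\n```json\s*\n[\s\S]*?\n```\s*/, "")
    .trim();

  return { systemMessage, userMessageTemplate, gateConfiguration };
}

console.log(parse(samplePrompt));
// -> {
//      systemMessage: "You are a careful analyst.",
//      userMessageTemplate: "Analyze {{topic}} and summarize the findings.",
//      gateConfiguration: { include: ["technical-accuracy"], framework_gates: true }
//    }
```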