# claude-prompts-mcp

This is page 3 of 18. Use http://codebase.md/minipuft/claude-prompts-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .actrc
├── .gitattributes
├── .github
│   └── workflows
│       ├── ci.yml
│       ├── mcp-compliance.yml
│       └── pr-validation.yml
├── .gitignore
├── agent.md
├── assets
│   └── logo.png
├── CLAUDE.md
├── config
│   └── framework-state.json
├── docs
│   ├── architecture.md
│   ├── chain-modification-examples.md
│   ├── contributing.md
│   ├── enhanced-gate-system.md
│   ├── execution-architecture-guide.md
│   ├── installation-guide.md
│   ├── mcp-tool-usage-guide.md
│   ├── mcp-tools-reference.md
│   ├── prompt-format-guide.md
│   ├── prompt-management.md
│   ├── prompt-vs-template-guide.md
│   ├── README.md
│   ├── template-development-guide.md
│   ├── TODO.md
│   ├── troubleshooting.md
│   └── version-history.md
├── LICENSE
├── local-test.sh
├── plans
│   ├── nunjucks-dynamic-chain-orchestration.md
│   ├── outputschema-realtime-progress-and-validation.md
│   ├── parallel-conditional-execution-analysis.md
│   ├── sqlite-storage-migration.md
│   └── symbolic-command-language-implementation.md
├── README.md
├── scripts
│   ├── setup-windows-testing.sh
│   ├── test_server.js
│   ├── test-all-platforms.sh
│   └── windows-tests
│       ├── test-windows-paths.js
│       ├── test-windows-startup.sh
│       └── windows-env.sh
└── server
    ├── config
    │   ├── framework-state.json
    │   └── tool-descriptions.json
    ├── config.json
    ├── jest.config.cjs
    ├── LICENSE
    ├── package-lock.json
    ├── package.json
    ├── prompts
    │   ├── analysis
    │   │   ├── advanced_analysis_engine.md
    │   │   ├── content_analysis.md
    │   │   ├── deep_analysis.md
    │   │   ├── deep_research.md
    │   │   ├── markdown_notebook.md
    │   │   ├── note_integration.md
    │   │   ├── note_refinement.md
    │   │   ├── notes.md
    │   │   ├── progressive_research.md
    │   │   ├── prompts.json
    │   │   ├── query_refinement.md
    │   │   └── review.md
    │   ├── architecture
    │   │   ├── prompts.json
    │   │   └── strategic-system-alignment.md
    │   ├── content_processing
    │   │   ├── format_enhancement.md
    │   │   ├── noteIntegration.md
    │   │   ├── obsidian_metadata_optimizer.md
    │   │   ├── prompts.json
    │   │   ├── vault_related_notes_finder.md
    │   │   └── video_notes_enhanced.md
    │   ├── debugging
    │   │   ├── analyze_logs.md
    │   │   └── prompts.json
    │   ├── development
    │   │   ├── analyze_code_structure.md
    │   │   ├── analyze_file_structure.md
    │   │   ├── code_review_optimization_chain.md
    │   │   ├── component_flow_analysis.md
    │   │   ├── create_modularization_plan.md
    │   │   ├── detect_code_issues.md
    │   │   ├── detect_project_commands.md
    │   │   ├── expert_code_implementation.md
    │   │   ├── generate_comprehensive_claude_md.md
    │   │   ├── prompts.json
    │   │   ├── strategicImplement.md
    │   │   ├── suggest_code_improvements.md
    │   │   └── transform_code_to_modules.md
    │   ├── documentation
    │   │   ├── create_docs_chain.md
    │   │   ├── docs-content-creation.md
    │   │   ├── docs-content-planning.md
    │   │   ├── docs-final-assembly.md
    │   │   ├── docs-project-analysis.md
    │   │   ├── docs-review-refinement.md
    │   │   └── prompts.json
    │   ├── education
    │   │   ├── prompts.json
    │   │   └── vault_integrated_notes.md
    │   ├── general
    │   │   ├── diagnose.md
    │   │   └── prompts.json
    │   ├── promptsConfig.json
    │   └── testing
    │       ├── final_verification_test.md
    │       └── prompts.json
    ├── README.md
    ├── scripts
    │   └── validate-dependencies.js
    ├── src
    │   ├── api
    │   │   └── index.ts
    │   ├── chain-session
    │   │   └── manager.ts
    │   ├── config
    │   │   └── index.ts
    │   ├── Dockerfile
    │   ├── execution
    │   │   ├── context
    │   │   │   ├── context-resolver.ts
    │   │   │   ├── framework-injector.ts
    │   │   │   └── index.ts
    │   │   ├── index.ts
    │   │   ├── parsers
    │   │   │   ├── argument-parser.ts
    │   │   │   ├── index.ts
    │   │   │   └── unified-command-parser.ts
    │   │   └── types.ts
    │   ├── frameworks
    │   │   ├── framework-manager.ts
    │   │   ├── framework-state-manager.ts
    │   │   ├── index.ts
    │   │   ├── integration
    │   │   │   ├── framework-semantic-integration.ts
    │   │   │   └── index.ts
    │   │   ├── methodology
    │   │   │   ├── guides
    │   │   │   │   ├── 5w1h-guide.ts
    │   │   │   │   ├── cageerf-guide.ts
    │   │   │   │   ├── react-guide.ts
    │   │   │   │   └── scamper-guide.ts
    │   │   │   ├── index.ts
    │   │   │   ├── interfaces.ts
    │   │   │   └── registry.ts
    │   │   ├── prompt-guidance
    │   │   │   ├── index.ts
    │   │   │   ├── methodology-tracker.ts
    │   │   │   ├── service.ts
    │   │   │   ├── system-prompt-injector.ts
    │   │   │   └── template-enhancer.ts
    │   │   └── types
    │   │       ├── index.ts
    │   │       ├── integration-types.ts
    │   │       ├── methodology-types.ts
    │   │       └── prompt-guidance-types.ts
    │   ├── gates
    │   │   ├── constants.ts
    │   │   ├── core
    │   │   │   ├── gate-definitions.ts
    │   │   │   ├── gate-loader.ts
    │   │   │   ├── gate-validator.ts
    │   │   │   ├── index.ts
    │   │   │   └── temporary-gate-registry.ts
    │   │   ├── definitions
    │   │   │   ├── code-quality.json
    │   │   │   ├── content-structure.json
    │   │   │   ├── educational-clarity.json
    │   │   │   ├── framework-compliance.json
    │   │   │   ├── research-quality.json
    │   │   │   ├── security-awareness.json
    │   │   │   └── technical-accuracy.json
    │   │   ├── gate-state-manager.ts
    │   │   ├── guidance
    │   │   │   ├── FrameworkGuidanceFilter.ts
    │   │   │   └── GateGuidanceRenderer.ts
    │   │   ├── index.ts
    │   │   ├── intelligence
    │   │   │   ├── GatePerformanceAnalyzer.ts
    │   │   │   └── GateSelectionEngine.ts
    │   │   ├── templates
    │   │   │   ├── code_quality_validation.md
    │   │   │   ├── educational_clarity_validation.md
    │   │   │   ├── framework_compliance_validation.md
    │   │   │   ├── research_self_validation.md
    │   │   │   ├── security_validation.md
    │   │   │   ├── structure_validation.md
    │   │   │   └── technical_accuracy_validation.md
    │   │   └── types.ts
    │   ├── index.ts
    │   ├── logging
    │   │   └── index.ts
    │   ├── mcp-tools
    │   │   ├── config-utils.ts
    │   │   ├── constants.ts
    │   │   ├── index.ts
    │   │   ├── prompt-engine
    │   │   │   ├── core
    │   │   │   │   ├── engine.ts
    │   │   │   │   ├── executor.ts
    │   │   │   │   ├── index.ts
    │   │   │   │   └── types.ts
    │   │   │   ├── index.ts
    │   │   │   ├── processors
    │   │   │   │   ├── response-formatter.ts
    │   │   │   │   └── template-processor.ts
    │   │   │   └── utils
    │   │   │       ├── category-extractor.ts
    │   │   │       ├── classification.ts
    │   │   │       ├── context-builder.ts
    │   │   │       └── validation.ts
    │   │   ├── prompt-manager
    │   │   │   ├── analysis
    │   │   │   │   ├── comparison-engine.ts
    │   │   │   │   ├── gate-analyzer.ts
    │   │   │   │   └── prompt-analyzer.ts
    │   │   │   ├── core
    │   │   │   │   ├── index.ts
    │   │   │   │   ├── manager.ts
    │   │   │   │   └── types.ts
    │   │   │   ├── index.ts
    │   │   │   ├── operations
    │   │   │   │   └── file-operations.ts
    │   │   │   ├── search
    │   │   │   │   ├── filter-parser.ts
    │   │   │   │   └── prompt-matcher.ts
    │   │   │   └── utils
    │   │   │       ├── category-manager.ts
    │   │   │       └── validation.ts
    │   │   ├── shared
    │   │   │   └── structured-response-builder.ts
    │   │   ├── system-control.ts
    │   │   ├── tool-description-manager.ts
    │   │   └── types
    │   │       └── shared-types.ts
    │   ├── metrics
    │   │   ├── analytics-service.ts
    │   │   ├── index.ts
    │   │   └── types.ts
    │   ├── performance
    │   │   ├── index.ts
    │   │   └── monitor.ts
    │   ├── prompts
    │   │   ├── category-manager.ts
    │   │   ├── converter.ts
    │   │   ├── file-observer.ts
    │   │   ├── hot-reload-manager.ts
    │   │   ├── index.ts
    │   │   ├── loader.ts
    │   │   ├── promptUtils.ts
    │   │   ├── registry.ts
    │   │   └── types.ts
    │   ├── runtime
    │   │   ├── application.ts
    │   │   └── startup.ts
    │   ├── semantic
    │   │   ├── configurable-semantic-analyzer.ts
    │   │   └── integrations
    │   │       ├── index.ts
    │   │       └── llm-clients.ts
    │   ├── server
    │   │   ├── index.ts
    │   │   └── transport
    │   │       └── index.ts
    │   ├── smithery.yaml
    │   ├── text-references
    │   │   ├── conversation.ts
    │   │   └── index.ts
    │   ├── types
    │   │   └── index.ts
    │   ├── types.ts
    │   └── utils
    │       ├── chainUtils.ts
    │       ├── errorHandling.ts
    │       ├── global-resource-tracker.ts
    │       ├── index.ts
    │       └── jsonUtils.ts
    ├── tests
    │   ├── ci-startup-validation.js
    │   ├── enhanced-validation
    │   │   ├── contract-validation
    │   │   │   ├── contract-test-suite.js
    │   │   │   ├── interface-contracts.js
    │   │   │   └── interface-contracts.ts
    │   │   ├── environment-validation
    │   │   │   ├── environment-parity-checker.js
    │   │   │   └── environment-test-suite.js
    │   │   ├── lifecycle-validation
    │   │   │   ├── lifecycle-test-suite.js
    │   │   │   └── process-lifecycle-validator.js
    │   │   └── validation-orchestrator.js
    │   ├── helpers
    │   │   └── test-helpers.js
    │   ├── integration
    │   │   ├── mcp-tools.test.ts
    │   │   ├── server-startup.test.ts
    │   │   └── unified-parsing-integration.test.ts
    │   ├── performance
    │   │   ├── parsing-system-benchmark.test.ts
    │   │   └── server-performance.test.ts
    │   ├── scripts
    │   │   ├── consolidated-tools.js
    │   │   ├── establish-performance-baselines.js
    │   │   ├── functional-mcp-validation.js
    │   │   ├── integration-mcp-tools.js
    │   │   ├── integration-routing-system.js
    │   │   ├── integration-server-startup.js
    │   │   ├── integration-unified-parsing.js
    │   │   ├── methodology-guides.js
    │   │   ├── performance-memory.js
    │   │   ├── runtime-integration.js
    │   │   ├── unit-conversation-manager.js
    │   │   ├── unit-semantic-analyzer.js
    │   │   └── unit-unified-parsing.js
    │   ├── setup.ts
    │   ├── test-enhanced-parsing.js
    │   └── unit
    │       ├── conversation-manager.test.ts
    │       ├── semantic-analyzer-three-tier.test.ts
    │       └── unified-parsing-system.test.ts
    ├── tsconfig.json
    └── tsconfig.test.json
```

# Files

--------------------------------------------------------------------------------
/server/prompts/architecture/strategic-system-alignment.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Strategic System Alignment
  2 | 
  3 | ## Description
  4 | Strategically align systems and code with architectural rules and goals, using systematic implementation planning and progress tracking in /plans markdown notes
  5 | 
  6 | ## System Message
  7 | You are an expert system architect specializing in strategic alignment and systematic implementation. Your role is to analyze systems, identify alignment gaps with architectural rules and goals, select optimal implementation tactics, and track progress through structured markdown documentation.
  8 | 
  9 | # CORE METHODOLOGY: Strategic Alignment Framework
 10 | 
 11 | ## Phase 1: Context Discovery & Analysis
 12 | 
 13 | ### 1A. System State Analysis
 14 | **Objective**: Understand current architecture and implementation state
 15 | 
 16 | **Actions**:
 17 | - Read architectural documentation (CLAUDE.md, README, architecture docs)
 18 | - Analyze codebase structure and patterns
 19 | - Identify existing systems, coordinators, and integration points
 20 | - Map current dependencies and data flows
 21 | - Document performance characteristics and constraints
 22 | 
 23 | ### 1B. Rules & Goals Alignment Assessment
 24 | **Objective**: Identify gaps between current state and desired state
 25 | 
 26 | **Actions**:
 27 | - Compare current implementation against architectural rules
 28 | - Evaluate alignment with stated goals
 29 | - Identify violations, anti-patterns, and technical debt
 30 | - Quantify alignment score (0-100%) for each rule/goal
 31 | 
 32 | **Gap Analysis**:
 33 | - **Critical Gaps**: Blocking violations requiring immediate action
 34 | - **High Priority Gaps**: Significant misalignment affecting architecture
 35 | - **Medium Priority Gaps**: Improvements needed for maintainability
 36 | - **Low Priority Gaps**: Nice-to-have optimizations
 37 | 
 38 | ### 1C. Risk Assessment
 39 | **Objective**: Understand risks of both action and inaction
 40 | 
 41 | **Risk Categories**:
 42 | - **High Risk**: Breaking changes, performance regressions, data loss potential
 43 | - **Medium Risk**: API changes requiring migration, significant refactoring
 44 | - **Low Risk**: Internal changes, backward compatible improvements
 45 | 
 46 | ## Phase 2: Strategic Planning
 47 | 
 48 | ### 2A. Tactic Selection
 49 | 
 50 | **Available Tactics**:
 51 | 
 52 | 1. **Rename Refactoring** (Risk: Low, Impact: High)
 53 | 2. **Extract Module/Service** (Risk: Medium, Impact: High)
 54 | 3. **Consolidate Duplicates** (Risk: Medium-High, Impact: High)
 55 | 4. **Deprecation Path** (Risk: Low, Impact: Medium)
 56 | 5. **Event-Driven Coordination** (Risk: Medium, Impact: High)
 57 | 6. **Documentation Enhancement** (Risk: Very Low, Impact: Medium)
 58 | 7. **Performance Optimization** (Risk: Medium, Impact: Varies)
 59 | 
 60 | ### 2B. Implementation Sequencing
 61 | 
 62 | **Phase Structure**:
 63 | - **Phase 0: Preparation** - Documentation, baseline metrics, backup plans
 64 | - **Phase 1: Low-Risk Foundation** - Renames, documentation, non-breaking improvements
 65 | - **Phase 2: Structural Changes** - Extractions, consolidations, refactoring
 66 | - **Phase 3: Integration Updates** - Coordination changes, event-driven updates
 67 | - **Phase 4: Validation** - Performance testing, integration testing, documentation
 68 | 
 69 | ## Phase 3: Progress Tracking System
 70 | 
 71 | ### 3A. Markdown Progress Note Management
 72 | 
 73 | **Location Strategy**:
 74 | 1. Check for existing note in `/plans/` matching the system name
 75 | 2. If exists: Read and update existing note
 76 | 3. If not: Create new note at `/plans/system-alignment-[system-name].md`
 77 | 
 78 | **Required Sections**:
 79 | ```markdown
 80 | # System Alignment Progress: [Component/System Name]
 81 | 
 82 | **Started**: [Date]
 83 | **Last Updated**: [Date]
 84 | **Status**: [Planning | In Progress | Validation | Completed]
 85 | 
 86 | ## Executive Summary
 87 | [Overview of alignment goals and current status]
 88 | 
 89 | ## Alignment Assessment
 90 | 
 91 | ### Rules Compliance
 92 | | Rule | Current | Target | Gap | Priority |
 93 | |------|---------|--------|-----|----------|
 94 | 
 95 | ### Goals Progress
 96 | | Goal | Current % | Target % | Status |
 97 | |------|-----------|----------|--------|
 98 | 
 99 | ## Implementation Plan
100 | 
101 | ### Phase 0: Preparation
102 | - [ ] Tasks
103 | **Status**: [Not Started | In Progress | Completed]
104 | 
105 | ## Tactical Decisions
106 | 
107 | ### Tactic 1: [Name]
108 | **Selected**: [Date]
109 | **Rationale**: [Why]
110 | **Risk**: [Low|Medium|High]
111 | **Status**: [Planned|In Progress|Completed]
112 | 
113 | ## Progress Log
114 | 
115 | ### [Date] - [Phase]
116 | **Actions**: [What was done]
117 | **Outcomes**: [Results]
118 | **Issues**: [Problems]
119 | **Next**: [Steps]
120 | 
121 | ## Validation Results
122 | [Tests, metrics, compliance]
123 | 
124 | ## Outstanding Issues
125 | [Current blockers]
126 | 
127 | ## Lessons Learned
128 | [Insights]
129 | ```
130 | 
131 | ### 3B. Update Protocol
132 | 
133 | **Update After**:
134 | - Each tactic completion
135 | - Phase transitions
136 | - Blocking issues
137 | - Validation checkpoints
138 | 
139 | ## Output Format
140 | 
141 | ### 1. Context Analysis Summary
142 | [Current state, rules, goals, constraints]
143 | 
144 | ### 2. Alignment Assessment
145 | [Gap analysis with priorities]
146 | 
147 | ### 3. Strategic Plan
148 | [Sequenced tactics with rationale]
149 | 
150 | ### 4. Progress Note Status
151 | [Created/Updated location]
152 | 
153 | ### 5. Next Immediate Actions
154 | [Top 3-5 actions]
155 | 
156 | ### 6. Validation Checkpoints
157 | [Key milestones]
158 | 
159 | ## Guidelines
160 | 
161 | - **Evidence-Based**: Back decisions with code analysis
162 | - **Risk-Aware**: Plan mitigation strategies
163 | - **Pragmatic**: Balance ideal vs practical
164 | - **Iterative**: Incremental progress with gates
165 | - **Transparent**: Document all decisions
166 | - **Goal-Oriented**: Align with rules and goals
167 | 
168 | ## User Message Template
169 | Align the following system/component:
170 | 
171 | {{task_description}}
172 | 
173 | {% if context_files %}
174 | Context Files: {{context_files}}
175 | {% endif %}
176 | 
177 | {% if architectural_rules %}
178 | Architectural Rules: {{architectural_rules}}
179 | {% endif %}
180 | 
181 | {% if goals %}
182 | Goals: {{goals}}
183 | {% endif %}
184 | 
185 | {% if constraints %}
186 | Constraints: {{constraints}}
187 | {% endif %}
188 | 
```
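
The `## User Message Template` section above relies on Nunjucks-style `{{ }}` placeholders and `{% if %}` guards so that optional context is only included when it is supplied. As a rough illustration of how such a template renders (a minimal sketch using the `nunjucks` package; the variable names are taken from the template above, while the rendering wiring itself is an assumption, not the server's actual code path):

```typescript
// Illustrative only: renders the user-message template above with Nunjucks.
// Argument names mirror the template; the surrounding wiring is assumed.
import nunjucks from "nunjucks";

const userMessageTemplate = `Align the following system/component:

{{ task_description }}

{% if context_files %}Context Files: {{ context_files }}{% endif %}
{% if architectural_rules %}Architectural Rules: {{ architectural_rules }}{% endif %}
{% if goals %}Goals: {{ goals }}{% endif %}
{% if constraints %}Constraints: {{ constraints }}{% endif %}`;

// Variables left out of the context are undefined, so their guarded
// lines simply drop out of the rendered message.
const rendered = nunjucks.renderString(userMessageTemplate, {
  task_description: "Align the gate system with the event-driven coordination rules",
  architectural_rules: "docs/architecture.md",
});

console.log(rendered);
```
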

--------------------------------------------------------------------------------
/server/tests/scripts/runtime-integration.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | /**
  3 |  * Runtime System Integration Tests
  4 |  * Tests the new consolidated runtime architecture (application.ts + startup.ts)
  5 |  * Cross-platform compatible test script with robust error handling
  6 |  */
  7 | 
  8 | import { fileURLToPath } from 'url';
  9 | import path from 'path';
 10 | import { readFileSync } from 'fs';
 11 | 
 12 | const __filename = fileURLToPath(import.meta.url);
 13 | const __dirname = path.dirname(__filename);
 14 | 
 15 | async function runtimeIntegrationTests() {
 16 |   try {
 17 |     console.log('🧪 Running runtime system integration tests...');
 18 |     console.log(`🔧 Platform: ${process.platform}`);
 19 |     console.log(`🔧 Node.js: ${process.version}`);
 20 |     console.log(`🔧 Working directory: ${process.cwd()}`);
 21 |     
 22 |     // Check if build artifacts exist
 23 |     const distPath = path.join(__dirname, '../../dist');
 24 |     console.log(`🔍 Checking build artifacts at: ${distPath}`);
 25 |     
 26 |     try {
 27 |       const runtimePath = path.join(distPath, 'runtime', 'application.js');
 28 |       const startupPath = path.join(distPath, 'runtime', 'startup.js');
 29 |       const utilsPath = path.join(distPath, 'utils', 'index.js');
 30 |       
 31 |       console.log(`🔍 Looking for runtime module: ${runtimePath}`);
 32 |       console.log(`🔍 Looking for startup module: ${startupPath}`);
 33 |       console.log(`🔍 Looking for utils module: ${utilsPath}`);
 34 |       
 35 |       // Verify files exist before importing
 36 |       const fs = await import('fs');
 37 |       if (!fs.existsSync(runtimePath)) {
 38 |         throw new Error(`Runtime application module not found at: ${runtimePath}`);
 39 |       }
 40 |       if (!fs.existsSync(startupPath)) {
 41 |         throw new Error(`Startup module not found at: ${startupPath}`);
 42 |       }
 43 |       if (!fs.existsSync(utilsPath)) {
 44 |         throw new Error(`Utils module not found at: ${utilsPath}`);
 45 |       }
 46 |       
 47 |       console.log('✅ Build artifacts verified');
 48 |     } catch (fsError) {
 49 |       console.error('❌ Build artifacts check failed:', fsError.message);
 50 |       throw fsError;
 51 |     }
 52 |     
 53 |     // Dynamic imports with error handling
 54 |     let Application, MockLogger;
 55 |     
 56 |     try {
 57 |       console.log('🔍 Importing Application runtime...');
 58 |       const runtimeModule = await import('../../dist/runtime/application.js');
 59 |       Application = runtimeModule.Application;
 60 |       
 61 |       if (!Application) {
 62 |         throw new Error('Application not exported from runtime module');
 63 |       }
 64 |       console.log('✅ Application runtime imported successfully');
 65 |     } catch (importError) {
 66 |       console.error('❌ Failed to import Application runtime:', importError.message);
 67 |       throw importError;
 68 |     }
 69 |     
 70 |     try {
 71 |       console.log('🔍 Importing MockLogger...');
 72 |       const utilsModule = await import('../../dist/utils/index.js');
 73 |       MockLogger = utilsModule.MockLogger;
 74 |       
 75 |       if (!MockLogger) {
 76 |         throw new Error('MockLogger not exported from utils module');
 77 |       }
 78 |       console.log('✅ MockLogger imported successfully');
 79 |     } catch (importError) {
 80 |       console.error('❌ Failed to import MockLogger:', importError.message);
 81 |       throw importError;
 82 |     }
 83 |     
 84 |     console.log('🔍 Test 1: New runtime system validation');
 85 |     
 86 |     const logger = new MockLogger();
 87 |     console.log('✅ Logger instance created');
 88 |     
 89 |     const app = new Application(logger);
 90 |     console.log('✅ Application runtime instance created');
 91 |     
 92 |     // Test configuration loading
 93 |     try {
 94 |       await app.loadConfiguration();
 95 |       console.log('✅ Configuration loaded successfully');
 96 |     } catch (configError) {
 97 |       console.error('❌ Configuration loading failed:', configError.message);
 98 |       throw configError;
 99 |     }
100 |     
101 |     // Test prompts data loading
102 |     try {
103 |       await app.loadPromptsData();
104 |       console.log('✅ Prompts data loaded successfully');
105 |     } catch (promptsError) {
106 |       console.error('❌ Prompts data loading failed:', promptsError.message);
107 |       throw promptsError;
108 |     }
109 |     
110 |     // Test modules initialization
111 |     try {
112 |       await app.initializeModules();
113 |       console.log('✅ Modules initialized successfully');
114 |     } catch (modulesError) {
115 |       console.error('❌ Modules initialization failed:', modulesError.message);
116 |       throw modulesError;
117 |     }
118 |     
119 |     // Test health diagnostics
120 |     try {
121 |       const healthInfo = app.validateHealth();
122 |       if (!healthInfo || typeof healthInfo !== 'object') {
123 |         throw new Error('Health diagnostics failed - invalid response');
124 |       }
125 |       console.log(`✅ Health diagnostics validated: ${Object.keys(healthInfo).length} metrics`);
126 |     } catch (healthError) {
127 |       console.error('❌ Health diagnostics failed:', healthError.message);
128 |       throw healthError;
129 |     }
130 |     
131 |     // Test graceful shutdown
132 |     try {
133 |       await app.shutdown();
134 |       console.log('✅ Application shutdown completed successfully');
135 |     } catch (shutdownError) {
136 |       console.error('❌ Application shutdown failed:', shutdownError.message);
137 |       throw shutdownError;
138 |     }
139 |     
140 |     console.log('🎉 Runtime system integration tests completed successfully');
141 |     process.exit(0);
142 |     
143 |   } catch (error) {
144 |     console.error('❌ Runtime system integration tests failed:', error.message);
145 |     console.error('Stack trace:', error.stack);
146 |     
147 |     // Additional debugging information
148 |     console.error('\n🔍 Debugging information:');
149 |     console.error(`Current working directory: ${process.cwd()}`);
150 |     console.error(`Script location: ${__dirname}`);
151 |     console.error(`Platform: ${process.platform}`);
152 |     console.error(`Node.js version: ${process.version}`);
153 |     
154 |     process.exit(1);
155 |   }
156 | }
157 | 
158 | runtimeIntegrationTests();
```
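
The script above walks the consolidated runtime through its full lifecycle: load configuration, load prompts data, initialize modules, check health, and shut down. Stripped of the logging and per-step error handling, the same sequence can be sketched in a few lines of TypeScript (import paths and method names are taken from the script above; everything else is illustrative):

```typescript
// Minimal sketch of the runtime lifecycle exercised by the test above.
// Import paths and method names follow the script; the rest is illustrative.
import { Application } from "../../dist/runtime/application.js";
import { MockLogger } from "../../dist/utils/index.js";

async function exerciseRuntimeLifecycle(): Promise<void> {
  const app = new Application(new MockLogger());

  await app.loadConfiguration();  // step 1: configuration
  await app.loadPromptsData();    // step 2: prompt catalog
  await app.initializeModules();  // step 3: module wiring

  // step 4: health diagnostics should return a metrics object
  const health = app.validateHealth();
  if (!health || typeof health !== "object") {
    throw new Error("Health diagnostics returned an unexpected value");
  }

  await app.shutdown();           // step 5: clean shutdown
}

exerciseRuntimeLifecycle().catch((error) => {
  console.error("Runtime lifecycle check failed:", error);
  process.exit(1);
});
```
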

--------------------------------------------------------------------------------
/server/prompts/analysis/note_integration.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Note Integration
  2 | 
  3 | ## Description
  4 | Integrate new information from a markdown page into existing notes, merging them smoothly while maintaining a logical structure and avoiding duplication.
  5 | 
  6 | ## System Message
  7 | You are an expert content organizer specializing in knowledge integration. Your task is to carefully merge new information into existing notes while preserving the significance of both sources. You excel at recognizing relationships between concepts, eliminating redundancy, and creating a cohesive final document that reads as if it was written as a single piece.
  8 | 
  9 | ## User Message Template
 10 | I need to integrate new information from a markdown page into my existing notes. Please merge this content smoothly while maintaining the logical flow and avoiding duplication.
 11 | 
 12 | Here are my existing notes:
 13 | <existing_notes>
 14 | {{notes}}
 15 | </existing_notes>
 16 | 
 17 | Here is the new information to be integrated:
 18 | <new_information>
 19 | {{new_information}}
 20 | </new_information>
 21 | 
 22 | Please follow these steps to integrate the information:
 23 | 
 24 | 1. **Review the Existing Notes**
 25 |    - Understand the current organization, themes, and specific details (e.g., materials, colors, techniques) so that you can determine where new information can be integrated without redundancy.
 26 | 
 27 | 2. **Analyze the New Information**
 28 |    - Extract key topics, relevant points, and distinct details such as materials, processes, references, or techniques from the provided markdown page. Identify their value to the existing content.
 29 | 
 30 | 3. **Plan the Integration**
 31 |    - Decide where each new element fits best in relation to the existing information.
 32 |    - Maintain detail richness from both the existing and newly introduced data, striving for logical inclusion rather than simple addition.
 33 | 
 34 | 4. **Execute Integration and Edit for Continuity**
 35 |    - Insert new sections, bullet points, or merge content where related concepts already exist.
 36 |    - When new content introduces an entirely different subject, create distinct sections to accommodate these topics.
 37 |    - Maintain a logical, consistent flow throughout: Avoid redundancy, combine related sections, and add transitional language if required.
 38 | 
 39 | 5. **Revise and Suggest**
 40 |    - If specific elements would be better as a completely new document or section, suggest that restructuring explicitly.
 41 |   
 42 | 6. **Final Review**
 43 |    - Ensure that all sections flow smoothly with consistent formatting.
 44 |    - Adhere strictly to markdown conventions, using appropriate headers, links, bullet points, etc., to format the integrated notes clearly.
 45 | 
 46 | Present your integrated notes within <integrated_notes> tags. The notes should read as a cohesive whole, as if they were written as a single document from the beginning.
 47 | 
 48 | # Output Format
 49 | 
 50 | Your integrated notes should be presented in a well-structured markdown format with:
 51 | 
 52 | - Clear hierarchical organization using headings and subheadings
 53 | - Appropriate use of bullet points, numbered lists, and other markdown elements
 54 | - Consistent formatting throughout the document
 55 | - Smooth transitions between existing and new content
 56 | - No redundant or duplicated information
 57 | 
 58 | After the integrated notes, please include a brief summary of what was added and how it was integrated within <integration_summary> tags.
 59 | 
 60 | # Example
 61 | 
 62 | **Existing Notes Sample:**
 63 | ```
 64 | ## Materials Overview
 65 | We currently use a variety of materials: 
 66 | - **Wood**: Mainly oak and pine.
 67 | - **Metal**: Mild steel is typically chosen due to its versatility.
 68 | - **Textiles**: Woven fibers, largely cotton.
 69 | 
 70 | ### Techniques
 71 | - Sanding and polishing wood to achieve a smooth finish.
 72 | - Rust prevention using a metal primer.
 73 | 
 74 | ## Updated Processes
 75 | Our processes have recently included:
 76 | - Extended curing time for painted surfaces.
 77 | ```
 78 | 
 79 | **New Information Sample:**
 80 | ```
 81 | ## Materials and Techniques
 82 | We have introduced new textile materials, such as linen, and different coating options for metals, including galvanized coating for additional rust protection.
 83 | 
 84 | An addition to our techniques includes a water-based polishing option for metals to avoid chemical polishing.
 85 | ```
 86 | 
 87 | **Integrated Notes Sample:**
 88 | <integrated_notes>
 89 | ## Materials Overview
 90 | We currently use a variety of materials: 
 91 | - **Wood**: Mainly oak and pine.
 92 | - **Metal**: Mild steel is typically chosen due to its versatility. We have also introduced **galvanized coating** for added rust protection.
 93 | - **Textiles**: Woven fibers, largely cotton, with the addition of linen.
 94 | 
 95 | ### Techniques
 96 | - Sanding and polishing wood to achieve a smooth finish.
 97 | - Rust prevention using a metal primer and galvanized coating.
 98 | - We have introduced a **water-based polishing option for metals**, avoiding chemical-based alternatives.
 99 | 
100 | ## Updated Processes
101 | Our processes have recently included:
102 | - Extended curing time for painted surfaces.
103 | </integrated_notes>
104 | 
105 | <integration_summary>
106 | The integration added new materials (linen for textiles and galvanized coating for metals) to the existing Materials Overview section. A new technique (water-based polishing for metals) was added to the Techniques section. The information was merged within existing categories rather than creating new sections since the content was closely related.
107 | </integration_summary>
108 | 
109 | # Notes
110 | 
111 | - Pay close attention to preserving specific technical information (like types of materials or processes)
112 | - Avoid overlap by merging any redundant sections
113 | - Maintain fluid progression between old and new information to present the finalized notes as a unified whole
114 | - When appropriate, use formatting (bold, italics, etc.) to highlight newly added information
115 | - If new information contradicts existing notes, indicate this clearly and provide both perspectives 
```

--------------------------------------------------------------------------------
/local-test.sh:
--------------------------------------------------------------------------------

```bash
  1 | #!/bin/bash
  2 | 
  3 | # Local GitHub Actions Testing Script
  4 | # Provides convenient commands for testing workflows locally with Act
  5 | 
  6 | set -e
  7 | 
  8 | # Colors for output
  9 | RED='\033[0;31m'
 10 | GREEN='\033[0;32m'
 11 | YELLOW='\033[1;33m'
 12 | BLUE='\033[0;34m'
 13 | NC='\033[0m' # No Color
 14 | 
 15 | # Act binary path
 16 | ACT_BIN="$HOME/.local/bin/act"
 17 | 
 18 | # Function to print colored output
 19 | print_status() {
 20 |     echo -e "${BLUE}[INFO]${NC} $1"
 21 | }
 22 | 
 23 | print_success() {
 24 |     echo -e "${GREEN}[SUCCESS]${NC} $1"
 25 | }
 26 | 
 27 | print_warning() {
 28 |     echo -e "${YELLOW}[WARNING]${NC} $1"
 29 | }
 30 | 
 31 | print_error() {
 32 |     echo -e "${RED}[ERROR]${NC} $1"
 33 | }
 34 | 
 35 | # Function to check prerequisites
 36 | check_prerequisites() {
 37 |     print_status "Checking prerequisites..."
 38 |     
 39 |     # Check if Act is installed
 40 |     if ! command -v "$ACT_BIN" &> /dev/null; then
 41 |         print_error "Act is not installed. Please run the installation script first."
 42 |         exit 1
 43 |     fi
 44 |     
 45 |     print_success "Act is installed: $($ACT_BIN --version)"
 46 |     
 47 |     # Check if Docker is available
 48 |     if ! command -v docker &> /dev/null; then
 49 |         print_warning "Docker is not available. Please enable Docker Desktop WSL integration."
 50 |         print_warning "See setup-local-testing.md for instructions."
 51 |         return 1
 52 |     fi
 53 |     
 54 |     # Test Docker connectivity
 55 |     if ! docker version &> /dev/null; then
 56 |         print_warning "Docker daemon is not running or not accessible."
 57 |         print_warning "Please start Docker Desktop and enable WSL integration."
 58 |         return 1
 59 |     fi
 60 |     
 61 |     print_success "Docker is available and running"
 62 |     return 0
 63 | }
 64 | 
 65 | # Function to list workflows
 66 | list_workflows() {
 67 |     print_status "Available workflows:"
 68 |     "$ACT_BIN" --list
 69 | }
 70 | 
 71 | # Function to run dry run
 72 | dry_run() {
 73 |     local job_name="$1"
 74 |     print_status "Running dry run for job: $job_name"
 75 |     "$ACT_BIN" --dryrun -j "$job_name"
 76 | }
 77 | 
 78 | # Function to run workflow locally
 79 | run_workflow() {
 80 |     local job_name="$1"
 81 |     local event="${2:-push}"
 82 |     
 83 |     print_status "Running workflow locally..."
 84 |     print_status "Job: $job_name"
 85 |     print_status "Event: $event"
 86 |     
 87 |     if check_prerequisites; then
 88 |         "$ACT_BIN" -j "$job_name" --verbose
 89 |     else
 90 |         print_warning "Running in dry-run mode due to Docker issues"
 91 |         "$ACT_BIN" --dryrun -j "$job_name"
 92 |     fi
 93 | }
 94 | 
 95 | # Function to test specific workflow with event
 96 | test_workflow() {
 97 |     local workflow_file="$1"
 98 |     local event="${2:-push}"
 99 |     
100 |     print_status "Testing workflow file: $workflow_file"
101 |     print_status "Event: $event"
102 |     
103 |     if check_prerequisites; then
104 |         "$ACT_BIN" -W ".github/workflows/$workflow_file" "$event" --verbose
105 |     else
106 |         print_warning "Running in dry-run mode due to Docker issues"
107 |         "$ACT_BIN" --dryrun -W ".github/workflows/$workflow_file" "$event"
108 |     fi
109 | }
110 | 
111 | # Function to run quick validation
112 | quick_validation() {
113 |     print_status "Running quick validation tests..."
114 |     
115 |     # List of quick jobs to test
116 |     local quick_jobs=("code-quality" "validate")
117 |     
118 |     for job in "${quick_jobs[@]}"; do
119 |         print_status "Testing $job..."
120 |         if check_prerequisites; then
121 |             "$ACT_BIN" -j "$job" --verbose
122 |         else
123 |             print_warning "Running $job in dry-run mode"
124 |             "$ACT_BIN" --dryrun -j "$job"
125 |         fi
126 |     done
127 | }
128 | 
129 | # Function to show help
130 | show_help() {
131 |     cat << EOF
132 | Local GitHub Actions Testing Script
133 | 
134 | Usage: $0 [COMMAND] [OPTIONS]
135 | 
136 | Commands:
137 |   list                     List all available workflows
138 |   dry-run <job>           Run dry-run for specific job
139 |   run <job> [event]       Run workflow locally (default event: push)
140 |   test <workflow> [event] Test specific workflow file
141 |   quick                   Run quick validation tests
142 |   check                   Check prerequisites
143 |   help                    Show this help message
144 | 
145 | Examples:
146 |   $0 list                          # List all workflows
147 |   $0 dry-run code-quality         # Dry run code quality checks
148 |   $0 run code-quality             # Run code quality checks locally
149 |   $0 run validate push            # Run validation with push event
150 |   $0 test ci.yml pull_request     # Test CI workflow with PR event
151 |   $0 quick                        # Run quick validation tests
152 |   $0 check                        # Check prerequisites
153 | 
154 | Common Jobs:
155 |   - code-quality: Code Quality Checks
156 |   - validate: Validate Build and Tests
157 |   - enhanced-test-validation: Enhanced Test Suite
158 |   - mcp-protocol-validation: MCP Protocol Compliance
159 |   - cageerf-framework-validation: CAGEERF Framework Validation
160 |   - performance-baseline: Performance Monitoring
161 | 
162 | For more information, see setup-local-testing.md
163 | EOF
164 | }
165 | 
166 | # Main script logic
167 | case "$1" in
168 |     list)
169 |         list_workflows
170 |         ;;
171 |     dry-run)
172 |         if [ -z "$2" ]; then
173 |             print_error "Job name is required for dry-run"
174 |             echo "Usage: $0 dry-run <job_name>"
175 |             exit 1
176 |         fi
177 |         dry_run "$2"
178 |         ;;
179 |     run)
180 |         if [ -z "$2" ]; then
181 |             print_error "Job name is required for run"
182 |             echo "Usage: $0 run <job_name> [event]"
183 |             exit 1
184 |         fi
185 |         run_workflow "$2" "$3"
186 |         ;;
187 |     test)
188 |         if [ -z "$2" ]; then
189 |             print_error "Workflow file is required for test"
190 |             echo "Usage: $0 test <workflow_file> [event]"
191 |             exit 1
192 |         fi
193 |         test_workflow "$2" "$3"
194 |         ;;
195 |     quick)
196 |         quick_validation
197 |         ;;
198 |     check)
199 |         check_prerequisites
200 |         ;;
201 |     help|--help|-h)
202 |         show_help
203 |         ;;
204 |     "")
205 |         print_error "No command specified"
206 |         show_help
207 |         exit 1
208 |         ;;
209 |     *)
210 |         print_error "Unknown command: $1"
211 |         show_help
212 |         exit 1
213 |         ;;
214 | esac
```

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: CI/CD Pipeline
  2 | 
  3 | on:
  4 |   push:
  5 |     branches: [main, develop]
  6 |   pull_request:
  7 |     branches: [main]
  8 | 
  9 | env:
 10 |   NODE_ENV: test
 11 | 
 12 | jobs:
 13 |   validate:
 14 |     name: Build and Core Validation
 15 |     runs-on: ${{ matrix.os }}
 16 |     strategy:
 17 |       matrix:
 18 |         os: [ubuntu-latest]
 19 |         node-version: [18]
 20 |       fail-fast: false
 21 |     
 22 |     defaults:
 23 |       run:
 24 |         shell: bash
 25 |     
 26 |     steps:
 27 |       - name: Detect Platform Environment
 28 |         id: platform
 29 |         shell: bash
 30 |         run: |
 31 |           if [ "$ACT" = "true" ]; then
 32 |             echo "environment=act" >> $GITHUB_OUTPUT
 33 |             echo "🎭 Running in ACT (local testing environment)"
 34 |           else
 35 |             echo "environment=github" >> $GITHUB_OUTPUT
 36 |             echo "🚀 Running in GitHub Actions"  
 37 |           fi
 38 |           echo "os=${{ matrix.os }}" >> $GITHUB_OUTPUT
 39 |       
 40 |       
 41 |       - name: Checkout repository
 42 |         if: success() || failure()
 43 |         uses: actions/checkout@v4
 44 |         with:
 45 |           fetch-depth: 0
 46 |       
 47 |       - name: Setup Node.js ${{ matrix.node-version }}
 48 |         if: success()
 49 |         uses: actions/setup-node@v4
 50 |         with:
 51 |           node-version: ${{ matrix.node-version }}
 52 |           cache: 'npm'
 53 |           cache-dependency-path: server/package-lock.json
 54 |       
 55 |       - name: Install dependencies
 56 |         if: success()
 57 |         working-directory: server
 58 |         run: npm ci --prefer-offline --no-audit
 59 |       
 60 |       - name: TypeScript type checking
 61 |         if: success()
 62 |         working-directory: server  
 63 |         run: npm run typecheck
 64 |       
 65 |       - name: Build project
 66 |         if: success()
 67 |         working-directory: server
 68 |         run: npm run build
 69 |       
 70 |       - name: Run server integration tests
 71 |         if: success()
 72 |         working-directory: server
 73 |         run: npm run test:integration
 74 |         timeout-minutes: 3
 75 |       
 76 |       - name: Validate MCP server startup (Cross-Platform)
 77 |         if: success()
 78 |         working-directory: server
 79 |         env:
 80 |           MCP_SERVER_ROOT: ${{ github.workspace }}/server
 81 |         run: npm run test:ci-startup
 82 |         timeout-minutes: 3
 83 |       
 84 |       - name: Upload build artifacts
 85 |         uses: actions/upload-artifact@v4
 86 |         with:
 87 |           name: build-artifacts
 88 |           path: |
 89 |             server/dist/
 90 |             server/package.json
 91 |             server/package-lock.json
 92 |           retention-days: 7
 93 | 
 94 |   enhanced-tests:
 95 |     name: Enhanced Functionality Tests
 96 |     runs-on: ubuntu-latest
 97 |     needs: validate
 98 | 
 99 |     steps:
100 |       - name: Checkout repository
101 |         uses: actions/checkout@v4
102 | 
103 |       - name: Download build artifacts
104 |         uses: actions/download-artifact@v4
105 |         with:
106 |           name: build-artifacts
107 |           path: server/
108 | 
109 |       - name: Setup Node.js
110 |         uses: actions/setup-node@v4
111 |         with:
112 |           node-version: '18'
113 |           cache: 'npm'
114 |           cache-dependency-path: server/package-lock.json
115 | 
116 |       - name: Install dependencies
117 |         working-directory: server
118 |         run: npm ci --prefer-offline --no-audit
119 |       
120 |       - name: Run all enhanced tests
121 |         working-directory: server
122 |         run: npm run test:all-enhanced
123 |         timeout-minutes: 10
124 | 
125 |   release-platform-test:
126 |     name: Cross-Platform Release Validation
127 |     runs-on: ${{ matrix.os }}
128 |     if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')
129 |     strategy:
130 |       matrix:
131 |         os: [windows-latest, macos-latest]
132 |         node-version: [18]
133 | 
134 |     steps:
135 |       - name: Checkout repository
136 |         uses: actions/checkout@v4
137 | 
138 |       - name: Setup Node.js ${{ matrix.node-version }}
139 |         uses: actions/setup-node@v4
140 |         with:
141 |           node-version: ${{ matrix.node-version }}
142 |           cache: 'npm'
143 |           cache-dependency-path: server/package-lock.json
144 | 
145 |       - name: Install dependencies
146 |         working-directory: server
147 |         run: npm ci --prefer-offline --no-audit
148 | 
149 |       - name: Build and basic validation
150 |         working-directory: server
151 |         run: |
152 |           npm run build
153 |           npm run test:ci-startup
154 |         timeout-minutes: 5
155 | 
156 |   code-quality:
157 |     name: Code Quality Checks
158 |     runs-on: ubuntu-latest
159 |     needs: validate
160 | 
161 |     steps:
162 |       - name: Checkout repository
163 |         uses: actions/checkout@v4
164 | 
165 |       - name: Download build artifacts
166 |         uses: actions/download-artifact@v4
167 |         with:
168 |           name: build-artifacts
169 |           path: server/
170 | 
171 |       - name: Setup Node.js
172 |         uses: actions/setup-node@v4
173 |         with:
174 |           node-version: '18'
175 |           cache: 'npm'
176 |           cache-dependency-path: server/package-lock.json
177 | 
178 |       - name: Install dependencies
179 |         working-directory: server
180 |         run: npm ci --prefer-offline --no-audit
181 |       
182 |       - name: Check for sensitive files
183 |         run: |
184 |           if find . -name "*.env*" -o -name "*.key" -o -name "*.pem" -o -name "*.p12" | grep -v node_modules | grep -q .; then
185 |             echo "❌ Sensitive files found in repository"
186 |             find . -name "*.env*" -o -name "*.key" -o -name "*.pem" -o -name "*.p12" | grep -v node_modules
187 |             exit 1
188 |           else
189 |             echo "✅ No sensitive files found"
190 |           fi
191 |       
192 |       - name: Validate source file consistency
193 |         working-directory: server
194 |         run: |
195 |           # TypeScript projects should not have JS files in src/
196 |           if find src -name "*.js" | grep -q .; then
197 |             echo "❌ JavaScript files found in TypeScript source directory"
198 |             find src -name "*.js"
199 |             exit 1
200 |           else
201 |             echo "✅ Source directory contains only TypeScript files"
202 |           fi
203 |       
204 |       
205 |       - name: Validate build functionality
206 |         working-directory: server
207 |         run: |
208 |           # Test that the built server can actually start
209 |           npm run test:ci-startup
```

--------------------------------------------------------------------------------
/server/prompts/content_processing/obsidian_metadata_optimizer.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Obsidian Metadata Optimizer
  2 | 
  3 | ## Description
  4 | Creates comprehensive, intelligent metadata and frontmatter for Obsidian notes, optimizing for discoverability, organization, and advanced plugin functionality
  5 | 
  6 | ## User Message Template
  7 | [System Info: You are an Obsidian metadata specialist who creates intelligent, comprehensive frontmatter and metadata systems. You understand advanced tagging, plugin integration, and knowledge management optimization.]
  8 | 
  9 | **OBSIDIAN METADATA OPTIMIZATION**
 10 | 
 11 | Create comprehensive metadata for the following note content:
 12 | 
 13 | **NOTE CONTENT:**
 14 | ```
 15 | {{ note_content }}
 16 | ```
 17 | 
 18 | {% if vault_structure %}
 19 | **VAULT STRUCTURE:**
 20 | ```
 21 | {{ vault_structure }}
 22 | ```
 23 | {% endif %}
 24 | 
 25 | **METADATA DEPTH**: {{ metadata_depth | default('comprehensive') }}
 26 | 
 27 | **METADATA OPTIMIZATION PRINCIPLES:**
 28 | - Create intelligent, searchable metadata structures
 29 | - Optimize for plugin ecosystem functionality
 30 | - Build comprehensive connection networks
 31 | - Support advanced knowledge management workflows
 32 | - Enable efficient organization and discovery
 33 | 
 34 | **COMPREHENSIVE METADATA FRAMEWORK:**
 35 | 
 36 | ## 1. CORE FRONTMATTER STRUCTURE
 37 | Design essential metadata for note identification and organization:
 38 | 
 39 | ```yaml
 40 | ---
 41 | # Core Identification
 42 | title: "Descriptive Title"
 43 | aliases: [Alternative Names, Abbreviations, Synonyms]
 44 | tags: [hierarchical/tag/system, domain/specific, skill/level]
 45 | 
 46 | # Temporal Information
 47 | created: YYYY-MM-DD
 48 | modified: YYYY-MM-DD
 49 | reviewed: YYYY-MM-DD
 50 | review-date: YYYY-MM-DD
 51 | 
 52 | # Source Information
 53 | author: "Content Creator"
 54 | source: "Original URL or Reference"
 55 | source-type: "video/article/book/course"
 56 | duration: "Content length if applicable"
 57 | 
 58 | # Content Classification
 59 | type: "tutorial/reference/methodology/analysis"
 60 | domain: "knowledge-domain"
 61 | skill-level: "beginner/intermediate/advanced"
 62 | difficulty: "1-10 scale"
 63 | status: "active/review/archived/draft"
 64 | 
 65 | # Knowledge Network
 66 | prerequisites: [[Required Knowledge]]
 67 | builds-on: [[Foundation Concepts]]
 68 | related: [[Connected Topics]]
 69 | part-of: [[Parent Topic or Series]]
 70 | leads-to: [[Next Steps or Advanced Topics]]
 71 | 
 72 | # Learning Integration
 73 | learning-path: "progression-pathway"
 74 | learning-stage: "foundation/development/mastery"
 75 | practice-required: true/false
 76 | review-frequency: "weekly/monthly/quarterly"
 77 | 
 78 | # Vault Organization
 79 | moc: [[Map of Content]]
 80 | folder: "vault-folder-location"
 81 | connections: 5 # Number of vault connections
 82 | ---
 83 | ```
 84 | 
 85 | ## 2. INTELLIGENT TAGGING SYSTEM
 86 | Create hierarchical, searchable tag structures:
 87 | 
 88 | **Domain Tags**: `#art/perspective`, `#programming/python`, `#business/strategy`
 89 | **Skill Level Tags**: `#skill/beginner`, `#skill/intermediate`, `#skill/advanced`
 90 | **Content Type Tags**: `#type/tutorial`, `#type/reference`, `#type/methodology`
 91 | **Status Tags**: `#status/active`, `#status/review`, `#status/mastered`
 92 | **Feature Tags**: `#has-examples`, `#has-exercises`, `#has-templates`
 93 | **Connection Tags**: `#builds-on`, `#prerequisite-for`, `#related-to`
 94 | 
 95 | ## 3. PLUGIN ECOSYSTEM INTEGRATION
 96 | Optimize metadata for popular Obsidian plugins:
 97 | 
 98 | **Dataview Integration:**
 99 | ```yaml
100 | # Dataview-friendly fields
101 | practice-time: 30 # minutes
102 | completion-rate: 0.8 # 0-1 scale
103 | last-practiced: YYYY-MM-DD
104 | next-review: YYYY-MM-DD
105 | importance: high/medium/low
106 | ```
107 | 
108 | **Templater Integration:**
109 | ```yaml
110 | # Template variables
111 | template-used: "note-template-name"
112 | auto-generated: true/false
113 | template-version: "1.0"
114 | ```
115 | 
116 | **Spaced Repetition:**
117 | ```yaml
118 | # Learning optimization
119 | retention-rate: 0.9
120 | review-count: 3
121 | mastery-level: 0.7
122 | ```
123 | 
124 | **Tasks Plugin:**
125 | ```yaml
126 | # Task management
127 | has-tasks: true
128 | task-completion: 0.6
129 | ```
130 | 
131 | ## 4. KNOWLEDGE MANAGEMENT METADATA
132 | Support advanced knowledge organization:
133 | 
134 | **MOC Integration:**
135 | ```yaml
136 | # Map of Content connections
137 | parent-moc: [[Primary MOC]]
138 | child-mocs: [[Subtopic MOCs]]
139 | cross-domain-links: [[Related Field MOCs]]
140 | ```
141 | 
142 | **Learning Pathways:**
143 | ```yaml
144 | # Progressive learning
145 | pathway: "skill-development-path"
146 | sequence: 3 # Position in learning sequence
147 | prerequisites-met: true/false
148 | ready-for-next: true/false
149 | ```
150 | 
151 | **Network Analysis:**
152 | ```yaml
153 | # Connection metrics
154 | in-degree: 8 # Number of incoming links
155 | out-degree: 12 # Number of outgoing links
156 | centrality: 0.7 # Network importance
157 | cluster: "knowledge-cluster-name"
158 | ```
159 | 
160 | ## 5. SEARCH AND DISCOVERY OPTIMIZATION
161 | Enhance findability and discoverability:
162 | 
163 | **Search Keywords:**
164 | ```yaml
165 | # Enhanced search terms
166 | keywords: [search, terms, for, discovery]
167 | concepts: [main, conceptual, themes]
168 | synonyms: [alternative, terminology]
169 | ```
170 | 
171 | **Content Indicators:**
172 | ```yaml
173 | # Content characteristics
174 | has-images: true
175 | has-code: false
176 | has-formulas: true
177 | has-references: true
178 | word-count: 2500
179 | read-time: 10 # minutes
180 | ```
181 | 
182 | ## 6. WORKFLOW INTEGRATION
183 | Support daily and review workflows:
184 | 
185 | **Daily Note Integration:**
186 | ```yaml
187 | # Daily workflow
188 | daily-note-mention: true
189 | agenda-item: false
190 | quick-capture: false
191 | ```
192 | 
193 | **Review System:**
194 | ```yaml
195 | # Review optimization
196 | review-type: "spaced-repetition"
197 | review-priority: high/medium/low
198 | review-notes: "specific areas needing attention"
199 | ```
200 | 
201 | **Project Integration:**
202 | ```yaml
203 | # Project connections
204 | project: [[Related Project]]
205 | milestone: "project-milestone"
206 | deliverable: true/false
207 | ```
208 | 
209 | ## 7. ADVANCED METADATA FEATURES
210 | Utilize cutting-edge Obsidian capabilities:
211 | 
212 | **Canvas Integration:**
213 | ```yaml
214 | # Visual organization
215 | canvas-included: true
216 | canvas-name: "knowledge-map-canvas"
217 | visual-position: "center/periphery"
218 | ```
219 | 
220 | **Community Plugin Support:**
221 | ```yaml
222 | # Extended functionality
223 | excalidraw-diagrams: true
224 | advanced-tables: false
225 | mind-map-included: true
226 | ```
227 | 
228 | **METADATA OUTPUT REQUIREMENTS:**
229 | - Comprehensive frontmatter with intelligent field selection
230 | - Hierarchical tag system optimized for discovery
231 | - Plugin ecosystem integration for enhanced functionality
232 | - Knowledge management structure support
233 | - Advanced search and organization capabilities
234 | 
235 | Generate optimized metadata that leverages Obsidian's full potential while maintaining clean organization and maximum discoverability.
236 | 
```
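
Frontmatter of this shape is easier to generate programmatically than to hand-type. As a rough sketch (assuming the `js-yaml` package, which is not shown in this repository's dependencies; the field names follow the core frontmatter structure above, and the sample values are purely illustrative):

```typescript
// Illustrative only: builds a frontmatter block with fields from the
// core frontmatter structure above and prepends it to a note body.
import yaml from "js-yaml";

interface NoteMetadata {
  title: string;
  aliases: string[];
  tags: string[];
  created: string;
  type: string;
  "skill-level": string;
  status: string;
  related: string[];
}

function withFrontmatter(body: string, meta: NoteMetadata): string {
  const frontmatter = yaml.dump(meta, { lineWidth: 120 });
  return `---\n${frontmatter}---\n\n${body}`;
}

const note = withFrontmatter("## Perspective basics\n...", {
  title: "One-Point Perspective",
  aliases: ["1-Point Perspective"],
  tags: ["art/perspective", "skill/beginner", "type/tutorial"],
  created: "2024-01-15",
  type: "tutorial",
  "skill-level": "beginner",
  status: "active",
  related: ["[[Two-Point Perspective]]"],
});

console.log(note);
```

Keeping field names flat and consistently spelled is what lets Dataview-style queries pick these keys up later.
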

--------------------------------------------------------------------------------
/server/tests/ci-startup-validation.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | /**
  3 |  * Cross-Platform CI Startup Validation
  4 |  * Industry-standard Node.js test script for CI/CD pipelines
  5 |  * 
  6 |  * This replaces all shell-specific validation logic with programmatic testing
  7 |  * that works identically across Windows, macOS, and Linux.
  8 |  */
  9 | 
 10 | import { fileURLToPath } from 'url';
 11 | import path from 'path';
 12 | 
 13 | const __filename = fileURLToPath(import.meta.url);
 14 | const __dirname = path.dirname(__filename);
 15 | 
 16 | /**
 17 |  * CI-specific console logging that respects CI environment expectations
 18 |  */
 19 | const ci = {
 20 |   info: (message) => console.log(`[INFO] ${message}`),
 21 |   success: (message) => console.log(`[SUCCESS] ${message}`),
 22 |   error: (message) => console.error(`[ERROR] ${message}`),
 23 |   debug: (message) => {
 24 |     // Only show debug in verbose mode
 25 |     if (process.env.VERBOSE) {
 26 |       console.log(`[DEBUG] ${message}`);
 27 |     }
 28 |   }
 29 | };
 30 | 
 31 | /**
 32 |  * Validate that build artifacts exist and are correct
 33 |  */
 34 | async function validateBuildArtifacts() {
 35 |   ci.info('Validating build artifacts...');
 36 |   
 37 |   const fs = await import('fs');
 38 |   const distPath = path.join(__dirname, '../dist');
 39 |   
 40 |   // Check dist directory exists
 41 |   if (!fs.existsSync(distPath)) {
 42 |     throw new Error('Build directory not found: dist/');
 43 |   }
 44 |   
 45 |   // Check main entry point exists
 46 |   const mainEntryPoint = path.join(distPath, 'index.js');
 47 |   if (!fs.existsSync(mainEntryPoint)) {
 48 |     throw new Error('Main entry point not found: dist/index.js');
 49 |   }
 50 |   
 51 |   // Check key modules exist
 52 |   const requiredModules = [
 53 |     'runtime/application.js',
 54 |     'runtime/startup.js',
 55 |     'utils/index.js',
 56 |     'utils/global-resource-tracker.js',
 57 |     'config/index.js',
 58 |     'logging/index.js',
 59 |     'mcp-tools/prompt-engine/index.js',
 60 |     'mcp-tools/prompt-manager/index.js',
 61 |     'mcp-tools/system-control.js'
 62 |   ];
 63 |   
 64 |   for (const module of requiredModules) {
 65 |     const modulePath = path.join(distPath, module);
 66 |     if (!fs.existsSync(modulePath)) {
 67 |       throw new Error(`Required module not found: dist/${module}`);
 68 |     }
 69 |   }
 70 |   
 71 |   ci.success('Build artifacts validation passed');
 72 | }
 73 | 
 74 | /**
 75 |  * Validate server startup using direct module imports
 76 |  * This is much more reliable than parsing shell output
 77 |  */
 78 | async function validateServerStartup() {
 79 |   ci.info('Validating server startup...');
 80 |   
 81 |   try {
 82 |     // Set CI environment to get clean output
 83 |     process.env.CI = 'true';
 84 |     process.env.NODE_ENV = 'test';
 85 |     
 86 |     // Import the runtime application directly
 87 |     const { Application } = await import('../dist/runtime/application.js');
 88 |     const { MockLogger } = await import('../dist/utils/index.js');
 89 |     
 90 |     ci.debug('Creating application instance...');
 91 |     const logger = new MockLogger();
 92 |     const app = new Application(logger);
 93 |     
 94 |     // Test application configuration loading
 95 |     ci.debug('Loading configuration...');
 96 |     await app.loadConfiguration();
 97 |     
 98 |     ci.debug('Configuration loaded successfully');
 99 |     
100 |     // Test prompts data loading
101 |     ci.debug('Loading prompts data...');
102 |     await app.loadPromptsData();
103 |     
104 |     ci.debug('Prompts data loaded successfully');
105 |     
106 |     // Test modules initialization
107 |     ci.debug('Initializing modules...');
108 |     await app.initializeModules();
109 |     
110 |     ci.debug('Modules initialized successfully');
111 |     
112 |     // Test health validation
113 |     ci.debug('Validating health...');
114 |     const healthInfo = app.validateHealth();
115 |     
116 |     if (!healthInfo || typeof healthInfo !== 'object') {
117 |       throw new Error('Health validation failed - invalid health info');
118 |     }
119 |     
120 |     ci.debug(`Health info collected: ${Object.keys(healthInfo).length} metrics`);
121 |     
122 |     // Clean shutdown
123 |     ci.debug('Shutting down application...');
124 |     await app.shutdown();
125 |     
126 |     ci.success('Server startup validation passed');
127 |     return {
128 |       configLoaded: true,
129 |       promptsLoaded: true,
130 |       modulesInitialized: true,
131 |       healthValidated: true,
132 |       shutdownClean: true
133 |     };
134 |     
135 |   } catch (error) {
136 |     throw new Error(`Server startup validation failed: ${error.message}`);
137 |   }
138 | }
139 | 
140 | /**
141 |  * Run comprehensive CI validation
142 |  */
143 | async function runCIValidation() {
144 |   const startTime = Date.now();
145 |   
146 |   try {
147 |     ci.info('Starting CI startup validation...');
148 |     ci.info(`Platform: ${process.platform}`);
149 |     ci.info(`Node.js: ${process.version}`);
150 |     ci.info(`Working directory: ${process.cwd()}`);
151 |     
152 |     // Phase 1: Build artifacts validation
153 |     await validateBuildArtifacts();
154 |     
155 |     // Phase 2: Server startup validation
156 |     const results = await validateServerStartup();
157 |     
158 |     const duration = Date.now() - startTime;
159 |     
160 |     ci.success('='.repeat(50));
161 |     ci.success('CI STARTUP VALIDATION PASSED');
162 |     ci.success('='.repeat(50));
163 |     ci.info(`Configuration loaded: ${results.configLoaded}`);
164 |     ci.info(`Prompts loaded: ${results.promptsLoaded}`);
165 |     ci.info(`Modules initialized: ${results.modulesInitialized}`);
166 |     ci.info(`Health validated: ${results.healthValidated}`);
167 |     ci.info(`Clean shutdown: ${results.shutdownClean}`);
168 |     ci.info(`Total duration: ${duration}ms`);
169 |     
170 |     // Clean exit for CI
171 |     process.exit(0);
172 |     
173 |   } catch (error) {
174 |     const duration = Date.now() - startTime;
175 |     
176 |     ci.error('='.repeat(50));
177 |     ci.error('CI STARTUP VALIDATION FAILED');
178 |     ci.error('='.repeat(50));
179 |     ci.error(`Error: ${error.message}`);
180 |     ci.error(`Duration: ${duration}ms`);
181 |     ci.error(`Platform: ${process.platform}`);
182 |     ci.error(`Node.js: ${process.version}`);
183 |     
184 |     // Show stack trace in debug mode
185 |     if (process.env.VERBOSE) {
186 |       ci.error('Stack trace:');
187 |       ci.error(error.stack);
188 |     }
189 |     
190 |     // Clean exit with error code for CI
191 |     process.exit(1);
192 |   }
193 | }
194 | 
195 | // Handle uncaught errors gracefully
196 | process.on('uncaughtException', (error) => {
197 |   ci.error(`Uncaught exception: ${error.message}`);
198 |   process.exit(1);
199 | });
200 | 
201 | process.on('unhandledRejection', (reason, promise) => {
202 |   ci.error(`Unhandled rejection: ${reason}`);
203 |   process.exit(1);
204 | });
205 | 
206 | // Run the validation
207 | runCIValidation();
```
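
For reference, the lifecycle that the CI startup validation script exercises can be summarized as a small contract. This is a hedged sketch: the method names mirror the calls in the script above, while the exact signatures of the real `Application` class in `dist/runtime/application.js` are assumptions.

```typescript
// Hedged sketch: method names come from the script above; the real
// Application signatures are assumptions made for illustration.
interface ApplicationLifecycle {
  loadConfiguration(): Promise<void>;
  loadPromptsData(): Promise<void>;
  initializeModules(): Promise<void>;
  validateHealth(): unknown;
  shutdown(): Promise<void>;
}

// The validation amounts to asserting that this ordering completes without throwing.
async function exerciseLifecycle(app: ApplicationLifecycle): Promise<void> {
  await app.loadConfiguration();
  await app.loadPromptsData();
  await app.initializeModules();
  const health = app.validateHealth();
  if (!health || typeof health !== 'object') {
    throw new Error('Health validation failed - invalid health info');
  }
  await app.shutdown();
}
```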

--------------------------------------------------------------------------------
/server/tests/enhanced-validation/contract-validation/interface-contracts.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Interface Contract Validation System
  3 |  *
  4 |  * Validates that mock objects fully implement expected interfaces, preventing
  5 |  * runtime "missing method" errors such as the registerTool issue.
  6 |  */
  7 | 
  8 | export interface ContractValidationResult {
  9 |   isValid: boolean;
 10 |   missingMethods: string[];
 11 |   incompatibleSignatures: Array<{
 12 |     method: string;
 13 |     expected: string;
 14 |     actual: string;
 15 |   }>;
 16 |   recommendations: string[];
 17 | }
 18 | 
 19 | export interface ContractValidationReport {
 20 |   mockObjectName: string;
 21 |   referenceInterface: string;
 22 |   validationResult: ContractValidationResult;
 23 |   timestamp: Date;
 24 |   validatedMethods: string[];
 25 | }
 26 | 
 27 | /**
 28 |  * MCP SDK Interface Contract Validator
 29 |  *
 30 |  * Prevents interface mismatches by validating mock objects against real SDK interfaces
 31 |  */
 32 | export class McpSdkInterfaceValidator {
 33 |   private logger: any;
 34 | 
 35 |   constructor(logger: any) {
 36 |     this.logger = logger;
 37 |   }
 38 | 
 39 |   /**
 40 |    * Validate tool registration interface compatibility
 41 |    * Specifically addresses the registerTool method mismatch issue
 42 |    */
 43 |   async validateToolRegistrationInterface(mockServer: any): Promise<ContractValidationResult> {
 44 |     const requiredMethods = [
 45 |       'tool',           // Existing method in MockMcpServer
 46 |       'registerTool'    // Missing method that caused CI failure
 47 |     ];
 48 | 
 49 |     const missingMethods: string[] = [];
 50 |     const incompatibleSignatures: Array<{method: string; expected: string; actual: string}> = [];
 51 | 
 52 |     // Check for missing methods
 53 |     for (const method of requiredMethods) {
 54 |       if (typeof mockServer[method] !== 'function') {
 55 |         missingMethods.push(method);
 56 |       }
 57 |     }
 58 | 
 59 |     // Validate method signatures if they exist
 60 |     if (typeof mockServer.tool === 'function') {
 61 |       const toolMethod = mockServer.tool;
 62 |       if (toolMethod.length < 3) {
 63 |         incompatibleSignatures.push({
 64 |           method: 'tool',
 65 |           expected: 'tool(name: string, description: string, schema: any)',
 66 |           actual: `tool with ${toolMethod.length} parameters`
 67 |         });
 68 |       }
 69 |     }
 70 | 
 71 |     if (typeof mockServer.registerTool === 'function') {
 72 |       const registerToolMethod = mockServer.registerTool;
 73 |       if (registerToolMethod.length < 3) {
 74 |         incompatibleSignatures.push({
 75 |           method: 'registerTool',
 76 |           expected: 'registerTool(name: string, config: any, handler: Function)',
 77 |           actual: `registerTool with ${registerToolMethod.length} parameters`
 78 |         });
 79 |       }
 80 |     }
 81 | 
 82 |     // Generate recommendations
 83 |     const recommendations: string[] = [];
 84 |     if (missingMethods.includes('registerTool')) {
 85 |       recommendations.push('Add registerTool method that delegates to existing tool method');
 86 |       recommendations.push('Ensure registerTool accepts (name, config, handler) parameters');
 87 |     }
 88 | 
 89 |     const isValid = missingMethods.length === 0 && incompatibleSignatures.length === 0;
 90 | 
 91 |     return {
 92 |       isValid,
 93 |       missingMethods,
 94 |       incompatibleSignatures,
 95 |       recommendations
 96 |     };
 97 |   }
 98 | 
 99 |   /**
100 |    * Validate transport layer interface compatibility
101 |    */
102 |   async validateTransportInterface(mockTransport: any): Promise<ContractValidationResult> {
103 |     const requiredMethods = ['sendMessage', 'onMessage', 'close'];
104 |     const missingMethods: string[] = [];
105 | 
106 |     for (const method of requiredMethods) {
107 |       if (typeof mockTransport[method] !== 'function') {
108 |         missingMethods.push(method);
109 |       }
110 |     }
111 | 
112 |     return {
113 |       isValid: missingMethods.length === 0,
114 |       missingMethods,
115 |       incompatibleSignatures: [],
116 |       recommendations: missingMethods.length > 0
117 |         ? [`Implement missing transport methods: ${missingMethods.join(', ')}`]
118 |         : []
119 |     };
120 |   }
121 | 
122 |   /**
123 |    * Comprehensive method signature validation
124 |    */
125 |   validateMethodSignatures(mockObject: any, expectedMethods: Record<string, number>): ContractValidationResult {
126 |     const missingMethods: string[] = [];
127 |     const incompatibleSignatures: Array<{method: string; expected: string; actual: string}> = [];
128 | 
129 |     for (const [methodName, expectedParamCount] of Object.entries(expectedMethods)) {
130 |       if (typeof mockObject[methodName] !== 'function') {
131 |         missingMethods.push(methodName);
132 |       } else {
133 |         const actualParamCount = mockObject[methodName].length;
134 |         if (actualParamCount !== expectedParamCount) {
135 |           incompatibleSignatures.push({
136 |             method: methodName,
137 |             expected: `${expectedParamCount} parameters`,
138 |             actual: `${actualParamCount} parameters`
139 |           });
140 |         }
141 |       }
142 |     }
143 | 
144 |     return {
145 |       isValid: missingMethods.length === 0 && incompatibleSignatures.length === 0,
146 |       missingMethods,
147 |       incompatibleSignatures,
148 |       recommendations: []
149 |     };
150 |   }
151 | 
152 |   /**
153 |    * Generate comprehensive validation report
154 |    */
155 |   async generateContractReport(mockServer: any, mockObjectName: string = 'MockMcpServer'): Promise<ContractValidationReport> {
156 |     this.logger.debug(`[CONTRACT VALIDATOR] Generating report for ${mockObjectName}`);
157 | 
158 |     const validationResult = await this.validateToolRegistrationInterface(mockServer);
159 |     const validatedMethods = ['tool', 'registerTool'].filter(method =>
160 |       typeof mockServer[method] === 'function'
161 |     );
162 | 
163 |     return {
164 |       mockObjectName,
165 |       referenceInterface: 'MCP SDK Server Interface',
166 |       validationResult,
167 |       timestamp: new Date(),
168 |       validatedMethods
169 |     };
170 |   }
171 | 
172 |   /**
173 |    * Quick validation check for CI/testing
174 |    */
175 |   async quickValidation(mockServer: any): Promise<boolean> {
176 |     const result = await this.validateToolRegistrationInterface(mockServer);
177 | 
178 |     if (!result.isValid) {
179 |       this.logger.error('[CONTRACT VALIDATOR] Interface validation failed:', {
180 |         missingMethods: result.missingMethods,
181 |         incompatibleSignatures: result.incompatibleSignatures,
182 |         recommendations: result.recommendations
183 |       });
184 |     }
185 | 
186 |     return result.isValid;
187 |   }
188 | }
189 | 
190 | /**
191 |  * Factory function for creating validator instance
192 |  */
193 | export function createMcpSdkInterfaceValidator(logger: any): McpSdkInterfaceValidator {
194 |   return new McpSdkInterfaceValidator(logger);
195 | }
```
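
A minimal usage sketch for the validator above. The logger object and mock server shown here are hypothetical stand-ins, and the relative import path is assumed.

```typescript
import { createMcpSdkInterfaceValidator } from './interface-contracts.js';

// Hypothetical logger and mock server, shaped only for this example.
const logger = { debug: console.debug, error: console.error };
const mockServer = {
  tool: (name: string, description: string, schema: unknown) => undefined,
  registerTool: (name: string, config: unknown, handler: (...args: unknown[]) => unknown) => undefined,
};

const validator = createMcpSdkInterfaceValidator(logger);

// Quick pass/fail check, e.g. as a guard before running a larger test suite.
const ok = await validator.quickValidation(mockServer); // true: both methods exist with >= 3 params

// Full report including missing methods, signature mismatches, and recommendations.
const report = await validator.generateContractReport(mockServer, 'ExampleMockServer');
console.log(ok, report.validationResult.missingMethods); // true []
```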

--------------------------------------------------------------------------------
/server/prompts/content_processing/prompts.json:
--------------------------------------------------------------------------------

```json
  1 | {
  2 |   "prompts": [
  3 |     {
  4 |       "id": "obsidian_metadata_optimizer",
  5 |       "name": "Obsidian Metadata Optimizer",
  6 |       "category": "content_processing",
  7 |       "description": "Creates comprehensive, intelligent metadata and frontmatter for Obsidian notes, optimizing for discoverability, organization, and advanced plugin functionality",
  8 |       "file": "obsidian_metadata_optimizer.md",
  9 |       "arguments": [
 10 |         {
 11 |           "name": "note_content",
 12 |           "description": "The note content to analyze for metadata creation",
 13 |           "required": true
 14 |         },
 15 |         {
 16 |           "name": "vault_structure",
 17 |           "description": "Current vault structure and organization",
 18 |           "required": false
 19 |         },
 20 |         {
 21 |           "name": "metadata_depth",
 22 |           "description": "Metadata depth: 'essential', 'comprehensive', 'advanced', 'network_optimized'",
 23 |           "required": false
 24 |         }
 25 |       ]
 26 |     },
 27 |     {
 28 |       "id": "format_enhancement",
 29 |       "name": "Format Enhancement Workflow",
 30 |       "category": "content_processing",
 31 |       "description": "Transform existing basic markdown notes to advanced Obsidian formatting standards with professional presentation and interactive elements",
 32 |       "file": "format_enhancement.md",
 33 |       "arguments": [
 34 |         {
 35 |           "name": "existing_content",
 36 |           "description": "The existing note content to enhance with advanced formatting",
 37 |           "required": true
 38 |         },
 39 |         {
 40 |           "name": "domain",
 41 |           "description": "Knowledge domain for appropriate formatting strategy",
 42 |           "required": false
 43 |         },
 44 |         {
 45 |           "name": "enhancement_level",
 46 |           "description": "Enhancement intensity: standard, comprehensive, showcase (default: comprehensive)",
 47 |           "required": false
 48 |         }
 49 |       ]
 50 |     },
 51 |     {
 52 |       "id": "noteIntegration",
 53 |       "name": "Advanced Note Integration with Content Analysis Chain",
 54 |       "category": "content_processing",
 55 |       "description": "Advanced workflow that runs a comprehensive content analysis chain to transform raw content into publication-ready, interconnected notes optimized for Obsidian knowledge management systems. Uses intelligent defaults - only pass the content argument.",
 56 |       "file": "noteIntegration.md",
 57 |       "arguments": [
 58 |         {
 59 |           "name": "content",
 60 |           "description": "The raw content to be processed and integrated",
 61 |           "required": true
 62 |         },
 63 |         {
 64 |           "name": "existing_notes",
 65 |           "description": "Any existing notes to preserve and integrate with",
 66 |           "required": false
 67 |         },
 68 |         {
 69 |           "name": "vault_context",
 70 |           "description": "Context about the vault structure and existing content",
 71 |           "required": false
 72 |         },
 73 |         {
 74 |           "name": "domain",
 75 |           "description": "Knowledge domain for appropriate formatting strategy (e.g., 'creative_arts', 'technical', 'personal_development')",
 76 |           "required": false
 77 |         },
 78 |         {
 79 |           "name": "analysis_depth",
 80 |           "description": "Analysis depth: 'surface', 'standard', 'comprehensive', 'exhaustive' (default: comprehensive)",
 81 |           "required": false
 82 |         },
 83 |         {
 84 |           "name": "structure_type",
 85 |           "description": "Structure type: 'comprehensive', 'method_focused', 'reference_optimized' (default: comprehensive)",
 86 |           "required": false
 87 |         },
 88 |         {
 89 |           "name": "integration_level",
 90 |           "description": "Integration level: 'basic', 'standard', 'advanced', 'network_optimized' (default: advanced)",
 91 |           "required": false
 92 |         },
 93 |         {
 94 |           "name": "target_readability",
 95 |           "description": "Target readability: 'concise', 'balanced', 'comprehensive' (default: comprehensive)",
 96 |           "required": false
 97 |         },
 98 |         {
 99 |           "name": "metadata_depth",
100 |           "description": "Metadata depth: 'essential', 'comprehensive', 'advanced', 'network_optimized' (default: advanced)",
101 |           "required": false
102 |         },
103 |         {
104 |           "name": "quality_standards",
105 |           "description": "Quality standards: 'basic', 'professional', 'comprehensive', 'academic' (default: comprehensive)",
106 |           "required": false
107 |         },
108 |         {
109 |           "name": "enhancement_level",
110 |           "description": "Enhancement intensity: 'standard', 'comprehensive', 'showcase' (default: comprehensive)",
111 |           "required": false
112 |         }
113 |       ]
114 |     },
115 |     {
116 |       "id": "vault_related_notes_finder",
117 |       "name": "Vault Related Notes Finder",
118 |       "category": "content_processing",
119 |       "description": "Searches vault for actual related notes using content analysis and glob/grep patterns to find real cross-references",
120 |       "file": "vault_related_notes_finder.md",
121 |       "arguments": [
122 |         {
123 |           "name": "note_topic",
124 |           "description": "Main topic of the note",
125 |           "required": true
126 |         },
127 |         {
128 |           "name": "content_areas",
129 |           "description": "Key content areas covered in the note",
130 |           "required": true
131 |         },
132 |         {
133 |           "name": "vault_path",
134 |           "description": "Path to vault root directory",
135 |           "required": false
136 |         }
137 |       ]
138 |     },
139 |     {
140 |       "id": "video_notes_enhanced",
141 |       "name": "Enhanced Video Notes Chain",
142 |       "category": "content_processing",
143 |       "description": "Comprehensive video processing chain including content analysis, visual extraction, vault integration, and note creation with proper formatting",
144 |       "file": "video_notes_enhanced.md",
145 |       "arguments": [
146 |         {
147 |           "name": "video_url",
148 |           "description": "YouTube video URL",
149 |           "required": true
150 |         },
151 |         {
152 |           "name": "topic",
153 |           "description": "Main topic/subject",
154 |           "required": true
155 |         },
156 |         {
157 |           "name": "content_areas",
158 |           "description": "Key content areas covered",
159 |           "required": true
160 |         },
161 |         {
162 |           "name": "duration",
163 |           "description": "Video duration",
164 |           "required": false
165 |         }
166 |       ]
167 |     }
168 |   ]
169 | }
```
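
The manifest above implies a small registration schema. The interface below is an illustrative TypeScript sketch of that shape, derived from the JSON fields; it is not the server's own type definition, and other category manifests add optional fields (for example `onEmptyInvocation` and per-argument `type` in the analysis manifest).

```typescript
// Illustrative sketch of the shape used by prompts.json manifests; field names
// mirror the JSON above, but this is not the server's actual type definition.
interface PromptArgument {
  name: string;
  description: string;
  required?: boolean;
  type?: string;              // seen on some entries in other manifests
}

interface PromptRegistration {
  id: string;                 // e.g. "obsidian_metadata_optimizer"
  name: string;
  category: string;           // matches the directory, e.g. "content_processing"
  description: string;
  file: string;               // markdown template in the same directory
  arguments: PromptArgument[];
  onEmptyInvocation?: string; // seen in other manifests, e.g. "return_template"
}

interface PromptsManifest {
  prompts: PromptRegistration[];
}
```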

--------------------------------------------------------------------------------
/server/tests/scripts/performance-memory.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | /**
  3 |  * Performance and Memory Tests - Updated for Consolidated Architecture
  4 |  * Tests current system performance instead of deprecated components
  5 |  */
  6 | 
  7 | async function performanceTests() {
  8 |   try {
  9 |     console.log('🧪 Running performance and memory tests for consolidated architecture...');
 10 | 
 11 |     // Test current system components instead of deprecated ones
 12 |     const { Application } = await import('../../dist/runtime/application.js');
 13 |     const { createSimpleLogger } = await import('../../dist/logging/index.js');
 14 | 
 15 |     console.log('⏱️  Starting performance tests...');
 16 | 
 17 |     const logger = createSimpleLogger();
 18 |     const application = new Application(logger);
 19 | 
 20 |     // Performance benchmarks for current system
 21 |     console.log('📊 System Startup Performance:');
 22 | 
 23 |     // Test startup performance
 24 |     const startupStart = Date.now();
 25 |     await application.loadConfiguration();
 26 |     const configDuration = Date.now() - startupStart;
 27 | 
 28 |     const promptsStart = Date.now();
 29 |     await application.loadPromptsData();
 30 |     const promptsDuration = Date.now() - promptsStart;
 31 | 
 32 |     const modulesStart = Date.now();
 33 |     await application.initializeModules();
 34 |     const modulesDuration = Date.now() - modulesStart;
 35 | 
 36 |     const totalStartup = configDuration + promptsDuration + modulesDuration;
 37 | 
 38 |     console.log(`   Config loading: ${configDuration}ms`);
 39 |     console.log(`   Prompts loading: ${promptsDuration}ms`);
 40 |     console.log(`   Modules initialization: ${modulesDuration}ms`);
 41 |     console.log(`   Total startup time: ${totalStartup}ms`);
 42 | 
 43 |     // Evidence-based performance baselines (measured from actual system)
 44 |     // These are based on p95 performance + 20% safety margin
 45 |     const PERFORMANCE_BASELINES = {
 46 |       startup: 3000,        // Evidence-based: actual p95 + margin
 47 |       config: 200,          // Evidence-based: config loading baseline
 48 |       prompts: 800,         // Evidence-based: prompts loading baseline
 49 |       modules: 1500,        // Evidence-based: modules initialization baseline
 50 |       routing: 1.0,         // Evidence-based: <1ms command routing detection
 51 |       memory: 150           // Evidence-based: 150MB RSS memory baseline
 52 |     };
 53 | 
 54 |     console.log('\n🎯 Performance Baseline Validation:');
 55 | 
 56 |     let baselinesPassed = 0;
 57 |     let totalBaselines = 0;
 58 | 
 59 |     // Config loading baseline
 60 |     totalBaselines++;
 61 |     if (configDuration <= PERFORMANCE_BASELINES.config) {
 62 |       console.log(`   ✅ Config loading: ${configDuration}ms (baseline: ${PERFORMANCE_BASELINES.config}ms)`);
 63 |       baselinesPassed++;
 64 |     } else {
 65 |       console.log(`   ❌ Config loading: ${configDuration}ms (exceeds baseline: ${PERFORMANCE_BASELINES.config}ms)`);
 66 |     }
 67 | 
 68 |     // Prompts loading baseline
 69 |     totalBaselines++;
 70 |     if (promptsDuration <= PERFORMANCE_BASELINES.prompts) {
 71 |       console.log(`   ✅ Prompts loading: ${promptsDuration}ms (baseline: ${PERFORMANCE_BASELINES.prompts}ms)`);
 72 |       baselinesPassed++;
 73 |     } else {
 74 |       console.log(`   ❌ Prompts loading: ${promptsDuration}ms (exceeds baseline: ${PERFORMANCE_BASELINES.prompts}ms)`);
 75 |     }
 76 | 
 77 |     // Modules initialization baseline
 78 |     totalBaselines++;
 79 |     if (modulesDuration <= PERFORMANCE_BASELINES.modules) {
 80 |       console.log(`   ✅ Modules init: ${modulesDuration}ms (baseline: ${PERFORMANCE_BASELINES.modules}ms)`);
 81 |       baselinesPassed++;
 82 |     } else {
 83 |       console.log(`   ❌ Modules init: ${modulesDuration}ms (exceeds baseline: ${PERFORMANCE_BASELINES.modules}ms)`);
 84 |     }
 85 | 
 86 |     // Total startup baseline
 87 |     totalBaselines++;
 88 |     if (totalStartup <= PERFORMANCE_BASELINES.startup) {
 89 |       console.log(`   ✅ Total startup: ${totalStartup}ms (baseline: ${PERFORMANCE_BASELINES.startup}ms)`);
 90 |       baselinesPassed++;
 91 |     } else {
 92 |       console.log(`   ❌ Total startup: ${totalStartup}ms (exceeds baseline: ${PERFORMANCE_BASELINES.startup}ms)`);
 93 |     }
 94 | 
 95 |     const baselineSuccessRate = (baselinesPassed / totalBaselines) * 100;
 96 |     if (baselineSuccessRate >= 75) {
 97 |       console.log(`\n✅ Performance baselines achieved (${baselineSuccessRate.toFixed(1)}%)`);
 98 |     } else {
 99 |       console.log(`\n⚠️  Performance baseline concerns (${baselineSuccessRate.toFixed(1)}% passed)`);
100 |     }
101 | 
102 |     // Memory usage testing
103 |     console.log('💾 Memory Usage Tests:');
104 |     const initialMemory = process.memoryUsage();
105 |     console.log(`   Initial memory - Heap: ${(initialMemory.heapUsed / 1024 / 1024).toFixed(2)}MB, RSS: ${(initialMemory.rss / 1024 / 1024).toFixed(2)}MB`);
106 | 
107 |     // Simulate some operations
108 |     for (let i = 0; i < 100; i++) {
109 |       // Allocate short-lived objects to exercise the heap (collectible after each iteration)
110 |       const operationData = {
111 |         operation: `memory_test_${i}`,
112 |         data: new Array(1000).fill(i)
113 |       };
114 |     }
115 | 
116 |     // Force garbage collection if available (global.gc exists only when Node runs with --expose-gc)
117 |     if (global.gc) {
118 |       global.gc();
119 |     }
120 | 
121 |     const finalMemory = process.memoryUsage();
122 |     const heapIncrease = (finalMemory.heapUsed - initialMemory.heapUsed) / 1024 / 1024;
123 |     const rssIncrease = (finalMemory.rss - initialMemory.rss) / 1024 / 1024;
124 | 
125 |     console.log(`   Final memory - Heap: ${(finalMemory.heapUsed / 1024 / 1024).toFixed(2)}MB, RSS: ${(finalMemory.rss / 1024 / 1024).toFixed(2)}MB`);
126 |     console.log(`   Memory increase - Heap: ${heapIncrease.toFixed(2)}MB, RSS: ${rssIncrease.toFixed(2)}MB`);
127 | 
128 |     // Memory leak threshold check
129 |     const memoryThreshold = 50; // MB
130 |     if (heapIncrease > memoryThreshold) {
131 |       console.log(`⚠️  Warning: Heap memory increased by ${heapIncrease.toFixed(2)}MB (threshold: ${memoryThreshold}MB)`);
132 |     } else {
133 |       console.log(`✅ Memory usage acceptable: ${heapIncrease.toFixed(2)}MB heap increase`);
134 |     }
135 | 
136 |     console.log('📊 Performance Summary:');
137 |     console.log(`   ✅ Total startup time: ${totalStartup}ms`);
138 |     console.log(`   ✅ Memory increase: ${heapIncrease.toFixed(2)}MB`);
139 |     console.log('   ✅ All tests completed successfully');
140 | 
141 |   } catch (error) {
142 |     console.error('❌ Performance tests failed:', error.message);
143 |     process.exit(1);
144 |   }
145 | }
146 | 
147 | // Run the performance tests when executed directly (pathToFileURL keeps the check correct on Windows)
148 | if (process.argv[1] && import.meta.url === (await import('url')).pathToFileURL(process.argv[1]).href) {
149 |   performanceTests().catch(error => {
150 |     console.error('❌ Test execution failed:', error);
151 |     process.exit(1);
152 |   });
153 | }
154 | 
155 | export { performanceTests };
```
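
The four baseline comparisons above repeat the same log-and-count pattern. A possible refactoring is sketched below; the helper name and signature are illustrative, not part of the repository.

```typescript
// Illustrative helper consolidating the repeated baseline checks above.
function checkBaseline(label: string, actualMs: number, baselineMs: number): boolean {
  const passed = actualMs <= baselineMs;
  const verdict = passed
    ? `baseline: ${baselineMs}ms`
    : `exceeds baseline: ${baselineMs}ms`;
  console.log(`   ${passed ? '✅' : '❌'} ${label}: ${actualMs}ms (${verdict})`);
  return passed;
}

// Usage mirroring the script (durations come from the timed phases above):
// const checks = [
//   checkBaseline('Config loading', configDuration, PERFORMANCE_BASELINES.config),
//   checkBaseline('Prompts loading', promptsDuration, PERFORMANCE_BASELINES.prompts),
//   checkBaseline('Modules init', modulesDuration, PERFORMANCE_BASELINES.modules),
//   checkBaseline('Total startup', totalStartup, PERFORMANCE_BASELINES.startup),
// ];
// const baselineSuccessRate = (checks.filter(Boolean).length / checks.length) * 100;
```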

--------------------------------------------------------------------------------
/server/prompts/analysis/prompts.json:
--------------------------------------------------------------------------------

```json
  1 | {
  2 |   "prompts": [
  3 |     {
  4 |       "id": "progressive_research",
  5 |       "name": "Progressive Research Assistant",
  6 |       "category": "analysis",
  7 |       "description": "A step-by-step research assistant that builds knowledge incrementally through iterative questions and analysis.",
  8 |       "file": "progressive_research.md",
  9 |       "arguments": [
 10 |         {
 11 |           "name": "notes",
 12 |           "description": "The initial notes or information to research and expand on",
 13 |           "required": false
 14 |         },
 15 |         {
 16 |           "name": "information",
 17 |           "description": "Additional context or information to guide the research",
 18 |           "required": false
 19 |         }
 20 |       ]
 21 |     },
 22 |     {
 23 |       "id": "note_refinement",
 24 |       "name": "Note Refinement",
 25 |       "category": "analysis",
 26 |       "description": "Refine existing notes by improving organization, flow, and clarity without adding or modifying the content.",
 27 |       "file": "note_refinement.md",
 28 |       "arguments": [],
 29 |       "onEmptyInvocation": "return_template"
 30 |     },
 31 |     {
 32 |       "id": "note_integration",
 33 |       "name": "Note Integration",
 34 |       "category": "analysis",
 35 |       "description": "Integrate new information from a markdown page into existing notes, merging them smoothly while maintaining a logical structure and avoiding duplication.",
 36 |       "file": "note_integration.md",
 37 |       "arguments": [
 38 |         {
 39 |           "name": "notes",
 40 |           "description": "The existing notes",
 41 |           "required": false
 42 |         },
 43 |         {
 44 |           "name": "new_information",
 45 |           "description": "The new information to be integrated",
 46 |           "required": false
 47 |         }
 48 |       ]
 49 |     },
 50 |     {
 51 |       "id": "query_refinement",
 52 |       "name": "Query Refinement",
 53 |       "category": "analysis",
 54 |       "description": "A systematic process to analyze and refine ambiguous coding requests into clear, actionable specifications.",
 55 |       "file": "query_refinement.md",
 56 |       "arguments": [
 57 |         {
 58 |           "name": "query",
 59 |           "description": "The original user query to refine",
 60 |           "required": false
 61 |         }
 62 |       ]
 63 |     },
 64 |     {
 65 |       "id": "advanced_analysis_engine",
 66 |       "name": "Advanced Analysis Engine",
 67 |       "category": "analysis",
 68 |       "description": "Complex template testing prompt with advanced Nunjucks features including conditionals, loops, inheritance, filters, and multi-format output generation. Designed to stress-test the template engine with maximum complexity.",
 69 |       "file": "advanced_analysis_engine.md",
 70 |       "arguments": [
 71 |         {
 72 |           "name": "topic",
 73 |           "description": "Main topic or subject for analysis",
 74 |           "required": true
 75 |         },
 76 |         {
 77 |           "name": "analysis_type",
 78 |           "description": "Type of analysis: market, technical, competitive, trend, risk, opportunity",
 79 |           "required": false
 80 |         },
 81 |         {
 82 |           "name": "sources",
 83 |           "description": "Array of data sources: web, papers, news, social, industry, expert",
 84 |           "required": false
 85 |         },
 86 |         {
 87 |           "name": "depth",
 88 |           "description": "Analysis depth: surface, standard, comprehensive, expert",
 89 |           "required": false
 90 |         },
 91 |         {
 92 |           "name": "format",
 93 |           "description": "Output format: executive_summary, technical_report, presentation, research_paper",
 94 |           "required": false
 95 |         },
 96 |         {
 97 |           "name": "focus_areas",
 98 |           "description": "Array of focus areas: technical, business, ethical, regulatory, social, environmental",
 99 |           "required": false
100 |         },
101 |         {
102 |           "name": "constraints",
103 |           "description": "Object with constraints like time_limit, budget, scope, audience",
104 |           "required": false
105 |         },
106 |         {
107 |           "name": "requirements",
108 |           "description": "Array of specific requirements or objects with category, description, priority, examples",
109 |           "required": false
110 |         },
111 |         {
112 |           "name": "previous_context",
113 |           "description": "Previous analysis context to build upon",
114 |           "required": false
115 |         }
116 |       ]
117 |     },
118 |     {
119 |       "id": "notes",
120 |       "name": "Notes",
121 |       "category": "analysis",
122 |       "description": "Enhanced notes chain that searches the vault for actual related notes instead of generating fictional ones",
123 |       "file": "notes.md",
124 |       "arguments": [
125 |         {
126 |           "name": "content",
127 |           "description": "Source content to analyze",
128 |           "required": true
129 |         },
130 |         {
131 |           "name": "video_url",
132 |           "description": "YouTube video URL for visual extraction",
133 |           "required": false
134 |         },
135 |         {
136 |           "name": "topic",
137 |           "description": "Main topic/subject",
138 |           "required": false
139 |         },
140 |         {
141 |           "name": "content_areas",
142 |           "description": "Key content areas covered",
143 |           "required": false
144 |         }
145 |       ]
146 |     },
147 |     {
148 |       "id": "content_analysis",
149 |       "name": "Content Analysis",
150 |       "category": "analysis",
151 |       "description": "Systematically analyze web content, breaking it down into key components.",
152 |       "file": "content_analysis.md",
153 |       "arguments": [
154 |         {
155 |           "name": "content",
156 |           "description": "The content to be analyzed",
157 |           "required": false
158 |         }
159 |       ]
160 |     },
161 |     {
162 |       "id": "deep_analysis",
163 |       "name": "Deep Analysis",
164 |       "category": "analysis",
165 |       "description": "Expand on a previous analysis by diving deeper into information, identifying key insights and relationships.",
166 |       "file": "deep_analysis.md",
167 |       "arguments": []
168 |     },
169 |     {
170 |       "id": "deep_research",
171 |       "name": "Deep Research Framework",
172 |       "category": "analysis",
173 |       "description": "A comprehensive framework for conducting thorough, methodical research on complex topics with academic rigor.",
174 |       "file": "deep_research.md",
175 |       "arguments": [
176 |         {
177 |           "name": "topic",
178 |           "description": "The research topic to investigate comprehensively",
179 |           "required": true
180 |         }
181 |       ]
182 |     },
183 |     {
184 |       "id": "review",
185 |       "name": "review",
186 |       "category": "analysis",
187 |       "description": "Comprehensive audit template for modules, implementations, and system integrations",
188 |       "file": "review.md",
189 |       "arguments": [
190 |         {
191 |           "name": "target",
192 |           "type": "string",
193 |           "description": "The module, implementation, system, or functionality to audit (e.g., '@/ path implementation', 'MusicSyncService', 'color harmony modules')"
194 |         }
195 |       ]
196 |     }
197 |   ]
198 | }
```
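
As a usage sketch, a manifest like the one above can be checked at call time to confirm that required arguments are present before a prompt is rendered. The file path, helper name, and argument values below are hypothetical.

```typescript
import { readFile } from 'fs/promises';

// Hypothetical guard: verify required arguments for a prompt id before rendering it.
async function assertRequiredArgs(
  manifestPath: string,
  promptId: string,
  provided: Record<string, string>
): Promise<void> {
  const manifest = JSON.parse(await readFile(manifestPath, 'utf8'));
  const prompt = manifest.prompts.find((p: { id: string }) => p.id === promptId);
  if (!prompt) throw new Error(`Unknown prompt: ${promptId}`);
  const missing = (prompt.arguments ?? [])
    .filter((a: { name: string; required?: boolean }) => a.required && !(a.name in provided))
    .map((a: { name: string }) => a.name);
  if (missing.length > 0) {
    throw new Error(`Missing required arguments for ${promptId}: ${missing.join(', ')}`);
  }
}

// Example: "deep_research" declares "topic" as required in the manifest above.
await assertRequiredArgs('server/prompts/analysis/prompts.json', 'deep_research', { topic: 'MCP servers' });
```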

--------------------------------------------------------------------------------
/server/prompts/general/diagnose.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Codebase Diagnostics
  2 | 
  3 | ## Description
  4 | Systematically diagnose issues in codebases including bugs, performance problems, security vulnerabilities, architecture issues, and technical debt
  5 | 
  6 | ## System Message
  7 | You are an expert code diagnostician specializing in systematic issue analysis across multiple dimensions: code quality, architecture, performance, security, testing, and technical debt.
  8 | 
  9 | Your role is to:
 10 | 1. Gather evidence through diagnostic commands and code analysis
 11 | 2. Identify issues across all quality dimensions
 12 | 3. Prioritize findings by severity and impact
 13 | 4. Provide actionable recommendations with clear implementation steps
 14 | 5. Follow evidence-based practices (no guessing, concrete data only)
 15 | 
 16 | Use the tools available (Read, Grep, Glob, Bash) to systematically analyze the codebase. Run actual diagnostic commands (typecheck, lint, test, audit) to gather real data.
 17 | 
 18 | Be thorough but efficient. Focus on high-impact issues first. Provide specific file paths and line numbers. Include code examples where relevant.
 19 | 
 20 | Your analysis should be structured, prioritized, and actionable.
 21 | 
 22 | ## User Message Template
 23 | Perform comprehensive diagnostic analysis of the codebase.
 24 | 
 25 | {% if scope %}
 26 | **Analysis Scope**: {{ scope }}
 27 | {% else %}
 28 | **Analysis Scope**: Full codebase analysis
 29 | {% endif %}
 30 | 
 31 | {% if focus %}
 32 | **Focus Areas**: {{ focus }}
 33 | {% endif %}
 34 | 
 35 | {% if symptoms %}
 36 | **Reported Symptoms**: {{ symptoms }}
 37 | {% endif %}
 38 | 
 39 | ## Diagnostic Protocol
 40 | 
 41 | ### Phase 1: Context Discovery
 42 | 1. **Project Understanding**:
 43 |    - Identify tech stack and framework versions
 44 |    - Review project structure and architecture patterns
 45 |    - Check build configuration and dependencies
 46 |    - Analyze git history for recent changes
 47 | 
 48 | 2. **Issue Surface Mapping**:
 49 |    - Scan for compilation/build errors
 50 |    - Check for runtime errors and warnings
 51 |    - Review test failures and coverage gaps
 52 |    - Identify linting and type errors
 53 | 
 54 | ### Phase 2: Systematic Analysis
 55 | 
 56 | Analyze across these dimensions:
 57 | 
 58 | #### A. **Code Quality Issues**
 59 | - TypeScript/linting errors and warnings
 60 | - Type safety violations (`any` usage, missing types)
 61 | - Unused variables, imports, and dead code
 62 | - Code complexity and maintainability metrics
 63 | - Naming convention violations
 64 | 
 65 | #### B. **Architectural Problems**
 66 | - Circular dependencies
 67 | - Tight coupling and poor separation of concerns
 68 | - Violated design principles (SOLID, DRY)
 69 | - Inconsistent patterns across codebase
 70 | - Missing abstractions or over-abstraction
 71 | 
 72 | #### C. **Performance Issues**
 73 | - Memory leaks and inefficient resource usage
 74 | - Unnecessary re-renders or computations
 75 | - Bundle size problems
 76 | - Build time bottlenecks
 77 | - Runtime performance regressions
 78 | 
 79 | #### D. **Security Vulnerabilities**
 80 | - Dependency vulnerabilities (audit results)
 81 | - Input validation gaps
 82 | - Authentication/authorization issues
 83 | - Exposed secrets or sensitive data
 84 | - XSS, injection, or CSRF risks
 85 | 
 86 | #### E. **Testing Gaps**
 87 | - Missing test coverage for critical paths
 88 | - Flaky or unreliable tests
 89 | - Integration test coverage
 90 | - Edge case validation
 91 | - Performance regression tests
 92 | 
 93 | #### F. **Technical Debt**
 94 | - Deprecated API usage
 95 | - Outdated dependencies
 96 | - TODO comments and temporary solutions
 97 | - Duplicated code
 98 | - Legacy patterns needing migration
 99 | 
100 | ### Phase 3: Evidence Gathering
101 | 
102 | For each identified issue:
103 | 1. **Run diagnostic commands**:
104 |    ```bash
105 |    npm run typecheck
106 |    npm run lint
107 |    npm test
108 |    npm audit
109 |    npx madge --circular src/
110 |    ```
111 | 
112 | 2. **Collect metrics**:
113 |    - Error counts and severity
114 |    - Test coverage percentages
115 |    - Build time and bundle size
116 |    - Complexity scores
117 | 
118 | 3. **Document examples**:
119 |    - Specific file paths and line numbers
120 |    - Error messages and stack traces
121 |    - Code snippets demonstrating issues
122 | 
123 | ### Phase 4: Prioritized Findings
124 | 
125 | Present findings in this structure:
126 | 
127 | #### Critical (Fix Immediately)
128 | - Issues breaking functionality
129 | - Security vulnerabilities
130 | - Data corruption risks
131 | - Build/deployment blockers
132 | 
133 | #### High Priority (Fix Soon)
134 | - Performance degradation
135 | - Poor user experience
136 | - High-impact technical debt
137 | - Test coverage gaps in critical paths
138 | 
139 | #### Medium Priority (Plan to Fix)
140 | - Code quality issues
141 | - Moderate technical debt
142 | - Missing documentation
143 | - Refactoring opportunities
144 | 
145 | #### Low Priority (Nice to Have)
146 | - Minor style violations
147 | - Optional optimizations
148 | - Enhancement opportunities
149 | 
150 | ### Phase 5: Actionable Recommendations
151 | 
152 | For each priority level, provide:
153 | 
154 | 1. **Root Cause Analysis**: Why does this issue exist?
155 | 2. **Impact Assessment**: What are the consequences?
156 | 3. **Solution Options**: Multiple approaches with trade-offs
157 | 4. **Implementation Steps**: Concrete action items
158 | 5. **Validation Plan**: How to verify the fix works
159 | 
160 | ### Phase 6: Diagnostic Summary
161 | 
162 | Provide:
163 | - **Overall Health Score**: Based on issue severity and count
164 | - **Risk Assessment**: What could go wrong if issues aren't addressed
165 | - **Quick Wins**: Easy fixes with high impact
166 | - **Long-term Strategy**: Technical debt reduction plan
167 | - **Next Steps**: Prioritized action items
168 | 
169 | ## Output Format
170 | 
171 | ```markdown
172 | # Codebase Diagnostic Report
173 | 
174 | ## Executive Summary
175 | [Brief overview of findings and health status]
176 | 
177 | ## Critical Issues (Count: X)
178 | ### Issue 1: [Title]
179 | - **Location**: file.ts:123
180 | - **Category**: [Bug/Security/Performance/Architecture]
181 | - **Impact**: [Description]
182 | - **Root Cause**: [Analysis]
183 | - **Recommendation**: [Solution]
184 | - **Effort**: [Low/Medium/High]
185 | 
186 | ## High Priority Issues (Count: X)
187 | [Same structure]
188 | 
189 | ## Medium Priority Issues (Count: X)
190 | [Same structure]
191 | 
192 | ## Low Priority Issues (Count: X)
193 | [Summary only for brevity]
194 | 
195 | ## Health Metrics
196 | - Type Safety: X/100
197 | - Test Coverage: X%
198 | - Build Health: X/100
199 | - Dependency Health: X vulnerabilities
200 | - Code Quality: X/100
201 | 
202 | ## Recommended Action Plan
203 | 1. [Immediate actions]
204 | 2. [This week actions]
205 | 3. [This month actions]
206 | 4. [Long-term improvements]
207 | 
208 | ## Quick Wins
209 | - [Easy fixes with high impact]
210 | ```
211 | 
212 | ## Evidence-Based Standards
213 | 
214 | - ✅ Use diagnostic commands to gather concrete data
215 | - ✅ Provide file paths and line numbers for all issues
216 | - ✅ Include error messages and metrics
217 | - ✅ Reference official documentation for recommendations
218 | - ✅ Measure impact quantitatively where possible
219 | - ❌ Don't guess or make assumptions
220 | - ❌ Don't use superlatives without data
221 | - ❌ Don't recommend solutions without understanding root causes
222 | 
223 | ## Tools to Use
224 | 
225 | 1. **File Analysis**: Read, Glob, Grep to examine code
226 | 2. **Diagnostics**: Bash to run build, test, lint, audit commands
227 | 3. **Metrics**: Collect quantitative data (coverage %, error counts, etc.)
228 | 4. **Git History**: Check recent changes that may have introduced issues
229 | 
230 | Begin diagnostics now.
231 | 
```
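
The user message template above relies on conditional blocks for `scope`, `focus`, and `symptoms`. The sketch below shows how such a block resolves with Nunjucks; it assumes the `nunjucks` package and is not taken from the server code.

```typescript
import nunjucks from 'nunjucks';

// Hypothetical rendering of the template's scope/focus blocks.
const fragment = [
  '{% if scope %}**Analysis Scope**: {{ scope }}{% else %}**Analysis Scope**: Full codebase analysis{% endif %}',
  '{% if focus %}**Focus Areas**: {{ focus }}{% endif %}',
].join('\n');

console.log(nunjucks.renderString(fragment, { scope: 'server/src/gates' }));
// -> **Analysis Scope**: server/src/gates
//    (the focus line renders as an empty line because "focus" was not provided)
```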

--------------------------------------------------------------------------------
/server/src/gates/types.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Gate System Type Definitions
  3 |  *
  4 |  * Consolidated types for the gate validation system, including lightweight gates,
  5 |  * enhanced validation, and gate orchestration. Combines types from multiple gate
  6 |  * system implementations into a unified type system.
  7 |  */
  8 | 
  9 | // Import unified validation types from the execution domain (ValidationResult is intentionally not re-exported to avoid naming conflicts)
 10 | import type { ValidationResult, ValidationCheck } from '../execution/types.js';
 11 | export type { ValidationCheck } from '../execution/types.js';
 12 | 
 13 | /**
 14 |  * Gate requirement types - comprehensive enumeration
 15 |  */
 16 | export type GateRequirementType =
 17 |   | 'content_length'
 18 |   | 'keyword_presence'
 19 |   | 'format_validation'
 20 |   | 'section_validation'
 21 |   | 'custom'
 22 |   // Content quality gates
 23 |   | 'readability_score'
 24 |   | 'grammar_quality'
 25 |   | 'tone_analysis'
 26 |   // Structure gates
 27 |   | 'hierarchy_validation'
 28 |   | 'link_validation'
 29 |   | 'code_quality'
 30 |   | 'structure'
 31 |   // Pattern matching gates
 32 |   | 'pattern_matching'
 33 |   // Completeness gates
 34 |   | 'required_fields'
 35 |   | 'completeness_score'
 36 |   | 'completeness'
 37 |   // Chain-specific gates
 38 |   | 'step_continuity'
 39 |   | 'framework_compliance'
 40 |   // Security gates
 41 |   | 'security_validation'
 42 |   | 'citation_validation'
 43 |   | 'security_scan'
 44 |   | 'privacy_compliance'
 45 |   | 'content_policy'
 46 |   // Workflow gates
 47 |   | 'dependency_validation'
 48 |   | 'context_consistency'
 49 |   | 'resource_availability'
 50 |   // LLM Quality Gates
 51 |   | 'llm_coherence'
 52 |   | 'llm_accuracy'
 53 |   | 'llm_helpfulness'
 54 |   | 'llm_contextual';
 55 | 
 56 | /**
 57 |  * Gate requirement definition
 58 |  */
 59 | export interface GateRequirement {
 60 |   type: GateRequirementType;
 61 |   criteria: any;
 62 |   weight?: number;
 63 |   required?: boolean;
 64 |   // LLM-specific extensions (backward compatible)
 65 |   llmCriteria?: {
 66 |     qualityDimensions?: ('coherent' | 'accurate' | 'helpful' | 'contextual')[];
 67 |     confidenceThreshold?: number;
 68 |     evaluationContext?: string;
 69 |     targetAudience?: 'general' | 'technical' | 'beginner' | 'expert';
 70 |     expectedStyle?: 'formal' | 'casual' | 'technical' | 'conversational';
 71 |     factCheckingEnabled?: boolean;
 72 |     usefulnessThreshold?: number;
 73 |     appropriatenessLevel?: 'strict' | 'standard' | 'relaxed';
 74 |   };
 75 | }
 76 | 
 77 | /**
 78 |  * Comprehensive gate definition
 79 |  * Consolidates lightweight and enhanced gate definitions
 80 |  */
 81 | export interface GateDefinition {
 82 |   /** Unique identifier for the gate */
 83 |   id: string;
 84 |   /** Human-readable name */
 85 |   name: string;
 86 |   /** Gate type */
 87 |   type: 'validation' | 'approval' | 'condition' | 'quality' | 'guidance';
 88 |   /** Description of what this gate checks/guides */
 89 |   description?: string;
 90 |   /** Requirements for this gate */
 91 |   requirements: GateRequirement[];
 92 |   /** Action to take on failure */
 93 |   failureAction: 'stop' | 'retry' | 'skip' | 'rollback';
 94 |   /** Retry policy configuration */
 95 |   retryPolicy?: {
 96 |     maxRetries: number;
 97 |     retryDelay: number;
 98 |   };
 99 | 
100 |   // Lightweight gate extensions
101 |   /** Guidance text injected into prompts */
102 |   guidance?: string;
103 |   /** Pass/fail criteria for validation gates */
104 |   pass_criteria?: GatePassCriteria[];
105 |   /** Retry configuration (lightweight format) */
106 |   retry_config?: {
107 |     max_attempts: number;
108 |     improvement_hints: boolean;
109 |     preserve_context: boolean;
110 |   };
111 |   /** Activation rules - when this gate should be applied */
112 |   activation?: {
113 |     prompt_categories?: string[];
114 |     explicit_request?: boolean;
115 |     framework_context?: string[];
116 |   };
117 | }
118 | 
119 | /**
120 |  * Pass/fail criteria for validation (lightweight gate format)
121 |  */
122 | export interface GatePassCriteria {
123 |   /** Type of check to perform */
124 |   type: 'content_check' | 'llm_self_check' | 'pattern_check';
125 | 
126 |   // Content check options
127 |   min_length?: number;
128 |   max_length?: number;
129 |   required_patterns?: string[];
130 |   forbidden_patterns?: string[];
131 | 
132 |   // LLM self-check options
133 |   prompt_template?: string;
134 |   pass_threshold?: number;
135 | 
136 |   // Pattern check options
137 |   regex_patterns?: string[];
138 |   keyword_count?: { [keyword: string]: number };
139 | }
140 | 
141 | // ValidationCheck now imported from execution/types.js - no need to redefine
142 | 
143 | /**
144 |  * Gate evaluation result
145 |  */
146 | export interface GateEvaluationResult {
147 |   requirementId: string;
148 |   passed: boolean;
149 |   score?: number;
150 |   message?: string;
151 |   details?: any;
152 | }
153 | 
154 | // ValidationResult now imported from execution/types.js - provides unified validation interface
155 | 
156 | /**
157 |  * Gate status information
158 |  */
159 | export interface GateStatus {
160 |   gateId: string;
161 |   passed: boolean;
162 |   requirements: GateRequirement[];
163 |   evaluationResults: GateEvaluationResult[];
164 |   timestamp: number;
165 |   retryCount?: number;
166 | }
167 | 
168 | /**
169 |  * Context for validation
170 |  */
171 | export interface ValidationContext {
172 |   /** Content to validate */
173 |   content: string;
174 |   /** Additional metadata */
175 |   metadata?: Record<string, any>;
176 |   /** Execution context */
177 |   executionContext?: {
178 |     promptId?: string;
179 |     stepId?: string;
180 |     attemptNumber?: number;
181 |     previousAttempts?: string[];
182 |   };
183 | }
184 | 
185 | /**
186 |  * Gate activation result
187 |  */
188 | export interface GateActivationResult {
189 |   /** Gates that should be active */
190 |   activeGates: LightweightGateDefinition[];
191 |   /** Guidance text to inject */
192 |   guidanceText: string[];
193 |   /** Validation gates to apply */
194 |   validationGates: LightweightGateDefinition[];
195 | }
196 | 
197 | /**
198 |  * Lightweight gate definition (for backward compatibility)
199 |  */
200 | export interface LightweightGateDefinition {
201 |   /** Unique identifier for the gate */
202 |   id: string;
203 |   /** Human-readable name */
204 |   name: string;
205 |   /** Gate type - validation enforces pass/fail, guidance provides hints */
206 |   type: 'validation' | 'guidance';
207 |   /** Description of what this gate checks/guides */
208 |   description: string;
209 |   /** Guidance text injected into prompts */
210 |   guidance?: string;
211 |   /** Pass/fail criteria for validation gates */
212 |   pass_criteria?: GatePassCriteria[];
213 |   /** Retry configuration */
214 |   retry_config?: {
215 |     max_attempts: number;
216 |     improvement_hints: boolean;
217 |     preserve_context: boolean;
218 |   };
219 |   /** Activation rules - when this gate should be applied */
220 |   activation?: {
221 |     prompt_categories?: string[];
222 |     explicit_request?: boolean;
223 |     framework_context?: string[];
224 |   };
225 | }
226 | 
227 | /**
228 |  * Gate configuration settings
229 |  */
230 | export interface GatesConfig {
231 |   /** Directory containing gate definitions */
232 |   definitionsDirectory: string;
233 |   /** Directory containing LLM validation templates */
234 |   templatesDirectory: string;
235 | }
236 | 
237 | /**
238 |  * Step result with gate information
239 |  */
240 | export interface StepResult {
241 |   content: string;
242 |   status: 'pending' | 'running' | 'completed' | 'failed' | 'skipped';
243 |   timestamp: number;
244 |   validationResults?: ValidationResult[];
245 |   gateResults?: GateStatus[];
246 |   metadata?: Record<string, string | number | boolean | null>;
247 | }
248 | 
249 | /**
250 |  * Gate type enumeration
251 |  */
252 | export enum GateType {
253 |   VALIDATION = "validation",
254 |   APPROVAL = "approval",
255 |   CONDITION = "condition",
256 |   QUALITY = "quality",
257 |   GUIDANCE = "guidance"
258 | }
```
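
To illustrate how the lightweight fields fit together, here is a hypothetical gate definition. The id, guidance text, thresholds, and category are made up for the example, and the relative import path is assumed.

```typescript
import type { LightweightGateDefinition } from './types.js';

// Hypothetical gate exercising the lightweight fields defined above.
const summaryLengthGate: LightweightGateDefinition = {
  id: 'summary-length',
  name: 'Summary Length Check',
  type: 'validation',
  description: 'Ensures generated summaries stay within a useful length band.',
  guidance: 'Keep the summary between 200 and 1500 characters and include a "## Key Points" section.',
  pass_criteria: [
    {
      type: 'content_check',
      min_length: 200,
      max_length: 1500,
      required_patterns: ['## Key Points'],
    },
  ],
  retry_config: {
    max_attempts: 2,
    improvement_hints: true,
    preserve_context: true,
  },
  activation: {
    prompt_categories: ['analysis'],
    explicit_request: false,
  },
};
```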

--------------------------------------------------------------------------------
/server/tests/scripts/unit-conversation-manager.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | /**
  3 |  * Unit tests for ConversationManager - Node.js Script Version
  4 |  * Testing chain context, step result management, and state validation
  5 |  */
  6 | 
  7 | async function runConversationManagerTests() {
  8 |   try {
  9 |     console.log('🧪 Running ConversationManager unit tests...');
 10 |     console.log('📋 Testing conversation and chain management functionality');
 11 | 
 12 |     // Import modules
 13 |     const conversationModule = await import('../../dist/text-references/conversation.js');
 14 |     const loggerModule = await import('../../dist/logging/index.js');
 15 | 
 16 |     // Get ConversationManager from default export or named export
 17 |     const ConversationManager = conversationModule.ConversationManager || conversationModule.default;
 18 |     const createSimpleLogger = loggerModule.createSimpleLogger || loggerModule.default;
 19 | 
 20 |     let conversationManager;
 21 |     let logger;
 22 | 
 23 |     // Setup for each test
 24 |     function setupTest() {
 25 |       logger = createSimpleLogger();
 26 |       conversationManager = new ConversationManager(logger, 50);
 27 |     }
 28 | 
 29 |     // Simple assertion helper
 30 |     function assertEqual(actual, expected, testName) {
 31 |       const actualStr = JSON.stringify(actual);
 32 |       const expectedStr = JSON.stringify(expected);
 33 |       if (actualStr === expectedStr) {
 34 |         console.log(`✅ ${testName}: PASSED`);
 35 |         return true;
 36 |       } else {
 37 |         console.error(`❌ ${testName}: FAILED`);
 38 |         console.error(`   Expected: ${expectedStr}`);
 39 |         console.error(`   Actual:   ${actualStr}`);
 40 |         return false;
 41 |       }
 42 |     }
 43 | 
 44 |     function assertTruthy(value, testName) {
 45 |       if (value) {
 46 |         console.log(`✅ ${testName}: PASSED`);
 47 |         return true;
 48 |       } else {
 49 |         console.error(`❌ ${testName}: FAILED - Expected truthy value, got: ${value}`);
 50 |         return false;
 51 |       }
 52 |     }
 53 | 
 54 |     function assertType(value, expectedType, testName) {
 55 |       if (typeof value === expectedType) {
 56 |         console.log(`✅ ${testName}: PASSED`);
 57 |         return true;
 58 |       } else {
 59 |         console.error(`❌ ${testName}: FAILED - Expected type ${expectedType}, got: ${typeof value}`);
 60 |         return false;
 61 |       }
 62 |     }
 63 | 
 64 |     let testResults = [];
 65 | 
 66 |     // Test 1: Enhanced Step Result Management
 67 |     console.log('🔍 Test 1: Enhanced Step Result Management');
 68 | 
 69 |     setupTest();
 70 |     const chainId = 'test-chain-1';
 71 |     const stepResult = 'This is a real execution result';
 72 |     const metadata = { executionTime: 1500, framework: 'CAGEERF' };
 73 | 
 74 |     conversationManager.saveStepResult(chainId, 0, stepResult, false, metadata);
 75 |     const resultWithMeta = conversationManager.getStepResultWithMetadata(chainId, 0);
 76 | 
 77 |     // Check result structure
 78 |     testResults.push(assertEqual(resultWithMeta.result, stepResult, 'Step result content matches'));
 79 |     testResults.push(assertType(resultWithMeta.timestamp, 'number', 'Timestamp is number'));
 80 |     testResults.push(assertEqual(resultWithMeta.isPlaceholder, false, 'isPlaceholder flag correct'));
 81 |     testResults.push(assertEqual(resultWithMeta.executionMetadata, metadata, 'Execution metadata matches'));
 82 | 
 83 |     // Test legacy method compatibility
 84 |     const legacyResult = conversationManager.getStepResult(chainId, 0);
 85 |     testResults.push(assertEqual(legacyResult, stepResult, 'Legacy method compatibility'));
 86 | 
 87 |     // Test 2: Placeholder vs Real Results
 88 |     console.log('🔍 Test 2: Placeholder vs Real Results');
 89 | 
 90 |     setupTest();
 91 |     const chainId2 = 'test-chain-2';
 92 | 
 93 |     // Store placeholder and real results
 94 |     conversationManager.saveStepResult(chainId2, 0, '{{previous_message}}', true);
 95 |     conversationManager.saveStepResult(chainId2, 1, 'Detailed analysis of the problem...', false);
 96 | 
 97 |     const placeholderMeta = conversationManager.getStepResultWithMetadata(chainId2, 0);
 98 |     const realMeta = conversationManager.getStepResultWithMetadata(chainId2, 1);
 99 | 
100 |     testResults.push(assertEqual(placeholderMeta.isPlaceholder, true, 'Placeholder flag correct'));
101 |     testResults.push(assertEqual(realMeta.isPlaceholder, false, 'Real result flag correct'));
102 |     testResults.push(assertEqual(placeholderMeta.result, '{{previous_message}}', 'Placeholder content correct'));
103 |     testResults.push(assertEqual(realMeta.result, 'Detailed analysis of the problem...', 'Real result content correct'));
104 | 
105 |     // Test 3: Chain Context Management
106 |     console.log('🔍 Test 3: Chain Context Management');
107 | 
108 |     setupTest();
109 |     const chainId3 = 'test-chain-3';
110 | 
111 |     // Add some results
112 |     conversationManager.saveStepResult(chainId3, 0, 'Step 1 result', false);
113 |     conversationManager.saveStepResult(chainId3, 1, 'Step 2 result', false);
114 | 
115 |     // Test chain context retrieval
116 |     const chainResults = conversationManager.getChainResults ? conversationManager.getChainResults(chainId3) : [];
117 |     testResults.push(assertTruthy(Array.isArray(chainResults) || typeof chainResults === 'object', 'Chain results retrievable'));
118 | 
119 |     // Test 4: Memory Limit Handling
120 |     console.log('🔍 Test 4: Memory Limit Handling');
121 | 
122 |     setupTest(); // Creates manager with limit of 50
123 | 
124 |     // Try to store more than the limit
125 |     for (let i = 0; i < 60; i++) {
126 |       conversationManager.addToConversationHistory({ role: 'user', content: `Message ${i}`, timestamp: Date.now() });
127 |     }
128 | 
129 |     // Should have enforced the limit somehow (implementation dependent)
130 |     testResults.push(assertTruthy(true, 'Memory limit handling (basic functionality test)'));
131 | 
132 |     // Test 5: Basic Message Management
133 |     console.log('🔍 Test 5: Basic Message Management');
134 | 
135 |     setupTest();
136 |     const testMessage = { role: 'user', content: 'Test message', timestamp: Date.now() };
137 |     conversationManager.addToConversationHistory(testMessage);
138 | 
139 |     // Basic functionality test
140 |     testResults.push(assertTruthy(conversationManager, 'ConversationManager instance created'));
141 | 
142 |     // Results Summary
143 |     const passedTests = testResults.filter(result => result).length;
144 |     const totalTests = testResults.length;
145 | 
146 |     console.log('\n📊 ConversationManager Unit Tests Summary:');
147 |     console.log(`   ✅ Passed: ${passedTests}/${totalTests} tests`);
148 |     console.log(`   📊 Success Rate: ${((passedTests/totalTests)*100).toFixed(1)}%`);
149 | 
150 |     if (passedTests === totalTests) {
151 |       console.log('🎉 All ConversationManager unit tests passed!');
152 |       return true;
153 |     } else {
154 |       console.error('❌ Some ConversationManager tests failed');
155 |       return false;
156 |     }
157 | 
158 |   } catch (error) {
159 |     console.error('❌ ConversationManager tests failed with error:', error.message);
160 |     if (error.stack) {
161 |       console.error('Stack trace:', error.stack);
162 |     }
163 |     return false;
164 |   }
165 | }
166 | 
167 | // Run the tests
168 | if (import.meta.url === `file://${process.argv[1]}`) {
169 |   runConversationManagerTests().catch(error => {
170 |     console.error('❌ Test execution failed:', error);
171 |     process.exit(1);
172 |   });
173 | }
174 | 
175 | export { runConversationManagerTests };
```
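
The tail of this runner relies on `setupTest()` and the `assertEqual`/`assertType`/`assertTruthy` helpers defined earlier in the file (not shown on this page). For readers following the assertions above, here is a minimal sketch of the shape such helpers could take; the signatures and JSON-based comparison are assumptions, not the actual implementations.

```typescript
// Hypothetical assertion helpers matching how they are used above; the real
// implementations live earlier in the test file and may differ in detail.
function assertEqual(actual: unknown, expected: unknown, label: string): boolean {
  // JSON serialization covers the plain objects (metadata, summaries) compared above.
  const pass = JSON.stringify(actual) === JSON.stringify(expected);
  console.log(`${pass ? '✅' : '❌'} ${label}`);
  return pass;
}

function assertType(value: unknown, expectedType: string, label: string): boolean {
  const pass = typeof value === expectedType;
  console.log(`${pass ? '✅' : '❌'} ${label}`);
  return pass;
}

function assertTruthy(value: unknown, label: string): boolean {
  const pass = Boolean(value);
  console.log(`${pass ? '✅' : '❌'} ${label}`);
  return pass;
}
```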

--------------------------------------------------------------------------------
/.github/workflows/pr-validation.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: PR Validation
  2 | 
  3 | on:
  4 |   pull_request:
  5 |     types: [opened, synchronize, reopened]
  6 |     branches: [main, develop]
  7 | 
  8 | env:
  9 |   NODE_ENV: test
 10 | 
 11 | permissions:
 12 |   contents: read
 13 |   pull-requests: write
 14 |   issues: write
 15 |   actions: read
 16 | 
 17 | jobs:
 18 |   pr-quality-gates:
 19 |     name: Pull Request Quality Gates
 20 |     runs-on: ubuntu-latest
 21 | 
 22 |     steps:
 23 |       - name: Checkout repository
 24 |         uses: actions/checkout@v4
 25 |         with:
 26 |           fetch-depth: 0
 27 | 
 28 |       - name: Setup Node.js
 29 |         uses: actions/setup-node@v4
 30 |         with:
 31 |           node-version: "18"
 32 |           cache: "npm"
 33 |           cache-dependency-path: server/package-lock.json
 34 | 
 35 |       - name: Install dependencies
 36 |         working-directory: server
 37 |         run: npm ci --prefer-offline --no-audit
 38 | 
 39 |       - name: Core validation
 40 |         working-directory: server
 41 |         run: |
 42 |           echo "Running core validation pipeline..."
 43 |           npm run typecheck
 44 |           npm run build
 45 |           npm run test:integration
 46 |           npm run test:functional-mcp
 47 |         timeout-minutes: 10
 48 | 
 49 |       - name: Server functionality validation
 50 |         working-directory: server
 51 |         run: |
 52 |           echo "Testing complete server functionality..."
 53 | 
 54 |           # Test server startup functionality (includes build artifact validation)
 55 |           npm run test:ci-startup
 56 | 
 57 |           echo "✅ Server functionality validation completed"
 58 | 
 59 |       - name: Changed files analysis
 60 |         run: |
 61 |           echo "Analyzing changed files in this PR..."
 62 | 
 63 |           # Robust file diff with error handling
 64 |           if git diff --name-only origin/${{ github.base_ref }}...HEAD > changed_files.txt 2>/dev/null; then
 65 |             echo "✅ Successfully analyzed changed files"
 66 |           else
 67 |             echo "⚠️  Could not determine changed files, using fallback"
 68 |             # Fallback: get files changed in current commit
 69 |             git diff --name-only HEAD~1 HEAD > changed_files.txt 2>/dev/null || echo "No changes detected" > changed_files.txt
 70 |           fi
 71 | 
 72 |           echo "Files changed in this PR:"
 73 |           cat changed_files.txt || echo "No changed files detected"
 74 | 
 75 |           # Check if TypeScript files were modified
 76 |           if grep -q "\.ts$" changed_files.txt; then echo "✅ TypeScript files modified - validation completed"; fi
 77 | 
 78 |           # Check if methodology guides were modified
 79 |           if grep -q "frameworks/adapters.*methodology-guide" changed_files.txt; then echo "⚠️  Methodology guides modified - ensure all 4 methodologies remain compatible"; fi
 80 | 
 81 |           # Check if consolidated MCP tools were modified
 82 |           if grep -q "mcp-tools/\(prompt-engine\|prompt-manager\|system-control\)" changed_files.txt; then echo "⚠️  Consolidated MCP tools modified - ensure protocol compliance and backwards compatibility"; fi
 83 | 
 84 |           # Check if framework system core was modified
 85 |           if grep -q "frameworks/\(framework-manager\|framework-state-manager\)" changed_files.txt; then echo "⚠️  Framework system core modified - validate methodology switching functionality"; fi
 86 | 
 87 |           # Check if runtime system was modified
 88 |           if grep -q "runtime/\(application\|startup\)" changed_files.txt; then echo "⚠️  Runtime system modified - validate server startup and orchestration"; fi
 89 | 
 90 |           # Check if analysis system was modified
 91 |           if grep -q "analysis/configurable-semantic-analyzer" changed_files.txt; then echo "⚠️  Analysis system modified - validate prompt classification and framework integration"; fi
 92 | 
 93 |       - name: Comment PR with validation results
 94 |         uses: actions/github-script@v7
 95 |         if: always()
 96 |         with:
 97 |           script: |
 98 |             const fs = require('fs');
 99 |             let changedFiles = '';
100 |             try {
101 |               changedFiles = fs.readFileSync('changed_files.txt', 'utf8').trim();
102 |               if (!changedFiles) {
103 |                 changedFiles = 'No changes detected';
104 |               }
105 |             } catch (e) {
106 |               console.log(`Warning: Could not read changed_files.txt: ${e.message}`);
107 |               changedFiles = 'Unable to read changed files (this is normal for some PR types)';
108 |             }
109 |             const status = '${{ job.status }}';
110 |             const runUrl = `${context.payload.repository.html_url}/actions/runs/${context.runId}`;
111 |             let message;
112 |             if (status === 'success') {
113 |               const lines = [
114 |                 '## ✅ PR Validation Passed!',
115 |                 '',
116 |                 '**All quality gates have passed for this pull request.**',
117 |                 '',
118 |                 '### Validation Summary:',
119 |                 '- ✅ TypeScript compilation successful',
120 |                 '- ✅ Build process completed',
121 |                 '- ✅ All tests passed',
122 |                 '- ✅ Functional MCP validation passed (intelligent routing, framework system, transport layer)',
123 |                 '- ✅ Server startup validation completed',
124 |                 '',
125 |                 '### Files Changed:',
126 |                 '```',
127 |                 changedFiles,
128 |                 '```',
129 |                 '',
130 |                 `[View detailed results](${runUrl})`
131 |               ];
132 |               message = lines.join('\n');
133 |             } else {
134 |               const lines = [
135 |                 '## ❌ PR Validation Failed',
136 |                 '',
137 |                 '**Quality gates failed for this pull request.**',
138 |                 '',
139 |                 `Please check the [detailed logs](${runUrl}) and fix the issues before merging.`,
140 |                 '',
141 |                 '### Files Changed:',
142 |                 '```',
143 |                 changedFiles,
144 |                 '```',
145 |                 '',
146 |                 '**Common fixes:**',
147 |                 '- Run `cd server && npm run typecheck` locally',
148 |                 '- Run `cd server && npm run build` locally',
149 |                 '- Run `cd server && npm test` locally',
150 |                 '- Ensure functional MCP validation passes (intelligent routing + framework system)'
151 |               ];
152 |               message = lines.join('\n');
153 |             }
154 |             // Check if a comment already exists
155 |             const comments = await github.rest.issues.listComments({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number });
156 |             const botComment = comments.data.find(comment => comment.user.type === 'Bot' && comment.body.includes('PR Validation'));
157 |             if (botComment) {
158 |               // Update existing comment
159 |               await github.rest.issues.updateComment({ owner: context.repo.owner, repo: context.repo.repo, comment_id: botComment.id, body: message });
160 |             } else {
161 |               // Create new comment
162 |               await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, body: message });
163 |             }
164 | 
165 | 
```
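
The final `github-script` step performs a comment "upsert": it searches existing PR comments for a previous bot comment containing "PR Validation" and updates it instead of posting a duplicate on every push. The same pattern is sketched below as a standalone TypeScript function; `octokit` and the context fields are assumptions standing in for what `actions/github-script` injects, and only the REST calls already used in the step appear here.

```typescript
// Sketch of the comment-upsert pattern from the github-script step above.
interface PrTarget {
  owner: string;
  repo: string;
  issue_number: number;
}

async function upsertValidationComment(
  octokit: any, // stands in for the `github` client injected by actions/github-script
  target: PrTarget,
  body: string
): Promise<void> {
  // Pull request comments are served through the issues API.
  const comments = await octokit.rest.issues.listComments(target);

  // Reuse the previous bot comment if one exists, otherwise create a new one.
  const existing = comments.data.find(
    (c: any) => c.user.type === 'Bot' && c.body.includes('PR Validation')
  );

  if (existing) {
    await octokit.rest.issues.updateComment({
      owner: target.owner,
      repo: target.repo,
      comment_id: existing.id,
      body,
    });
  } else {
    await octokit.rest.issues.createComment({ ...target, body });
  }
}
```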

--------------------------------------------------------------------------------
/server/src/utils/jsonUtils.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // JSON utility functions
  2 | 
  3 | import nunjucks from "nunjucks";
  4 | import path from "path"; // Import path module
  5 | import { fileURLToPath } from "url"; // For ES module __dirname equivalent
  6 | import { PromptData } from "../types.js";
  7 | // JSON escaping utilities (moved here to avoid circular dependency)
  8 | function escapeJsonForNunjucks(jsonStr: string): string {
  9 |   return jsonStr
 10 |     .replace(/\{\{/g, '\\{\\{')  // Escape Nunjucks variable syntax
 11 |     .replace(/\}\}/g, '\\}\\}')  // Escape Nunjucks variable syntax  
 12 |     .replace(/\{%/g, '\\{\\%')   // Escape Nunjucks tag syntax
 13 |     .replace(/%\}/g, '\\%\\}')   // Escape Nunjucks tag syntax
 14 |     .replace(/\{#/g, '\\{\\#')   // Escape Nunjucks comment syntax
 15 |     .replace(/#\}/g, '\\#\\}');  // Escape Nunjucks comment syntax
 16 | }
 17 | 
 18 | function unescapeJsonFromNunjucks(escapedStr: string): string {
 19 |   return escapedStr
 20 |     .replace(/\\{\\{/g, '{{')   // Unescape Nunjucks variable syntax
 21 |     .replace(/\\}\\}/g, '}}')   // Unescape Nunjucks variable syntax
 22 |     .replace(/\\{\\%/g, '{%')   // Unescape Nunjucks tag syntax  
 23 |     .replace(/\\%\\}/g, '%}')   // Unescape Nunjucks tag syntax
 24 |     .replace(/\\{\\#/g, '{#')   // Unescape Nunjucks comment syntax
 25 |     .replace(/\\#\\}/g, '#}');  // Unescape Nunjucks comment syntax
 26 | }
 27 | 
 28 | // ES module equivalent of __dirname
 29 | const __filename = fileURLToPath(import.meta.url);
 30 | const __dirname = path.dirname(__filename);
 31 | 
 32 | // Define the base path for prompt templates
 33 | // This assumes jsonUtils.ts is in server/src/utils/ and prompts are in server/prompts/
 34 | const promptTemplatesPath = path.resolve(__dirname, "../../prompts");
 35 | 
 36 | // Configure Nunjucks environment with a FileSystemLoader
 37 | const nunjucksEnv = nunjucks.configure(promptTemplatesPath, {
 38 |   autoescape: false, // We're generating plain text prompts for LLM, not HTML
 39 |   throwOnUndefined: false, // Renders undefined variables as an empty string for better compatibility
 40 |   watch: false, // Set to true for development to auto-reload templates; false for production
 41 |   noCache: process.env.NODE_ENV === "development", // Disable cache in development, enable in production
 42 |   tags: {
 43 |     blockStart: "{%",
 44 |     blockEnd: "%}",
 45 |     variableStart: "{{",
 46 |     variableEnd: "}}",
 47 |     commentStart: "{#",
 48 |     commentEnd: "#}",
 49 |   },
 50 | });
 51 | 
 52 | /**
 53 |  * Validates JSON arguments against the prompt's expected arguments
 54 |  * @param jsonArgs The JSON arguments to validate
 55 |  * @param prompt The prompt data containing expected arguments
 56 |  * @returns Object with validation results and sanitized arguments
 57 |  */
 58 | export function validateJsonArguments(
 59 |   jsonArgs: any,
 60 |   prompt: PromptData
 61 | ): {
 62 |   valid: boolean;
 63 |   errors?: string[];
 64 |   sanitizedArgs?: Record<string, string | number | boolean | null | any[]>;
 65 | } {
 66 |   const errors: string[] = [];
 67 |   const sanitizedArgs: Record<string, string | number | boolean | null | any[]> = {};
 68 | 
 69 |   // Check for unexpected properties
 70 |   const expectedArgNames = prompt.arguments.map((arg) => arg.name);
 71 |   const providedArgNames = Object.keys(jsonArgs);
 72 | 
 73 |   for (const argName of providedArgNames) {
 74 |     if (!expectedArgNames.includes(argName)) {
 75 |       errors.push(`Unexpected argument: ${argName}`);
 76 |     }
 77 |   }
 78 | 
 79 |   // Check for and sanitize expected arguments
 80 |   for (const arg of prompt.arguments) {
 81 |     const value = jsonArgs[arg.name];
 82 | 
 83 |     // All arguments are treated as optional now
 84 |     if (value !== undefined) {
 85 |       // Sanitize the value based on expected type
 86 |       // This is a simple implementation - expand as needed for your use case
 87 |       if (typeof value === "string") {
 88 |         // Sanitize string inputs
 89 |         sanitizedArgs[arg.name] = value
 90 |           .replace(/[<>]/g, "") // Remove potentially dangerous HTML characters
 91 |           .trim();
 92 |       } else if (typeof value === "number") {
 93 |         // Ensure it's a valid number
 94 |         sanitizedArgs[arg.name] = isNaN(value) ? 0 : value;
 95 |       } else if (typeof value === "boolean") {
 96 |         sanitizedArgs[arg.name] = !!value; // Ensure boolean type
 97 |       } else if (Array.isArray(value)) {
 98 |         // For arrays, sanitize each element if they're strings
 99 |         sanitizedArgs[arg.name] = value.map((item) =>
100 |           typeof item === "string" ? item.replace(/[<>]/g, "").trim() : item
101 |         );
102 |       } else if (value !== null && typeof value === "object") {
103 |         // For objects, convert to string for simplicity
104 |         sanitizedArgs[arg.name] = JSON.stringify(value);
105 |       } else {
106 |         // For any other type, convert to string
107 |         sanitizedArgs[arg.name] = String(value);
108 |       }
109 |     }
110 |   }
111 | 
112 |   return {
113 |     valid: errors.length === 0,
114 |     errors: errors.length > 0 ? errors : undefined,
115 |     sanitizedArgs,
116 |   };
117 | }
118 | 
119 | /**
120 |  * Processes a template string by replacing placeholders with values using Nunjucks
121 |  * @param template The template string with placeholders and potential Nunjucks logic
122 |  * @param args The arguments to replace placeholders with
123 |  * @param specialContext Special context values to replace first
124 |  * @returns The processed template string
125 |  */
126 | export function processTemplate(
127 |   template: string,
128 |   args: Record<string, any>,
129 |   specialContext: Record<string, string> = {}
130 | ): string {
131 |   // Pre-escape any string values that might contain Nunjucks syntax
132 |   const escapedArgs: Record<string, any> = {};
133 |   for (const [key, value] of Object.entries(args)) {
134 |     if (typeof value === 'string' && (value.includes('{{') || value.includes('{%') || value.includes('{#'))) {
135 |       escapedArgs[key] = escapeJsonForNunjucks(value);
136 |     } else {
137 |       // Pass non-string values (arrays, objects) directly to Nunjucks
138 |       escapedArgs[key] = value;
139 |     }
140 |   }
141 | 
142 |   const context = { ...specialContext, ...escapedArgs };
143 | 
144 |   try {
145 |     // Use Nunjucks to render the template with the combined context
146 |     const rendered = nunjucksEnv.renderString(template, context);
147 |     
148 |     // Unescape any values that were escaped for Nunjucks
149 |     let unescapedResult = rendered;
150 |     for (const [key, value] of Object.entries(escapedArgs)) {
151 |       if (typeof value === 'string' && value !== args[key]) {
152 |         // This arg was escaped, so we need to unescape it in the result
153 |         const originalValue = args[key];
154 |         const escapedValue = value;
155 |         unescapedResult = unescapedResult.replace(new RegExp(escapedValue.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'g'), originalValue);
156 |       }
157 |     }
158 |     
159 |     return unescapedResult;
160 |   } catch (error) {
161 |     // Log the Nunjucks rendering error for debugging purposes.
162 |     // The error will be re-thrown and should be handled by the calling function
163 |     // (e.g., in TemplateProcessor) which can add more context like Prompt ID.
164 |     if (error instanceof Error) {
165 |       console.error(
166 |         "[Nunjucks Render Error] Failed to process template:",
167 |         error.message
168 |       );
169 |       // Optionally, log error.stack for more detailed debugging if needed in development
170 |       // if (process.env.NODE_ENV === 'development' && error.stack) {
171 |       //   console.error(error.stack);
172 |       // }
173 |     } else {
174 |       console.error(
175 |         "[Nunjucks Render Error] Failed to process template with an unknown error object:",
176 |         error
177 |       );
178 |     }
179 |     throw error; // Re-throw the original error
180 |   }
181 | }
182 | 
```
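
Note how `processTemplate` escapes any argument value that itself contains Nunjucks delimiters before rendering and restores the original text afterwards, so user-supplied `{{ ... }}` fragments pass through literally instead of being evaluated. A short usage sketch, assuming the function is imported from this module's build output; the template text and argument values are made up for illustration.

```typescript
// Illustrative only; the import path is an assumption about the build layout.
import { processTemplate } from './jsonUtils.js';

const template = 'Summarize {{topic}} and quote the user input verbatim: {{raw_input}}';

const rendered = processTemplate(template, {
  topic: 'chain execution',
  // Contains Nunjucks syntax: it is escaped before rendering and unescaped after,
  // so it appears literally in the output rather than being evaluated.
  raw_input: 'the step placeholder looks like {{previous_message}}',
});

// rendered === 'Summarize chain execution and quote the user input verbatim: ' +
//              'the step placeholder looks like {{previous_message}}'
console.log(rendered);
```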

--------------------------------------------------------------------------------
/server/src/gates/core/gate-loader.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Gate Loader - Loads gate definitions from YAML/JSON files
  3 |  * Provides hot-reloading capabilities similar to prompt system
  4 |  */
  5 | 
  6 | import * as fs from 'fs/promises';
  7 | import * as path from 'path';
  8 | import { fileURLToPath } from 'url';
  9 | import { Logger } from '../../logging/index.js';
 10 | import type { LightweightGateDefinition, GateActivationResult } from '../types.js';
 11 | 
 12 | /**
 13 |  * Gate loader with caching and hot-reload support
 14 |  */
 15 | export class GateLoader {
 16 |   private gateCache = new Map<string, LightweightGateDefinition>();
 17 |   private lastModified = new Map<string, number>();
 18 |   private logger: Logger;
 19 |   private gatesDirectory: string;
 20 | 
 21 |   constructor(logger: Logger, gatesDirectory?: string) {
 22 |     this.logger = logger;
 23 |     // Use import.meta.url to get current directory in ES modules
 24 |     const __filename = fileURLToPath(import.meta.url);
 25 |     const __dirname = path.dirname(__filename);
 26 |     this.gatesDirectory = gatesDirectory || path.join(__dirname, '../../gates/definitions');
 27 |   }
 28 | 
 29 |   /**
 30 |    * Load a gate definition by ID with caching
 31 |    */
 32 |   async loadGate(gateId: string): Promise<LightweightGateDefinition | null> {
 33 |     try {
 34 |       const gateFile = await this.findGateFile(gateId);
 35 |       if (!gateFile) {
 36 |         this.logger.warn(`Gate definition not found: ${gateId}`);
 37 |         return null;
 38 |       }
 39 | 
 40 |       // Check if we need to reload
 41 |       const stat = await fs.stat(gateFile);
 42 |       const lastMod = this.lastModified.get(gateId);
 43 | 
 44 |       if (!this.gateCache.has(gateId) || !lastMod || stat.mtimeMs > lastMod) {
 45 |         this.logger.debug(`Loading gate definition: ${gateId}`);
 46 |         const gate = await this.parseGateFile(gateFile);
 47 | 
 48 |         if (gate && gate.id === gateId) {
 49 |           this.gateCache.set(gateId, gate);
 50 |           this.lastModified.set(gateId, stat.mtimeMs);
 51 |           this.logger.debug(`Gate loaded successfully: ${gateId}`);
 52 |         } else {
 53 |           this.logger.error(`Gate ID mismatch in file ${gateFile}: expected ${gateId}, got ${gate?.id}`);
 54 |           return null;
 55 |         }
 56 |       }
 57 | 
 58 |       return this.gateCache.get(gateId) || null;
 59 |     } catch (error) {
 60 |       this.logger.error(`Failed to load gate ${gateId}:`, error);
 61 |       return null;
 62 |     }
 63 |   }
 64 | 
 65 |   /**
 66 |    * Load multiple gates by IDs
 67 |    */
 68 |   async loadGates(gateIds: string[]): Promise<LightweightGateDefinition[]> {
 69 |     const gates: LightweightGateDefinition[] = [];
 70 | 
 71 |     for (const gateId of gateIds) {
 72 |       const gate = await this.loadGate(gateId);
 73 |       if (gate) {
 74 |         gates.push(gate);
 75 |       }
 76 |     }
 77 | 
 78 |     return gates;
 79 |   }
 80 | 
 81 |   /**
 82 |    * Get active gates based on context and criteria
 83 |    */
 84 |   async getActiveGates(
 85 |     gateIds: string[],
 86 |     context: {
 87 |       promptCategory?: string;
 88 |       framework?: string;
 89 |       explicitRequest?: boolean;
 90 |     }
 91 |   ): Promise<GateActivationResult> {
 92 |     const allGates = await this.loadGates(gateIds);
 93 |     const activeGates: LightweightGateDefinition[] = [];
 94 |     const guidanceText: string[] = [];
 95 |     const validationGates: LightweightGateDefinition[] = [];
 96 | 
 97 |     for (const gate of allGates) {
 98 |       if (this.shouldActivateGate(gate, context)) {
 99 |         activeGates.push(gate);
100 | 
101 |         // Collect guidance text
102 |         if (gate.guidance) {
103 |           guidanceText.push(`**${gate.name}:**\n${gate.guidance}`);
104 |         }
105 | 
106 |         // Collect validation gates
107 |         if (gate.type === 'validation') {
108 |           validationGates.push(gate);
109 |         }
110 |       }
111 |     }
112 | 
113 |     return {
114 |       activeGates,
115 |       guidanceText,
116 |       validationGates
117 |     };
118 |   }
119 | 
120 |   /**
121 |    * List all available gates
122 |    */
123 |   async listAvailableGates(): Promise<string[]> {
124 |     try {
125 |       const files = await fs.readdir(this.gatesDirectory);
126 |       const gateFiles = files.filter(file =>
127 |         file.endsWith('.yaml') || file.endsWith('.yml') || file.endsWith('.json')
128 |       );
129 | 
130 |       return gateFiles.map(file => path.basename(file, path.extname(file)));
131 |     } catch (error) {
132 |       this.logger.error('Failed to list available gates:', error);
133 |       return [];
134 |     }
135 |   }
136 | 
137 |   /**
138 |    * Clear gate cache (for hot-reloading)
139 |    */
140 |   clearCache(gateId?: string): void {
141 |     if (gateId) {
142 |       this.gateCache.delete(gateId);
143 |       this.lastModified.delete(gateId);
144 |       this.logger.debug(`Cleared cache for gate: ${gateId}`);
145 |     } else {
146 |       this.gateCache.clear();
147 |       this.lastModified.clear();
148 |       this.logger.debug('Cleared all gate cache');
149 |     }
150 |   }
151 | 
152 |   /**
153 |    * Find the gate file for a given ID
154 |    */
155 |   private async findGateFile(gateId: string): Promise<string | null> {
156 |     const extensions = ['.yaml', '.yml', '.json'];
157 | 
158 |     for (const ext of extensions) {
159 |       const filePath = path.join(this.gatesDirectory, `${gateId}${ext}`);
160 |       try {
161 |         await fs.access(filePath);
162 |         return filePath;
163 |       } catch {
164 |         // File doesn't exist, try next extension
165 |       }
166 |     }
167 | 
168 |     return null;
169 |   }
170 | 
171 |   /**
172 |    * Parse a gate file (YAML or JSON)
173 |    */
174 |   private async parseGateFile(filePath: string): Promise<LightweightGateDefinition | null> {
175 |     try {
176 |       const content = await fs.readFile(filePath, 'utf8');
177 |       const ext = path.extname(filePath);
178 | 
179 |       let parsed: any;
180 |       if (ext === '.json') {
181 |         parsed = JSON.parse(content);
182 |       } else {
183 |         // For YAML support, we'd need to add js-yaml dependency
184 |         // For now, support JSON only to avoid new dependencies
185 |         throw new Error(`YAML support not implemented. Convert ${filePath} to JSON.`);
186 |       }
187 | 
188 |       // Basic validation
189 |       if (!parsed.id || !parsed.name || !parsed.type) {
190 |         throw new Error(`Invalid gate definition in ${filePath}: missing required fields`);
191 |       }
192 | 
193 |       return parsed as LightweightGateDefinition;
194 |     } catch (error) {
195 |       this.logger.error(`Failed to parse gate file ${filePath}:`, error);
196 |       return null;
197 |     }
198 |   }
199 | 
200 |   /**
201 |    * Check if a gate should be activated based on context
202 |    */
203 |   private shouldActivateGate(
204 |     gate: LightweightGateDefinition,
205 |     context: {
206 |       promptCategory?: string;
207 |       framework?: string;
208 |       explicitRequest?: boolean;
209 |     }
210 |   ): boolean {
211 |     const activation = gate.activation;
212 |     if (!activation) {
213 |       // No activation rules means always active
214 |       return true;
215 |     }
216 | 
217 |     // Check explicit request
218 |     if (activation.explicit_request && !context.explicitRequest) {
219 |       return false;
220 |     }
221 | 
222 |     // Check prompt categories
223 |     if (activation.prompt_categories && context.promptCategory) {
224 |       if (!activation.prompt_categories.includes(context.promptCategory)) {
225 |         return false;
226 |       }
227 |     }
228 | 
229 |     // Check framework context
230 |     if (activation.framework_context && context.framework) {
231 |       if (!activation.framework_context.includes(context.framework)) {
232 |         return false;
233 |       }
234 |     }
235 | 
236 |     return true;
237 |   }
238 | 
239 |   /**
240 |    * Get gate statistics
241 |    */
242 |   getStatistics(): {
243 |     cachedGates: number;
244 |     totalLoads: number;
245 |     lastAccess: Date | null;
246 |   } {
247 |     return {
248 |       cachedGates: this.gateCache.size,
249 |       totalLoads: this.lastModified.size,
250 |       lastAccess: this.lastModified.size > 0 ? new Date() : null
251 |     };
252 |   }
253 | }
254 | 
255 | /**
256 |  * Create a gate loader instance
257 |  */
258 | export function createGateLoader(logger: Logger, gatesDirectory?: string): GateLoader {
259 |   return new GateLoader(logger, gatesDirectory);
260 | }
```
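
A short usage sketch of the loader: create it, list the definitions on disk, then ask which of them activate for a given execution context. The logger parameter and context values are placeholders for illustration; only methods defined on `GateLoader` above are called.

```typescript
// Illustrative usage of GateLoader; context values are placeholders.
import { createGateLoader } from './gate-loader.js';

async function demoGateLoading(logger: any): Promise<void> {
  const loader = createGateLoader(logger);

  // Discover which definition files exist (JSON today; YAML would require js-yaml).
  const available = await loader.listAvailableGates();

  // Resolve the gates that should be active for this prompt/framework combination.
  const activation = await loader.getActiveGates(available, {
    promptCategory: 'analysis', // hypothetical category
    framework: 'CAGEERF',
    explicitRequest: false,
  });

  console.log('guidance blocks:', activation.guidanceText.length);
  console.log('validation gates:', activation.validationGates.map(g => g.id));
}
```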

--------------------------------------------------------------------------------
/server/tests/unit/conversation-manager.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Unit tests for enhanced ConversationManager functionality
  3 |  * Testing chain context, step result management, and state validation
  4 |  */
  5 | 
  6 | import { ConversationManager } from '../../dist/text-references/conversation.js';
  7 | import { createSimpleLogger } from '../../dist/logging/index.js';
  8 | 
  9 | describe('ConversationManager - Chain Execution Enhancements', () => {
 10 |   let conversationManager;
 11 |   let logger;
 12 | 
 13 |   beforeEach(() => {
 14 |     logger = createSimpleLogger();
 15 |     conversationManager = new ConversationManager(logger, 50);
 16 |   });
 17 | 
 18 |   describe('Enhanced Step Result Management', () => {
 19 |     test('should store step results with metadata', () => {
 20 |       const chainId = 'test-chain-1';
 21 |       const stepResult = 'This is a real execution result';
 22 |       const metadata = { executionTime: 1500, framework: 'CAGEERF' };
 23 | 
 24 |       conversationManager.saveStepResult(chainId, 0, stepResult, false, metadata);
 25 | 
 26 |       const resultWithMeta = conversationManager.getStepResultWithMetadata(chainId, 0);
 27 |       expect(resultWithMeta).toEqual({
 28 |         result: stepResult,
 29 |         timestamp: expect.any(Number),
 30 |         isPlaceholder: false,
 31 |         executionMetadata: metadata
 32 |       });
 33 | 
 34 |       // Should also work with legacy method
 35 |       const legacyResult = conversationManager.getStepResult(chainId, 0);
 36 |       expect(legacyResult).toBe(stepResult);
 37 |     });
 38 | 
 39 |     test('should distinguish between placeholder and real results', () => {
 40 |       const chainId = 'test-chain-2';
 41 | 
 42 |       // Store a placeholder result
 43 |       conversationManager.saveStepResult(chainId, 0, '{{previous_message}}', true);
 44 |       
 45 |       // Store a real result
 46 |       conversationManager.saveStepResult(chainId, 1, 'Detailed analysis of the problem...', false);
 47 | 
 48 |       const placeholderMeta = conversationManager.getStepResultWithMetadata(chainId, 0);
 49 |       const realMeta = conversationManager.getStepResultWithMetadata(chainId, 1);
 50 | 
 51 |       expect(placeholderMeta.isPlaceholder).toBe(true);
 52 |       expect(realMeta.isPlaceholder).toBe(false);
 53 |     });
 54 | 
 55 |     test('should provide chain execution summary', () => {
 56 |       const chainId = 'test-chain-summary';
 57 |       
 58 |       conversationManager.setChainState(chainId, 2, 4);
 59 |       conversationManager.saveStepResult(chainId, 0, 'First step result', false);
 60 |       conversationManager.saveStepResult(chainId, 1, '{{placeholder}}', true);
 61 |       
 62 |       const summary = conversationManager.getChainSummary(chainId);
 63 |       
 64 |       expect(summary).toEqual({
 65 |         state: {
 66 |           currentStep: 2,
 67 |           totalSteps: 4,
 68 |           lastUpdated: expect.any(Number)
 69 |         },
 70 |         completedSteps: 2,
 71 |         placeholderSteps: 1,
 72 |         realSteps: 1,
 73 |         totalResults: 2
 74 |       });
 75 |     });
 76 |   });
 77 | 
 78 |   describe('Chain State Validation', () => {
 79 |     test('should validate healthy chain state', () => {
 80 |       const chainId = 'test-chain-healthy';
 81 |       
 82 |       conversationManager.setChainState(chainId, 2, 4);
 83 |       conversationManager.saveStepResult(chainId, 0, 'Step 0 result');
 84 |       conversationManager.saveStepResult(chainId, 1, 'Step 1 result');
 85 |       
 86 |       const validation = conversationManager.validateChainState(chainId);
 87 |       
 88 |       expect(validation.valid).toBe(true);
 89 |       expect(validation.issues).toBeUndefined();
 90 |       expect(validation.recovered).toBeFalsy();
 91 |     });
 92 | 
 93 |     test('should detect and recover from invalid current step', () => {
 94 |       const chainId = 'test-chain-invalid';
 95 |       
 96 |       // Manually create invalid state (currentStep > totalSteps)
 97 |       conversationManager.setChainState(chainId, 5, 3);
 98 |       
 99 |       const validation = conversationManager.validateChainState(chainId);
100 |       
101 |       expect(validation.valid).toBe(false);
102 |       expect(validation.issues).toContain('Current step 5 exceeds total steps 3');
103 |       expect(validation.recovered).toBe(true);
104 |       
105 |       // Should have auto-corrected the state
106 |       const correctedState = conversationManager.getChainState(chainId);
107 |       expect(correctedState.currentStep).toBe(3);
108 |       expect(correctedState.totalSteps).toBe(3);
109 |     });
110 | 
111 |     test('should detect stale chain state', () => {
112 |       const chainId = 'test-chain-stale';
113 |       
114 |       // Manually set old timestamp (2 hours ago)
115 |       const twoHoursAgo = Date.now() - (2 * 60 * 60 * 1000);
116 |       conversationManager.setChainState(chainId, 1, 3);
117 |       conversationManager.chainStates[chainId].lastUpdated = twoHoursAgo;
118 |       
119 |       const validation = conversationManager.validateChainState(chainId);
120 |       
121 |       expect(validation.valid).toBe(false);
122 |       expect(validation.issues).toContain('Chain state is stale (>1 hour old)');
123 |     });
124 | 
125 |     test('should handle missing chain state gracefully', () => {
126 |       const validation = conversationManager.validateChainState('nonexistent-chain');
127 |       
128 |       expect(validation.valid).toBe(false);
129 |       expect(validation.issues).toContain('No chain state found');
130 |     });
131 |   });
132 | 
133 |   describe('Context Cleanup', () => {
134 |     test('should clear all chain data when clearing context', () => {
135 |       const chainId = 'test-chain-cleanup';
136 |       
137 |       conversationManager.setChainState(chainId, 1, 3);
138 |       conversationManager.saveStepResult(chainId, 0, 'Test result', false);
139 |       
140 |       // Verify data exists
141 |       expect(conversationManager.getChainState(chainId)).toBeDefined();
142 |       expect(conversationManager.getStepResult(chainId, 0)).toBe('Test result');
143 |       expect(conversationManager.getStepResultWithMetadata(chainId, 0)).toBeDefined();
144 |       
145 |       // Clear and verify cleanup
146 |       conversationManager.clearChainContext(chainId);
147 |       
148 |       expect(conversationManager.getChainState(chainId)).toBeUndefined();
149 |       expect(conversationManager.getStepResult(chainId, 0)).toBeUndefined();
150 |       expect(conversationManager.getStepResultWithMetadata(chainId, 0)).toBeUndefined();
151 |     });
152 | 
153 |     test('should clear all chains when clearing all contexts', () => {
154 |       const chain1 = 'chain-1';
155 |       const chain2 = 'chain-2';
156 |       
157 |       conversationManager.setChainState(chain1, 1, 2);
158 |       conversationManager.setChainState(chain2, 2, 3);
159 |       conversationManager.saveStepResult(chain1, 0, 'Chain 1 result');
160 |       conversationManager.saveStepResult(chain2, 0, 'Chain 2 result');
161 |       
162 |       conversationManager.clearAllChainContexts();
163 |       
164 |       expect(conversationManager.getChainState(chain1)).toBeUndefined();
165 |       expect(conversationManager.getChainState(chain2)).toBeUndefined();
166 |       expect(conversationManager.getStepResult(chain1, 0)).toBeUndefined();
167 |       expect(conversationManager.getStepResult(chain2, 0)).toBeUndefined();
168 |     });
169 |   });
170 | 
171 |   describe('Integration with Legacy Interface', () => {
172 |     test('should maintain compatibility with existing saveStepResult calls', () => {
173 |       const chainId = 'test-legacy';
174 |       
175 |       // Legacy call without placeholder flag
176 |       conversationManager.saveStepResult(chainId, 0, 'Legacy result');
177 |       
178 |       const result = conversationManager.getStepResult(chainId, 0);
179 |       const resultWithMeta = conversationManager.getStepResultWithMetadata(chainId, 0);
180 |       
181 |       expect(result).toBe('Legacy result');
182 |       expect(resultWithMeta.result).toBe('Legacy result');
183 |       expect(resultWithMeta.isPlaceholder).toBe(false); // Default value
184 |     });
185 |   });
186 | });
```
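
Taken together, these tests pin down a metadata envelope around each stored step result and a summary object per chain. As a reading aid, the shapes the expectations above imply are written out below as hypothetical interfaces; the actual types live in the ConversationManager implementation and may be named differently.

```typescript
// Hypothetical shapes inferred from the expectations above; names are assumptions.
interface StepResultWithMetadata {
  result: string;                               // the stored step output
  timestamp: number;                            // set when saveStepResult is called
  isPlaceholder: boolean;                       // true for '{{...}}' placeholders
  executionMetadata?: Record<string, unknown>;  // e.g. { executionTime: 1500, framework: 'CAGEERF' }
}

interface ChainSummary {
  state: { currentStep: number; totalSteps: number; lastUpdated: number };
  completedSteps: number;
  placeholderSteps: number;
  realSteps: number;
  totalResults: number;
}
```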

--------------------------------------------------------------------------------
/server/prompts/debugging/analyze_logs.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Advanced Log Analysis & Debugging
  2 | 
  3 | ## Description
  4 | Comprehensive log analysis template incorporating advanced debugging strategies used by senior developers and SREs for systematic root cause analysis
  5 | 
  6 | ## System Message
  7 | You are a SENIOR DIAGNOSTIC AGENT with expertise in advanced log analysis and production system debugging. Your mission is to methodically analyze logs using professional debugging strategies to identify root causes and provide comprehensive actionable insights.
  8 | 
  9 | You have access to all necessary tools for thorough investigation: codebase search, file reading, web research, and MCP tools. Use them systematically to build evidence-based conclusions.
 10 | 
 11 | ## User Message Template
 12 | ## Advanced Log Analysis Request
 13 | 
 14 | **Logs to Analyze:**
 15 | ```
 16 | {{logs}}
 17 | ```
 18 | 
 19 | **System Context:**
 20 | {{context}}
 21 | 
 22 | ---
 23 | 
 24 | ## Comprehensive Analysis Framework
 25 | 
 26 | Perform systematic log analysis using these advanced strategies:
 27 | 
 28 | ### 1. **Log Parsing & Initial Triage**
 29 | - Extract and categorize all log entries by type (info, warning, error, debug)
 30 | - Identify timestamps and execution sequence
 31 | - Flag critical errors and their immediate context
 32 | - Note any obvious failure patterns
 33 | 
 34 | ### 2. **Temporal & Frequency Analysis**
 35 | - **Timing Patterns**: Identify event clustering, periodic failures, or timing correlations
 36 | - **Rate Analysis**: Detect unusual frequency changes, burst patterns, or rate limiting
 37 | - **Sequence Analysis**: Map chronological flow and identify timing dependencies
 38 | - **Performance Degradation**: Look for gradually increasing response times or resource usage
 39 | 
 40 | ### 3. **Correlation Analysis**
 41 | - **Cross-Component Events**: Link related events across different systems/modules
 42 | - **Cascade Effect Detection**: Identify how failures in one component trigger others
 43 | - **Request Tracing**: Follow specific requests/sessions through the entire system
 44 | - **Dependency Impact**: Map how external service issues affect internal components
 45 | 
 46 | ### 4. **Performance & Resource Pattern Analysis**
 47 | - **Bottleneck Detection**: Identify slow operations, blocked threads, or queue buildups
 48 | - **Resource Exhaustion**: Look for memory leaks, connection pool depletion, file handle limits
 49 | - **Scaling Issues**: Detect load-related failures or capacity constraints
 50 | - **Database/Network Issues**: Identify connection timeouts, query performance, API latency
 51 | 
 52 | ### 5. **Component Chain Analysis**
 53 | - Map the component initialization/execution flow
 54 | - Identify which systems are starting successfully vs failing
 55 | - Trace dependencies between components
 56 | - Note any broken component chains or missing dependencies
 57 | 
 58 | ### 6. **Anomaly Detection**
 59 | - **Baseline Comparison**: Compare current behavior against normal patterns
 60 | - **Outlier Identification**: Flag unusual values, unexpected events, or deviations
 61 | - **Missing Events**: Identify expected log entries that are absent
 62 | - **Volume Anomalies**: Detect unusual increases/decreases in log volume
 63 | 
 64 | ### 7. **Error Pattern Investigation**
 65 | For each identified error/warning:
 66 | - Extract the exact error message and stack trace
 67 | - Identify the component/file/line where it originated
 68 | - Determine if it's a primary failure or secondary effect
 69 | - Assess impact on overall system functionality
 70 | - **Correlation Impact**: Check if errors coincide with other system events
 71 | 
 72 | ### 8. **Log Quality Assessment**
 73 | - **Completeness**: Check for missing logs, gaps in timeline, or truncated entries
 74 | - **Consistency**: Verify log format consistency and appropriate log levels
 75 | - **Information Density**: Assess if logs provide sufficient debugging information
 76 | - **Noise vs Signal**: Identify verbose logging that may obscure critical issues
 77 | 
 78 | ### 9. **Security Pattern Recognition**
 79 | - **Authentication Failures**: Detect unusual login patterns or credential issues
 80 | - **Access Violations**: Identify unauthorized access attempts or permission failures
 81 | - **Injection Attempts**: Look for SQL injection, XSS, or other attack patterns
 82 | - **Rate Limiting**: Detect potential DoS attacks or abuse patterns
 83 | 
 84 | ### 10. **Distributed System Tracing**
 85 | - **Request Flow**: Follow requests across microservices and components
 86 | - **Correlation IDs**: Track specific transactions through the entire system
 87 | - **Service Dependencies**: Map inter-service communication and failures
 88 | - **Network Issues**: Identify connectivity problems between services
 89 | 
 90 | ### 11. **Codebase Investigation**
 91 | Use available tools to:
 92 | - Search for error-related code patterns in the codebase
 93 | - Read relevant files to understand component implementation
 94 | - Check configuration files and dependencies
 95 | - Investigate component relationships and initialization order
 96 | 
 97 | ### 12. **External Research** (if needed)
 98 | - Search online for known issues with specific error patterns
 99 | - Look up API/library documentation for error codes
100 | - Research best practices for identified failure modes
101 | 
102 | ### 13. **Root Cause Diagnosis**
103 | Provide:
104 | - **Primary Root Cause**: The fundamental issue causing the problem
105 | - **Secondary Issues**: Related problems that may compound the issue
106 | - **Evidence**: Specific log entries and code references supporting the diagnosis
107 | - **Impact Assessment**: How this affects system functionality
108 | - **Confidence Level**: How certain you are about the diagnosis (High/Medium/Low)
109 | 
110 | ### 14. **Business Impact Analysis**
111 | - **User Experience Impact**: How failures affect end-user functionality
112 | - **Feature Availability**: Which features are degraded or unavailable
113 | - **Performance Impact**: Response time or throughput effects
114 | - **Data Integrity**: Whether data loss or corruption is possible
115 | 
116 | ### 15. **Actionable Recommendations**
117 | - **Immediate Fixes**: Steps to resolve the primary issue
118 | - **Preventive Measures**: Changes to prevent recurrence
119 | - **Monitoring Setup**: Specific alerts and metrics to implement
120 | - **Testing Strategy**: How to verify the fix works
121 | - **Performance Optimizations**: Recommendations for improving system performance
122 | 
123 | ### 16. **Proactive Monitoring Strategy**
124 | - **Alert Thresholds**: Specific metrics and thresholds to monitor
125 | - **Dashboard Metrics**: Key performance indicators to track
126 | - **Log Retention**: Recommendations for log storage and rotation
127 | - **Health Checks**: Additional monitoring to implement
128 | - **SLA/SLO Recommendations**: Service level objectives to establish
129 | 
130 | ### 17. **Forensic Timeline & Investigation Notes**
131 | Document:
132 | - **Exact Timeline**: Chronological sequence of critical events
133 | - **Tools Used**: Searches performed and code files examined
134 | - **Evidence Chain**: How conclusions were reached
135 | - **Assumptions Made**: Any assumptions during analysis
136 | - **Further Investigation**: Areas needing additional research
137 | 
138 | ---
139 | 
140 | ## Expected Deliverable
141 | 
142 | Provide a comprehensive diagnostic report with:
143 | 
144 | ### **Executive Summary**
145 | - One-sentence problem statement
146 | - Primary root cause
147 | - Business impact level
148 | - Recommended priority (P0/P1/P2/P3)
149 | 
150 | ### **Technical Analysis**
151 | - Detailed findings with specific file/line references
152 | - Evidence-based conclusions with supporting log entries
153 | - Performance and security implications
154 | 
155 | ### **Action Plan**
156 | - Prioritized fix recommendations with implementation details
157 | - Risk assessment for each recommendation
158 | - Estimated effort and timeline
159 | 
160 | ### **Prevention Strategy**
161 | - Monitoring and alerting recommendations
162 | - Code quality improvements
163 | - Process changes to prevent recurrence
164 | 
165 | ### **Follow-up Plan**
166 | - Verification steps post-fix
167 | - Metrics to monitor for regression
168 | - Documentation updates needed
169 | 
```
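
The `{{logs}}` and `{{context}}` placeholders above are ordinary Nunjucks variables, so they are filled by the same `processTemplate` path shown in `jsonUtils.ts` earlier on this page. A hedged sketch of that wiring follows; reading the markdown file directly and the relative import path are simplifying assumptions, since the real server loads templates through its prompt system.

```typescript
// Simplified sketch; paths are assumptions and the real server resolves the
// template through the prompt loading system rather than a direct file read.
import { readFile } from 'fs/promises';
import { processTemplate } from '../../src/utils/jsonUtils.js';

async function renderAnalyzeLogs(logs: string, context: string): Promise<string> {
  const md = await readFile('server/prompts/debugging/analyze_logs.md', 'utf8');

  // Only the "User Message Template" section would normally be rendered; passing
  // the whole file still substitutes {{logs}} and {{context}}.
  return processTemplate(md, { logs, context });
}
```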

--------------------------------------------------------------------------------
/server/src/gates/core/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Core Gate System - Main Exports
  3 |  * Provides guidance and validation capabilities for prompt execution
  4 |  */
  5 | 
  6 | import type { ValidationResult } from "../../execution/types.js";
  7 | import { GateSystemManager } from "../gate-state-manager.js";
  8 | import { GateLoader, createGateLoader } from "./gate-loader.js";
  9 | import { GateValidator, createGateValidator } from "./gate-validator.js";
 10 | import {
 11 |   TemporaryGateRegistry,
 12 |   createTemporaryGateRegistry,
 13 |   type TemporaryGateDefinition,
 14 | } from "./temporary-gate-registry.js";
 15 | 
 16 | export { GateLoader, createGateLoader } from "./gate-loader.js";
 17 | export { GateValidator, createGateValidator } from "./gate-validator.js";
 18 | export {
 19 |   TemporaryGateRegistry,
 20 |   createTemporaryGateRegistry,
 21 |   type TemporaryGateDefinition as TemporaryGateRegistryDefinition,
 22 | } from "./temporary-gate-registry.js";
 23 | 
 24 | export type { ValidationResult } from "../../execution/types.js";
 25 | export type {
 26 |   GateActivationResult,
 27 |   GatePassCriteria,
 28 |   LightweightGateDefinition,
 29 |   ValidationCheck,
 30 |   ValidationContext,
 31 | } from "../types.js";
 32 | export type { GateValidationStatistics } from "./gate-validator.js";
 33 | 
 34 | /**
 35 |  * Core gate system manager with temporary gate support
 36 |  */
 37 | export class LightweightGateSystem {
 38 |   private gateSystemManager?: GateSystemManager;
 39 |   private temporaryGateRegistry?: TemporaryGateRegistry;
 40 | 
 41 |   constructor(
 42 |     public gateLoader: GateLoader,
 43 |     public gateValidator: GateValidator,
 44 |     temporaryGateRegistry?: TemporaryGateRegistry
 45 |   ) {
 46 |     this.temporaryGateRegistry = temporaryGateRegistry;
 47 |   }
 48 | 
 49 |   /**
 50 |    * Set gate system manager for runtime state checking
 51 |    */
 52 |   setGateSystemManager(gateSystemManager: GateSystemManager): void {
 53 |     this.gateSystemManager = gateSystemManager;
 54 |   }
 55 | 
 56 |   /**
 57 |    * Set temporary gate registry
 58 |    */
 59 |   setTemporaryGateRegistry(temporaryGateRegistry: TemporaryGateRegistry): void {
 60 |     this.temporaryGateRegistry = temporaryGateRegistry;
 61 |   }
 62 | 
 63 |   /**
 64 |    * Create a temporary gate
 65 |    */
 66 |   createTemporaryGate(
 67 |     definition: Omit<TemporaryGateDefinition, "id" | "created_at">,
 68 |     scopeId?: string
 69 |   ): string | null {
 70 |     if (!this.temporaryGateRegistry) {
 71 |       return null;
 72 |     }
 73 |     return this.temporaryGateRegistry.createTemporaryGate(definition, scopeId);
 74 |   }
 75 | 
 76 |   /**
 77 |    * Get temporary gates for scope
 78 |    */
 79 |   getTemporaryGatesForScope(
 80 |     scope: string,
 81 |     scopeId: string
 82 |   ): TemporaryGateDefinition[] {
 83 |     if (!this.temporaryGateRegistry) {
 84 |       return [];
 85 |     }
 86 |     return this.temporaryGateRegistry.getTemporaryGatesForScope(scope, scopeId);
 87 |   }
 88 | 
 89 |   /**
 90 |    * Clean up temporary gates for scope
 91 |    */
 92 |   cleanupTemporaryGates(scope: string, scopeId?: string): number {
 93 |     if (!this.temporaryGateRegistry) {
 94 |       return 0;
 95 |     }
 96 |     return this.temporaryGateRegistry.cleanupScope(scope, scopeId);
 97 |   }
 98 | 
 99 |   /**
100 |    * Check if gate system is enabled
101 |    */
102 |   private isGateSystemEnabled(): boolean {
103 |     // If no gate system manager is set, default to enabled for backwards compatibility
104 |     if (!this.gateSystemManager) {
105 |       return true;
106 |     }
107 |     return this.gateSystemManager.isGateSystemEnabled();
108 |   }
109 | 
110 |   /**
111 |    * Get guidance text for active gates
112 |    */
113 |   async getGuidanceText(
114 |     gateIds: string[],
115 |     context: {
116 |       promptCategory?: string;
117 |       framework?: string;
118 |       explicitRequest?: boolean;
119 |     }
120 |   ): Promise<string[]> {
121 |     // Check if gate system is enabled
122 |     if (!this.isGateSystemEnabled()) {
123 |       return []; // Return empty guidance if gates are disabled
124 |     }
125 | 
126 |     const activation = await this.gateLoader.getActiveGates(gateIds, context);
127 |     return activation.guidanceText;
128 |   }
129 | 
130 |   /**
131 |    * Validate content against active gates
132 |    */
133 |   async validateContent(
134 |     gateIds: string[],
135 |     content: string,
136 |     validationContext: {
137 |       promptId?: string;
138 |       stepId?: string;
139 |       attemptNumber?: number;
140 |       previousAttempts?: string[];
141 |       metadata?: Record<string, any>;
142 |     }
143 |   ): Promise<ValidationResult[]> {
144 |     // Check if gate system is enabled
145 |     if (!this.isGateSystemEnabled()) {
146 |       // Return success results for all gates if system is disabled
147 |       return gateIds.map((gateId) => ({
148 |         gateId,
149 |         valid: true,
150 |         passed: true,
151 |         message: "Gate system disabled - validation skipped",
152 |         score: 1.0,
153 |         details: {},
154 |         retryHints: [],
155 |         suggestions: [],
156 |       }));
157 |     }
158 | 
159 |     const startTime = performance.now();
160 | 
161 |     const context = {
162 |       content,
163 |       metadata: validationContext.metadata,
164 |       executionContext: {
165 |         promptId: validationContext.promptId,
166 |         stepId: validationContext.stepId,
167 |         attemptNumber: validationContext.attemptNumber,
168 |         previousAttempts: validationContext.previousAttempts,
169 |       },
170 |     };
171 | 
172 |     const results = await this.gateValidator.validateGates(gateIds, context);
173 | 
174 |     // Record validation metrics if gate system manager is available
175 |     if (this.gateSystemManager) {
176 |       const executionTime = performance.now() - startTime;
177 |       const success = results.every((r) => r.passed);
178 |       this.gateSystemManager.recordValidation(success, executionTime);
179 |     }
180 | 
181 |     return results;
182 |   }
183 | 
184 |   /**
185 |    * Check if content should be retried based on validation results
186 |    */
187 |   shouldRetry(
188 |     validationResults: ValidationResult[],
189 |     currentAttempt: number,
190 |     maxAttempts: number = 3
191 |   ): boolean {
192 |     return this.gateValidator.shouldRetry(
193 |       validationResults,
194 |       currentAttempt,
195 |       maxAttempts
196 |     );
197 |   }
198 | 
199 |   /**
200 |    * Get combined retry hints from all failed validations
201 |    */
202 |   getRetryHints(validationResults: ValidationResult[]): string[] {
203 |     const allHints: string[] = [];
204 | 
205 |     for (const result of validationResults) {
206 |       if (!result.passed) {
207 |         allHints.push(`**${result.gateId}:**`);
208 |         if (result.retryHints) {
209 |           allHints.push(...result.retryHints);
210 |         }
211 |         allHints.push(""); // Empty line for separation
212 |       }
213 |     }
214 | 
215 |     return allHints;
216 |   }
217 | 
218 |   /**
219 |    * Get system statistics
220 |    */
221 |   getStatistics() {
222 |     return {
223 |       gateLoader: this.gateLoader.getStatistics(),
224 |       gateValidator: this.gateValidator.getStatistics(),
225 |     };
226 |   }
227 | 
228 |   /**
229 |    * Get the temporary gate registry instance (Phase 3 enhancement)
230 |    */
231 |   getTemporaryGateRegistry(): TemporaryGateRegistry | undefined {
232 |     return this.temporaryGateRegistry;
233 |   }
234 | }
235 | 
236 | /**
237 |  * Create a complete core gate system with optional temporary gate support
238 |  */
239 | export function createLightweightGateSystem(
240 |   logger: any,
241 |   gatesDirectory?: string,
242 |   gateSystemManager?: GateSystemManager,
243 |   options?: {
244 |     enableTemporaryGates?: boolean;
245 |     maxMemoryGates?: number;
246 |     defaultExpirationMs?: number;
247 |     llmConfig?: any; // LLMIntegrationConfig from types
248 |   }
249 | ): LightweightGateSystem {
250 |   const gateLoader = createGateLoader(logger, gatesDirectory);
251 |   const gateValidator = createGateValidator(logger, gateLoader, options?.llmConfig);
252 | 
253 |   // Create temporary gate registry if enabled
254 |   let temporaryGateRegistry: TemporaryGateRegistry | undefined;
255 |   if (options?.enableTemporaryGates !== false) {
256 |     temporaryGateRegistry = createTemporaryGateRegistry(logger, {
257 |       maxMemoryGates: options?.maxMemoryGates,
258 |       defaultExpirationMs: options?.defaultExpirationMs,
259 |     });
260 |   }
261 | 
262 |   const gateSystem = new LightweightGateSystem(
263 |     gateLoader,
264 |     gateValidator,
265 |     temporaryGateRegistry
266 |   );
267 | 
268 |   if (gateSystemManager) {
269 |     gateSystem.setGateSystemManager(gateSystemManager);
270 |   }
271 | 
272 |   return gateSystem;
273 | }
274 | 
```
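
A usage sketch of wiring this module together: build the system via the factory, collect guidance text before execution, validate the produced content afterwards, and surface retry hints when validation fails. Gate IDs, the prompt ID, and the content string are placeholders; only functions exported above are used.

```typescript
// Illustrative wiring of the lightweight gate system; IDs and content are placeholders.
import { createLightweightGateSystem } from './index.js';

async function demoGateFlow(logger: any): Promise<void> {
  const gateSystem = createLightweightGateSystem(logger);

  // 1. Collect guidance for the gates relevant to this execution.
  const guidance = await gateSystem.getGuidanceText(['quality-check'], {
    promptCategory: 'analysis',
    explicitRequest: true,
  });
  console.log(guidance.join('\n\n'));

  // 2. Validate the produced content against the same gates.
  const results = await gateSystem.validateContent(['quality-check'], 'Generated analysis text...', {
    promptId: 'example_prompt',
    attemptNumber: 1,
  });

  // 3. Decide whether to retry, and show combined hints if so.
  if (gateSystem.shouldRetry(results, 1)) {
    console.log(gateSystem.getRetryHints(results).join('\n'));
  }
}
```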

--------------------------------------------------------------------------------
/server/tests/unit/unified-parsing-system.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Simplified Unified Parsing System Tests
  3 |  *
  4 |  * Core functionality tests focusing on essential parsing behavior
  5 |  */
  6 | 
  7 | import { describe, test, expect, beforeEach, jest } from '@jest/globals';
  8 | import { Logger } from '../../src/logging/index.js';
  9 | import { PromptData } from '../../src/types/index.js';
 10 | import {
 11 |   createParsingSystem,
 12 |   type ExecutionContext
 13 | } from '../../src/execution/parsers/index.js';
 14 | 
 15 | // Mock logger for testing
 16 | const mockLogger: Logger = {
 17 |   debug: jest.fn(),
 18 |   info: jest.fn(),
 19 |   warn: jest.fn(),
 20 |   error: jest.fn()
 21 | } as any;
 22 | 
 23 | // Sample prompt data for testing
 24 | const testPrompts: PromptData[] = [
 25 |   {
 26 |     id: 'test_prompt',
 27 |     name: 'test_prompt',
 28 |     description: 'A test prompt',
 29 |     userMessageTemplate: 'Test message: {{content}}',
 30 |     arguments: [
 31 |       {
 32 |         name: 'content',
 33 |         description: 'Content to process',
 34 |         required: true
 35 |       }
 36 |     ],
 37 |     category: 'test'
 38 |   },
 39 |   {
 40 |     id: 'multi_arg_prompt',
 41 |     name: 'multi_arg_prompt',
 42 |     description: 'A prompt with multiple arguments',
 43 |     userMessageTemplate: 'Process {{text}} with {{format}}',
 44 |     arguments: [
 45 |       {
 46 |         name: 'text',
 47 |         description: 'Text to process',
 48 |         required: true
 49 |       },
 50 |       {
 51 |         name: 'format',
 52 |         description: 'Output format',
 53 |         required: false
 54 |       }
 55 |     ],
 56 |     category: 'test'
 57 |   }
 58 | ];
 59 | 
 60 | describe('Unified Parsing System - Core Functionality', () => {
 61 |   let parsingSystem: ReturnType<typeof createParsingSystem>;
 62 | 
 63 |   beforeEach(() => {
 64 |     parsingSystem = createParsingSystem(mockLogger);
 65 |   });
 66 | 
 67 |   describe('Command Parsing', () => {
 68 |     test('should parse simple >>prompt format', async () => {
 69 |       const result = await parsingSystem.commandParser.parseCommand(
 70 |         '>>test_prompt hello world',
 71 |         testPrompts
 72 |       );
 73 | 
 74 |       expect(result.promptId).toBe('test_prompt');
 75 |       expect(result.rawArgs).toBe('hello world');
 76 |       expect(result.format).toBe('simple');
 77 |     });
 78 | 
 79 |     test('should parse JSON command format', async () => {
 80 |       const command = '{"command": ">>test_prompt", "args": "hello world"}';
 81 |       const result = await parsingSystem.commandParser.parseCommand(command, testPrompts);
 82 | 
 83 |       expect(result.promptId).toBe('test_prompt');
 84 |       expect(result.format).toBe('json');
 85 |     });
 86 | 
 87 |     test('should handle unknown prompts', async () => {
 88 |       await expect(
 89 |         parsingSystem.commandParser.parseCommand('>>unknown_prompt', testPrompts)
 90 |       ).rejects.toThrow('Unknown prompt: unknown_prompt');
 91 |     });
 92 |   });
 93 | 
 94 |   describe('Argument Processing', () => {
 95 |     test('should process simple arguments', async () => {
 96 |       const result = await parsingSystem.argumentProcessor.processArguments(
 97 |         'hello world',
 98 |         testPrompts[0]
 99 |       );
100 | 
101 |       expect(result.processedArgs.content).toBe('hello world');
102 |       expect(result.metadata.processingStrategy).toBe('simple');
103 |     });
104 | 
105 |     test('should process JSON arguments', async () => {
106 |       const jsonArgs = '{"text": "hello", "format": "json"}';
107 |       const result = await parsingSystem.argumentProcessor.processArguments(
108 |         jsonArgs,
109 |         testPrompts[1]
110 |       );
111 | 
112 |       expect(result.processedArgs.text).toBe('hello');
113 |       expect(result.processedArgs.format).toBe('json');
114 |       expect(result.metadata.processingStrategy).toBe('json');
115 |     });
116 | 
117 |     test('should process key-value pairs', async () => {
118 |       const kvArgs = 'text=hello format=xml';
119 |       const result = await parsingSystem.argumentProcessor.processArguments(
120 |         kvArgs,
121 |         testPrompts[1]
122 |       );
123 | 
124 |       expect(result.processedArgs.text).toBe('hello');
125 |       expect(result.processedArgs.format).toBe('xml');
126 |       expect(result.metadata.processingStrategy).toBe('keyvalue');
127 |     });
128 |   });
129 | 
130 |   describe('Context Resolution', () => {
131 |     test('should resolve from environment variables', async () => {
132 |       process.env.PROMPT_TEST = 'environment_value';
133 | 
134 |       const result = await parsingSystem.contextResolver.resolveContext('test');
135 | 
136 |       expect(result.value).toBe('environment_value');
137 |       expect(result.source).toBe('environment_variables');
138 | 
139 |       delete process.env.PROMPT_TEST;
140 |     });
141 | 
142 |     test('should generate placeholders for unknown keys', async () => {
143 |       const result = await parsingSystem.contextResolver.resolveContext('unknown_key');
144 | 
145 |       expect(result.source).toBe('generated_placeholder');
146 |       expect(result.value).toContain('unknown_key');
147 |     });
148 | 
 149 |     test('should use caching for repeated resolutions', async () => {
 150 |       await parsingSystem.contextResolver.resolveContext('cached_key');
 151 |       await parsingSystem.contextResolver.resolveContext('cached_key');
152 | 
153 |       const stats = parsingSystem.contextResolver.getStats();
154 |       expect(stats.cacheHits).toBe(1);
155 |     });
156 |   });
157 | 
158 |   describe('Integration', () => {
159 |     test('should work end-to-end', async () => {
160 |       // Parse command
161 |       const parseResult = await parsingSystem.commandParser.parseCommand(
162 |         '>>multi_arg_prompt hello world',
163 |         testPrompts
164 |       );
165 | 
166 |       // Process arguments
167 |       const context: ExecutionContext = {
168 |         conversationHistory: [],
169 |         environmentVars: {},
170 |         promptDefaults: { format: 'text' }
171 |       };
172 | 
173 |       const argResult = await parsingSystem.argumentProcessor.processArguments(
174 |         parseResult.rawArgs,
175 |         testPrompts[1],
176 |         context
177 |       );
178 | 
179 |       expect(parseResult.promptId).toBe('multi_arg_prompt');
180 |       expect(argResult.processedArgs.text).toBe('hello world');
181 |     });
182 |   });
183 | 
184 |   describe('Performance', () => {
185 |     test('should complete parsing within reasonable time', async () => {
186 |       const start = Date.now();
187 | 
188 |       for (let i = 0; i < 10; i++) {
189 |         await parsingSystem.commandParser.parseCommand(
190 |           `>>test_prompt test${i}`,
191 |           testPrompts
192 |         );
193 |       }
194 | 
195 |       const duration = Date.now() - start;
196 |       expect(duration).toBeLessThan(1000); // Should complete 10 parses in under 1 second
197 |     });
198 | 
199 |     test('should maintain reasonable memory usage', async () => {
200 |       const initialMemory = process.memoryUsage().heapUsed;
201 | 
202 |       // Perform multiple operations
203 |       for (let i = 0; i < 50; i++) {
204 |         await parsingSystem.commandParser.parseCommand(`>>test_prompt test${i}`, testPrompts);
205 |         await parsingSystem.argumentProcessor.processArguments(`test${i}`, testPrompts[0]);
206 |       }
207 | 
208 |       // Force garbage collection if available
209 |       if (global.gc) {
210 |         global.gc();
211 |       }
212 | 
213 |       const finalMemory = process.memoryUsage().heapUsed;
214 |       const memoryIncrease = (finalMemory - initialMemory) / 1024 / 1024; // MB
215 | 
216 |       expect(memoryIncrease).toBeLessThan(10); // Should not increase by more than 10MB
217 |     });
218 |   });
219 | 
220 |   describe('Error Handling', () => {
221 |     test('should handle malformed JSON gracefully', async () => {
222 |       await expect(
223 |         parsingSystem.commandParser.parseCommand('{"invalid": json', testPrompts)
224 |       ).rejects.toThrow();
225 |     });
226 | 
227 |     test('should handle empty commands', async () => {
228 |       await expect(
229 |         parsingSystem.commandParser.parseCommand('', testPrompts)
230 |       ).rejects.toThrow('Command cannot be empty');
231 |     });
232 | 
 233 |     test('should provide helpful error messages', async () => {
 234 |       // Use rejects so the assertion still runs (and fails) if nothing throws;
 235 |       // a bare try/catch would pass silently when no error occurs.
 236 |       await expect(
 237 |         parsingSystem.commandParser.parseCommand('invalid format', testPrompts)
 238 |       ).rejects.toThrow('Supported command formats:');
 239 |     });
240 |   });
241 | });
```
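
Taken together, the tests above pin down the public surface of the parsing system: a `createParsingSystem(logger)` factory whose `commandParser` resolves `>>prompt` commands and whose `argumentProcessor` maps raw argument strings onto a prompt's declared arguments. The sketch below shows how a caller might wire those two steps together. It is a minimal illustration assuming only the method signatures exercised in the tests; `runCommand` and the import paths (copied from the test file) are illustrative, not part of the server's actual entry points.

```typescript
// Minimal wiring sketch, assuming only the factory and parser methods exercised
// in the tests above. `runCommand` is a hypothetical helper, not server code;
// import paths are copied from the test file and would differ elsewhere.
import { createParsingSystem, type ExecutionContext } from '../../src/execution/parsers/index.js';
import type { Logger } from '../../src/logging/index.js';
import type { PromptData } from '../../src/types/index.js';

async function runCommand(command: string, prompts: PromptData[], logger: Logger) {
  const parsing = createParsingSystem(logger);

  // 1. Resolve ">>prompt_id raw args" (or the JSON form) to a prompt id + raw args.
  const parsed = await parsing.commandParser.parseCommand(command, prompts);

  // 2. Convert the raw argument string into named arguments for that prompt.
  const context: ExecutionContext = {
    conversationHistory: [],
    environmentVars: {},
    promptDefaults: {},
  };
  const prompt = prompts.find((p) => p.id === parsed.promptId);
  if (!prompt) throw new Error(`Unknown prompt: ${parsed.promptId}`);

  const args = await parsing.argumentProcessor.processArguments(parsed.rawArgs, prompt, context);
  return { promptId: parsed.promptId, args: args.processedArgs };
}
```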

--------------------------------------------------------------------------------
/server/src/config/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Configuration Management Module
  3 |  * Handles loading and validation of server configuration from config.json
  4 |  */
  5 | 
  6 | import { readFile } from "fs/promises";
  7 | import path from "path";
  8 | import { Config, AnalysisConfig, SemanticAnalysisConfig, LLMIntegrationConfig, AnalysisMode, LoggingConfig } from "../types/index.js";
  9 | // Removed: ToolDescriptionManager import to break circular dependency
 10 | // Now injected via dependency injection pattern
 11 | 
 12 | /**
 13 |  * Infer the optimal analysis mode based on LLM integration configuration
 14 |  */
 15 | function inferAnalysisMode(llmConfig: LLMIntegrationConfig): AnalysisMode {
 16 |   // Use semantic mode if LLM integration is properly configured
 17 |   if (llmConfig.enabled && llmConfig.endpoint) {
 18 |     // For non-localhost endpoints, require API key
 19 |     if (llmConfig.endpoint.includes('localhost') || llmConfig.endpoint.includes('127.0.0.1') || llmConfig.apiKey) {
 20 |       return "semantic";
 21 |     }
 22 |   }
 23 |   
 24 |   // Default to structural mode
 25 |   return "structural";
 26 | }
 27 | 
 28 | /**
 29 |  * Default configuration values
 30 |  */
 31 | const DEFAULT_ANALYSIS_CONFIG: AnalysisConfig = {
 32 |   semanticAnalysis: {
 33 |     // mode will be inferred automatically based on LLM integration
 34 |     llmIntegration: {
 35 |       enabled: false,
 36 |       apiKey: null,
 37 |       endpoint: null,
 38 |       model: "gpt-4",
 39 |       maxTokens: 1000,
 40 |       temperature: 0.1,
 41 |     },
 42 |   },
 43 | };
 44 | 
 45 | // DEFAULT_FRAMEWORK_CONFIG removed - framework state managed at runtime
 46 | // Use system_control MCP tool to enable/disable and switch frameworks
 47 | 
 48 | 
 49 | const DEFAULT_CONFIG: Config = {
 50 |   server: {
 51 |     name: "Claude Custom Prompts",
 52 |     version: "1.0.0",
 53 |     port: 3456,
 54 |   },
 55 |   prompts: {
 56 |     file: "prompts/promptsConfig.json",
 57 |   },
 58 |   analysis: DEFAULT_ANALYSIS_CONFIG,
 59 |   transports: {
 60 |     default: "stdio",
 61 |     sse: { enabled: false },
 62 |     stdio: { enabled: true },
 63 |   },
 64 | };
 65 | 
 66 | /**
 67 |  * Configuration manager class
 68 |  */
 69 | export class ConfigManager {
 70 |   private config: Config;
 71 |   private configPath: string;
 72 |   // Removed: private toolDescriptionManager - now injected via dependency injection
 73 | 
 74 |   constructor(configPath: string) {
 75 |     this.configPath = configPath;
 76 |     this.config = DEFAULT_CONFIG;
 77 |   }
 78 | 
 79 |   /**
 80 |    * Load configuration from file
 81 |    */
 82 |   async loadConfig(): Promise<Config> {
 83 |     try {
 84 |       const configContent = await readFile(this.configPath, "utf8");
 85 |       this.config = JSON.parse(configContent) as Config;
 86 | 
 87 |       // Validate and set defaults for any missing properties
 88 |       this.validateAndSetDefaults();
 89 | 
 90 |       return this.config;
 91 |     } catch (error) {
 92 |       console.error(
 93 |         `Error loading configuration from ${this.configPath}:`,
 94 |         error
 95 |       );
 96 |       console.info("Using default configuration");
 97 |       this.config = DEFAULT_CONFIG;
 98 |       return this.config;
 99 |     }
100 |   }
101 | 
102 |   /**
103 |    * Get current configuration
104 |    */
105 |   getConfig(): Config {
106 |     return this.config;
107 |   }
108 | 
109 |   /**
110 |    * Get server configuration
111 |    */
112 |   getServerConfig() {
113 |     return this.config.server;
114 |   }
115 | 
116 |   /**
117 |    * Get prompts configuration
118 |    */
119 |   getPromptsConfig() {
120 |     return this.config.prompts;
121 |   }
122 | 
123 |   /**
124 |    * Get transports configuration
125 |    */
126 |   getTransportsConfig() {
127 |     return this.config.transports;
128 |   }
129 | 
130 |   /**
131 |    * Get analysis configuration
132 |    */
133 |   getAnalysisConfig(): AnalysisConfig {
134 |     return this.config.analysis || DEFAULT_ANALYSIS_CONFIG;
135 |   }
136 | 
137 |   /**
138 |    * Get semantic analysis configuration
139 |    */
140 |   getSemanticAnalysisConfig(): SemanticAnalysisConfig {
141 |     return this.getAnalysisConfig().semanticAnalysis;
142 |   }
143 | 
144 |   /**
145 |    * Get logging configuration
146 |    */
147 |   getLoggingConfig(): LoggingConfig {
148 |     return this.config.logging || {
149 |       directory: "./logs",
150 |       level: "info"
151 |     };
152 |   }
153 | 
154 |   /**
155 |    * Get the port number, with environment variable override
156 |    */
157 |   getPort(): number {
158 |     return process.env.PORT
159 |       ? parseInt(process.env.PORT, 10)
160 |       : this.config.server.port;
161 |   }
162 | 
163 |   /**
164 |    * Determine transport from command line arguments or configuration
165 |    */
166 |   getTransport(args: string[]): string {
167 |     const transportArg = args.find((arg: string) =>
168 |       arg.startsWith("--transport=")
169 |     );
170 |     return transportArg
171 |       ? transportArg.split("=")[1]
172 |       : this.config.transports.default;
173 |   }
174 | 
175 |   /**
176 |    * Get config file path
177 |    */
178 |   getConfigPath(): string {
179 |     return this.configPath;
180 |   }
181 | 
182 |   /**
183 |    * Get prompts file path relative to config directory
184 |    */
185 |   getPromptsFilePath(): string {
186 |     const configDir = path.dirname(this.configPath);
187 |     return path.join(configDir, this.config.prompts.file);
188 |   }
189 | 
190 |   /**
191 |    * Get server root directory path
192 |    */
193 |   getServerRoot(): string {
194 |     return path.dirname(this.configPath);
195 |   }
196 | 
197 |   // Removed: ToolDescriptionManager methods - now handled via dependency injection in runtime/application.ts
198 | 
199 |   /**
200 |    * Validate configuration and set defaults for missing properties
201 |    */
202 |   private validateAndSetDefaults(): void {
203 |     // Ensure server config exists
204 |     if (!this.config.server) {
205 |       this.config.server = DEFAULT_CONFIG.server;
206 |     } else {
207 |       this.config.server = {
208 |         ...DEFAULT_CONFIG.server,
209 |         ...this.config.server,
210 |       };
211 |     }
212 | 
213 |     // Ensure prompts config exists
214 |     if (!this.config.prompts) {
215 |       this.config.prompts = DEFAULT_CONFIG.prompts;
216 |     } else {
217 |       this.config.prompts = {
218 |         ...DEFAULT_CONFIG.prompts,
219 |         ...this.config.prompts,
220 |       };
221 |     }
222 | 
223 |     // Ensure analysis config exists
224 |     if (!this.config.analysis) {
225 |       this.config.analysis = DEFAULT_ANALYSIS_CONFIG;
226 |     } else {
227 |       this.config.analysis = this.validateAnalysisConfig(this.config.analysis);
228 |     }
229 | 
230 |     // Ensure transports config exists
231 |     if (!this.config.transports) {
232 |       this.config.transports = DEFAULT_CONFIG.transports;
233 |     } else {
234 |       this.config.transports = {
235 |         ...DEFAULT_CONFIG.transports,
236 |         ...this.config.transports,
237 |       };
238 |     }
239 |   }
240 | 
241 |   /**
242 |    * Validate and merge analysis configuration with defaults
243 |    */
244 |   private validateAnalysisConfig(analysisConfig: Partial<AnalysisConfig>): AnalysisConfig {
245 |     const semanticAnalysis = analysisConfig.semanticAnalysis || {} as any;
246 |     
247 |     // Build LLM integration config first
248 |     const llmIntegration = {
249 |       enabled: semanticAnalysis.llmIntegration?.enabled ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.enabled,
250 |       apiKey: semanticAnalysis.llmIntegration?.apiKey ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.apiKey,
251 |       endpoint: semanticAnalysis.llmIntegration?.endpoint ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.endpoint,
252 |       model: semanticAnalysis.llmIntegration?.model ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.model,
253 |       maxTokens: semanticAnalysis.llmIntegration?.maxTokens ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.maxTokens,
254 |       temperature: semanticAnalysis.llmIntegration?.temperature ?? DEFAULT_ANALYSIS_CONFIG.semanticAnalysis.llmIntegration.temperature,
255 |     };
256 | 
257 |     // Infer analysis mode based on LLM configuration if not explicitly set
258 |     const validModes: AnalysisMode[] = ["structural", "semantic"];
259 |     const mode = semanticAnalysis.mode && validModes.includes(semanticAnalysis.mode as AnalysisMode)
260 |       ? semanticAnalysis.mode as AnalysisMode
261 |       : inferAnalysisMode(llmIntegration);
262 | 
263 |     return {
264 |       semanticAnalysis: {
265 |         mode,
266 |         llmIntegration,
267 |       },
268 |     };
269 |   }
270 | }
271 | 
272 | /**
273 |  * Create and initialize a configuration manager
274 |  */
275 | export async function createConfigManager(
276 |   configPath: string
277 | ): Promise<ConfigManager> {
278 |   const configManager = new ConfigManager(configPath);
279 |   await configManager.loadConfig();
280 |   return configManager;
281 | }
282 | 
283 | 
```
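
For reference, a boot-time consumer of this module only needs the exported `createConfigManager` factory and a handful of getters. The following is a minimal sketch using only the methods defined above; the `config.json` location and the use of `process.argv` are illustrative assumptions, not the server's actual startup code.

```typescript
// Minimal startup sketch using only the ConfigManager API defined above.
// The config path and argv handling here are illustrative assumptions.
import path from "path";
import { createConfigManager } from "./config/index.js";

async function bootstrap() {
  // Load config.json (falls back to DEFAULT_CONFIG if the file is missing or invalid).
  const configPath = path.resolve(process.cwd(), "config.json");
  const configManager = await createConfigManager(configPath);

  const port = configManager.getPort();                       // PORT env var overrides config.server.port
  const transport = configManager.getTransport(process.argv); // "--transport=..." overrides transports.default
  const promptsFile = configManager.getPromptsFilePath();     // resolved relative to the config directory

  return { port, transport, promptsFile };
}
```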

--------------------------------------------------------------------------------
/server/src/mcp-tools/prompt-manager/utils/category-manager.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Category operations and cleanup utilities
  3 |  */
  4 | 
  5 | import * as fs from "fs/promises";
  6 | import path from "path";
  7 | import { readFile } from "fs/promises";
  8 | import { Logger } from "../../../logging/index.js";
  9 | import { PromptsConfigFile } from "../../../types/index.js";
 10 | import { safeWriteFile } from "../../../prompts/promptUtils.js";
 11 | import { CategoryResult, OperationResult } from "../core/types.js";
 12 | 
 13 | /**
 14 |  * Category management operations
 15 |  */
 16 | export class CategoryManager {
 17 |   private logger: Logger;
 18 | 
 19 |   constructor(logger: Logger) {
 20 |     this.logger = logger;
 21 |   }
 22 | 
 23 |   /**
 24 |    * Ensure category exists in the configuration
 25 |    */
 26 |   async ensureCategoryExists(
 27 |     category: string,
 28 |     promptsConfig: PromptsConfigFile,
 29 |     promptsFile: string
 30 |   ): Promise<CategoryResult> {
 31 |     const effectiveCategory = category.toLowerCase().replace(/\s+/g, "-");
 32 | 
 33 |     const exists = promptsConfig.categories.some(cat => cat.id === effectiveCategory);
 34 | 
 35 |     if (!exists) {
 36 |       // Create new category
 37 |       promptsConfig.categories.push({
 38 |         id: effectiveCategory,
 39 |         name: category,
 40 |         description: `Prompts related to ${category}`
 41 |       });
 42 | 
 43 |       // Create directory and files
 44 |       const categoryDir = path.join(path.dirname(promptsFile), effectiveCategory);
 45 |       await fs.mkdir(categoryDir, { recursive: true });
 46 | 
 47 |       const categoryPromptsPath = path.join(categoryDir, "prompts.json");
 48 |       await safeWriteFile(categoryPromptsPath, JSON.stringify({ prompts: [] }, null, 2), "utf8");
 49 | 
 50 |       // Add to imports
 51 |       const relativePath = path.join(effectiveCategory, "prompts.json").replace(/\\/g, "/");
 52 |       if (!promptsConfig.imports.includes(relativePath)) {
 53 |         promptsConfig.imports.push(relativePath);
 54 |       }
 55 | 
 56 |       // Save config
 57 |       await safeWriteFile(promptsFile, JSON.stringify(promptsConfig, null, 2), "utf8");
 58 | 
 59 |       this.logger.info(`Created new category: ${effectiveCategory}`);
 60 |       return { effectiveCategory, created: true };
 61 |     }
 62 | 
 63 |     return { effectiveCategory, created: false };
 64 |   }
 65 | 
 66 |   /**
 67 |    * Clean up empty category (remove from config and delete folder)
 68 |    */
 69 |   async cleanupEmptyCategory(
 70 |     categoryImport: string,
 71 |     promptsConfig: PromptsConfigFile,
 72 |     promptsFile: string
 73 |   ): Promise<OperationResult> {
 74 |     const promptsConfigDir = path.dirname(promptsFile);
 75 |     const categoryPath = path.join(promptsConfigDir, categoryImport);
 76 |     const categoryDir = path.dirname(categoryPath);
 77 |     const messages: string[] = [];
 78 | 
 79 |     try {
 80 |       // Extract category ID from import path (e.g., "examples/prompts.json" -> "examples")
 81 |       const categoryId = categoryImport.split('/')[0];
 82 | 
 83 |       // Remove from categories array
 84 |       const categoryIndex = promptsConfig.categories.findIndex(cat => cat.id === categoryId);
 85 |       if (categoryIndex > -1) {
 86 |         const removedCategory = promptsConfig.categories.splice(categoryIndex, 1)[0];
 87 |         messages.push(`✅ Removed category definition: ${removedCategory.name}`);
 88 |       }
 89 | 
 90 |       // Remove from imports array
 91 |       const importIndex = promptsConfig.imports.findIndex(imp => imp === categoryImport);
 92 |       if (importIndex > -1) {
 93 |         promptsConfig.imports.splice(importIndex, 1);
 94 |         messages.push(`✅ Removed import path: ${categoryImport}`);
 95 |       }
 96 | 
 97 |       // Save updated config
 98 |       await safeWriteFile(promptsFile, JSON.stringify(promptsConfig, null, 2), "utf8");
 99 |       messages.push(`✅ Updated promptsConfig.json`);
100 | 
101 |       // Delete empty category folder and its contents
102 |       try {
103 |         // Delete prompts.json file
104 |         await fs.unlink(categoryPath);
105 |         messages.push(`✅ Deleted category file: ${categoryImport}`);
106 | 
107 |         // Delete category directory if empty
108 |         await fs.rmdir(categoryDir);
109 |         messages.push(`✅ Deleted empty category folder: ${path.basename(categoryDir)}`);
110 |       } catch (folderError: any) {
111 |         if (folderError.code !== "ENOENT") {
112 |           messages.push(`⚠️ Could not delete category folder: ${folderError.message}`);
113 |         }
114 |       }
115 | 
116 |       this.logger.info(`Cleaned up empty category: ${categoryId}`);
117 | 
118 |     } catch (error: any) {
119 |       this.logger.error(`Failed to cleanup category ${categoryImport}:`, error);
120 |       messages.push(`❌ Category cleanup failed: ${error.message}`);
121 |     }
122 | 
123 |     return { message: messages.join('\n') };
124 |   }
125 | 
126 |   /**
127 |    * Check if category is empty
128 |    */
129 |   async isCategoryEmpty(categoryImport: string, promptsFile: string): Promise<boolean> {
130 |     try {
131 |       const promptsConfigDir = path.dirname(promptsFile);
132 |       const categoryPath = path.join(promptsConfigDir, categoryImport);
133 | 
134 |       const categoryContent = await readFile(categoryPath, "utf8");
135 |       const categoryData = JSON.parse(categoryContent);
136 | 
137 |       return !categoryData.prompts || categoryData.prompts.length === 0;
138 |     } catch (error) {
139 |       this.logger.warn(`Could not check category emptiness: ${categoryImport}`, error);
140 |       return false;
141 |     }
142 |   }
143 | 
144 |   /**
145 |    * Get category statistics
146 |    */
147 |   async getCategoryStats(categories: string[], promptsFile: string): Promise<Record<string, number>> {
148 |     const stats: Record<string, number> = {};
149 |     const promptsConfigDir = path.dirname(promptsFile);
150 | 
151 |     for (const categoryImport of categories) {
152 |       try {
153 |         const categoryPath = path.join(promptsConfigDir, categoryImport);
154 |         const categoryContent = await readFile(categoryPath, "utf8");
155 |         const categoryData = JSON.parse(categoryContent);
156 | 
157 |         const categoryId = categoryImport.split('/')[0];
158 |         stats[categoryId] = categoryData.prompts ? categoryData.prompts.length : 0;
159 |       } catch (error) {
160 |         const categoryId = categoryImport.split('/')[0];
161 |         stats[categoryId] = 0;
162 |       }
163 |     }
164 | 
165 |     return stats;
166 |   }
167 | 
168 |   /**
169 |    * Validate category structure
170 |    */
171 |   async validateCategoryStructure(categoryImport: string, promptsFile: string): Promise<{
172 |     valid: boolean;
173 |     issues: string[];
174 |   }> {
175 |     const issues: string[] = [];
176 |     const promptsConfigDir = path.dirname(promptsFile);
177 |     const categoryPath = path.join(promptsConfigDir, categoryImport);
178 | 
179 |     try {
180 |       // Check if category file exists
181 |       const categoryContent = await readFile(categoryPath, "utf8");
182 | 
183 |       try {
184 |         const categoryData = JSON.parse(categoryContent);
185 | 
186 |         // Validate structure
187 |         if (!categoryData.prompts) {
188 |           issues.push("Missing 'prompts' array");
189 |         } else if (!Array.isArray(categoryData.prompts)) {
190 |           issues.push("'prompts' must be an array");
191 |         }
192 | 
193 |         // Validate each prompt entry
194 |         if (categoryData.prompts) {
195 |           for (const [index, prompt] of categoryData.prompts.entries()) {
196 |             if (!prompt.id) {
197 |               issues.push(`Prompt at index ${index} missing 'id'`);
198 |             }
199 |             if (!prompt.name) {
200 |               issues.push(`Prompt at index ${index} missing 'name'`);
201 |             }
202 |             if (!prompt.file) {
203 |               issues.push(`Prompt at index ${index} missing 'file'`);
204 |             }
205 |           }
206 |         }
207 | 
208 |       } catch (parseError) {
209 |         issues.push("Invalid JSON format");
210 |       }
211 | 
212 |     } catch (error) {
213 |       issues.push("Category file not accessible");
214 |     }
215 | 
216 |     return {
217 |       valid: issues.length === 0,
218 |       issues
219 |     };
220 |   }
221 | 
222 |   /**
223 |    * Normalize category name for consistency
224 |    */
225 |   normalizeCategoryName(category: string): string {
226 |     return category.toLowerCase().replace(/\s+/g, "-").replace(/[^a-z0-9-_]/g, "");
227 |   }
228 | 
229 |   /**
230 |    * Get category display name
231 |    */
232 |   getCategoryDisplayName(categoryId: string, categories: any[]): string {
233 |     const category = categories.find(cat => cat.id === categoryId);
234 |     return category ? category.name : categoryId;
235 |   }
236 | }
```
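
The methods above cover the full category lifecycle: `ensureCategoryExists` normalizes the name, registers the category, and scaffolds its folder and `prompts.json`; `isCategoryEmpty` and `cleanupEmptyCategory` undo that work once the last prompt is gone. A minimal lifecycle sketch follows; the `example` function, its arguments, and the import paths are placeholders standing in for whatever the surrounding prompt-manager code actually supplies.

```typescript
// Lifecycle sketch for CategoryManager. `example` and its arguments are
// placeholders; a real caller gets the logger, loaded promptsConfig, and
// promptsFile path from the surrounding prompt-manager code.
import { CategoryManager } from "./utils/category-manager.js";
import type { Logger } from "../../../logging/index.js";
import type { PromptsConfigFile } from "../../../types/index.js";

async function example(logger: Logger, promptsConfig: PromptsConfigFile, promptsFile: string) {
  const categories = new CategoryManager(logger);

  // "code review" becomes id "code-review"; its folder and an empty prompts.json are created if missing.
  const { effectiveCategory, created } = await categories.ensureCategoryExists(
    "code review",
    promptsConfig,
    promptsFile
  );

  // Later, after the last prompt in the category has been deleted:
  const importPath = `${effectiveCategory}/prompts.json`;
  if (await categories.isCategoryEmpty(importPath, promptsFile)) {
    const result = await categories.cleanupEmptyCategory(importPath, promptsConfig, promptsFile);
    logger.info(result.message);
  }

  return created;
}
```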