This is page 4 of 12. Use http://codebase.md/minipuft/claude-prompts-mcp?page={x} to view the full context.
# Directory Structure
```
├── .actrc
├── .gitattributes
├── .github
│   └── workflows
│       ├── ci.yml
│       ├── mcp-compliance.yml
│       └── pr-validation.yml
├── .gitignore
├── agent.md
├── assets
│   └── logo.png
├── CLAUDE.md
├── config
│   └── framework-state.json
├── docs
│   ├── architecture.md
│   ├── chain-modification-examples.md
│   ├── contributing.md
│   ├── enhanced-gate-system.md
│   ├── execution-architecture-guide.md
│   ├── installation-guide.md
│   ├── mcp-tool-usage-guide.md
│   ├── mcp-tools-reference.md
│   ├── prompt-format-guide.md
│   ├── prompt-management.md
│   ├── prompt-vs-template-guide.md
│   ├── README.md
│   ├── template-development-guide.md
│   ├── TODO.md
│   ├── troubleshooting.md
│   └── version-history.md
├── LICENSE
├── local-test.sh
├── plans
│   ├── nunjucks-dynamic-chain-orchestration.md
│   ├── outputschema-realtime-progress-and-validation.md
│   ├── parallel-conditional-execution-analysis.md
│   ├── sqlite-storage-migration.md
│   └── symbolic-command-language-implementation.md
├── README.md
├── scripts
│   ├── setup-windows-testing.sh
│   ├── test_server.js
│   ├── test-all-platforms.sh
│   └── windows-tests
│       ├── test-windows-paths.js
│       ├── test-windows-startup.sh
│       └── windows-env.sh
└── server
    ├── config
    │   ├── framework-state.json
    │   └── tool-descriptions.json
    ├── config.json
    ├── jest.config.cjs
    ├── LICENSE
    ├── package-lock.json
    ├── package.json
    ├── prompts
    │   ├── analysis
    │   │   ├── advanced_analysis_engine.md
    │   │   ├── content_analysis.md
    │   │   ├── deep_analysis.md
    │   │   ├── deep_research.md
    │   │   ├── markdown_notebook.md
    │   │   ├── note_integration.md
    │   │   ├── note_refinement.md
    │   │   ├── notes.md
    │   │   ├── progressive_research.md
    │   │   ├── prompts.json
    │   │   ├── query_refinement.md
    │   │   └── review.md
    │   ├── architecture
    │   │   ├── prompts.json
    │   │   └── strategic-system-alignment.md
    │   ├── content_processing
    │   │   ├── format_enhancement.md
    │   │   ├── noteIntegration.md
    │   │   ├── obsidian_metadata_optimizer.md
    │   │   ├── prompts.json
    │   │   ├── vault_related_notes_finder.md
    │   │   └── video_notes_enhanced.md
    │   ├── debugging
    │   │   ├── analyze_logs.md
    │   │   └── prompts.json
    │   ├── development
    │   │   ├── analyze_code_structure.md
    │   │   ├── analyze_file_structure.md
    │   │   ├── code_review_optimization_chain.md
    │   │   ├── component_flow_analysis.md
    │   │   ├── create_modularization_plan.md
    │   │   ├── detect_code_issues.md
    │   │   ├── detect_project_commands.md
    │   │   ├── expert_code_implementation.md
    │   │   ├── generate_comprehensive_claude_md.md
    │   │   ├── prompts.json
    │   │   ├── strategicImplement.md
    │   │   ├── suggest_code_improvements.md
    │   │   └── transform_code_to_modules.md
    │   ├── documentation
    │   │   ├── create_docs_chain.md
    │   │   ├── docs-content-creation.md
    │   │   ├── docs-content-planning.md
    │   │   ├── docs-final-assembly.md
    │   │   ├── docs-project-analysis.md
    │   │   ├── docs-review-refinement.md
    │   │   └── prompts.json
    │   ├── education
    │   │   ├── prompts.json
    │   │   └── vault_integrated_notes.md
    │   ├── general
    │   │   ├── diagnose.md
    │   │   └── prompts.json
    │   ├── promptsConfig.json
    │   └── testing
    │       ├── final_verification_test.md
    │       └── prompts.json
    ├── README.md
    ├── scripts
    │   └── validate-dependencies.js
    ├── src
    │   ├── api
    │   │   └── index.ts
    │   ├── chain-session
    │   │   └── manager.ts
    │   ├── config
    │   │   └── index.ts
    │   ├── Dockerfile
    │   ├── execution
    │   │   ├── context
    │   │   │   ├── context-resolver.ts
    │   │   │   ├── framework-injector.ts
    │   │   │   └── index.ts
    │   │   ├── index.ts
    │   │   ├── parsers
    │   │   │   ├── argument-parser.ts
    │   │   │   ├── index.ts
    │   │   │   └── unified-command-parser.ts
    │   │   └── types.ts
    │   ├── frameworks
    │   │   ├── framework-manager.ts
    │   │   ├── framework-state-manager.ts
    │   │   ├── index.ts
    │   │   ├── integration
    │   │   │   ├── framework-semantic-integration.ts
    │   │   │   └── index.ts
    │   │   ├── methodology
    │   │   │   ├── guides
    │   │   │   │   ├── 5w1h-guide.ts
    │   │   │   │   ├── cageerf-guide.ts
    │   │   │   │   ├── react-guide.ts
    │   │   │   │   └── scamper-guide.ts
    │   │   │   ├── index.ts
    │   │   │   ├── interfaces.ts
    │   │   │   └── registry.ts
    │   │   ├── prompt-guidance
    │   │   │   ├── index.ts
    │   │   │   ├── methodology-tracker.ts
    │   │   │   ├── service.ts
    │   │   │   ├── system-prompt-injector.ts
    │   │   │   └── template-enhancer.ts
    │   │   └── types
    │   │       ├── index.ts
    │   │       ├── integration-types.ts
    │   │       ├── methodology-types.ts
    │   │       └── prompt-guidance-types.ts
    │   ├── gates
    │   │   ├── constants.ts
    │   │   ├── core
    │   │   │   ├── gate-definitions.ts
    │   │   │   ├── gate-loader.ts
    │   │   │   ├── gate-validator.ts
    │   │   │   ├── index.ts
    │   │   │   └── temporary-gate-registry.ts
    │   │   ├── definitions
    │   │   │   ├── code-quality.json
    │   │   │   ├── content-structure.json
    │   │   │   ├── educational-clarity.json
    │   │   │   ├── framework-compliance.json
    │   │   │   ├── research-quality.json
    │   │   │   ├── security-awareness.json
    │   │   │   └── technical-accuracy.json
    │   │   ├── gate-state-manager.ts
    │   │   ├── guidance
    │   │   │   ├── FrameworkGuidanceFilter.ts
    │   │   │   └── GateGuidanceRenderer.ts
    │   │   ├── index.ts
    │   │   ├── intelligence
    │   │   │   ├── GatePerformanceAnalyzer.ts
    │   │   │   └── GateSelectionEngine.ts
    │   │   ├── templates
    │   │   │   ├── code_quality_validation.md
    │   │   │   ├── educational_clarity_validation.md
    │   │   │   ├── framework_compliance_validation.md
    │   │   │   ├── research_self_validation.md
    │   │   │   ├── security_validation.md
    │   │   │   ├── structure_validation.md
    │   │   │   └── technical_accuracy_validation.md
    │   │   └── types.ts
    │   ├── index.ts
    │   ├── logging
    │   │   └── index.ts
    │   ├── mcp-tools
    │   │   ├── config-utils.ts
    │   │   ├── constants.ts
    │   │   ├── index.ts
    │   │   ├── prompt-engine
    │   │   │   ├── core
    │   │   │   │   ├── engine.ts
    │   │   │   │   ├── executor.ts
    │   │   │   │   ├── index.ts
    │   │   │   │   └── types.ts
    │   │   │   ├── index.ts
    │   │   │   ├── processors
    │   │   │   │   ├── response-formatter.ts
    │   │   │   │   └── template-processor.ts
    │   │   │   └── utils
    │   │   │       ├── category-extractor.ts
    │   │   │       ├── classification.ts
    │   │   │       ├── context-builder.ts
    │   │   │       └── validation.ts
    │   │   ├── prompt-manager
    │   │   │   ├── analysis
    │   │   │   │   ├── comparison-engine.ts
    │   │   │   │   ├── gate-analyzer.ts
    │   │   │   │   └── prompt-analyzer.ts
    │   │   │   ├── core
    │   │   │   │   ├── index.ts
    │   │   │   │   ├── manager.ts
    │   │   │   │   └── types.ts
    │   │   │   ├── index.ts
    │   │   │   ├── operations
    │   │   │   │   └── file-operations.ts
    │   │   │   ├── search
    │   │   │   │   ├── filter-parser.ts
    │   │   │   │   └── prompt-matcher.ts
    │   │   │   └── utils
    │   │   │       ├── category-manager.ts
    │   │   │       └── validation.ts
    │   │   ├── shared
    │   │   │   └── structured-response-builder.ts
    │   │   ├── system-control.ts
    │   │   ├── tool-description-manager.ts
    │   │   └── types
    │   │       └── shared-types.ts
    │   ├── metrics
    │   │   ├── analytics-service.ts
    │   │   ├── index.ts
    │   │   └── types.ts
    │   ├── performance
    │   │   ├── index.ts
    │   │   └── monitor.ts
    │   ├── prompts
    │   │   ├── category-manager.ts
    │   │   ├── converter.ts
    │   │   ├── file-observer.ts
    │   │   ├── hot-reload-manager.ts
    │   │   ├── index.ts
    │   │   ├── loader.ts
    │   │   ├── promptUtils.ts
    │   │   ├── registry.ts
    │   │   └── types.ts
    │   ├── runtime
    │   │   ├── application.ts
    │   │   └── startup.ts
    │   ├── semantic
    │   │   ├── configurable-semantic-analyzer.ts
    │   │   └── integrations
    │   │       ├── index.ts
    │   │       └── llm-clients.ts
    │   ├── server
    │   │   ├── index.ts
    │   │   └── transport
    │   │       └── index.ts
    │   ├── smithery.yaml
    │   ├── text-references
    │   │   ├── conversation.ts
    │   │   └── index.ts
    │   ├── types
    │   │   └── index.ts
    │   ├── types.ts
    │   └── utils
    │       ├── chainUtils.ts
    │       ├── errorHandling.ts
    │       ├── global-resource-tracker.ts
    │       ├── index.ts
    │       └── jsonUtils.ts
    ├── tests
    │   ├── ci-startup-validation.js
    │   ├── enhanced-validation
    │   │   ├── contract-validation
    │   │   │   ├── contract-test-suite.js
    │   │   │   ├── interface-contracts.js
    │   │   │   └── interface-contracts.ts
    │   │   ├── environment-validation
    │   │   │   ├── environment-parity-checker.js
    │   │   │   └── environment-test-suite.js
    │   │   ├── lifecycle-validation
    │   │   │   ├── lifecycle-test-suite.js
    │   │   │   └── process-lifecycle-validator.js
    │   │   └── validation-orchestrator.js
    │   ├── helpers
    │   │   └── test-helpers.js
    │   ├── integration
    │   │   ├── mcp-tools.test.ts
    │   │   ├── server-startup.test.ts
    │   │   └── unified-parsing-integration.test.ts
    │   ├── performance
    │   │   ├── parsing-system-benchmark.test.ts
    │   │   └── server-performance.test.ts
    │   ├── scripts
    │   │   ├── consolidated-tools.js
    │   │   ├── establish-performance-baselines.js
    │   │   ├── functional-mcp-validation.js
    │   │   ├── integration-mcp-tools.js
    │   │   ├── integration-routing-system.js
    │   │   ├── integration-server-startup.js
    │   │   ├── integration-unified-parsing.js
    │   │   ├── methodology-guides.js
    │   │   ├── performance-memory.js
    │   │   ├── runtime-integration.js
    │   │   ├── unit-conversation-manager.js
    │   │   ├── unit-semantic-analyzer.js
    │   │   └── unit-unified-parsing.js
    │   ├── setup.ts
    │   ├── test-enhanced-parsing.js
    │   └── unit
    │       ├── conversation-manager.test.ts
    │       ├── semantic-analyzer-three-tier.test.ts
    │       └── unified-parsing-system.test.ts
    ├── tsconfig.json
    └── tsconfig.test.json
```
# Files
--------------------------------------------------------------------------------
/server/src/prompts/category-manager.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Category Manager Module
 * Handles category management logic with validation, organization, and relationship tracking
 */
import { Logger } from "../logging/index.js";
import { Category, PromptData } from "../types/index.js";

// Import category interfaces from prompts/types.ts instead of redefining
import type {
  CategoryValidationResult,
  CategoryStatistics,
  CategoryPromptRelationship
} from './types.js';

/**
 * CategoryManager class
 * Centralizes all category-related operations with validation and consistency checking
 */
export class CategoryManager {
  private logger: Logger;
  private categories: Category[] = [];

  constructor(logger: Logger) {
    this.logger = logger;
  }

  /**
   * Load and validate categories from configuration
   */
  async loadCategories(categories: Category[]): Promise<CategoryValidationResult> {
    this.logger.debug(`CategoryManager: Loading ${categories.length} categories`);
    const result: CategoryValidationResult = {
      isValid: true,
      issues: [],
      warnings: []
    };
    // Validate categories
    const validatedCategories: Category[] = [];
    const seenIds = new Set<string>();
    const seenNames = new Set<string>();
    for (let i = 0; i < categories.length; i++) {
      const category = categories[i];
      // Validate required fields
      if (!category.id || typeof category.id !== 'string') {
        result.issues.push(`Category ${i + 1}: Missing or invalid 'id' field`);
        result.isValid = false;
        continue;
      }
      if (!category.name || typeof category.name !== 'string') {
        result.issues.push(`Category ${i + 1} (${category.id}): Missing or invalid 'name' field`);
        result.isValid = false;
        continue;
      }
      if (!category.description || typeof category.description !== 'string') {
        result.warnings.push(`Category ${category.id}: Missing or empty description`);
      }
      // Check for duplicates
      if (seenIds.has(category.id)) {
        result.issues.push(`Duplicate category ID found: ${category.id}`);
        result.isValid = false;
        continue;
      }
      if (seenNames.has(category.name)) {
        result.warnings.push(`Duplicate category name found: ${category.name}`);
      }
      seenIds.add(category.id);
      seenNames.add(category.name);
      // Clean and normalize category
      const normalizedCategory: Category = {
        id: category.id.trim(),
        name: category.name.trim(),
        description: (category.description || '').trim()
      };
      validatedCategories.push(normalizedCategory);
    }
    this.categories = validatedCategories;
    this.logger.info(`CategoryManager: Loaded ${this.categories.length} valid categories`);
    if (result.issues.length > 0) {
      this.logger.error(`CategoryManager: ${result.issues.length} validation issues found`);
      result.issues.forEach(issue => this.logger.error(`  - ${issue}`));
    }
    if (result.warnings.length > 0) {
      this.logger.warn(`CategoryManager: ${result.warnings.length} warnings found`);
      result.warnings.forEach(warning => this.logger.warn(`  - ${warning}`));
    }
    return result;
  }

  /**
   * Get all categories
   */
  getCategories(): Category[] {
    return [...this.categories];
  }

  /**
   * Get category by ID
   */
  getCategoryById(id: string): Category | undefined {
    return this.categories.find(cat => cat.id === id);
  }

  /**
   * Get category by name
   */
  getCategoryByName(name: string): Category | undefined {
    return this.categories.find(cat => cat.name === name);
  }

  /**
   * Validate that all prompt categories exist
   */
  validatePromptCategories(prompts: PromptData[]): CategoryValidationResult {
    const result: CategoryValidationResult = {
      isValid: true,
      issues: [],
      warnings: []
    };
    const categoryIds = new Set(this.categories.map(cat => cat.id));
    const usedCategories = new Set<string>();
    for (const prompt of prompts) {
      if (!prompt.category) {
        result.issues.push(`Prompt '${prompt.id}' has no category assigned`);
        result.isValid = false;
        continue;
      }
      if (!categoryIds.has(prompt.category)) {
        result.issues.push(`Prompt '${prompt.id}' references non-existent category: ${prompt.category}`);
        result.isValid = false;
        continue;
      }
      usedCategories.add(prompt.category);
    }
    // Check for unused categories
    for (const category of this.categories) {
      if (!usedCategories.has(category.id)) {
        result.warnings.push(`Category '${category.id}' (${category.name}) has no prompts assigned`);
      }
    }
    return result;
  }

  /**
   * Get prompts by category
   */
  getPromptsByCategory(prompts: PromptData[], categoryId: string): PromptData[] {
    return prompts.filter(prompt => prompt.category === categoryId);
  }

  /**
   * Get category statistics
   */
  getCategoryStatistics(prompts: PromptData[]): CategoryStatistics {
    const categoryBreakdown: Array<{ category: Category; promptCount: number }> = [];
    let totalPrompts = 0;
    for (const category of this.categories) {
      const categoryPrompts = this.getPromptsByCategory(prompts, category.id);
      const promptCount = categoryPrompts.length;
      categoryBreakdown.push({
        category,
        promptCount
      });
      totalPrompts += promptCount;
    }
    const categoriesWithPrompts = categoryBreakdown.filter(item => item.promptCount > 0).length;
    const emptyCategoriesCount = this.categories.length - categoriesWithPrompts;
    const averagePromptsPerCategory = this.categories.length > 0
      ? totalPrompts / this.categories.length
      : 0;
    return {
      totalCategories: this.categories.length,
      categoriesWithPrompts,
      emptyCategoriesCount,
      averagePromptsPerCategory,
      categoryBreakdown
    };
  }

  /**
   * Get category-prompt relationships
   */
  getCategoryPromptRelationships(prompts: PromptData[]): CategoryPromptRelationship[] {
    return this.categories.map(category => {
      const categoryPrompts = this.getPromptsByCategory(prompts, category.id);
      return {
        categoryId: category.id,
        categoryName: category.name,
        promptIds: categoryPrompts.map(p => p.id),
        promptCount: categoryPrompts.length,
        hasChains: categoryPrompts.some(p => p.file && p.file.includes('chain')),
        hasTemplates: categoryPrompts.some(p => p.file && p.file.includes('template'))
      };
    });
  }

  /**
   * Organize prompts by category for display
   */
  organizePromptsByCategory(prompts: PromptData[]): Map<Category, PromptData[]> {
    const organized = new Map<Category, PromptData[]>();
    for (const category of this.categories) {
      const categoryPrompts = this.getPromptsByCategory(prompts, category.id);
      organized.set(category, categoryPrompts);
    }
    return organized;
  }

  /**
   * Check consistency between categories and prompts
   */
  checkConsistency(prompts: PromptData[]): {
    consistent: boolean;
    issues: string[];
    orphanedPrompts: PromptData[];
    emptyCategories: Category[];
  } {
    const issues: string[] = [];
    const orphanedPrompts: PromptData[] = [];
    const emptyCategories: Category[] = [];
    const categoryIds = new Set(this.categories.map(cat => cat.id));
    // Find orphaned prompts (prompts with invalid category references)
    for (const prompt of prompts) {
      if (prompt.category && !categoryIds.has(prompt.category)) {
        orphanedPrompts.push(prompt);
        issues.push(`Prompt '${prompt.id}' references non-existent category: ${prompt.category}`);
      }
    }
    // Find empty categories
    for (const category of this.categories) {
      const categoryPrompts = this.getPromptsByCategory(prompts, category.id);
      if (categoryPrompts.length === 0) {
        emptyCategories.push(category);
      }
    }
    const consistent = issues.length === 0 && orphanedPrompts.length === 0;
    return {
      consistent,
      issues,
      orphanedPrompts,
      emptyCategories
    };
  }

  /**
   * Get debug information for troubleshooting
   */
  getDebugInfo(prompts?: PromptData[]): {
    categoriesLoaded: number;
    categoryIds: string[];
    categoryNames: string[];
    statistics?: CategoryStatistics;
    consistency?: ReturnType<CategoryManager["checkConsistency"]>;
  } {
    const debugInfo = {
      categoriesLoaded: this.categories.length,
      categoryIds: this.categories.map(cat => cat.id),
      categoryNames: this.categories.map(cat => cat.name),
      statistics: prompts ? this.getCategoryStatistics(prompts) : undefined,
      consistency: prompts ? this.checkConsistency(prompts) : undefined
    };
    return debugInfo;
  }
}

/**
 * Factory function to create a CategoryManager instance
 */
export function createCategoryManager(logger: Logger): CategoryManager {
  return new CategoryManager(logger);
}
```
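
Usage sketch (not part of the repository): the snippet below shows how `createCategoryManager` and `loadCategories` fit together, assuming the import paths resolve from a module under `server/src` and that top-level `await` is available (ESM). The category values are invented; the duplicate `id` in the second entry is rejected as a validation issue while the first entry is kept.

```typescript
import { createLogger } from "./logging/index.js";
import { createCategoryManager } from "./prompts/category-manager.js";
import type { Category } from "./types/index.js";

const logger = createLogger({ logFile: "./server.log", transport: "sse" });
const manager = createCategoryManager(logger);

// Hypothetical input: the second entry reuses the first entry's id.
const categories: Category[] = [
  { id: "analysis", name: "Analysis", description: "Research and analysis prompts" },
  { id: "analysis", name: "Analysis (duplicate)", description: "Will be rejected" },
];

const result = await manager.loadCategories(categories);
console.log(result.isValid);                 // false: duplicate category ID
console.log(result.issues);                  // ["Duplicate category ID found: analysis"]
console.log(manager.getCategories().length); // 1: only the first entry survives
```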
--------------------------------------------------------------------------------
/server/src/frameworks/types/methodology-types.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Methodology Guide Type Definitions
 *
 * Contains all types related to methodology guides, framework definitions,
 * and methodology-specific interfaces. This consolidates types from multiple
 * sources to eliminate duplication.
 */
import type { ConvertedPrompt } from '../../execution/types.js';
import type { ContentAnalysisResult } from '../../semantic/configurable-semantic-analyzer.js';

/**
 * Framework methodology definitions
 * Each framework provides system prompt templates and execution guidelines
 */
export type FrameworkMethodology = "CAGEERF" | "ReACT" | "5W1H" | "SCAMPER" | "AUTO";

/**
 * Framework definition structure
 */
export interface FrameworkDefinition {
  id: string;
  name: string;
  description: string;
  methodology: FrameworkMethodology;
  systemPromptTemplate: string;
  executionGuidelines: string[];
  applicableTypes: string[];
  priority: number;
  enabled: boolean;
}

/**
 * Framework execution context
 */
export interface FrameworkExecutionContext {
  selectedFramework: FrameworkDefinition;
  systemPrompt: string;
  executionGuidelines: string[];
  metadata: {
    selectionReason: string;
    confidence: number;
    appliedAt: Date;
  };
}

/**
 * Framework selection criteria
 */
export interface FrameworkSelectionCriteria {
  promptType?: string;
  complexity?: 'low' | 'medium' | 'high';
  domain?: string;
  userPreference?: FrameworkMethodology;
  executionType?: 'template' | 'chain';
}

/**
 * Guidance for creating new prompts based on methodology
 */
export interface PromptCreationGuidance {
  // Structure guidance for different methodology sections
  structureGuidance: {
    systemPromptSuggestions: string[];
    userTemplateSuggestions: string[];
    argumentSuggestions: ArgumentGuidance[];
  };
  // Methodology-specific prompt elements
  methodologyElements: {
    requiredSections: string[];
    optionalSections: string[];
    sectionDescriptions: Record<string, string>;
  };
  // Quality improvement suggestions
  qualityGuidance: {
    clarityEnhancements: string[];
    completenessChecks: string[];
    specificityImprovements: string[];
  };
}

/**
 * Guidance for processing templates during execution
 */
export interface ProcessingGuidance {
  // Methodology-specific processing steps
  processingSteps: ProcessingStep[];
  // Template enhancement suggestions
  templateEnhancements: {
    systemPromptAdditions: string[];
    userPromptModifications: string[];
    contextualHints: string[];
  };
  // Execution flow guidance
  executionFlow: {
    preProcessingSteps: string[];
    postProcessingSteps: string[];
    validationSteps: string[];
  };
}

/**
 * Guidance for step sequencing in execution
 */
export interface StepGuidance {
  // Methodology-specific step sequence
  stepSequence: ExecutionStep[];
  // Step-specific enhancements
  stepEnhancements: Record<string, string[]>;
  // Quality gates for each step
  stepValidation: Record<string, string[]>;
}

/**
 * Overall methodology enhancement for execution
 */
export interface MethodologyEnhancement {
  // System prompt enhancements
  systemPromptGuidance: string;
  // Processing enhancements
  processingEnhancements: ProcessingStep[];
  // Quality gates specific to methodology
  methodologyGates: QualityGate[];
  // Template structure suggestions
  templateSuggestions: TemplateEnhancement[];
  // Execution metadata
  enhancementMetadata: {
    methodology: string;
    confidence: number;
    applicabilityReason: string;
    appliedAt: Date;
  };
}

/**
 * Core interfaces for guidance components
 */
export interface ArgumentGuidance {
  name: string;
  type: string;
  description: string;
  methodologyReason: string;
  examples: string[];
}

export interface ProcessingStep {
  id: string;
  name: string;
  description: string;
  methodologyBasis: string;
  order: number;
  required: boolean;
}

export interface ExecutionStep {
  id: string;
  name: string;
  action: string;
  methodologyPhase: string;
  dependencies: string[];
  expected_output: string;
}

export interface QualityGate {
  id: string;
  name: string;
  description: string;
  methodologyArea: string;
  validationCriteria: string[];
  priority: 'high' | 'medium' | 'low';
}

export interface TemplateEnhancement {
  section: 'system' | 'user' | 'arguments' | 'metadata';
  type: 'addition' | 'modification' | 'structure';
  description: string;
  content: string;
  methodologyJustification: string;
  impact: 'high' | 'medium' | 'low';
}

/**
 * Tool-specific descriptions for a methodology
 */
export interface MethodologyToolDescription {
  description?: string;
  parameters?: Record<string, string>;
}

/**
 * Complete tool descriptions provided by a methodology guide
 */
export interface MethodologyToolDescriptions {
  prompt_engine?: MethodologyToolDescription;
  prompt_manager?: MethodologyToolDescription;
  system_control?: MethodologyToolDescription;
}

/**
 * Methodology validation results
 */
export interface MethodologyValidation {
  compliant: boolean;
  compliance_score: number; // 0.0 to 1.0
  strengths: string[];
  improvement_areas: string[];
  specific_suggestions: TemplateEnhancement[];
  methodology_gaps: string[];
}

/**
 * Main interface for methodology guides
 * Framework adapters implement this to provide guidance rather than analysis
 */
export interface IMethodologyGuide {
  // Framework identification
  readonly frameworkId: string;
  readonly frameworkName: string;
  readonly methodology: string;
  readonly version: string;

  /**
   * Guide the creation of new prompts using this methodology
   * @param intent The user's intent or goal for the prompt
   * @param context Additional context information
   * @returns Guidance for structuring the prompt according to methodology
   */
  guidePromptCreation(
    intent: string,
    context?: Record<string, any>
  ): PromptCreationGuidance;

  /**
   * Guide template processing during execution
   * @param template The template being processed
   * @param executionType The execution strategy from semantic analyzer
   * @returns Processing guidance based on methodology
   */
  guideTemplateProcessing(
    template: string,
    executionType: string
  ): ProcessingGuidance;

  /**
   * Guide execution step sequencing
   * @param prompt The prompt being executed
   * @param semanticAnalysis Results from unified semantic analyzer
   * @returns Step-by-step guidance based on methodology
   */
  guideExecutionSteps(
    prompt: ConvertedPrompt,
    semanticAnalysis: ContentAnalysisResult
  ): StepGuidance;

  /**
   * Enhance execution with methodology-specific improvements
   * @param prompt The prompt to enhance
   * @param context Current execution context
   * @returns Methodology enhancements to apply
   */
  enhanceWithMethodology(
    prompt: ConvertedPrompt,
    context: Record<string, any>
  ): MethodologyEnhancement;

  /**
   * Validate that a prompt follows methodology principles
   * @param prompt The prompt to validate
   * @returns Validation results and improvement suggestions
   */
  validateMethodologyCompliance(
    prompt: ConvertedPrompt
  ): MethodologyValidation;

  /**
   * Get methodology-specific system prompt guidance
   * @param context Execution context
   * @returns System prompt additions for this methodology
   */
  getSystemPromptGuidance(
    context: Record<string, any>
  ): string;

  /**
   * Get methodology-specific tool descriptions (optional)
   * Provides custom descriptions for MCP tools when this methodology is active
   * @returns Tool descriptions customized for this methodology
   */
  getToolDescriptions?(): MethodologyToolDescriptions;
}

/**
 * Base class for methodology guides
 * Provides common functionality for all methodology implementations
 */
export abstract class BaseMethodologyGuide implements IMethodologyGuide {
  abstract readonly frameworkId: string;
  abstract readonly frameworkName: string;
  abstract readonly methodology: string;
  abstract readonly version: string;

  abstract guidePromptCreation(
    intent: string,
    context?: Record<string, any>
  ): PromptCreationGuidance;

  abstract guideTemplateProcessing(
    template: string,
    executionType: string
  ): ProcessingGuidance;

  abstract guideExecutionSteps(
    prompt: ConvertedPrompt,
    semanticAnalysis: ContentAnalysisResult
  ): StepGuidance;

  abstract enhanceWithMethodology(
    prompt: ConvertedPrompt,
    context: Record<string, any>
  ): MethodologyEnhancement;

  abstract validateMethodologyCompliance(
    prompt: ConvertedPrompt
  ): MethodologyValidation;

  abstract getSystemPromptGuidance(
    context: Record<string, any>
  ): string;

  /**
   * Helper method to extract combined text from prompt
   */
  protected getCombinedText(prompt: ConvertedPrompt): string {
    return [
      prompt.systemMessage || '',
      prompt.userMessageTemplate || '',
      prompt.description || ''
    ].filter(text => text.trim()).join(' ');
  }

  /**
   * Helper method to create enhancement metadata
   */
  protected createEnhancementMetadata(confidence: number, reason: string) {
    return {
      methodology: this.methodology,
      confidence,
      applicabilityReason: reason,
      appliedAt: new Date()
    };
  }
}
```
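
For orientation, here is a minimal, hand-written `FrameworkDefinition` with a matching `FrameworkSelectionCriteria`, using only the types declared above. The field values are illustrative, not definitions shipped by the repository (those are registered by the guides under `frameworks/methodology/guides/`).

```typescript
import type {
  FrameworkDefinition,
  FrameworkSelectionCriteria,
} from "./frameworks/types/methodology-types.js";

// Illustrative definition; real entries come from the methodology guides.
const reactFramework: FrameworkDefinition = {
  id: "react",
  name: "ReACT",
  description: "Interleaved reasoning and acting for tool-driven tasks",
  methodology: "ReACT",
  systemPromptTemplate: "Think step by step, decide on an action, observe the result...",
  executionGuidelines: [
    "Alternate between reasoning and action",
    "Record an observation after every action",
  ],
  applicableTypes: ["template", "chain"],
  priority: 1,
  enabled: true,
};

// Criteria a caller might pass when asking for a framework recommendation.
const criteria: FrameworkSelectionCriteria = {
  complexity: "high",
  executionType: "chain",
  userPreference: "ReACT",
};
```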
--------------------------------------------------------------------------------
/server/src/logging/index.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Logging Module
 * Handles file logging and transport-aware console logging
 */
import { appendFile, writeFile } from "fs/promises";
import { LogLevel, TransportType } from "../types/index.js";

/**
 * Logger interface compatible with existing code
 */
export interface Logger {
  info: (message: string, ...args: any[]) => void;
  error: (message: string, ...args: any[]) => void;
  warn: (message: string, ...args: any[]) => void;
  debug: (message: string, ...args: any[]) => void;
}

/**
 * Logging configuration options for EnhancedLogger
 */
export interface EnhancedLoggingConfig {
  logFile: string;
  transport: string;
  enableDebug?: boolean;
  configuredLevel?: string; // NEW: Support config-based log level
}

/**
 * Enhanced logger implementation with file and console logging
 */
export class EnhancedLogger implements Logger {
  private logFile: string;
  private transport: string;
  private enableDebug: boolean;
  private isCI: boolean;
  private configuredLevel: LogLevel;

  private static readonly LOG_LEVEL_PRIORITY = {
    [LogLevel.ERROR]: 0,
    [LogLevel.WARN]: 1,
    [LogLevel.INFO]: 2,
    [LogLevel.DEBUG]: 3,
  };

  constructor(config: EnhancedLoggingConfig) {
    this.logFile = config.logFile;
    this.transport = config.transport;
    this.enableDebug = config.enableDebug || false;
    this.isCI = process.env.CI === 'true' || process.env.NODE_ENV === 'test';
    // Map config level to LogLevel enum with fallback to INFO
    this.configuredLevel = this.parseLogLevel(config.configuredLevel || 'info');
  }

  /**
   * Parse string log level to LogLevel enum
   */
  private parseLogLevel(level: string): LogLevel {
    const normalizedLevel = level.toUpperCase();
    switch (normalizedLevel) {
      case 'DEBUG': return LogLevel.DEBUG;
      case 'INFO': return LogLevel.INFO;
      case 'WARN': return LogLevel.WARN;
      case 'ERROR': return LogLevel.ERROR;
      default:
        console.warn(`Unknown log level "${level}", defaulting to INFO`);
        return LogLevel.INFO;
    }
  }

  /**
   * Check if a log level should be output based on configuration
   */
  private shouldLog(level: LogLevel): boolean {
    // Command-line flags override config
    if (this.enableDebug) {
      return true; // Show everything in debug mode
    }
    const levelPriority = EnhancedLogger.LOG_LEVEL_PRIORITY[level];
    const configPriority = EnhancedLogger.LOG_LEVEL_PRIORITY[this.configuredLevel];
    return levelPriority <= configPriority;
  }

  /**
   * Initialize the log file with a clean start
   */
  async initLogFile(): Promise<void> {
    try {
      const timestamp = new Date().toISOString();
      await writeFile(
        this.logFile,
        `--- MCP Server Log Started at ${timestamp} ---\n`,
        "utf8"
      );
    } catch (error) {
      console.error(`Error initializing log file:`, error);
    }
  }

  /**
   * Write a message to the log file
   */
  private async logToFile(
    level: LogLevel,
    message: string,
    ...args: any[]
  ): Promise<void> {
    // Check if this log level should be output based on configuration
    if (!this.shouldLog(level)) {
      return;
    }
    try {
      let logMessage = `[${new Date().toISOString()}] [${level}] ${message}`;
      if (args.length > 0) {
        logMessage += ` ${args
          .map((arg) => (typeof arg === "object" ? JSON.stringify(arg) : arg))
          .join(" ")}`;
      }
      await appendFile(this.logFile, logMessage + "\n", "utf8");
    } catch (error) {
      console.error("Error writing to log file:", error);
    }
  }

  /**
   * Log to console based on transport and environment
   */
  private logToConsole(level: LogLevel, message: string, ...args: any[]): void {
    // Check if this log level should be output based on configuration
    if (!this.shouldLog(level)) {
      return;
    }
    // In CI environment, always log errors and warnings regardless of transport
    // This ensures critical issues are visible in CI output
    if (this.isCI) {
      if (level === LogLevel.ERROR || level === LogLevel.WARN) {
        switch (level) {
          case LogLevel.ERROR:
            console.error(`[ERROR] ${message}`, ...args);
            break;
          case LogLevel.WARN:
            console.warn(`[WARN] ${message}`, ...args);
            break;
        }
        return;
      }
      // In CI, suppress DEBUG messages unless explicitly enabled
      if (level === LogLevel.DEBUG && !this.enableDebug) {
        return;
      }
    }
    // Standard logging for non-CI environments
    if (this.transport !== TransportType.STDIO) {
      switch (level) {
        case LogLevel.INFO:
          console.log(`[INFO] ${message}`, ...args);
          break;
        case LogLevel.ERROR:
          console.error(`[ERROR] ${message}`, ...args);
          break;
        case LogLevel.WARN:
          console.warn(`[WARN] ${message}`, ...args);
          break;
        case LogLevel.DEBUG:
          console.log(`[DEBUG] ${message}`, ...args);
          break;
      }
    }
  }

  /**
   * Info level logging
   */
  info(message: string, ...args: any[]): void {
    this.logToConsole(LogLevel.INFO, message, ...args);
    this.logToFile(LogLevel.INFO, message, ...args);
  }

  /**
   * Error level logging
   */
  error(message: string, ...args: any[]): void {
    this.logToConsole(LogLevel.ERROR, message, ...args);
    this.logToFile(LogLevel.ERROR, message, ...args);
  }

  /**
   * Warning level logging
   */
  warn(message: string, ...args: any[]): void {
    this.logToConsole(LogLevel.WARN, message, ...args);
    this.logToFile(LogLevel.WARN, message, ...args);
  }

  /**
   * Debug level logging
   */
  debug(message: string, ...args: any[]): void {
    this.logToConsole(LogLevel.DEBUG, message, ...args);
    this.logToFile(LogLevel.DEBUG, message, ...args);
  }

  /**
   * Update transport type (useful when transport is determined after logger creation)
   */
  setTransport(transport: string): void {
    this.transport = transport;
  }

  /**
   * Enable or disable debug logging
   */
  setDebugEnabled(enabled: boolean): void {
    this.enableDebug = enabled;
  }

  /**
   * Log startup information
   */
  logStartupInfo(transport: string, config: any): void {
    this.info(`Server starting up - Process ID: ${process.pid}`);
    this.info(`Node version: ${process.version}`);
    this.info(`Working directory: ${process.cwd()}`);
    this.info(`Using transport: ${transport}`);
    this.info(`Command-line arguments: ${JSON.stringify(process.argv)}`);
    this.debug("Configuration:", JSON.stringify(config, null, 2));
  }

  /**
   * Log memory usage information
   */
  logMemoryUsage(): void {
    this.info(
      `Server process memory usage: ${JSON.stringify(process.memoryUsage())}`
    );
  }
}

/**
 * Create a logger instance
 */
export function createLogger(config: EnhancedLoggingConfig): EnhancedLogger {
  return new EnhancedLogger(config);
}

/**
 * Create a simple logger for areas that don't need the full enhanced logger
 * Now supports verbosity control via command-line flags
 */
export function createSimpleLogger(transport: string = "sse"): Logger {
  const enableConsole = transport !== TransportType.STDIO;
  // Check command-line flags for verbosity control
  const args = process.argv.slice(2);
  const isVerbose =
    args.includes("--verbose") || args.includes("--debug-startup");
  const isQuiet = args.includes("--quiet");
  return {
    info: (message: string, ...args: any[]) => {
      if (enableConsole && !isQuiet) {
        console.log(`[INFO] ${message}`, ...args);
      }
    },
    error: (message: string, ...args: any[]) => {
      if (enableConsole && !isQuiet) {
        console.error(`[ERROR] ${message}`, ...args);
      }
    },
    warn: (message: string, ...args: any[]) => {
      if (enableConsole && !isQuiet) {
        console.warn(`[WARN] ${message}`, ...args);
      }
    },
    debug: (message: string, ...args: any[]) => {
      if (enableConsole && isVerbose) {
        console.log(`[DEBUG] ${message}`, ...args);
      }
    },
  };
}

/**
 * Setup console redirection for STDIO transport
 * This prevents log messages from interfering with JSON MCP messages
 */
export function setupConsoleRedirection(logger: Logger): void {
  const originalConsoleLog = console.log;
  const originalConsoleError = console.error;
  console.log = (...args) => {
    logger.debug("CONSOLE: " + args.join(" "));
  };
  console.error = (...args) => {
    logger.error("CONSOLE_ERROR: " + args.join(" "));
  };
}

/**
 * Setup process event handlers for logging
 */
export function setupProcessEventHandlers(logger: Logger): void {
  // Handle graceful shutdown
  process.on("SIGINT", () => {
    logger.info("Shutting down server...");
    process.exit(0);
  });
  // Handle uncaught exceptions
  process.on("uncaughtException", (error) => {
    logger.error("Uncaught exception:", error);
  });
  // Handle unhandled promise rejections
  process.on("unhandledRejection", (reason, promise) => {
    logger.error("Unhandled Rejection at:", promise, "reason:", reason);
  });
  // Log when the stdin closes (which happens when the parent process terminates)
  process.stdin.on("end", () => {
    logger.info("STDIN stream ended - parent process may have terminated");
    process.exit(0);
  });
}
```
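
A usage sketch for the logging module, assuming an ESM module with top-level `await`. It shows the three behaviors that matter in practice: level gating via `configuredLevel`, console suppression on the STDIO transport, and console redirection so stray `console.log` calls cannot corrupt the JSON-RPC stream.

```typescript
import {
  createLogger,
  createSimpleLogger,
  setupConsoleRedirection,
} from "./logging/index.js";

const logger = createLogger({
  logFile: "./mcp-server.log",
  transport: "stdio",
  configuredLevel: "info", // DEBUG messages are filtered out
});
await logger.initLogFile();

logger.info("server ready");  // written to the file; console stays silent on stdio
logger.debug("noisy detail"); // dropped: DEBUG priority exceeds the configured INFO

// On stdio, reroute any remaining console output through the logger.
setupConsoleRedirection(logger);

// Lightweight variant for modules that do not need file logging.
const simple = createSimpleLogger("sse");
simple.warn("fallback path taken");
```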
--------------------------------------------------------------------------------
/server/src/gates/intelligence/GatePerformanceAnalyzer.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Gate Performance Analyzer - Performance Metrics & Optimization
 *
 * Single responsibility: Track gate performance metrics and provide optimization recommendations.
 * Clean dependencies: Only logger for performance tracking.
 */
import type { Logger } from '../../logging/index.js';
import { GatePerformanceMetrics, GateSystemAnalytics } from '../core/gate-definitions.js';

/**
 * Performance trend data
 */
interface PerformanceTrend {
  gateId: string;
  trend: 'improving' | 'declining' | 'stable';
  changePercent: number;
  recommendation: string;
}

/**
 * Gate performance analyzer with metrics tracking and optimization recommendations
 */
export class GatePerformanceAnalyzer {
  private gateMetrics = new Map<string, GatePerformanceMetrics>();
  private sessionStartTime: Date;
  private totalExecutions = 0;
  private logger: Logger;

  constructor(logger: Logger) {
    this.logger = logger;
    this.sessionStartTime = new Date();
    this.logger.debug('[GATE PERFORMANCE ANALYZER] Initialized');
  }

  /**
   * Record gate execution performance
   *
   * @param gateId - Gate identifier
   * @param executionTime - Time taken for gate execution (ms)
   * @param success - Whether the gate execution was successful
   */
  recordGateExecution(gateId: string, executionTime: number, success: boolean): void {
    this.logger.debug('[GATE PERFORMANCE ANALYZER] Recording execution:', {
      gateId,
      executionTime,
      success
    });
    let metrics = this.gateMetrics.get(gateId);
    if (!metrics) {
      metrics = {
        gateId,
        avgExecutionTime: executionTime,
        successRate: success ? 1.0 : 0.0,
        retryRate: success ? 0.0 : 1.0,
        lastUsed: new Date(),
        usageCount: 1
      };
    } else {
      // Update metrics with rolling average
      const totalTime = metrics.avgExecutionTime * metrics.usageCount + executionTime;
      metrics.usageCount++;
      metrics.avgExecutionTime = totalTime / metrics.usageCount;
      // Update success rate
      const totalSuccesses = metrics.successRate * (metrics.usageCount - 1) + (success ? 1 : 0);
      metrics.successRate = totalSuccesses / metrics.usageCount;
      // Update retry rate
      const totalRetries = metrics.retryRate * (metrics.usageCount - 1) + (success ? 0 : 1);
      metrics.retryRate = totalRetries / metrics.usageCount;
      metrics.lastUsed = new Date();
    }
    this.gateMetrics.set(gateId, metrics);
    this.totalExecutions++;
  }

  /**
   * Get performance analytics for all gates
   *
   * @returns Complete gate system analytics
   */
  getPerformanceAnalytics(): GateSystemAnalytics {
    const allMetrics = Array.from(this.gateMetrics.values());
    if (allMetrics.length === 0) {
      return {
        totalGates: 0,
        avgExecutionTime: 0,
        overallSuccessRate: 0,
        topPerformingGates: [],
        underperformingGates: [],
        recommendations: ['No gate performance data available yet']
      };
    }
    // Calculate overall metrics
    const totalGates = allMetrics.length;
    const avgExecutionTime = allMetrics.reduce((sum, m) => sum + m.avgExecutionTime, 0) / totalGates;
    const overallSuccessRate = allMetrics.reduce((sum, m) => sum + m.successRate, 0) / totalGates;
    // Sort gates by performance
    const sortedByPerformance = [...allMetrics].sort((a, b) => {
      const scoreA = this.calculatePerformanceScore(a);
      const scoreB = this.calculatePerformanceScore(b);
      return scoreB - scoreA;
    });
    const topPerformingGates = sortedByPerformance
      .slice(0, 3)
      .map(m => m.gateId);
    const underperformingGates = sortedByPerformance
      .slice(-3)
      .filter(m => this.calculatePerformanceScore(m) < 0.7)
      .map(m => m.gateId);
    const recommendations = this.generateOptimizationRecommendations(allMetrics);
    return {
      totalGates,
      avgExecutionTime: Math.round(avgExecutionTime),
      overallSuccessRate: Math.round(overallSuccessRate * 100) / 100,
      topPerformingGates,
      underperformingGates,
      recommendations
    };
  }

  /**
   * Get metrics for a specific gate
   *
   * @param gateId - Gate identifier
   * @returns Gate performance metrics or null if not found
   */
  getGateMetrics(gateId: string): GatePerformanceMetrics | null {
    const metrics = this.gateMetrics.get(gateId);
    return metrics ? { ...metrics } : null;
  }

  /**
   * Get performance trends for analysis
   *
   * @returns Array of performance trends
   */
  getPerformanceTrends(): PerformanceTrend[] {
    const trends: PerformanceTrend[] = [];
    for (const metrics of this.gateMetrics.values()) {
      const trend = this.calculateTrend(metrics);
      trends.push(trend);
    }
    return trends.sort((a, b) => Math.abs(b.changePercent) - Math.abs(a.changePercent));
  }

  /**
   * Calculate performance score for a gate (0-1, higher is better)
   */
  private calculatePerformanceScore(metrics: GatePerformanceMetrics): number {
    const successWeight = 0.6;
    const speedWeight = 0.3;
    const usageWeight = 0.1;
    // Normalize execution time (assuming 500ms is baseline)
    const speedScore = Math.max(0, Math.min(1, (500 - metrics.avgExecutionTime) / 500 + 0.5));
    // Normalize usage count (logarithmic scale)
    const usageScore = Math.min(1, Math.log10(metrics.usageCount + 1) / 2);
    return (
      metrics.successRate * successWeight +
      speedScore * speedWeight +
      usageScore * usageWeight
    );
  }

  /**
   * Calculate performance trend for a gate
   */
  private calculateTrend(metrics: GatePerformanceMetrics): PerformanceTrend {
    // Simple trend analysis based on recent performance
    // In a real implementation, this would track historical data
    let trend: 'improving' | 'declining' | 'stable' = 'stable';
    let changePercent = 0;
    let recommendation = 'Performance is stable';
    const performanceScore = this.calculatePerformanceScore(metrics);
    if (performanceScore > 0.8) {
      trend = 'improving';
      changePercent = 5; // Mock improvement
      recommendation = 'Excellent performance, consider as a model for other gates';
    } else if (performanceScore < 0.5) {
      trend = 'declining';
      changePercent = -10; // Mock decline
      recommendation = 'Performance needs attention, consider optimization';
    } else {
      trend = 'stable';
      changePercent = 0;
      recommendation = 'Performance is acceptable, monitor for changes';
    }
    return {
      gateId: metrics.gateId,
      trend,
      changePercent,
      recommendation
    };
  }

  /**
   * Generate optimization recommendations based on metrics
   */
  private generateOptimizationRecommendations(allMetrics: GatePerformanceMetrics[]): string[] {
    const recommendations: string[] = [];
    // Check for slow gates
    const slowGates = allMetrics.filter(m => m.avgExecutionTime > 300);
    if (slowGates.length > 0) {
      recommendations.push(
        `Optimize slow gates: ${slowGates.map(g => g.gateId).join(', ')} (>${300}ms avg)`
      );
    }
    // Check for low success rates
    const unreliableGates = allMetrics.filter(m => m.successRate < 0.8);
    if (unreliableGates.length > 0) {
      recommendations.push(
        `Improve reliability of: ${unreliableGates.map(g => g.gateId).join(', ')} (<80% success)`
      );
    }
    // Check for unused gates
    const underusedGates = allMetrics.filter(m => m.usageCount < 5);
    if (underusedGates.length > 0) {
      recommendations.push(
        `Review gate relevance: ${underusedGates.map(g => g.gateId).join(', ')} (low usage)`
      );
    }
    // Overall system recommendations
    const avgSuccessRate = allMetrics.reduce((sum, m) => sum + m.successRate, 0) / allMetrics.length;
    if (avgSuccessRate < 0.85) {
      recommendations.push('Overall system success rate is below optimal (85%), review gate criteria');
    }
    if (recommendations.length === 0) {
      recommendations.push('Gate system performance is optimal, no immediate optimizations needed');
    }
    return recommendations;
  }

  /**
   * Reset all performance metrics
   */
  resetMetrics(): void {
    this.gateMetrics.clear();
    this.sessionStartTime = new Date();
    this.totalExecutions = 0;
    this.logger.info('[GATE PERFORMANCE ANALYZER] Performance metrics reset');
  }

  /**
   * Get session statistics
   */
  getSessionStatistics() {
    const sessionDuration = Date.now() - this.sessionStartTime.getTime();
    const avgExecutionsPerMinute = this.totalExecutions / (sessionDuration / 60000);
    return {
      sessionDuration: Math.round(sessionDuration / 1000), // seconds
      totalExecutions: this.totalExecutions,
      avgExecutionsPerMinute: Math.round(avgExecutionsPerMinute * 10) / 10,
      uniqueGatesUsed: this.gateMetrics.size,
      sessionStartTime: this.sessionStartTime.toISOString()
    };
  }

  /**
   * Export metrics for external analysis
   */
  exportMetrics(): { metrics: GatePerformanceMetrics[]; session: any } {
    return {
      metrics: Array.from(this.gateMetrics.values()),
      session: this.getSessionStatistics()
    };
  }
}

/**
 * Factory function for creating gate performance analyzer
 */
export function createGatePerformanceAnalyzer(logger: Logger): GatePerformanceAnalyzer {
  return new GatePerformanceAnalyzer(logger);
}
```
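
Minimal usage sketch; the gate IDs and timings below are invented for illustration. Each `recordGateExecution` call folds a sample into the per-gate rolling averages, and `getPerformanceAnalytics` summarizes them.

```typescript
import { createLogger } from "./logging/index.js";
import { createGatePerformanceAnalyzer } from "./gates/intelligence/GatePerformanceAnalyzer.js";

const logger = createLogger({ logFile: "./gates.log", transport: "sse" });
const analyzer = createGatePerformanceAnalyzer(logger);

// Record samples: (gateId, execution time in ms, success flag).
analyzer.recordGateExecution("code-quality", 120, true);
analyzer.recordGateExecution("code-quality", 180, true);
analyzer.recordGateExecution("security-awareness", 450, false);

const analytics = analyzer.getPerformanceAnalytics();
console.log(analytics.totalGates);      // 2
console.log(analytics.recommendations); // flags the slow (>300ms) and failing (<80%) gate

console.log(analyzer.getGateMetrics("code-quality")?.avgExecutionTime); // 150
```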
--------------------------------------------------------------------------------
/server/src/mcp-tools/prompt-manager/analysis/comparison-engine.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Before/after analysis comparison engine
 */
import { Logger } from "../../../logging/index.js";
import { PromptClassification } from "../core/types.js";

/**
 * Comparison result interface
 */
export interface ComparisonResult {
  hasChanges: boolean;
  summary: string;
  changes: ComparisonChange[];
  recommendations: string[];
}

/**
 * Individual comparison change
 */
export interface ComparisonChange {
  type: 'execution_type' | 'framework_requirement' | 'gates' | 'confidence' | 'complexity';
  before: any;
  after: any;
  impact: 'positive' | 'negative' | 'neutral';
  description: string;
}

/**
 * Analysis comparison engine for tracking prompt evolution
 */
export class ComparisonEngine {
  private logger: Logger;

  constructor(logger: Logger) {
    this.logger = logger;
  }

  /**
   * Compare two prompt analyses and generate change summary
   */
  compareAnalyses(
    before: PromptClassification,
    after: PromptClassification,
    promptId: string
  ): ComparisonResult {
    const changes: ComparisonChange[] = [];
    // Compare execution type
    if (before.executionType !== after.executionType) {
      changes.push({
        type: 'execution_type',
        before: before.executionType,
        after: after.executionType,
        impact: this.assessExecutionTypeChange(before.executionType, after.executionType),
        description: `Execution type changed from ${before.executionType} to ${after.executionType}`
      });
    }
    // Compare framework requirements
    if (before.requiresFramework !== after.requiresFramework) {
      changes.push({
        type: 'framework_requirement',
        before: before.requiresFramework,
        after: after.requiresFramework,
        impact: after.requiresFramework ? 'positive' : 'neutral',
        description: `Framework requirement ${after.requiresFramework ? 'added' : 'removed'}`
      });
    }
    // Compare gates
    const gateChanges = this.compareGates(before.suggestedGates, after.suggestedGates);
    changes.push(...gateChanges);
    // Compare confidence (if both are available and significantly different)
    if (Math.abs(before.confidence - after.confidence) > 0.2) {
      changes.push({
        type: 'confidence',
        before: before.confidence,
        after: after.confidence,
        impact: after.confidence > before.confidence ? 'positive' : 'negative',
        description: `Analysis confidence ${after.confidence > before.confidence ? 'improved' : 'decreased'} (${Math.round((after.confidence - before.confidence) * 100)}%)`
      });
    }
    return {
      hasChanges: changes.length > 0,
      summary: this.generateSummary(changes),
      changes,
      recommendations: this.generateRecommendations(changes, before, after)
    };
  }

  /**
   * Compare gate suggestions
   */
  private compareGates(beforeGates: string[], afterGates: string[]): ComparisonChange[] {
    const changes: ComparisonChange[] = [];
    const beforeSet = new Set(beforeGates);
    const afterSet = new Set(afterGates);
    const addedGates = [...afterSet].filter(g => !beforeSet.has(g));
    const removedGates = [...beforeSet].filter(g => !afterSet.has(g));
    if (addedGates.length > 0) {
      changes.push({
        type: 'gates',
        before: beforeGates,
        after: afterGates,
        impact: 'positive',
        description: `Added quality gates: ${addedGates.join(', ')}`
      });
    }
    if (removedGates.length > 0) {
      changes.push({
        type: 'gates',
        before: beforeGates,
        after: afterGates,
        impact: 'neutral',
        description: `Removed gates: ${removedGates.join(', ')}`
      });
    }
    return changes;
  }

  /**
   * Assess the impact of execution type changes
   */
  private assessExecutionTypeChange(
    before: string,
    after: string
  ): 'positive' | 'negative' | 'neutral' {
    // Define execution type hierarchy (complexity order)
    const complexity: Record<string, number> = {
      'prompt': 1,
      'template': 2,
      'chain': 3
    };
    const beforeComplexity = complexity[before] || 0;
    const afterComplexity = complexity[after] || 0;
    if (afterComplexity > beforeComplexity) {
      return 'positive'; // Upgrading to more sophisticated type
    } else if (afterComplexity < beforeComplexity) {
      return 'neutral'; // Simplifying (could be positive optimization)
    }
    return 'neutral';
  }

  /**
   * Generate summary of changes
   */
  private generateSummary(changes: ComparisonChange[]): string {
    if (changes.length === 0) {
      return "No significant changes detected";
    }
    const typeChanges = changes.filter(c => c.type === 'execution_type');
    const gateChanges = changes.filter(c => c.type === 'gates');
    const frameworkChanges = changes.filter(c => c.type === 'framework_requirement');
    const parts: string[] = [];
    if (typeChanges.length > 0) {
      const change = typeChanges[0];
      parts.push(`🔄 **Type**: ${change.before} → ${change.after}`);
    }
    if (frameworkChanges.length > 0) {
      const change = frameworkChanges[0];
      const status = change.after ? 'enabled' : 'disabled';
      parts.push(`🧠 **Framework**: ${status}`);
    }
    if (gateChanges.length > 0) {
      const addedGates = gateChanges.filter(c => c.description.includes('Added'));
      const removedGates = gateChanges.filter(c => c.description.includes('Removed'));
      if (addedGates.length > 0) {
        parts.push(`✅ **Added Gates**`);
      }
      if (removedGates.length > 0) {
        parts.push(`❌ **Removed Gates**`);
      }
    }
    if (parts.length === 0) {
      return "Analysis metrics updated";
    }
    return `📊 **Analysis Changes**: ${parts.join(' • ')}`;
  }

  /**
   * Generate recommendations based on changes
   */
  private generateRecommendations(
    changes: ComparisonChange[],
    before: PromptClassification,
    after: PromptClassification
  ): string[] {
    const recommendations: string[] = [];
    // Execution type recommendations
    const typeChanges = changes.filter(c => c.type === 'execution_type');
    if (typeChanges.length > 0) {
      const change = typeChanges[0];
      if (change.after === 'chain' && change.before !== 'chain') {
        recommendations.push("💡 Consider adding chain validation gates for multi-step execution");
      } else if (change.after === 'template' && change.before === 'prompt') {
        recommendations.push("💡 Framework integration now available for structured analysis");
      } else if (change.after === 'prompt' && change.before !== 'prompt') {
        recommendations.push("⚡ Simplified execution should improve performance");
      }
    }
    // Framework recommendations
    const frameworkChanges = changes.filter(c => c.type === 'framework_requirement');
    if (frameworkChanges.length > 0) {
      const change = frameworkChanges[0];
      if (change.after && !change.before) {
        recommendations.push("🎯 Enable CAGEERF or ReACT framework for optimal results");
      } else if (!change.after && change.before) {
        recommendations.push("🚀 Framework overhead removed - consider basic prompt execution");
      }
    }
    // Gate recommendations
    const gateChanges = changes.filter(c => c.type === 'gates');
    if (gateChanges.some(c => c.description.includes('Added'))) {
      recommendations.push("🔒 New quality gates will improve execution reliability");
    }
    // Confidence recommendations
    const confidenceChanges = changes.filter(c => c.type === 'confidence');
    if (confidenceChanges.length > 0) {
      const change = confidenceChanges[0];
      if (change.impact === 'negative') {
        recommendations.push("⚠️ Lower confidence suggests prompt may need refinement");
      } else if (change.impact === 'positive') {
        recommendations.push("✅ Improved confidence indicates better prompt structure");
      }
    }
    return recommendations;
  }

  /**
   * Generate change summary for display
   */
  generateDisplaySummary(result: ComparisonResult): string | null {
    if (!result.hasChanges) {
      return null;
    }
    let summary = result.summary;
    if (result.recommendations.length > 0) {
      summary += `\n\n💡 **Recommendations**:\n`;
      result.recommendations.forEach((rec, i) => {
        summary += `${i + 1}. ${rec}\n`;
      });
    }
    return summary;
  }

  /**
   * Track analysis evolution over time
   */
  trackEvolution(
    promptId: string,
    classification: PromptClassification
  ): void {
    // Log significant analysis data for evolution tracking
    this.logger.debug(`Analysis evolution for ${promptId}:`, {
      executionType: classification.executionType,
      requiresFramework: classification.requiresFramework,
      confidence: classification.confidence,
      gates: classification.suggestedGates.length,
      analysisMode: classification.analysisMode,
      timestamp: new Date().toISOString()
    });
  }

  /**
   * Assess overall improvement direction
   */
  assessImprovement(changes: ComparisonChange[]): 'improved' | 'degraded' | 'neutral' {
    const positiveChanges = changes.filter(c => c.impact === 'positive').length;
    const negativeChanges = changes.filter(c => c.impact === 'negative').length;
    if (positiveChanges > negativeChanges) {
      return 'improved';
    } else if (negativeChanges > positiveChanges) {
      return 'degraded';
    }
    return 'neutral';
  }
}
```
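
A before/after sketch. `PromptClassification` is defined in `prompt-manager/core/types.ts`, which is not on this page; the literals below fill in only the fields the engine reads (`executionType`, `requiresFramework`, `confidence`, `suggestedGates`, `analysisMode`) and may need extending to satisfy the full interface.

```typescript
import { createLogger } from "./logging/index.js";
import { ComparisonEngine } from "./mcp-tools/prompt-manager/analysis/comparison-engine.js";
import type { PromptClassification } from "./mcp-tools/prompt-manager/core/types.js";

const logger = createLogger({ logFile: "./analysis.log", transport: "sse" });
const engine = new ComparisonEngine(logger);

const before = {
  executionType: "prompt",
  requiresFramework: false,
  confidence: 0.55,
  suggestedGates: [],
  analysisMode: "structural",
} as PromptClassification;

const after = {
  ...before,
  executionType: "template",
  requiresFramework: true,
  confidence: 0.8,
  suggestedGates: ["content-structure"],
} as PromptClassification;

const result = engine.compareAnalyses(before, after, "deep_analysis");
console.log(result.summary);
// "📊 **Analysis Changes**: 🔄 **Type**: prompt → template • 🧠 **Framework**: enabled • ✅ **Added Gates**"
console.log(engine.assessImprovement(result.changes)); // "improved"
```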
--------------------------------------------------------------------------------
/server/src/prompts/converter.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Prompt Converter Module
* Handles converting markdown prompts to JSON structure with validation
*/
import path from "path";
import { Logger } from "../logging/index.js";
import type { ConvertedPrompt } from "../execution/types.js";
import type { PromptData } from "./types.js";
import { isChainPrompt } from "../utils/chainUtils.js";
import { PromptLoader } from "./loader.js";
/**
* Prompt Converter class
*/
export class PromptConverter {
private logger: Logger;
private loader: PromptLoader;
constructor(logger: Logger, loader?: PromptLoader) {
this.logger = logger;
this.loader = loader || new PromptLoader(logger);
}
/**
* Convert markdown prompts to JSON structure in memory
*/
async convertMarkdownPromptsToJson(
promptsData: PromptData[],
basePath?: string
): Promise<ConvertedPrompt[]> {
const convertedPrompts: ConvertedPrompt[] = [];
this.logger.info(
`Converting ${promptsData.length} markdown prompts to JSON structure...`
);
for (const promptData of promptsData) {
try {
// Determine base path for loading files
const fileBasePath = basePath || path.join(process.cwd(), "..");
// Load the prompt file content using the loader
const promptFile = await this.loader.loadPromptFile(
promptData.file,
fileBasePath
);
// Load chain steps from markdown-embedded format
let chainSteps = promptFile.chainSteps || [];
// Create converted prompt structure
const convertedPrompt: ConvertedPrompt = {
id: promptData.id,
name: promptData.name,
description: promptData.description,
category: promptData.category,
systemMessage: promptFile.systemMessage,
userMessageTemplate: promptFile.userMessageTemplate,
arguments: promptData.arguments.map((arg) => ({
name: arg.name,
description: arg.description,
required: arg.required,
})),
// Include chain information from markdown-embedded chainSteps
chainSteps: chainSteps,
// Phase 2: Include gate configuration from prompt file
gateConfiguration: promptFile.gateConfiguration,
tools: promptData.tools || false,
onEmptyInvocation:
promptData.onEmptyInvocation || "execute_if_possible",
};
// NOTE: All chains now use markdown-embedded format
// Modular chain system has been removed - chains are defined inline within markdown files
if (isChainPrompt(convertedPrompt) && chainSteps.length === 0) {
this.logger.debug(`Chain prompt '${convertedPrompt.id}' has no embedded chain steps - will be treated as single prompt`);
}
// Validate the onEmptyInvocation field
if (
promptData.onEmptyInvocation &&
promptData.onEmptyInvocation !== "return_template" &&
promptData.onEmptyInvocation !== "execute_if_possible"
) {
this.logger.warn(
`Prompt '${promptData.id}' has an invalid 'onEmptyInvocation' value: "${promptData.onEmptyInvocation}". ` +
`Defaulting to "execute_if_possible". Allowed values are "return_template" or "execute_if_possible".`
);
convertedPrompt.onEmptyInvocation = "execute_if_possible";
}
// Validate the converted prompt
const validation = this.validateConvertedPrompt(convertedPrompt);
if (!validation.isValid) {
this.logger.warn(
`Prompt ${
promptData.id
} has validation issues: ${validation.errors.join(", ")}`
);
// Continue processing even with warnings
}
convertedPrompts.push(convertedPrompt);
} catch (error) {
this.logger.error(`Error converting prompt ${promptData.id}:`, error);
// Continue with other prompts even if one fails
}
}
this.logger.info(
`Successfully converted ${convertedPrompts.length} prompts`
);
return convertedPrompts;
}
/**
* Validate a converted prompt
*/
validateConvertedPrompt(prompt: ConvertedPrompt): {
isValid: boolean;
errors: string[];
warnings: string[];
} {
const errors: string[] = [];
const warnings: string[] = [];
// Check required fields
if (!prompt.id) {
errors.push("Missing required field: id");
}
if (!prompt.name) {
errors.push("Missing required field: name");
}
if (!prompt.category) {
errors.push("Missing required field: category");
}
// Check that either userMessageTemplate exists or it's a valid chain
if (!prompt.userMessageTemplate && !((prompt.chainSteps?.length || 0) > 0)) {
errors.push(
"Either userMessageTemplate must be provided or prompt must be a valid chain"
);
}
// Validate chain prompts
if ((prompt.chainSteps?.length || 0) > 0) {
if (!prompt.chainSteps || prompt.chainSteps.length === 0) {
errors.push("Chain prompt must have at least one chain step");
} else {
// Validate each chain step
prompt.chainSteps.forEach((step, index) => {
if (!step.promptId) {
errors.push(`Chain step ${index + 1} missing promptId`);
}
if (!step.stepName) {
errors.push(`Chain step ${index + 1} missing stepName`);
}
});
}
}
// Validate arguments
if (prompt.arguments) {
prompt.arguments.forEach((arg, index) => {
if (!arg.name) {
errors.push(`Argument ${index + 1} missing name`);
}
if (typeof arg.required !== "boolean") {
warnings.push(
`Argument ${arg.name || index + 1} has invalid required value`
);
}
});
}
// Check for placeholder validation in template
if (prompt.userMessageTemplate) {
// Validate template syntax - reject Handlebars syntax
if (prompt.userMessageTemplate.includes('{{#if') ||
prompt.userMessageTemplate.includes('{{/if') ||
prompt.userMessageTemplate.includes('{{#each') ||
prompt.userMessageTemplate.includes('{{/each') ||
prompt.userMessageTemplate.includes('{{#unless') ||
prompt.userMessageTemplate.includes('{{/unless')) {
errors.push(
`Handlebars syntax detected in template. This system uses Nunjucks syntax.\n` +
`Replace: {{#if condition}} → {% if condition %}\n` +
`Replace: {{/if}} → {% endif %}\n` +
`Replace: {{#each items}} → {% for item in items %}\n` +
`Replace: {{/each}} → {% endfor %}`
);
}
const placeholders = this.extractPlaceholders(prompt.userMessageTemplate);
const argumentNames = prompt.arguments.map((arg) => arg.name);
// Find placeholders that don't have corresponding arguments
const orphanedPlaceholders = placeholders.filter(
(placeholder) =>
!argumentNames.includes(placeholder) &&
!this.isSpecialPlaceholder(placeholder)
);
if (orphanedPlaceholders.length > 0) {
warnings.push(
`Template has placeholders without arguments: ${orphanedPlaceholders.join(
", "
)}`
);
}
// Find arguments that aren't used in the template
const unusedArguments = argumentNames.filter(
(argName) => !placeholders.includes(argName)
);
if (unusedArguments.length > 0) {
warnings.push(
`Arguments not used in template: ${unusedArguments.join(", ")}`
);
}
}
return {
isValid: errors.length === 0,
errors,
warnings,
};
}
/**
* Extract placeholders from a template string
*/
private extractPlaceholders(template: string): string[] {
const placeholderRegex = /\{\{([^}]+)\}\}/g;
const placeholders: string[] = [];
let match;
while ((match = placeholderRegex.exec(template)) !== null) {
// Strip Nunjucks filters so "{{ name | upper }}" is indexed as "name"
const placeholder = match[1].split("|")[0].trim();
if (!placeholders.includes(placeholder)) {
placeholders.push(placeholder);
}
}
return placeholders;
}
/**
* Check if a placeholder is a special system placeholder
*/
private isSpecialPlaceholder(placeholder: string): boolean {
const specialPlaceholders = [
"previous_message",
"tools_available",
"current_step_number",
"total_steps",
"current_step_name",
"step_number",
"step_name",
];
return (
specialPlaceholders.includes(placeholder) ||
placeholder.startsWith("ref:")
);
}
/**
* Get conversion statistics
*/
getConversionStats(
originalCount: number,
convertedPrompts: ConvertedPrompt[]
): {
totalOriginal: number;
totalConverted: number;
successRate: number;
chainPrompts: number;
regularPrompts: number;
totalArguments: number;
} {
const chainPrompts = convertedPrompts.filter((p) => isChainPrompt(p)).length;
const regularPrompts = convertedPrompts.length - chainPrompts;
const totalArguments = convertedPrompts.reduce(
(sum, p) => sum + p.arguments.length,
0
);
return {
totalOriginal: originalCount,
totalConverted: convertedPrompts.length,
successRate:
originalCount > 0 ? convertedPrompts.length / originalCount : 0,
chainPrompts,
regularPrompts,
totalArguments,
};
}
}
```
--------------------------------------------------------------------------------
/scripts/setup-windows-testing.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Windows Container Testing Setup Script
# Sets up multiple approaches for testing Windows compatibility locally
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
print_status "Setting up Windows container testing environment..."
# Check current system
print_status "Checking system capabilities..."
echo "Docker version: $(docker --version)"
echo "Docker info:"
docker system info | grep -E "(Operating System|OSType|Architecture|Kernel Version)"
# Method 1: Check if Windows containers are available
print_status "Method 1: Checking for native Windows container support..."
if docker pull mcr.microsoft.com/windows/nanoserver:ltsc2022 2>/dev/null; then
print_success "Native Windows containers are available!"
WINDOWS_NATIVE=true
else
print_warning "Native Windows containers not available (expected in WSL2/Linux Docker)"
WINDOWS_NATIVE=false
fi
# Method 2: Set up Node.js Windows simulation
print_status "Method 2: Setting up Node.js Windows simulation..."
if docker pull node:18-alpine 2>/dev/null; then
print_success "Node.js Alpine images available for cross-platform testing"
NODE_SIMULATION=true
else
print_error "Node.js images not available"
NODE_SIMULATION=false
fi
# Method 3: Create Windows environment simulation (inline)
print_status "Method 3: Windows environment simulation (using inline variables)..."
mkdir -p scripts/windows-tests
cat > scripts/windows-tests/windows-env.sh << 'EOF'
#!/bin/bash
# Windows environment simulation - no sensitive files
export RUNNER_OS=Windows
export PATH="/c/Windows/System32:/c/Windows:/c/Windows/System32/Wbem:$PATH"
export USERPROFILE=/c/Users/runneradmin
export TEMP=/c/Users/runneradmin/AppData/Local/Temp
export TMP=/c/Users/runneradmin/AppData/Local/Temp
export HOMEDRIVE=C:
export HOMEPATH=/Users/runneradmin
export PATHEXT=.COM:.EXE:.BAT:.CMD:.VBS:.VBE:.JS:.JSE:.WSF:.WSH:.MSC
echo "Windows environment variables set"
EOF
chmod +x scripts/windows-tests/windows-env.sh
# Method 4: Enhanced Act configuration for Windows testing
print_status "Method 4: Creating enhanced Act configuration..."
cp .actrc .actrc.backup
cat > .actrc.windows-enhanced << 'EOF'
# Enhanced Windows Testing Configuration
# Primary testing (Linux-based but Windows-compatible Node.js testing)
-P ubuntu-latest=catthehacker/ubuntu:act-22.04
-P windows-latest=node:18-alpine
-P windows-2022=node:18-alpine
-P windows-2019=node:16-alpine
-P macos-latest=catthehacker/ubuntu:act-22.04
# Environment variables for Windows simulation
--env NODE_ENV=test
--env CI=true
--env RUNNER_OS=Windows
--env RUNNER_TEMP=/tmp
--env RUNNER_TOOL_CACHE=/opt/hostedtoolcache
# Enhanced settings
--verbose
--container-daemon-socket unix:///var/run/docker.sock
--artifact-server-path /tmp/act-artifacts
--bind
EOF
# Method 5: Create Windows-specific test scripts
print_status "Method 5: Creating Windows-specific test scenarios..."
mkdir -p scripts/windows-tests
cat > scripts/windows-tests/test-windows-paths.js << 'EOF'
// Test Windows path handling
const path = require('path');
const os = require('os');
console.log('Testing Windows-compatible path handling...');
console.log('Platform:', os.platform());
console.log('Path separator:', path.sep);
console.log('Path delimiter:', path.delimiter);
// Test path operations that should work cross-platform
const testPath = path.join('server', 'src', 'index.ts');
console.log('Cross-platform path:', testPath);
// Test environment variables
console.log('NODE_ENV:', process.env.NODE_ENV);
console.log('RUNNER_OS:', process.env.RUNNER_OS);
console.log('✅ Windows compatibility test passed');
EOF
cat > scripts/windows-tests/test-windows-startup.sh << 'EOF'
#!/bin/bash
# Test Windows-like startup scenarios
echo "Testing Windows-compatible startup..."
# Simulate Windows environment
export RUNNER_OS=Windows
export PATH="/c/Windows/System32:$PATH"
# Test Node.js startup
cd server
echo "Testing Node.js startup in Windows-like environment..."
node --version
npm --version
# Test our application
echo "Testing MCP server startup..."
npm run help
echo "✅ Windows startup test completed"
EOF
chmod +x scripts/windows-tests/test-windows-startup.sh
# Method 6: Create multi-platform test runner
print_status "Method 6: Creating comprehensive test runner..."
cat > scripts/test-all-platforms.sh << 'EOF'
#!/bin/bash
# Comprehensive multi-platform testing script
set -e
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_test() {
echo -e "${BLUE}[TEST]${NC} $1"
}
print_pass() {
echo -e "${GREEN}[PASS]${NC} $1"
}
print_fail() {
echo -e "${RED}[FAIL]${NC} $1"
}
# Test 1: Ubuntu (Linux) - Primary platform
print_test "Testing Ubuntu/Linux platform..."
if ./local-test.sh dry-run code-quality >/dev/null 2>&1; then
print_pass "Ubuntu/Linux testing works"
else
print_fail "Ubuntu/Linux testing failed"
fi
# Test 2: Windows simulation with Node.js
print_test "Testing Windows simulation (Node.js)..."
if docker run --rm -v "$PWD":/workspace -w /workspace/server node:18-alpine npm --version >/dev/null 2>&1; then
print_pass "Windows simulation (Node.js) works"
else
print_fail "Windows simulation (Node.js) failed"
fi
# Test 3: Cross-platform Node.js compatibility
print_test "Testing cross-platform Node.js compatibility..."
if node scripts/windows-tests/test-windows-paths.js >/dev/null 2>&1; then
print_pass "Cross-platform compatibility works"
else
print_fail "Cross-platform compatibility failed"
fi
# Test 4: Windows environment simulation
print_test "Testing Windows environment simulation..."
if source scripts/windows-tests/windows-env.sh >/dev/null 2>&1; then
print_pass "Windows environment simulation works"
else
print_fail "Windows environment simulation failed"
fi
echo ""
echo "Multi-platform testing summary completed!"
EOF
chmod +x scripts/test-all-platforms.sh
# Create usage instructions
print_status "Creating usage instructions..."
cat > WINDOWS-TESTING.md << 'EOF'
# Windows Container Testing Setup
This setup provides multiple approaches for testing Windows compatibility locally in a WSL2/Linux Docker environment.
## Available Methods
### Method 1: Native Windows Containers (if available)
```bash
# Only works if Docker is configured for Windows containers
docker pull mcr.microsoft.com/windows/nanoserver:ltsc2022
```
### Method 2: Node.js Windows Simulation
```bash
# Use Alpine Node.js images for lightweight Windows-compatible testing
./local-test.sh run code-quality --actrc .actrc.windows-enhanced
```
### Method 3: Cross-Platform Node.js Testing
```bash
# Test Node.js compatibility across platforms
node scripts/windows-tests/test-windows-paths.js
scripts/windows-tests/test-windows-startup.sh
```
### Method 4: Environment Simulation
```bash
# Load Windows-like environment variables
source scripts/windows-tests/windows-env.sh
```
### Method 5: Comprehensive Testing
```bash
# Run all platform tests
scripts/test-all-platforms.sh
```
## Usage Examples
### Quick Windows Simulation Test
```bash
# Test with Windows-like configuration
ACT_RC=.actrc.windows-enhanced ./local-test.sh dry-run code-quality
```
### Cross-Platform Build Test
```bash
# Test Node.js builds across platforms
docker run --rm -v "$PWD":/workspace -w /workspace/server node:18-alpine npm run build
# Note: the Windows image below only runs on a Windows host with Docker in Windows-container mode
docker run --rm -v "$PWD":/workspace -w /workspace/server node:18-windowsservercore npm run build
```
### Comprehensive CI Simulation
```bash
# Simulate full CI pipeline with all platforms
./scripts/test-all-platforms.sh
```
## Configuration Files
- `.actrc.windows-enhanced` - Enhanced Act configuration for Windows testing
- `scripts/windows-tests/windows-env.sh` - Windows environment simulation script
- `scripts/windows-tests/` - Windows-specific test scripts
- `scripts/test-all-platforms.sh` - Comprehensive test runner
## Notes
- True Windows containers require Windows host or Docker Desktop Windows mode
- This setup provides the next best thing: cross-platform Node.js testing
- Environment simulation helps catch Windows-specific path and environment issues
- All tests are designed to work in WSL2/Linux Docker environments
## Troubleshooting
1. **Windows containers not available**: This is expected in WSL2. Use Node.js simulation instead.
2. **Path issues**: Use Node.js `path` module for cross-platform path handling (see the example below).
3. **Environment variables**: Test with both Linux and Windows-style environment variables.
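For example, a quick portable path check that uses only Node's built-in `path` module:
```bash
# Join paths portably; Node picks the correct separator for the host OS
node -e "const path = require('path'); console.log(path.join('server', 'dist', 'index.js'))"
```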
EOF
# Summary and next steps
print_status "Setup complete! Summary:"
echo ""
print_success "✅ Windows testing environment configured"
print_success "✅ Multiple testing approaches available"
print_success "✅ Cross-platform Node.js testing ready"
print_success "✅ Environment simulation configured"
print_success "✅ Comprehensive test runner created"
echo ""
print_status "Next steps:"
echo "1. Review WINDOWS-TESTING.md for usage instructions"
echo "2. Run: scripts/test-all-platforms.sh"
echo "3. Test specific scenarios with the enhanced Act configuration"
echo ""
print_status "Available test commands:"
echo "• scripts/test-all-platforms.sh - Comprehensive testing"
echo "• ACT_RC=.actrc.windows-enhanced ./local-test.sh dry-run code-quality"
echo "• node scripts/windows-tests/test-windows-paths.js"
echo "• scripts/windows-tests/test-windows-startup.sh"
```
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
```markdown
# Troubleshooting Guide
This guide helps you diagnose and fix common issues with the Claude Prompts MCP Server.
## 🚨 Quick Fixes for Common Issues
### Server Won't Start
**Symptoms:**
- Server exits immediately after startup
- "Unable to determine server root directory" error
- Module not found errors
**Solutions:**
1. **Set Environment Variables (Recommended)**
```bash
# Windows
set MCP_SERVER_ROOT=E:\path\to\claude-prompts-mcp\server
set MCP_PROMPTS_CONFIG_PATH=E:\path\to\claude-prompts-mcp\server\promptsConfig.json
# macOS/Linux
export MCP_SERVER_ROOT=/path/to/claude-prompts-mcp/server
export MCP_PROMPTS_CONFIG_PATH=/path/to/claude-prompts-mcp/server/promptsConfig.json
```
2. **Use Absolute Paths in Claude Desktop Config**
```json
{
"mcpServers": {
"claude-prompts-mcp": {
"command": "node",
"args": ["E:\\full\\path\\to\\server\\dist\\index.js"],
"env": {
"MCP_SERVER_ROOT": "E:\\full\\path\\to\\server",
"MCP_PROMPTS_CONFIG_PATH": "E:\\full\\path\\to\\server\\promptsConfig.json"
}
}
}
}
```
3. **Check Working Directory**
```bash
cd claude-prompts-mcp/server
npm start
```
### Claude Desktop Can't Find the Server
**Symptoms:**
- Claude says MCP server is unavailable
- No prompts appear in Claude
- Connection timeout errors
**Diagnostic Steps:**
1. **Test Server Independently**
```bash
cd server
npm run build
node dist/index.js --transport=stdio --verbose
```
2. **Check Claude Desktop Logs**
- Windows: `%APPDATA%\Claude\logs\`
- macOS: `~/Library/Logs/Claude/`
- Look for MCP server errors
3. **Verify Configuration**
```bash
# Check if config files exist
ls -la config.json promptsConfig.json
# Validate JSON syntax
node -e "console.log(JSON.parse(require('fs').readFileSync('config.json')))"
```
### Prompts Not Loading
**Symptoms:**
- `>>listprompts` shows no results
- "No prompts loaded" in server logs
- Prompt files exist but aren't recognized
**Solutions:**
1. **Check Prompts Configuration**
```bash
# Verify promptsConfig.json syntax
node -e "console.log(JSON.parse(require('fs').readFileSync('promptsConfig.json')))"
# Check category imports
ls -la prompts/*/prompts.json
```
2. **Validate Prompt File Structure**
```bash
# Check category-specific prompts.json files
find prompts -name "prompts.json" -exec echo "=== {} ===" \; -exec cat {} \;
```
3. **Test Individual Categories**
```bash
# Start with verbose logging
npm start -- --verbose
```
### Hot-Reload Not Working
**Symptoms:**
- Changes to prompts don't appear without restart
- `>>reload_prompts` fails
- File watchers not triggering
**Solutions:**
1. **Manual Reload**
```bash
>>reload_prompts reason="manual test"
```
2. **Check File Permissions**
```bash
# Ensure files are writable
ls -la prompts/*/prompts.json
chmod 644 prompts/*/prompts.json
```
3. **Restart Server Process**
```bash
# Full restart
npm stop
npm start
```
## 🔍 Diagnostic Tools
### Server Health Check
Run diagnostic commands to check server health:
```bash
# Check if server responds
curl http://localhost:9090/status
# Test MCP tools directly
echo '{"method": "listprompts", "params": {}}' | node dist/index.js --transport=stdio
```
### Verbose Logging
Enable detailed logging for troubleshooting:
```bash
# Start with maximum verbosity
npm start -- --verbose --debug-startup
# Or set log level in config.json
{
"logging": {
"level": "debug",
"directory": "./logs"
}
}
```
### Path Resolution Debugging
Use built-in path detection diagnostics:
```bash
# Test path detection strategies
node dist/index.js --verbose
```
The server will show detailed information about:
- Environment variables
- Working directory detection
- Config file resolution
- Prompt file loading
## 🐛 Common Error Messages
### "Unable to determine server root directory"
**Cause:** Path detection failed in Claude Desktop environment
**Fix:**
1. Set `MCP_SERVER_ROOT` environment variable
2. Use absolute paths in Claude Desktop config
3. Ensure working directory is correct
### "Prompts configuration file NOT FOUND"
**Cause:** promptsConfig.json path is incorrect
**Fix:**
1. Verify file exists: `ls -la promptsConfig.json`
2. Check file permissions
### "Error loading prompt: [filename]"
**Cause:** Invalid markdown format or missing sections
**Fix:**
1. Validate markdown syntax
2. Ensure required sections exist (see the minimal example below):
- Title (# heading)
- Description
- User Message Template (## heading)
3. Check for special characters in filenames
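For reference, a minimal prompt file covering these sections might look like this (the file name and wording are illustrative):
```bash
# Create an illustrative minimal prompt file
cat > prompts/test/example_prompt.md << 'EOF'
# Example Prompt

A short description of what this prompt does.

## User Message Template

Summarize the following content: {{content}}
EOF
```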
### "Module not found" errors
**Cause:** Dependencies not installed or build incomplete
**Fix:**
```bash
# Reinstall dependencies
rm -rf node_modules package-lock.json
npm install
# Rebuild project
npm run build
```
## 🔧 Advanced Troubleshooting
### Claude Desktop Integration Issues
**Problem:** Server works standalone but fails in Claude Desktop
**Investigation:**
1. **Environment Differences**
```bash
# Compare environments
node -e "console.log(process.env)" > standalone-env.json
# Then check Claude Desktop logs for environment
```
2. **Working Directory Issues**
```javascript
// Add to server startup for debugging
console.log("Working directory:", process.cwd());
console.log("Script location:", process.argv[1]);
console.log("__dirname equivalent:", new URL(".", import.meta.url).pathname);
```
3. **Permission Problems**
```bash
# Check if Claude Desktop can access files
ls -la dist/index.js
chmod +x dist/index.js
```
### Network and Transport Issues
**Problem:** SSE transport fails or connection drops
**Solutions:**
1. **Check Port Availability**
```bash
netstat -an | grep 9090
lsof -i :9090
```
2. **Test Different Transport**
```json
{
"transports": {
"default": "stdio",
"sse": { "enabled": false }
}
}
```
3. **Firewall Configuration**
- Ensure port 9090 is open (see the commands below)
- Check antivirus software
- Verify localhost access
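For example, on Linux you might verify the listener and open the port (commands assume `ufw`; adapt to your firewall):
```bash
# Check for a listener on 9090, then allow the port through ufw
lsof -i :9090
sudo ufw allow 9090/tcp
```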
## 🛠️ Development and Testing
### Running Tests
```bash
# Run test suite
npm test
# Run with coverage
npm run test:coverage
# Test specific modules
npm test -- --grep "PromptManager"
```
### Manual Testing Process
```bash
# 1. Clean build
npm run clean
npm run build
# 2. Test configuration loading
node -e "
const config = require('./dist/config/index.js');
const manager = new config.ConfigManager('./config.json');
manager.loadConfig().then(() => console.log('Config OK'));
"
# 3. Test prompt loading
node -e "
const prompts = require('./dist/prompts/index.js');
// Test prompt loading logic
"
# 4. Test MCP tools
echo '{"method": "listprompts", "params": {}}' | node dist/index.js --transport=stdio
```
### Creating Minimal Test Cases
For bug reports, create minimal reproduction:
```bash
# Minimal server setup
mkdir test-server
cd test-server
npm init -y
npm install @modelcontextprotocol/sdk
# Minimal config.json
echo '{
"server": { "name": "test", "version": "1.0.0" },
"prompts": { "file": "promptsConfig.json" }
}' > config.json
# Minimal promptsConfig.json
echo '{
"categories": [{"id": "test", "name": "Test"}],
"imports": ["prompts/test/prompts.json"]
}' > promptsConfig.json
```
## 📋 Collecting Debug Information
When reporting issues, include:
### System Information
```bash
# System details
node --version
npm --version
uname -a # or systeminfo on Windows
# Project information
git rev-parse HEAD
npm list --depth=0
```
### Server Logs
```bash
# Capture server startup with full verbosity
npm start -- --verbose --debug-startup 2>&1 | tee server-debug.log
```
### Configuration Files
```bash
# Sanitize and share configs (remove sensitive data)
cat config.json
cat promptsConfig.json
find prompts -name "prompts.json" -exec echo "=== {} ===" \; -exec cat {} \;
```
### Claude Desktop Configuration
```json
// Share your claude_desktop_config.json (remove paths if needed)
{
"mcpServers": {
"claude-prompts-mcp": {
// Your configuration here
}
}
}
```
## 🚀 Performance Optimization
### Startup Time Optimization
```bash
# Profile startup time
time npm start
# Optimize with environment variables
export MCP_SERVER_ROOT="/full/path/to/server"
export MCP_PROMPTS_CONFIG_PATH="/full/path/to/server/promptsConfig.json"
```
### Memory Usage Optimization
Enable memory monitoring in `config.json`:
```json
{
"logging": {
"level": "info",
"memoryMonitoring": true
}
}
```
### Prompt Loading Optimization
- Keep prompt files reasonably sized (< 100KB each; see the check below)
- Limit number of categories (< 20 for best performance)
- Use text references for very long content
- Avoid deeply nested category structures
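A quick check for oversized prompt files (the 100KB threshold mirrors the guidance above; assumes GNU `find`):
```bash
# List prompt files larger than 100KB
find prompts -name "*.md" -size +100k -exec ls -lh {} \;
```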
## 🆘 Getting Help
If you're still experiencing issues:
1. **Search Existing Issues**: Check [GitHub Issues](https://github.com/minipuft/claude-prompts-mcp/issues)
2. **Create Detailed Bug Report**:
- Include error messages and logs
- Share your configuration (sanitized)
- Provide reproduction steps
- Include system information
3. **Join Community Discussions**: [GitHub Discussions](https://github.com/minipuft/claude-prompts-mcp/discussions)
4. **Emergency Debugging**: Use `--verbose --debug-startup` flags for maximum diagnostic output
Remember: Most issues are related to path resolution or configuration problems. Setting the environment variables `MCP_SERVER_ROOT` and `MCP_PROMPTS_CONFIG_PATH` solves 90% of setup issues! 🎯
```
--------------------------------------------------------------------------------
/server/tests/integration/mcp-tools.test.ts:
--------------------------------------------------------------------------------
```typescript
/**
* MCP Tools Integration Tests - Consolidated Architecture
* Tests for the current 3 intelligent MCP tools with enhanced command routing
*/
import { createConsolidatedPromptEngine } from '../../dist/mcp-tools/prompt-engine/index.js';
import { createConsolidatedPromptManager } from '../../dist/mcp-tools/prompt-manager/index.js';
import { createConsolidatedSystemControl } from '../../dist/mcp-tools/system-control.js';
import { MockLogger, MockMcpServer, testPrompts } from '../helpers/test-helpers.js';
describe('Consolidated MCP Tools Integration', () => {
let logger: MockLogger;
let mockMcpServer: MockMcpServer;
let promptEngine: any;
let promptManager: any;
let systemControl: any;
beforeEach(() => {
logger = new MockLogger();
mockMcpServer = new MockMcpServer();
// Updated mock dependencies to match current architecture
const mockPromptManagerComponent = {
processTemplateAsync: () => Promise.resolve('mocked template result'),
convertedPrompts: [testPrompts.simple],
promptsData: [testPrompts.simple],
loadAndConvertPrompts: () => Promise.resolve([testPrompts.simple])
};
const mockSemanticAnalyzer = {
analyzePrompt: () => Promise.resolve({
executionType: 'template',
requiresExecution: true,
confidence: 0.8
}),
getConfig: () => ({
llmIntegration: { enabled: false }
})
};
const mockFrameworkManager = {
getCurrentFramework: () => ({ frameworkId: 'CAGEERF', frameworkName: 'CAGEERF' }),
generateExecutionContext: () => ({
systemPrompt: 'test system prompt',
framework: 'CAGEERF'
})
};
const mockConfigManager = {
getConfig: () => ({
server: { name: 'test-server', version: '1.0.0' },
gates: { definitionsDirectory: "src/gates/definitions", templatesDirectory: "src/gates/templates" }
}),
getPromptsFilePath: () => '/test/prompts.json'
};
const mockConversationManager = {
addToConversationHistory: () => {},
getConversationHistory: () => [],
saveStepResult: () => {},
getStepResult: () => null,
setChainSessionManager: (manager: any) => {
// Mock implementation that accepts the chain session manager
// This prevents the null reference error in ChainSessionManager constructor
},
setTextReferenceManager: (manager: any) => {
// Mock implementation for text reference manager integration
}
};
const mockTextReferenceManager = {
extractReferences: () => [],
resolveReferences: () => {},
addReference: () => {},
saveStepResult: (stepId: string, data: any) => {
// Mock implementation for step result storage
},
getStepResult: (stepId: string) => {
// Mock implementation returns null for non-existent steps
return null;
}
};
const mockMcpToolsManager = {
initialize: () => {},
getTools: () => [],
promptManagerTool: { handleAction: () => Promise.resolve({ content: [], isError: false }) },
systemControl: { handleAction: () => Promise.resolve({ content: [], isError: false }) }
};
// Create consolidated tools with complete dependencies
promptEngine = createConsolidatedPromptEngine(
logger,
mockMcpServer as any,
mockPromptManagerComponent as any,
mockConfigManager as any,
mockSemanticAnalyzer as any,
mockConversationManager as any,
mockTextReferenceManager as any,
mockMcpToolsManager
);
promptManager = createConsolidatedPromptManager(
logger,
mockMcpServer as any,
mockConfigManager as any,
mockSemanticAnalyzer as any,
undefined, // frameworkStateManager
mockFrameworkManager as any,
() => Promise.resolve(), // onRefresh
() => Promise.resolve() // onRestart
);
systemControl = createConsolidatedSystemControl(
logger,
mockMcpServer as any,
mockFrameworkManager as any,
undefined, // frameworkStateManager
mockMcpToolsManager
);
// Simulate MCP tool registration process for performance test validation
mockMcpServer.tool('prompt_engine', 'Unified prompt execution engine', { type: 'object' });
mockMcpServer.tool('prompt_manager', 'Complete prompt lifecycle management', { type: 'object' });
mockMcpServer.tool('system_control', 'Framework and system management', { type: 'object' });
});
afterEach(() => {
logger.clear();
mockMcpServer.clear();
});
describe('Consolidated Prompt Engine', () => {
test('should create prompt engine tool', () => {
expect(promptEngine).toBeDefined();
expect(typeof promptEngine.executePromptCommand).toBe('function');
});
test('should have routing detection capabilities', () => {
expect(promptEngine).toBeDefined();
// The routing functionality is now integrated into executePromptCommand
expect(typeof promptEngine.executePromptCommand).toBe('function');
});
});
describe('Consolidated Prompt Manager', () => {
test('should create prompt manager tool', () => {
expect(promptManager).toBeDefined();
expect(typeof promptManager.handleAction).toBe('function');
});
test('should handle prompt lifecycle management', () => {
expect(promptManager).toBeDefined();
expect(typeof promptManager.handleAction).toBe('function');
});
test('should support intelligent filtering', () => {
expect(promptManager).toBeDefined();
// The consolidated prompt manager should support advanced filtering via handleAction
expect(typeof promptManager.handleAction).toBe('function');
});
});
describe('Consolidated System Control', () => {
test('should create system control tool', () => {
expect(systemControl).toBeDefined();
expect(typeof systemControl.handleAction).toBe('function');
});
test('should handle framework management', () => {
expect(systemControl).toBeDefined();
expect(typeof systemControl.handleAction).toBe('function');
});
test('should provide system analytics', () => {
expect(systemControl).toBeDefined();
// The system control tool should provide analytics capabilities via handleAction
expect(typeof systemControl.handleAction).toBe('function');
});
});
describe('Consolidated Tools Integration', () => {
test('tools should be functional and have correct interfaces', () => {
// Test that all tools exist and have proper interfaces
expect(promptEngine).toBeDefined();
expect(promptManager).toBeDefined();
expect(systemControl).toBeDefined();
// Test that all tools have the correct method signatures
expect(typeof promptEngine.executePromptCommand).toBe('function');
expect(typeof promptManager.handleAction).toBe('function');
expect(typeof systemControl.handleAction).toBe('function');
});
test('should maintain tool consolidation benefits', () => {
// The consolidated architecture provides 3 intelligent tools instead of 24+ scattered tools
const tools = [promptEngine, promptManager, systemControl];
// Should have exactly 3 tools
expect(tools.length).toBe(3);
// All tools should be functional
tools.forEach(tool => {
expect(tool).toBeDefined();
});
});
});
describe('Error Handling', () => {
test('should handle invalid tool creation gracefully', () => {
expect(() => {
// Create minimal mock objects that won't cause null reference errors
const minimalLogger = { debug: () => {}, info: () => {}, warn: () => {}, error: () => {} };
const minimalPromptManager = { loadAndConvertPrompts: () => Promise.resolve([]) };
const minimalConfigManager = { getConfig: () => ({ server: {}, gates: {} }) };
const minimalSemanticAnalyzer = { analyzePrompt: () => Promise.resolve({ executionType: 'prompt' }) };
const minimalConversationManager = {
setChainSessionManager: () => {},
setTextReferenceManager: () => {}
};
const minimalTextReferenceManager = { saveStepResult: () => {}, getStepResult: () => null };
createConsolidatedPromptEngine(
minimalLogger as any,
mockMcpServer as any,
minimalPromptManager as any,
minimalConfigManager as any,
minimalSemanticAnalyzer as any,
minimalConversationManager as any,
minimalTextReferenceManager as any,
undefined // mcpToolsManager optional
);
}).not.toThrow();
});
test('should handle empty data gracefully', () => {
expect(promptEngine).toBeDefined();
expect(promptManager).toBeDefined();
expect(systemControl).toBeDefined();
});
});
describe('Performance', () => {
test('should register consolidated tools efficiently', () => {
const start = Date.now();
// Tools should already be registered during setup
const duration = Date.now() - start;
expect(duration).toBeLessThan(1000); // Should be very fast due to consolidation
});
test('should maintain performance benefits of consolidation', () => {
// Consolidated tools should be much more efficient than 24+ legacy tools
const registeredTools = mockMcpServer.registeredTools;
// With only 3 tools vs 24+, performance should be significantly better
expect(registeredTools.length).toBeLessThan(10);
expect(registeredTools.length).toBeGreaterThanOrEqual(3);
});
});
});
```
--------------------------------------------------------------------------------
/server/tests/enhanced-validation/lifecycle-validation/lifecycle-test-suite.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
/**
* Process Lifecycle Validation Test Suite
*
* Tests the process lifecycle validation system to eliminate emergency process.exit() usage
* Validates clean shutdown capabilities and resource management
*/
async function runLifecycleValidationTests() {
try {
console.log('🔄 Running Process Lifecycle Validation Tests...');
console.log('🎯 Eliminating emergency process.exit() usage\n');
const results = {
lifecycleValidator: false,
cleanShutdown: false,
resourceLeakDetection: false,
timeoutCompliance: false,
totalTests: 0,
passedTests: 0
};
// Test 1: Lifecycle Validator Creation and Basic Functionality
console.log('🔧 Test 1: Lifecycle Validator Functionality');
results.totalTests++;
try {
const { createProcessLifecycleValidator } = await import('./process-lifecycle-validator.js');
const { MockLogger } = await import('../../helpers/test-helpers.js');
const logger = new MockLogger();
const validator = createProcessLifecycleValidator(logger);
if (validator && typeof validator.validateCleanShutdown === 'function') {
console.log(' ✅ ProcessLifecycleValidator created successfully');
console.log(' ✅ All required methods available');
results.lifecycleValidator = true;
results.passedTests++;
} else {
console.log(' ❌ ProcessLifecycleValidator missing required methods');
}
} catch (error) {
console.log(` ❌ Lifecycle validator creation failed: ${error.message}`);
}
// Test 2: Clean Shutdown Validation
console.log('\n🔒 Test 2: Clean Shutdown Validation');
results.totalTests++;
try {
const { createProcessLifecycleValidator } = await import('./process-lifecycle-validator.js');
const { MockLogger } = await import('../../helpers/test-helpers.js');
const logger = new MockLogger();
const validator = createProcessLifecycleValidator(logger);
// Create a mock application with proper shutdown
const mockApplication = {
shutdown: async () => {
// Simulate cleanup work
await new Promise(resolve => setTimeout(resolve, 50));
return true;
}
};
const shutdownResult = await validator.validateCleanShutdown(mockApplication);
if (shutdownResult.success && shutdownResult.shutdownTime < 1000) {
console.log(' ✅ Mock application shutdown validated successfully');
console.log(` ✅ Shutdown completed in ${shutdownResult.shutdownTime}ms`);
console.log(` ✅ Resources cleared: ${shutdownResult.resourcesCleared ? 'Yes' : 'No'}`);
results.cleanShutdown = true;
results.passedTests++;
} else {
console.log(' ❌ Clean shutdown validation failed:', shutdownResult.error || 'Unknown error');
}
} catch (error) {
console.log(` ❌ Clean shutdown test failed: ${error.message}`);
}
// Test 3: Resource Leak Detection
console.log('\n🕵️ Test 3: Resource Leak Detection');
results.totalTests++;
try {
const { createProcessLifecycleValidator } = await import('./process-lifecycle-validator.js');
const { MockLogger } = await import('../../helpers/test-helpers.js');
const logger = new MockLogger();
const validator = createProcessLifecycleValidator(logger);
// Test resource leak detection
const leakReport = await validator.detectResourceLeaks();
if (leakReport && typeof leakReport.hasLeaks === 'boolean') {
console.log(' ✅ Resource leak detection completed');
console.log(` 📊 Active handles: ${leakReport.activeHandles}`);
console.log(` 📊 Active requests: ${leakReport.activeRequests}`);
console.log(` 📊 Has leaks: ${leakReport.hasLeaks ? 'Yes' : 'No'}`);
if (leakReport.hasLeaks && leakReport.recommendations.length > 0) {
console.log(' 💡 Recommendations provided for leak resolution');
}
results.resourceLeakDetection = true;
results.passedTests++;
} else {
console.log(' ❌ Resource leak detection returned invalid result');
}
} catch (error) {
console.log(` ❌ Resource leak detection failed: ${error.message}`);
}
// Test 4: Timeout Compliance Enforcement
console.log('\n⏱️ Test 4: Timeout Compliance Enforcement');
results.totalTests++;
try {
const { createProcessLifecycleValidator } = await import('./process-lifecycle-validator.js');
const { MockLogger } = await import('../../helpers/test-helpers.js');
const logger = new MockLogger();
const validator = createProcessLifecycleValidator(logger);
// Test function that completes naturally
const goodTestFunction = async () => {
await new Promise(resolve => setTimeout(resolve, 100));
return 'completed';
};
const complianceResult = await validator.enforceTimeoutCompliance(goodTestFunction, 1000);
if (complianceResult.success && complianceResult.completedNaturally && !complianceResult.forceExitUsed) {
console.log(' ✅ Timeout compliance validation works correctly');
console.log(` ✅ Test completed naturally in ${complianceResult.duration}ms`);
console.log(' ✅ No force exit detected');
results.timeoutCompliance = true;
results.passedTests++;
} else {
console.log(' ❌ Timeout compliance validation failed');
console.log(' Details:', complianceResult);
}
} catch (error) {
console.log(` ❌ Timeout compliance test failed: ${error.message}`);
}
// Test 5: Integration with Existing Resource Tracker
console.log('\n🔗 Test 5: Global Resource Tracker Integration');
results.totalTests++;
try {
const { createProcessLifecycleValidator } = await import('./process-lifecycle-validator.js');
const { MockLogger } = await import('../../helpers/test-helpers.js');
const logger = new MockLogger();
const validator = createProcessLifecycleValidator(logger);
// Test resource cleanup validation
const cleanupResult = await validator.validateResourceCleanup();
if (cleanupResult && typeof cleanupResult.allResourcesCleared === 'boolean') {
console.log(' ✅ Resource cleanup validation completed');
console.log(` 📊 Had tracked resources: ${cleanupResult.hadTrackedResources ? 'Yes' : 'No'}`);
console.log(` 📊 All resources cleared: ${cleanupResult.allResourcesCleared ? 'Yes' : 'No'}`);
if (cleanupResult.hadTrackedResources) {
console.log(` 📊 Cleared resources: ${cleanupResult.clearedResources}`);
}
results.passedTests++;
} else {
console.log(' ❌ Resource cleanup validation returned invalid result');
}
} catch (error) {
console.log(` ❌ Resource tracker integration test failed: ${error.message}`);
}
// Summary
console.log('\n' + '='.repeat(60));
console.log('📊 PROCESS LIFECYCLE VALIDATION RESULTS');
console.log('='.repeat(60));
console.log(`📈 Tests Passed: ${results.passedTests}/${results.totalTests}`);
console.log(`📊 Success Rate: ${((results.passedTests / results.totalTests) * 100).toFixed(1)}%`);
console.log('');
console.log('🔧 Component Status:');
console.log(` Lifecycle Validator: ${results.lifecycleValidator ? '✅' : '❌'}`);
console.log(` Clean Shutdown: ${results.cleanShutdown ? '✅' : '❌'}`);
console.log(` Resource Leak Detection: ${results.resourceLeakDetection ? '✅' : '❌'}`);
console.log(` Timeout Compliance: ${results.timeoutCompliance ? '✅' : '❌'}`);
if (results.passedTests >= 4) { // Allow for resource tracker integration to potentially fail
console.log('\n🎉 Process lifecycle validation system is working!');
console.log('✅ Emergency process.exit() calls should no longer be needed');
console.log('✅ Clean shutdown validation ensures proper test completion');
// Use natural completion instead of process.exit(0)
return true;
} else {
console.log('\n❌ Process lifecycle validation system has issues');
console.log('⚠️ Emergency process.exit() may still be needed');
// Use natural completion instead of process.exit(1)
return false;
}
} catch (error) {
console.error('❌ Lifecycle validation test execution failed:', error.message);
console.error('Stack trace:', error.stack);
// Use natural completion instead of process.exit(1)
return false;
}
}
// Handle process cleanup gracefully
process.on('uncaughtException', (error) => {
console.error('❌ Uncaught exception in lifecycle validation tests:', error.message);
// Don't use process.exit(1) - let test runner handle it
});
process.on('unhandledRejection', (reason) => {
console.error('❌ Unhandled rejection in lifecycle validation tests:', reason);
// Don't use process.exit(1) - let test runner handle it
});
// Run the tests and demonstrate natural completion
if (import.meta.url === `file://${process.argv[1]}`) {
runLifecycleValidationTests().then(success => {
if (success) {
console.log('\n🎯 Test completed naturally without process.exit() - this is the goal!');
} else {
console.log('\n⚠️ Test completed naturally despite failures - no process.exit() needed');
}
// Natural completion - no process.exit() calls
}).catch(error => {
console.error('❌ Test execution failed:', error);
// Natural completion even on error - no process.exit() calls
});
}
```
--------------------------------------------------------------------------------
/server/src/runtime/startup.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Server Root Detection and Startup Utilities
* Robust server root directory detection for different execution contexts
*/
import path from "path";
import { fileURLToPath } from "url";
/**
* Server Root Detector
* Handles robust server root directory detection using multiple strategies
* optimized for different execution contexts (direct execution vs Claude Desktop)
*/
export class ServerRootDetector {
/**
* Determine the server root directory using multiple strategies
* This is more robust for different execution contexts (direct execution vs Claude Desktop)
*/
async determineServerRoot(): Promise<string> {
// Check for debug/verbose logging flags
const args = process.argv.slice(2);
const isVerbose =
args.includes("--verbose") || args.includes("--debug-startup");
const isQuiet = args.includes("--quiet");
// Default to quiet mode (no output) unless verbose is specified; --quiet overrides
const shouldShowOutput = isVerbose && !isQuiet;
// Early termination: If environment variable is set, use it immediately
if (process.env.MCP_SERVER_ROOT) {
const envPath = path.resolve(process.env.MCP_SERVER_ROOT);
try {
const configPath = path.join(envPath, "config.json");
const fs = await import("fs/promises");
await fs.access(configPath);
if (shouldShowOutput) {
console.error(`✓ SUCCESS: MCP_SERVER_ROOT environment variable`);
console.error(` Path: ${envPath}`);
console.error(` Config found: ${configPath}`);
}
return envPath;
} catch (error) {
if (isVerbose) {
console.error(`✗ WARNING: MCP_SERVER_ROOT env var set but invalid`);
console.error(` Tried path: ${envPath}`);
console.error(
` Error: ${error instanceof Error ? error.message : String(error)}`
);
console.error(` Falling back to automatic detection...`);
}
}
}
// Build strategies in optimal order (most likely to succeed first)
const strategies = this.buildDetectionStrategies();
// Only show diagnostic information in verbose mode
if (isVerbose) {
this.logDiagnosticInfo(strategies);
}
// Test strategies with optimized flow
return await this.testStrategies(strategies, isVerbose, shouldShowOutput);
}
/**
* Build detection strategies in optimal order
*/
private buildDetectionStrategies() {
const strategies = [];
// Strategy 1: process.argv[1] script location (most successful in Claude Desktop)
if (process.argv[1]) {
const scriptPath = process.argv[1];
// Primary strategy: Direct script location to server root
strategies.push({
name: "process.argv[1] script location",
path: path.dirname(path.dirname(scriptPath)), // Go up from dist to server root
source: `script: ${scriptPath}`,
priority: "high",
});
}
// Strategy 2: import.meta.url (current module location) - reliable fallback
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
strategies.push({
name: "import.meta.url relative",
path: path.join(__dirname, "..", ".."),
source: `module: ${__filename}`,
priority: "medium",
});
// Strategy 3: Common Claude Desktop patterns (ordered by likelihood)
const commonPaths = [
{ path: path.join(process.cwd(), "server"), desc: "cwd/server" },
{ path: process.cwd(), desc: "cwd" },
{ path: path.join(process.cwd(), "..", "server"), desc: "parent/server" },
{ path: path.join(__dirname, "..", "..", ".."), desc: "module parent" },
];
for (const { path: commonPath, desc } of commonPaths) {
strategies.push({
name: `common pattern (${desc})`,
path: commonPath,
source: `pattern: ${commonPath}`,
priority: "low",
});
}
return strategies;
}
/**
* Log diagnostic information for troubleshooting
*/
private logDiagnosticInfo(strategies: any[]) {
console.error("=== SERVER ROOT DETECTION STRATEGIES ===");
console.error(`Environment: process.cwd() = ${process.cwd()}`);
console.error(`Environment: process.argv[0] = ${process.argv[0]}`);
console.error(
`Environment: process.argv[1] = ${process.argv[1] || "undefined"}`
);
console.error(
`Environment: __filename = ${fileURLToPath(import.meta.url)}`
);
console.error(
`Environment: MCP_SERVER_ROOT = ${
process.env.MCP_SERVER_ROOT || "undefined"
}`
);
console.error(`Strategies to test: ${strategies.length}`);
console.error("");
}
/**
* Test strategies with optimized flow
*/
private async testStrategies(strategies: any[], isVerbose: boolean, shouldShowOutput: boolean): Promise<string> {
let lastHighPriorityIndex = -1;
for (let i = 0; i < strategies.length; i++) {
const strategy = strategies[i];
// Track where high-priority strategies end for early termination logic
if (strategy.priority === "high") {
lastHighPriorityIndex = i;
}
try {
const resolvedPath = path.resolve(strategy.path);
// Check if config.json exists in this location
const configPath = path.join(resolvedPath, "config.json");
const fs = await import("fs/promises");
await fs.access(configPath);
// Success! Only log in verbose mode
if (shouldShowOutput) {
console.error(`✓ SUCCESS: ${strategy.name}`);
console.error(` Path: ${resolvedPath}`);
console.error(` Source: ${strategy.source}`);
console.error(` Config found: ${configPath}`);
// Show efficiency info in verbose mode
if (isVerbose) {
console.error(
` Strategy #${i + 1}/${strategies.length} (${
strategy.priority
} priority)`
);
console.error(
` Skipped ${strategies.length - i - 1} remaining strategies`
);
}
}
return resolvedPath;
} catch (error) {
// Only log failures in verbose mode
if (isVerbose) {
console.error(`✗ FAILED: ${strategy.name}`);
console.error(` Tried path: ${path.resolve(strategy.path)}`);
console.error(` Source: ${strategy.source}`);
console.error(` Priority: ${strategy.priority}`);
console.error(
` Error: ${error instanceof Error ? error.message : String(error)}`
);
}
// Early termination: If all high-priority strategies fail and we're not in verbose mode,
// provide a simplified error message encouraging environment variable usage
if (
i === lastHighPriorityIndex &&
!isVerbose &&
lastHighPriorityIndex >= 0
) {
if (shouldShowOutput) {
console.error(
`⚠️ High-priority detection strategies failed. Trying fallback methods...`
);
console.error(
`💡 Tip: Set MCP_SERVER_ROOT environment variable for guaranteed detection`
);
console.error(`📝 Use --verbose to see detailed strategy testing`);
}
}
}
}
// If all strategies fail, provide optimized troubleshooting information
const attemptedPaths = strategies
.map(
(s, i) =>
` ${i + 1}. ${s.name} (${s.priority}): ${path.resolve(s.path)}`
)
.join("\n");
const troubleshootingInfo = this.generateTroubleshootingInfo(attemptedPaths);
console.error(troubleshootingInfo);
throw new Error(
`Unable to auto-detect server root directory after testing ${strategies.length} strategies.\n\n` +
`SOLUTION OPTIONS:\n` +
`1. [RECOMMENDED] Set MCP_SERVER_ROOT environment variable for reliable detection\n` +
`2. Ensure config.json is present in your server directory\n` +
`3. Check file permissions and directory access\n\n` +
`See detailed troubleshooting information above.`
);
}
/**
* Generate comprehensive troubleshooting information
*/
private generateTroubleshootingInfo(attemptedPaths: string): string {
return `
TROUBLESHOOTING CLAUDE DESKTOP ISSUES:
🎯 SOLUTION OPTIONS:
1. Set MCP_SERVER_ROOT environment variable (most reliable):
Windows: set MCP_SERVER_ROOT=E:\\path\\to\\claude-prompts-mcp\\server
macOS/Linux: export MCP_SERVER_ROOT=/path/to/claude-prompts-mcp/server
2. Verify file structure - ensure these files exist:
• config.json (main server configuration)
• prompts/ directory (with promptsConfig.json)
• dist/ directory (compiled JavaScript)
3. Check file permissions and directory access
📁 Claude Desktop Configuration:
Update your claude_desktop_config.json:
{
"mcpServers": {
"claude-prompts-mcp": {
"command": "node",
"args": ["E:\\\\full\\\\path\\\\to\\\\server\\\\dist\\\\index.js", "--transport=stdio"],
"env": {
"MCP_SERVER_ROOT": "E:\\\\full\\\\path\\\\to\\\\server"
}
}
}
}
🔧 Alternative Solutions:
1. Create wrapper script that sets working directory before launching server
2. Use absolute paths in your Claude Desktop configuration
3. Run from the correct working directory (server/)
🐛 Debug Mode:
Use --verbose or --debug-startup flag to see detailed strategy testing
📊 Detection Summary:
Current working directory: ${process.cwd()}
Strategies tested (in order of priority):
${attemptedPaths}
`;
}
}
```
--------------------------------------------------------------------------------
/server/src/mcp-tools/prompt-manager/search/prompt-matcher.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Matching and fuzzy search logic for prompt discovery
*/
import { Logger } from "../../../logging/index.js";
import { ConvertedPrompt } from "../../../types/index.js";
import { PromptClassification, SmartFilters } from "../core/types.js";
/**
* Prompt matching engine with fuzzy search capabilities
*/
export class PromptMatcher {
private logger: Logger;
constructor(logger: Logger) {
this.logger = logger;
}
/**
* Check if prompt matches the provided filters
*/
async matchesFilters(
prompt: ConvertedPrompt,
filters: SmartFilters,
classification: PromptClassification
): Promise<boolean> {
// Debug-level logging to avoid flooding normal logs on every filter pass
this.logger.debug(`Filtering prompt ${prompt.id}:`, {
filters,
executionType: classification.executionType,
category: prompt.category
});
// Empty filters match everything
if (Object.keys(filters).length === 0) return true;
// Type filter
if (filters.type && classification.executionType !== filters.type) {
this.logger.debug(`Type filter rejected: ${classification.executionType} !== ${filters.type}`);
return false;
}
// Category filter
if (filters.category && prompt.category !== filters.category) {
return false;
}
// Execution requirement filter
if (filters.execution !== undefined &&
filters.execution !== classification.requiresExecution) {
return false;
}
// Gates filter
if (filters.gates !== undefined) {
const hasGates = classification.suggestedGates.length > 0;
if (filters.gates !== hasGates) {
return false;
}
}
// Intent-based matching
if (filters.intent && !this.matchesIntent(prompt, classification, filters.intent)) {
return false;
}
// Text search with fuzzy matching
if (filters.text && !this.matchesTextSearch(prompt, classification, filters.text)) {
return false;
}
return true;
}
/**
* Intent-based matching against category and semantic content
*/
private matchesIntent(
prompt: ConvertedPrompt,
classification: PromptClassification,
intent: string
): boolean {
const intentSearchable = [
prompt.category,
prompt.name,
prompt.description,
classification.executionType,
...classification.reasoning,
...classification.suggestedGates
].join(' ').toLowerCase();
// Check if intent matches category, content, or reasoning
return intentSearchable.includes(intent.toLowerCase());
}
/**
* Enhanced text search with fuzzy matching
*/
private matchesTextSearch(
prompt: ConvertedPrompt,
classification: PromptClassification,
searchText: string
): boolean {
const searchWords = searchText.toLowerCase().split(/\s+/);
const searchable = [
prompt.id,
prompt.name,
prompt.description,
classification.executionType,
...classification.suggestedGates
].join(' ').toLowerCase();
// Check if all search words are found (allows partial word matching)
return searchWords.every((word: string) => {
return searchable.includes(word) ||
// Basic fuzzy match - accept a prefix match in either direction (using the first three characters of the searchable word)
searchable.split(/\s+/).some((searchableWord: string) =>
searchableWord.startsWith(word) || word.startsWith(searchableWord.slice(0, 3))
);
});
}
/**
* Calculate relevance score for search results ordering
*/
calculateRelevanceScore(
prompt: ConvertedPrompt,
classification: PromptClassification,
filters: SmartFilters
): number {
let score = 0;
// Base score from classification confidence
score += classification.confidence * 10;
// Boost for exact matches
if (filters.text) {
const searchText = filters.text.toLowerCase();
// Exact name match gets highest boost
if (prompt.name.toLowerCase().includes(searchText)) {
score += 50;
}
// Exact ID match gets high boost
if (prompt.id.toLowerCase().includes(searchText)) {
score += 40;
}
// Description match gets medium boost
if (prompt.description?.toLowerCase().includes(searchText)) {
score += 20;
}
// Category match gets small boost
if (prompt.category.toLowerCase().includes(searchText)) {
score += 10;
}
}
// Boost for type matches
if (filters.type && classification.executionType === filters.type) {
score += 15;
}
// Boost for category matches
if (filters.category && prompt.category === filters.category) {
score += 15;
}
// Boost for prompts with quality gates
if (classification.suggestedGates.length > 0) {
score += 5;
}
// Boost for framework-ready prompts
if (classification.requiresFramework) {
score += 5;
}
return score;
}
/**
* Find similar prompts based on content similarity
*/
findSimilarPrompts(
targetPrompt: ConvertedPrompt,
allPrompts: ConvertedPrompt[],
limit: number = 5
): ConvertedPrompt[] {
const similarities = allPrompts
.filter(p => p.id !== targetPrompt.id)
.map(prompt => ({
prompt,
similarity: this.calculateSimilarity(targetPrompt, prompt)
}))
.sort((a, b) => b.similarity - a.similarity)
.slice(0, limit);
return similarities.map(s => s.prompt);
}
/**
* Calculate similarity score between two prompts
*/
private calculateSimilarity(prompt1: ConvertedPrompt, prompt2: ConvertedPrompt): number {
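// Weighted heuristic: category match +30, name word-overlap up to +20, description overlap up to +15, argument-count similarity up to +10, chain-step similarity up to +15; the total is capped at 100.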
let similarity = 0;
// Category similarity
if (prompt1.category === prompt2.category) {
similarity += 30;
}
// Name similarity (basic word overlap)
const name1Words = new Set(prompt1.name.toLowerCase().split(/\s+/));
const name2Words = new Set(prompt2.name.toLowerCase().split(/\s+/));
const nameOverlap = this.calculateSetOverlap(name1Words, name2Words);
similarity += nameOverlap * 20;
// Description similarity
if (prompt1.description && prompt2.description) {
const desc1Words = new Set(prompt1.description.toLowerCase().split(/\s+/));
const desc2Words = new Set(prompt2.description.toLowerCase().split(/\s+/));
const descOverlap = this.calculateSetOverlap(desc1Words, desc2Words);
similarity += descOverlap * 15;
}
// Arguments similarity
const args1Count = prompt1.arguments?.length || 0;
const args2Count = prompt2.arguments?.length || 0;
if (args1Count > 0 || args2Count > 0) {
const argsSimilarity = 1 - Math.abs(args1Count - args2Count) / Math.max(args1Count, args2Count, 1);
similarity += argsSimilarity * 10;
}
// Chain steps similarity
const chain1Count = prompt1.chainSteps?.length || 0;
const chain2Count = prompt2.chainSteps?.length || 0;
if (chain1Count > 0 || chain2Count > 0) {
const chainSimilarity = 1 - Math.abs(chain1Count - chain2Count) / Math.max(chain1Count, chain2Count, 1);
similarity += chainSimilarity * 15;
}
return Math.min(similarity, 100); // Cap at 100
}
/**
* Calculate overlap between two sets (Jaccard index: intersection size / union size)
*/
private calculateSetOverlap(set1: Set<string>, set2: Set<string>): number {
const intersection = new Set([...set1].filter(x => set2.has(x)));
const union = new Set([...set1, ...set2]);
return union.size > 0 ? intersection.size / union.size : 0;
}
/**
* Search prompts with autocomplete suggestions
*/
generateSearchSuggestions(
partialQuery: string,
allPrompts: ConvertedPrompt[]
): string[] {
const suggestions: string[] = [];
const query = partialQuery.toLowerCase();
// Suggest prompt names that start with the query
const nameMatches = allPrompts
.filter(p => p.name.toLowerCase().startsWith(query))
.map(p => p.name)
.slice(0, 3);
suggestions.push(...nameMatches);
// Suggest prompt IDs that start with the query
const idMatches = allPrompts
.filter(p => p.id.toLowerCase().startsWith(query))
.map(p => p.id)
.slice(0, 3);
suggestions.push(...idMatches);
// Suggest categories that start with the query
const categories = [...new Set(allPrompts.map(p => p.category))]
.filter(cat => cat.toLowerCase().startsWith(query))
.slice(0, 2);
suggestions.push(...categories.map(cat => `category:${cat}`));
return [...new Set(suggestions)].slice(0, 8); // Remove duplicates, limit to 8
}
/**
* Highlight search terms in text
*/
highlightSearchTerms(text: string, searchTerms: string[]): string {
let highlighted = text;
for (const term of searchTerms) {
// Escape regex metacharacters so user-supplied terms cannot break the pattern
const escaped = term.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
const regex = new RegExp(`(${escaped})`, "gi");
highlighted = highlighted.replace(regex, "**$1**");
}
return highlighted;
}
/**
* Extract key phrases from prompt for indexing
*/
extractKeyPhrases(prompt: ConvertedPrompt): string[] {
const phrases: string[] = [];
// Extract from name
phrases.push(...prompt.name.toLowerCase().split(/\s+/));
// Extract from description
if (prompt.description) {
phrases.push(...prompt.description.toLowerCase().split(/\s+/));
}
// Extract from category
phrases.push(prompt.category);
// Extract from argument names
if (prompt.arguments) {
phrases.push(...prompt.arguments.map(arg => arg.name.toLowerCase()));
}
// Filter out common words and short phrases
const filtered = phrases
.filter(phrase => phrase.length > 2)
.filter(phrase => !['the', 'and', 'for', 'with', 'this', 'that'].includes(phrase));
return [...new Set(filtered)]; // Remove duplicates
}
}
```
--------------------------------------------------------------------------------
/server/src/mcp-tools/prompt-manager/analysis/prompt-analyzer.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Semantic analysis and classification engine
*/
import { Logger } from "../../../logging/index.js";
import { ContentAnalyzer, ContentAnalysisResult } from "../../../semantic/configurable-semantic-analyzer.js";
import { ConvertedPrompt } from "../../../types/index.js";
import {
PromptClassification,
AnalysisResult,
PromptManagerDependencies
} from "../core/types.js";
/**
* Prompt analysis engine for semantic classification and intelligence feedback
*/
export class PromptAnalyzer {
private logger: Logger;
private semanticAnalyzer: ContentAnalyzer;
constructor(dependencies: Pick<PromptManagerDependencies, 'logger' | 'semanticAnalyzer'>) {
this.logger = dependencies.logger;
this.semanticAnalyzer = dependencies.semanticAnalyzer;
}
/**
* Analyze prompt for intelligence feedback (compact format)
*/
async analyzePromptIntelligence(promptData: any): Promise<AnalysisResult> {
// Create temporary ConvertedPrompt for analysis
const tempPrompt: ConvertedPrompt = {
id: promptData.id,
name: promptData.name,
description: promptData.description,
category: promptData.category,
systemMessage: promptData.systemMessage,
userMessageTemplate: promptData.userMessageTemplate,
arguments: promptData.arguments || [],
chainSteps: promptData.chainSteps || []
};
const classification = await this.analyzePrompt(tempPrompt);
// When API Analysis is disabled, show minimal message with no gate suggestions
if (!this.semanticAnalyzer.isLLMEnabled()) {
return {
classification,
feedback: `⚠️ API Analysis Disabled\n`,
suggestions: []
};
}
// Normal mode: show concise single-line format with type and suggested gates
const analysisIcon = this.getAnalysisIcon(classification.analysisMode || classification.framework);
let feedback = `${analysisIcon} ${classification.executionType}`;
// Add suggested gates if present
if (classification.suggestedGates.length > 0) {
feedback += ` • Suggested gates: ${classification.suggestedGates.join(', ')}`;
}
feedback += '\n';
// Generate capability-aware suggestions (empty for now in concise mode)
const suggestions: string[] = [];
return { classification, feedback, suggestions };
}
/**
* Analyze prompt using semantic analyzer (configuration-aware)
*/
async analyzePrompt(prompt: ConvertedPrompt): Promise<PromptClassification> {
try {
const analysis = await this.semanticAnalyzer.analyzePrompt(prompt);
return {
executionType: analysis.executionType,
requiresExecution: analysis.requiresExecution,
requiresFramework: analysis.requiresFramework,
confidence: analysis.confidence,
reasoning: analysis.reasoning,
suggestedGates: analysis.suggestedGates,
framework: 'configurable',
// Enhanced configurable analysis information
analysisMode: analysis.analysisMetadata.mode,
capabilities: analysis.capabilities,
limitations: analysis.limitations,
warnings: analysis.warnings
};
} catch (error) {
this.logger.error(`Configurable semantic analysis failed for ${prompt.id}:`, error);
return this.createFallbackAnalysis(prompt, error);
}
}
/**
* Create fallback analysis when semantic analysis fails
*/
private createFallbackAnalysis(prompt: ConvertedPrompt, error: any): PromptClassification {
return {
executionType: (prompt.chainSteps?.length ?? 0) > 0 ? 'chain' : 'template',
requiresExecution: true,
requiresFramework: true, // Default to requiring framework for fallback
confidence: 0.5,
reasoning: [`Fallback analysis: ${error}`],
suggestedGates: ['execution_validation'],
framework: 'fallback',
analysisMode: 'fallback',
capabilities: {
canDetectStructure: false,
canAnalyzeComplexity: false,
canRecommendFramework: false,
hasSemanticUnderstanding: false
},
limitations: ['Analysis failed - using minimal fallback'],
warnings: ['⚠️ Analysis error occurred', '🚨 Using minimal fallback analysis']
};
}
/**
* Create fallback analysis when semantic analysis is disabled
*/
createDisabledAnalysisFallback(prompt: ConvertedPrompt): PromptClassification {
const hasChainSteps = Boolean(prompt.chainSteps?.length);
const hasComplexArgs = (prompt.arguments?.length || 0) > 2;
const hasTemplateVars = /\{\{.*?\}\}/g.test(prompt.userMessageTemplate || '');
// Basic execution type detection without semantic analysis
let executionType: 'prompt' | 'template' | 'chain' = 'prompt';
if (hasChainSteps) {
executionType = 'chain';
} else if (hasComplexArgs || hasTemplateVars) {
executionType = 'template';
}
return {
executionType,
requiresExecution: true,
requiresFramework: false, // Conservative - don't assume framework needed
confidence: 0.7, // High confidence in basic structural facts
reasoning: [
"Semantic analysis unavailable - using basic structural detection",
`Detected ${executionType} type from file structure`,
"Framework recommendation unavailable"
],
suggestedGates: ['basic_validation'],
framework: 'disabled',
// Analysis metadata
analysisMode: 'disabled',
capabilities: {
canDetectStructure: true,
canAnalyzeComplexity: false,
canRecommendFramework: false,
hasSemanticUnderstanding: false
},
limitations: [
"Semantic analysis unavailable (no LLM integration)",
"No intelligent framework recommendations available",
"Limited complexity analysis capabilities"
],
warnings: [
"⚠️ Semantic analysis unavailable",
"💡 Configure LLM integration in config for semantic analysis",
"🔧 Using basic structural detection only"
]
};
}
/**
* Get analysis icon based on analysis mode/framework
*/
private getAnalysisIcon(mode: string | undefined): string {
switch (mode) {
case 'disabled': return '🔧'; // Basic structural detection
case 'structural': return '🔬'; // Structural analysis
case 'hybrid': return '🔍'; // Enhanced structural
case 'semantic': return '🧠'; // Full semantic analysis
case 'fallback': return '🚨'; // Error fallback
case 'configurable': return '🧠'; // Configured semantic analysis
default: return '🧠'; // Default intelligent analysis
}
}
/**
* Generate capability-aware suggestions
*/
private generateSuggestions(classification: PromptClassification): string[] {
const suggestions: string[] = [];
if (!this.semanticAnalyzer.isLLMEnabled()) {
suggestions.push("💡 Enable semantic analysis for enhanced capabilities");
suggestions.push("🎯 Framework recommendation unavailable");
} else if (classification.analysisMode === 'structural') {
suggestions.push("💡 Configure LLM integration for intelligent analysis");
} else if (classification.analysisMode === 'fallback' || classification.framework === 'fallback') {
suggestions.push("🚨 Fix analysis configuration");
}
if (!classification.capabilities?.canRecommendFramework) {
suggestions.push("🎯 Framework recommendation unavailable");
}
return suggestions;
}
/**
* Detect execution type from prompt structure
*/
detectExecutionType(prompt: ConvertedPrompt): 'prompt' | 'template' | 'chain' {
if (prompt.chainSteps && prompt.chainSteps.length > 0) {
return 'chain';
}
const hasTemplateVars = /\{\{.*?\}\}/g.test(prompt.userMessageTemplate || '');
const hasComplexArgs = (prompt.arguments?.length || 0) > 2;
if (hasTemplateVars || hasComplexArgs) {
return 'template';
}
return 'prompt';
}
/**
* Analyze prompt complexity
*/
analyzeComplexity(prompt: ConvertedPrompt): {
level: 'low' | 'medium' | 'high';
factors: string[];
score: number;
} {
const factors: string[] = [];
let score = 0;
// Check for chain steps
if (prompt.chainSteps && prompt.chainSteps.length > 0) {
factors.push(`Chain with ${prompt.chainSteps.length} steps`);
score += prompt.chainSteps.length * 2;
}
// Check for arguments
if (prompt.arguments && prompt.arguments.length > 0) {
factors.push(`${prompt.arguments.length} arguments`);
score += prompt.arguments.length;
}
// Check for template complexity
const templateVars = (prompt.userMessageTemplate || '').match(/\{\{.*?\}\}/g);
if (templateVars && templateVars.length > 0) {
factors.push(`${templateVars.length} template variables`);
score += templateVars.length;
}
// Check for system message complexity
if (prompt.systemMessage && prompt.systemMessage.length > 100) {
factors.push('Complex system message');
score += 2;
}
let level: 'low' | 'medium' | 'high' = 'low';
if (score > 10) {
level = 'high';
} else if (score > 5) {
level = 'medium';
}
return { level, factors, score };
}
/**
* Check if prompt requires framework support
*/
requiresFramework(prompt: ConvertedPrompt): boolean {
const complexity = this.analyzeComplexity(prompt);
// Chain prompts typically benefit from framework guidance
if (prompt.chainSteps && prompt.chainSteps.length > 0) {
return true;
}
// Complex templates with many arguments
if (complexity.level === 'high') {
return true;
}
// Complex system messages suggest structured analysis
if (prompt.systemMessage && prompt.systemMessage.length > 200) {
return true;
}
return false;
}
}
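// Usage sketch (illustrative only; assumes `logger` and a configured
// ContentAnalyzer instance named `semanticAnalyzer` are provided by the
// surrounding application wiring):
//
//   const analyzer = new PromptAnalyzer({ logger, semanticAnalyzer });
//   const classification = await analyzer.analyzePrompt(prompt);
//   if (analyzer.requiresFramework(prompt)) {
//     // route the prompt through framework-aware (template tier) execution
//   }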
```
--------------------------------------------------------------------------------
/docs/execution-architecture-guide.md:
--------------------------------------------------------------------------------
````markdown
# Three-Tier Execution System Guide
## Overview
The Claude Prompts MCP Server implements a sophisticated **three-tier execution model** designed to handle different levels of complexity and user requirements. This guide explains how the system automatically selects the appropriate execution tier and what each tier provides.
## Related Documentation
- **[Quick Start: Execution Modes](prompt-vs-template-guide.md)** - Quick decision guide for choosing execution modes
- **[Template Development Guide](template-development-guide.md)** - Creating framework-aware templates (Tier 2)
- **[Chain System Analysis](chain-system-analysis.md)** - Detailed analysis of chain execution (Tier 3)
- **[System Architecture](architecture.md)** - Technical implementation details
- **[MCP Tools Reference](mcp-tools-reference.md)** - Using the `prompt_engine` tool across all tiers
## The Three Execution Tiers
### Tier 1: Prompt Execution (Basic)
**Best for**: Simple variable substitution and straightforward prompts
- **Purpose**: Fast, lightweight execution for basic prompts
- **Processing**: Direct Nunjucks template variable substitution
- **Framework Integration**: Minimal - uses default settings
- **Performance**: Fastest execution with minimal overhead
- **Use Cases**: Quick queries, simple content generation, basic automation
**Example**:
```markdown
# Simple Greeting Prompt
Hello {{name}}, welcome to {{service}}!
```
### Tier 2: Template Execution (Framework-Aware)
**Best for**: Structured prompts requiring methodology guidance
- **Purpose**: Enhanced execution with active framework methodology
- **Processing**: Framework-specific system prompt injection + template processing
- **Framework Integration**: Full integration with CAGEERF, ReACT, 5W1H, SCAMPER
- **Performance**: Moderate overhead for enhanced quality
- **Use Cases**: Structured analysis, systematic problem-solving, quality-assured content
**Example**:
```markdown
# Analysis Template
**🔄 FRAMEWORK EXECUTION**: Uses {{framework}} methodology for systematic analysis
Analyze the following content using structured approach:
{{content}}
```
### Tier 3: Chain Execution (Multi-Step Workflows)
**Best for**: Complex multi-step processes requiring orchestration
- **Purpose**: LLM-driven iterative workflows with state management
- **Processing**: Step-by-step execution with context passing between steps
- **Framework Integration**: Framework guidance applies to each step
- **Performance**: Highest overhead but maximum capability
- **Use Cases**: Complex analysis workflows, multi-phase content creation, research processes
**Example**:
```markdown
# Content Analysis Chain
## Chain Steps
1. promptId: content_analysis
stepName: Initial Content Analysis
2. promptId: deep_analysis
stepName: Deep Analysis
inputMapping:
initial_analysis: step_0_output
```
## Execution Tier Selection
### Automatic Detection
The system automatically detects the appropriate execution tier based on prompt characteristics:
```typescript
// Execution type detection logic
if (prompt.isChain && prompt.chainSteps?.length > 0) {
return 'chain';
} else if (hasFrameworkRequirements(prompt)) {
return 'template';
} else {
return 'prompt';
}
```
### Manual Override
You can explicitly specify execution mode using the `execution_mode` parameter:
```bash
# Force template execution
prompt_engine >>my_prompt execution_mode=template
# Force chain execution
prompt_engine >>my_chain execution_mode=chain llm_driven_execution=true
```
## Architecture Components
### Execution Engine Flow
```mermaid
graph TD
A[MCP Client Request] --> B[ConsolidatedPromptEngine]
B --> C{Execution Type Detection}
C -->|Basic| D[Prompt Execution]
C -->|Enhanced| E[Template Execution]
C -->|Complex| F[Chain Execution]
D --> G[Nunjucks Processing]
E --> H[Framework Injection + Nunjucks]
F --> I[LLM-Driven Step Orchestration]
G --> J[Direct Response]
H --> K[Framework-Enhanced Response]
I --> L[Chain Progress Instructions]
```
### Core Components
#### ConsolidatedPromptEngine
**Location**: `server/src/mcp-tools/prompt-engine.ts`
- **Role**: Central execution coordinator
- **Responsibilities**:
- Execution tier detection and selection
- Framework integration coordination
- Chain state management and instruction generation
- Quality gate validation
#### ExecutionCoordinator
**Location**: `server/src/execution/execution-coordinator.ts`
- **Role**: Thin orchestration layer
- **Responsibilities**:
- Legacy compatibility
- Statistics tracking across execution tiers
- Delegation to ConsolidatedPromptEngine
#### Framework Manager
**Location**: `server/src/frameworks/framework-manager.ts`
- **Role**: Methodology guide orchestration
- **Responsibilities**:
- Active framework state management
- System prompt generation for templates
- Framework-specific quality guidance
## Framework Integration Across Tiers
### Available Frameworks
- **CAGEERF**: Context, Analysis, Goals, Execution, Evaluation, Refinement, Framework
- **ReACT**: Reasoning and Acting systematic approach
- **5W1H**: Who, What, When, Where, Why, How analysis
- **SCAMPER**: Substitute, Combine, Adapt, Modify, Put to other uses, Eliminate, Reverse
### Framework Application by Tier
#### Prompt Tier (Minimal Integration)
- Framework selection tracked but not actively applied
- Uses framework defaults for any quality gates
- No system prompt modification
#### Template Tier (Full Integration)
- Active framework methodology guides applied
- Framework-specific system prompts injected
- Quality gates adapted to framework criteria
- Enhanced validation and guidance
#### Chain Tier (Per-Step Integration)
- Framework guidance applied to each chain step
- Consistent methodology throughout multi-step process
- Framework-aware context passing between steps
- Comprehensive quality validation per step
## Quality Gates by Tier
### Prompt Tier Gates
- Basic content validation (length, format)
- Template variable validation
- Simple structure checks
### Template Tier Gates
- All prompt tier gates
- Framework methodology compliance
- Enhanced content quality validation
- Argument structure validation
### Chain Tier Gates
- All template tier gates applied per step
- Chain structure validation
- Step dependency validation
- Context continuity validation
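Gates can also be pinned to individual prompts in each category's `prompts.json`. A minimal sketch, mirroring the gate IDs used elsewhere in this repository:
```json
{
  "id": "expert_code_implementation",
  "category": "development",
  "gates": ["security-awareness", "code-quality", "technical-accuracy"]
}
```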
## Performance Characteristics
### Execution Time Comparison
| Tier | Typical Execution | Overhead | Best For |
|------|------------------|----------|----------|
| Prompt | 50-200ms | Minimal | Quick queries |
| Template | 200-800ms | Moderate | Structured work |
| Chain | 2-10s per step | High | Complex workflows |
### Memory Usage
- **Prompt**: Minimal memory footprint
- **Template**: Moderate memory for framework context
- **Chain**: Higher memory for state management and step context
## Best Practices
### When to Use Each Tier
#### Choose Prompt Tier When:
- Simple content generation needs
- Performance is critical
- No structural requirements
- Straightforward variable substitution
#### Choose Template Tier When:
- Need systematic methodology application
- Quality validation is important
- Structured output required
- Working within established frameworks
#### Choose Chain Tier When:
- Multi-step processes required
- Complex workflows with dependencies
- Need context passing between steps
- Iterative refinement processes
### Performance Optimization
#### Prompt Tier Optimization
- Minimize template complexity
- Use simple variable substitution
- Avoid unnecessary processing
#### Template Tier Optimization
- Choose appropriate framework for task
- Use efficient template structures
- Enable only necessary quality gates
#### Chain Tier Optimization
- Design efficient step sequences
- Minimize context accumulation
- Use auto-execution for simple chains
- Plan for context window management
## Configuration
### Global Execution Settings
```json
{
"execution": {
"defaultMode": "auto",
"enableGateValidation": true,
"chainAutoExecution": true,
"performanceMonitoring": true
}
}
```
### Per-Prompt Configuration
```markdown
# Prompt with tier preference
**⚡ EXECUTION REQUIRED**: This prompt uses template execution for framework integration
Your content here...
```
## Troubleshooting
### Common Issues
#### Wrong Tier Selection
**Problem**: System selects inappropriate execution tier
**Solution**: Use explicit `execution_mode` parameter
#### Performance Issues
**Problem**: Slow execution times
**Solution**: Check execution tier - consider lower tier for simpler tasks
#### Chain State Issues
**Problem**: Chain execution loses state between steps
**Solution**: Verify ConversationManager state tracking is active
### Debug Information
Enable verbose logging to see execution tier selection:
```bash
npm run start:verbose
```
This will show:
- Execution tier detection reasoning
- Framework selection process
- Performance metrics by tier
- State management activities
## Migration Guide
### From Legacy Single-Tier System
If migrating from older single-tier execution:
1. **Review existing prompts** for tier appropriateness
2. **Add framework markers** for prompts needing template tier
3. **Convert multi-step processes** to chain format
4. **Test execution tier detection** with your prompt library
### Updating Existing Prompts
```markdown
# Before (legacy)
Your prompt content
# After (tier-aware)
**🔄 TEMPLATE EXECUTION**: Framework-aware processing
Your prompt content with {{variables}}
# Chain format
## Chain Steps
1. promptId: step_one
stepName: First Step
```
This three-tier system provides the flexibility to handle everything from simple queries to complex multi-step workflows while maintaining optimal performance for each use case.
````
--------------------------------------------------------------------------------
/server/src/gates/intelligence/GateSelectionEngine.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Gate Selection Engine - Intelligent Gate Selection
*
* Single responsibility: Select appropriate gates based on semantic analysis and context.
* Clean dependencies: Only content analysis types and framework definitions.
*/
import type { Logger } from '../../logging/index.js';
import { GateSelectionCriteria, GateSelectionResult } from '../core/gate-definitions.js';
import type { ContentAnalysisResult } from '../../semantic/configurable-semantic-analyzer.js';
import type { FrameworkDefinition } from '../../frameworks/types/index.js';
/**
* User preferences for gate selection
*/
export interface UserPreferences {
strictValidation?: boolean;
performanceMode?: boolean;
qualityFocus?: 'speed' | 'accuracy' | 'balanced';
}
/**
* Extended gate selection criteria with semantic analysis
* Explicitly includes all base properties for strict TypeScript compilation compatibility
*/
export interface ExtendedGateSelectionCriteria extends GateSelectionCriteria {
// Explicitly defined for GitHub Actions TypeScript compatibility
framework?: string;
category?: string;
promptId?: string;
executionMode?: 'prompt' | 'template' | 'chain';
complexityLevel?: 'low' | 'medium' | 'high';
// Extended properties
semanticAnalysis?: ContentAnalysisResult;
frameworkContext?: FrameworkDefinition;
userPreferences?: UserPreferences;
}
/**
* Gate selection engine with semantic awareness
*/
export class GateSelectionEngine {
private logger: Logger;
private selectionHistory: GateSelectionResult[] = [];
constructor(logger: Logger) {
this.logger = logger;
this.logger.debug('[GATE SELECTION ENGINE] Initialized');
}
/**
* Select appropriate gates based on criteria and semantic analysis
*
* @param criteria - Extended selection criteria with semantic analysis
* @returns Gate selection result with reasoning
*/
async selectGates(criteria: ExtendedGateSelectionCriteria): Promise<GateSelectionResult> {
const startTime = Date.now();
this.logger.info('🧠 [GATE SELECTION ENGINE] selectGates called:', {
framework: criteria.framework,
category: criteria.category,
executionMode: criteria.executionMode,
complexityLevel: criteria.complexityLevel,
hasSemanticAnalysis: !!criteria.semanticAnalysis,
hasFrameworkContext: !!criteria.frameworkContext
});
// Primary gate selection based on framework and category
const primaryGates = this.selectPrimaryGates(criteria);
// Semantic-enhanced gate selection
const semanticGates = this.selectSemanticGates(criteria);
// Merge and deduplicate
const selectedGates = this.mergeGateSelections(primaryGates, semanticGates);
// Generate reasoning
const reasoning = this.generateSelectionReasoning(criteria, primaryGates, semanticGates);
// Calculate confidence
const confidence = this.calculateSelectionConfidence(criteria, selectedGates);
// Estimate execution time
const estimatedExecutionTime = this.estimateExecutionTime(selectedGates, criteria);
// Determine fallback gates
const fallbackGates = this.determineFallbackGates(criteria);
const result: GateSelectionResult = {
selectedGates,
reasoning,
confidence,
estimatedExecutionTime,
fallbackGates
};
// Track selection history
this.selectionHistory.push(result);
if (this.selectionHistory.length > 50) {
this.selectionHistory = this.selectionHistory.slice(-50);
}
const executionTime = Date.now() - startTime;
this.logger.debug('[GATE SELECTION ENGINE] Selection completed:', {
selectedGates: selectedGates.length,
confidence,
estimatedExecutionTime,
actualSelectionTime: executionTime
});
return result;
}
/**
* Select primary gates based on framework and category
*/
private selectPrimaryGates(criteria: ExtendedGateSelectionCriteria): string[] {
const gates: string[] = [];
// Framework-based selection
if (criteria.framework) {
switch (criteria.framework) {
case 'ReACT':
gates.push('framework-compliance', 'educational-clarity');
break;
case 'CAGEERF':
gates.push('framework-compliance', 'research-quality');
break;
case '5W1H':
gates.push('framework-compliance', 'technical-accuracy');
break;
case 'SCAMPER':
gates.push('framework-compliance', 'content-structure');
break;
default:
gates.push('framework-compliance');
}
}
// Category-based selection
if (criteria.category) {
switch (criteria.category) {
case 'analysis':
gates.push('research-quality', 'technical-accuracy');
break;
case 'education':
gates.push('educational-clarity', 'content-structure');
break;
case 'development':
gates.push('code-quality', 'security-awareness');
break;
case 'research':
gates.push('research-quality', 'technical-accuracy');
break;
default:
gates.push('content-structure');
}
}
return [...new Set(gates)]; // Remove duplicates
}
/**
* Select gates based on semantic analysis
*/
private selectSemanticGates(criteria: ExtendedGateSelectionCriteria): string[] {
if (!criteria.semanticAnalysis) {
return [];
}
const gates: string[] = [];
const analysis = criteria.semanticAnalysis;
// Example semantic-based selection (would be expanded with real analysis)
if (analysis.confidence && analysis.confidence > 0.8) {
gates.push('technical-accuracy');
}
if (criteria.executionMode === 'chain') {
gates.push('educational-clarity');
}
if (criteria.complexityLevel === 'high') {
gates.push('research-quality');
}
return gates;
}
/**
* Merge multiple gate selections and remove duplicates
*/
private mergeGateSelections(...selections: string[][]): string[] {
const allGates = selections.flat();
return [...new Set(allGates)];
}
/**
* Generate human-readable reasoning for gate selection
*/
private generateSelectionReasoning(
criteria: ExtendedGateSelectionCriteria,
primaryGates: string[],
semanticGates: string[]
): string[] {
const reasoning: string[] = [];
if (criteria.framework) {
reasoning.push(`Selected framework-specific gates for ${criteria.framework} methodology`);
}
if (criteria.category) {
reasoning.push(`Applied category-specific gates for ${criteria.category} content`);
}
if (semanticGates.length > 0) {
reasoning.push(`Enhanced selection with semantic analysis recommendations`);
}
if (criteria.complexityLevel) {
reasoning.push(`Adjusted for ${criteria.complexityLevel} complexity level`);
}
if (criteria.userPreferences?.performanceMode) {
reasoning.push(`Optimized for performance mode`);
}
return reasoning;
}
/**
* Calculate confidence score for gate selection
*/
private calculateSelectionConfidence(
criteria: ExtendedGateSelectionCriteria,
selectedGates: string[]
): number {
let confidence = 0.5; // Base confidence
// Increase confidence with more context
if (criteria.framework) confidence += 0.2;
if (criteria.category) confidence += 0.2;
if (criteria.semanticAnalysis) confidence += 0.1;
// Adjust based on gate count
if (selectedGates.length >= 2 && selectedGates.length <= 4) {
confidence += 0.1; // Good balance
} else if (selectedGates.length > 4) {
confidence -= 0.1; // Too many gates might be overwhelming
}
return Math.min(Math.max(confidence, 0), 1);
}
/**
* Estimate execution time for selected gates
*/
private estimateExecutionTime(selectedGates: string[], criteria: ExtendedGateSelectionCriteria): number {
// Base time per gate (in milliseconds)
const baseTimePerGate = 100;
// Complexity multipliers
const complexityMultipliers = {
low: 0.8,
medium: 1.0,
high: 1.5
};
const multiplier = complexityMultipliers[criteria.complexityLevel || 'medium'];
return selectedGates.length * baseTimePerGate * multiplier;
}
/**
* Determine fallback gates if primary selection fails
*/
private determineFallbackGates(criteria: ExtendedGateSelectionCriteria): string[] {
// Default fallback gates
const fallbacks = ['content-structure'];
// Add framework-specific fallback if available
if (criteria.framework) {
fallbacks.push('framework-compliance');
}
return fallbacks;
}
/**
* Get selection history for analysis
*/
getSelectionHistory(): GateSelectionResult[] {
return [...this.selectionHistory];
}
/**
* Clear selection history
*/
clearHistory(): void {
this.selectionHistory = [];
this.logger.debug('[GATE SELECTION ENGINE] Selection history cleared');
}
/**
* Get selection statistics
*/
getStatistics() {
const totalSelections = this.selectionHistory.length;
const averageGatesSelected = totalSelections > 0
? this.selectionHistory.reduce((sum, result) => sum + result.selectedGates.length, 0) / totalSelections
: 0;
const averageConfidence = totalSelections > 0
? this.selectionHistory.reduce((sum, result) => sum + result.confidence, 0) / totalSelections
: 0;
return {
totalSelections,
averageGatesSelected: Math.round(averageGatesSelected * 10) / 10,
averageConfidence: Math.round(averageConfidence * 100) / 100,
historySize: this.selectionHistory.length
};
}
}
/**
* Factory function for creating gate selection engine
*/
export function createGateSelectionEngine(logger: Logger): GateSelectionEngine {
return new GateSelectionEngine(logger);
}
// Interfaces are already exported via declaration above
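// Usage sketch (illustrative; assumes a `logger` implementing the Logger interface):
//
//   const engine = createGateSelectionEngine(logger);
//   const result = await engine.selectGates({
//     framework: 'CAGEERF',
//     category: 'analysis',
//     executionMode: 'template',
//     complexityLevel: 'medium'
//   });
//   // result.selectedGates ->
//   //   ['framework-compliance', 'research-quality', 'technical-accuracy']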
```
--------------------------------------------------------------------------------
/server/src/mcp-tools/shared/structured-response-builder.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Unified MCP Structured Response Builder
*
* Provides consistent structured response creation across all MCP tools
* to ensure MCP protocol compliance when outputSchema is defined.
*
* This addresses the issue where tools must provide structuredContent
* when they declare an outputSchema, as required by MCP protocol.
*/
import { ToolResponse } from "../../types/index.js";
import { ErrorContext } from "../types/shared-types.js";
/**
* Metadata for creating structured responses
*/
export interface ResponseMetadata {
/** Tool name (prompt_manager, prompt_engine, system_control) */
tool: string;
/** Operation being performed (create, update, delete, execute, etc.) */
operation: string;
/** Type of execution for this operation */
executionType?: "prompt" | "template" | "chain";
/** Execution time in milliseconds */
executionTime?: number;
/** Whether framework processing was enabled */
frameworkEnabled?: boolean;
/** Framework that was used (if any) */
frameworkUsed?: string;
/** Number of steps executed (for chain operations) */
stepsExecuted?: number;
/** Session ID for tracking related operations */
sessionId?: string;
/** Tool-specific operation data */
operationData?: Record<string, any>;
/** Analytics data to include */
analytics?: {
totalExecutions: number;
successRate: number;
averageExecutionTime: number;
frameworkSwitches?: number;
gateValidationCount?: number;
errorCount?: number;
uptime: number;
} | Record<string, any>;
/** Gate validation results */
gateValidation?: {
enabled: boolean;
passed: boolean;
totalGates: number;
failedGates: Array<any>;
passedGates?: Array<any>;
executionTime: number;
retryCount?: number;
};
}
/**
 * Unified structured response builder for MCP tools
 */
export class StructuredResponseBuilder {
/**
* Create a structured tool response with consistent metadata
*/
static createToolResponse(content: string, metadata: ResponseMetadata): ToolResponse {
const startTime = Date.now();
const executionId = `${metadata.tool.toLowerCase()}-${metadata.operation}-${startTime}`;
const response: ToolResponse = {
content: [{ type: "text", text: content }],
isError: false,
structuredContent: {
executionMetadata: {
executionId,
executionType: metadata.executionType || "prompt",
startTime,
endTime: startTime + (metadata.executionTime || 0),
executionTime: metadata.executionTime || 0,
frameworkEnabled: metadata.frameworkEnabled || false,
frameworkUsed: metadata.frameworkUsed,
stepsExecuted: metadata.stepsExecuted,
sessionId: metadata.sessionId
}
}
};
// Add optional structured content fields
if (metadata.analytics) {
response.structuredContent!.analytics = metadata.analytics as any;
}
if (metadata.gateValidation) {
response.structuredContent!.gateValidation = metadata.gateValidation;
}
// Add tool-specific operation data
if (metadata.operationData) {
response.structuredContent = {
...response.structuredContent,
operationData: {
tool: metadata.tool,
operation: metadata.operation,
...metadata.operationData
}
};
}
return response;
}
/**
* Create a structured error response
*/
static createErrorResponse(error: Error | string, context: ErrorContext): ToolResponse {
const timestamp = Date.now();
const toolName = context.tool || 'unknown';
const executionId = `${toolName.toLowerCase()}-error-${timestamp}`;
const errorMessage = error instanceof Error ? error.message : error;
return {
content: [{ type: "text", text: `Error: ${errorMessage}` }],
isError: true,
structuredContent: {
executionMetadata: {
executionId,
executionType: "prompt",
startTime: timestamp,
endTime: timestamp,
executionTime: 0,
frameworkEnabled: false
},
errorInfo: {
errorCode: toolName.toUpperCase() + "_ERROR",
errorType: context.errorType || "system",
message: errorMessage,
details: context.details,
timestamp,
severity: context.severity || "medium",
suggestedActions: context.suggestedActions,
relatedComponents: context.relatedComponents
}
}
};
}
/**
* Create a simple response with minimal metadata (for backward compatibility)
*/
static createSimpleResponse(content: string, tool: string, operation: string): ToolResponse {
return this.createToolResponse(content, {
tool,
operation,
executionType: "prompt",
frameworkEnabled: false
});
}
/**
* Create a response for prompt operations with analysis data
*/
static createPromptResponse(
content: string,
operation: string,
promptData?: {
promptId?: string;
category?: string;
analysisResult?: any;
affectedFiles?: string[];
},
includeStructuredContent: boolean = false
): ToolResponse {
// Return simple text response by default for Claude Code visibility
if (!includeStructuredContent) {
return {
content: [{ type: "text", text: content }],
isError: false
};
}
// Include structured metadata when explicitly requested
return this.createToolResponse(content, {
tool: "prompt_manager",
operation,
executionType: "prompt",
frameworkEnabled: false,
operationData: {
promptId: promptData?.promptId,
category: promptData?.category,
analysisResult: promptData?.analysisResult,
affectedFiles: promptData?.affectedFiles
}
});
}
/**
* Create a response for execution operations
*/
static createExecutionResponse(
content: string,
operation: string,
executionData?: {
executionType?: "prompt" | "template" | "chain";
executionTime?: number;
frameworkUsed?: string;
stepsExecuted?: number;
sessionId?: string;
gateResults?: any;
},
includeStructuredContent: boolean = true
): ToolResponse {
// For template/prompt execution, return simple text response so Claude Code can see instructions
if (!includeStructuredContent) {
return {
content: [{ type: "text", text: content }],
isError: false
};
}
// For other operations (chains, etc.), include full structured metadata
return this.createToolResponse(content, {
tool: "prompt_engine",
operation,
executionType: executionData?.executionType || "prompt",
executionTime: executionData?.executionTime,
frameworkEnabled: !!executionData?.frameworkUsed,
frameworkUsed: executionData?.frameworkUsed,
stepsExecuted: executionData?.stepsExecuted,
sessionId: executionData?.sessionId,
gateValidation: executionData?.gateResults
});
}
/**
* Create a response for system control operations
*/
static createSystemResponse(
content: string,
operation: string,
systemData?: {
frameworkState?: any;
systemHealth?: any;
configChanges?: any;
analytics?: any;
},
includeStructuredContent: boolean = false
): ToolResponse {
// Return simple text response by default for Claude Code visibility
if (!includeStructuredContent) {
return {
content: [{ type: "text", text: content }],
isError: false
};
}
// Include structured metadata when explicitly requested
return this.createToolResponse(content, {
tool: "system_control",
operation,
executionType: "prompt",
frameworkEnabled: true,
analytics: systemData?.analytics,
operationData: {
frameworkState: systemData?.frameworkState,
systemHealth: systemData?.systemHealth,
configChanges: systemData?.configChanges
}
});
}
}
// Export convenience functions for easier usage (using function wrappers to avoid class reference timing issues)
export function createToolResponse(content: string, metadata: ResponseMetadata): ToolResponse {
return StructuredResponseBuilder.createToolResponse(content, metadata);
}
export function createErrorResponse(error: Error | string, context: ErrorContext): ToolResponse {
return StructuredResponseBuilder.createErrorResponse(error, context);
}
export function createSimpleResponse(content: string, tool: string, operation: string): ToolResponse {
return StructuredResponseBuilder.createSimpleResponse(content, tool, operation);
}
export function createPromptResponse(
content: string,
operation: string,
promptData?: {
promptId?: string;
category?: string;
analysisResult?: any;
affectedFiles?: string[];
},
includeStructuredContent: boolean = false
): ToolResponse {
return StructuredResponseBuilder.createPromptResponse(content, operation, promptData, includeStructuredContent);
}
export function createExecutionResponse(
content: string,
operation: string,
executionData?: {
executionType?: "prompt" | "template" | "chain";
executionTime?: number;
frameworkUsed?: string;
stepsExecuted?: number;
sessionId?: string;
gateResults?: any;
},
includeStructuredContent: boolean = true
): ToolResponse {
return StructuredResponseBuilder.createExecutionResponse(content, operation, executionData, includeStructuredContent);
}
export function createSystemResponse(
content: string,
operation: string,
systemData?: {
frameworkState?: any;
systemHealth?: any;
configChanges?: any;
analytics?: any;
},
includeStructuredContent: boolean = false
): ToolResponse {
return StructuredResponseBuilder.createSystemResponse(content, operation, systemData, includeStructuredContent);
}
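// Usage sketch (illustrative; the ErrorContext fields shown are the ones
// read by createErrorResponse above):
//
//   const ok = createExecutionResponse("Chain complete", "execute", {
//     executionType: "chain",
//     executionTime: 1250,
//     stepsExecuted: 3
//   });
//
//   const err = createErrorResponse(new Error("Prompt not found"), {
//     tool: "prompt_engine",
//     errorType: "validation",
//     severity: "low"
//   });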
```
--------------------------------------------------------------------------------
/server/src/frameworks/methodology/registry.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Methodology Registry - Phase 2 Implementation
*
* Centralized registry for loading and managing methodology guides.
* Extracted from FrameworkManager to provide clear separation of concerns
* and enable better methodology guide management.
*/
import { Logger } from "../../logging/index.js";
import {
IMethodologyGuide,
FrameworkDefinition,
FrameworkMethodology
} from "../types/index.js";
import { CAGEERFMethodologyGuide } from "./guides/cageerf-guide.js";
import { ReACTMethodologyGuide } from "./guides/react-guide.js";
import { FiveW1HMethodologyGuide } from "./guides/5w1h-guide.js";
import { SCAMPERMethodologyGuide } from "./guides/scamper-guide.js";
/**
* Methodology registry configuration
*/
export interface MethodologyRegistryConfig {
/** Whether to auto-load built-in methodology guides */
autoLoadBuiltIn: boolean;
/** Custom methodology guides to load */
customGuides?: IMethodologyGuide[];
/** Whether to validate guides on registration */
validateOnRegistration: boolean;
}
/**
* Methodology guide registry entry
*/
export interface MethodologyGuideEntry {
guide: IMethodologyGuide;
registeredAt: Date;
isBuiltIn: boolean;
enabled: boolean;
metadata: {
loadTime: number;
validationStatus: 'passed' | 'failed' | 'not_validated';
lastUsed?: Date;
};
}
/**
* Methodology Registry
*
* Manages the loading, registration, and lifecycle of methodology guides.
* Provides a clean separation between guide management and framework orchestration.
*/
export class MethodologyRegistry {
private guides = new Map<string, MethodologyGuideEntry>();
private logger: Logger;
private config: MethodologyRegistryConfig;
private initialized = false;
constructor(logger: Logger, config: Partial<MethodologyRegistryConfig> = {}) {
this.logger = logger;
this.config = {
autoLoadBuiltIn: config.autoLoadBuiltIn ?? true,
customGuides: config.customGuides ?? [],
validateOnRegistration: config.validateOnRegistration ?? true
};
}
/**
* Initialize the methodology registry
*/
async initialize(): Promise<void> {
if (this.initialized) {
this.logger.debug("MethodologyRegistry already initialized");
return;
}
this.logger.info("Initializing MethodologyRegistry...");
const startTime = performance.now();
try {
// Load built-in methodology guides if enabled
if (this.config.autoLoadBuiltIn) {
await this.loadBuiltInGuides();
}
// Load custom guides if provided
if (this.config.customGuides && this.config.customGuides.length > 0) {
await this.loadCustomGuides(this.config.customGuides);
}
const loadTime = performance.now() - startTime;
this.initialized = true;
this.logger.info(
`MethodologyRegistry initialized with ${this.guides.size} guides in ${loadTime.toFixed(1)}ms`
);
} catch (error) {
this.logger.error("Failed to initialize MethodologyRegistry:", error);
throw error;
}
}
/**
* Register a methodology guide
*/
async registerGuide(
guide: IMethodologyGuide,
isBuiltIn: boolean = false
): Promise<boolean> {
const startTime = performance.now();
try {
// Validate guide if required
if (this.config.validateOnRegistration) {
const validationResult = this.validateGuide(guide);
if (!validationResult.valid) {
this.logger.warn(
`Guide validation failed for ${guide.frameworkId}: ${validationResult.errors.join(', ')}`
);
return false;
}
}
      // Check for an existing guide with the same ID; keys are stored lowercased
      // so they match the case-insensitive lookups in getGuide()/hasGuide()
      const guideKey = guide.frameworkId.toLowerCase();
      if (this.guides.has(guideKey)) {
        this.logger.warn(`Guide with ID '${guide.frameworkId}' already registered, replacing...`);
      }
// Create registry entry
const entry: MethodologyGuideEntry = {
guide,
registeredAt: new Date(),
isBuiltIn,
enabled: true,
metadata: {
loadTime: performance.now() - startTime,
validationStatus: this.config.validateOnRegistration ? 'passed' : 'not_validated'
}
};
      this.guides.set(guideKey, entry);
this.logger.debug(
`Registered ${isBuiltIn ? 'built-in' : 'custom'} methodology guide: ${guide.frameworkName} (${guide.frameworkId})`
);
return true;
} catch (error) {
this.logger.error(`Failed to register methodology guide ${guide.frameworkId}:`, error);
return false;
}
}
/**
* Get a methodology guide by ID
*/
getGuide(guideId: string): IMethodologyGuide | undefined {
this.ensureInitialized();
const entry = this.guides.get(guideId.toLowerCase());
if (entry && entry.enabled) {
// Update last used timestamp
entry.metadata.lastUsed = new Date();
return entry.guide;
}
return undefined;
}
/**
* Get all registered methodology guides
*/
getAllGuides(enabledOnly: boolean = true): IMethodologyGuide[] {
this.ensureInitialized();
const guides: IMethodologyGuide[] = [];
for (const [_, entry] of this.guides) {
if (!enabledOnly || entry.enabled) {
guides.push(entry.guide);
}
}
return guides;
}
/**
* Get guide entries with metadata
*/
getGuideEntries(enabledOnly: boolean = true): MethodologyGuideEntry[] {
this.ensureInitialized();
const entries: MethodologyGuideEntry[] = [];
for (const [_, entry] of this.guides) {
if (!enabledOnly || entry.enabled) {
entries.push(entry);
}
}
return entries;
}
/**
* Check if a guide is registered
*/
hasGuide(guideId: string): boolean {
this.ensureInitialized();
return this.guides.has(guideId.toLowerCase());
}
/**
* Enable or disable a methodology guide
*/
setGuideEnabled(guideId: string, enabled: boolean): boolean {
this.ensureInitialized();
const entry = this.guides.get(guideId.toLowerCase());
if (entry) {
entry.enabled = enabled;
this.logger.info(`Methodology guide '${guideId}' ${enabled ? 'enabled' : 'disabled'}`);
return true;
}
this.logger.warn(`Cannot ${enabled ? 'enable' : 'disable'} guide '${guideId}': not found`);
return false;
}
/**
* Get registry statistics
*/
getRegistryStats() {
this.ensureInitialized();
const entries = Array.from(this.guides.values());
const enabledCount = entries.filter(e => e.enabled).length;
const builtInCount = entries.filter(e => e.isBuiltIn).length;
return {
totalGuides: entries.length,
enabledGuides: enabledCount,
builtInGuides: builtInCount,
customGuides: entries.length - builtInCount,
averageLoadTime: entries.reduce((sum, e) => sum + e.metadata.loadTime, 0) / entries.length || 0,
initialized: this.initialized
};
}
// Private implementation methods
/**
* Load built-in methodology guides
*/
private async loadBuiltInGuides(): Promise<void> {
this.logger.debug("Loading built-in methodology guides...");
const builtInGuides = [
new CAGEERFMethodologyGuide(),
new ReACTMethodologyGuide(),
new FiveW1HMethodologyGuide(),
new SCAMPERMethodologyGuide()
];
for (const guide of builtInGuides) {
const success = await this.registerGuide(guide, true);
if (!success) {
this.logger.warn(`Failed to register built-in guide: ${guide.frameworkName}`);
}
}
this.logger.info(`Loaded ${builtInGuides.length} built-in methodology guides`);
}
/**
* Load custom methodology guides
*/
private async loadCustomGuides(customGuides: IMethodologyGuide[]): Promise<void> {
this.logger.debug(`Loading ${customGuides.length} custom methodology guides...`);
for (const guide of customGuides) {
const success = await this.registerGuide(guide, false);
if (!success) {
this.logger.warn(`Failed to register custom guide: ${guide.frameworkName}`);
}
}
this.logger.info(`Loaded ${customGuides.length} custom methodology guides`);
}
/**
* Validate a methodology guide
*/
private validateGuide(guide: IMethodologyGuide): { valid: boolean; errors: string[] } {
const errors: string[] = [];
// Check required properties
if (!guide.frameworkId || typeof guide.frameworkId !== 'string') {
errors.push('frameworkId is required and must be a string');
}
if (!guide.frameworkName || typeof guide.frameworkName !== 'string') {
errors.push('frameworkName is required and must be a string');
}
if (!guide.methodology || typeof guide.methodology !== 'string') {
errors.push('methodology is required and must be a string');
}
if (!guide.version || typeof guide.version !== 'string') {
errors.push('version is required and must be a string');
}
// Check required methods exist
const requiredMethods = [
'guidePromptCreation',
'guideTemplateProcessing',
'guideExecutionSteps',
'enhanceWithMethodology',
'validateMethodologyCompliance',
'getSystemPromptGuidance'
];
for (const method of requiredMethods) {
if (typeof (guide as any)[method] !== 'function') {
errors.push(`Required method '${method}' is missing or not a function`);
}
}
return {
valid: errors.length === 0,
errors
};
}
/**
* Ensure registry is initialized
*/
private ensureInitialized(): void {
if (!this.initialized) {
throw new Error("MethodologyRegistry not initialized. Call initialize() first.");
}
}
/**
* Get initialization status
*/
get isInitialized(): boolean {
return this.initialized;
}
}
/**
* Create and initialize a MethodologyRegistry instance
*/
export async function createMethodologyRegistry(
logger: Logger,
config?: Partial<MethodologyRegistryConfig>
): Promise<MethodologyRegistry> {
const registry = new MethodologyRegistry(logger, config);
await registry.initialize();
return registry;
}
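// Usage sketch (illustrative; assumes a Logger instance named `logger`;
// guide IDs depend on each guide's frameworkId, and lookups are case-insensitive):
//
//   const registry = await createMethodologyRegistry(logger, {
//     autoLoadBuiltIn: true,
//     validateOnRegistration: true
//   });
//   const guide = registry.getGuide('cageerf');
//   console.log(registry.getRegistryStats());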
```
--------------------------------------------------------------------------------
/server/tests/scripts/unit-semantic-analyzer.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
/**
* Semantic Analyzer Unit Tests - Node.js Script Version
* Tests the enhanced semantic analyzer for prompt/template/chain/workflow classification
 */
import { pathToFileURL } from 'node:url';
async function runSemanticAnalyzerTests() {
try {
console.log('🧪 Running Semantic Analyzer unit tests...');
console.log('📋 Testing prompt classification and analysis functionality');
    // Import the configurable semantic analyzer from the compiled output
    const semanticModule = await import('../../dist/analysis/configurable-semantic-analyzer.js');
    const ConfigurableSemanticAnalyzer = semanticModule.ConfigurableSemanticAnalyzer;
// Mock logger
const mockLogger = {
debug: () => {},
info: () => {},
warn: () => {},
error: () => {}
};
let analyzer;
// Setup for each test
function setupTest() {
analyzer = new ConfigurableSemanticAnalyzer(mockLogger, {
enableCaching: false // Disable caching for consistent testing
});
}
// Simple assertion helpers
function assertEqual(actual, expected, testName) {
if (actual === expected) {
console.log(`✅ ${testName}: PASSED`);
return true;
} else {
console.error(`❌ ${testName}: FAILED`);
console.error(` Expected: ${expected}`);
console.error(` Actual: ${actual}`);
return false;
}
}
function assertGreaterThan(actual, expected, testName) {
if (actual > expected) {
console.log(`✅ ${testName}: PASSED (${actual} > ${expected})`);
return true;
} else {
console.error(`❌ ${testName}: FAILED (${actual} <= ${expected})`);
return false;
}
}
function assertTruthy(value, testName) {
if (value) {
console.log(`✅ ${testName}: PASSED`);
return true;
} else {
console.error(`❌ ${testName}: FAILED - Expected truthy value, got: ${value}`);
return false;
}
}
let testResults = [];
// Test 1: Basic Prompt Classification
console.log('🔍 Test 1: Simple Prompt Classification');
setupTest();
const simplePrompt = {
id: 'test_simple',
name: 'Simple Test',
description: 'Simple variable substitution',
category: 'test',
userMessageTemplate: 'Hello {{name}}, how are you?',
arguments: [{ name: 'name', required: true, description: 'User name' }]
};
const simpleAnalysis = await analyzer.analyzePrompt(simplePrompt);
testResults.push(assertEqual(simpleAnalysis.executionType, 'prompt', 'Simple prompt classified as "prompt"'));
testResults.push(assertEqual(simpleAnalysis.requiresFramework, false, 'Simple prompt requires no framework'));
testResults.push(assertGreaterThan(simpleAnalysis.confidence, 0.5, 'Simple prompt confidence > 0.5'));
testResults.push(assertEqual(simpleAnalysis.frameworkRecommendation?.shouldUseFramework, false, 'No framework recommended for simple prompt'));
// Test 2: Template Classification
console.log('🔍 Test 2: Template Classification');
setupTest();
const templatePrompt = {
id: 'test_template',
name: 'Template Test',
description: 'Complex template with conditional logic',
category: 'test',
userMessageTemplate: `
{% if analysis_type == 'detailed' %}
Perform detailed analysis of {{content}} considering:
{% for aspect in aspects %}
- {{aspect}}
{% endfor %}
{% else %}
Quick analysis of {{content}}
{% endif %}
`,
arguments: [
{ name: 'content', required: true, description: 'Content to analyze' },
{ name: 'analysis_type', required: false, description: 'Type of analysis' },
{ name: 'aspects', required: false, description: 'Analysis aspects' }
]
};
const templateAnalysis = await analyzer.analyzePrompt(templatePrompt);
// ConfigurableSemanticAnalyzer analyzes based on content structure - 'chain' is valid for conditional logic
testResults.push(assertTruthy(['template', 'prompt', 'chain'].includes(templateAnalysis.executionType), `Template classified appropriately (got: ${templateAnalysis.executionType})`));
testResults.push(assertGreaterThan(templateAnalysis.confidence, 0.3, 'Template confidence reasonable'));
// Test 3: Chain Classification
console.log('🔍 Test 3: Chain Classification');
setupTest();
const chainPrompt = {
id: 'test_chain',
name: 'Chain Test',
description: 'Multi-step chain execution with dependencies',
category: 'test',
userMessageTemplate: `
Step 1: Analyze {{input_data}}
Step 2: Based on the analysis from step 1, generate {{output_format}}
Step 3: Validate the output and provide {{final_result}}
`,
arguments: [
{ name: 'input_data', required: true, description: 'Initial data' },
{ name: 'output_format', required: true, description: 'Desired output format' },
{ name: 'final_result', required: false, description: 'Final result type' }
]
};
const chainAnalysis = await analyzer.analyzePrompt(chainPrompt);
testResults.push(assertTruthy(['chain', 'template', 'prompt'].includes(chainAnalysis.executionType), 'Chain classified to valid type'));
testResults.push(assertGreaterThan(chainAnalysis.confidence, 0.3, 'Chain confidence reasonable'));
// Test 4: Workflow Classification
console.log('🔍 Test 4: Workflow Classification');
setupTest();
const workflowPrompt = {
id: 'test_workflow',
name: 'Workflow Test',
description: 'Complex workflow with decision points and branching logic',
category: 'test',
userMessageTemplate: `
WORKFLOW: Complex Decision Process
IF condition_a THEN:
EXECUTE branch_a WITH {{param_a}}
VALIDATE result_a
IF valid THEN continue ELSE abort
ELSE:
EXECUTE branch_b WITH {{param_b}}
LOOP through {{items}} and process each
MERGE results and finalize
FINALLY: Generate {{final_output}}
`,
arguments: [
{ name: 'condition_a', required: true, description: 'Primary condition' },
{ name: 'param_a', required: false, description: 'Branch A parameter' },
{ name: 'param_b', required: false, description: 'Branch B parameter' },
{ name: 'items', required: false, description: 'Items to process' },
{ name: 'final_output', required: true, description: 'Final output type' }
]
};
const workflowAnalysis = await analyzer.analyzePrompt(workflowPrompt);
testResults.push(assertTruthy(['workflow', 'chain', 'template', 'prompt'].includes(workflowAnalysis.executionType), 'Workflow classified to valid type'));
testResults.push(assertGreaterThan(workflowAnalysis.confidence, 0.2, 'Workflow confidence reasonable'));
// Test 5: Framework Requirements
console.log('🔍 Test 5: Framework Requirements Analysis');
setupTest();
const complexPrompt = {
id: 'test_complex',
name: 'Complex Analysis',
description: 'Requires systematic analysis with CAGEERF methodology',
category: 'analysis',
userMessageTemplate: `
Conduct comprehensive analysis using systematic approach:
1. CONTEXT: Understand the situation {{situation}}
2. ANALYSIS: Deep dive into {{subject}}
3. GOALS: Define clear objectives
4. EXECUTION: Implement solution
5. EVALUATION: Assess outcomes
6. REFINEMENT: Iterate and improve
`,
arguments: [
{ name: 'situation', required: true, description: 'Situation to analyze' },
{ name: 'subject', required: true, description: 'Analysis subject' }
]
};
const complexAnalysis = await analyzer.analyzePrompt(complexPrompt);
testResults.push(assertTruthy(typeof complexAnalysis.requiresFramework === 'boolean', 'Framework requirement determined'));
testResults.push(assertTruthy(complexAnalysis.frameworkRecommendation, 'Framework recommendation provided'));
// Test 6: Confidence Scoring
console.log('🔍 Test 6: Confidence Scoring Validation');
// Test that confidence is always within valid range
const analysisResults = [simpleAnalysis, templateAnalysis, chainAnalysis, workflowAnalysis, complexAnalysis];
for (let i = 0; i < analysisResults.length; i++) {
const analysis = analysisResults[i];
testResults.push(assertGreaterThan(analysis.confidence, 0, `Analysis ${i+1} confidence > 0`));
testResults.push(assertTruthy(analysis.confidence <= 1, `Analysis ${i+1} confidence <= 1`));
}
// Test 7: Reasoning Provided
console.log('🔍 Test 7: Analysis Reasoning');
for (let i = 0; i < analysisResults.length; i++) {
const analysis = analysisResults[i];
testResults.push(assertTruthy(Array.isArray(analysis.reasoning), `Analysis ${i+1} has reasoning array`));
testResults.push(assertTruthy(analysis.reasoning.length > 0, `Analysis ${i+1} reasoning not empty`));
}
// Results Summary
const passedTests = testResults.filter(result => result).length;
const totalTests = testResults.length;
console.log('\n📊 Semantic Analyzer Unit Tests Summary:');
console.log(` ✅ Passed: ${passedTests}/${totalTests} tests`);
console.log(` 📊 Success Rate: ${((passedTests/totalTests)*100).toFixed(1)}%`);
if (passedTests === totalTests) {
console.log('🎉 All Semantic Analyzer unit tests passed!');
return true;
} else {
console.error('❌ Some Semantic Analyzer tests failed');
return false;
}
} catch (error) {
console.error('❌ Semantic Analyzer tests failed with error:', error.message);
if (error.stack) {
console.error('Stack trace:', error.stack);
}
return false;
}
}
// Run the tests when executed directly (pathToFileURL keeps the
// entry-point comparison robust on Windows-style paths)
if (import.meta.url === pathToFileURL(process.argv[1]).href) {
runSemanticAnalyzerTests().catch(error => {
console.error('❌ Test execution failed:', error);
process.exit(1);
});
}
export { runSemanticAnalyzerTests };
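// Usage sketch: run from the server/ directory after compiling to dist/,
// e.g. `node tests/scripts/unit-semantic-analyzer.js` (the relative import
// above expects the build output at server/dist).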
```
--------------------------------------------------------------------------------
/server/prompts/development/prompts.json:
--------------------------------------------------------------------------------
```json
{
"prompts": [
{
"id": "expert_code_implementation",
"name": "Expert Code Implementation",
"category": "development",
"description": "Expert implementation of refined coding requirements with comprehensive explanation and best practices.",
"file": "expert_code_implementation.md",
"arguments": [
{
"name": "refined_query",
"description": "The refined coding request",
"required": true
},
{
"name": "language",
"description": "The programming language to use",
"required": true
},
{
"name": "requirements",
"description": "The functional requirements",
"required": true
},
{
"name": "constraints",
"description": "Any performance/security/quality considerations",
"required": false
}
],
"gates": [
"security-awareness",
"code-quality",
"technical-accuracy"
]
},
{
"id": "code_review_optimization_chain",
"name": "Comprehensive Code Review",
"category": "development",
"description": "Systematic 6-step code review covering structure, functionality, security, performance, optimization, and quality assurance",
"file": "code_review_optimization_chain.md",
"arguments": [
{
"name": "target_code",
"description": "The code to be reviewed and optimized",
"required": true
},
{
"name": "language_framework",
"description": "Programming language and framework being used",
"required": true
},
{
"name": "performance_goals",
"description": "Specific performance targets or optimization goals",
"required": false
}
]
},
{
"id": "detect_project_commands",
"name": "Project Commands Detection",
"category": "development",
"description": "Intelligently detects and configures project validation commands (lint, test, build) for different project types with Enhanced CAGEERF integration",
"file": "detect_project_commands.md",
"arguments": [
{
"name": "project_path",
"description": "Path to the project directory",
"required": true
},
{
"name": "project_type",
"description": "Type of project (nodejs, rust, python, go, etc.)",
"required": true
}
]
},
{
"id": "generate_comprehensive_claude_md",
"name": "Comprehensive CLAUDE.md Generator",
"category": "development",
"description": "Generates comprehensive CLAUDE.md files with all 6 development criteria, Enhanced CAGEERF integration, and project-specific configurations",
"file": "generate_comprehensive_claude_md.md",
"arguments": [
{
"name": "project_path",
"description": "Path to the project directory",
"required": true
},
{
"name": "project_name",
"description": "Name of the project",
"required": true
},
{
"name": "project_type",
"description": "Type of project",
"required": true
},
{
"name": "architecture_level",
"description": "Architecture sophistication level 1-10",
"required": true
},
{
"name": "project_commands",
"description": "JSON object with detected project commands",
"required": true
}
]
},
{
"id": "analyze_file_structure",
"name": "Analyze File Structure",
"category": "development",
"description": "Analyzes a file's structure to identify potential modules, dependencies, and organization patterns for refactoring.",
"file": "analyze_file_structure.md",
"arguments": [
{
"name": "code",
"description": "The source code to analyze",
"required": true
},
{
"name": "language",
"description": "Programming language of the code",
"required": true
},
{
"name": "file_path",
"description": "Path to the file being analyzed",
"required": true
}
]
},
{
"id": "create_modularization_plan",
"name": "Create Modularization Plan",
"category": "development",
"description": "Creates a detailed plan for splitting a file into smaller, more maintainable modules based on analysis of its structure and dependencies.",
"file": "create_modularization_plan.md",
"arguments": [
{
"name": "language",
"description": "Programming language of the code",
"required": true
},
{
"name": "file_path",
"description": "Path to the file being modularized",
"required": true
},
{
"name": "analysis_results",
"description": "Results from the file structure analysis",
"required": true
}
]
},
{
"id": "transform_code_to_modules",
"name": "Transform Code to Modules",
"category": "development",
"description": "Transforms a large file into multiple smaller module files according to a modularization plan, ensuring proper imports/exports and maintaining functionality.",
"file": "transform_code_to_modules.md",
"arguments": [
{
"name": "language",
"description": "Programming language of the code",
"required": true
},
{
"name": "file_path",
"description": "Path to the original file",
"required": true
},
{
"name": "original_code",
"description": "The original source code to be modularized",
"required": true
},
{
"name": "modularization_plan",
"description": "The detailed plan for how to split the code",
"required": true
}
]
},
{
"id": "test_temporary_gates",
"name": "Test Temporary Gates Implementation",
"category": "development",
"description": "Test prompt for validating the enhanced gate system with temporary gates. Demonstrates 5-level precedence, execution-scoped lifecycle management, and multiple gate types.",
"file": "test-temporary-gates.md",
"arguments": [
{
"name": "analysis_type",
"description": "Type of analysis to perform (e.g., 'technical specification', 'architectural design', 'implementation plan')",
"required": true
},
{
"name": "topic",
"description": "The main subject to analyze",
"required": true
},
{
"name": "content",
"description": "The content or scenario to analyze",
"required": true
},
{
"name": "focus_area",
"description": "Specific area to emphasize in the analysis",
"required": true
},
{
"name": "context",
"description": "The broader context or use case for the analysis",
"required": true
}
],
"gates": [
"content-structure",
"technical-accuracy"
]
},
{
"id": "strategicImplement",
"name": "Strategic Implementation",
"category": "development",
"description": "Systematically implement a plan phase with architectural alignment, compatibility checking, and progress tracking",
"file": "strategicImplement.md",
"arguments": [
{
"name": "plan_path",
"type": "string",
"description": "Path to the plan document to implement (e.g., plans/current/feature-plan.md)"
},
{
"name": "phase_identifier",
"type": "string",
"description": "Specific phase to implement (e.g., 'Phase 1', 'Phase 2A', 'Step 3: Integration')"
}
]
},
{
"id": "component_flow_analysis",
"name": "Component Flow Analysis",
"category": "development",
"description": "Comprehensive component review that tracks data flow, lifecycle, dependencies, and integration points",
"file": "component_flow_analysis.md",
"arguments": [
{
"name": "component_path",
"type": "string",
"description": "Path to the component file (e.g., src/components/UserProfile.tsx)"
},
{
"name": "component_code",
"type": "string",
"description": "The complete component source code to analyze"
},
{
"name": "framework",
"type": "string",
"description": "Framework being used (React, Vue, Angular, Svelte, etc.)"
},
{
"name": "language",
"type": "string",
"description": "Programming language (JavaScript, TypeScript, etc.)"
}
]
},
{
"id": "test_concise_format",
"name": "Test Concise Format",
"category": "development",
"description": "Testing the new concise message format",
"file": "test_concise_format.md",
"arguments": [
{
"name": "test_param",
"type": "string",
"description": "Test parameter"
}
]
},
{
"id": "test_concise_v2",
"name": "Test Concise Format V2",
"category": "development",
"description": "Testing the simplified concise message format after restart",
"file": "test_concise_v2.md",
"arguments": [
{
"name": "input",
"type": "string",
"description": "Test input"
}
]
},
{
"id": "test_final_concise",
"name": "Test Final Concise Format",
"category": "development",
"description": "Final test of concise message format with isLLMEnabled() fix",
"file": "test_final_concise.md",
"arguments": [
{
"name": "test_input",
"type": "string",
"description": "Test input parameter"
}
]
}
]
}
```
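Each entry above follows the same manifest shape: an `arguments` array whose `required` flags gate invocation, plus an optional `gates` list that the gate system resolves at execution time. A minimal sketch of how a caller might check a call against that shape before dispatching it, assuming only the JSON structure shown above — the `PromptEntry`/`PromptArgument` types and the `findMissingArguments` helper are illustrative, not part of this repo:

```typescript
// Illustrative types mirroring the prompts.json entries above; some entries
// use a "type" field and omit "required", so both are optional here.
interface PromptArgument {
  name: string;
  description: string;
  type?: string;
  required?: boolean;
}

interface PromptEntry {
  id: string;
  name: string;
  category: string;
  description: string;
  file: string;
  arguments: PromptArgument[];
  gates?: string[];
}

/** Names of required arguments missing from a candidate call. */
function findMissingArguments(
  entry: PromptEntry,
  args: Record<string, unknown>
): string[] {
  return entry.arguments
    .filter((arg) => arg.required === true && args[arg.name] === undefined)
    .map((arg) => arg.name);
}

// Example against "expert_code_implementation" above:
// findMissingArguments(entry, { refined_query: "...", language: "TypeScript" })
// => ["requirements"]  ("constraints" is optional and not reported)
```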
--------------------------------------------------------------------------------
/server/src/gates/guidance/GateGuidanceRenderer.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Gate Guidance Renderer - User-Facing Guidance Generation
*
* Single responsibility: Generate and format gate guidance for users.
* Clean dependencies: Only imports what it needs for guidance rendering.
*/
import * as fs from 'fs/promises';
import * as path from 'path';
import { fileURLToPath } from 'url';
import type { Logger } from '../../logging/index.js';
import { GateDefinition, GateContext } from '../core/gate-definitions.js';
import { filterFrameworkGuidance, hasFrameworkSpecificContent } from './FrameworkGuidanceFilter.js';
import type { TemporaryGateRegistry } from '../core/temporary-gate-registry.js';
/**
* Gate guidance renderer with framework-specific filtering and temporary gate support
*/
export class GateGuidanceRenderer {
private gateCache = new Map<string, GateDefinition>();
private gatesDirectory: string;
private logger: Logger;
private temporaryGateRegistry?: TemporaryGateRegistry;
constructor(logger: Logger, gatesDirectory?: string, temporaryGateRegistry?: TemporaryGateRegistry) {
this.logger = logger;
// Use same directory resolution pattern as existing system
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
this.gatesDirectory = gatesDirectory || path.join(__dirname, '../../gates/definitions');
this.temporaryGateRegistry = temporaryGateRegistry;
this.logger.debug('[GATE GUIDANCE RENDERER] Initialized with directory:', this.gatesDirectory);
if (temporaryGateRegistry) {
this.logger.debug('[GATE GUIDANCE RENDERER] Temporary gate registry enabled');
}
}
/**
* Generate formatted gate guidance for display to users
*
* @param gateIds - Array of gate IDs to render
* @param context - Context for gate activation and framework filtering
* @returns Formatted guidance text ready for display
*/
async renderGuidance(gateIds: string[], context: GateContext = {}): Promise<string> {
this.logger.info('🎨 [GATE GUIDANCE RENDERER] renderGuidance called:', {
gateIds,
framework: context.framework,
category: context.category,
gatesDirectory: this.gatesDirectory
});
if (gateIds.length === 0) {
this.logger.debug('[GATE GUIDANCE RENDERER] No gates provided, returning empty guidance');
return '';
}
const guidanceTexts: string[] = [];
for (const gateId of gateIds) {
try {
const gate = await this.loadGateDefinition(gateId);
if (gate) {
if (this.shouldActivateGate(gate, context)) {
guidanceTexts.push(this.formatGateGuidance(gate, context));
this.logger.debug('[GATE GUIDANCE RENDERER] Added guidance for gate:', gateId);
} else {
this.logger.debug('[GATE GUIDANCE RENDERER] Skipped gate (not applicable):', gateId);
}
} else {
this.logger.debug('[GATE GUIDANCE RENDERER] Failed to load gate:', gateId);
}
} catch (error) {
this.logger.warn('[GATE GUIDANCE RENDERER] Failed to load gate:', gateId, error);
}
}
if (guidanceTexts.length === 0) {
this.logger.debug('[GATE GUIDANCE RENDERER] No applicable gates found, returning empty guidance');
return '';
}
// Format as supplemental guidance section (clean formatting)
// NOTE: Framework is already described in tool descriptions and system prompt
// so we don't duplicate the framework reference in the header
const supplementalGuidance = `
---
## 🎯 Quality Enhancement Gates
${guidanceTexts.join('\n\n')}
---`;
this.logger.debug('[GATE GUIDANCE RENDERER] Generated supplemental guidance:', {
gateCount: guidanceTexts.length,
guidanceLength: supplementalGuidance.length
});
return supplementalGuidance;
}
/**
* Load gate definition from temporary registry or file system
*/
private async loadGateDefinition(gateId: string): Promise<GateDefinition | null> {
// Check cache first (performance optimization)
if (this.gateCache.has(gateId)) {
return this.gateCache.get(gateId)!;
}
// Phase 3 Enhancement: Check temporary gate registry first for temp_ prefixed gates
if (gateId.startsWith('temp_') && this.temporaryGateRegistry) {
this.logger.debug('[GATE GUIDANCE RENDERER] Attempting to load temporary gate:', gateId);
const tempGate = this.temporaryGateRegistry.getTemporaryGate(gateId);
if (tempGate) {
// Convert temporary gate to standard gate definition format
const gate: GateDefinition = {
id: tempGate.id,
name: tempGate.name,
guidance: tempGate.guidance,
activation: {
explicit_request: true // Temporary gates are explicitly requested
}
};
// Cache for reuse during this execution
this.gateCache.set(gateId, gate);
this.logger.info('[GATE GUIDANCE RENDERER] ✅ Loaded temporary gate:', {
gateId,
name: tempGate.name,
guidanceLength: tempGate.guidance.length
});
return gate;
} else {
this.logger.warn('[GATE GUIDANCE RENDERER] ⚠️ Temporary gate not found in registry:', gateId);
}
}
// Fall back to filesystem for non-temporary gates or if registry lookup fails
try {
const gateFile = path.join(this.gatesDirectory, `${gateId}.json`);
const fileContent = await fs.readFile(gateFile, 'utf-8');
const gateData = JSON.parse(fileContent);
// Extract essential fields for guidance rendering
const gate: GateDefinition = {
id: gateData.id,
name: gateData.name,
guidance: gateData.guidance || '',
activation: gateData.activation || {}
};
// Cache for reuse (performance optimization)
this.gateCache.set(gateId, gate);
this.logger.debug('[GATE GUIDANCE RENDERER] Loaded and cached gate definition from filesystem:', gateId);
return gate;
} catch (error) {
this.logger.error('[GATE GUIDANCE RENDERER] Failed to load gate definition:', gateId, error);
return null;
}
}
/**
* Check if gate should be activated for current context
*
* Framework gates (gate_type: "framework") bypass category checks and activate
* based on framework context alone. This ensures framework methodology guidance
* applies universally across all categories.
*/
private shouldActivateGate(gate: GateDefinition, context: GateContext): boolean {
const activation = gate.activation;
const isFrameworkGate = gate.gate_type === 'framework' || gate.id === 'framework-compliance';
this.logger.debug('[GATE GUIDANCE RENDERER] shouldActivateGate called:', {
gateId: gate.id,
gateType: gate.gate_type,
isFrameworkGate,
contextFramework: context.framework,
activationFrameworkContext: activation.framework_context,
contextCategory: context.category,
activationPromptCategories: activation.prompt_categories
});
// Check framework context match
if (context.framework && activation.framework_context) {
if (!activation.framework_context.includes(context.framework)) {
this.logger.debug('[GATE GUIDANCE RENDERER] Gate not activated - framework mismatch:', {
gateId: gate.id,
expectedFrameworks: activation.framework_context,
actualFramework: context.framework
});
return false;
}
// Framework gates activate on framework match alone (bypass category checks)
if (isFrameworkGate) {
this.logger.info('[GATE GUIDANCE RENDERER] ✅ Framework gate activated (universal):', {
gateId: gate.id,
framework: context.framework,
category: context.category
});
return true;
}
}
// Category gates check category context match
if (context.category && activation.prompt_categories) {
if (!activation.prompt_categories.includes(context.category)) {
this.logger.debug('[GATE GUIDANCE RENDERER] Gate not activated - category mismatch:', {
gateId: gate.id,
expectedCategories: activation.prompt_categories,
actualCategory: context.category
});
return false;
}
}
// If no specific criteria or all criteria match, activate
this.logger.debug('[GATE GUIDANCE RENDERER] Gate activated:', gate.id);
return true;
}
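  // Illustrative example (not a file shipped in this repo): a category gate at
  // <gatesDirectory>/code-quality.json might declare
  //   { "id": "code-quality", "name": "Code Quality",
  //     "guidance": "...", "activation": { "prompt_categories": ["development"] } }
  // which activates for context { category: 'development' } and is skipped for
  // { category: 'analysis' }. By contrast, a gate with gate_type: "framework" and
  // activation: { "framework_context": ["CAGEERF"] } activates for any category
  // whenever context.framework === 'CAGEERF'.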
/**
* Format gate guidance for display with framework-specific filtering
*/
private formatGateGuidance(gate: GateDefinition, context: GateContext): string {
let guidance = gate.guidance;
// Apply framework filtering if framework is specified and guidance has framework content
if (context.framework && hasFrameworkSpecificContent(guidance)) {
guidance = filterFrameworkGuidance(guidance, context.framework);
this.logger.debug('[GATE GUIDANCE RENDERER] Applied framework filtering for:', context.framework);
}
return `### ${gate.name}\n${guidance}`;
}
/**
* Get available gate IDs (for testing and diagnostics)
*/
async getAvailableGates(): Promise<string[]> {
try {
const files = await fs.readdir(this.gatesDirectory);
const gateIds = files
.filter(file => file.endsWith('.json'))
.map(file => file.replace('.json', ''));
this.logger.debug('[GATE GUIDANCE RENDERER] Available gates:', gateIds);
return gateIds;
} catch (error) {
this.logger.error('[GATE GUIDANCE RENDERER] Failed to list gate definitions:', error);
return [];
}
}
/**
* Clear cache (for hot-reloading support)
*/
clearCache(): void {
this.gateCache.clear();
this.logger.debug('[GATE GUIDANCE RENDERER] Cache cleared for hot-reloading');
}
/**
* Get renderer statistics (for monitoring)
*/
getStatistics(): { cachedGates: number; gatesDirectory: string } {
return {
cachedGates: this.gateCache.size,
gatesDirectory: this.gatesDirectory
};
}
}
/**
* Factory function for creating gate guidance renderer
*/
export function createGateGuidanceRenderer(logger: Logger, gatesDirectory?: string, temporaryGateRegistry?: TemporaryGateRegistry): GateGuidanceRenderer {
return new GateGuidanceRenderer(logger, gatesDirectory, temporaryGateRegistry);
}
```
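End to end, the renderer resolves each gate ID (temporary registry first for `temp_`-prefixed IDs, filesystem JSON otherwise), filters by activation context, and joins the survivors into one supplemental-guidance section. A minimal usage sketch, assuming the file's relative imports resolve; the console-backed `logger` object is an illustrative stand-in for the repo's `Logger` interface, not its actual logging setup:

```typescript
import { createGateGuidanceRenderer } from './GateGuidanceRenderer.js';

// Stand-in logger satisfying the debug/info/warn/error calls used above.
const logger = {
  debug: (...args: unknown[]) => console.debug(...args),
  info: (...args: unknown[]) => console.info(...args),
  warn: (...args: unknown[]) => console.warn(...args),
  error: (...args: unknown[]) => console.error(...args),
} as any;

// Uses the default gates directory; pass a second argument to override it.
const renderer = createGateGuidanceRenderer(logger);

// Inapplicable gates are skipped and load failures are logged, not thrown.
const guidance = await renderer.renderGuidance(
  ['code-quality', 'technical-accuracy'],
  { framework: 'CAGEERF', category: 'development' }
);

// Empty string when nothing activates; otherwise a
// "## 🎯 Quality Enhancement Gates" section ready to append to a prompt.
console.log(guidance);
```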
--------------------------------------------------------------------------------
/server/src/mcp-tools/prompt-engine/utils/context-builder.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Context Builder - Handles execution context building
*
* Extracted from ConsolidatedPromptEngine to provide focused
* context building capabilities with clear separation of concerns.
*/
import { createLogger } from "../../../logging/index.js";
import { FrameworkManager } from "../../../frameworks/framework-manager.js";
import { FrameworkExecutionContext } from "../../../frameworks/types/index.js";
import { FrameworkStateManager } from "../../../frameworks/framework-state-manager.js";
import { ConvertedPrompt } from "../../../types/index.js";
import { ExecutionContext } from "../../../execution/parsers/index.js";
const logger = createLogger({
logFile: '/tmp/context-builder.log',
transport: 'stdio',
enableDebug: false,
configuredLevel: 'info'
});
export interface EnhancedExecutionContext extends ExecutionContext {
promptId: string;
promptArgs: Record<string, any>;
executionMode: string;
sessionId: string;
forceRestart: boolean;
enableGates: boolean;
frameworkId?: string;
contextData: Record<string, any>;
frameworkContext?: FrameworkExecutionContext;
metadata?: Record<string, any>;
performance?: {
startTime: number;
memoryUsage?: number;
};
}
/**
* ContextBuilder handles all execution context building
*
* This class provides:
* - Execution context creation and enhancement
* - Framework integration and context injection
* - Performance tracking and metadata collection
* - Context validation and preparation
*/
export class ContextBuilder {
private frameworkManager?: FrameworkManager;
private frameworkStateManager?: FrameworkStateManager;
constructor(
frameworkManager?: FrameworkManager,
frameworkStateManager?: FrameworkStateManager
) {
this.frameworkManager = frameworkManager;
this.frameworkStateManager = frameworkStateManager;
}
/**
* Build enhanced execution context
*/
public buildExecutionContext(
promptId: string,
promptArgs: Record<string, any>,
convertedPrompt: ConvertedPrompt,
options: Record<string, any> = {}
): EnhancedExecutionContext {
try {
logger.debug('🔧 [ContextBuilder] Building execution context', {
promptId,
argsCount: Object.keys(promptArgs).length,
hasFrameworkManager: !!this.frameworkManager
});
const startTime = Date.now();
const memoryUsage = this.getMemoryUsage();
// Build base context - properly typed for execution parsers
const baseExecutionContext: ExecutionContext = {
conversationHistory: options.conversationHistory,
environmentVars: options.environmentVars,
promptDefaults: options.promptDefaults,
userSession: options.userSession,
systemContext: options.systemContext
};
// Build enhanced context
const enhancedContext: EnhancedExecutionContext = {
...baseExecutionContext,
promptId,
promptArgs,
executionMode: options.executionMode || 'auto',
sessionId: options.sessionId || this.generateSessionId(),
forceRestart: options.forceRestart || false,
enableGates: options.enableGates !== false,
frameworkId: options.frameworkId,
contextData: options.contextData || {},
metadata: this.buildMetadata(convertedPrompt, options),
performance: {
startTime,
memoryUsage
}
};
// Add framework context if available
if (this.frameworkManager && this.frameworkStateManager) {
enhancedContext.frameworkContext = this.buildFrameworkContext(
convertedPrompt,
promptArgs,
options
);
}
logger.debug('✅ [ContextBuilder] Execution context built successfully', {
promptId,
hasFrameworkContext: !!enhancedContext.frameworkContext,
sessionId: enhancedContext.sessionId
});
return enhancedContext;
} catch (error) {
logger.error('❌ [ContextBuilder] Context building failed', {
promptId,
error: error instanceof Error ? error.message : String(error)
});
// Return minimal context on error
return {
promptId,
promptArgs,
executionMode: 'auto',
sessionId: this.generateSessionId(),
forceRestart: false,
enableGates: true,
contextData: {},
metadata: { error: error instanceof Error ? error.message : String(error) },
performance: { startTime: Date.now() }
};
}
}
/**
* Build framework-specific execution context
*/
private buildFrameworkContext(
convertedPrompt: ConvertedPrompt,
promptArgs: Record<string, any>,
options: Record<string, any>
): FrameworkExecutionContext | undefined {
try {
if (!this.frameworkManager || !this.frameworkStateManager) {
return undefined;
}
logger.debug('🎯 [ContextBuilder] Building framework context');
const activeFramework = this.frameworkStateManager.getActiveFramework();
const frameworkId = options.frameworkId || activeFramework;
if (!frameworkId) {
logger.debug('No framework specified, skipping framework context');
return undefined;
}
const frameworkContext = this.frameworkManager.generateExecutionContext(
convertedPrompt,
{
promptType: options.executionType || 'prompt',
complexity: 'medium',
userPreference: frameworkId as any
}
);
logger.debug('✅ [ContextBuilder] Framework context built', {
frameworkId,
hasSystemPrompt: !!frameworkContext.systemPrompt
});
return frameworkContext;
} catch (error) {
logger.warn('⚠️ [ContextBuilder] Framework context building failed', {
error: error instanceof Error ? error.message : String(error)
});
return undefined;
}
}
/**
* Build execution metadata
*/
private buildMetadata(
convertedPrompt: ConvertedPrompt,
options: Record<string, any>
): Record<string, any> {
return {
promptId: convertedPrompt.id,
promptTitle: convertedPrompt.name,
promptCategory: convertedPrompt.category,
promptVersion: '1.0', // ConvertedPrompt doesn't have version property
executionId: this.generateExecutionId(),
timestamp: new Date().toISOString(),
userAgent: options.userAgent,
clientInfo: options.clientInfo,
environment: process.env.NODE_ENV || 'development',
nodeVersion: process.version,
platform: process.platform
};
}
/**
* Generate unique session ID
*/
private generateSessionId(): string {
return `session_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`;
}
/**
* Generate unique execution ID
*/
private generateExecutionId(): string {
return `exec_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`;
}
/**
* Get current memory usage
*/
private getMemoryUsage(): number {
try {
return process.memoryUsage().heapUsed;
} catch (error) {
logger.warn('⚠️ [ContextBuilder] Failed to get memory usage', {
error: error instanceof Error ? error.message : String(error)
});
return 0;
}
}
/**
* Validate execution context
*/
public validateContext(context: EnhancedExecutionContext): {
isValid: boolean;
errors: string[];
warnings: string[];
} {
const errors: string[] = [];
const warnings: string[] = [];
try {
// Required fields validation
if (!context.promptId) {
errors.push("Prompt ID is required");
}
if (!context.sessionId) {
errors.push("Session ID is required");
}
if (!context.promptArgs) {
warnings.push("No prompt arguments provided");
}
// Context data validation
if (context.contextData && typeof context.contextData !== 'object') {
errors.push("Context data must be an object");
}
// Performance data validation
if (context.performance && !context.performance.startTime) {
warnings.push("Performance tracking missing start time");
}
// Framework context validation
if (context.frameworkContext) {
if (!context.frameworkContext.selectedFramework) {
warnings.push("Framework context missing selected framework");
}
}
const isValid = errors.length === 0;
logger.debug('🔍 [ContextBuilder] Context validation completed', {
isValid,
errorsCount: errors.length,
warningsCount: warnings.length
});
return { isValid, errors, warnings };
} catch (error) {
logger.error('❌ [ContextBuilder] Context validation failed', {
error: error instanceof Error ? error.message : String(error)
});
return {
isValid: false,
errors: [`Validation error: ${error instanceof Error ? error.message : String(error)}`],
warnings
};
}
}
/**
* Clone context for reuse
*/
public cloneContext(
context: EnhancedExecutionContext,
overrides: Partial<EnhancedExecutionContext> = {}
): EnhancedExecutionContext {
try {
const clonedContext: EnhancedExecutionContext = {
...context,
...overrides,
promptArgs: { ...context.promptArgs, ...(overrides.promptArgs || {}) },
contextData: { ...context.contextData, ...(overrides.contextData || {}) },
metadata: { ...context.metadata, ...(overrides.metadata || {}) },
performance: {
startTime: context.performance?.startTime || Date.now(),
memoryUsage: context.performance?.memoryUsage,
...(overrides.performance || {})
}
};
// Update execution tracking
if (clonedContext.metadata) {
clonedContext.metadata.clonedFrom = context.metadata?.executionId;
clonedContext.metadata.executionId = this.generateExecutionId();
}
logger.debug('🔄 [ContextBuilder] Context cloned successfully', {
originalId: context.metadata?.executionId,
clonedId: clonedContext.metadata?.executionId
});
return clonedContext;
} catch (error) {
logger.error('❌ [ContextBuilder] Context cloning failed', {
error: error instanceof Error ? error.message : String(error)
});
throw error;
}
}
}
```
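The builder degrades gracefully at each step: without framework managers it omits `frameworkContext`, and on any build failure it falls back to a minimal context rather than throwing. A short lifecycle sketch (build, validate, clone); the `prompt` literal is a hypothetical minimal shape inferred from the fields `buildMetadata` reads (`id`, `name`, `category`), not the full `ConvertedPrompt` type:

```typescript
import { ContextBuilder } from './context-builder.js';

// No framework managers: buildExecutionContext simply skips frameworkContext.
const builder = new ContextBuilder();

// Hypothetical minimal prompt; only id/name/category are read by buildMetadata.
const prompt = {
  id: 'expert_code_implementation',
  name: 'Expert Code Implementation',
  category: 'development',
} as any;

const context = builder.buildExecutionContext(
  'expert_code_implementation',
  { refined_query: 'Implement a rate limiter', language: 'TypeScript' },
  prompt,
  { executionMode: 'auto', enableGates: true }
);

const { isValid, errors } = builder.validateContext(context);
if (!isValid) {
  throw new Error(`Invalid execution context: ${errors.join('; ')}`);
}

// Retry with the same inputs: cloneContext assigns a fresh executionId and
// records the original under metadata.clonedFrom.
const retryContext = builder.cloneContext(context, { forceRestart: true });
```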