#
tokens: 48044/50000 15/189 files (page 5/9)
lines: off (toggle) GitHub
raw markdown copy
This is page 5 of 9. Use http://codebase.md/portel-dev/ncp?page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .dxtignore
├── .github
│   ├── FEATURE_STORY_TEMPLATE.md
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── mcp_server_request.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── ci.yml
│       ├── publish-mcp-registry.yml
│       └── release.yml
├── .gitignore
├── .mcpbignore
├── .npmignore
├── .release-it.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── COMPLETE-IMPLEMENTATION-SUMMARY.md
├── CONTRIBUTING.md
├── CRITICAL-ISSUES-FOUND.md
├── docs
│   ├── clients
│   │   ├── claude-desktop.md
│   │   ├── cline.md
│   │   ├── continue.md
│   │   ├── cursor.md
│   │   ├── perplexity.md
│   │   └── README.md
│   ├── download-stats.md
│   ├── guides
│   │   ├── clipboard-security-pattern.md
│   │   ├── how-it-works.md
│   │   ├── mcp-prompts-for-user-interaction.md
│   │   ├── mcpb-installation.md
│   │   ├── ncp-registry-command.md
│   │   ├── pre-release-checklist.md
│   │   ├── telemetry-design.md
│   │   └── testing.md
│   ├── images
│   │   ├── ncp-add.png
│   │   ├── ncp-find.png
│   │   ├── ncp-help.png
│   │   ├── ncp-import.png
│   │   ├── ncp-list.png
│   │   └── ncp-transformation-flow.png
│   ├── mcp-registry-setup.md
│   ├── pr-schema-additions.ts
│   └── stories
│       ├── 01-dream-and-discover.md
│       ├── 02-secrets-in-plain-sight.md
│       ├── 03-sync-and-forget.md
│       ├── 04-double-click-install.md
│       ├── 05-runtime-detective.md
│       └── 06-official-registry.md
├── DYNAMIC-RUNTIME-SUMMARY.md
├── EXTENSION-CONFIG-DISCOVERY.md
├── INSTALL-EXTENSION.md
├── INTERNAL-MCP-ARCHITECTURE.md
├── jest.config.js
├── LICENSE
├── MANAGEMENT-TOOLS-COMPLETE.md
├── manifest.json
├── manifest.json.backup
├── MCP-CONFIG-SCHEMA-IMPLEMENTATION-EXAMPLE.ts
├── MCP-CONFIG-SCHEMA-SIMPLE-EXAMPLE.json
├── MCP-CONFIGURATION-SCHEMA-FORMAT.json
├── MCPB-ARCHITECTURE-DECISION.md
├── NCP-EXTENSION-COMPLETE.md
├── package-lock.json
├── package.json
├── parity-between-cli-and-mcp.txt
├── PROMPTS-IMPLEMENTATION.md
├── README-COMPARISON.md
├── README.md
├── README.new.md
├── REGISTRY-INTEGRATION-COMPLETE.md
├── RELEASE-PROCESS-IMPROVEMENTS.md
├── RELEASE-SUMMARY.md
├── RELEASE.md
├── RUNTIME-DETECTION-COMPLETE.md
├── scripts
│   ├── cleanup
│   │   └── scan-repository.js
│   └── sync-server-version.cjs
├── SECURITY.md
├── server.json
├── src
│   ├── analytics
│   │   ├── analytics-formatter.ts
│   │   ├── log-parser.ts
│   │   └── visual-formatter.ts
│   ├── auth
│   │   ├── oauth-device-flow.ts
│   │   └── token-store.ts
│   ├── cache
│   │   ├── cache-patcher.ts
│   │   ├── csv-cache.ts
│   │   └── schema-cache.ts
│   ├── cli
│   │   └── index.ts
│   ├── discovery
│   │   ├── engine.ts
│   │   ├── mcp-domain-analyzer.ts
│   │   ├── rag-engine.ts
│   │   ├── search-enhancer.ts
│   │   └── semantic-enhancement-engine.ts
│   ├── extension
│   │   └── extension-init.ts
│   ├── index-mcp.ts
│   ├── index.ts
│   ├── internal-mcps
│   │   ├── internal-mcp-manager.ts
│   │   ├── ncp-management.ts
│   │   └── types.ts
│   ├── orchestrator
│   │   └── ncp-orchestrator.ts
│   ├── profiles
│   │   └── profile-manager.ts
│   ├── server
│   │   ├── mcp-prompts.ts
│   │   └── mcp-server.ts
│   ├── services
│   │   ├── config-prompter.ts
│   │   ├── config-schema-reader.ts
│   │   ├── error-handler.ts
│   │   ├── output-formatter.ts
│   │   ├── registry-client.ts
│   │   ├── tool-context-resolver.ts
│   │   ├── tool-finder.ts
│   │   ├── tool-schema-parser.ts
│   │   └── usage-tips-generator.ts
│   ├── testing
│   │   ├── create-real-mcp-definitions.ts
│   │   ├── dummy-mcp-server.ts
│   │   ├── mcp-definitions.json
│   │   ├── real-mcp-analyzer.ts
│   │   ├── real-mcp-definitions.json
│   │   ├── real-mcps.csv
│   │   ├── setup-dummy-mcps.ts
│   │   ├── setup-tiered-profiles.ts
│   │   ├── test-profile.json
│   │   ├── test-semantic-enhancement.ts
│   │   └── verify-profile-scaling.ts
│   ├── transports
│   │   └── filtered-stdio-transport.ts
│   └── utils
│       ├── claude-desktop-importer.ts
│       ├── client-importer.ts
│       ├── client-registry.ts
│       ├── config-manager.ts
│       ├── health-monitor.ts
│       ├── highlighting.ts
│       ├── logger.ts
│       ├── markdown-renderer.ts
│       ├── mcp-error-parser.ts
│       ├── mcp-wrapper.ts
│       ├── ncp-paths.ts
│       ├── parameter-prompter.ts
│       ├── paths.ts
│       ├── progress-spinner.ts
│       ├── response-formatter.ts
│       ├── runtime-detector.ts
│       ├── schema-examples.ts
│       ├── security.ts
│       ├── text-utils.ts
│       ├── update-checker.ts
│       ├── updater.ts
│       └── version.ts
├── STORY-DRIVEN-DOCUMENTATION.md
├── STORY-FIRST-WORKFLOW.md
├── test
│   ├── __mocks__
│   │   ├── chalk.js
│   │   ├── transformers.js
│   │   ├── updater.js
│   │   └── version.ts
│   ├── cache-loading-focused.test.ts
│   ├── cache-optimization.test.ts
│   ├── cli-help-validation.sh
│   ├── coverage-boost.test.ts
│   ├── curated-ecosystem-validation.test.ts
│   ├── discovery-engine.test.ts
│   ├── discovery-fallback-focused.test.ts
│   ├── ecosystem-discovery-focused.test.ts
│   ├── ecosystem-discovery-validation-simple.test.ts
│   ├── final-80-percent-push.test.ts
│   ├── final-coverage-push.test.ts
│   ├── health-integration.test.ts
│   ├── health-monitor.test.ts
│   ├── helpers
│   │   └── mock-server-manager.ts
│   ├── integration
│   │   └── mcp-client-simulation.test.cjs
│   ├── logger.test.ts
│   ├── mcp-ecosystem-discovery.test.ts
│   ├── mcp-error-parser.test.ts
│   ├── mcp-immediate-response-check.js
│   ├── mcp-server-protocol.test.ts
│   ├── mcp-timeout-scenarios.test.ts
│   ├── mcp-wrapper.test.ts
│   ├── mock-mcps
│   │   ├── aws-server.js
│   │   ├── base-mock-server.mjs
│   │   ├── brave-search-server.js
│   │   ├── docker-server.js
│   │   ├── filesystem-server.js
│   │   ├── git-server.mjs
│   │   ├── github-server.js
│   │   ├── neo4j-server.js
│   │   ├── notion-server.js
│   │   ├── playwright-server.js
│   │   ├── postgres-server.js
│   │   ├── shell-server.js
│   │   ├── slack-server.js
│   │   └── stripe-server.js
│   ├── mock-smithery-mcp
│   │   ├── index.js
│   │   ├── package.json
│   │   └── smithery.yaml
│   ├── ncp-orchestrator.test.ts
│   ├── orchestrator-health-integration.test.ts
│   ├── orchestrator-simple-branches.test.ts
│   ├── performance-benchmark.test.ts
│   ├── quick-coverage.test.ts
│   ├── rag-engine.test.ts
│   ├── regression-snapshot.test.ts
│   ├── search-enhancer.test.ts
│   ├── session-id-passthrough.test.ts
│   ├── setup.ts
│   ├── tool-context-resolver.test.ts
│   ├── tool-schema-parser.test.ts
│   ├── user-story-discovery.test.ts
│   └── version-util.test.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/STORY-DRIVEN-DOCUMENTATION.md:
--------------------------------------------------------------------------------

```markdown
# Story-Driven Documentation Strategy

## 🎯 **Core Principle**

**Stories explain WHY → HOW → WHAT** (not the other way around)

Each feature becomes a narrative that:
1. **Starts with pain** (relatable problem)
2. **Shows the journey** (how we solve it)
3. **Delivers benefits** (why it matters)
4. **Optionally dives deep** (technical details for curious readers)

---

## 📚 **The Six Core Stories**

### **Story 1: The Dream-and-Discover Story** 🌟
*Why AI doesn't see your tools upfront*

**The Pain:**
Your AI is drowning in 50+ tool schemas. It reads them all, gets confused, picks the wrong one, and wastes your time.

**The Journey:**
Instead of showing all tools at once, NCP lets your AI **dream** of the perfect tool. It describes what it needs in plain language. NCP's semantic search finds the exact tool that matches that dream.

**The Magic:**
- **AI thinks clearly** - No cognitive overload from 50 schemas
- **Computer stays cool** - MCPs load on-demand, not all at once
- **You save money** - 97% fewer tokens burned on tool schemas
- **Work flows faster** - Sub-second tool discovery vs 8-second analysis

**Technical Deep-Dive:** [Link to semantic search implementation]

---

### **Story 2: The Secrets-in-Plain-Sight Story** 🔐
*How your API keys stay invisible to AI*

**The Pain:**
"Add GitHub MCP with token ghp_abc123..." → Your secret just entered the AI chat. It's in logs. It's in training data. It's everywhere.

**The Journey:**
NCP uses a **clipboard handshake**:
1. AI shows you a prompt: "Copy your config to clipboard BEFORE clicking YES"
2. You copy `{"env":{"TOKEN":"secret"}}`
3. You click YES
4. NCP reads clipboard *server-side*
5. AI sees: "MCP added with credentials" (NOT your token!)

**The Magic:**
- **AI never sees secrets** - Not in chat, not in logs, not anywhere
- **You stay in control** - Explicit consent, you know what happens
- **Audit trail clean** - "YES" is logged, tokens aren't

**How It Works:** Clipboard is read server-side (in NCP's process), never sent to AI. The AI conversation only contains the approval ("YES"), not the secrets.

**Technical Deep-Dive:** [Link to clipboard security pattern]

---

### **Story 3: The Sync-and-Forget Story** 🔄
*Why you never configure the same MCP twice*

**The Pain:**
You added 10 MCPs to Claude Desktop. Now you want them in NCP. Do you configure everything again? Copy-paste 10 configs? 😫

**The Journey:**
NCP auto-syncs from Claude Desktop **on every startup**:
- Reads your `claude_desktop_config.json`
- Detects all .mcpb extensions
- Imports everything into your chosen NCP profile
- Stays in sync forever (re-checks on each boot)

**The Magic:**
- **Zero manual work** - Add MCP to Claude Desktop → NCP gets it automatically
- **Always in sync** - Install new .mcpb → NCP detects it on next startup
- **One source of truth** - Configure in Claude Desktop, NCP follows

**Why Continuous?** Because users install new MCPs frequently. One-time import would drift out of sync. Continuous sync means NCP always has your latest setup.

**Technical Deep-Dive:** [Link to client-importer and auto-sync implementation]

---

### **Story 4: The Double-Click-Install Story** 📦
*Why installing NCP feels like installing an app*

**The Pain:**
Installing MCPs usually means: read docs → install npm package → edit JSON → restart client → pray it works. Too many steps!

**The Journey:**
1. Download `ncp.mcpb` from releases
2. Double-click it
3. Claude Desktop prompts: "Install NCP extension?"
4. Click "Install"
5. Done. All your MCPs are now unified.

**The Magic:**
- **Feels native** - Just like installing a regular app
- **Zero terminal commands** - No npm, no config editing
- **Auto-imports MCPs** - Syncs from Claude Desktop instantly
- **Optional CLI** - Can enable global `ncp` command if you want it

**What's .mcpb?** Claude Desktop's native extension format. It's a bundled MCP with manifest, pre-built code, and optional user configuration UI.

**Technical Deep-Dive:** [Link to .mcpb architecture and bundling]

---

### **Story 5: The Runtime-Detective Story** 🕵️
*How NCP knows which Node.js to use*

**The Pain:**
Claude Desktop ships its own Node.js. System has a different Node.js. Which one should .mcpb extensions use? Get it wrong → extensions break.

**The Journey:**
NCP detects runtime **dynamically on every boot**:
- Checks `process.execPath` (how NCP itself was launched)
- If launched via Claude's bundled Node → uses that for extensions
- If launched via system Node → uses system runtime
- If user toggles "Use Built-in Node.js for MCP" → adapts automatically

**The Magic:**
- **Zero config** - No manual runtime selection needed
- **Adapts instantly** - Toggle setting → NCP respects it on next boot
- **Extensions work** - Always use correct Node.js/Python
- **Debug-friendly** - Logs show which runtime was detected

**Why Dynamic?** Users toggle settings frequently. Static detection (at install time) would lock you into one runtime. Dynamic detection (at boot time) respects changes immediately.

**Technical Deep-Dive:** [Link to runtime-detector.ts]

---

### **Story 6: The Official-Registry Story** 🌐
*How AI discovers 2,200+ MCPs without you*

**The Pain:**
You: "I need a database MCP"
Old way: Open browser → Search → Find npm package → Copy install command → Configure manually

**The Journey:**
With NCP + Registry integration:
1. You: "Find database MCPs"
2. AI searches official MCP Registry
3. Shows numbered list: "1. PostgreSQL ⭐ 2. MongoDB 📦 3. Redis..."
4. You: "Install 1 and 3"
5. AI imports them with correct commands
6. Done!

**The Magic:**
- **AI browses for you** - Searches 2,200+ MCPs from registry.modelcontextprotocol.io
- **Shows what matters** - Name, description, download count, official status
- **Batch install** - Pick multiple, import all at once
- **Correct config** - Registry knows the right command + args

**What's the Registry?** Anthropic's official MCP directory. It's the npm registry for MCPs - central source of truth for discovery.

**Technical Deep-Dive:** [Link to registry-client.ts and discovery flow]

---

## 🏗️ **How to Structure Documentation**

### **1. User-Facing Docs (README.md)**

```markdown
# NCP - Your AI's Personal Assistant

[Open with Story 1 - Dream and Discover]

## The Six Stories That Make NCP Different

1. 🌟 Dream and Discover - [2 min read]
2. 🔐 Secrets in Plain Sight - [2 min read]
3. 🔄 Sync and Forget - [2 min read]
4. 📦 Double-Click Install - [2 min read]
5. 🕵️ Runtime Detective - [2 min read]
6. 🌐 Official Registry - [2 min read]

## Quick Start
[Installation + verification in 3 steps]

## Need More?
- 📖 Technical Details → [ARCHITECTURE.md]
- 🐛 Troubleshooting → [TROUBLESHOOTING.md]
- 🤝 Contributing → [CONTRIBUTING.md]
```

### **2. Story Pages (docs/stories/)**

Each story gets its own page:
- `docs/stories/01-dream-and-discover.md`
- `docs/stories/02-secrets-in-plain-sight.md`
- `docs/stories/03-sync-and-forget.md`
- `docs/stories/04-double-click-install.md`
- `docs/stories/05-runtime-detective.md`
- `docs/stories/06-official-registry.md`

**Format:**
```markdown
# Story Name

## The Pain [30 seconds]
Describe the problem in human terms

## The Journey [1 minute]
Show how NCP solves it (story format)

## The Magic [30 seconds]
Bullet points - benefits in plain language

## How It Works [optional, 2 minutes]
Light technical explanation for curious readers

## Deep Dive [link]
Link to technical implementation docs
```

### **3. Technical Docs (docs/technical/)**

For developers who want implementation details:
- `docs/technical/semantic-search.md`
- `docs/technical/clipboard-security.md`
- `docs/technical/auto-import.md`
- `docs/technical/mcpb-bundling.md`
- `docs/technical/runtime-detection.md`
- `docs/technical/registry-integration.md`

---

## 🎨 **Writing Guidelines**

### **DO:**
- ✅ Start with pain (make it relatable)
- ✅ Use analogies (child with toys, buffet vs pizza)
- ✅ Show cause-effect ("By doing X, you get Y")
- ✅ Keep paragraphs short (2-3 sentences max)
- ✅ Use active voice ("NCP detects" not "is detected by")
- ✅ Add emojis for visual anchors (🎯 🔐 🔄)

### **DON'T:**
- ❌ Lead with implementation ("NCP uses vector embeddings...")
- ❌ Use jargon without context ("FAISS indexing with cosine similarity")
- ❌ Write walls of text (break it up!)
- ❌ Assume technical knowledge (explain like reader is smart but new)

---

## 📊 **Story Quality Checklist**

Before publishing a story, verify:

- [ ] **Pain is relatable** - Reader nods "yes, I've felt that"
- [ ] **Journey is clear** - Non-technical person understands flow
- [ ] **Benefits are tangible** - "Saves money" "Works faster" not "Better architecture"
- [ ] **Technical truth** - Accurate, not oversimplified to wrongness
- [ ] **Reading time realistic** - Can actually read in stated time
- [ ] **One core idea** - Story focuses on ONE thing, not three

---

## 🚀 **Migration Plan**

### **Phase 1: Create Story Pages**
1. Write 6 story markdown files in `docs/stories/`
2. Keep existing README for now
3. Get feedback on story quality

### **Phase 2: Restructure README**
1. Open with strongest story (Dream and Discover)
2. Add story index with reading times
3. Move installation to "Quick Start" section
4. Link to stories + technical docs

### **Phase 3: Update Technical Docs**
1. Move implementation details to `docs/technical/`
2. Keep COMPLETE-IMPLEMENTATION-SUMMARY.md for internal reference
3. Create ARCHITECTURE.md that links stories → technical details

### **Phase 4: Add Story Navigation**
1. Add "Next Story" links between stories
2. Create visual story map (flowchart showing connections)
3. Add "Story Index" page

---

## 💡 **Example: Before/After**

### **Before (Feature-First):**
```
## Semantic Search

NCP uses FAISS vector similarity search with OpenAI text-embedding-3-small
to match user queries against tool descriptions. The similarity threshold
is 0.3 with cosine distance metric.
```

### **After (Story-First):**
```
## Dream and Discover

Instead of showing your AI 50+ tools upfront, NCP lets it dream:

"I need something that can read files..."

NCP's semantic search understands the *intent* and finds the perfect tool
in milliseconds. No cognitive overload. No wrong tool selection. Just
instant discovery.

*Curious how semantic search works? [Read the technical details →]*
```

---

## 🎯 **Success Metrics**

A story is successful when:

1. **Non-technical person understands benefit** in 2 minutes
2. **Technical person finds depth** if they want it
3. **User can explain to colleague** what NCP does
4. **Feature becomes memorable** ("Oh, the clipboard handshake!")

---

## 📝 **Next Steps**

1. ✅ Review this strategy document
2. ⏳ Write first story (Dream and Discover) as example
3. ⏳ Get feedback and iterate
4. ⏳ Write remaining 5 stories
5. ⏳ Restructure README with story-first approach
6. ⏳ Migrate technical details to separate docs

---

**The goal: Anyone can understand what NCP does and why it matters - in 10 minutes, without a CS degree.** 🎉

```

--------------------------------------------------------------------------------
/docs/guides/pre-release-checklist.md:
--------------------------------------------------------------------------------

```markdown
# Pre-Release Checklist

This checklist MUST be completed before ANY release to npm. Skipping items leads to broken releases and user trust erosion.

## ✅ Phase 1: Code Quality (5 minutes)

### 1.1 Tests Pass
```bash
npm run build                    # TypeScript compiles
npm test                         # All tests pass
npm run test:critical            # MCP protocol tests pass
```

### 1.2 No Obvious Issues
```bash
npm run lint                     # ESLint passes (if configured)
git status                       # No uncommitted changes
git log --oneline -5            # Review recent commits
```

---

## ✅ Phase 2: Package Verification (5 minutes)

### 2.1 Inspect Package Contents
```bash
npm pack --dry-run

# Verify:
✓ dist/ folder included
✓ package.json, README.md, LICENSE included
✓ src/ excluded (TypeScript source)
✓ *.map files excluded (source maps)
✓ test/ excluded
✓ docs/ excluded (except essential ones)
✓ .env, tokens, secrets excluded
```

### 2.2 Check Package Size
```bash
# Should be < 500KB typically
# If > 1MB, investigate what's bloating it
ls -lh *.tgz
```

---

## ✅ Phase 3: Local Installation Test (10 minutes)

### 3.1 Test Published Package Locally
```bash
# Pack and install locally
npm pack
cd /tmp
npm install /path/to/ncp-production-clean/portel-ncp-*.tgz

# Verify CLI works
npx @portel/ncp --version
npx @portel/ncp find "list files"

# Expected: Version shown, tools listed
```

### 3.2 Test with Profile
```bash
cd /tmp/test-ncp
npx @portel/ncp add filesystem --command npx --args @modelcontextprotocol/server-filesystem

# Expected: MCP added to ~/.ncp/profiles/all.json
cat ~/.ncp/profiles/all.json  # Verify it's there
```

---

## ✅ Phase 4: MCP Integration Test (15 minutes) **[CRITICAL - THIS WAS MISSING]**

### 4.1 Create Test Claude Desktop Config
```bash
# Create temporary Claude config for testing
mkdir -p ~/test-claude-desktop
cat > ~/test-claude-desktop/config.json << 'EOF'
{
  "mcpServers": {
    "ncp": {
      "command": "npx",
      "args": ["@portel/ncp@local-test"]
    }
  }
}
EOF
```

### 4.2 Test MCP Server Directly (Without Claude Desktop)
```bash
# Create test script to simulate AI client
cat > /tmp/test-mcp-client.js << 'EOF'
const { spawn } = require('child_process');

async function testMCPServer() {
  console.log('Starting NCP MCP server...');

  const ncp = spawn('npx', ['@portel/ncp'], {
    stdio: ['pipe', 'pipe', 'inherit'],
    env: { ...process.env, NCP_MODE: 'mcp' }
  });

  // Test 1: Initialize
  const initRequest = {
    jsonrpc: '2.0',
    id: 1,
    method: 'initialize',
    params: {
      protocolVersion: '2024-11-05',
      capabilities: {},
      clientInfo: { name: 'test-client', version: '1.0.0' }
    }
  };

  ncp.stdin.write(JSON.stringify(initRequest) + '\n');

  // Test 2: tools/list (should respond < 100ms)
  setTimeout(() => {
    const listRequest = {
      jsonrpc: '2.0',
      id: 2,
      method: 'tools/list'
    };
    ncp.stdin.write(JSON.stringify(listRequest) + '\n');
  }, 10);

  // Test 3: find (should not return empty during indexing)
  setTimeout(() => {
    const findRequest = {
      jsonrpc: '2.0',
      id: 3,
      method: 'tools/call',
      params: {
        name: 'find',
        arguments: { description: 'list files' }
      }
    };
    ncp.stdin.write(JSON.stringify(findRequest) + '\n');
  }, 50);

  // Collect responses
  let responseBuffer = '';
  ncp.stdout.on('data', (data) => {
    responseBuffer += data.toString();
    const lines = responseBuffer.split('\n');

    lines.slice(0, -1).forEach(line => {
      if (line.trim()) {
        try {
          const response = JSON.parse(line);
          console.log('Response:', JSON.stringify(response, null, 2));

          // Validate response
          if (response.id === 2) {
            if (!response.result?.tools || response.result.tools.length === 0) {
              console.error('❌ FAIL: tools/list returned no tools');
              process.exit(1);
            }
            console.log('✓ tools/list OK');
          }

          if (response.id === 3) {
            const text = response.result?.content?.[0]?.text || '';
            if (text.includes('No tools found') && !text.includes('Indexing')) {
              console.error('❌ FAIL: find returned empty without indexing message');
              process.exit(1);
            }
            console.log('✓ find OK (partial results or indexing message shown)');

            // Success
            setTimeout(() => {
              console.log('✅ All MCP tests passed');
              ncp.kill();
              process.exit(0);
            }, 100);
          }
        } catch (e) {
          // Ignore parse errors for partial JSON
        }
      }
    });

    responseBuffer = lines[lines.length - 1];
  });

  // Timeout after 10 seconds
  setTimeout(() => {
    console.error('❌ FAIL: Test timeout');
    ncp.kill();
    process.exit(1);
  }, 10000);
}

testMCPServer();
EOF

node /tmp/test-mcp-client.js

# Expected output:
# ✓ tools/list OK
# ✓ find OK (partial results or indexing message shown)
# ✅ All MCP tests passed
```

### 4.3 Test Cache Persistence
```bash
# Clear cache
rm -rf ~/.ncp/cache/*

# Run first time (creates cache)
node /tmp/test-mcp-client.js

# Check cache was created correctly
cat ~/.ncp/cache/all-cache-meta.json | jq .profileHash
# Expected: Non-empty hash (e.g., "d5b54172ea975e47...")

# Run second time (should use cache)
node /tmp/test-mcp-client.js

# Expected: Same profileHash, no re-indexing
```

### 4.4 Test with Real AI Client (If Available)
```bash
# Option A: Test with Claude Desktop
# 1. Update Claude Desktop config to use local package
# 2. Restart Claude Desktop
# 3. Ask: "What MCP tools do you have?"
# 4. Verify: Returns tools within 2 seconds, not empty

# Option B: Test with Perplexity
# (Similar steps)

# Expected: AI sees tools, can use them, no empty results
```

---

## ✅ Phase 5: Performance & Resource Check (5 minutes)

### 5.1 Startup Time
```bash
time npx @portel/ncp find

# Expected: < 3 seconds for cached profile
# Expected: < 30 seconds for 50-MCP profile (first time)
```

### 5.2 Memory Usage
```bash
# Start NCP in background
npx @portel/ncp &
NCP_PID=$!

# Check memory after 10 seconds
sleep 10
ps aux | grep $NCP_PID

# Expected: < 200MB for typical profile
```

### 5.3 Cache Size
```bash
du -sh ~/.ncp/cache/

# Expected: < 10MB for typical profile
```

---

## ✅ Phase 6: Documentation Accuracy (5 minutes)

### 6.1 README Examples Work
```bash
# Copy-paste examples from README.md and verify they work
# Common ones:
npx @portel/ncp add filesystem
npx @portel/ncp find "search files"
npx @portel/ncp run filesystem:read_file --parameters '{"path":"test.txt"}'
```

### 6.2 Version Numbers Match
```bash
# Check version consistency
grep '"version"' package.json
grep 'version' server.json
cat CHANGELOG.md | head -20

# Expected: All show same version (e.g., 1.4.4)
```

---

## ✅ Phase 7: GitHub Checks (5 minutes)

### 7.1 CI/CD Passes
```bash
# Check GitHub Actions status
gh run list --limit 5

# Expected: All green ✓
```

### 7.2 No Secrets in Code
```bash
# Scan for common secret patterns
grep -r "sk-" . --exclude-dir=node_modules
grep -r "ghp_" . --exclude-dir=node_modules
grep -r "AKIA" . --exclude-dir=node_modules

# Expected: No matches (or only in .env.example)
```

---

## ✅ Phase 8: Breaking Changes Review (2 minutes)

### 8.1 API Compatibility
```
Review changes since last release:
- Did we change tool names? (find → search)
- Did we change parameter names?
- Did we remove features?
- Did we change output format?

If YES to any: Bump MAJOR version (1.4.x → 2.0.0) — per semver, breaking changes must not ship as a minor bump
If NO to all: Bump PATCH version (1.4.3 → 1.4.4); backward-compatible feature additions bump MINOR (1.4.x → 1.5.0)
```

### 8.2 Migration Guide
```
If breaking changes:
- Update CHANGELOG.md with migration steps
- Add deprecation warnings (don't just remove)
- Update examples in README
```

---

## ✅ Phase 9: Release Prep (5 minutes)

### 9.1 Update Version
```bash
# Use npm version to update
npm version patch  # or minor, or major

# This updates:
# - package.json
# - package-lock.json
# - Creates git tag
```

### 9.2 Update Changelog
```bash
# Add to CHANGELOG.md
## [1.4.4] - 2025-01-XX

### Fixed
- Cache profileHash now persists correctly across restarts
- Indexing progress shown immediately, preventing race condition
- Partial results returned during indexing (parity with CLI)

### Impact
- Fixes empty results in AI assistants during startup
- Prevents unnecessary re-indexing on every restart
```

### 9.3 Final Commit
```bash
git add -A
git commit -m "chore: release v1.4.4"
git push origin main --tags
```

---

## ✅ Phase 10: Publish (3 minutes)

### 10.1 Publish to npm
```bash
npm publish

# Monitor for errors
# Check: https://www.npmjs.com/package/@portel/ncp
```

### 10.2 Verify Published Package
```bash
# Wait 1 minute for npm to propagate
sleep 60

# Install from npm and test
cd /tmp/verify-release
npm install @portel/ncp@latest
npx @portel/ncp --version

# Expected: Shows new version (1.4.4)
```

### 10.3 Test MCP Integration Post-Publish
```bash
# Update Claude Desktop to use latest
# Restart, verify it works with AI

# If fails: npm unpublish @portel/ncp@1.4.4 (within 72 hours)
```

---

## ✅ Phase 11: Announce (5 minutes)

### 11.1 GitHub Release
```bash
gh release create v1.4.4 \
  --title "v1.4.4 - Critical Fixes" \
  --notes "$(cat CHANGELOG.md | head -20)"
```

### 11.2 Update MCP Registry
```bash
# Trigger registry update workflow if needed
gh workflow run publish-mcp-registry.yml
```

---

## 🚨 STOP Gates - Release Only If:

### Gate 1: Unit Tests
- ✅ All tests pass
- ✅ No skipped tests
- ✅ Coverage > 70%

### Gate 2: Package Integrity
- ✅ Package size < 1MB
- ✅ No source files in dist
- ✅ No secrets in code

### Gate 3: MCP Integration (NEW - CRITICAL)
- ✅ tools/list responds < 100ms
- ✅ find returns results (not empty)
- ✅ Cache profileHash persists
- ✅ No re-indexing on restart

### Gate 4: Real-World Test
- ✅ Works with Claude Desktop OR Perplexity
- ✅ AI can discover and use tools
- ✅ No errors in logs

### Gate 5: Documentation
- ✅ README examples work
- ✅ CHANGELOG updated
- ✅ Version numbers match

---

## Time Estimate: 60 minutes total

**If you can't spend 60 minutes testing, don't release.**

A broken release costs:
- 4+ hours of debugging and hotfixes
- User trust
- Product reputation
- 3-4 version bumps (1.4.0 → 1.4.1 → 1.4.2 → 1.4.3)

---

## Automation Opportunities

### Short-term (Next Week)
1. Create `npm run test:integration` that runs Phase 4 tests
2. Add `npm run test:pre-release` that runs Phases 1-5
3. Create GitHub Action that runs pre-release checks on tags

### Long-term (Next Month)
1. E2E testing with actual Claude Desktop instance
2. Automated cache validation tests
3. Performance regression tests
4. Canary releases (npm publish with tag `next`)

---

## Lessons Learned (2025-01-03)

### What Failed
- Released 1.4.0 without real-world MCP integration testing
- Unit tests passed but didn't catch cache/race condition bugs
- No checklist = inconsistent quality

### What We're Changing
- **Phase 4 is now mandatory** - Test with actual MCP client before release
- **Cache tests are critical** - Verify profileHash, restart behavior
- **No shortcuts** - 60 minutes is non-negotiable

### Success Criteria for Next Release
- Zero hotfixes after 1.4.4
- AI assistants work perfectly on first try
- Users trust NCP as reliable infrastructure

```

--------------------------------------------------------------------------------
/test/mock-mcps/base-mock-server.mjs:
--------------------------------------------------------------------------------

```
#!/usr/bin/env node

/**
 * Base Mock MCP Server
 * Provides a template for creating realistic MCP servers for testing
 * These servers respond to MCP protocol but don't actually execute tools
 */

console.error('[DEBUG] Loading base mock server module...');

// SDK pieces are pulled in via dynamic import so a broken install surfaces
// as a clear [ERROR] log line instead of an opaque module-resolution failure.
let Server;
let StdioServerTransport;
let McpTypes;
import { z } from 'zod';

/**
 * Dynamically import one SDK module, logging success or failure to stderr.
 *
 * @param {string} specifier - module specifier passed to import()
 * @param {string} label - human-readable name used in the log messages
 * @param {boolean} [listExports] - when true, also log the module's export names
 * @returns {Promise<object>} the imported module namespace
 * @throws re-throws the import error after logging it
 */
async function loadSdkModule(specifier, label, listExports = false) {
  try {
    const mod = await import(specifier);
    if (listExports) {
      console.error(`[DEBUG] Successfully loaded ${label} module. Exports:`, Object.keys(mod));
    } else {
      console.error(`[DEBUG] Successfully loaded ${label} module`);
    }
    return mod;
  } catch (err) {
    console.error(`[ERROR] Failed to load ${label} module:`, err);
    throw err;
  }
}

Server = (await loadSdkModule('@modelcontextprotocol/sdk/server/index.js', 'Server')).Server;
StdioServerTransport = (await loadSdkModule('@modelcontextprotocol/sdk/server/stdio.js', 'StdioServerTransport')).StdioServerTransport;
McpTypes = await loadSdkModule('@modelcontextprotocol/sdk/types.js', 'McpTypes', true);

// Log exports for debugging
console.error('[DEBUG] Available MCP types:', Object.keys(McpTypes));

/**
 * Mock MCP server for tests: speaks the MCP protocol over stdio but answers
 * every tools/call with canned text instead of performing real work.
 * Heavy [DEBUG]/[ERROR] stderr logging is intentional — these processes are
 * spawned by tests, and stderr is the only place startup failures surface.
 */
class MockMCPServer {
  // serverInfo: { name, version } identity reported to clients.
  // tools: tool definitions returned by tools/list and matched in tools/call.
  // resources: optional resource descriptors for resources/list and resources/read.
  // capabilities: advertised capability object passed straight to the SDK Server.
  // NOTE(review): `find: true` is not a standard MCP tools capability flag — confirm
  // downstream code actually reads it before relying on it.
  constructor(serverInfo, tools, resources = [], capabilities = {
    tools: {
      listTools: true,
      callTool: true,
      find: true,
    },
    resources: {},
  }) {
    console.error('[DEBUG] MockMCPServer constructor called');
    console.error('[DEBUG] Server info:', JSON.stringify(serverInfo, null, 2));
    console.error('[DEBUG] Capabilities:', JSON.stringify(capabilities, null, 2));
    
    this.serverInfo = serverInfo; // Store server info for reference
    
    try {
      this.server = new Server(serverInfo, { capabilities });
      console.error('[DEBUG] Server instance created successfully');
    } catch (err) {
      console.error('[ERROR] Failed to create Server instance:', err);
      console.error('[ERROR] Error stack:', err.stack);
      throw err;
    }
    
    this.tools = tools;
    this.resources = resources;
    
    try {
      this.setupHandlers();
      console.error('[DEBUG] Handlers set up successfully');
    } catch (err) {
      console.error('[ERROR] Failed to set up handlers:', err);
      console.error('[ERROR] Error stack:', err.stack);
      throw err;
    }
  }

  // Registers request handlers for tools/list, tools/call, tools/find,
  // resources/list, and resources/read on the underlying SDK Server.
  setupHandlers() {
    try {
      console.error('[DEBUG] Setting up server request handlers');

      console.error('[DEBUG] McpTypes.ListToolsRequestSchema:', McpTypes.ListToolsRequestSchema);
      
      // List available tools
      this.server.setRequestHandler(McpTypes.ListToolsRequestSchema, async () => ({
        tools: this.tools,
      }));
      console.error('[DEBUG] Set up tools/list handler');

      // Handle tool calls (always return success with mock data)
      this.server.setRequestHandler(McpTypes.CallToolRequestSchema, async (request) => {
        const { name, arguments: args } = request.params;

        // Find the tool
        const tool = this.tools.find(t => t.name === name);
        if (!tool) {
          throw new McpTypes.McpError(McpTypes.ErrorCode.MethodNotFound, `Tool "${name}" not found`);
        }

        // Return mock successful response
        return {
          content: [
            {
              type: "text",
              text: `Mock execution of ${name} with args: ${JSON.stringify(args, null, 2)}\n\nThis is a test MCP server - no actual operation was performed.`
            }
          ]
        };
      });
      console.error('[DEBUG] Set up tools/call handler');

      // Handle find requests using a hand-rolled zod schema.
      // NOTE(review): "tools/find" is not part of the core MCP method set — this
      // mirrors whatever custom method the tests exercise; confirm against callers.
      const FindToolsSchema = z.object({
        method: z.literal("tools/find"),
        params: z.object({
          query: z.string(),
        })
      });

      this.server.setRequestHandler(FindToolsSchema, async (request) => {
        try {
          // For the git-server, just return all tools that match the query string
          // (case-insensitive substring match on name or description).
          const { query } = request.params;
          const matchingTools = this.tools.filter(tool => 
            tool.name.toLowerCase().includes(query.toLowerCase()) ||
            (tool.description && tool.description.toLowerCase().includes(query.toLowerCase()))
          );
          
          return {
            tools: matchingTools
          };
        } catch (err) {
          console.error('[ERROR] Error in tools/find handler:', err);
          throw err;
        }
      });
      console.error('[DEBUG] Set up tools/find handler');

      // List resources (if any)
      this.server.setRequestHandler(McpTypes.ListResourcesRequestSchema, async () => ({
        resources: this.resources,
      }));
      console.error('[DEBUG] Set up resources/list handler');

      // Read resources (if any) — matched by exact URI; content is always mock text.
      this.server.setRequestHandler(McpTypes.ReadResourceRequestSchema, async (request) => {
        const resource = this.resources.find(r => r.uri === request.params.uri);
        if (!resource) {
          throw new McpTypes.McpError(McpTypes.ErrorCode.InvalidRequest, `Resource not found: ${request.params.uri}`);
        }

        return {
          contents: [
            {
              uri: request.params.uri,
              mimeType: "text/plain",
              text: `Mock resource content for ${request.params.uri}`
            }
          ]
        };
      });
      console.error('[DEBUG] Set up resources/read handler');
    } catch (err) {
      console.error('[ERROR] Error in setupHandlers:', err);
      console.error('[ERROR] Error stack:', err.stack);
      console.error('[ERROR] Server state:', {
        serverInfo: this.serverInfo,
        serverCapabilities: this.server?.capabilities,
        availableSchemas: Object.keys(McpTypes)
      });
      throw err;
    }
  }

  // Connects the server to a stdio transport, emits ready markers, and keeps
  // the process alive (with unref'd watchdogs) until a signal arrives.
  // Exits with code 1 on any startup failure.
  async run() {
    try {
      const name = this.serverInfo.name;
      console.error('[DEBUG] Starting mock MCP server...');
      console.error('[DEBUG] Server name:', name);
      console.error('[DEBUG] Server info:', JSON.stringify(this.serverInfo, null, 2));
      console.error('[DEBUG] Server capabilities:', JSON.stringify(this.server.capabilities, null, 2));

      // Validate server is ready for transport
      if (!this.server) {
        throw new Error('Server instance not initialized');
      }

      // Set up transport
      console.error('[DEBUG] Creating StdioServerTransport...');
      let transport;
      try {
        transport = new StdioServerTransport();
        console.error('[DEBUG] StdioServerTransport instance:', transport);
        console.error('[DEBUG] StdioServerTransport created successfully');
      } catch (err) {
        console.error('[ERROR] Failed to create StdioServerTransport:');
        console.error('[ERROR] Error message:', err.message);
        console.error('[ERROR] Error stack:', err.stack);
        console.error('[ERROR] Error details:', err);
        throw err;
      }
      
      // Connect server
      console.error('[DEBUG] Connecting server to transport...');
      try {
        const connectResult = await this.server.connect(transport);
        console.error('[DEBUG] Server connected to transport successfully');
        console.error('[DEBUG] Connect result:', connectResult);
      } catch (err) {
        console.error('[ERROR] Failed to connect server to transport:');
        console.error('[ERROR] Error message:', err.message);
        console.error('[ERROR] Error stack:', err.stack);
        console.error('[ERROR] Error details:', err);
        console.error('[ERROR] Server state:', {
          serverInfo: this.serverInfo,
          capabilities: this.server.capabilities,
          transportState: transport
        });
        throw err;
      }
      
      // Signal that we're ready with name and capabilities on both stdout and stderr for robustness
      const readyMessage = `[READY] ${name}\n`;
      const readyJson = JSON.stringify({
        event: 'ready',
        name,
        capabilities: this.server.capabilities,
        timestamp: Date.now()
      });
      
      // Signal that we're ready on stdout first (more reliable)
      console.error('[DEBUG] About to send ready signal to stdout...');
      
      // Buffer outputs to avoid interleaving.
      // NOTE(review): the plain [READY] line is pushed twice (before and after the
      // JSON line), and stdout is also the MCP stdio transport channel — confirm
      // consumers tolerate both the duplicate and non-protocol text on stdout.
      const outputBuffer = [];
      outputBuffer.push(readyMessage);
      outputBuffer.push(readyJson + '\n');
      outputBuffer.push(readyMessage);
      
      // Write all buffered outputs at once
      try {
        process.stdout.write(outputBuffer.join(''));
        console.error('[DEBUG] Successfully wrote ready signal to stdout');
      } catch (err) {
        console.error('[ERROR] Failed to write to stdout:', err);
        throw err;
      }
      
      // Then send to stderr for debugging
      try {
        process.stderr.write(`[STARTUP] ${name}: sending ready signal\n`);
        process.stderr.write(readyMessage);
        process.stderr.write(`[STARTUP] ${name}: ${readyJson}\n`);
        console.error('[DEBUG] Successfully wrote debug info to stderr');
      } catch (err) {
        console.error('[ERROR] Failed to write to stderr:', err);
        throw err;
      }
      // Add debug info after ready signal
      process.stderr.write(`[STARTUP] ${name}: adding capabilities info\n`);
      console.error(`Mock MCP server ${name} running on stdio`);
      console.error(`[CAPABILITIES] ${JSON.stringify(this.server.capabilities)}`);

      // Keep the process alive but ensure we can exit
      const stdin = process.stdin.resume();
      stdin.unref(); // Allow process to exit if stdin is the only thing keeping it alive

      // Set up a startup timeout
      const startupTimeout = setTimeout(() => {
        console.error(`[TIMEOUT] ${name} server startup timeout after ${process.uptime()}s`);
        console.error('[DEBUG] Process state:', {
          pid: process.pid,
          uptime: process.uptime(),
          memory: process.memoryUsage(),
          connections: this.server?.transport?.connections || []
        });
        process.exit(1);
      }, 10000);

      // Make sure the timeout doesn't keep the process alive
      startupTimeout.unref();
      
      // Monitor event loop blockage
      let lastCheck = Date.now();
      const blockageCheck = setInterval(() => {
        const now = Date.now();
        const delay = now - lastCheck - 1000; // Should be ~1000ms
        if (delay > 100) { // Over 100ms delay indicates blockage
          console.error(`[WARN] Event loop blocked for ${delay}ms in ${name} server`);
        }
        lastCheck = now;
      }, 1000);
      
      blockageCheck.unref(); // Don't prevent exit

      // Handle cleanup
      process.on('SIGTERM', () => {
        clearTimeout(startupTimeout);
        console.error(`[SHUTDOWN] ${this.serverInfo.name}`);
        process.exit(0);
      });

      // Handle other signals
      process.on('SIGINT', () => {
        clearTimeout(startupTimeout);
        // NOTE(review): SDK Server instances don't obviously expose `.info`, so the
        // optional chain likely always falls through to serverInfo — confirm.
        const name = this.server.info?.name || this.serverInfo.name;
        console.error(`[SHUTDOWN] ${name} (interrupted)`);
        process.exit(0);
      });
    } catch (error) {
      const name = this.serverInfo?.name || "unknown";
      console.error(`Error starting mock server ${name}:`, error);
      console.error('Server info:', JSON.stringify(this.serverInfo, null, 2));
      console.error('Server capabilities:', JSON.stringify(this.server.capabilities, null, 2));
      process.exit(1);
    }
  }
}

export { MockMCPServer };
```

--------------------------------------------------------------------------------
/test/performance-benchmark.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Performance Benchmark Tests
 * Demonstrates the performance improvements from cache optimization
 */

import { CachePatcher } from '../src/cache/cache-patcher.js';
import { NCPOrchestrator } from '../src/orchestrator/ncp-orchestrator.js';
import { existsSync, rmSync, mkdirSync, writeFileSync } from 'fs';
import { join } from 'path';
import { tmpdir } from 'os';

// Mock profile data for testing
// Build a synthetic NCP profile containing `mcpCount` fake MCP server entries
// (test-mcp-1 … test-mcp-N), each pointing at a harmless `echo` command.
const createMockProfile = (mcpCount: number = 10) => {
  // Generate the server map up front instead of mutating the profile in a loop.
  const serverEntries = Array.from({ length: mcpCount }, (_, idx) => {
    const id = idx + 1;
    return [
      `test-mcp-${id}`,
      {
        command: 'echo',
        args: [`MCP ${id} simulation`],
        env: {}
      }
    ];
  });

  return {
    name: 'test-profile',
    description: 'Test profile for benchmarking',
    mcpServers: Object.fromEntries(serverEntries) as any,
    metadata: {
      created: new Date().toISOString(),
      modified: new Date().toISOString()
    }
  };
};

// Mock tools for each MCP
// Produce `toolCount` fake tool definitions (tool_1 … tool_N) for the named MCP,
// each with a minimal single-string-input schema.
const createMockTools = (mcpName: string, toolCount: number = 50) =>
  Array.from({ length: toolCount }, (_, idx) => {
    const id = idx + 1;
    return {
      name: `tool_${id}`,
      description: `Tool ${id} for ${mcpName} - performs operation ${id}`,
      inputSchema: {
        type: 'object',
        properties: {
          input: { type: 'string' }
        }
      }
    };
  });

// Benchmark suite: exercises CachePatcher against temp directories and asserts
// wall-clock/memory budgets. Thresholds are intentionally loose to stay CI-friendly.
describe('Performance Benchmarks', () => {
  let tempDir: string;
  let tempProfilesDir: string;
  let tempCacheDir: string;

  beforeEach(() => {
    // Create temporary directories (unique per test run via timestamp suffix)
    tempDir = join(tmpdir(), 'ncp-perf-test-' + Date.now());
    tempProfilesDir = join(tempDir, 'profiles');
    tempCacheDir = join(tempDir, 'cache');

    mkdirSync(tempDir, { recursive: true });
    mkdirSync(tempProfilesDir, { recursive: true });
    mkdirSync(tempCacheDir, { recursive: true });
  });

  afterEach(() => {
    // Clean up
    if (existsSync(tempDir)) {
      rmSync(tempDir, { recursive: true, force: true });
    }
  });

  describe('Cache Operations Performance', () => {
    test('should demonstrate fast cache patching vs full rebuild', async () => {
      // Create a custom cache patcher for testing.
      // Bracket access overrides CachePatcher's private path fields so all I/O
      // lands in the per-test temp cache dir.
      class TestCachePatcher extends CachePatcher {
        constructor() {
          super();
          this['cacheDir'] = tempCacheDir;
          this['toolMetadataCachePath'] = join(tempCacheDir, 'all-tools.json');
          this['embeddingsCachePath'] = join(tempCacheDir, 'embeddings.json');
        }
      }

      const cachePatcher = new TestCachePatcher();
      const profile = createMockProfile(5); // 5 MCPs with 50 tools each

      console.log('\\n📊 Performance Benchmark: Cache Operations');
      console.log('=' .repeat(60));

      // Benchmark: Adding MCPs one by one (incremental patching).
      // hrtime.bigint() gives nanosecond resolution; divided by 1e6 below for ms.
      const incrementalStart = process.hrtime.bigint();

      for (const [mcpName, config] of Object.entries(profile.mcpServers)) {
        const tools = createMockTools(mcpName);
        const serverInfo = { name: mcpName, version: '1.0.0' };

        await cachePatcher.patchAddMCP(mcpName, config as any, tools, serverInfo);
      }

      const incrementalEnd = process.hrtime.bigint();
      const incrementalTime = Number(incrementalEnd - incrementalStart) / 1_000_000; // Convert to ms

      // Update profile hash
      const profileHash = cachePatcher.generateProfileHash(profile);
      await cachePatcher.updateProfileHash(profileHash);

      // Get cache statistics
      const stats = await cachePatcher.getCacheStats();

      console.log(`✅ Incremental cache building: ${incrementalTime.toFixed(2)}ms`);
      console.log(`   • MCPs processed: ${stats.mcpCount}`);
      console.log(`   • Tools cached: ${stats.toolCount}`);
      console.log(`   • Average time per MCP: ${(incrementalTime / stats.mcpCount).toFixed(2)}ms`);

      // Benchmark: Cache validation (startup simulation)
      const validationStart = process.hrtime.bigint();

      const isValid = await cachePatcher.validateCacheWithProfile(profileHash);

      const validationEnd = process.hrtime.bigint();
      const validationTime = Number(validationEnd - validationStart) / 1_000_000;

      console.log(`⚡ Cache validation: ${validationTime.toFixed(2)}ms`);
      console.log(`   • Cache valid: ${isValid}`);

      // Performance assertions
      expect(incrementalTime).toBeLessThan(1000); // Should complete in under 1 second
      expect(validationTime).toBeLessThan(50);     // Should validate in under 50ms
      expect(isValid).toBe(true);
      expect(stats.mcpCount).toBe(5);
      expect(stats.toolCount).toBe(250); // 5 MCPs × 50 tools each

    }, 10000); // 10 second timeout

    test('should demonstrate cache removal performance', async () => {
      // Same private-path override trick as above; see note there.
      class TestCachePatcher extends CachePatcher {
        constructor() {
          super();
          this['cacheDir'] = tempCacheDir;
          this['toolMetadataCachePath'] = join(tempCacheDir, 'all-tools.json');
          this['embeddingsCachePath'] = join(tempCacheDir, 'embeddings.json');
        }
      }

      const cachePatcher = new TestCachePatcher();

      // Pre-populate cache with test data
      for (let i = 1; i <= 3; i++) {
        const mcpName = `test-mcp-${i}`;
        const config = { command: 'echo', args: ['test'] };
        const tools = createMockTools(mcpName, 20);
        await cachePatcher.patchAddMCP(mcpName, config, tools, {});
      }

      console.log('\\n🗑️  Performance Benchmark: Cache Removal');
      console.log('=' .repeat(60));

      const removalStart = process.hrtime.bigint();

      // Remove an MCP from cache (both tool metadata and its embeddings)
      await cachePatcher.patchRemoveMCP('test-mcp-2');
      await cachePatcher.patchRemoveEmbeddings('test-mcp-2');

      const removalEnd = process.hrtime.bigint();
      const removalTime = Number(removalEnd - removalStart) / 1_000_000;

      const stats = await cachePatcher.getCacheStats();

      console.log(`🔧 MCP removal: ${removalTime.toFixed(2)}ms`);
      console.log(`   • Remaining MCPs: ${stats.mcpCount}`);
      console.log(`   • Remaining tools: ${stats.toolCount}`);

      expect(removalTime).toBeLessThan(100); // Should complete in under 100ms
      expect(stats.mcpCount).toBe(2);        // Should have 2 MCPs left
      expect(stats.toolCount).toBe(40);      // Should have 40 tools left (2 MCPs × 20 tools)

    }, 5000);
  });

  describe('Memory Usage Optimization', () => {
    test('should demonstrate efficient memory usage with cache', async () => {
      class TestCachePatcher extends CachePatcher {
        constructor() {
          super();
          this['cacheDir'] = tempCacheDir;
          this['toolMetadataCachePath'] = join(tempCacheDir, 'all-tools.json');
        }
      }

      const cachePatcher = new TestCachePatcher();

      // Measure initial memory.
      // NOTE(review): heapUsed deltas are GC-sensitive, so per-tool numbers are
      // approximate — hence the generous thresholds below.
      const initialMemory = process.memoryUsage();

      // Add a realistic number of MCPs and tools
      const mcpCount = 10;
      const toolsPerMCP = 100;

      for (let i = 1; i <= mcpCount; i++) {
        const mcpName = `memory-test-mcp-${i}`;
        const config = { command: 'echo', args: ['test'] };
        const tools = createMockTools(mcpName, toolsPerMCP);

        await cachePatcher.patchAddMCP(mcpName, config, tools, {});
      }

      // Measure memory after caching
      const finalMemory = process.memoryUsage();
      const memoryDiff = finalMemory.heapUsed - initialMemory.heapUsed;
      const totalTools = mcpCount * toolsPerMCP;

      console.log('\\n🧠 Memory Usage Analysis');
      console.log('=' .repeat(60));
      console.log(`📊 Total tools cached: ${totalTools}`);
      console.log(`📈 Memory increase: ${(memoryDiff / 1024 / 1024).toFixed(2)} MB`);
      console.log(`⚖️  Memory per tool: ${(memoryDiff / totalTools).toFixed(0)} bytes`);

      // Memory should be reasonable (less than 50MB for 1000 tools)
      expect(memoryDiff).toBeLessThan(50 * 1024 * 1024); // Less than 50MB
      expect(memoryDiff / totalTools).toBeLessThan(12288); // Less than 12KB per tool (CI-friendly threshold)

    }, 10000);
  });

  describe('Startup Time Simulation', () => {
    test('should demonstrate optimized vs legacy startup times', async () => {
      // This test simulates the performance difference between optimized and legacy startup.
      // No real MCPs are contacted: "legacy" probing is modeled with setTimeout delays.

      const profile = createMockProfile(8); // 8 MCPs
      const profileHash = 'test-startup-hash';

      console.log('\\n🚀 Startup Performance Simulation');
      console.log('=' .repeat(60));

      // Simulate optimized startup (cache hit)
      const optimizedStart = process.hrtime.bigint();

      // Fast operations that optimized startup would do:
      // 1. Profile hash validation
      const hashValidation = process.hrtime.bigint();
      // Hash generation is very fast.
      // NOTE(review): CommonJS require() in a TS test file — works under Jest's
      // CJS transform; confirm if the suite ever moves to native ESM.
      const testHash = require('crypto').createHash('sha256')
        .update(JSON.stringify(profile.mcpServers))
        .digest('hex');
      const hashTime = Number(process.hrtime.bigint() - hashValidation) / 1_000_000;

      // 2. Cache loading simulation (just file I/O)
      const cacheLoadStart = process.hrtime.bigint();
      // Simulate loading cached data
      const mockCacheData = {
        version: '1.0.0',
        profileHash: testHash,
        mcps: {} as any
      };

      // Simulate processing cached MCPs (no network calls)
      for (let i = 0; i < 8; i++) {
        const tools = createMockTools(`mcp-${i}`, 50);
        mockCacheData.mcps[`mcp-${i}`] = {
          tools,
          serverInfo: { name: `mcp-${i}`, version: '1.0.0' }
        };
      }

      const cacheLoadTime = Number(process.hrtime.bigint() - cacheLoadStart) / 1_000_000;
      const optimizedTotal = Number(process.hrtime.bigint() - optimizedStart) / 1_000_000;

      // Simulate legacy startup (cache miss - would need to probe all MCPs)
      const legacyStart = process.hrtime.bigint();

      // Legacy startup would need to:
      // 1. Probe each MCP server (simulated network delay)
      let totalProbeTime = 0;
      for (let i = 0; i < 8; i++) {
        const probeStart = process.hrtime.bigint();
        // Simulate MCP probing (even with 100ms timeout per MCP)
        await new Promise(resolve => setTimeout(resolve, 50)); // 50ms per MCP
        totalProbeTime += Number(process.hrtime.bigint() - probeStart) / 1_000_000;
      }

      // 2. Index all tools (simulation)
      const indexingStart = process.hrtime.bigint();
      // Simulate tool indexing overhead
      await new Promise(resolve => setTimeout(resolve, 100)); // 100ms indexing
      const indexingTime = Number(process.hrtime.bigint() - indexingStart) / 1_000_000;

      const legacyTotal = Number(process.hrtime.bigint() - legacyStart) / 1_000_000;

      console.log('⚡ Optimized startup (cache hit):');
      console.log(`   • Profile hash validation: ${hashTime.toFixed(2)}ms`);
      console.log(`   • Cache loading: ${cacheLoadTime.toFixed(2)}ms`);
      console.log(`   • Total time: ${optimizedTotal.toFixed(2)}ms`);
      console.log('');
      console.log('🐌 Legacy startup (cache miss):');
      console.log(`   • MCP probing: ${totalProbeTime.toFixed(2)}ms`);
      console.log(`   • Tool indexing: ${indexingTime.toFixed(2)}ms`);
      console.log(`   • Total time: ${legacyTotal.toFixed(2)}ms`);
      console.log('');
      console.log(`🎯 Performance improvement: ${(legacyTotal / optimizedTotal).toFixed(1)}x faster`);
      console.log(`💾 Time saved: ${(legacyTotal - optimizedTotal).toFixed(2)}ms`);

      // Performance assertions based on PRD targets.
      // legacyTotal is guaranteed > 400ms by construction (8 × 50ms probes + 100ms indexing).
      expect(optimizedTotal).toBeLessThan(250);     // Target: 250ms startup
      expect(legacyTotal).toBeGreaterThan(400);     // Legacy would be much slower
      expect(legacyTotal / optimizedTotal).toBeGreaterThan(2); // At least 2x improvement

    }, 15000); // 15 second timeout for this test
  });
});
```

--------------------------------------------------------------------------------
/docs/guides/how-it-works.md:
--------------------------------------------------------------------------------

```markdown
# NCP Technical Guide

## The N-to-1 Problem & Solution

### The N Problem: Cognitive Overload

When AI assistants connect directly to multiple MCP servers, they face cognitive overload:

```mermaid
graph TB
    AI[AI Assistant] --> MCP1[Filesystem MCP<br/>12 tools]
    AI --> MCP2[Database MCP<br/>8 tools]
    AI --> MCP3[Email MCP<br/>6 tools]
    AI --> MCP4[Web MCP<br/>15 tools]
    AI --> MCP5[Shell MCP<br/>10 tools]
    AI --> MCP6[Cloud MCP<br/>20 tools]
```

**Problems:**
- **Schema Complexity**: Each MCP exposes 5-15+ tools with detailed schemas
- **Context Explosion**: large setups (250+ tools) can consume 150k+ tokens of context (see table below)
- **Decision Paralysis**: AI must analyze dozens of similar tools
- **Response Delays**: 3-8 second response times due to analysis overhead

**Example**: A typical setup with filesystem, git, web, email, and database MCPs presents 71+ tool schemas to the AI simultaneously.

### The 1 Solution: N-to-1 Orchestration

```mermaid
graph TB
    AI[AI Assistant] --> NCP[NCP Hub<br/>2 unified tools]
    NCP --> MCP1[Filesystem MCP<br/>12 tools]
    NCP --> MCP2[Database MCP<br/>8 tools]
    NCP --> MCP3[Email MCP<br/>6 tools]
    NCP --> MCP4[Web MCP<br/>15 tools]
    NCP --> MCP5[Shell MCP<br/>10 tools]
    NCP --> MCP6[Cloud MCP<br/>20 tools]
```

NCP consolidates complexity behind a simple interface:
- **Unified Schema**: AI sees just 2 tools (`find` and `run`)
- **Smart Routing**: NCP handles tool discovery and execution
- **Context Reduction**: 150k+ tokens → 8k tokens
- **Fast Responses**: Sub-second tool selection

**Result**: N complex MCP servers → 1 simple interface. AI sees just 2 tools (`find` and `run`), NCP handles everything behind the scenes.

## Token Savings Analysis

### Real-World Measurements

| Setup Size | Tools Exposed | Context Without NCP | Context With NCP | Token Savings |
|------------|---------------|-------------------|------------------|---------------|
| **Small** (5 MCPs) | 25-30 tools | ~15,000 tokens | ~8,000 tokens | **47%** |
| **Medium** (15 MCPs) | 75-90 tools | ~45,000 tokens | ~12,000 tokens | **73%** |
| **Large** (30 MCPs) | 150+ tools | ~90,000 tokens | ~15,000 tokens | **83%** |
| **Enterprise** (50+ MCPs) | 250+ tools | ~150,000 tokens | ~20,000 tokens | **87%** |

### Why Such Massive Savings?

1. **Schema Consolidation**: 50+ detailed tool schemas → 2 simple schemas
2. **Lazy Loading**: Tools only loaded when actually needed, not preemptively
3. **Smart Caching**: Vector embeddings cached locally, no regeneration overhead
4. **Health Filtering**: Broken/unavailable tools excluded from context automatically
5. **Semantic Compression**: Natural language queries vs. formal tool specifications

## Architecture Deep Dive

### Dual Architecture: Server + Client

NCP operates as both an **MCP server** (to your AI client) and an **MCP client** (to downstream MCPs):

```mermaid
graph LR
    subgraph "AI Client Layer"
        Claude[Claude Desktop]
        VSCode[VS Code]
        Cursor[Cursor]
    end
    subgraph "NCP Hub Layer"
        Server[MCP Server Interface]
        Orchestrator[Intelligent Orchestrator]
        Client[MCP Client Pool]
    end
    subgraph "MCP Ecosystem"
        FS[Filesystem]
        DB[Database]
        Email[Email]
        Web[Web APIs]
        Shell[Shell]
    end
    Claude --> Server
    VSCode --> Server
    Cursor --> Server
    Server --> Orchestrator
    Orchestrator --> Client
    Client --> FS
    Client --> DB
    Client --> Email
    Client --> Web
    Client --> Shell
```

### Core Components

#### 1. Semantic Discovery Engine
- **Vector Embeddings**: Uses @xenova/transformers for semantic matching
- **Query Processing**: Converts natural language to tool capabilities
- **Confidence Scoring**: Ranks tools by relevance (0-1 scale)
- **Cache Management**: Persistent embeddings for fast repeated searches

```typescript
interface DiscoveryResult {
  tool: string;
  mcp: string;
  confidence: number;
  description: string;
  schema: ToolSchema;
}
```

#### 2. Intelligent Orchestrator
- **Health-Aware Routing**: Automatic failover to healthy alternatives
- **Connection Pooling**: Efficient resource management
- **Load Balancing**: Distributes requests across available MCPs
- **Error Recovery**: Graceful handling of MCP failures

#### 3. Health Monitor
- **Continuous Monitoring**: Tracks MCP server status in real-time
- **Automatic Blacklisting**: Removes unhealthy servers from routing
- **Recovery Detection**: Automatically re-enables recovered servers
- **Performance Metrics**: Latency and success rate tracking

#### 4. Connection Pool Manager
- **Lazy Loading**: MCPs only loaded when needed
- **Resource Cleanup**: Automatic connection management
- **Memory Optimization**: Efficient use of system resources
- **Concurrent Execution**: Parallel tool execution when possible

### Token Optimization Process

```mermaid
flowchart TD
    Query["AI Query: read a file"] --> Semantic[Semantic Analysis]
    Semantic --> Cache{Embeddings Cached?}
    Cache -->|Yes| Search[Vector Search]
    Cache -->|No| Generate[Generate Embeddings]
    Generate --> Store[Cache Embeddings]
    Store --> Search
    Search --> Rank[Rank by Confidence]
    Rank --> Health{Health Check}
    Health -->|Healthy| Return[Return Top Results]
    Health -->|Unhealthy| Alternative[Find Alternatives]
    Alternative --> Return
    Return --> Tokens[Minimal Token Usage]
```

**Process Flow:**
1. **Request Interception**: AI sends natural language query to NCP
2. **Semantic Analysis**: Vector search finds relevant tools
3. **Health Filtering**: Only healthy MCPs included in results
4. **Schema Simplification**: Complex schemas abstracted to simple interface
5. **Response Optimization**: Minimal context returned to AI

**Result**: Instead of loading 50+ tool schemas (150k+ tokens), AI sees 2 unified tools (8k tokens) with intelligent routing behind the scenes.

## Performance Characteristics

### Response Time Improvements
- **Without NCP**: 3-8 seconds (analysis overhead)
- **With NCP**: 0.5-1.5 seconds (direct semantic search)
- **Improvement**: 3-5x faster tool selection

### Memory Usage
- **Schema Storage**: 95% reduction in AI context memory
- **Cache Efficiency**: Embeddings cached for instant retrieval
- **Resource Management**: Automatic cleanup prevents memory leaks

### Scalability
- **Horizontal**: Support for 100+ MCP servers
- **Vertical**: Efficient single-machine resource usage
- **Network**: Minimal bandwidth usage through smart caching

## Advanced Features

### Profile System
Organize MCPs by environment, project, or use case:

```json
{
  "profiles": {
    "development": {
      "stripe": { "env": { "API_KEY": "sk_test_..." } },
      "database": { "args": ["--host", "localhost"] }
    },
    "production": {
      "stripe": { "env": { "API_KEY": "sk_live_..." } },
      "database": { "args": ["--host", "prod.db.com"] }
    }
  }
}
```

### Health-Aware Execution
Automatic failover and recovery:

```typescript
interface HealthStatus {
  status: 'healthy' | 'degraded' | 'unhealthy';
  latency: number;
  successRate: number;
  lastCheck: Date;
  alternatives?: string[];
}
```

### Vector Similarity Search
Semantic tool discovery using embeddings:

```typescript
interface ToolEmbedding {
  tool: string;
  mcp: string;
  vector: number[];
  description: string;
  keywords: string[];
}
```

## Integration Patterns

### MCP Client Compatibility
NCP maintains full compatibility with:
- **Claude Desktop**: Native MCP protocol support
- **VS Code**: MCP extension integration
- **Cursor**: Built-in MCP support
- **Custom Clients**: Standard JSON-RPC 2.0 protocol

### Tool Execution Flow
```mermaid
sequenceDiagram
    participant AI as AI Assistant
    participant NCP as NCP Hub
    participant Vector as Vector Search
    participant Health as Health Monitor
    participant MCP1 as Target MCP

    AI->>NCP: "Find tools to read files"
    NCP->>Vector: Semantic search query
    Vector-->>NCP: Ranked tool matches
    NCP->>Health: Check tool availability
    Health-->>NCP: Healthy tools only
    NCP-->>AI: Filtered, ranked results
    AI->>NCP: Execute file_read tool
    NCP->>Health: Verify MCP status
    Health-->>NCP: MCP healthy
    NCP->>MCP1: Proxied tool call
    MCP1-->>NCP: Tool response
    NCP-->>AI: Formatted response
```

1. AI sends natural language query
2. NCP performs semantic search
3. Best matching tools returned with confidence scores
4. AI selects tool and sends execution request
5. NCP routes to appropriate MCP server
6. Results returned with error handling

### Error Handling Strategy
- **Graceful Degradation**: Partial failures don't break workflow
- **Automatic Retry**: Transient failures handled transparently
- **Alternative Routing**: Backup tools suggested when primary fails
- **User Notification**: Clear error messages with actionable advice

## Security Considerations

### API Key Management
- **Environment Isolation**: Separate credentials per profile
- **No Storage**: Credentials passed through, never persisted
- **Process Isolation**: Each MCP runs in separate process

### Network Security
- **Local Communication**: All MCP communication over localhost
- **No External Calls**: NCP doesn't make external network requests
- **Process Sandboxing**: MCPs isolated from each other

### Access Control
- **Profile Permissions**: Fine-grained access control per profile
- **Tool Filtering**: Restrict access to specific tools/MCPs
- **Audit Logging**: Optional request/response logging

## Troubleshooting Guide

### Common Issues

#### High Memory Usage
- **Cause**: Too many MCPs loaded simultaneously
- **Solution**: Use profiles to segment MCPs
- **Prevention**: Configure lazy loading

#### Slow Response Times
- **Cause**: Unhealthy MCPs in pool
- **Solution**: Run `ncp list --depth 1` to check health
- **Prevention**: Enable automatic health monitoring

#### Tool Discovery Failures
- **Cause**: Embedding cache corruption or no MCPs configured
- **Solution**: Check `ncp list` and ensure MCPs are properly added
- **Prevention**: Regular configuration validation

### Debug Mode
Enable detailed logging:
```bash
DEBUG=ncp:* ncp find "file tools"
```

### Performance Monitoring
Real-time health checking:
```bash
ncp list --depth 1    # Check MCP health status
ncp config validate   # Validate configuration
```

## Advanced Configuration Patterns

### Multi-Environment Orchestration
```bash
# Environment-specific MCP pools
ncp add stripe-dev npx stripe-cli --env STRIPE_KEY=sk_test_...
ncp add stripe-prod npx stripe-cli --env STRIPE_KEY=sk_live_...

# Conditional routing based on context
ncp run "stripe:create_payment" --context="development"
```

### High-Availability Setups
```bash
# Redundant MCP configurations
ncp add filesystem-primary npx @modelcontextprotocol/server-filesystem ~/primary
ncp add filesystem-backup npx @modelcontextprotocol/server-filesystem ~/backup

# Automatic failover testing
ncp config validate --check-redundancy
```

## Contributing to NCP

### Development Setup
```bash
git clone https://github.com/portel-dev/ncp
cd ncp
npm install
npm run dev
```

### Testing Strategy
- **Unit Tests**: Core component testing
- **Integration Tests**: End-to-end MCP workflows
- **Performance Tests**: Token usage and response time validation
- **Compatibility Tests**: Cross-platform MCP client testing

### Architecture Principles
1. **Simplicity**: Simple interface hiding complex orchestration
2. **Performance**: Sub-second response times required
3. **Reliability**: Graceful handling of MCP failures
4. **Scalability**: Support for 100+ MCPs
5. **Compatibility**: Full MCP protocol compliance

---

**The Magic**: NCP maintains real connections to all your MCP servers, but presents them through one intelligent interface that speaks your AI's language, dramatically reducing cognitive load and token costs while improving performance.
```

--------------------------------------------------------------------------------
/test/tool-context-resolver.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Comprehensive Tests for ToolContextResolver
 * Following ncp-oss3 patterns for 95%+ coverage
 */

import { describe, it, expect, beforeEach } from '@jest/globals';
import { ToolContextResolver } from '../src/services/tool-context-resolver';

describe('ToolContextResolver - Comprehensive Coverage', () => {
  beforeEach(() => {
    // Reset any runtime modifications between tests
  });

  describe('🎯 Context Resolution by Tool Identifier', () => {
    it('should resolve context from tool identifier format', () => {
      // Test mcp:tool format parsing
      expect(ToolContextResolver.getContext('filesystem:read_file')).toBe('filesystem');
      expect(ToolContextResolver.getContext('stripe:create_payment')).toBe('payment');
      expect(ToolContextResolver.getContext('github:get_repo')).toBe('development');
    });

    it('should handle tool identifier with no colon separator', () => {
      // Test edge case: no colon separator
      expect(ToolContextResolver.getContext('filesystem')).toBe('filesystem');
      expect(ToolContextResolver.getContext('unknown-mcp')).toBe('general');
    });

    it('should handle empty tool identifier', () => {
      expect(ToolContextResolver.getContext('')).toBe('general');
    });

    it('should handle tool identifier with multiple colons', () => {
      expect(ToolContextResolver.getContext('namespace:mcp:tool')).toBe('general');
    });
  });

  describe('🎯 Direct MCP Context Resolution', () => {
    it('should resolve all predefined MCP contexts', () => {
      // Exercise every predefined mapping for 100% coverage, expressed as a
      // single lookup table instead of repeated expect() calls.
      const predefined: Record<string, string> = {
        filesystem: 'filesystem',
        memory: 'database',
        shell: 'system',
        'sequential-thinking': 'ai',
        portel: 'development',
        tavily: 'web',
        'desktop-commander': 'system',
        stripe: 'payment',
        'context7-mcp': 'documentation',
        search: 'search',
        weather: 'weather',
        http: 'web',
        github: 'development',
        gitlab: 'development',
        slack: 'communication',
        discord: 'communication',
        email: 'communication',
        database: 'database',
        redis: 'database',
        mongodb: 'database',
        postgresql: 'database',
        mysql: 'database',
        elasticsearch: 'search',
        docker: 'system',
        kubernetes: 'system',
        aws: 'cloud',
        azure: 'cloud',
        gcp: 'cloud',
      };
      for (const [mcpName, expectedContext] of Object.entries(predefined)) {
        expect(ToolContextResolver.getContextByMCP(mcpName)).toBe(expectedContext);
      }
    });

    it('should handle case insensitive MCP names', () => {
      // Lookups normalize casing before matching
      expect(ToolContextResolver.getContextByMCP('FILESYSTEM')).toBe('filesystem');
      expect(ToolContextResolver.getContextByMCP('GitHub')).toBe('development');
      expect(ToolContextResolver.getContextByMCP('AWS')).toBe('cloud');
    });

    it('should handle empty and null MCP names', () => {
      // Nullish or empty names fall back to the 'general' context
      expect(ToolContextResolver.getContextByMCP('')).toBe('general');
      expect(ToolContextResolver.getContextByMCP(null as any)).toBe('general');
      expect(ToolContextResolver.getContextByMCP(undefined as any)).toBe('general');
    });
  });

  describe('🎯 Pattern Matching Rules Coverage', () => {
    // Each entry: [category label, expected context, sample MCP names that
    // should match that category's pattern rule]. Test names are generated so
    // they stay identical to the hand-written originals.
    const patternCases: Array<[string, string, string[]]> = [
      ['filesystem', 'filesystem', ['file-manager', 'fs-utils', 'custom-file-system']],
      ['database', 'database', ['my-db', 'data-store', 'user-data']],
      ['web', 'web', ['web-scraper', 'http-client', 'api-gateway']],
      ['cloud', 'cloud', ['cloud-storage', 'aws-lambda', 'azure-functions', 'gcp-compute']],
      ['system', 'system', ['docker-compose', 'container-runtime']],
      ['development', 'development', ['git-manager', 'github-actions']],
    ];

    for (const [category, expectedContext, sampleNames] of patternCases) {
      it(`should match ${category} patterns`, () => {
        for (const name of sampleNames) {
          expect(ToolContextResolver.getContextByMCP(name)).toBe(expectedContext);
        }
      });
    }

    it('should fall back to general for unknown patterns', () => {
      // Names matching no mapping and no pattern rule resolve to 'general'
      for (const name of ['random-mcp', 'unknown-service', '123456']) {
        expect(ToolContextResolver.getContextByMCP(name)).toBe('general');
      }
    });
  });

  describe('🎯 Context Enumeration and Validation', () => {
    it('should return all known contexts', () => {
      const contexts = ToolContextResolver.getAllContexts();

      // Every context category must be present in the enumeration
      const expectedContexts = [
        'filesystem', 'database', 'system', 'ai', 'development', 'web',
        'payment', 'documentation', 'search', 'weather', 'communication',
        'cloud', 'general',
      ];
      for (const context of expectedContexts) {
        expect(contexts).toContain(context);
      }

      // The returned list must already be in sorted order
      expect(contexts).toEqual([...contexts].sort());
    });

    it('should validate known contexts', () => {
      // Known contexts validate true; anything else (including empty) is false
      expect(ToolContextResolver.isKnownContext('filesystem')).toBe(true);
      expect(ToolContextResolver.isKnownContext('web')).toBe(true);
      expect(ToolContextResolver.isKnownContext('general')).toBe(true);
      expect(ToolContextResolver.isKnownContext('unknown')).toBe(false);
      expect(ToolContextResolver.isKnownContext('')).toBe(false);
    });
  });

  describe('🎯 Runtime Configuration', () => {
    it('should allow adding new mappings', () => {
      ToolContextResolver.addMapping('custom-mcp', 'custom');

      // Lookup works regardless of caller casing once registered
      expect(ToolContextResolver.getContextByMCP('custom-mcp')).toBe('custom');
      expect(ToolContextResolver.getContextByMCP('CUSTOM-MCP')).toBe('custom');
    });

    it('should allow updating existing mappings', () => {
      // Overwrite a predefined mapping, then restore it so later tests are
      // unaffected (mappings are shared static state).
      const previousContext = ToolContextResolver.getContextByMCP('github');
      ToolContextResolver.addMapping('github', 'version-control');

      expect(ToolContextResolver.getContextByMCP('github')).toBe('version-control');

      ToolContextResolver.addMapping('github', previousContext);
    });

    it('should handle case normalization in addMapping', () => {
      // Registration normalizes the MCP name, so any casing resolves
      ToolContextResolver.addMapping('TEST-MCP', 'test');

      expect(ToolContextResolver.getContextByMCP('test-mcp')).toBe('test');
      expect(ToolContextResolver.getContextByMCP('TEST-MCP')).toBe('test');
    });
  });

  describe('🎯 Reverse Context Lookup', () => {
    it('should find MCPs for specific contexts', () => {
      const filesystemMCPs = ToolContextResolver.getMCPsForContext('filesystem');
      expect(filesystemMCPs).toContain('filesystem');
      // Bug fix: compare against a sorted *copy*. Array.prototype.sort()
      // mutates in place and returns the same reference, so the previous
      // `expect(arr).toEqual(arr.sort())` was a tautology that could never
      // fail even if the result were unsorted.
      expect(filesystemMCPs).toEqual([...filesystemMCPs].sort()); // Should be sorted

      const databaseMCPs = ToolContextResolver.getMCPsForContext('database');
      expect(databaseMCPs).toContain('memory');
      expect(databaseMCPs).toContain('database');
      expect(databaseMCPs).toContain('redis');
      expect(databaseMCPs).toContain('mongodb');
      expect(databaseMCPs).toContain('postgresql');
      expect(databaseMCPs).toContain('mysql');

      const systemMCPs = ToolContextResolver.getMCPsForContext('system');
      expect(systemMCPs).toContain('shell');
      expect(systemMCPs).toContain('desktop-commander');
      expect(systemMCPs).toContain('docker');
      expect(systemMCPs).toContain('kubernetes');

      const developmentMCPs = ToolContextResolver.getMCPsForContext('development');
      expect(developmentMCPs).toContain('portel');
      expect(developmentMCPs).toContain('github');
      expect(developmentMCPs).toContain('gitlab');

      const communicationMCPs = ToolContextResolver.getMCPsForContext('communication');
      expect(communicationMCPs).toContain('slack');
      expect(communicationMCPs).toContain('discord');
      expect(communicationMCPs).toContain('email');

      const cloudMCPs = ToolContextResolver.getMCPsForContext('cloud');
      expect(cloudMCPs).toContain('aws');
      expect(cloudMCPs).toContain('azure');
      expect(cloudMCPs).toContain('gcp');
    });

    it('should return empty array for unknown contexts', () => {
      // Unknown or empty context names yield an empty result, not an error
      expect(ToolContextResolver.getMCPsForContext('unknown')).toEqual([]);
      expect(ToolContextResolver.getMCPsForContext('')).toEqual([]);
    });

    it('should handle contexts with single MCP', () => {
      // Contexts backed by exactly one MCP return a one-element array
      const aiMCPs = ToolContextResolver.getMCPsForContext('ai');
      expect(aiMCPs).toEqual(['sequential-thinking']);

      const paymentMCPs = ToolContextResolver.getMCPsForContext('payment');
      expect(paymentMCPs).toEqual(['stripe']);

      const weatherMCPs = ToolContextResolver.getMCPsForContext('weather');
      expect(weatherMCPs).toEqual(['weather']);
    });
  });

  describe('🎯 Edge Cases and Error Handling', () => {
    it('should handle special characters in MCP names', () => {
      // Punctuation-laden names match no mapping or pattern → 'general'
      for (const name of ['mcp-with-dashes', 'mcp_with_underscores', 'mcp.with.dots']) {
        expect(ToolContextResolver.getContextByMCP(name)).toBe('general');
      }
    });

    it('should handle numeric MCP names', () => {
      expect(ToolContextResolver.getContextByMCP('123')).toBe('general');
      expect(ToolContextResolver.getContextByMCP('mcp-v2')).toBe('general');
    });

    it('should handle very long MCP names', () => {
      // A 1000-character name must not crash the resolver
      const veryLongName = 'a'.repeat(1000);
      expect(ToolContextResolver.getContextByMCP(veryLongName)).toBe('general');
    });

    it('should handle whitespace in MCP names', () => {
      // Surrounding whitespace is tolerated when resolving known names
      expect(ToolContextResolver.getContextByMCP(' filesystem ')).toBe('filesystem');
      expect(ToolContextResolver.getContextByMCP('github\t')).toBe('development');
    });
  });
});
```

--------------------------------------------------------------------------------
/INTERNAL-MCP-ARCHITECTURE.md:
--------------------------------------------------------------------------------

```markdown
# Internal MCP Architecture - Complete! 🎉

## ✅ **What Was Implemented**

We've successfully implemented an **internal MCP architecture** where NCP exposes management tools as if they were regular MCPs, but they're handled internally without external processes.

---

## 🏗️ **Architecture Overview**

### **Before: Direct Exposure** ❌
```
NCP MCP Server
├── find (top-level)
├── run (top-level)
├── add_mcp (top-level)     ← Exposed directly!
└── remove_mcp (top-level)  ← Exposed directly!
```

### **After: Internal MCP Pattern** ✅
```
NCP MCP Server
├── find (top-level)  ← Search tools in configured MCPs
└── run (top-level)   ← Execute ANY tool (external or internal)

Internal MCPs (discovered via find, executed via run):
└── ncp (internal MCP)
    ├── add       ← ncp:add
    ├── remove    ← ncp:remove
    ├── list      ← ncp:list
    ├── import    ← ncp:import (clipboard/file/discovery)
    └── export    ← ncp:export (clipboard/file)
```

---

## 🔑 **Key Concepts**

### **1. The "Inception" Pattern**

| Tool | Purpose | Analogy |
|------|---------|---------|
| **`find`** (top-level) | Find tools in **configured** MCPs | "What can I do with what I have?" |
| **`ncp:import`** (internal) | Find **new MCPs** from registry | "What else can I add?" (inception!) |

### **2. Internal vs External MCPs**

| Aspect | External MCPs | Internal MCPs |
|--------|---------------|---------------|
| **Process** | Separate process (node, python, etc.) | No process (handled internally) |
| **Discovery** | Same (appears in `find` results) | Same (appears in `find` results) |
| **Execution** | Via MCP protocol (stdio transport) | Direct method call |
| **Configuration** | Needs command, args, env | Hardcoded in NCP |
| **Examples** | github, filesystem, brave-search | ncp (management tools) |

---

## 📁 **Files Created**

### **1. Internal MCP Types** (`src/internal-mcps/types.ts`)
```typescript
export interface InternalTool {
  name: string;
  description: string;
  inputSchema: { /* JSON Schema */ };
}

export interface InternalMCP {
  name: string;
  description: string;
  tools: InternalTool[];
  executeTool(toolName: string, parameters: any): Promise<InternalToolResult>;
}
```

### **2. NCP Management MCP** (`src/internal-mcps/ncp-management.ts`)

Implements all management tools:

```typescript
export class NCPManagementMCP implements InternalMCP {
  name = 'ncp';
  description = 'NCP configuration management tools';

  tools = [
    {
      name: 'add',
      description: 'Add single MCP (with clipboard security)',
      inputSchema: { mcp_name, command, args?, profile? }
    },
    {
      name: 'remove',
      description: 'Remove MCP',
      inputSchema: { mcp_name, profile? }
    },
    {
      name: 'list',
      description: 'List configured MCPs',
      inputSchema: { profile? }
    },
    {
      name: 'import',
      description: 'Bulk import MCPs',
      inputSchema: {
        from: 'clipboard' | 'file' | 'discovery',
        source?: string  // file path or search query
      }
    },
    {
      name: 'export',
      description: 'Export configuration',
      inputSchema: {
        to: 'clipboard' | 'file',
        destination?: string,  // file path
        profile?: string
      }
    }
  ];
}
```

### **3. Internal MCP Manager** (`src/internal-mcps/internal-mcp-manager.ts`)

Manages all internal MCPs:

```typescript
export class InternalMCPManager {
  private internalMCPs: Map<string, InternalMCP> = new Map();

  constructor() {
    // Register internal MCPs
    this.registerInternalMCP(new NCPManagementMCP());
  }

  initialize(profileManager: ProfileManager): void {
    // Initialize each internal MCP with ProfileManager
  }

  async executeInternalTool(mcpName: string, toolName: string, params: any) {
    // Route to appropriate internal MCP
  }

  isInternalMCP(mcpName: string): boolean {
    // Check if MCP is internal
  }
}
```

---

## 🔄 **Integration with Orchestrator**

### **Changes to `NCPOrchestrator`**

**1. Added InternalMCPManager:**
```typescript
private internalMCPManager: InternalMCPManager;

constructor() {
  // ...
  this.internalMCPManager = new InternalMCPManager();
}
```

**2. Initialize internal MCPs after ProfileManager:**
```typescript
private async loadProfile() {
  if (!this.profileManager) {
    this.profileManager = new ProfileManager();
    await this.profileManager.initialize();

    // Initialize internal MCPs with ProfileManager
    this.internalMCPManager.initialize(this.profileManager);
  }
}
```

**3. Add internal MCPs to tool discovery:**
```typescript
async initialize() {
  // ... index external MCPs ...

  // Add internal MCPs to discovery
  this.addInternalMCPsToDiscovery();
}

private addInternalMCPsToDiscovery() {
  const internalMCPs = this.internalMCPManager.getAllInternalMCPs();

  for (const mcp of internalMCPs) {
    // Add to definitions
    this.definitions.set(mcp.name, { /* ... */ });

    // Add tools to allTools
    for (const tool of mcp.tools) {
      this.allTools.push({ name: tool.name, description: tool.description, mcpName: mcp.name });
      this.toolToMCP.set(`${mcp.name}:${tool.name}`, mcp.name);
    }

    // Index in discovery engine
    this.discovery.indexMCPTools(mcp.name, discoveryTools);
  }
}
```

**4. Route internal tool execution:**
```typescript
async run(toolName: string, parameters: any) {
  // Parse tool name
  const [mcpName, actualToolName] = toolName.split(':');

  // Check if internal MCP
  if (this.internalMCPManager.isInternalMCP(mcpName)) {
    return await this.internalMCPManager.executeInternalTool(
      mcpName,
      actualToolName,
      parameters
    );
  }

  // Otherwise, execute as external MCP
  // ...
}
```

---

## 🎯 **Tool Definitions**

### **`ncp:add`**
```typescript
{
  mcp_name: string,
  command: string,
  args?: string[],
  profile?: string
}

// Examples:
ncp:add { mcp_name: "github", command: "npx", args: ["-y", "@modelcontextprotocol/server-github"] }
// User can copy {"env":{"GITHUB_TOKEN":"secret"}} to clipboard before approving
```

### **`ncp:remove`**
```typescript
{
  mcp_name: string,
  profile?: string
}

// Example:
ncp:remove { mcp_name: "github" }
```

### **`ncp:list`**
```typescript
{
  profile?: string
}

// Example:
ncp:list { }  // Lists all MCPs in 'all' profile
```

### **`ncp:import`** (Unified bulk import)
```typescript
{
  from: 'clipboard' | 'file' | 'discovery',
  source?: string
}

// Mode 1: From clipboard
ncp:import { }  // Reads JSON from clipboard

// Mode 2: From file
ncp:import { from: "file", source: "~/configs/my-mcps.json" }

// Mode 3: From discovery (registry)
ncp:import { from: "discovery", source: "github automation" }
// Shows numbered list → User selects → Prompts for each → Imports all
```

### **`ncp:export`**
```typescript
{
  to: 'clipboard' | 'file',
  destination?: string,
  profile?: string
}

// Example 1: To clipboard
ncp:export { }  // Exports to clipboard

// Example 2: To file
ncp:export { to: "file", destination: "~/backups/ncp-config.json" }
```

---

## 🚀 **User Experience**

### **Scenario: Add GitHub MCP**

**User:** "Add GitHub MCP"

**AI workflow:**
1. Calls `prompts/get confirm_add_mcp` → Shows dialog
2. User copies `{"env":{"GITHUB_TOKEN":"ghp_..."}}` → Clicks YES
3. AI calls `run` with `ncp:add` → Tool executes internally
4. Returns success (secrets never seen by AI!)

### **Scenario: Bulk Import from Clipboard**

**User:** "Import MCPs from my clipboard"

**AI workflow:**
1. User copies JSON config to clipboard:
   ```json
   {
     "mcpServers": {
       "github": { "command": "npx", "args": [...] },
       "filesystem": { "command": "npx", "args": [...] }
     }
   }
   ```
2. AI calls `run` with `ncp:import { }`
3. NCP reads clipboard → Imports all MCPs
4. Returns: "✅ Imported 2 MCPs from clipboard"

### **Scenario: Discovery Mode** (Future)

**User:** "Find MCPs for GitHub automation"

**AI workflow:**
1. Calls `run` with `ncp:import { from: "discovery", source: "github automation" }`
2. NCP queries registry → Returns numbered list:
   ```
   1. github - Official GitHub MCP
   2. github-actions - Trigger workflows
   3. octokit - Full GitHub API
   ```
3. AI shows list to user → User responds "1,3"
4. For each selected:
   - Show `confirm_add_mcp` prompt
   - User copies secrets if needed → Clicks YES
   - Add MCP with clipboard config
5. Returns: "✅ Imported 2 MCPs"

---

## 🔒 **Security Benefits**

### **Clipboard Security Pattern** (From Phase 1)
- ✅ User explicitly instructed to copy before clicking YES
- ✅ Secrets read server-side (never exposed to AI)
- ✅ Audit trail shows approval, not secrets
- ✅ Informed consent (not sneaky background reading)

### **Internal MCP Architecture** (Phase 2)
- ✅ Management tools discoverable like any MCP
- ✅ No direct exposure in top-level tools
- ✅ Consistent interface (find → run)
- ✅ Can be extended with more internal MCPs

---

## 📊 **Before vs After**

### **Before: Direct Exposure**
```
tools/list → 4 tools
  - find
  - run
  - add_mcp      ← Direct exposure!
  - remove_mcp   ← Direct exposure!
```

### **After: Internal MCP Pattern**
```
tools/list → 2 tools
  - find
  - run

find results → Includes internal MCPs
  - ncp:add
  - ncp:remove
  - ncp:list
  - ncp:import
  - ncp:export

run → Routes internal MCPs to InternalMCPManager
```

---

## 🎯 **Benefits**

1. **Clean Separation** - Top-level tools remain minimal (find, run)
2. **Consistency** - Internal MCPs work exactly like external MCPs
3. **Discoverability** - Users find management tools via `find`
4. **Extensibility** - Easy to add more internal MCPs
5. **Security** - Clipboard pattern integrated into management tools
6. **No Process Overhead** - Internal MCPs execute instantly (no stdio transport)

---

## 🧪 **Testing**

### **Test 1: Discover Internal MCPs**
```bash
echo '{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"find","arguments":{"description":"ncp"}}}' | npx ncp
```

**Expected:** Returns `ncp:add`, `ncp:remove`, `ncp:list`, `ncp:import`, `ncp:export`

### **Test 2: List Configured MCPs**
```bash
echo '{"jsonrpc":"2.0","id":2,"method":"tools/call","params":{"name":"run","arguments":{"tool":"ncp:list"}}}' | npx ncp
```

**Expected:** Returns list of configured MCPs

### **Test 3: Add MCP**
```bash
# First show prompt
echo '{"jsonrpc":"2.0","id":3,"method":"prompts/get","params":{"name":"confirm_add_mcp","arguments":{"mcp_name":"test","command":"echo","args":["hello"]}}}' | npx ncp

# Then call add tool
echo '{"jsonrpc":"2.0","id":4,"method":"tools/call","params":{"name":"run","arguments":{"tool":"ncp:add","parameters":{"mcp_name":"test","command":"echo","args":["hello"]}}}}' | npx ncp
```

**Expected:** MCP added to profile

---

## 🚀 **Next Steps** (Future Phases)

### **Phase 3: Registry Integration** (Pending)
- Implement `ncp:import` discovery mode
- Query MCP registry API
- Show numbered/checkbox list
- Batch prompt + import workflow

### **Phase 4: Advanced Features**
- `ncp:update` - Update MCP configuration
- `ncp:enable` / `ncp:disable` - Toggle MCPs without removing
- `ncp:validate` - Test MCP before adding
- `ncp:clone` - Duplicate MCP with different config

---

## 📝 **Key Implementation Details**

### **Tool ID Format**
```typescript
// External MCPs: "mcpName:toolName"
"github:create_issue"
"filesystem:read_file"

// Internal MCPs: "mcpName:toolName"
"ncp:add"
"ncp:import"
```

### **Tool Routing Logic**
```typescript
if (toolIdentifier.includes(':')) {
  const [mcpName, toolName] = toolIdentifier.split(':');

  if (internalMCPManager.isInternalMCP(mcpName)) {
    // Route to internal MCP
    return internalMCPManager.executeInternalTool(mcpName, toolName, params);
  } else {
    // Route to external MCP via MCP protocol
    return await connection.client.callTool({ name: toolName, arguments: params });
  }
}
```

---

## ✅ **Implementation Complete!**

We've successfully created an elegant internal MCP architecture that:
- ✅ Keeps top-level tools minimal (find, run only)
- ✅ Exposes management tools as an internal MCP (`ncp`)
- ✅ Maintains clipboard security pattern
- ✅ Provides clean parameter design (`from/to` + `source/destination`)
- ✅ Integrates seamlessly with tool discovery
- ✅ Routes execution correctly (internal vs external)

**The foundation is solid. Ready for registry integration (Phase 3)!** 🎉

```

--------------------------------------------------------------------------------
/docs/stories/05-runtime-detective.md:
--------------------------------------------------------------------------------

```markdown
# 🕵️ Story 5: Runtime Detective

*How NCP automatically uses the right Node.js - even when you toggle Claude Desktop settings*

**Reading time:** 2 minutes

---

## 😵 The Pain

You installed NCP as a .mcpb extension in Claude Desktop. It works perfectly! Then...

**Scenario 1: The Mystery Crash**

```
[You toggle "Use Built-in Node.js for MCP" setting in Claude Desktop]
[Restart Claude Desktop]
[NCP starts loading your MCPs...]
[Filesystem MCP: ❌ FAILED]
[GitHub MCP: ❌ FAILED]
[Database MCP: ❌ FAILED]

You: "What broke?! It was working 5 minutes ago!"
```

**The Problem:** Your .mcpb extensions were using Claude Desktop's bundled Node.js (v20). You toggled the setting. Now they're trying to use your system Node.js (v18). Some Node.js 20 features don't exist in v18. Everything breaks.

**Scenario 2: The Path Confusion**

```
[NCP installed globally via npm]
[Uses system Node.js /usr/local/bin/node]
[.mcpb extensions installed in Claude Desktop]
[Expect Claude's bundled Node.js]

NCP spawns extension:
  command: "node /path/to/extension/index.js"

Which node???
  - System node (/usr/local/bin/node)? Wrong version!
  - Claude's bundled node? Don't know the path!
  - Extension breaks silently
```

**The Root Problem:** Node.js runtime is a **moving target**:

- Claude Desktop ships its own Node.js (predictable version)
- Your system has different Node.js (unpredictable version)
- Users toggle settings (changes which runtime to use)
- Extensions need to match the runtime NCP is using
- **Getting it wrong = everything breaks**

---

## 🕵️ The Journey

NCP acts as a **runtime detective** - it figures out which runtime it's using, then ensures all MCPs use the same one.

### **How Detection Works:**

**On Every Startup** (not just once):

```typescript
// Step 1: Check how NCP itself was launched
const myPath = process.execPath;
// Example: /Applications/Claude.app/.../node

// Step 2: Is this Claude Desktop's bundled runtime?
if (myPath.includes('/Claude.app/') ||
    myPath.includes('/Claude/resources/')) {
  // Yes! I'm running via Claude's bundled Node.js
  runtime = 'bundled';
  nodePath = '/Applications/Claude.app/.../node';
  pythonPath = '/Applications/Claude.app/.../python3';
} else {
  // No! I'm running via system runtime
  runtime = 'system';
  nodePath = 'node';    // Use system node
  pythonPath = 'python3'; // Use system python
}

// Step 3: Log what we detected (for debugging)
console.log(`Runtime detected: ${runtime}`);
console.log(`Node path: ${nodePath}`);
```

**Why Every Startup?** Because the runtime can change!

- User toggles "Use Built-in Node.js" → Runtime changes
- User switches between .mcpb and npm install → Runtime changes
- User updates Claude Desktop → Bundled runtime path changes

**Static detection (at install time) would break. Dynamic detection (at runtime) adapts.**

### **How MCP Spawning Works:**

When NCP needs to start an MCP:

```typescript
// MCP config from manifest.json
const mcpConfig = {
  command: "node",  // Generic command
  args: ["${__dirname}/dist/index.js"]
};

// Runtime detector translates to actual runtime
const actualCommand = getRuntimeForCommand(mcpConfig.command);
// If detected bundled: "/Applications/Claude.app/.../node"
// If detected system: "node"

// Spawn MCP with correct runtime
spawn(actualCommand, mcpConfig.args);
```

**Result:** MCPs always use the same runtime NCP is using. No mismatches. No breaks.

---

## ✨ The Magic

What you get with dynamic runtime detection:

### **🎯 Just Works**
- Install NCP any way (npm, .mcpb, manual)
- NCP detects runtime automatically
- MCPs use correct runtime automatically
- Zero configuration required

### **🔄 Adapts to Settings**
- Toggle "Use Built-in Node.js" → NCP adapts on next startup
- Switch between Claude Desktop and system → NCP adapts
- Update Claude Desktop → NCP finds new runtime path

### **🐛 No Version Mismatches**
- NCP running via Node 20 → MCPs use Node 20
- NCP running via Node 18 → MCPs use Node 18
- **Always matched.** No subtle version bugs.

### **🔍 Debuggable**
- NCP logs detected runtime on startup
- Shows Node path, Python path
- Easy to verify correct runtime selected

### **⚡ Works Across Platforms**
- macOS: Detects `/Applications/Claude.app/...`
- Windows: Detects `C:\...\Claude\resources\...`
- Linux: Detects `/opt/Claude/resources/...`

---

## 🔍 How It Works (The Technical Story)

### **Runtime Detection Algorithm:**

```typescript
// src/utils/runtime-detector.ts

export function detectRuntime(): RuntimeInfo {
  const currentNodePath = process.execPath;

  // Check if we're running via Claude Desktop's bundled Node
  const claudeBundledNode = getBundledRuntimePath('claude-desktop', 'node');
  // Returns: "/Applications/Claude.app/.../node" (platform-specific)

  // If our execPath matches the bundled Node path → bundled runtime
  if (currentNodePath === claudeBundledNode) {
    return {
      type: 'bundled',
      nodePath: claudeBundledNode,
      pythonPath: getBundledRuntimePath('claude-desktop', 'python')
    };
  }

  // Check if execPath is inside Claude.app → probably bundled
  const isInsideClaudeApp = currentNodePath.includes('/Claude.app/') ||
                            currentNodePath.includes('\\Claude\\');

  if (isInsideClaudeApp && existsSync(claudeBundledNode)) {
    return {
      type: 'bundled',
      nodePath: claudeBundledNode,
      pythonPath: getBundledRuntimePath('claude-desktop', 'python')
    };
  }

  // Otherwise → system runtime
  return {
    type: 'system',
    nodePath: 'node',      // Use system node
    pythonPath: 'python3'  // Use system python
  };
}
```

### **Command Translation:**

```typescript
// src/utils/runtime-detector.ts

export function getRuntimeForExtension(command: string): string {
  const runtime = detectRuntime();

  // If command is 'node' → translate to actual runtime
  if (command === 'node' || command.endsWith('/node')) {
    return runtime.nodePath;
  }

  // If command is 'python3' → translate to actual runtime
  if (command === 'python3' || command === 'python') {
    return runtime.pythonPath || command;
  }

  // Other commands → return as-is
  return command;
}
```

### **Client Registry (Platform-Specific Paths):**

```typescript
// src/utils/client-registry.ts

export const CLIENT_REGISTRY = {
  'claude-desktop': {
    bundledRuntimes: {
      node: {
        darwin: '/Applications/Claude.app/.../node',
        win32: '%LOCALAPPDATA%/Programs/Claude/.../node.exe',
        linux: '/opt/Claude/resources/.../node'
      },
      python: {
        darwin: '/Applications/Claude.app/.../python3',
        win32: '%LOCALAPPDATA%/Programs/Claude/.../python.exe',
        linux: '/opt/Claude/resources/.../python3'
      }
    }
  }
};
```

**NCP knows where Claude Desktop hides its runtimes on every platform!**

---

## 🎨 The Analogy That Makes It Click

**Static Runtime (Wrong Approach) = Directions Written on Paper** 🗺️

```
"Go to 123 Main Street"
[Next week: Store moves to 456 Oak Avenue]
[Your paper still says 123 Main Street]
[You arrive at wrong location]
[Confused why nothing works]
```

**Dynamic Runtime (NCP Approach) = GPS Navigation** 📍

```
"Navigate to Store"
[GPS finds current location of store]
[Store moves? GPS updates automatically]
[You always arrive at correct location]
[Never confused, always works]
```

**NCP doesn't remember where runtime was. It detects where runtime IS.**

---

## 🧪 See It Yourself

Try this experiment:

### **Test 1: Detect Current Runtime**

```bash
# Install NCP and check logs
ncp list

# Look for startup logs:
[Runtime Detection]
  Type: bundled
  Node: /Applications/Claude.app/.../node
  Python: /Applications/Claude.app/.../python3
  Process execPath: /Applications/Claude.app/.../node
```

### **Test 2: Toggle Setting and See Adaptation**

```bash
# Before toggle
[Claude Desktop: "Use Built-in Node.js for MCP" = ON]
[Restart Claude Desktop]
[Check logs: Type: bundled]

# Toggle setting
[Claude Desktop: "Use Built-in Node.js for MCP" = OFF]
[Restart Claude Desktop]
[Check logs: Type: system]

# NCP adapted automatically!
```

### **Test 3: Install via npm and Compare**

```bash
# Install NCP globally
npm install -g @portel/ncp

# Run and check detection
ncp list

# Look for startup logs:
[Runtime Detection]
  Type: system
  Node: node
  Python: python3
  Process execPath: /usr/local/bin/node

# Different runtime detected! But MCPs will still use system runtime consistently.
```

---

## 🚀 Why This Changes Everything

### **Before Runtime Detection (Chaos):**

```
User installs .mcpb extension
→ Works with bundled Node.js

User toggles "Use Built-in Node.js" setting
→ MCPs try to use system Node.js
→ Version mismatch
→ Cryptic errors
→ User spends 2 hours debugging

User gives up, uninstalls
```

### **After Runtime Detection (Harmony):**

```
User installs .mcpb extension
→ Works with bundled Node.js

User toggles "Use Built-in Node.js" setting
→ NCP detects change on next startup
→ MCPs automatically switch to system Node.js
→ Everything still works

User: "That was easy! It just works."
```

**The difference:** **Adaptability.**

---

## 🎯 Why Dynamic (Not Static)?

**Question:** Why detect runtime on every startup? Why not cache the result?

**Answer:** Because the runtime isn't stable!

**Things that change runtime:**

1. **User toggles settings** (most common)
2. **User updates Claude Desktop** (bundled runtime path changes)
3. **User updates system Node.js** (system runtime version changes)
4. **User switches installation method** (.mcpb → npm or vice versa)
5. **CI/CD environment** (different runtime per environment)

**Static detection** = Breaks when any of these change (frequent!)

**Dynamic detection** = Adapts automatically (resilient!)

**Cost:** ~5ms on startup to detect runtime.

**Benefit:** Never breaks due to runtime changes.

**Obvious trade-off.**

---

## 🔒 Edge Cases Handled

### **Edge Case 1: Claude Desktop Not Installed**

```typescript
// getBundledRuntimePath returns null if Claude Desktop not found
if (!claudeBundledNode) {
  // Fall back to system runtime
  return { type: 'system', nodePath: 'node', pythonPath: 'python3' };
}
```

### **Edge Case 2: Bundled Runtime Missing**

```typescript
// Check if bundled runtime actually exists
if (claudeBundledNode && existsSync(claudeBundledNode)) {
  // Use it
} else {
  // Fall back to system
}
```

### **Edge Case 3: Running in Test Environment**

```typescript
// In tests, use system runtime (for predictability)
if (process.env.NODE_ENV === 'test') {
  return { type: 'system', nodePath: 'node', pythonPath: 'python3' };
}
```

### **Edge Case 4: Symlinked Global Install**

```typescript
// process.execPath may itself be behind a symlink — resolve it first
// /usr/local/bin/ncp (symlink) → /usr/lib/node_modules/ncp/... (real)
const realPath = realpathSync(process.execPath);
// Use real path for detection
```

**NCP handles all the weird scenarios. You don't have to think about it.**

---

## 📚 Deep Dive

Want the full technical implementation?

- **Runtime Detector:** [src/utils/runtime-detector.ts]
- **Client Registry:** [src/utils/client-registry.ts]
- **Command Translation:** [Runtime detection summary]
- **Platform Support:** [docs/technical/platform-detection.md]

---

## 🔗 Next Story

**[Story 6: Official Registry →](06-official-registry.md)**

*How AI discovers 2,200+ MCPs without you lifting a finger*

---

## 💬 Questions?

**Q: What if I want to force a specific runtime?**

A: Set environment variable: `NCP_FORCE_RUNTIME=/path/to/node`. NCP will respect it. (Advanced users only!)

**Q: Can I see which runtime was detected?**

A: Yes! Check NCP startup logs or run `ncp --debug`. Shows detected runtime type and paths.

**Q: What if Claude Desktop's bundled runtime is broken?**

A: NCP will detect it's not working (spawn fails) and log error. You can manually configure system runtime as fallback.

**Q: Does runtime detection work for Python MCPs?**

A: Yes! NCP detects both Node.js and Python bundled runtimes. Same logic applies.

**Q: What about other runtimes (Go, Rust, etc.)?**

A: MCPs in compiled languages (Go, Rust) don't need runtime detection. They're self-contained binaries. NCP just runs them as-is.

---

**[← Previous Story](04-double-click-install.md)** | **[Back to Story Index](../README.md#the-six-stories)** | **[Next Story →](06-official-registry.md)**

```

--------------------------------------------------------------------------------
/test/integration/mcp-client-simulation.test.cjs:
--------------------------------------------------------------------------------

```
#!/usr/bin/env node
/**
 * Integration Test: MCP Client Simulation
 *
 * Simulates real AI client behavior (Claude Desktop, Perplexity) to catch bugs
 * that unit tests miss. This should be run before EVERY release.
 *
 * Tests:
 * 1. Server responds to initialize immediately
 * 2. tools/list returns tools < 100ms even during indexing
 * 3. find returns partial results during indexing (not empty)
 * 4. Cache profileHash persists across restarts
 * 5. Second startup uses cache (no re-indexing)
 */

const { spawn } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');

// Test configuration
const NCP_DIR = path.join(os.homedir(), '.ncp');
const PROFILES_DIR = path.join(NCP_DIR, 'profiles');
const CACHE_DIR = path.join(NCP_DIR, 'cache');
const TEST_PROFILE = 'integration-test';
const TIMEOUT_MS = 10000;

// Ensure test profile exists
function setupTestProfile() {
  // Create .ncp directory structure
  if (!fs.existsSync(PROFILES_DIR)) {
    fs.mkdirSync(PROFILES_DIR, { recursive: true });
  }
  if (!fs.existsSync(CACHE_DIR)) {
    fs.mkdirSync(CACHE_DIR, { recursive: true });
  }

  // Create minimal test profile with filesystem MCP
  const profilePath = path.join(PROFILES_DIR, `${TEST_PROFILE}.json`);
  const testProfile = {
    mcpServers: {
      filesystem: {
        command: 'npx',
        args: ['@modelcontextprotocol/server-filesystem']
      }
    }
  };

  fs.writeFileSync(profilePath, JSON.stringify(testProfile, null, 2));
  logInfo(`Created test profile at ${profilePath}`);
}

// ANSI colors for output
const colors = {
  green: '\x1b[32m',
  red: '\x1b[31m',
  yellow: '\x1b[33m',
  blue: '\x1b[34m',
  reset: '\x1b[0m'
};

// Print one line: emoji prefix, message wrapped in the requested color.
const log = (emoji, message, color = 'reset') =>
  console.log(`${emoji} ${colors[color]}${message}${colors.reset}`);

// Severity-specific wrappers used throughout the suite.
const logError = (message) => log('❌', `FAIL: ${message}`, 'red');
const logSuccess = (message) => log('✓', message, 'green');
const logInfo = (message) => log('ℹ️', message, 'blue');

/**
 * Minimal MCP client over stdio: spawns the NCP server as a child process,
 * writes line-delimited JSON-RPC requests to its stdin, and collects parsed
 * JSON-RPC responses from its stdout.
 */
class MCPClientSimulator {
  constructor() {
    this.ncp = null;           // spawned child process handle
    this.responses = [];       // parsed JSON-RPC responses, in arrival order
    this.responseBuffer = '';  // partial stdout line carried between 'data' events
    this.requestId = 0;        // monotonically increasing JSON-RPC request id
  }

  /**
   * Spawn the NCP server (dist/index.js) with the integration-test profile.
   * Resolves ~100ms after spawn (no handshake wait); rejects only if the
   * process itself fails to start.
   */
  start() {
    return new Promise((resolve, reject) => {
      logInfo('Starting NCP MCP server...');

      this.ncp = spawn('node', ['dist/index.js', '--profile', TEST_PROFILE], {
        stdio: ['pipe', 'pipe', 'pipe'],
        env: {
          ...process.env,
          NCP_MODE: 'mcp',
          NO_COLOR: 'true',  // Disable colors in output
          NCP_DEBUG: 'true'  // Enable debug logging
        }
      });

      this.ncp.stdout.on('data', (data) => {
        // Accumulate chunks and split on newlines; the last array element may
        // be an incomplete line, so it is kept in the buffer for next time.
        this.responseBuffer += data.toString();
        const lines = this.responseBuffer.split('\n');

        lines.slice(0, -1).forEach(line => {
          if (line.trim()) {
            try {
              const response = JSON.parse(line);
              this.responses.push(response);
            } catch (e) {
              // Ignore non-JSON lines (logs, etc.)
            }
          }
        });

        this.responseBuffer = lines[lines.length - 1];
      });

      this.ncp.stderr.on('data', (data) => {
        // Collect stderr for debugging; only surface [DEBUG] lines.
        const msg = data.toString();
        if (msg.includes('[DEBUG]')) {
          console.log(msg.trim());
        }
      });

      this.ncp.on('error', reject);

      // Give it a moment to start
      setTimeout(resolve, 100);
    });
  }

  /**
   * Send a JSON-RPC 2.0 request over stdin.
   * @returns the request id, for use with waitForResponse().
   */
  sendRequest(method, params = {}) {
    this.requestId++;
    const request = {
      jsonrpc: '2.0',
      id: this.requestId,
      method,
      params
    };

    this.ncp.stdin.write(JSON.stringify(request) + '\n');
    return this.requestId;
  }

  /**
   * Poll every 10ms until a response with the given id has been collected,
   * or reject after timeoutMs.
   */
  waitForResponse(id, timeoutMs = 5000) {
    return new Promise((resolve, reject) => {
      const startTime = Date.now();

      const checkResponse = () => {
        const response = this.responses.find(r => r.id === id);
        if (response) {
          resolve(response);
          return;
        }

        if (Date.now() - startTime > timeoutMs) {
          reject(new Error(`Timeout waiting for response to request ${id}`));
          return;
        }

        setTimeout(checkResponse, 10);
      };

      checkResponse();
    });
  }

  /** Kill the child process and wait ~100ms for it to terminate. */
  async stop() {
    if (this.ncp) {
      this.ncp.kill();
      await new Promise(resolve => setTimeout(resolve, 100));
    }
  }
}

/**
 * Test 1: the server must answer `initialize` quickly (< 1s) and include a
 * protocolVersion in the result.
 */
async function test1_Initialize() {
  logInfo('Test 1: Initialize request responds immediately');

  const sim = new MCPClientSimulator();
  await sim.start();

  const t0 = Date.now();
  const reqId = sim.sendRequest('initialize', {
    protocolVersion: '2024-11-05',
    capabilities: {},
    clientInfo: { name: 'test-client', version: '1.0.0' }
  });

  const reply = await sim.waitForResponse(reqId);
  const elapsed = Date.now() - t0;

  await sim.stop();

  // Fail on protocol error, slow response, or a malformed result.
  if (reply.error) {
    logError(`Initialize failed: ${reply.error.message}`);
    return false;
  }
  if (elapsed > 1000) {
    logError(`Initialize took ${elapsed}ms (should be < 1000ms)`);
    return false;
  }
  if (!reply.result?.protocolVersion) {
    logError('Initialize response missing protocolVersion');
    return false;
  }

  logSuccess(`Initialize responded in ${elapsed}ms`);
  return true;
}

/**
 * Test 2: `tools/list` must respond in under 100ms even while the server is
 * still indexing, and must expose the `find` and `run` tools.
 */
async function test2_ToolsListDuringIndexing() {
  logInfo('Test 2: tools/list responds < 100ms even during indexing');

  const sim = new MCPClientSimulator();
  await sim.start();

  // Issue tools/list right away, while indexing is presumably in progress.
  const t0 = Date.now();
  const reply = await sim.waitForResponse(sim.sendRequest('tools/list'));
  const elapsed = Date.now() - t0;

  await sim.stop();

  if (reply.error) {
    logError(`tools/list failed: ${reply.error.message}`);
    return false;
  }
  if (elapsed > 100) {
    logError(`tools/list took ${elapsed}ms (should be < 100ms)`);
    return false;
  }

  const tools = reply.result?.tools;
  if (!tools || tools.length === 0) {
    logError('tools/list returned no tools');
    return false;
  }

  const names = tools.map(t => t.name);
  if (!names.includes('find') || !names.includes('run')) {
    logError(`tools/list missing required tools. Got: ${names.join(', ')}`);
    return false;
  }

  logSuccess(`tools/list responded in ${elapsed}ms with ${tools.length} tools`);
  return true;
}

/**
 * Test 3: a `find` issued during indexing must return either partial results
 * or an explicit "indexing" notice — never a blank or context-free empty reply.
 */
async function test3_FindDuringIndexing() {
  logInfo('Test 3: find returns partial results during indexing (not empty)');

  const sim = new MCPClientSimulator();
  await sim.start();

  // Fire a find immediately, mimicking clients (e.g. Perplexity) that search
  // before indexing has completed.
  const reqId = sim.sendRequest('tools/call', {
    name: 'find',
    arguments: { description: 'list files' }
  });
  const reply = await sim.waitForResponse(reqId, 10000);

  await sim.stop();

  if (reply.error) {
    logError(`find failed: ${reply.error.message}`);
    return false;
  }

  const text = reply.result?.content?.[0]?.text || '';

  // "No tools found" is only acceptable when accompanied by indexing context.
  if (text.includes('No tools found') && !text.includes('Indexing')) {
    logError('find returned empty without indexing context');
    return false;
  }
  if (text.length === 0) {
    logError('find returned empty response');
    return false;
  }

  const hasIndexingMessage = text.includes('Indexing in progress') || text.includes('indexing');
  const hasResults = text.includes('**') || text.includes('tools') || text.includes('MCP');

  if (!hasIndexingMessage && !hasResults) {
    logError('find response has neither indexing message nor results');
    return false;
  }

  logSuccess(`find returned ${hasResults ? 'partial results' : 'indexing message'}`);
  return true;
}

/**
 * Test 4: a cold start must write cache metadata containing a non-empty
 * profileHash. Timing-sensitive: relies on fixed sleeps for indexing and the
 * cache flush, so the statement order below matters.
 */
async function test4_CacheProfileHashPersists() {
  logInfo('Test 4: Cache profileHash persists correctly');

  // Clear cache first so this run starts cold.
  const metaPath = path.join(CACHE_DIR, `${TEST_PROFILE}-cache-meta.json`);
  const csvPath = path.join(CACHE_DIR, `${TEST_PROFILE}-tools.csv`);

  if (fs.existsSync(metaPath)) {
    fs.unlinkSync(metaPath);
  }
  if (fs.existsSync(csvPath)) {
    fs.unlinkSync(csvPath);
  }

  // Start server and let it create cache (a find call triggers indexing).
  const client1 = new MCPClientSimulator();
  await client1.start();

  const id1 = client1.sendRequest('tools/call', {
    name: 'find',
    arguments: {}
  });

  await client1.waitForResponse(id1, 10000);

  // Wait a bit for indexing to potentially complete
  await new Promise(resolve => setTimeout(resolve, 2000));

  await client1.stop();

  // Wait for cache to be finalized and written
  await new Promise(resolve => setTimeout(resolve, 1000));

  // Check cache metadata was written to disk.
  if (!fs.existsSync(metaPath)) {
    logError('Cache metadata file not created');
    logInfo(`Expected at: ${metaPath}`);

    // List what's in cache dir for debugging
    if (fs.existsSync(CACHE_DIR)) {
      const files = fs.readdirSync(CACHE_DIR);
      logInfo(`Files in cache dir: ${files.join(', ')}`);
    }

    return false;
  }

  const metadata = JSON.parse(fs.readFileSync(metaPath, 'utf-8'));

  // The hash is the cache-validity key; empty means invalidation on restart.
  if (!metadata.profileHash || metadata.profileHash === '') {
    logError(`profileHash is empty: "${metadata.profileHash}"`);
    return false;
  }

  logSuccess(`Cache profileHash saved: ${metadata.profileHash.substring(0, 16)}...`);
  return true;
}

/**
 * Test 5: restarting the server must reuse the existing cache — profileHash
 * must be unchanged after a second startup.
 *
 * NOTE(review): reads metaPath without an existence check, so this test
 * assumes test4 ran first and created the metadata file; it throws otherwise.
 */
async function test5_NoReindexingOnRestart() {
  logInfo('Test 5: Second startup uses cache (no re-indexing)');

  const metaPath = path.join(CACHE_DIR, `${TEST_PROFILE}-cache-meta.json`);

  // Get initial cache state
  const metaBefore = JSON.parse(fs.readFileSync(metaPath, 'utf-8'));
  const hashBefore = metaBefore.profileHash;
  const lastUpdatedBefore = metaBefore.lastUpdated;

  // Wait a moment to ensure timestamp would change if re-indexed
  await new Promise(resolve => setTimeout(resolve, 1000));

  // Start server again
  const client = new MCPClientSimulator();
  await client.start();

  const id = client.sendRequest('tools/call', {
    name: 'find',
    arguments: {}
  });

  await client.waitForResponse(id, 10000);
  await client.stop();

  // Wait for any potential cache updates
  await new Promise(resolve => setTimeout(resolve, 500));

  // Check cache wasn't regenerated: a changed hash means it was invalidated.
  const metaAfter = JSON.parse(fs.readFileSync(metaPath, 'utf-8'));
  const hashAfter = metaAfter.profileHash;

  if (hashBefore !== hashAfter) {
    logError(`profileHash changed on restart (cache invalidated):\n  Before: ${hashBefore}\n  After: ${hashAfter}`);
    return false;
  }

  // Note: lastUpdated might change slightly due to timestamp updates, that's OK
  // The key is profileHash stays the same

  logSuccess('Cache persisted correctly (profileHash unchanged on restart)');
  return true;
}

/**
 * Run every integration test in sequence and exit with a release verdict:
 * exit 0 when all pass, exit 1 on any failure.
 */
async function runAllTests() {
  const banner = '='.repeat(60);
  console.log('\n' + banner);
  console.log('🧪 NCP Integration Test Suite');
  console.log('   Simulating Real AI Client Behavior');
  console.log(banner + '\n');

  // Setup test environment
  setupTestProfile();

  const tests = [
    test1_Initialize,
    test2_ToolsListDuringIndexing,
    test3_FindDuringIndexing,
    test4_CacheProfileHashPersists,
    test5_NoReindexingOnRestart
  ];

  let passed = 0;
  let failed = 0;

  for (const test of tests) {
    let ok = false;
    try {
      ok = await test();
    } catch (error) {
      // A throw counts as a failure, with the test's name for context.
      logError(`${test.name} threw error: ${error.message}`);
    }
    ok ? passed++ : failed++;
    console.log(''); // Blank line between tests
  }

  console.log(banner);
  console.log(`📊 Results: ${passed} passed, ${failed} failed`);
  console.log(banner + '\n');

  if (failed > 0) {
    console.log('❌ INTEGRATION TESTS FAILED - DO NOT RELEASE\n');
    process.exit(1);
  } else {
    console.log('✅ ALL INTEGRATION TESTS PASSED - Safe to release\n');
    process.exit(0);
  }
}

// Cleanup on exit: the test cache is intentionally left in place (useful for
// debugging); enable the unlink below to remove it instead.
process.on('exit', () => {
  const metaPath = path.join(CACHE_DIR, `${TEST_PROFILE}-cache-meta.json`);
  if (fs.existsSync(metaPath)) {
    // Optionally clean up: fs.unlinkSync(metaPath);
  }
});

// Kick off the suite; an unhandled rejection aborts with a non-zero exit code.
runAllTests().catch((error) => {
  logError(`Test suite crashed: ${error.message}`);
  console.error(error);
  process.exit(1);
});

```

--------------------------------------------------------------------------------
/src/analytics/log-parser.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * NCP Analytics Log Parser
 * Parses real MCP session logs to extract performance and usage insights
 */

import { readFileSync, readdirSync, statSync } from 'fs';
import { basename, join } from 'path';
import * as os from 'os';

/** One MCP child-process session reconstructed from a weekly log file. */
export interface MCPSession {
  mcpName: string;         // parsed from the log filename: mcp-{name}-YYYYwWW.log
  startTime: Date;         // session start; may fall back to parse time when the header is missing
  endTime?: Date;          // estimated from the exit line, when present
  duration?: number;       // endTime - startTime in ms (estimate)
  toolCount?: number;      // tool count reported on the "Loaded MCP with N tools" line
  tools?: string[];        // tool names from that same line, when found
  exitCode?: number;       // process exit code, when an [EXIT] line was logged
  success: boolean;        // heuristic: clean/absent exit code, or tools loaded with output seen
  responseSize: number;    // cumulative length of JSON result lines on [STDOUT]
  errorMessages: string[]; // raw [STDERR] lines containing "Error" or "Failed"
}

/** Aggregated usage/performance report built from all parsed sessions. */
export interface AnalyticsReport {
  totalSessions: number;                 // count of parsed sessions
  uniqueMCPs: number;                    // distinct MCP names seen
  timeRange: { start: Date; end: Date }; // earliest/latest session start times
  successRate: number;                   // percentage (0-100)
  avgSessionDuration: number;            // ms, over sessions with a known duration
  totalResponseSize: number;             // summed responseSize across sessions
  topMCPsByUsage: Array<{ name: string; sessions: number; successRate: number }>;
  topMCPsByTools: Array<{ name: string; toolCount: number }>;
  performanceMetrics: {
    fastestMCPs: Array<{ name: string; avgDuration: number }>;
    slowestMCPs: Array<{ name: string; avgDuration: number }>;
    mostReliable: Array<{ name: string; successRate: number }>;
    leastReliable: Array<{ name: string; successRate: number }>;
  };
  dailyUsage: Record<string, number>;    // sessions per day key
  hourlyUsage: Record<number, number>;   // sessions per hour-of-day (0-23)
}

export class NCPLogParser {
  // Directory scanned for mcp-*.log files; fixed to the global NCP home.
  private logsDir: string;

  /**
   * Analytics always read from the global log directory (~/.ncp/logs) so
   * reports reflect real usage rather than a local development tree.
   */
  constructor() {
    // Always use global ~/.ncp/logs for analytics data
    // This ensures we analyze the real usage data, not local development data
    this.logsDir = join(os.homedir(), '.ncp', 'logs');
  }

  /**
   * Parse a single log file to extract session data.
   *
   * Splits the file on session-start separator lines while KEEPING each
   * separator attached to its session block, so parseSessionBlock can read the
   * real start timestamp. (A plain split discarded the separators, which made
   * the timestamp regex in parseSessionBlock unmatchable and forced every
   * session onto the current-time fallback.)
   *
   * @param filePath Path to a `mcp-{name}-YYYYwWW.log` file.
   * @returns Parsed sessions; empty array if the file cannot be read.
   */
  private parseLogFile(filePath: string): MCPSession[] {
    try {
      const content = readFileSync(filePath, 'utf-8');
      const sessions: MCPSession[] = [];

      // Extract MCP name from filename: mcp-{name}-2025w39.log
      // basename() is platform-safe; splitting on '/' broke on Windows paths.
      const fileName = basename(filePath);
      const mcpMatch = fileName.match(/mcp-(.+)-\d{4}w\d{2}\.log/);
      const mcpName = mcpMatch ? mcpMatch[1] : 'unknown';

      // Split with a capture group so separators are retained in the result:
      // [preamble, sep1, block1, sep2, block2, ...]
      const parts = content.split(/(--- MCP .+ Session Started: .+ ---)/);

      for (let i = 1; i < parts.length; i += 2) {
        // Re-attach the separator so the session's start time is parseable.
        const block = parts[i] + (parts[i + 1] ?? '');
        const session = this.parseSessionBlock(mcpName, block);
        if (session) {
          sessions.push(session);
        }
      }

      return sessions;
    } catch (error) {
      console.error(`Error parsing log file ${filePath}:`, error);
      return [];
    }
  }

  /**
   * Parse one session block into an MCPSession.
   *
   * Scans the block line-by-line for the tool-load summary, JSON result
   * output, stderr errors, and the process exit line. Returns null only if
   * parsing throws.
   *
   * NOTE(review): the sessionStartRegex only matches when the caller passes
   * the block WITH its "--- Session Started ---" separator line attached;
   * otherwise startTime silently falls back to the current time — verify the
   * caller's split behavior.
   */
  private parseSessionBlock(mcpName: string, block: string): MCPSession | null {
    try {
      const lines = block.split('\n').filter(line => line.trim());

      // Find session start time from the previous separator
      const sessionStartRegex = /--- MCP .+ Session Started: (.+) ---/;
      let startTime: Date | undefined;

      // Look for start time in the content before this block
      const startMatch = block.match(sessionStartRegex);
      if (startMatch) {
        startTime = new Date(startMatch[1]);
      } else {
        // Fallback: use first timestamp we can find
        const firstLine = lines[0];
        if (firstLine) {
          startTime = new Date(); // Use current time as fallback
        }
      }

      if (!startTime) return null;

      let toolCount = 0;
      let tools: string[] = [];
      let exitCode: number | undefined;
      let responseSize = 0;
      let errorMessages: string[] = [];
      let endTime: Date | undefined;

      for (const line of lines) {
        // Extract tool information from the "Loaded MCP with N tools: ..." line
        if (line.includes('Loaded MCP with') && line.includes('tools:')) {
          const toolMatch = line.match(/Loaded MCP with (\d+) tools: (.+)/);
          if (toolMatch) {
            toolCount = parseInt(toolMatch[1]);
            tools = toolMatch[2].split(', ').map(t => t.trim());
          }
        }

        // Extract JSON responses and their size (JSON-RPC result lines only)
        if (line.startsWith('[STDOUT]') && line.includes('{"result"')) {
          const jsonPart = line.substring('[STDOUT] '.length);
          responseSize += jsonPart.length;
        }

        // Extract errors (any stderr line mentioning Error/Failed)
        if (line.includes('[STDERR]') && (line.includes('Error') || line.includes('Failed'))) {
          errorMessages.push(line);
        }

        // Extract exit code; end time is a rough +5s estimate, not measured
        if (line.includes('[EXIT] Process exited with code')) {
          const exitMatch = line.match(/code (\d+)/);
          if (exitMatch) {
            exitCode = parseInt(exitMatch[1]);
            endTime = new Date(startTime.getTime() + 5000); // Estimate end time
          }
        }
      }

      // Calculate duration (estimated). Success is heuristic: a clean or
      // missing exit code, or evidence the MCP loaded tools and produced output.
      const duration = endTime ? endTime.getTime() - startTime.getTime() : undefined;
      const success = exitCode === 0 || exitCode === undefined || (toolCount > 0 && responseSize > 0);

      return {
        mcpName,
        startTime,
        endTime,
        duration,
        toolCount: toolCount || undefined,
        tools: tools.length > 0 ? tools : undefined,
        exitCode,
        success,
        responseSize,
        errorMessages
      };
    } catch (error) {
      return null;
    }
  }

  /**
   * Parse all log files and generate analytics report
   * @param options - Filter options for time range
   */
  async parseAllLogs(options?: {
    from?: Date;
    to?: Date;
    period?: number; // days
    today?: boolean;
  }): Promise<AnalyticsReport> {
    const sessions: MCPSession[] = [];

    try {
      const logFiles = readdirSync(this.logsDir)
        .filter(file => file.endsWith('.log'))
        .map(file => join(this.logsDir, file));

      console.log(`📊 Parsing ${logFiles.length} log files...`);

      // Calculate date range
      let fromDate: Date | undefined;
      let toDate: Date | undefined;

      if (options?.today) {
        // Today only
        fromDate = new Date();
        fromDate.setHours(0, 0, 0, 0);
        toDate = new Date();
        toDate.setHours(23, 59, 59, 999);
      } else if (options?.period) {
        // Last N days
        toDate = new Date();
        fromDate = new Date();
        fromDate.setDate(fromDate.getDate() - options.period);
        fromDate.setHours(0, 0, 0, 0);
      } else if (options?.from || options?.to) {
        // Custom range
        fromDate = options.from;
        toDate = options.to || new Date();

        // If toDate is provided, set to end of that day
        if (options?.to) {
          toDate = new Date(options.to);
          toDate.setHours(23, 59, 59, 999);
        }

        // If fromDate is provided, set to start of that day
        if (options?.from) {
          fromDate = new Date(options.from);
          fromDate.setHours(0, 0, 0, 0);
        }
      }

      for (const logFile of logFiles) {
        const fileSessions = this.parseLogFile(logFile);

        // Filter sessions by date range if specified
        const filteredSessions = fromDate || toDate
          ? fileSessions.filter(session => {
              if (fromDate && session.startTime < fromDate) return false;
              if (toDate && session.startTime > toDate) return false;
              return true;
            })
          : fileSessions;

        sessions.push(...filteredSessions);
      }

      if (fromDate || toDate) {
        const rangeDesc = options?.today
          ? 'today'
          : options?.period
          ? `last ${options.period} days`
          : `${fromDate?.toLocaleDateString() || 'start'} to ${toDate?.toLocaleDateString() || 'now'}`;
        console.log(`📅 Filtering for ${rangeDesc}: ${sessions.length} sessions`);
      }

      return this.generateReport(sessions);
    } catch (error) {
      console.error('Error reading logs directory:', error);
      return this.generateReport(sessions);
    }
  }

  /**
   * Generate a comprehensive analytics report from parsed MCP sessions.
   *
   * Aggregates per-MCP usage counts, success rates, max observed tool
   * counts and duration samples, then derives top-N rankings plus daily
   * and hourly usage histograms. Returns an all-zero report when no
   * sessions are provided.
   *
   * @param sessions Parsed sessions (some may lack startTime/duration).
   * @returns Aggregated analytics across all supplied sessions.
   */
  private generateReport(sessions: MCPSession[]): AnalyticsReport {
    if (sessions.length === 0) {
      // Empty report: keeps callers from having to null-check every field.
      return {
        totalSessions: 0,
        uniqueMCPs: 0,
        timeRange: { start: new Date(), end: new Date() },
        successRate: 0,
        avgSessionDuration: 0,
        totalResponseSize: 0,
        topMCPsByUsage: [],
        topMCPsByTools: [],
        performanceMetrics: {
          fastestMCPs: [],
          slowestMCPs: [],
          mostReliable: [],
          leastReliable: []
        },
        dailyUsage: {},
        hourlyUsage: {}
      };
    }

    // Basic metrics
    const totalSessions = sessions.length;
    const uniqueMCPs = new Set(sessions.map(s => s.mcpName)).size;
    const successfulSessions = sessions.filter(s => s.success).length;
    const successRate = (successfulSessions / totalSessions) * 100;

    // Time range — some sessions may lack a parsed startTime; exclude them.
    // Copy before sorting so the input array is not mutated.
    const sortedByTime = sessions
      .filter(s => s.startTime)
      .sort((a, b) => a.startTime.getTime() - b.startTime.getTime());
    const timeRange = {
      start: sortedByTime[0]?.startTime || new Date(),
      end: sortedByTime[sortedByTime.length - 1]?.startTime || new Date()
    };

    // Duration metrics — only sessions with a positive measured duration count.
    const sessionsWithDuration = sessions.filter(s => s.duration && s.duration > 0);
    const avgSessionDuration = sessionsWithDuration.length > 0
      ? sessionsWithDuration.reduce((sum, s) => sum + (s.duration || 0), 0) / sessionsWithDuration.length
      : 0;

    // Response size
    const totalResponseSize = sessions.reduce((sum, s) => sum + s.responseSize, 0);

    // Per-MCP usage statistics: session count, success count, max tool
    // count ever observed, and all duration samples for averaging.
    const mcpStats = new Map<string, { sessions: number; successes: number; totalTools: number; durations: number[] }>();

    for (const session of sessions) {
      const stats = mcpStats.get(session.mcpName) || { sessions: 0, successes: 0, totalTools: 0, durations: [] };
      stats.sessions++;
      if (session.success) stats.successes++;
      if (session.toolCount) stats.totalTools = Math.max(stats.totalTools, session.toolCount);
      if (session.duration && session.duration > 0) stats.durations.push(session.duration);
      mcpStats.set(session.mcpName, stats);
    }

    // Top MCPs by usage
    const topMCPsByUsage = Array.from(mcpStats.entries())
      .map(([name, stats]) => ({
        name,
        sessions: stats.sessions,
        successRate: (stats.successes / stats.sessions) * 100
      }))
      .sort((a, b) => b.sessions - a.sessions)
      .slice(0, 10);

    // Top MCPs by tool count
    const topMCPsByTools = Array.from(mcpStats.entries())
      .filter(([_, stats]) => stats.totalTools > 0)
      .map(([name, stats]) => ({
        name,
        toolCount: stats.totalTools
      }))
      .sort((a, b) => b.toolCount - a.toolCount)
      .slice(0, 10);

    // Performance metrics — MCPs that have at least one duration sample.
    const mcpPerformance = Array.from(mcpStats.entries())
      .map(([name, stats]) => ({
        name,
        avgDuration: stats.durations.length > 0 ? stats.durations.reduce((a, b) => a + b, 0) / stats.durations.length : 0,
        successRate: (stats.successes / stats.sessions) * 100
      }))
      .filter(m => m.avgDuration > 0);

    // Sort copies, not the shared array: Array.prototype.sort mutates in
    // place, so sorting mcpPerformance directly would make the second
    // ranking depend on the first sort having run.
    const fastestMCPs = [...mcpPerformance]
      .sort((a, b) => a.avgDuration - b.avgDuration)
      .slice(0, 5);

    const slowestMCPs = [...mcpPerformance]
      .sort((a, b) => b.avgDuration - a.avgDuration)
      .slice(0, 5);

    // Reliability rankings: require at least 3 sessions so one lucky or
    // unlucky call doesn't dominate. Computed once, ranked both ways.
    const reliability = Array.from(mcpStats.entries())
      .filter(([_, stats]) => stats.sessions >= 3)
      .map(([name, stats]) => ({
        name,
        successRate: (stats.successes / stats.sessions) * 100
      }));

    const mostReliable = [...reliability]
      .sort((a, b) => b.successRate - a.successRate)
      .slice(0, 5);

    const leastReliable = [...reliability]
      .sort((a, b) => a.successRate - b.successRate)
      .slice(0, 5);

    // Daily usage — skip sessions without a parsed startTime (same guard
    // as the time-range computation above) to avoid a TypeError.
    const dailyUsage: Record<string, number> = {};
    for (const session of sessions) {
      if (!session.startTime) continue;
      const day = session.startTime.toISOString().split('T')[0];
      dailyUsage[day] = (dailyUsage[day] || 0) + 1;
    }

    // Hourly usage (local time), same startTime guard.
    const hourlyUsage: Record<number, number> = {};
    for (const session of sessions) {
      if (!session.startTime) continue;
      const hour = session.startTime.getHours();
      hourlyUsage[hour] = (hourlyUsage[hour] || 0) + 1;
    }

    return {
      totalSessions,
      uniqueMCPs,
      timeRange,
      successRate,
      avgSessionDuration,
      totalResponseSize,
      topMCPsByUsage,
      topMCPsByTools,
      performanceMetrics: {
        fastestMCPs,
        slowestMCPs,
        mostReliable,
        leastReliable
      },
      dailyUsage,
      hourlyUsage
    };
  }
}
```

--------------------------------------------------------------------------------
/test/ecosystem-discovery-validation-simple.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Simple Ecosystem Discovery Validation
 * Tests that NCP can find relevant tools from our realistic MCP ecosystem
 */

import { DiscoveryEngine } from '../src/discovery/engine.js';

describe('Simple Ecosystem Discovery Validation', () => {
  let engine: DiscoveryEngine;

  // One-time setup: index a fixed 20-tool ecosystem spanning 10 fake MCPs
  // so every test below queries the same known corpus.
  beforeAll(async () => {
    engine = new DiscoveryEngine();
    await engine.initialize();

    // Clear any existing cached tools to ensure clean test environment
    await engine['ragEngine'].clearCache();

    // Create comprehensive ecosystem with 20 realistic tools
    const ecosystemTools = [
      // Database Operations
      { name: 'query', description: 'Execute SQL queries to retrieve data from PostgreSQL database tables. Find records, search data, analyze information.', mcpName: 'postgres-test' },
      { name: 'insert', description: 'Insert new records into PostgreSQL database tables. Store customer data, add new information, create records.', mcpName: 'postgres-test' },
      { name: 'execute_cypher', description: 'Execute Cypher queries on Neo4j graph database. Query relationships, find patterns, analyze connections.', mcpName: 'neo4j-test' },

      // Payment Processing
      { name: 'create_payment', description: 'Process credit card payments and charges from customers. Charge customer for order, process payment from customer.', mcpName: 'stripe-test' },
      { name: 'refund_payment', description: 'Process refunds for previously charged payments. Refund cancelled subscription, return customer money.', mcpName: 'stripe-test' },

      // Developer Tools
      { name: 'create_repository', description: 'Create a new GitHub repository with configuration options. Set up new project, initialize repository.', mcpName: 'github-test' },
      { name: 'create_issue', description: 'Create GitHub issues for bug reports and feature requests. Report bugs, request features, track tasks.', mcpName: 'github-test' },
      { name: 'commit_changes', description: 'Create Git commits to save changes to version history. Save progress, commit code changes, record modifications.', mcpName: 'git-test' },
      { name: 'create_branch', description: 'Create new Git branches for feature development and parallel work. Start new features, create development branches.', mcpName: 'git-test' },

      // File Operations
      { name: 'read_file', description: 'Read contents of files from local filesystem. Load configuration files, read text documents, access data files.', mcpName: 'filesystem-test' },
      { name: 'write_file', description: 'Write content to files on local filesystem. Create configuration files, save data, generate reports.', mcpName: 'filesystem-test' },
      { name: 'create_directory', description: 'Create new directories and folder structures. Organize files, set up project structure, create folder hierarchies.', mcpName: 'filesystem-test' },

      // Web Automation
      { name: 'click_element', description: 'Click on web page elements using selectors. Click buttons, links, form elements.', mcpName: 'playwright-test' },
      { name: 'take_screenshot', description: 'Capture screenshots of web pages for testing and documentation. Take page screenshots, save visual evidence.', mcpName: 'playwright-test' },
      { name: 'fill_form_field', description: 'Fill form inputs and text fields on web pages. Enter text, complete forms, input data.', mcpName: 'playwright-test' },

      // Cloud & Infrastructure
      { name: 'create_ec2_instance', description: 'Launch new EC2 virtual machine instances with configuration. Create servers, deploy applications to cloud.', mcpName: 'aws-test' },
      { name: 'upload_to_s3', description: 'Upload files and objects to S3 storage buckets. Store files in cloud, backup data, host static content.', mcpName: 'aws-test' },
      { name: 'run_container', description: 'Run Docker containers from images with configuration options. Deploy applications, start services.', mcpName: 'docker-test' },
      { name: 'send_message', description: 'Send messages to Slack channels or direct messages. Share updates, notify teams, communicate with colleagues.', mcpName: 'slack-test' },
      { name: 'web_search', description: 'Search the web using Brave Search API with privacy protection. Find information, research topics, get current data.', mcpName: 'brave-search-test' },
    ];

    // Group by MCP and index
    const toolsByMCP = new Map();
    for (const tool of ecosystemTools) {
      if (!toolsByMCP.has(tool.mcpName)) {
        toolsByMCP.set(tool.mcpName, []);
      }
      toolsByMCP.get(tool.mcpName).push({
        name: tool.name,
        description: tool.description
      });
    }

    // Index each MCP
    for (const [mcpName, tools] of toolsByMCP) {
      await engine['ragEngine'].indexMCP(mcpName, tools);
    }
  });

  // Each test here targets one domain and asserts that at least one
  // tool from the expected MCP surfaces in the top results.
  describe('Domain-Specific Discovery', () => {
    it('finds database tools for data queries', async () => {
      const results = await engine.findRelevantTools('query customer data from database', 8);
      expect(results.length).toBeGreaterThan(0);

      const hasDbTool = results.some(t =>
        (t.name.includes('postgres') && t.name.includes('query')) ||
        (t.name.includes('neo4j') && t.name.includes('cypher'))
      );
      expect(hasDbTool).toBeTruthy();
    });

    it('finds payment tools for financial operations', async () => {
      const results = await engine.findRelevantTools('process credit card payment', 8);
      expect(results.length).toBeGreaterThan(0);

      const hasPaymentTool = results.some(t =>
        t.name.includes('stripe') && (t.name.includes('payment') || t.name.includes('create'))
      );
      expect(hasPaymentTool).toBeTruthy();
    });

    it('finds version control tools for code management', async () => {
      const results = await engine.findRelevantTools('commit code changes', 8);
      expect(results.length).toBeGreaterThan(0);

      const hasGitTool = results.some(t =>
        t.name.includes('git') && t.name.includes('commit')
      );
      expect(hasGitTool).toBeTruthy();
    });

    it('finds file system tools for file operations', async () => {
      const results = await engine.findRelevantTools('save configuration to file', 8);
      expect(results.length).toBeGreaterThan(0);

      const hasFileTool = results.some(t =>
        t.name.includes('filesystem') && t.name.includes('write')
      );
      expect(hasFileTool).toBeTruthy();
    });

    it('finds web automation tools for browser tasks', async () => {
      const results = await engine.findRelevantTools('take screenshot of webpage', 8);
      expect(results.length).toBeGreaterThan(0);

      const hasWebTool = results.some(t =>
        t.name.includes('playwright') && t.name.includes('screenshot')
      );
      expect(hasWebTool).toBeTruthy();
    });

    it('finds cloud tools for infrastructure deployment', async () => {
      const results = await engine.findRelevantTools('deploy server to AWS cloud', 8);
      expect(results.length).toBeGreaterThan(0);

      // Debug: Log what tools are actually returned
      console.log('Cloud deployment query returned:', results.map(t => ({ name: t.name, confidence: t.confidence || 'N/A' })));

      const hasCloudTool = results.some(t =>
        t.name.includes('ec2') || t.name.includes('instance') || t.name.includes('container')
      );
      if (!hasCloudTool) {
        console.log('Expected to find tools with ec2/instance/container but got:', results.map(t => t.name));
      }
      expect(hasCloudTool).toBeTruthy();
    });
  });

  describe('Cross-Domain Scenarios', () => {
    it('handles complex multi-domain queries', async () => {
      const results = await engine.findRelevantTools('build and deploy web application with database', 12);
      expect(results.length).toBeGreaterThan(3);

      // Should find tools from multiple domains - check for any relevant tools
      const hasDeploymentTools = results.some(r =>
        r.name.includes('docker') || r.name.includes('aws') ||
        r.name.includes('git') || r.name.includes('github')
      );
      const hasDatabaseTools = results.some(r =>
        r.name.includes('postgres') || r.name.includes('neo4j')
      );
      const hasFileTools = results.some(r =>
        r.name.includes('filesystem') || r.name.includes('file')
      );

      // Should find tools from at least one relevant domain
      const foundRelevantTools = hasDeploymentTools || hasDatabaseTools || hasFileTools;
      expect(foundRelevantTools).toBeTruthy();
    });

    it('prioritizes relevant tools for specific contexts', async () => {
      const results = await engine.findRelevantTools('refund customer payment for cancelled order', 6);
      expect(results.length).toBeGreaterThan(0);

      // Refund should be ranked above create_payment when both appear,
      // since the query explicitly asks for a refund
      const refundTool = results.find(t => t.name.includes('refund'));
      const createTool = results.find(t => t.name.includes('create_payment'));

      if (refundTool && createTool) {
        expect(results.indexOf(refundTool)).toBeLessThan(results.indexOf(createTool));
      } else {
        expect(refundTool).toBeDefined(); // At minimum, refund tool should be found
      }
    });
  });

  describe('Ecosystem Scale Validation', () => {
    it('demonstrates improved specificity with diverse tool set', async () => {
      // Test that having diverse tools improves matching specificity
      const specificQuery = 'create GitHub issue for bug report';
      const results = await engine.findRelevantTools(specificQuery, 6);

      expect(results.length).toBeGreaterThan(0);

      // Should find the specific GitHub issue tool
      const issueTool = results.find(t =>
        t.name.includes('github') && t.name.includes('issue')
      );
      expect(issueTool).toBeDefined();
    });

    it('maintains performance with ecosystem scale', async () => {
      const start = Date.now();

      const results = await engine.findRelevantTools('analyze user data and generate report', 8);

      const duration = Date.now() - start;

      expect(results.length).toBeGreaterThan(0);
      expect(duration).toBeLessThan(1000); // Should complete under 1 second
    });

    it('provides consistent results across similar queries', async () => {
      const query1 = 'store files in cloud storage';
      const query2 = 'upload files to cloud bucket';

      const results1 = await engine.findRelevantTools(query1, 5);
      const results2 = await engine.findRelevantTools(query2, 5);

      expect(results1.length).toBeGreaterThan(0);
      expect(results2.length).toBeGreaterThan(0);

      // Should both find S3 upload tool
      const hasS3_1 = results1.some(t => t.name.includes('s3') || t.name.includes('upload'));
      const hasS3_2 = results2.some(t => t.name.includes('s3') || t.name.includes('upload'));

      expect(hasS3_1).toBeTruthy();
      expect(hasS3_2).toBeTruthy();
    });
  });

  describe('Coverage Validation', () => {
    it('can discover tools from all major ecosystem domains', async () => {
      // 8 domains total; each passes when any expected name pattern
      // appears in the returned tool names.
      const domains = [
        { name: 'Database', query: 'database query', expectPattern: ['query', 'cypher'] },
        { name: 'Payment', query: 'payment processing', expectPattern: ['payment', 'create_payment'] },
        { name: 'Version Control', query: 'git repository', expectPattern: ['repository', 'branch', 'commit'] },
        { name: 'File System', query: 'file operations', expectPattern: ['file', 'read_file', 'write_file'] },
        { name: 'Web Automation', query: 'browser automation', expectPattern: ['click', 'screenshot', 'fill'] },
        { name: 'Cloud', query: 'cloud deployment', expectPattern: ['ec2', 'container', 's3'] },
        { name: 'Communication', query: 'team messaging', expectPattern: ['message', 'send_message'] },
        { name: 'Search', query: 'web search', expectPattern: ['search', 'web_search'] }
      ];

      let successCount = 0;
      for (const domain of domains) {
        const results = await engine.findRelevantTools(domain.query, 8);

        if (results.length === 0) {
          console.log(`⚠️  ${domain.name} query "${domain.query}" returned no results`);
          continue;
        }

        const found = results.some(t =>
          domain.expectPattern.some(pattern => t.name.includes(pattern))
        );

        if (found) {
          successCount++;
        } else {
          console.log(`❌ ${domain.name} query "${domain.query}" failed pattern matching:`);
          console.log('  Expected patterns:', domain.expectPattern);
          console.log('  Got tools:', results.map(t => t.name));
        }
      }

      // Expect at least half of the 8 domains (4 of 8) to resolve correctly
      expect(successCount).toBeGreaterThanOrEqual(4);
    });
  });
});
```

--------------------------------------------------------------------------------
/src/cache/cache-patcher.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Cache Patcher for NCP
 * Provides incremental, MCP-by-MCP cache patching operations
 * Enables fast startup by avoiding full re-indexing
 */

import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
import { join } from 'path';
import { createHash } from 'crypto';
import { getCacheDirectory } from '../utils/ncp-paths.js';
import { logger } from '../utils/logger.js';

/** A single tool as reported by an MCP server's tool listing. */
export interface Tool {
  /** Tool identifier, unique within its MCP. */
  name: string;
  /** Human-readable description used for discovery and indexing. */
  description: string;
  /** JSON Schema describing the tool's input parameters, if provided. */
  inputSchema?: any;
}

/**
 * On-disk cache of discovered tool metadata, keyed by MCP name.
 * Persisted as `all-tools.json` in the cache directory.
 */
export interface ToolMetadataCache {
  /** Cache schema version (currently '1.0.0'); mismatches invalidate the cache. */
  version: string;
  profileHash: string;        // SHA256 of entire profile
  /** Epoch millis of the last save. */
  lastModified: number;
  mcps: {
    [mcpName: string]: {
      configHash: string;      // SHA256 of command+args+env
      /** Epoch millis when this MCP's tools were discovered. */
      discoveredAt: number;
      tools: Array<{
        name: string;
        description: string;
        inputSchema: any;
      }>;
      /** Identity reported by the server during initialization. */
      serverInfo: {
        name: string;
        version: string;
        description?: string;
      };
    }
  }
}

/**
 * On-disk cache of embedding vectors for tool descriptions.
 * Persisted as `embeddings.json` in the cache directory.
 */
export interface EmbeddingsCache {
  /** Cache schema version. */
  version: string;
  modelVersion: string;        // all-MiniLM-L6-v2
  /** Epoch millis of the last save. */
  lastModified: number;
  vectors: {
    [toolId: string]: number[];  // toolId = "mcpName:toolName"
  };
  /** Per-vector bookkeeping, parallel to `vectors` (same toolId keys). */
  metadata: {
    [toolId: string]: {
      mcpName: string;
      generatedAt: number;
      enhancedDescription: string;  // Used for generation
    }
  }
}

/** Connection settings for one MCP server (stdio or HTTP/SSE transport). */
export interface MCPConfig {
  command?: string;  // Optional: for stdio transport
  /** Arguments passed to `command` (stdio transport). */
  args?: string[];
  /** Environment variables set for the spawned process (stdio transport). */
  env?: Record<string, string>;
  url?: string;  // Optional: for HTTP/SSE transport
}

export class CachePatcher {
  private cacheDir: string;
  private toolMetadataCachePath: string;
  private embeddingsCachePath: string;
  private embeddingsMetadataCachePath: string;

  constructor() {
    this.cacheDir = getCacheDirectory();
    this.toolMetadataCachePath = join(this.cacheDir, 'all-tools.json');
    this.embeddingsCachePath = join(this.cacheDir, 'embeddings.json');
    this.embeddingsMetadataCachePath = join(this.cacheDir, 'embeddings-metadata.json');

    // Ensure cache directory exists
    if (!existsSync(this.cacheDir)) {
      mkdirSync(this.cacheDir, { recursive: true });
    }
  }

  /**
   * Generate SHA256 hash for MCP configuration.
   *
   * Covers every transport-relevant field: command/args/env for stdio
   * servers AND url for HTTP/SSE servers (previously url was omitted, so
   * changing a remote server's URL never invalidated the cache). When
   * `url` is undefined, JSON.stringify drops the key, so hashes for
   * stdio-only configs are unchanged from earlier versions.
   */
  generateConfigHash(config: MCPConfig): string {
    const hashInput = JSON.stringify({
      command: config.command,
      args: config.args || [],
      env: config.env || {},
      url: config.url
    });
    return createHash('sha256').update(hashInput).digest('hex');
  }

  /**
   * Generate SHA256 hash for entire profile (its mcpServers map).
   */
  generateProfileHash(profile: any): string {
    const hashInput = JSON.stringify(profile.mcpServers || {});
    return createHash('sha256').update(hashInput).digest('hex');
  }

  /**
   * Load a JSON cache file, falling back to `defaultValue` when the file
   * is missing or unparseable (corruption is logged, never thrown).
   */
  private async loadCache<T>(path: string, defaultValue: T): Promise<T> {
    try {
      if (!existsSync(path)) {
        logger.debug(`Cache file not found: ${path}, using default`);
        return defaultValue;
      }

      const content = readFileSync(path, 'utf-8');
      const parsed = JSON.parse(content);
      logger.debug(`Loaded cache from ${path}`);
      return parsed as T;
    } catch (error: any) {
      logger.warn(`Failed to load cache from ${path}: ${error.message}, using default`);
      return defaultValue;
    }
  }

  /**
   * Save cache with write-to-temp-then-rename so readers never observe a
   * partially written file.
   */
  private async saveCache<T>(path: string, data: T): Promise<void> {
    try {
      const tmpPath = `${path}.tmp`;
      const content = JSON.stringify(data, null, 2);

      // Write to temporary file first
      writeFileSync(tmpPath, content, 'utf-8');

      // Atomic replacement
      await this.atomicReplace(tmpPath, path);

      logger.debug(`Saved cache to ${path}`);
    } catch (error: any) {
      logger.error(`Failed to save cache to ${path}: ${error.message}`);
      throw error;
    }
  }

  /**
   * Atomic file replacement (rename) to prevent corruption.
   */
  private async atomicReplace(tmpPath: string, finalPath: string): Promise<void> {
    const fs = await import('fs/promises');
    await fs.rename(tmpPath, finalPath);
  }

  /**
   * Load tool metadata cache (empty default when absent/corrupt).
   */
  async loadToolMetadataCache(): Promise<ToolMetadataCache> {
    const defaultCache: ToolMetadataCache = {
      version: '1.0.0',
      profileHash: '',
      lastModified: Date.now(),
      mcps: {}
    };

    return await this.loadCache(this.toolMetadataCachePath, defaultCache);
  }

  /**
   * Save tool metadata cache (stamps lastModified).
   */
  async saveToolMetadataCache(cache: ToolMetadataCache): Promise<void> {
    cache.lastModified = Date.now();
    await this.saveCache(this.toolMetadataCachePath, cache);
  }

  /**
   * Load embeddings cache (empty default when absent/corrupt).
   */
  async loadEmbeddingsCache(): Promise<EmbeddingsCache> {
    const defaultCache: EmbeddingsCache = {
      version: '1.0.0',
      modelVersion: 'all-MiniLM-L6-v2',
      lastModified: Date.now(),
      vectors: {},
      metadata: {}
    };

    return await this.loadCache(this.embeddingsCachePath, defaultCache);
  }

  /**
   * Save embeddings cache (stamps lastModified).
   */
  async saveEmbeddingsCache(cache: EmbeddingsCache): Promise<void> {
    cache.lastModified = Date.now();
    await this.saveCache(this.embeddingsCachePath, cache);
  }

  /**
   * Patch tool metadata cache - Add (or overwrite) one MCP's entry.
   */
  async patchAddMCP(mcpName: string, config: MCPConfig, tools: Tool[], serverInfo: any): Promise<void> {
    logger.info(`🔧 Patching tool metadata cache: adding ${mcpName}`);

    const cache = await this.loadToolMetadataCache();
    const configHash = this.generateConfigHash(config);

    cache.mcps[mcpName] = {
      configHash,
      discoveredAt: Date.now(),
      tools: tools.map(tool => ({
        name: tool.name,
        description: tool.description || 'No description available',
        inputSchema: tool.inputSchema || {}
      })),
      serverInfo: {
        name: serverInfo?.name || mcpName,
        version: serverInfo?.version || '1.0.0',
        description: serverInfo?.description
      }
    };

    await this.saveToolMetadataCache(cache);
    logger.info(`✅ Added ${tools.length} tools from ${mcpName} to metadata cache`);
  }

  /**
   * Patch tool metadata cache - Remove one MCP's entry (no-op with a
   * warning when the MCP is not cached).
   */
  async patchRemoveMCP(mcpName: string): Promise<void> {
    logger.info(`🔧 Patching tool metadata cache: removing ${mcpName}`);

    const cache = await this.loadToolMetadataCache();

    if (cache.mcps[mcpName]) {
      const toolCount = cache.mcps[mcpName].tools.length;
      delete cache.mcps[mcpName];
      await this.saveToolMetadataCache(cache);
      logger.info(`✅ Removed ${toolCount} tools from ${mcpName} from metadata cache`);
    } else {
      logger.warn(`MCP ${mcpName} not found in metadata cache`);
    }
  }

  /**
   * Patch tool metadata cache - Update MCP (implemented as remove + add
   * for a clean replacement of the whole entry).
   */
  async patchUpdateMCP(mcpName: string, config: MCPConfig, tools: Tool[], serverInfo: any): Promise<void> {
    logger.info(`🔧 Patching tool metadata cache: updating ${mcpName}`);

    // Remove then add for clean update
    await this.patchRemoveMCP(mcpName);
    await this.patchAddMCP(mcpName, config, tools, serverInfo);
  }

  /**
   * Patch embeddings cache - Add vectors for one MCP's tools.
   *
   * @param toolEmbeddings Map of toolId ("mcpName:toolName") to objects
   *   carrying `embedding` (Float32Array or number[]) and optionally
   *   `enhancedDescription`. Entries without an embedding are skipped.
   */
  async patchAddEmbeddings(mcpName: string, toolEmbeddings: Map<string, any>): Promise<void> {
    logger.info(`🔧 Patching embeddings cache: adding ${mcpName} vectors`);

    const cache = await this.loadEmbeddingsCache();
    let addedCount = 0;

    for (const [toolId, embeddingData] of toolEmbeddings) {
      if (embeddingData && embeddingData.embedding) {
        // Convert Float32Array to regular array for JSON serialization
        cache.vectors[toolId] = Array.from(embeddingData.embedding);
        cache.metadata[toolId] = {
          mcpName,
          generatedAt: Date.now(),
          enhancedDescription: embeddingData.enhancedDescription || ''
        };
        addedCount++;
      }
    }

    await this.saveEmbeddingsCache(cache);
    logger.info(`✅ Added ${addedCount} embeddings for ${mcpName}`);
  }

  /**
   * Patch embeddings cache - Remove all vectors belonging to one MCP
   * (membership determined via the metadata's mcpName field).
   */
  async patchRemoveEmbeddings(mcpName: string): Promise<void> {
    logger.info(`🔧 Patching embeddings cache: removing ${mcpName} vectors`);

    const cache = await this.loadEmbeddingsCache();
    let removedCount = 0;

    // Remove all tool embeddings for this MCP
    const toolIdsToRemove = Object.keys(cache.metadata).filter(
      toolId => cache.metadata[toolId].mcpName === mcpName
    );

    for (const toolId of toolIdsToRemove) {
      delete cache.vectors[toolId];
      delete cache.metadata[toolId];
      removedCount++;
    }

    await this.saveEmbeddingsCache(cache);
    logger.info(`✅ Removed ${removedCount} embeddings for ${mcpName}`);
  }

  /**
   * Update profile hash in tool metadata cache.
   */
  async updateProfileHash(profileHash: string): Promise<void> {
    const cache = await this.loadToolMetadataCache();
    cache.profileHash = profileHash;
    await this.saveToolMetadataCache(cache);
    logger.debug(`Updated profile hash: ${profileHash.substring(0, 8)}...`);
  }

  /**
   * Validate if cache is current with profile.
   *
   * @returns true only when the cache exists, carries the expected
   *   schema version, and its stored profile hash matches
   *   `currentProfileHash`. Any error yields false (forces re-index).
   */
  async validateCacheWithProfile(currentProfileHash: string): Promise<boolean> {
    try {
      const cache = await this.loadToolMetadataCache();

      // Handle empty or corrupt cache
      if (!cache || !cache.profileHash) {
        logger.info('Cache validation failed: no profile hash found');
        return false;
      }

      // Handle version mismatches
      if (cache.version !== '1.0.0') {
        logger.info(`Cache validation failed: version mismatch (${cache.version} → 1.0.0)`);
        return false;
      }

      const isValid = cache.profileHash === currentProfileHash;

      if (!isValid) {
        logger.info(`Cache validation failed: profile changed (${cache.profileHash?.substring(0, 8)}... → ${currentProfileHash.substring(0, 8)}...)`);
      } else {
        logger.debug(`Cache validation passed: ${currentProfileHash.substring(0, 8)}...`);
      }

      return isValid;
    } catch (error: any) {
      logger.warn(`Cache validation error: ${error.message}`);
      return false;
    }
  }

  /**
   * Validate cache integrity and repair if needed.
   *
   * Currently detection-only: `repaired` is always false and callers are
   * expected to trigger a re-index when `valid` is false.
   */
  async validateAndRepairCache(): Promise<{ valid: boolean; repaired: boolean }> {
    try {
      const stats = await this.getCacheStats();

      if (!stats.toolMetadataExists) {
        logger.warn('Tool metadata cache missing');
        return { valid: false, repaired: false };
      }

      const cache = await this.loadToolMetadataCache();

      // Check for corruption
      if (!cache.mcps || typeof cache.mcps !== 'object') {
        logger.warn('Cache corruption detected: invalid mcps structure');
        return { valid: false, repaired: false };
      }

      // Check for missing tools
      let hasMissingTools = false;
      for (const [mcpName, mcpData] of Object.entries(cache.mcps)) {
        if (!Array.isArray(mcpData.tools)) {
          logger.warn(`Cache corruption detected: invalid tools array for ${mcpName}`);
          hasMissingTools = true;
        }
      }

      if (hasMissingTools) {
        logger.warn('Cache has missing or invalid tool data');
        return { valid: false, repaired: false };
      }

      logger.debug('Cache integrity validation passed');
      return { valid: true, repaired: false };

    } catch (error: any) {
      logger.error(`Cache validation failed: ${error.message}`);
      return { valid: false, repaired: false };
    }
  }

  /**
   * Get cache statistics (existence, entry counts, last save time).
   * Never throws: unreadable caches simply report zero counts.
   */
  async getCacheStats(): Promise<{
    toolMetadataExists: boolean;
    embeddingsExists: boolean;
    mcpCount: number;
    toolCount: number;
    embeddingCount: number;
    lastModified: Date | null;
  }> {
    const toolMetadataExists = existsSync(this.toolMetadataCachePath);
    const embeddingsExists = existsSync(this.embeddingsCachePath);

    let mcpCount = 0;
    let toolCount = 0;
    let embeddingCount = 0;
    let lastModified: Date | null = null;

    if (toolMetadataExists) {
      try {
        const cache = await this.loadToolMetadataCache();
        mcpCount = Object.keys(cache.mcps).length;
        toolCount = Object.values(cache.mcps).reduce((sum, mcp) => sum + mcp.tools.length, 0);
        lastModified = new Date(cache.lastModified);
      } catch (error) {
        // Ignore errors for stats
      }
    }

    if (embeddingsExists) {
      try {
        const cache = await this.loadEmbeddingsCache();
        embeddingCount = Object.keys(cache.vectors).length;
      } catch (error) {
        // Ignore errors for stats
      }
    }

    return {
      toolMetadataExists,
      embeddingsExists,
      mcpCount,
      toolCount,
      embeddingCount,
      lastModified
    };
  }
}
```

--------------------------------------------------------------------------------
/test/tool-schema-parser.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Comprehensive Tests for ToolSchemaParser
 * Following ncp-oss3 patterns for 95%+ coverage
 */

import { describe, it, expect } from '@jest/globals';
import { ToolSchemaParser, ParameterInfo } from '../src/services/tool-schema-parser';

describe('ToolSchemaParser - Comprehensive Coverage', () => {

  // Fixture: a JSON Schema exercising every primitive type the parser reports
  // (string/number/boolean), with a mix of required and optional fields.
  const sampleSchema = {
    properties: {
      path: {
        type: 'string',
        description: 'File path to read'
      },
      encoding: {
        type: 'string',
        description: 'File encoding (optional)'
      },
      maxSize: {
        type: 'number',
        description: 'Maximum file size in bytes'
      },
      recursive: {
        type: 'boolean',
        description: 'Whether to read recursively'
      }
    },
    required: ['path', 'maxSize']
  };

  // Degenerate fixtures for edge-case tests.
  const emptySchema = {};
  const noPropertiesSchema = { required: ['something'] };
  // Properties present but no `required` array: all parameters are optional.
  const noRequiredSchema = {
    properties: {
      optional1: { type: 'string' },
      optional2: { type: 'number' }
    }
  };

  // Core happy-path coverage for parseParameters(): every entry in
  // `properties` becomes a ParameterInfo, with `required` derived from the
  // schema's `required` array and `type`/`description` passed through.
  describe('🎯 Parameter Parsing - Core Functionality', () => {
    it('should parse complete schema with all parameter types', () => {
      const params = ToolSchemaParser.parseParameters(sampleSchema);

      expect(params).toHaveLength(4);

      // Check path parameter (required string)
      const pathParam = params.find(p => p.name === 'path');
      expect(pathParam).toEqual({
        name: 'path',
        type: 'string',
        required: true,
        description: 'File path to read'
      });

      // Check encoding parameter (optional string)
      const encodingParam = params.find(p => p.name === 'encoding');
      expect(encodingParam).toEqual({
        name: 'encoding',
        type: 'string',
        required: false,
        description: 'File encoding (optional)'
      });

      // Check maxSize parameter (required number)
      const maxSizeParam = params.find(p => p.name === 'maxSize');
      expect(maxSizeParam).toEqual({
        name: 'maxSize',
        type: 'number',
        required: true,
        description: 'Maximum file size in bytes'
      });

      // Check recursive parameter (optional boolean)
      const recursiveParam = params.find(p => p.name === 'recursive');
      expect(recursiveParam).toEqual({
        name: 'recursive',
        type: 'boolean',
        required: false,
        description: 'Whether to read recursively'
      });
    });

    it('should handle schema with missing properties', () => {
      const params = ToolSchemaParser.parseParameters(noPropertiesSchema);
      expect(params).toEqual([]);
    });

    it('should handle schema with no required array', () => {
      const params = ToolSchemaParser.parseParameters(noRequiredSchema);
      expect(params).toHaveLength(2);

      params.forEach(param => {
        expect(param.required).toBe(false);
      });
    });

    it('should handle properties without type information', () => {
      // Missing `type` maps to the sentinel string 'unknown'; a missing
      // `description` is passed through as undefined.
      const schemaWithoutTypes = {
        properties: {
          mystery1: { description: 'Unknown type parameter' },
          mystery2: { /* no type or description */ }
        },
        required: ['mystery1']
      };

      const params = ToolSchemaParser.parseParameters(schemaWithoutTypes);
      expect(params).toHaveLength(2);

      const mystery1 = params.find(p => p.name === 'mystery1');
      expect(mystery1).toEqual({
        name: 'mystery1',
        type: 'unknown',
        required: true,
        description: 'Unknown type parameter'
      });

      const mystery2 = params.find(p => p.name === 'mystery2');
      expect(mystery2).toEqual({
        name: 'mystery2',
        type: 'unknown',
        required: false,
        description: undefined
      });
    });
  });

  // Malformed-input coverage: parseParameters() must never throw, returning
  // [] for anything that is not a well-formed schema object.
  describe('🎯 Edge Cases and Error Handling', () => {
    it('should handle null and undefined schemas', () => {
      expect(ToolSchemaParser.parseParameters(null)).toEqual([]);
      expect(ToolSchemaParser.parseParameters(undefined)).toEqual([]);
    });

    it('should handle non-object schemas', () => {
      expect(ToolSchemaParser.parseParameters('string')).toEqual([]);
      expect(ToolSchemaParser.parseParameters(123)).toEqual([]);
      expect(ToolSchemaParser.parseParameters([])).toEqual([]);
      expect(ToolSchemaParser.parseParameters(true)).toEqual([]);
    });

    it('should handle empty schema object', () => {
      expect(ToolSchemaParser.parseParameters(emptySchema)).toEqual([]);
    });

    it('should handle schema with null/undefined properties', () => {
      const badSchema = {
        properties: null,
        required: undefined
      };
      expect(ToolSchemaParser.parseParameters(badSchema)).toEqual([]);
    });

    it('should handle schema with non-array required field', () => {
      // A non-array `required` is ignored, so the parameter is optional.
      const invalidRequiredSchema = {
        properties: {
          param1: { type: 'string' }
        },
        required: 'not-an-array'
      };
      const params = ToolSchemaParser.parseParameters(invalidRequiredSchema);
      expect(params).toHaveLength(1);
      expect(params[0].required).toBe(false);
    });
  });

  // getRequiredParameters(): subset of parseParameters() where required=true.
  describe('🎯 Required Parameters Filtering', () => {
    it('should extract only required parameters', () => {
      const requiredParams = ToolSchemaParser.getRequiredParameters(sampleSchema);

      expect(requiredParams).toHaveLength(2);
      expect(requiredParams.map(p => p.name)).toEqual(['path', 'maxSize']);

      requiredParams.forEach(param => {
        expect(param.required).toBe(true);
      });
    });

    it('should return empty array for schema with no required parameters', () => {
      const requiredParams = ToolSchemaParser.getRequiredParameters(noRequiredSchema);
      expect(requiredParams).toEqual([]);
    });

    it('should handle invalid schemas in getRequiredParameters', () => {
      expect(ToolSchemaParser.getRequiredParameters(null)).toEqual([]);
      expect(ToolSchemaParser.getRequiredParameters({})).toEqual([]);
    });
  });

  // getOptionalParameters(): subset of parseParameters() where required=false.
  describe('🎯 Optional Parameters Filtering', () => {
    it('should extract only optional parameters', () => {
      const optionalParams = ToolSchemaParser.getOptionalParameters(sampleSchema);

      expect(optionalParams).toHaveLength(2);
      expect(optionalParams.map(p => p.name)).toEqual(['encoding', 'recursive']);

      optionalParams.forEach(param => {
        expect(param.required).toBe(false);
      });
    });

    it('should return all parameters when none are required', () => {
      const optionalParams = ToolSchemaParser.getOptionalParameters(noRequiredSchema);
      expect(optionalParams).toHaveLength(2);

      optionalParams.forEach(param => {
        expect(param.required).toBe(false);
      });
    });

    it('should handle invalid schemas in getOptionalParameters', () => {
      expect(ToolSchemaParser.getOptionalParameters(null)).toEqual([]);
      expect(ToolSchemaParser.getOptionalParameters({})).toEqual([]);
    });
  });

  // hasRequiredParameters(): boolean predicate — true only for a schema with
  // a non-empty `required` array; false for empty, invalid, or missing.
  describe('🎯 Required Parameters Detection', () => {
    it('should detect schemas with required parameters', () => {
      expect(ToolSchemaParser.hasRequiredParameters(sampleSchema)).toBe(true);
    });

    it('should detect schemas without required parameters', () => {
      expect(ToolSchemaParser.hasRequiredParameters(noRequiredSchema)).toBe(false);
      expect(ToolSchemaParser.hasRequiredParameters(emptySchema)).toBe(false);
    });

    it('should handle edge cases in hasRequiredParameters', () => {
      expect(ToolSchemaParser.hasRequiredParameters(null)).toBe(false);
      expect(ToolSchemaParser.hasRequiredParameters(undefined)).toBe(false);
      expect(ToolSchemaParser.hasRequiredParameters('string')).toBe(false);
      expect(ToolSchemaParser.hasRequiredParameters(123)).toBe(false);
    });

    it('should handle schema with empty required array', () => {
      const emptyRequiredSchema = {
        properties: { param1: { type: 'string' } },
        required: []
      };
      expect(ToolSchemaParser.hasRequiredParameters(emptyRequiredSchema)).toBe(false);
    });

    it('should handle schema with non-array required field', () => {
      const invalidRequiredSchema = {
        properties: { param1: { type: 'string' } },
        required: 'not-an-array'
      };
      expect(ToolSchemaParser.hasRequiredParameters(invalidRequiredSchema)).toBe(false);
    });
  });

  // countParameters(): returns { total, required, optional } tallies, with
  // the invariant total === required + optional (all zeros for bad input).
  describe('🎯 Parameter Counting', () => {
    it('should count all parameter types correctly', () => {
      const counts = ToolSchemaParser.countParameters(sampleSchema);

      expect(counts).toEqual({
        total: 4,
        required: 2,
        optional: 2
      });
    });

    it('should count parameters in schema with no required fields', () => {
      const counts = ToolSchemaParser.countParameters(noRequiredSchema);

      expect(counts).toEqual({
        total: 2,
        required: 0,
        optional: 2
      });
    });

    it('should count parameters in schema with all required fields', () => {
      const allRequiredSchema = {
        properties: {
          param1: { type: 'string' },
          param2: { type: 'number' }
        },
        required: ['param1', 'param2']
      };

      const counts = ToolSchemaParser.countParameters(allRequiredSchema);

      expect(counts).toEqual({
        total: 2,
        required: 2,
        optional: 0
      });
    });

    it('should handle empty schemas in countParameters', () => {
      expect(ToolSchemaParser.countParameters(emptySchema)).toEqual({
        total: 0,
        required: 0,
        optional: 0
      });

      expect(ToolSchemaParser.countParameters(null)).toEqual({
        total: 0,
        required: 0,
        optional: 0
      });
    });
  });

  // getParameter(): case-sensitive lookup of a single parameter by name;
  // undefined for misses and for invalid schemas.
  describe('🎯 Individual Parameter Lookup', () => {
    it('should find existing parameters by name', () => {
      const pathParam = ToolSchemaParser.getParameter(sampleSchema, 'path');
      expect(pathParam).toEqual({
        name: 'path',
        type: 'string',
        required: true,
        description: 'File path to read'
      });

      const encodingParam = ToolSchemaParser.getParameter(sampleSchema, 'encoding');
      expect(encodingParam).toEqual({
        name: 'encoding',
        type: 'string',
        required: false,
        description: 'File encoding (optional)'
      });
    });

    it('should return undefined for non-existent parameters', () => {
      expect(ToolSchemaParser.getParameter(sampleSchema, 'nonexistent')).toBeUndefined();
      expect(ToolSchemaParser.getParameter(sampleSchema, '')).toBeUndefined();
    });

    it('should handle invalid schemas in getParameter', () => {
      expect(ToolSchemaParser.getParameter(null, 'any')).toBeUndefined();
      expect(ToolSchemaParser.getParameter({}, 'any')).toBeUndefined();
      expect(ToolSchemaParser.getParameter('invalid', 'any')).toBeUndefined();
    });

    it('should handle case-sensitive parameter names', () => {
      expect(ToolSchemaParser.getParameter(sampleSchema, 'Path')).toBeUndefined();
      expect(ToolSchemaParser.getParameter(sampleSchema, 'PATH')).toBeUndefined();
      expect(ToolSchemaParser.getParameter(sampleSchema, 'path')).toBeDefined();
    });
  });

  // Structural coverage: nested objects and arrays are reported as a single
  // top-level parameter (inner `properties`/`items` are not recursed into),
  // and property names with punctuation/spaces are preserved verbatim.
  describe('🎯 Complex Schema Scenarios', () => {
    it('should handle nested object schemas', () => {
      const nestedSchema = {
        properties: {
          config: {
            type: 'object',
            description: 'Configuration object',
            properties: {
              nested: { type: 'string' }
            }
          }
        },
        required: ['config']
      };

      const params = ToolSchemaParser.parseParameters(nestedSchema);
      expect(params).toHaveLength(1);
      expect(params[0]).toEqual({
        name: 'config',
        type: 'object',
        required: true,
        description: 'Configuration object'
      });
    });

    it('should handle array type schemas', () => {
      const arraySchema = {
        properties: {
          items: {
            type: 'array',
            description: 'List of items',
            items: { type: 'string' }
          }
        },
        required: ['items']
      };

      const params = ToolSchemaParser.parseParameters(arraySchema);
      expect(params[0]).toEqual({
        name: 'items',
        type: 'array',
        required: true,
        description: 'List of items'
      });
    });

    it('should handle schemas with special characters in property names', () => {
      const specialSchema = {
        properties: {
          'kebab-case': { type: 'string' },
          'snake_case': { type: 'number' },
          'dot.notation': { type: 'boolean' },
          'space name': { type: 'string' }
        },
        required: ['kebab-case', 'space name']
      };

      const params = ToolSchemaParser.parseParameters(specialSchema);
      expect(params).toHaveLength(4);

      const kebabParam = params.find(p => p.name === 'kebab-case');
      expect(kebabParam?.required).toBe(true);

      const spaceParam = params.find(p => p.name === 'space name');
      expect(spaceParam?.required).toBe(true);

      const snakeParam = params.find(p => p.name === 'snake_case');
      expect(snakeParam?.required).toBe(false);
    });
  });
});
```

--------------------------------------------------------------------------------
/docs/stories/06-official-registry.md:
--------------------------------------------------------------------------------

```markdown
# 🌐 Story 6: Official Registry

*How AI discovers 2,200+ MCPs without you lifting a finger*

**Reading time:** 2 minutes

---

## 😤 The Pain

You need a database MCP. Here's what you have to do today:

**The Manual Discovery Process:**

```
Step 1: Google "MCP database"
→ Find blog post from 3 months ago
→ List is outdated

Step 2: Visit Smithery.ai
→ Browse through categories
→ 2,200+ MCPs to wade through
→ No way to preview without installing

Step 3: Find promising MCP
→ Click to GitHub repo
→ Read README (hopefully it's good)
→ Find npm package name
→ Hope it's maintained

Step 4: Copy installation command
→ npm install -g @someone/mcp-postgres
→ Still not sure if it's the right one

Step 5: Configure it
→ Add to config file
→ Restart Claude Desktop
→ Test it
→ Realize it's not what you needed

Step 6: Remove and try another
→ Repeat steps 3-5 multiple times
→ 2 hours wasted
```

**Questions you can't easily answer:**
- Which database MCPs are **official** vs community?
- Which are **actively maintained**?
- Which are **most popular**?
- What **env vars** do they need?
- How do I install them **correctly**?

**You're on your own. Good luck.**

---

## 🌐 The Journey

NCP connects to the **Official MCP Registry** - Anthropic's central directory of vetted MCPs. Your AI can search it directly.

### **Discovery Through Conversation:**

**You:** "Find database MCPs"

**AI:** [Calls `ncp:import` with discovery mode]

```typescript
ncp:import({
  from: "discovery",
  source: "database"
})
```

**NCP:** [Searches registry.modelcontextprotocol.io]

**AI shows you:**

```
Found 8 database MCPs from official registry:

1. ⭐ @modelcontextprotocol/server-postgres
   PostgreSQL database integration
   Status: Official | Downloads: 1,240

2. 📦 @modelcontextprotocol/server-sqlite
   SQLite database integration
   Status: Official | Downloads: 890

3. 📦 @modelcontextprotocol/server-mongodb
   MongoDB database integration
   Status: Community | Downloads: 456

4. 📦 mcp-mysql-server
   MySQL database integration
   Status: Community | Downloads: 234

[... 4 more ...]

Which would you like to install?
```

**You:** "Install 1 and 2"

**AI:** [Calls import with selection]

```typescript
ncp:import({
  from: "discovery",
  source: "database",
  selection: "1,2"
})
```

**NCP:** [Imports PostgreSQL and SQLite MCPs with correct configs]

**Result:**
```
✅ Installed @modelcontextprotocol/server-postgres
✅ Installed @modelcontextprotocol/server-sqlite

Both MCPs ready to use! If they require credentials, use clipboard
security pattern (Story 2) to configure API keys safely.
```

**Total time: 30 seconds.** (vs 2 hours manually)

---

## ✨ The Magic

What you get with registry integration:

### **🔍 AI-Powered Discovery**
- **Search by intent:** "Find file tools" not "grep filesystem npm"
- **Semantic matching:** Registry understands what you need
- **Natural language:** No technical keywords required
- **Conversational:** Back-and-forth with AI to refine results

### **⭐ Curated Results**
- **Official badge:** Shows Anthropic-maintained MCPs
- **Download counts:** See what's popular and trusted
- **Status indicators:** Official vs Community vs Experimental
- **Version info:** Always get latest stable version

### **📦 One-Click Install**
- **Select by number:** "Install 1, 3, and 5"
- **Range selection:** "Install 1-5"
- **Install all:** "Install *"
- **Batch import:** Multiple MCPs installed in parallel

### **✅ Correct Configuration**
- **Registry knows the command:** `npx` or `node` or custom
- **Registry knows the args:** Package identifier, required flags
- **Registry knows env vars:** Shows what credentials you need
- **No guessing:** NCP gets it right the first time

### **🔒 Safe Credentials**
- **Registry shows:** "This MCP needs GITHUB_TOKEN"
- **You provide:** Via clipboard security pattern (Story 2)
- **AI never sees:** Your actual token
- **Works seamlessly:** Discovery + secure config in one flow

---

## 🔍 How It Works (The Technical Story)

### **Registry API:**

```typescript
// NCP talks to official MCP Registry
const REGISTRY_BASE = 'https://registry.modelcontextprotocol.io/v0';

// Search endpoint (paths are relative to REGISTRY_BASE, which already includes /v0)
GET /servers?limit=50
→ Returns: List of all MCPs with metadata

// Details endpoint
GET /servers/{encoded_name}
→ Returns: Full details including env vars, packages, etc.
```

### **Search Flow:**

```typescript
// User: "Find database MCPs"
// AI calls: ncp:import({ from: "discovery", source: "database" })

// Step 1: Search registry
const results = await fetch(`${REGISTRY_BASE}/servers?limit=50`);
const allServers = await results.json();

// Step 2: Filter by query
const filtered = allServers.servers.filter(s =>
  s.server.name.toLowerCase().includes('database') ||
  s.server.description?.toLowerCase().includes('database')
);

// Step 3: Format as numbered list
const candidates = filtered.map((server, index) => ({
  number: index + 1,
  name: server.server.name,
  displayName: extractShortName(server.server.name),
  description: server.server.description,
  status: server._meta?.['io.modelcontextprotocol.registry/official']?.status,
  downloads: getDownloadCount(server), // From registry metadata
  version: server.server.version
}));

// Return to AI for display
```

### **Import Flow:**

```typescript
// User: "Install 1 and 3"
// AI calls: ncp:import({ from: "discovery", source: "database", selection: "1,3" })

// Step 1: Parse selection
const selected = parseSelection("1,3", candidates);
// Returns: [candidates[0], candidates[2]]

// Step 2: Get detailed info for each
for (const candidate of selected) {
  const details = await fetch(`${REGISTRY_BASE}/servers/${encodeURIComponent(candidate.name)}`);
  const server = await details.json();

  // Extract install config
  const pkg = server.server.packages[0];
  const config = {
    command: pkg.runtimeHint || 'npx',
    args: [pkg.identifier],
    env: {} // User provides via clipboard if needed
  };

  // Import using internal add command
  await internalAdd(candidate.displayName, config);
}
```

### **Caching:**

```typescript
// Registry responses cached for 5 minutes
const CACHE_TTL = 5 * 60 * 1000;

// First search: Hits network (~200ms)
ncp:import({ from: "discovery", source: "database" })

// Repeat search within 5 min: Hits cache (0ms)
ncp:import({ from: "discovery", source: "database" })

// After 5 min: Cache expires, fetches fresh data
```

---

## 🎨 The Analogy That Makes It Click

**Manual Discovery = Library Without Card Catalog** 📚

```
You walk into library with 2,200 books.
No organization. No search system. No librarian.
You wander the aisles hoping to find what you need.
Read book spines one by one.
Pull out books to check if they're relevant.
3 hours later: Found 2 books, not sure if they're the best.
```

**Registry Discovery = Amazon Search** 🔍

```
You open Amazon.
Type: "database book"
See: Reviews, ratings, bestsellers, "customers also bought"
Filter: By rating, by relevance, by date
Click: Buy recommended book
5 minutes later: Book on the way, confident it's what you need.
```

**Registry gives MCPs the search/discovery experience of modern marketplaces.**

---

## 🧪 See It Yourself

Try this experiment:

### **Test 1: Search Registry**

```bash
# Manual way (old)
[Open browser]
[Go to smithery.ai]
[Search "filesystem"]
[Read through results]
[Copy npm command]
[Run in terminal]
[Total time: 5 minutes]

# Registry way (new)
You: "Find filesystem MCPs"
AI: [Shows numbered list from registry]
You: "Install 1"
AI: [Installs in seconds]
[Total time: 30 seconds]
```

### **Test 2: Compare Official vs Community**

```
You: "Find GitHub MCPs"

AI shows:
1. ⭐ @modelcontextprotocol/server-github [Official]
2. 📦 github-mcp-enhanced [Community]
3. 📦 mcp-github-toolkit [Community]

You can see at a glance which is official/supported!
```

### **Test 3: Batch Install**

```
You: "Find AI reasoning MCPs"

AI shows:
1. sequential-thinking
2. memory
3. thinking-protocol
4. context-manager

You: "Install all"
AI: [Installs 1-4 in parallel]

Done in seconds!
```

---

## 🚀 Why This Changes Everything

### **Before Registry (Fragmented Discovery):**

**The ecosystem was scattered:**
- Some MCPs on Smithery.ai
- Some on GitHub awesome lists
- Some only documented in blog posts
- No central source of truth
- No quality indicators
- No official vs community distinction

**Finding MCPs was hard. Choosing the right one was harder.**

### **After Registry (Unified Discovery):**

**The ecosystem is organized:**
- ✅ All MCPs in central registry (registry.modelcontextprotocol.io)
- ✅ Clear official vs community badges
- ✅ Download counts show popularity
- ✅ Correct install commands included
- ✅ AI can search and install directly
- ✅ One source of truth for all MCPs

**Finding MCPs is easy. Choosing the right one is obvious.**

---

## 🎯 Selection Syntax

NCP supports flexible selection formats:

```typescript
// Individual numbers
selection: "1,3,5"
→ Installs: #1, #3, #5

// Ranges
selection: "1-5"
→ Installs: #1, #2, #3, #4, #5

// Mixed
selection: "1,3,7-10"
→ Installs: #1, #3, #7, #8, #9, #10

// All results
selection: "*"
→ Installs: Everything shown

// Just one
selection: "1"
→ Installs: #1 only
```

**Natural syntax. No programming knowledge required.**

---

## 📊 Registry Metadata

What registry provides per MCP:

```typescript
{
  server: {
    name: "io.github.modelcontextprotocol/server-filesystem",
    description: "File system operations",
    version: "0.2.0",
    repository: {
      url: "https://github.com/modelcontextprotocol/servers",
      type: "git"
    },
    packages: [{
      identifier: "@modelcontextprotocol/server-filesystem",
      version: "0.2.0",
      runtimeHint: "npx",
      environmentVariables: [
        {
          name: "ROOT_PATH",
          description: "Root directory for file operations",
          isRequired: true
        }
      ]
    }]
  },
  _meta: {
    'io.modelcontextprotocol.registry/official': {
      status: "official"  // or "community"
    }
  }
}
```

**Registry tells NCP exactly how to install and configure each MCP.**

---

## 🔒 Security Considerations

**Q: Can malicious MCPs enter the registry?**

**A: Registry has curation process:**

1. **Official MCPs:** Maintained by Anthropic, fully vetted
2. **Community MCPs:** User-submitted, reviewed before listing
3. **Each MCP shows status:** Official vs Community badge visible
4. **Source code linked:** GitHub repo always shown
5. **Download counts:** Popular = more eyes = more security

**Best practices:**

- ✅ Prefer official MCPs when available
- ✅ Check GitHub repo before installing community MCPs
- ✅ Review source code if handling sensitive data
- ✅ Start with high-download-count MCPs (battle-tested)

**Registry doesn't execute code. It's a directory. You're still in control of what runs.**

---

## 📚 Deep Dive

Want the full technical implementation?

- **Registry Client:** [src/services/registry-client.ts]
- **Discovery Mode:** [src/internal-mcps/ncp-management.ts] (import tool)
- **Selection Parser:** parsing of `selection` strings (`1,3`, `1-5`, `*`) in the import flow
- **API Docs:** [https://registry.modelcontextprotocol.io/](https://registry.modelcontextprotocol.io/)

---

## 🔗 Complete the Journey

**[← Back to Story 1: Dream and Discover](01-dream-and-discover.md)**

You've now read all 6 core stories that make NCP special:

1. ✅ **Dream and Discover** - AI searches by intent, not by browsing tools
2. ✅ **Secrets in Plain Sight** - Clipboard handshake keeps credentials safe
3. ✅ **Sync and Forget** - Auto-imports Claude Desktop MCPs forever
4. ✅ **Double-Click Install** - .mcpb makes installation feel native
5. ✅ **Runtime Detective** - Adapts to your Node.js runtime automatically
6. ✅ **Official Registry** - Discovers 2,200+ MCPs through conversation

**Together, these stories explain why NCP transforms how you work with MCPs.**

---

## 💬 Questions?

**Q: How often is registry updated?**

A: Registry is live. New MCPs appear as soon as they're approved. NCP caches results for 5 minutes, then fetches fresh data.

**Q: Can I search for specific features?**

A: Yes! Try: "Find MCPs with email capabilities" or "Find MCPs for web scraping". Semantic search works across name + description.

**Q: What if registry is down?**

A: NCP falls back gracefully. You can still use existing MCPs and install new ones manually via `ncp add`.

**Q: Can I submit my MCP to registry?**

A: Yes! Visit [registry.modelcontextprotocol.io](https://registry.modelcontextprotocol.io/) for submission guidelines. (Process managed by Anthropic)

**Q: What about MCPs not in registry?**

A: You can still install them manually: `ncp add myserver npx my-custom-mcp`. Registry is for discovery convenience, not a requirement.

---

**[← Previous Story](05-runtime-detective.md)** | **[Back to Story Index](../README.md#the-six-stories)**

---

## 🎉 What's Next?

Now that you understand how NCP works through these six stories, you're ready to:

1. **[Install NCP →](../README.md#installation)** - Get started in 30 seconds
2. **[Try the examples →](../README.md#test-drive)** - See it in action
3. **[Read technical docs →](../technical/)** - Deep dive into implementation
4. **[Contribute →](../../CONTRIBUTING.md)** - Help make NCP even better

**Welcome to the NCP community!** 🚀

```

--------------------------------------------------------------------------------
/src/analytics/visual-formatter.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * NCP Visual Analytics Formatter
 * Enhanced terminal output with CLI charts and graphs
 */

import chalk from 'chalk';
import { AnalyticsReport } from './log-parser.js';

export class VisualAnalyticsFormatter {
  /**
   * Format the full analytics dashboard with visual charts.
   *
   * Renders key metrics, daily usage trends, top-MCP distribution,
   * performance distribution, and value/environmental *estimates* as
   * ANSI-colored terminal output.
   *
   * NOTE(review): the savings/environmental numbers below are rough
   * heuristics (hard-coded tokens-per-session, $/token, kWh/connection
   * constants) — presented as estimates, not measurements.
   *
   * @param report Parsed analytics data (shape defined in log-parser.ts).
   * @returns Multi-line dashboard string ready to print.
   */
  static async formatVisualDashboard(report: AnalyticsReport): Promise<string> {
    const output: string[] = [];

    // Header with enhanced styling
    output.push('');
    output.push(chalk.bold.cyan('🚀 NCP Impact Analytics Dashboard (Visual)'));
    output.push(chalk.dim('═'.repeat(60)));
    output.push('');

    // Overview Section with Key Metrics
    output.push(chalk.bold.white('📊 KEY METRICS OVERVIEW'));
    output.push('');

    // Each metric bar is scaled against the largest metric value so the
    // bars are comparable within this section (units intentionally mixed).
    const metrics = [
      { label: 'Total Sessions', value: report.totalSessions, unit: 'sessions', color: chalk.green },
      { label: 'Unique MCPs', value: report.uniqueMCPs, unit: 'servers', color: chalk.cyan },
      { label: 'Success Rate', value: Math.round(report.successRate), unit: '%', color: chalk.yellow },
      { label: 'Response Data', value: Math.round(report.totalResponseSize / 1024 / 1024), unit: 'MB', color: chalk.blue }
    ];

    const maxMetric = Math.max(...metrics.map(m => m.value));
    for (const metric of metrics) {
      const bar = this.createHorizontalBar(metric.value, maxMetric, 25);
      output.push(`${metric.color(metric.label.padEnd(15))}: ${bar} ${metric.color(metric.value.toLocaleString())} ${chalk.dim(metric.unit)}`);
    }
    output.push('');

    // Usage Trends Chart — only shown when there are enough days to plot.
    if (Object.keys(report.dailyUsage).length > 3) {
      output.push(chalk.bold.white('📈 DAILY USAGE TRENDS'));
      output.push('');

      // Sort by date key (ISO-style keys assumed — localeCompare gives
      // chronological order for them), then keep only the counts.
      const dailyData = Object.entries(report.dailyUsage)
        .sort(([a], [b]) => a.localeCompare(b))
        .map(([_, usage]) => usage);

      if (dailyData.length > 1) {
        // Create simple ASCII line chart
        const chart = this.createLineChart(dailyData, 8, 40);
        output.push(chalk.green(chart));
        output.push(chalk.dim('   └─ Sessions per day over time'));
      }
      output.push('');
    }

    // Top MCPs Usage Chart
    if (report.topMCPsByUsage.length > 0) {
      output.push(chalk.bold.white('🔥 TOP MCP USAGE DISTRIBUTION'));
      output.push('');

      const topMCPs = report.topMCPsByUsage.slice(0, 8);
      const maxSessions = Math.max(...topMCPs.map(mcp => mcp.sessions));

      for (const mcp of topMCPs) {
        const percentage = ((mcp.sessions / report.totalSessions) * 100).toFixed(1);
        const bar = this.createColorfulBar(mcp.sessions, maxSessions, 30);
        // Health icon thresholds: >=95% healthy, >=80% warning, else failing.
        const successIcon = mcp.successRate >= 95 ? '✅' : mcp.successRate >= 80 ? '⚠️' : '❌';

        output.push(`${chalk.cyan(mcp.name.padEnd(20))} ${bar} ${chalk.white(mcp.sessions.toString().padStart(3))} ${chalk.dim(`(${percentage}%)`)} ${successIcon}`);
      }
      output.push('');
    }

    // Performance Distribution
    if (report.performanceMetrics.fastestMCPs.length > 0) {
      output.push(chalk.bold.white('⚡ PERFORMANCE DISTRIBUTION'));
      output.push('');

      // Combine both extremes of the spectrum into one sample set.
      const performanceData = report.performanceMetrics.fastestMCPs.concat(report.performanceMetrics.slowestMCPs);
      const durations = performanceData.map(mcp => mcp.avgDuration).filter(d => d > 0);

      if (durations.length > 3) {
        // Create performance distribution chart (capped at 20 samples)
        const chart = this.createLineChart(durations.slice(0, 20), 6, 35);
        output.push(chalk.yellow(chart));
        output.push(chalk.dim('   └─ Response times across MCPs (ms)'));
      }
      output.push('');
    }

    // Value Delivered Section with Visual Impact
    output.push(chalk.bold.white('💰 VALUE IMPACT VISUALIZATION (ESTIMATES)'));
    output.push('');

    // Heuristic savings model:
    //   without NCP ≈ sessions × MCPs × 100 tokens (every MCP loaded per session)
    //   with NCP    ≈ sessions × 50 tokens (single unified endpoint)
    //   cost        ≈ $0.002 per 1k tokens
    const estimatedTokensWithoutNCP = report.totalSessions * report.uniqueMCPs * 100;
    const estimatedTokensWithNCP = report.totalSessions * 50;
    const tokenSavings = estimatedTokensWithoutNCP - estimatedTokensWithNCP;
    const costSavings = (tokenSavings / 1000) * 0.002;

    // Visual representation of savings
    const savingsData = [
      { label: 'Without NCP', value: estimatedTokensWithoutNCP, color: chalk.red },
      { label: 'With NCP', value: estimatedTokensWithNCP, color: chalk.green }
    ];

    const maxTokens = Math.max(...savingsData.map(s => s.value));
    for (const saving of savingsData) {
      const bar = this.createHorizontalBar(saving.value, maxTokens, 40);
      output.push(`${saving.label.padEnd(12)}: ${bar} ${saving.color((saving.value / 1000000).toFixed(1))}M tokens`);
    }

    output.push('');
    output.push(`💎 ${chalk.bold.green((tokenSavings / 1000000).toFixed(1))}M tokens saved = ${chalk.bold.green('$' + costSavings.toFixed(2))} cost reduction`);
    output.push(`🧠 ${chalk.bold.green((((report.uniqueMCPs - 1) / report.uniqueMCPs) * 100).toFixed(1) + '%')} cognitive load reduction`);
    output.push('');

    // Environmental Impact with Visual Scale
    output.push(chalk.bold.white('🌱 ENVIRONMENTAL IMPACT SCALE (ROUGH ESTIMATES)'));
    output.push('');

    // Heuristic model: 0.0002 kWh per avoided connection, 0.5 kg CO₂ per kWh.
    const sessionsWithoutNCP = report.totalSessions * report.uniqueMCPs;
    const computeReduction = sessionsWithoutNCP - report.totalSessions;
    const estimatedEnergyKWh = computeReduction * 0.0002;
    const estimatedCO2kg = estimatedEnergyKWh * 0.5;

    // Visual representation of environmental savings
    const envData = [
      { label: 'Energy Saved', value: estimatedEnergyKWh, unit: 'kWh', icon: '⚡' },
      { label: 'CO₂ Avoided', value: estimatedCO2kg, unit: 'kg', icon: '🌍' },
      { label: 'Connections Saved', value: computeReduction / 1000, unit: 'k', icon: '🔌' }
    ];

    const maxEnvValue = Math.max(...envData.map(e => e.value));
    for (const env of envData) {
      const bar = this.createGreenBar(env.value, maxEnvValue, 25);
      output.push(`${env.icon} ${env.label.padEnd(18)}: ${bar} ${chalk.green(env.value.toFixed(1))} ${chalk.dim(env.unit)}`);
    }
    output.push('');

    // Footer with enhanced tips
    output.push(chalk.bold.white('💡 INTERACTIVE COMMANDS'));
    output.push('');
    output.push(chalk.dim('  📊 ') + chalk.cyan('ncp analytics performance') + chalk.dim(' - Detailed performance metrics'));
    output.push(chalk.dim('  📁 ') + chalk.cyan('ncp analytics export') + chalk.dim(' - Export data to CSV'));
    output.push(chalk.dim('  🔄 ') + chalk.cyan('ncp analytics dashboard') + chalk.dim(' - Refresh this dashboard'));
    output.push('');

    // BUGFIX: was '\\n' (a literal backslash-n), which collapsed the whole
    // dashboard onto one garbled line. Join with real newlines, matching
    // createLineChart's behavior.
    return output.join('\n');
  }

  /**
   * Create a horizontal progress bar: green filled segment + dim empty
   * segment, `width` characters total. Renders empty when max <= 0.
   */
  private static createHorizontalBar(value: number, max: number, width: number): string {
    const percentage = max > 0 ? value / max : 0;
    const filled = Math.round(percentage * width);
    const empty = width - filled;

    const filledChar = '█';
    const emptyChar = '░';

    return chalk.green(filledChar.repeat(filled)) + chalk.dim(emptyChar.repeat(empty));
  }

  /**
   * Create a bar with a red→yellow→green gradient.
   *
   * The color of each cell depends on its position relative to the FULL
   * width (not the filled portion), so short bars stay red and only bars
   * that fill most of the width reach green — i.e. color encodes magnitude.
   */
  private static createColorfulBar(value: number, max: number, width: number): string {
    const percentage = max > 0 ? value / max : 0;
    const filled = Math.round(percentage * width);
    const empty = width - filled;

    // Create gradient effect based on value
    let coloredBar = '';
    for (let i = 0; i < filled; i++) {
      const progress = i / width;
      if (progress < 0.3) {
        coloredBar += chalk.red('█');
      } else if (progress < 0.6) {
        coloredBar += chalk.yellow('█');
      } else {
        coloredBar += chalk.green('█');
      }
    }

    return coloredBar + chalk.dim('░'.repeat(empty));
  }

  /**
   * Create a green-themed bar for environmental metrics
   * (green background, black foreground for the filled part).
   */
  private static createGreenBar(value: number, max: number, width: number): string {
    const percentage = max > 0 ? value / max : 0;
    const filled = Math.round(percentage * width);
    const empty = width - filled;

    const filledBar = chalk.bgGreen.black('█'.repeat(filled));
    const emptyBar = chalk.dim('░'.repeat(empty));

    return filledBar + emptyBar;
  }

  /**
   * Format the performance report with gauges, a speed leaderboard, and a
   * reliability leaderboard.
   *
   * @param report Parsed analytics data (shape defined in log-parser.ts).
   * @returns Multi-line performance report string ready to print.
   */
  static async formatVisualPerformance(report: AnalyticsReport): Promise<string> {
    const output: string[] = [];

    output.push('');
    output.push(chalk.bold.cyan('⚡ NCP Performance Analytics (Visual)'));
    output.push(chalk.dim('═'.repeat(50)));
    output.push('');

    // Performance Overview with Gauges
    output.push(chalk.bold.white('🎯 PERFORMANCE GAUGES'));
    output.push('');

    // Gauge scales: success out of 100%, response out of 10s (5s default
    // when avgSessionDuration is missing/zero), MCP count out of 2000.
    const performanceMetrics = [
      { label: 'Success Rate', value: report.successRate, max: 100, unit: '%', color: chalk.green },
      { label: 'Avg Response', value: report.avgSessionDuration || 5000, max: 10000, unit: 'ms', color: chalk.yellow },
      { label: 'MCPs Active', value: report.uniqueMCPs, max: 2000, unit: 'servers', color: chalk.cyan }
    ];

    for (const metric of performanceMetrics) {
      const gauge = this.createGauge(metric.value, metric.max);
      output.push(`${metric.label.padEnd(15)}: ${gauge} ${metric.color(metric.value.toFixed(1))}${metric.unit}`);
    }
    output.push('');

    // Performance Leaderboard with Visual Ranking
    if (report.performanceMetrics.fastestMCPs.length > 0) {
      output.push(chalk.bold.white('🏆 SPEED CHAMPIONS PODIUM'));
      output.push('');

      const topPerformers = report.performanceMetrics.fastestMCPs.slice(0, 5);
      const medals = ['🥇', '🥈', '🥉', '🏅', '🎖️'];

      for (let i = 0; i < topPerformers.length; i++) {
        const mcp = topPerformers[i];
        const medal = medals[i] || '⭐';
        const speedBar = this.createSpeedBar(mcp.avgDuration, 10000);

        output.push(`${medal} ${chalk.cyan(mcp.name.padEnd(20))} ${speedBar} ${chalk.bold.green(mcp.avgDuration.toFixed(0))}ms`);
      }
      output.push('');
    }

    // Reliability Champions
    if (report.performanceMetrics.mostReliable.length > 0) {
      output.push(chalk.bold.white('🛡️ RELIABILITY CHAMPIONS'));
      output.push('');

      const reliablePerformers = report.performanceMetrics.mostReliable.slice(0, 5);

      for (let i = 0; i < reliablePerformers.length; i++) {
        const mcp = reliablePerformers[i];
        const reliabilityBar = this.createReliabilityBar(mcp.successRate);
        // Shield tiers: >=99% hardened, >=95% solid, otherwise improving.
        const shield = mcp.successRate >= 99 ? '🛡️' : mcp.successRate >= 95 ? '🔰' : '⚡';

        output.push(`${shield} ${chalk.cyan(mcp.name.padEnd(20))} ${reliabilityBar} ${chalk.bold.green(mcp.successRate.toFixed(1))}%`);
      }
      output.push('');
    }

    // BUGFIX: was '\\n' (a literal backslash-n) — see formatVisualDashboard.
    return output.join('\n');
  }

  /**
   * Create a bracketed gauge. Fill color reflects overall level:
   * >80% green, >50% yellow, otherwise red. Values above max are clamped.
   */
  private static createGauge(value: number, max: number): string {
    const percentage = Math.min(value / max, 1);
    const gaugeWidth = 20;
    const filled = Math.round(percentage * gaugeWidth);

    // Create gauge with different colors based on performance
    let gauge = '[';
    for (let i = 0; i < gaugeWidth; i++) {
      if (i < filled) {
        if (percentage > 0.8) gauge += chalk.green('█');
        else if (percentage > 0.5) gauge += chalk.yellow('█');
        else gauge += chalk.red('█');
      } else {
        gauge += chalk.dim('░');
      }
    }
    gauge += ']';

    return gauge;
  }

  /**
   * Create speed bar (faster = longer green bar). The score is the
   * inverted duration ratio, floored at 0 for durations above maxDuration.
   */
  private static createSpeedBar(duration: number, maxDuration: number): string {
    const speed = Math.max(0, 1 - (duration / maxDuration)); // Invert: faster = higher score
    const barWidth = 15;
    const filled = Math.round(speed * barWidth);

    return chalk.green('█'.repeat(filled)) + chalk.dim('░'.repeat(barWidth - filled));
  }

  /**
   * Create reliability bar: blue fill proportional to success rate (0-100).
   */
  private static createReliabilityBar(successRate: number): string {
    const barWidth = 15;
    const filled = Math.round((successRate / 100) * barWidth);

    return chalk.blue('█'.repeat(filled)) + chalk.dim('░'.repeat(barWidth - filled));
  }

  /**
   * Create a simple ASCII line chart from a numeric series.
   *
   * Plots up to `width` points across `height` rows; a cell is drawn when
   * the value meets that row's threshold, using ╱ ╲ ─ to hint at the trend
   * versus the previous point (● for the first column). An axis line is
   * appended below the grid.
   */
  private static createLineChart(data: number[], height: number, width: number): string {
    if (data.length === 0) return '';

    const min = Math.min(...data);
    const max = Math.max(...data);
    const range = max - min || 1;

    const lines: string[] = [];

    // BUGFIX: guard the row divisor so height === 1 does not divide by zero
    // (0/0 → NaN thresholds → blank chart).
    const rowDivisor = Math.max(height - 1, 1);

    // Create chart grid
    for (let row = 0; row < height; row++) {
      const threshold = max - (row / rowDivisor) * range;
      let line = '   ';

      for (let col = 0; col < Math.min(data.length, width); col++) {
        const value = data[col];
        const prevValue = col > 0 ? data[col - 1] : value;

        // Determine character based on value relative to threshold
        if (value >= threshold) {
          // Different characters for trends
          if (col > 0) {
            if (value > prevValue) line += '╱'; // Rising
            else if (value < prevValue) line += '╲'; // Falling
            else line += '─'; // Flat
          } else {
            line += '●'; // Start point
          }
        } else {
          line += ' '; // Empty space
        }
      }
      lines.push(line);
    }

    // Add axis
    const axis = '   ' + '─'.repeat(Math.min(data.length, width));
    lines.push(axis);

    return lines.join('\n');
  }
}
```
Page 5/9FirstPrevNextLast