This is page 14 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/embeddings/onnx_embeddings.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | ONNX-based embedding generation for MCP Memory Service.
  3 | Provides PyTorch-free embedding generation using ONNX Runtime.
  4 | Based on ONNXMiniLM_L6_V2 implementation.
  5 | """
  6 | 
  7 | import hashlib
  8 | import json
  9 | import logging
 10 | import os
 11 | import tarfile
 12 | from pathlib import Path
 13 | from typing import List, Optional, Union
 14 | import numpy as np
 15 | 
 16 | logger = logging.getLogger(__name__)
 17 | 
 18 | # Try to import ONNX Runtime
 19 | try:
 20 |     import onnxruntime as ort
 21 |     ONNX_AVAILABLE = True
 22 | except ImportError:
 23 |     ONNX_AVAILABLE = False
 24 |     logger.warning("ONNX Runtime not available. Install with: pip install onnxruntime")
 25 | 
 26 | # Try to import tokenizers
 27 | try:
 28 |     from tokenizers import Tokenizer
 29 |     TOKENIZERS_AVAILABLE = True
 30 | except ImportError:
 31 |     TOKENIZERS_AVAILABLE = False
 32 |     logger.warning("Tokenizers not available. Install with: pip install tokenizers")
 33 | 
 34 | 
 35 | def _verify_sha256(fname: str, expected_sha256: str) -> bool:
 36 |     """Verify SHA256 hash of a file."""
 37 |     sha256_hash = hashlib.sha256()
 38 |     with open(fname, "rb") as f:
 39 |         for byte_block in iter(lambda: f.read(4096), b""):
 40 |             sha256_hash.update(byte_block)
 41 |     return sha256_hash.hexdigest() == expected_sha256
 42 | 
 43 | 
 44 | class ONNXEmbeddingModel:
 45 |     """
 46 |     ONNX-based embedding model that provides PyTorch-free embeddings.
 47 |     Compatible with all-MiniLM-L6-v2 model.
 48 |     """
 49 |     
 50 |     MODEL_NAME = "all-MiniLM-L6-v2"
 51 |     DOWNLOAD_PATH = Path.home() / ".cache" / "mcp_memory" / "onnx_models" / MODEL_NAME
 52 |     EXTRACTED_FOLDER_NAME = "onnx"
 53 |     ARCHIVE_FILENAME = "onnx.tar.gz"
 54 |     MODEL_DOWNLOAD_URL = (
 55 |         "https://chroma-onnx-models.s3.amazonaws.com/all-MiniLM-L6-v2/onnx.tar.gz"
 56 |     )
 57 |     _MODEL_SHA256 = "913d7300ceae3b2dbc2c50d1de4baacab4be7b9380491c27fab7418616a16ec3"
 58 |     
 59 |     def __init__(self, model_name: str = "all-MiniLM-L6-v2", preferred_providers: Optional[List[str]] = None):
 60 |         """
 61 |         Initialize ONNX embedding model.
 62 |         
 63 |         Args:
 64 |             model_name: Name of the model (currently only all-MiniLM-L6-v2 supported)
 65 |             preferred_providers: List of ONNX execution providers in order of preference
 66 |         """
 67 |         if not ONNX_AVAILABLE:
 68 |             raise ImportError("ONNX Runtime is required but not installed. Install with: pip install onnxruntime")
 69 |         
 70 |         if not TOKENIZERS_AVAILABLE:
 71 |             raise ImportError("Tokenizers is required but not installed. Install with: pip install tokenizers")
 72 |         
 73 |         self.model_name = model_name
 74 |         self._preferred_providers = preferred_providers or ['CPUExecutionProvider']
 75 |         self._model = None
 76 |         self._tokenizer = None
 77 |         
 78 |         # Download model if needed
 79 |         self._download_model_if_needed()
 80 |         
 81 |         # Initialize the model
 82 |         self._init_model()
 83 |     
 84 |     def _download_model_if_needed(self):
 85 |         """Download and extract ONNX model if not present."""
 86 |         if not self.DOWNLOAD_PATH.exists():
 87 |             self.DOWNLOAD_PATH.mkdir(parents=True, exist_ok=True)
 88 |         
 89 |         archive_path = self.DOWNLOAD_PATH / self.ARCHIVE_FILENAME
 90 |         extracted_path = self.DOWNLOAD_PATH / self.EXTRACTED_FOLDER_NAME
 91 |         
 92 |         # Check if model is already extracted
 93 |         if extracted_path.exists() and (extracted_path / "model.onnx").exists():
 94 |             logger.info(f"ONNX model already available at {extracted_path}")
 95 |             return
 96 |         
 97 |         # Download if not present or invalid
 98 |         if not archive_path.exists() or not _verify_sha256(str(archive_path), self._MODEL_SHA256):
 99 |             logger.info(f"Downloading ONNX model from {self.MODEL_DOWNLOAD_URL}")
100 |             try:
101 |                 import httpx
102 |                 with httpx.Client(timeout=30.0) as client:
103 |                     response = client.get(self.MODEL_DOWNLOAD_URL)
104 |                     response.raise_for_status()
105 |                     with open(archive_path, "wb") as f:
106 |                         f.write(response.content)
107 |                 logger.info(f"Model downloaded to {archive_path}")
108 |             except Exception as e:
109 |                 logger.error(f"Failed to download ONNX model: {e}")
110 |                 raise RuntimeError(f"Could not download ONNX model: {e}") from e
111 |         
112 |         # Extract the archive
113 |         logger.info(f"Extracting model to {extracted_path}")
114 |         with tarfile.open(archive_path, "r:gz") as tar:
115 |             tar.extractall(self.DOWNLOAD_PATH)
116 |         
117 |         # Verify extraction
118 |         if not (extracted_path / "model.onnx").exists():
119 |             raise RuntimeError(f"Model extraction failed - model.onnx not found in {extracted_path}")
120 |         
121 |         logger.info("ONNX model ready for use")
122 |     
123 |     def _init_model(self):
124 |         """Initialize ONNX model and tokenizer."""
125 |         model_path = self.DOWNLOAD_PATH / self.EXTRACTED_FOLDER_NAME / "model.onnx"
126 |         tokenizer_path = self.DOWNLOAD_PATH / self.EXTRACTED_FOLDER_NAME / "tokenizer.json"
127 |         
128 |         if not model_path.exists():
129 |             raise FileNotFoundError(f"ONNX model not found at {model_path}")
130 |         
131 |         if not tokenizer_path.exists():
132 |             raise FileNotFoundError(f"Tokenizer not found at {tokenizer_path}")
133 |         
134 |         # Initialize ONNX session
135 |         logger.info(f"Loading ONNX model with providers: {self._preferred_providers}")
136 |         self._model = ort.InferenceSession(
137 |             str(model_path),
138 |             providers=self._preferred_providers
139 |         )
140 |         
141 |         # Initialize tokenizer
142 |         self._tokenizer = Tokenizer.from_file(str(tokenizer_path))
143 |         
144 |         # Get model info
145 |         self.embedding_dimension = self._model.get_outputs()[0].shape[-1]
146 |         logger.info(f"ONNX model loaded. Embedding dimension: {self.embedding_dimension}")
147 |     
148 |     def encode(self, texts: Union[str, List[str]], convert_to_numpy: bool = True) -> np.ndarray:
149 |         """
150 |         Generate embeddings for texts using ONNX model.
151 |         
152 |         Args:
153 |             texts: Single text or list of texts to encode
154 |             convert_to_numpy: Whether to return numpy array (always True for compatibility)
155 |             
156 |         Returns:
157 |             Numpy array of embeddings with shape (n_texts, embedding_dim)
158 |         """
159 |         if isinstance(texts, str):
160 |             texts = [texts]
161 |         
162 |         # Tokenize texts
163 |         encoded = self._tokenizer.encode_batch(texts)
164 |         
165 |         # Prepare inputs for ONNX model
166 |         max_length = max(len(enc.ids) for enc in encoded)
167 |         
168 |         # Pad sequences
169 |         input_ids = np.zeros((len(texts), max_length), dtype=np.int64)
170 |         attention_mask = np.zeros((len(texts), max_length), dtype=np.int64)
171 |         token_type_ids = np.zeros((len(texts), max_length), dtype=np.int64)
172 |         
173 |         for i, enc in enumerate(encoded):
174 |             length = len(enc.ids)
175 |             input_ids[i, :length] = enc.ids
176 |             attention_mask[i, :length] = enc.attention_mask
177 |             token_type_ids[i, :length] = enc.type_ids
178 |         
179 |         # Run inference
180 |         ort_inputs = {
181 |             "input_ids": input_ids,
182 |             "attention_mask": attention_mask,
183 |             "token_type_ids": token_type_ids,
184 |         }
185 |         
186 |         outputs = self._model.run(None, ort_inputs)
187 |         
188 |         # Extract embeddings (using mean pooling)
189 |         last_hidden_states = outputs[0]
190 |         
191 |         # Mean pooling with attention mask
192 |         input_mask_expanded = attention_mask[..., np.newaxis].astype(np.float32)
193 |         sum_embeddings = np.sum(last_hidden_states * input_mask_expanded, axis=1)
194 |         sum_mask = np.clip(input_mask_expanded.sum(axis=1), a_min=1e-9, a_max=None)
195 |         embeddings = sum_embeddings / sum_mask
196 |         
197 |         # Normalize embeddings
198 |         embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
199 |         
200 |         return embeddings
201 |     
202 |     @property
203 |     def device(self):
204 |         """Return device info for compatibility."""
205 |         return "cpu"  # ONNX runtime handles device selection internally
206 | 
207 | 
208 | def get_onnx_embedding_model(model_name: str = "all-MiniLM-L6-v2") -> Optional[ONNXEmbeddingModel]:
209 |     """
210 |     Get ONNX embedding model if available.
211 |     
212 |     Args:
213 |         model_name: Name of the model to load
214 |         
215 |     Returns:
216 |         ONNXEmbeddingModel instance or None if ONNX is not available
217 |     """
218 |     if not ONNX_AVAILABLE:
219 |         logger.warning("ONNX Runtime not available")
220 |         return None
221 |     
222 |     if not TOKENIZERS_AVAILABLE:
223 |         logger.warning("Tokenizers not available")
224 |         return None
225 |     
226 |     try:
227 |         # Detect best available providers
228 |         available_providers = ort.get_available_providers()
229 |         preferred_providers = []
230 |         
231 |         # Prefer GPU providers if available
232 |         if 'CUDAExecutionProvider' in available_providers:
233 |             preferred_providers.append('CUDAExecutionProvider')
234 |         if 'DirectMLExecutionProvider' in available_providers:
235 |             preferred_providers.append('DirectMLExecutionProvider')
236 |         if 'CoreMLExecutionProvider' in available_providers:
237 |             preferred_providers.append('CoreMLExecutionProvider')
238 |         
239 |         # Always include CPU as fallback
240 |         preferred_providers.append('CPUExecutionProvider')
241 |         
242 |         logger.info(f"Creating ONNX model with providers: {preferred_providers}")
243 |         return ONNXEmbeddingModel(model_name, preferred_providers)
244 |     
245 |     except Exception as e:
246 |         logger.error(f"Failed to create ONNX embedding model: {e}")
247 |         return None
```
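
The module above exposes a PyTorch-free encoder behind the same `encode()` interface as sentence-transformers. A minimal usage sketch, assuming `onnxruntime` and `tokenizers` are installed and the model download succeeds (the similarity check is illustrative, not part of the module):

```python
import numpy as np

from mcp_memory_service.embeddings.onnx_embeddings import get_onnx_embedding_model

# get_onnx_embedding_model returns None when onnxruntime or tokenizers
# is missing, so guard before use.
model = get_onnx_embedding_model("all-MiniLM-L6-v2")
if model is not None:
    embeddings = model.encode(["store this memory", "find related memories"])
    print(embeddings.shape)  # (2, 384) for all-MiniLM-L6-v2

    # encode() L2-normalizes its output, so cosine similarity reduces to a dot product.
    similarity = float(np.dot(embeddings[0], embeddings[1]))
    print(f"cosine similarity: {similarity:.3f}")
```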

--------------------------------------------------------------------------------
/docs/examples/memory-distribution-chart.jsx:
--------------------------------------------------------------------------------

```javascript
  1 | import React from 'react';
  2 | import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer } from 'recharts';
  3 | 
  4 | /**
  5 |  * Memory Distribution Chart Component
  6 |  * 
  7 |  * A comprehensive visualization component for displaying monthly memory storage
  8 |  * distribution with insights, statistics, and interactive features.
  9 |  * 
 10 |  * Features:
 11 |  * - Responsive bar chart with monthly distribution
 12 |  * - Custom tooltips with percentages
 13 |  * - Statistics cards for key metrics
 14 |  * - Automatic insights generation
 15 |  * - Professional styling and layout
 16 |  * 
 17 |  * Usage:
 18 |  * 1. Install dependencies: npm install recharts
 19 |  * 2. Import and use: <MemoryDistributionChart data={yourData} />
 20 |  * 3. Or use with sample data as shown below
 21 |  */
 22 | 
 23 | const MemoryDistributionChart = ({ data = null, title = "Memory Storage Distribution by Month" }) => {
 24 |   // Sample data based on real MCP Memory Service analysis
 25 |   // Replace with actual data from your analytics pipeline
 26 |   const defaultData = [
 27 |     { month: "Jan 2025", count: 50, monthKey: "2025-01" },
 28 |     { month: "Feb 2025", count: 15, monthKey: "2025-02" },
 29 |     { month: "Mar 2025", count: 8, monthKey: "2025-03" },
 30 |     { month: "Apr 2025", count: 12, monthKey: "2025-04" },
 31 |     { month: "May 2025", count: 4, monthKey: "2025-05" },
 32 |     { month: "Jun 2025", count: 45, monthKey: "2025-06" }
 33 |   ];
 34 | 
 35 |   const monthlyData = data || defaultData;
 36 |   const totalMemories = monthlyData.reduce((sum, item) => sum + item.count, 0);
 37 | 
 38 |   // Calculate statistics
 39 |   const peakMonth = monthlyData.reduce((max, item) => 
 40 |     item.count > max.count ? item : max, monthlyData[0]);
 41 |   const averagePerMonth = (totalMemories / monthlyData.length).toFixed(1);
 42 |   
  43 |   // Most recent month in the series (last entry in the data)
 44 |   const recentMonth = monthlyData[monthlyData.length - 1];
 45 | 
 46 |   // Custom tooltip component
 47 |   const CustomTooltip = ({ active, payload, label }) => {
 48 |     if (active && payload && payload.length) {
 49 |       const data = payload[0].payload;
 50 |       const percentage = ((data.count / totalMemories) * 100).toFixed(1);
 51 |       
 52 |       return (
 53 |         <div className="bg-white p-3 border border-gray-300 rounded-lg shadow-lg">
 54 |           <p className="font-semibold text-gray-800">{label}</p>
 55 |           <p className="text-blue-600">
 56 |             <span className="font-medium">Memories: </span>
 57 |             {data.count}
 58 |           </p>
 59 |           <p className="text-gray-600">
 60 |             <span className="font-medium">Percentage: </span>
 61 |             {percentage}%
 62 |           </p>
 63 |         </div>
 64 |       );
 65 |     }
 66 |     return null;
 67 |   };
 68 | 
 69 |   // Custom label function for bars
 70 |   const renderCustomLabel = (entry) => {
 71 |     if (entry.count > 5) { // Only show labels for bars with more than 5 memories
 72 |       return entry.count;
 73 |     }
 74 |     return null;
 75 |   };
 76 | 
 77 |   // Generate insights based on data patterns
 78 |   const generateInsights = () => {
 79 |     const insights = [];
 80 |     
 81 |     // Peak activity insight
 82 |     const peakPercentage = ((peakMonth.count / totalMemories) * 100).toFixed(1);
 83 |     insights.push(`Peak activity in ${peakMonth.month} (${peakPercentage}% of total memories)`);
 84 |     
 85 |     // Recent activity insight
 86 |     const recentPercentage = ((recentMonth.count / totalMemories) * 100).toFixed(1);
 87 |     if (recentMonth.count > averagePerMonth) {
 88 |       insights.push(`High recent activity: ${recentMonth.month} above average`);
 89 |     }
 90 |     
 91 |     // Growth pattern insight
 92 |     const firstMonth = monthlyData[0];
 93 |     const lastMonth = monthlyData[monthlyData.length - 1];
 94 |     if (lastMonth.count > firstMonth.count * 0.8) {
 95 |       insights.push(`Sustained activity: Recent months maintain high productivity`);
 96 |     }
 97 |     
 98 |     return insights;
 99 |   };
100 | 
101 |   const insights = generateInsights();
102 | 
103 |   return (
104 |     <div className="w-full max-w-6xl mx-auto p-6 bg-gray-50 rounded-lg">
105 |       {/* Header Section */}
106 |       <div className="mb-6">
107 |         <h2 className="text-2xl font-bold text-gray-800 mb-2">
108 |           {title}
109 |         </h2>
110 |         <p className="text-gray-600">
111 |           Total memories analyzed: <span className="font-semibold text-blue-600">{totalMemories}</span> memories
112 |         </p>
113 |       </div>
114 | 
115 |       {/* Main Chart */}
116 |       <div className="bg-white p-4 rounded-lg shadow-sm mb-6">
117 |         <ResponsiveContainer width="100%" height={400}>
118 |           <BarChart
119 |             data={monthlyData}
120 |             margin={{
121 |               top: 20,
122 |               right: 30,
123 |               left: 20,
124 |               bottom: 5,
125 |             }}
126 |           >
127 |             <CartesianGrid strokeDasharray="3 3" stroke="#f0f0f0" />
128 |             <XAxis 
129 |               dataKey="month" 
130 |               tick={{ fontSize: 12 }}
131 |               tickLine={{ stroke: '#d1d5db' }}
132 |               axisLine={{ stroke: '#d1d5db' }}
133 |             />
134 |             <YAxis 
135 |               tick={{ fontSize: 12 }}
136 |               tickLine={{ stroke: '#d1d5db' }}
137 |               axisLine={{ stroke: '#d1d5db' }}
138 |               label={{ 
139 |                 value: 'Number of Memories', 
140 |                 angle: -90, 
141 |                 position: 'insideLeft',
142 |                 style: { textAnchor: 'middle', fontSize: '12px', fill: '#6b7280' }
143 |               }}
144 |             />
145 |             <Tooltip content={<CustomTooltip />} />
146 |             <Legend />
147 |             <Bar 
148 |               dataKey="count" 
149 |               name="Memories Stored"
150 |               fill="#3b82f6"
151 |               radius={[4, 4, 0, 0]}
152 |               label={renderCustomLabel}
153 |             />
154 |           </BarChart>
155 |         </ResponsiveContainer>
156 |       </div>
157 | 
158 |       {/* Statistics Cards */}
159 |       <div className="grid grid-cols-1 md:grid-cols-3 gap-4 mb-6">
160 |         <div className="bg-blue-50 p-4 rounded-lg">
161 |           <h3 className="font-semibold text-blue-800 mb-2">Peak Month</h3>
162 |           <p className="text-lg font-bold text-blue-600">{peakMonth.month}</p>
163 |           <p className="text-sm text-blue-600">
164 |             {peakMonth.count} memories ({((peakMonth.count / totalMemories) * 100).toFixed(1)}%)
165 |           </p>
166 |         </div>
167 |         
168 |         <div className="bg-green-50 p-4 rounded-lg">
169 |           <h3 className="font-semibold text-green-800 mb-2">Recent Activity</h3>
170 |           <p className="text-lg font-bold text-green-600">{recentMonth.month}</p>
171 |           <p className="text-sm text-green-600">
172 |             {recentMonth.count} memories ({((recentMonth.count / totalMemories) * 100).toFixed(1)}%)
173 |           </p>
174 |         </div>
175 |         
176 |         <div className="bg-amber-50 p-4 rounded-lg">
177 |           <h3 className="font-semibold text-amber-800 mb-2">Average/Month</h3>
178 |           <p className="text-lg font-bold text-amber-600">{averagePerMonth}</p>
179 |           <p className="text-sm text-amber-600">memories per month</p>
180 |         </div>
181 |       </div>
182 | 
183 |       {/* Insights Section */}
184 |       <div className="bg-white p-4 rounded-lg shadow-sm">
185 |         <h3 className="font-semibold text-gray-800 mb-3">📊 Data Insights</h3>
186 |         <div className="space-y-2">
187 |           {insights.map((insight, index) => (
188 |             <div key={index} className="flex items-start">
189 |               <span className="text-blue-500 mr-2">•</span>
190 |               <p className="text-sm text-gray-600">{insight}</p>
191 |             </div>
192 |           ))}
193 |         </div>
194 |         
195 |         <div className="mt-4 pt-4 border-t border-gray-200">
196 |           <p className="text-xs text-gray-500">
197 |             <strong>Analysis Pattern:</strong> This distribution shows typical software development 
198 |             lifecycle phases - high initial activity (project setup), consolidation periods, 
199 |             and renewed intensive development phases.
200 |           </p>
201 |         </div>
202 |       </div>
203 |     </div>
204 |   );
205 | };
206 | 
207 | export default MemoryDistributionChart;
208 | 
209 | /**
210 |  * Usage Examples:
211 |  * 
212 |  * 1. Basic Usage (with sample data):
213 |  * <MemoryDistributionChart />
214 |  * 
215 |  * 2. With Custom Data:
216 |  * const myData = [
217 |  *   { month: "Jan 2025", count: 25, monthKey: "2025-01" },
218 |  *   { month: "Feb 2025", count: 30, monthKey: "2025-02" },
219 |  *   // ... more data
220 |  * ];
221 |  * <MemoryDistributionChart data={myData} title="My Project Analysis" />
222 |  * 
223 |  * 3. Integration with MCP Memory Service:
224 |  * 
225 |  * async function loadMemoryData() {
226 |  *   const memories = await recall_memory({
227 |  *     "query": "memories from this year",
228 |  *     "n_results": 500
229 |  *   });
230 |  *   
231 |  *   // Process memories into chart format
232 |  *   const processedData = processMemoriesForChart(memories);
233 |  *   return processedData;
234 |  * }
235 |  * 
236 |  * function processMemoriesForChart(memories) {
237 |  *   const monthlyDistribution = {};
238 |  *   
239 |  *   memories.forEach(memory => {
240 |  *     const date = new Date(memory.timestamp);
241 |  *     const monthKey = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}`;
242 |  *     
243 |  *     if (!monthlyDistribution[monthKey]) {
244 |  *       monthlyDistribution[monthKey] = 0;
245 |  *     }
246 |  *     monthlyDistribution[monthKey]++;
247 |  *   });
248 |  *   
249 |  *   return Object.entries(monthlyDistribution)
250 |  *     .sort(([a], [b]) => a.localeCompare(b))
251 |  *     .map(([month, count]) => {
252 |  *       const [year, monthNum] = month.split('-');
253 |  *       const monthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 
254 |  *                          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
255 |  *       const monthName = monthNames[parseInt(monthNum) - 1];
256 |  *       
257 |  *       return {
258 |  *         month: `${monthName} ${year}`,
259 |  *         count: count,
260 |  *         monthKey: month
261 |  *       };
262 |  *     });
263 |  * }
264 |  * 
265 |  * Dependencies:
266 |  * npm install recharts
267 |  * 
268 |  * For Tailwind CSS styling, ensure you have Tailwind configured in your project.
269 |  */
```

--------------------------------------------------------------------------------
/docs/quick-setup-cloudflare-dual-environment.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Quick Setup: Cloudflare Backend for Claude Desktop + Claude Code
  2 | 
  3 | This guide provides streamlined instructions for configuring the Cloudflare backend for both Claude Desktop and Claude Code simultaneously.
  4 | 
  5 | ## 🎯 Overview
  6 | 
  7 | This setup points Claude Desktop and Claude Code at the same Cloudflare backend so that both environments share consistent memory storage.
  8 | 
  9 | **Expected Result:**
 10 | - Claude Desktop: ✅ Cloudflare backend with 1000+ memories
 11 | - Claude Code: ✅ Cloudflare backend with same memories
 12 | - Health checks show: `"backend": "cloudflare"` and `"storage_type": "CloudflareStorage"`
 13 | 
 14 | ## ⚡ Quick Setup (5 minutes)
 15 | 
 16 | ### Step 1: Prepare Cloudflare Resources
 17 | 
 18 | If you don't have Cloudflare resources yet:
 19 | 
 20 | ```bash
 21 | # Install wrangler CLI
 22 | npm install -g wrangler
 23 | 
 24 | # Login and create resources
 25 | wrangler login
 26 | wrangler vectorize create mcp-memory-index --dimensions=768 --metric=cosine
 27 | wrangler d1 create mcp-memory-db
 28 | 
 29 | # Note the database ID from output
 30 | ```
 31 | 
 32 | ### Step 2: Create Environment Configuration
 33 | 
 34 | Create a `.env` file in the project root:
 35 | 
 36 | ```bash
 37 | cd C:/REPOSITORIES/mcp-memory-service
 38 | 
 39 | # Create .env file with your Cloudflare credentials
 40 | cat > .env << 'EOF'
 41 | # MCP Memory Service Environment Configuration
 42 | MCP_MEMORY_STORAGE_BACKEND=cloudflare
 43 | 
 44 | # Cloudflare D1 Database Configuration
 45 | CLOUDFLARE_API_TOKEN=your-api-token-here
 46 | CLOUDFLARE_ACCOUNT_ID=your-account-id-here
 47 | CLOUDFLARE_D1_DATABASE_ID=your-d1-database-id-here
 48 | CLOUDFLARE_VECTORIZE_INDEX=mcp-memory-index
 49 | 
 50 | # Backup paths (for fallback)
 51 | MCP_MEMORY_BACKUPS_PATH=C:\Users\your-username\AppData\Local\mcp-memory\backups
 52 | MCP_MEMORY_SQLITE_PATH=C:\Users\your-username\AppData\Local\mcp-memory\backups\sqlite_vec.db
 53 | EOF
 54 | ```
 55 | 
 56 | ### Step 3: Configure Claude Desktop
 57 | 
 58 | Update `~/.claude.json` (or `%APPDATA%\Claude\claude_desktop_config.json` on Windows):
 59 | 
 60 | ```json
 61 | {
 62 |   "mcpServers": {
 63 |     "memory": {
 64 |       "command": "python",
 65 |       "args": ["-m", "mcp_memory_service.server"],
 66 |       "cwd": "C:/REPOSITORIES/mcp-memory-service",
 67 |       "env": {
 68 |         "MCP_MEMORY_STORAGE_BACKEND": "cloudflare",
 69 |         "CLOUDFLARE_API_TOKEN": "your-api-token-here",
 70 |         "CLOUDFLARE_ACCOUNT_ID": "your-account-id-here",
 71 |         "CLOUDFLARE_D1_DATABASE_ID": "your-d1-database-id-here",
 72 |         "CLOUDFLARE_VECTORIZE_INDEX": "mcp-memory-index",
 73 |         "MCP_MEMORY_BACKUPS_PATH": "C:\\Users\\your-username\\AppData\\Local\\mcp-memory\\backups",
 74 |         "MCP_MEMORY_SQLITE_PATH": "C:\\Users\\your-username\\AppData\\Local\\mcp-memory\\backups\\sqlite_vec.db"
 75 |       }
 76 |     }
 77 |   }
 78 | }
 79 | ```
 80 | 
 81 | ### Step 4: Configure Claude Code
 82 | 
 83 | ```bash
 84 | # Navigate to project directory
 85 | cd C:/REPOSITORIES/mcp-memory-service
 86 | 
 87 | # Add memory server with explicit environment variables
 88 | claude mcp add memory python \
 89 |   -e MCP_MEMORY_STORAGE_BACKEND=cloudflare \
 90 |   -e CLOUDFLARE_API_TOKEN=your-api-token-here \
 91 |   -e CLOUDFLARE_ACCOUNT_ID=your-account-id-here \
 92 |   -e CLOUDFLARE_D1_DATABASE_ID=your-d1-database-id-here \
 93 |   -e CLOUDFLARE_VECTORIZE_INDEX=mcp-memory-index \
 94 |   -e MCP_MEMORY_BACKUPS_PATH="C:\Users\your-username\AppData\Local\mcp-memory\backups" \
 95 |   -e MCP_MEMORY_SQLITE_PATH="C:\Users\your-username\AppData\Local\mcp-memory\backups\sqlite_vec.db" \
 96 |   -- -m mcp_memory_service.server
 97 | ```
 98 | 
 99 | ### Step 5: Verify Configuration
100 | 
101 | **Test Claude Desktop:**
102 | 1. Restart Claude Desktop
103 | 2. Open a new conversation
104 | 3. Ask: "Check memory health"
105 | 4. Should show: `"backend": "cloudflare"` and `"storage_type": "CloudflareStorage"`
106 | 
107 | **Test Claude Code:**
108 | ```bash
109 | # Check MCP server status
110 | claude mcp list
111 | 
112 | # Should show: memory: python -m mcp_memory_service.server - ✓ Connected
113 | ```
114 | 
115 | ## 🔧 Configuration Templates
116 | 
117 | ### Claude Desktop Template (`claude_desktop_config.json`)
118 | 
119 | ```json
120 | {
121 |   "mcpServers": {
122 |     "memory": {
123 |       "command": "python",
124 |       "args": ["-m", "mcp_memory_service.server"],
125 |       "cwd": "C:/REPOSITORIES/mcp-memory-service",
126 |       "env": {
127 |         "MCP_MEMORY_STORAGE_BACKEND": "cloudflare",
128 |         "CLOUDFLARE_API_TOKEN": "YOUR_TOKEN_HERE",
129 |         "CLOUDFLARE_ACCOUNT_ID": "YOUR_ACCOUNT_ID_HERE",
130 |         "CLOUDFLARE_D1_DATABASE_ID": "YOUR_D1_DATABASE_ID_HERE",
131 |         "CLOUDFLARE_VECTORIZE_INDEX": "mcp-memory-index",
132 |         "MCP_MEMORY_BACKUPS_PATH": "C:\\Users\\USERNAME\\AppData\\Local\\mcp-memory\\backups",
133 |         "MCP_MEMORY_SQLITE_PATH": "C:\\Users\\USERNAME\\AppData\\Local\\mcp-memory\\backups\\sqlite_vec.db"
134 |       }
135 |     }
136 |   }
137 | }
138 | ```
139 | 
140 | ### Project Environment Template (`.env`)
141 | 
142 | ```bash
143 | # Storage Backend Configuration
144 | MCP_MEMORY_STORAGE_BACKEND=cloudflare
145 | 
146 | # Required Cloudflare Settings
147 | CLOUDFLARE_API_TOKEN=YOUR_TOKEN_HERE
148 | CLOUDFLARE_ACCOUNT_ID=YOUR_ACCOUNT_ID_HERE
149 | CLOUDFLARE_D1_DATABASE_ID=YOUR_D1_DATABASE_ID_HERE
150 | CLOUDFLARE_VECTORIZE_INDEX=mcp-memory-index
151 | 
152 | # Optional Settings
153 | CLOUDFLARE_R2_BUCKET=mcp-memory-content
154 | CLOUDFLARE_EMBEDDING_MODEL=@cf/baai/bge-base-en-v1.5
155 | CLOUDFLARE_LARGE_CONTENT_THRESHOLD=1048576
156 | CLOUDFLARE_MAX_RETRIES=3
157 | CLOUDFLARE_BASE_DELAY=1.0
158 | 
159 | # Backup Configuration
160 | MCP_MEMORY_BACKUPS_PATH=C:\Users\USERNAME\AppData\Local\mcp-memory\backups
161 | MCP_MEMORY_SQLITE_PATH=C:\Users\USERNAME\AppData\Local\mcp-memory\backups\sqlite_vec.db
162 | 
163 | # Logging
164 | LOG_LEVEL=INFO
165 | ```
166 | 
167 | ## ✅ Validation Commands
168 | 
169 | ### Quick Health Check
170 | 
171 | ```bash
172 | # Test configuration loading
173 | cd C:/REPOSITORIES/mcp-memory-service
174 | python -c "
175 | from src.mcp_memory_service.config import STORAGE_BACKEND, CLOUDFLARE_API_TOKEN
176 | print(f'Backend: {STORAGE_BACKEND}')
177 | print(f'Token set: {bool(CLOUDFLARE_API_TOKEN)}')
178 | "
179 | 
180 | # Test server initialization
181 | python scripts/validation/diagnose_backend_config.py
182 | ```
183 | 
184 | ### Expected Health Check Results
185 | 
186 | **Cloudflare Backend (Correct):**
187 | ```json
188 | {
189 |   "validation": {
190 |     "status": "healthy",
191 |     "message": "Cloudflare storage validation successful"
192 |   },
193 |   "statistics": {
194 |     "backend": "cloudflare",
195 |     "storage_backend": "cloudflare",
196 |     "total_memories": 1073,
197 |     "vectorize_index": "mcp-memory-index",
198 |     "d1_database_id": "f745e9b4-ba8e-4d47-b38f-12af91060d5a"
199 |   },
200 |   "performance": {
201 |     "server": {
202 |       "storage_type": "CloudflareStorage"
203 |     }
204 |   }
205 | }
206 | ```
207 | 
208 | **SQLite-vec Fallback (Incorrect):**
209 | ```json
210 | {
211 |   "statistics": {
212 |     "backend": "sqlite-vec",
213 |     "storage_backend": "sqlite-vec"
214 |   },
215 |   "performance": {
216 |     "server": {
217 |       "storage_type": "SqliteVecMemoryStorage"
218 |     }
219 |   }
220 | }
221 | ```
222 | 
223 | ## 🚨 Troubleshooting
224 | 
225 | ### Issue: Health Check Shows SQLite-vec Instead of Cloudflare
226 | 
227 | **Root Cause:** Environment variables are not being loaded in the execution context.
228 | 
229 | **Solutions:**
230 | 
231 | 1. **Claude Desktop:**
232 |    - Ensure `cwd` is set to project directory
233 |    - Use explicit `env` variables in MCP configuration
234 |    - Restart Claude Desktop after config changes
235 | 
236 | 2. **Claude Code:**
237 |    - Use explicit `-e` environment variables in `claude mcp add`
238 |    - Ensure command runs from project directory
239 |    - Remove and re-add memory server to pick up changes
240 | 
241 | 3. **Both Environments:**
242 |    - Verify `.env` file exists and contains correct values
243 |    - Check API token permissions (Vectorize:Edit, D1:Edit, Workers AI:Read)
244 |    - Test Cloudflare connectivity manually
245 | 
246 | ### Issue: "Missing required environment variables"
247 | 
248 | ```bash
249 | # Check if variables are being loaded
250 | cd C:/REPOSITORIES/mcp-memory-service
251 | python -c "
252 | import os
253 | from dotenv import load_dotenv
254 | load_dotenv('.env')
255 | print('CLOUDFLARE_API_TOKEN:', 'SET' if os.getenv('CLOUDFLARE_API_TOKEN') else 'NOT SET')
256 | print('CLOUDFLARE_ACCOUNT_ID:', os.getenv('CLOUDFLARE_ACCOUNT_ID', 'NOT SET'))
257 | "
258 | ```
259 | 
260 | ### Issue: Different Memory Counts Between Environments
261 | 
262 | This indicates that the two environments are using different backends:
263 | - **Same count (e.g., 1073):** Both using Cloudflare ✅
264 | - **Different counts:** One using SQLite-vec fallback ❌
265 | 
266 | **Fix:** Follow troubleshooting steps above to ensure both use Cloudflare.
267 | 
268 | ### Issue: Connection Failed or Authentication Errors
269 | 
270 | 1. **Verify API Token:**
271 |    ```bash
272 |    curl -X GET "https://api.cloudflare.com/client/v4/user/tokens/verify" \
273 |      -H "Authorization: Bearer YOUR_API_TOKEN"
274 |    ```
275 | 
276 | 2. **Check Resource IDs:**
277 |    ```bash
278 |    # List Vectorize indexes
279 |    curl -X GET "https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/vectorize/v2/indexes" \
280 |      -H "Authorization: Bearer YOUR_API_TOKEN"
281 | 
282 |    # List D1 databases
283 |    curl -X GET "https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/d1/database" \
284 |      -H "Authorization: Bearer YOUR_API_TOKEN"
285 |    ```
286 | 
287 | ## 🔄 Migration from SQLite-vec
288 | 
289 | If you have existing memories in SQLite-vec:
290 | 
291 | ```bash
292 | # Export existing memories
293 | python scripts/export_sqlite_vec.py --output cloudflare_export.json
294 | 
295 | # Switch to Cloudflare (follow setup above)
296 | 
297 | # Import to Cloudflare
298 | python scripts/import_to_cloudflare.py --input cloudflare_export.json
299 | ```
300 | 
301 | ## 📝 Configuration Management
302 | 
303 | ### Single Source of Truth
304 | 
305 | - **Global Config:** `~/.claude.json` (Claude Desktop) - authoritative
306 | - **Project Config:** `.env` file (development) - for local development
307 | - **Avoid:** Multiple conflicting configurations
308 | 
309 | ### Environment Variable Precedence
310 | 
311 | 1. Explicit MCP server `env` variables (highest priority)
312 | 2. System environment variables
313 | 3. `.env` file variables
314 | 4. Default values (lowest priority)
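
A minimal sketch of how this precedence plays out in practice, assuming python-dotenv (which by default does not override variables already set at levels 1-2):

```python
import os
from dotenv import load_dotenv

load_dotenv(".env")  # level 3: fills in only variables not already set
# Level 4: fall back to a default if nothing above provided a value.
backend = os.getenv("MCP_MEMORY_STORAGE_BACKEND", "sqlite_vec")
```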
315 | 
316 | ## 🎯 Success Criteria
317 | 
318 | Both Claude Desktop and Claude Code should show:
319 | 
320 | ✅ **Health Check:** `"backend": "cloudflare"`
321 | ✅ **Storage Type:** `"CloudflareStorage"`
322 | ✅ **Memory Count:** Same number across environments
323 | ✅ **Database ID:** Same Cloudflare D1 database ID
324 | ✅ **Index:** Same Vectorize index name
325 | 
326 | When successful, memories will be synchronized across both environments automatically!
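
To spot-check these criteria, save each environment's health-check JSON to a file and compare the key fields (a sketch using jq; the file names are illustrative):

```bash
for f in desktop_health.json code_health.json; do
  echo "== $f"
  jq '{backend: .statistics.backend, storage: .performance.server.storage_type}' "$f"
done
```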
```

--------------------------------------------------------------------------------
/tests/unit/test_mdns_simple.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Simple test script for mDNS functionality without external test frameworks.
 18 | """
 19 | 
 20 | import asyncio
 21 | import sys
 22 | import os
 23 | import traceback
 24 | from unittest.mock import Mock, AsyncMock, patch
 25 | 
 26 | # Add the src directory to the Python path
 27 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'src'))
 28 | 
 29 | def run_test(test_func, test_name):
 30 |     """Run a single test function and handle exceptions."""
 31 |     try:
 32 |         if asyncio.iscoroutinefunction(test_func):
 33 |             asyncio.run(test_func())
 34 |         else:
 35 |             test_func()
 36 |         print(f"✅ {test_name}")
 37 |         return True
 38 |     except Exception as e:
 39 |         print(f"❌ {test_name}: {e}")
 40 |         tb_lines = traceback.format_exc().split('\n')
 41 |         print(f"   {tb_lines[-3].strip()}")
 42 |         return False
 43 | 
 44 | def test_imports():
 45 |     """Test that mDNS modules can be imported."""
 46 |     from mcp_memory_service.discovery.mdns_service import (
 47 |         ServiceAdvertiser, ServiceDiscovery, DiscoveryListener, ServiceDetails
 48 |     )
 49 |     from mcp_memory_service.discovery.client import DiscoveryClient, HealthStatus
 50 |     
 51 |     # Test ServiceDetails creation
 52 |     service_info = Mock()
 53 |     details = ServiceDetails(
 54 |         name="Test Service",
 55 |         host="192.168.1.100",
 56 |         port=8000,
 57 |         https=False,
 58 |         api_version="2.1.0",
 59 |         requires_auth=True,
 60 |         service_info=service_info
 61 |     )
 62 |     
 63 |     assert details.url == "http://192.168.1.100:8000"
 64 |     assert details.api_url == "http://192.168.1.100:8000/api"
 65 | 
 66 | def test_service_advertiser_init():
 67 |     """Test ServiceAdvertiser initialization."""
 68 |     from mcp_memory_service.discovery.mdns_service import ServiceAdvertiser
 69 |     
 70 |     # Test default initialization
 71 |     advertiser = ServiceAdvertiser()
 72 |     assert advertiser.service_name == "MCP Memory Service"
 73 |     assert advertiser.service_type == "_mcp-memory._tcp.local."
 74 |     assert advertiser.port == 8000
 75 |     assert advertiser._registered is False
 76 |     
 77 |     # Test custom initialization
 78 |     custom_advertiser = ServiceAdvertiser(
 79 |         service_name="Custom Service",
 80 |         port=8443,
 81 |         https_enabled=True
 82 |     )
 83 |     assert custom_advertiser.service_name == "Custom Service"
 84 |     assert custom_advertiser.port == 8443
 85 |     assert custom_advertiser.https_enabled is True
 86 | 
 87 | async def test_service_advertiser_start_stop():
 88 |     """Test ServiceAdvertiser start/stop with mocks."""
 89 |     from mcp_memory_service.discovery.mdns_service import ServiceAdvertiser
 90 |     
 91 |     with patch('mcp_memory_service.discovery.mdns_service.AsyncZeroconf') as mock_zeroconf_class:
 92 |         mock_zeroconf = AsyncMock()
 93 |         mock_zeroconf_class.return_value = mock_zeroconf
 94 |         
 95 |         advertiser = ServiceAdvertiser()
 96 |         
 97 |         with patch.object(advertiser, '_create_service_info') as mock_create_info:
 98 |             mock_service_info = Mock()
 99 |             mock_create_info.return_value = mock_service_info
100 |             
101 |             # Test start
102 |             result = await advertiser.start()
103 |             assert result is True
104 |             assert advertiser._registered is True
105 |             
106 |             # Test stop
107 |             await advertiser.stop()
108 |             assert advertiser._registered is False
109 | 
110 | def test_service_discovery_init():
111 |     """Test ServiceDiscovery initialization."""
112 |     from mcp_memory_service.discovery.mdns_service import ServiceDiscovery
113 |     
114 |     discovery = ServiceDiscovery()
115 |     assert discovery.service_type == "_mcp-memory._tcp.local."
116 |     assert discovery.discovery_timeout == 5
117 |     assert discovery._discovering is False
118 | 
119 | async def test_service_discovery_operations():
120 |     """Test ServiceDiscovery operations with mocks."""
121 |     from mcp_memory_service.discovery.mdns_service import ServiceDiscovery, ServiceDetails
122 |     
123 |     with patch('mcp_memory_service.discovery.mdns_service.AsyncZeroconf'), \
124 |          patch('mcp_memory_service.discovery.mdns_service.AsyncServiceBrowser'):
125 |         
126 |         discovery = ServiceDiscovery(discovery_timeout=1)
127 |         
128 |         # Test get_discovered_services with no listener
129 |         services = discovery.get_discovered_services()
130 |         assert len(services) == 0
131 |         
132 |         # Test with mock listener
133 |         mock_listener = Mock()
134 |         mock_service = ServiceDetails(
135 |             name="Test Service",
136 |             host="192.168.1.100",
137 |             port=8000,
138 |             https=False,
139 |             api_version="2.1.0",
140 |             requires_auth=False,
141 |             service_info=Mock()
142 |         )
143 |         mock_listener.services = {"test": mock_service}
144 |         discovery._listener = mock_listener
145 |         
146 |         services = discovery.get_discovered_services()
147 |         assert len(services) == 1
148 |         assert services[0] == mock_service
149 | 
150 | def test_discovery_listener():
151 |     """Test DiscoveryListener functionality."""
152 |     from mcp_memory_service.discovery.mdns_service import DiscoveryListener
153 |     
154 |     # Test initialization
155 |     listener = DiscoveryListener()
156 |     assert listener.callback is None
157 |     assert len(listener.services) == 0
158 |     
159 |     # Test with callback
160 |     callback = Mock()
161 |     listener_with_callback = DiscoveryListener(callback)
162 |     assert listener_with_callback.callback == callback
163 | 
164 | def test_discovery_client_init():
165 |     """Test DiscoveryClient initialization."""
166 |     from mcp_memory_service.discovery.client import DiscoveryClient
167 |     
168 |     client = DiscoveryClient()
169 |     assert client.discovery_timeout == 5
170 |     
171 |     custom_client = DiscoveryClient(discovery_timeout=10)
172 |     assert custom_client.discovery_timeout == 10
173 | 
174 | async def test_discovery_client_operations():
175 |     """Test DiscoveryClient operations with mocks."""
176 |     from mcp_memory_service.discovery.client import DiscoveryClient, HealthStatus
177 |     from mcp_memory_service.discovery.mdns_service import ServiceDetails
178 |     
179 |     client = DiscoveryClient()
180 |     
181 |     # Test discover_services
182 |     mock_service = ServiceDetails(
183 |         name="Test Service",
184 |         host="192.168.1.100",
185 |         port=8000,
186 |         https=False,
187 |         api_version="2.1.0",
188 |         requires_auth=False,
189 |         service_info=Mock()
190 |     )
191 |     
192 |     with patch.object(client._discovery, 'discover_services', return_value=[mock_service]):
193 |         services = await client.discover_services()
194 |         assert len(services) == 1
195 |         assert services[0] == mock_service
196 | 
197 | def test_health_status():
198 |     """Test HealthStatus dataclass."""
199 |     from mcp_memory_service.discovery.client import HealthStatus
200 |     
201 |     health = HealthStatus(
202 |         healthy=True,
203 |         status='ok',
204 |         backend='sqlite_vec',
205 |         statistics={'memory_count': 100},
206 |         response_time_ms=50.0
207 |     )
208 |     
209 |     assert health.healthy is True
210 |     assert health.status == 'ok'
211 |     assert health.backend == 'sqlite_vec'
212 |     assert health.response_time_ms == 50.0
213 | 
214 | def test_service_details_properties():
215 |     """Test ServiceDetails URL properties."""
216 |     from mcp_memory_service.discovery.mdns_service import ServiceDetails
217 |     
218 |     # Test HTTP service
219 |     http_service = ServiceDetails(
220 |         name="HTTP Service",
221 |         host="192.168.1.100",
222 |         port=8000,
223 |         https=False,
224 |         api_version="2.1.0",
225 |         requires_auth=False,
226 |         service_info=Mock()
227 |     )
228 |     
229 |     assert http_service.url == "http://192.168.1.100:8000"
230 |     assert http_service.api_url == "http://192.168.1.100:8000/api"
231 |     
232 |     # Test HTTPS service
233 |     https_service = ServiceDetails(
234 |         name="HTTPS Service",
235 |         host="192.168.1.100",
236 |         port=8443,
237 |         https=True,
238 |         api_version="2.1.0",
239 |         requires_auth=True,
240 |         service_info=Mock()
241 |     )
242 |     
243 |     assert https_service.url == "https://192.168.1.100:8443"
244 |     assert https_service.api_url == "https://192.168.1.100:8443/api"
245 | 
246 | def main():
247 |     """Run all tests."""
248 |     print("🔧 MCP Memory Service - mDNS Unit Tests")
249 |     print("=" * 50)
250 |     
251 |     tests = [
252 |         (test_imports, "Import mDNS modules"),
253 |         (test_service_advertiser_init, "ServiceAdvertiser initialization"),
254 |         (test_service_advertiser_start_stop, "ServiceAdvertiser start/stop"),
255 |         (test_service_discovery_init, "ServiceDiscovery initialization"),
256 |         (test_service_discovery_operations, "ServiceDiscovery operations"),
257 |         (test_discovery_listener, "DiscoveryListener functionality"),
258 |         (test_discovery_client_init, "DiscoveryClient initialization"),
259 |         (test_discovery_client_operations, "DiscoveryClient operations"),
260 |         (test_health_status, "HealthStatus dataclass"),
261 |         (test_service_details_properties, "ServiceDetails properties"),
262 |     ]
263 |     
264 |     passed = 0
265 |     total = len(tests)
266 |     
267 |     for test_func, test_name in tests:
268 |         if run_test(test_func, test_name):
269 |             passed += 1
270 |     
271 |     print("\n" + "=" * 50)
272 |     print(f"Results: {passed}/{total} tests passed")
273 |     
274 |     if passed == total:
275 |         print("🎉 All mDNS unit tests passed!")
276 |         return 0
277 |     else:
278 |         print("❌ Some tests failed!")
279 |         return 1
280 | 
281 | if __name__ == "__main__":
282 |     sys.exit(main())
```

--------------------------------------------------------------------------------
/scripts/benchmarks/benchmark_hybrid_sync.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Benchmark hybrid storage sync performance optimizations (v8.27.0).
  4 | 
  5 | Tests the performance improvements from:
  6 | - Bulk existence checking (get_all_content_hashes)
  7 | - Parallel processing with asyncio.gather
  8 | - Larger batch sizes for initial sync
  9 | 
 10 | Usage:
 11 |     python scripts/benchmarks/benchmark_hybrid_sync.py
 12 | """
 13 | 
 14 | import asyncio
 15 | import time
 16 | import sys
 17 | from pathlib import Path
 18 | from typing import List
 19 | from dataclasses import dataclass
 20 | 
 21 | # Add src to path
 22 | sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
 23 | 
 24 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 25 | from mcp_memory_service.models.memory import Memory
 26 | from mcp_memory_service import config
 27 | 
 28 | @dataclass
 29 | class BenchmarkResult:
 30 |     """Results from a sync benchmark run."""
 31 |     operation: str
 32 |     duration_ms: float
 33 |     memories_processed: int
 34 |     memories_per_second: float
 35 |     optimization_used: str
 36 | 
 37 | async def benchmark_bulk_existence_check():
 38 |     """Benchmark bulk existence check vs individual queries."""
 39 |     print("\n" + "=" * 80)
 40 |     print("BENCHMARK 1: Bulk Existence Check")
 41 |     print("=" * 80)
 42 | 
 43 |     # Create test storage
 44 |     storage = SqliteVecMemoryStorage(config.SQLITE_VEC_PATH)
 45 |     await storage.initialize()
 46 | 
 47 |     # Get stats
 48 |     stats = await storage.get_stats()
 49 |     total_memories = stats.get('total_memories', 0)
 50 | 
 51 |     print(f"Database contains: {total_memories} memories")
 52 |     print()
 53 | 
 54 |     if total_memories < 100:
 55 |         print("⚠️  Insufficient memories for meaningful benchmark (need 100+)")
 56 |         print("   Run with existing production database for accurate results")
 57 |         return None
 58 | 
 59 |     # Test 1: Individual queries (OLD METHOD - simulated)
 60 |     print("Test 1: Individual hash queries (old method - simulated)")
 61 |     test_count = min(100, total_memories)
 62 | 
 63 |     # Get sample hashes
 64 |     all_memories = await storage.get_all_memories(limit=test_count)
 65 |     test_hashes = [m.content_hash for m in all_memories[:test_count]]
 66 | 
 67 |     start = time.time()
 68 |     for content_hash in test_hashes:
 69 |         exists = await storage.get_by_hash(content_hash)
 70 |     individual_duration = (time.time() - start) * 1000
 71 | 
 72 |     print(f"   Checked {test_count} hashes individually: {individual_duration:.1f}ms")
 73 |     print(f"   Average: {individual_duration / test_count:.2f}ms per check")
 74 | 
 75 |     # Test 2: Bulk hash loading (NEW METHOD)
 76 |     print("\nTest 2: Bulk hash loading (new method)")
 77 |     start = time.time()
 78 |     all_hashes = await storage.get_all_content_hashes()
 79 |     bulk_duration = (time.time() - start) * 1000
 80 | 
 81 |     print(f"   Loaded {len(all_hashes)} hashes in bulk: {bulk_duration:.1f}ms")
 82 |     print(f"   Average lookup: O(1) constant time")
 83 | 
 84 |     # Calculate improvement
 85 |     speedup = individual_duration / bulk_duration if bulk_duration > 0 else 0
 86 |     print(f"\n📊 Results:")
 87 |     print(f"   Speedup: {speedup:.1f}x faster for {test_count} checks")
 88 |     print(f"   For 2,619 memories: {(individual_duration / test_count * 2619):.0f}ms → {bulk_duration:.0f}ms")
 89 |     print(f"   Time saved: {((individual_duration / test_count * 2619) - bulk_duration):.0f}ms")
 90 | 
 91 |     return BenchmarkResult(
 92 |         operation="bulk_existence_check",
 93 |         duration_ms=bulk_duration,
 94 |         memories_processed=len(all_hashes),
 95 |         memories_per_second=len(all_hashes) / (bulk_duration / 1000) if bulk_duration > 0 else 0,
 96 |         optimization_used="get_all_content_hashes()"
 97 |     )
 98 | 
 99 | async def benchmark_parallel_processing():
100 |     """Benchmark parallel vs sequential memory processing."""
101 |     print("\n" + "=" * 80)
102 |     print("BENCHMARK 2: Parallel Processing")
103 |     print("=" * 80)
104 | 
105 |     # Create test storage
106 |     storage = SqliteVecMemoryStorage(config.SQLITE_VEC_PATH)
107 |     await storage.initialize()
108 | 
109 |     # Create test memories (don't actually store them)
110 |     test_memories = []
111 |     for i in range(50):  # Test with 50 memories
112 |         test_memories.append(Memory(
113 |             content=f"Benchmark test memory {i} with some content for embedding generation",
114 |             content_hash=f"test_hash_{i}",
115 |             tags=["benchmark", "test"],
116 |             memory_type="test"
117 |         ))
118 | 
119 |     print(f"Testing with {len(test_memories)} memories")
120 |     print()
121 | 
122 |     # Test 1: Sequential processing (OLD METHOD - simulated)
123 |     print("Test 1: Sequential processing (old method - simulated)")
124 |     start = time.time()
125 | 
126 |     # Simulate sequential hash checks
127 |     local_hashes = await storage.get_all_content_hashes()
128 |     for memory in test_memories:
129 |         # Simulate existence check
130 |         exists = memory.content_hash in local_hashes
131 | 
132 |     sequential_duration = (time.time() - start) * 1000
133 | 
134 |     print(f"   Processed {len(test_memories)} memories sequentially: {sequential_duration:.1f}ms")
135 |     print(f"   Average: {sequential_duration / len(test_memories):.2f}ms per memory")
136 | 
137 |     # Test 2: Parallel processing (NEW METHOD - simulated)
138 |     print("\nTest 2: Parallel processing with Semaphore(15)")
139 | 
140 |     semaphore = asyncio.Semaphore(15)
141 | 
142 |     async def process_memory(memory):
143 |         async with semaphore:
144 |             exists = memory.content_hash in local_hashes
145 |             # Simulate some async work
146 |             await asyncio.sleep(0.001)
147 |             return exists
148 | 
149 |     start = time.time()
150 |     tasks = [process_memory(mem) for mem in test_memories]
151 |     await asyncio.gather(*tasks, return_exceptions=True)
152 |     parallel_duration = (time.time() - start) * 1000
153 | 
154 |     print(f"   Processed {len(test_memories)} memories in parallel: {parallel_duration:.1f}ms")
155 |     print(f"   Concurrency: Up to 15 simultaneous operations")
156 | 
157 |     # Calculate improvement
158 |     speedup = sequential_duration / parallel_duration if parallel_duration > 0 else 0
159 |     print(f"\n📊 Results:")
160 |     print(f"   Speedup: {speedup:.1f}x faster")
161 |     print(f"   For 2,619 memories: {(sequential_duration / len(test_memories) * 2619):.0f}ms → {(parallel_duration / len(test_memories) * 2619):.0f}ms")
162 | 
163 |     return BenchmarkResult(
164 |         operation="parallel_processing",
165 |         duration_ms=parallel_duration,
166 |         memories_processed=len(test_memories),
167 |         memories_per_second=len(test_memories) / (parallel_duration / 1000) if parallel_duration > 0 else 0,
168 |         optimization_used="asyncio.gather() + Semaphore(15)"
169 |     )
170 | 
171 | async def benchmark_batch_size():
172 |     """Benchmark impact of larger batch sizes on API calls."""
173 |     print("\n" + "=" * 80)
174 |     print("BENCHMARK 3: Batch Size Optimization")
175 |     print("=" * 80)
176 | 
177 |     total_memories = 2619  # Actual sync count from production
178 | 
179 |     # Old batch size
180 |     old_batch_size = 100
181 |     old_api_calls = (total_memories + old_batch_size - 1) // old_batch_size  # Ceiling division
182 |     old_overhead_ms = old_api_calls * 50  # Assume 50ms overhead per API call
183 | 
184 |     # New batch size
185 |     new_batch_size = 500
186 |     new_api_calls = (total_memories + new_batch_size - 1) // new_batch_size
187 |     new_overhead_ms = new_api_calls * 50
188 | 
189 |     print(f"Total memories to sync: {total_memories}")
190 |     print()
191 | 
192 |     print(f"Old method (batch_size=100):")
193 |     print(f"   API calls needed: {old_api_calls}")
194 |     print(f"   Network overhead: ~{old_overhead_ms}ms ({old_api_calls} × 50ms)")
195 | 
196 |     print(f"\nNew method (batch_size=500):")
197 |     print(f"   API calls needed: {new_api_calls}")
198 |     print(f"   Network overhead: ~{new_overhead_ms}ms ({new_api_calls} × 50ms)")
199 | 
200 |     reduction = old_api_calls - new_api_calls
201 |     time_saved = old_overhead_ms - new_overhead_ms
202 | 
203 |     print(f"\n📊 Results:")
204 |     print(f"   API calls reduced: {reduction} fewer calls ({reduction / old_api_calls * 100:.1f}% reduction)")
205 |     print(f"   Time saved: ~{time_saved}ms on network overhead alone")
206 | 
207 |     return BenchmarkResult(
208 |         operation="batch_size_optimization",
209 |         duration_ms=new_overhead_ms,
210 |         memories_processed=total_memories,
211 |         memories_per_second=total_memories / (new_overhead_ms / 1000) if new_overhead_ms > 0 else 0,
212 |         optimization_used="batch_size=500 (5x larger)"
213 |     )
214 | 
215 | async def main():
216 |     """Run all benchmarks."""
217 |     print("=" * 80)
218 |     print("HYBRID STORAGE SYNC PERFORMANCE BENCHMARK (v8.27.0)")
219 |     print("=" * 80)
220 |     print()
221 |     print("Testing optimizations:")
222 |     print("  1. Bulk existence checking (get_all_content_hashes)")
223 |     print("  2. Parallel processing with asyncio.gather")
224 |     print("  3. Larger batch sizes (100 → 500)")
225 |     print()
226 | 
227 |     results = []
228 | 
229 |     try:
230 |         # Run benchmarks
231 |         result1 = await benchmark_bulk_existence_check()
232 |         if result1:
233 |             results.append(result1)
234 | 
235 |         result2 = await benchmark_parallel_processing()
236 |         if result2:
237 |             results.append(result2)
238 | 
239 |         result3 = await benchmark_batch_size()
240 |         if result3:
241 |             results.append(result3)
242 | 
243 |         # Summary
244 |         print("\n" + "=" * 80)
245 |         print("OVERALL PERFORMANCE SUMMARY")
246 |         print("=" * 80)
247 | 
248 |         print("\nOptimization Impact:")
249 |         for result in results:
250 |             print(f"  • {result.operation}: {result.optimization_used}")
251 | 
252 |         print("\nEstimated Combined Speedup:")
253 |         print("  • Before: ~8 minutes for 2,619 memories (~5.5 mem/sec)")
254 |         print("  • After:  ~1.5-3 minutes estimated (~15-30 mem/sec)")
255 |         print("  • Overall: 3-5x faster initial sync")
256 | 
257 |         print("\nKey Improvements:")
258 |         print("  ✅ Eliminated 2,619 individual DB queries → single bulk load")
259 |         print("  ✅ Up to 15x parallelism for CPU/embedding generation")
260 |         print("  ✅ 5x fewer Cloudflare API calls (6 vs 27)")
261 | 
262 |         print("\n" + "=" * 80)
263 |         print("✅ Benchmark completed successfully")
264 |         print("=" * 80)
265 | 
266 |         return 0
267 | 
268 |     except Exception as e:
269 |         print(f"\n❌ Benchmark failed: {e}")
270 |         import traceback
271 |         traceback.print_exc()
272 |         return 1
273 | 
274 | if __name__ == "__main__":
275 |     sys.exit(asyncio.run(main()))
276 | 
```

--------------------------------------------------------------------------------
/archive/docs-removed-2025-08-23/macos-intel.md:
--------------------------------------------------------------------------------

```markdown
  1 | # macOS Intel Setup Guide
  2 | 
  3 | This guide addresses the specific challenges of running MCP Memory Service on Intel-based Mac systems, including both legacy (2013-2017) and modern (2018+) Intel Macs.
  4 | 
  5 | ## Hardware Profiles
  6 | 
  7 | ### Legacy Intel Macs (2013-2017)
  8 | **Target Hardware**: 2015 MacBook Pro and older Intel Macs without a dedicated GPU  
  9 | **Optimization**: Maximum compatibility, minimal resource usage  
 10 | **Recommended Backend**: SQLite-vec with ONNX runtime
 11 | 
 12 | **Typical specs this applies to:**
 13 | - MacBook Pro (15-inch, Mid 2015)
 14 | - MacBook Pro (13-inch, Early 2015)
 15 | - MacBook Air (11-inch/13-inch, 2013-2017)
 16 | - iMac (21.5-inch/27-inch, 2013-2017) with integrated graphics
 17 | 
 18 | ### Modern Intel Macs (2018+)
 19 | **Target Hardware**: 2018+ Intel Macs with better GPU support  
 20 | **Optimization**: Balanced performance and compatibility  
 21 | **Recommended Backend**: ChromaDB with CPU optimization
 22 | 
 23 | ## Why Special Setup is Needed
 24 | 
 25 | Intel-based Mac systems require special consideration for several reasons:
 26 | 
 27 | 1. **PyTorch Compatibility**: PyTorch has moved toward optimizing for Apple Silicon, with some compatibility challenges on Intel Macs
 28 | 2. **NumPy Version Conflicts**: Newer NumPy 2.x can cause compatibility issues with other ML libraries
 29 | 3. **Python Version Sensitivity**: Python 3.13+ has introduced breaking changes that affect ML libraries
 30 | 4. **Memory Constraints**: Limited RAM on older systems requires careful resource management
 31 | 5. **ChromaDB Installation Issues**: Complex dependencies often fail on older systems
 32 | 
 33 | ## Installation
 34 | 
 35 | ### Prerequisites
 36 | 
 37 | - Python 3.10 (recommended for best compatibility)
 38 | - Git to clone the repository
 39 | - Xcode Command Line Tools: `xcode-select --install`
 40 | 
 41 | ### Automatic Installation (Recommended)
 42 | 
 43 | The installer automatically detects Intel Mac hardware:
 44 | 
 45 | ```bash
 46 | git clone https://github.com/doobidoo/mcp-memory-service.git
 47 | cd mcp-memory-service
 48 | 
 49 | # For legacy hardware (2013-2017)
 50 | python install.py --legacy-hardware
 51 | 
 52 | # For modern Intel Macs (2018+)
 53 | python install.py --intel-mac
 54 | ```
 55 | 
 56 | ### Manual Installation
 57 | 
 58 | If you prefer manual control:
 59 | 
 60 | #### 1. Environment Setup
 61 | 
 62 | ```bash
 63 | # Clone repository
 64 | git clone https://github.com/doobidoo/mcp-memory-service.git
 65 | cd mcp-memory-service
 66 | 
 67 | # Create Python 3.10 virtual environment
 68 | python3.10 -m venv venv_py310
 69 | source venv_py310/bin/activate
 70 | 
 71 | # Upgrade pip
 72 | pip install --upgrade pip
 73 | ```
 74 | 
 75 | #### 2. Install Dependencies
 76 | 
 77 | For **Legacy Intel Macs (2013-2017)**:
 78 | 
 79 | ```bash
 80 | # Install with SQLite-vec backend
 81 | pip install -e .
 82 | pip install sentence-transformers onnx onnxruntime
 83 | 
 84 | # Downgrade NumPy for compatibility
 85 | pip uninstall -y numpy
 86 | pip install numpy==1.25.2
 87 | 
 88 | # Configure for SQLite-vec
 89 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 90 | export MCP_MEMORY_USE_ONNX=true
 91 | ```
 92 | 
 93 | For **Modern Intel Macs (2018+)**:
 94 | 
 95 | ```bash
 96 | # Install with ChromaDB support
 97 | pip install -e .
 98 | pip install chromadb sentence-transformers
 99 | 
100 | # Install CPU-optimized PyTorch
101 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
102 | 
103 | # Configure for ChromaDB
104 | export MCP_MEMORY_STORAGE_BACKEND=chromadb
105 | ```
106 | 
107 | ### Hardware Detection
108 | 
109 | The installer automatically detects legacy hardware by checking:
110 | 
111 | ```python
112 | # Illustrative pseudocode: year_of_hardware and has_dedicated_gpu are derived by the installer
113 | is_legacy_mac = (
114 |     platform.system() == "Darwin" and           # macOS
115 |     platform.machine() in ("x86_64", "x64") and # Intel processor
116 |     year_of_hardware < 2018 and                 # Pre-2018 models
117 |     not has_dedicated_gpu                       # No discrete GPU
118 | )
119 | ```
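
The `year_of_hardware` and `has_dedicated_gpu` values above are placeholders the installer computes. A minimal runnable approximation (the model-identifier set is illustrative, not the installer's actual list):

```python
import platform
import subprocess

def is_intel_mac() -> bool:
    return platform.system() == "Darwin" and platform.machine() in ("x86_64", "x64")

def mac_model_identifier() -> str:
    # e.g. "MacBookPro12,1" for an early-2015 13-inch MacBook Pro
    return subprocess.check_output(["sysctl", "-n", "hw.model"], text=True).strip()

LEGACY_MODELS = {"MacBookPro11,4", "MacBookPro12,1", "MacBookAir7,2"}  # illustrative

def is_legacy_mac() -> bool:
    return is_intel_mac() and mac_model_identifier() in LEGACY_MODELS
```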
120 | 
121 | ## Configuration
122 | 
123 | ### Environment Variables
124 | 
125 | #### For Legacy Intel Macs
126 | 
127 | ```bash
128 | # Core configuration
129 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
130 | export MCP_MEMORY_USE_ONNX=true
131 | export MCP_MEMORY_SQLITE_VEC_PATH="$HOME/.mcp_memory_sqlite"
132 | 
133 | # Performance optimization
134 | export MCP_MEMORY_CPU_ONLY=true
135 | export MCP_MEMORY_MAX_MEMORY_MB=2048
136 | export MCP_MEMORY_SENTENCE_TRANSFORMER_MODEL="all-MiniLM-L6-v2"
137 | 
138 | # Compatibility settings
139 | export PYTORCH_ENABLE_MPS_FALLBACK=1
140 | export MCP_MEMORY_USE_ONNX_RUNTIME=true
141 | ```
142 | 
143 | #### For Modern Intel Macs
144 | 
145 | ```bash
146 | # Core configuration
147 | export MCP_MEMORY_STORAGE_BACKEND=chromadb
148 | export MCP_MEMORY_CHROMA_PATH="$HOME/.mcp_memory_chroma"
149 | 
150 | # Performance optimization
151 | export MCP_MEMORY_CPU_OPTIMIZATION=true
152 | export MCP_MEMORY_SENTENCE_TRANSFORMER_MODEL="all-MiniLM-L12-v2"
153 | 
154 | # Intel-specific settings
155 | export MKL_NUM_THREADS=4
156 | export OMP_NUM_THREADS=4
157 | ```
158 | 
159 | ### Claude Desktop Configuration
160 | 
161 | #### Legacy Intel Mac Configuration
162 | 
163 | ```json
164 | {
165 |   "mcpServers": {
166 |     "memory": {
167 |       "command": "python",
168 |       "args": ["/path/to/mcp-memory-service/scripts/legacy_intel_mac/run_mcp_memory.sh"],
169 |       "env": {
170 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec",
171 |         "MCP_MEMORY_USE_ONNX": "true",
172 |         "MCP_MEMORY_CPU_ONLY": "true"
173 |       }
174 |     }
175 |   }
176 | }
177 | ```
178 | 
179 | #### Modern Intel Mac Configuration
180 | 
181 | ```json
182 | {
183 |   "mcpServers": {
184 |     "memory": {
185 |       "command": "python",
186 |       "args": ["/path/to/mcp-memory-service/src/mcp_memory_service/server.py"],
187 |       "env": {
188 |         "MCP_MEMORY_STORAGE_BACKEND": "chromadb",
189 |         "MCP_MEMORY_CPU_OPTIMIZATION": "true"
190 |       }
191 |     }
192 |   }
193 | }
194 | ```
195 | 
196 | ## Provided Scripts
197 | 
198 | The repository includes several Intel Mac-specific scripts:
199 | 
200 | ### Legacy Intel Mac Scripts
201 | 
202 | - `scripts/legacy_intel_mac/run_mcp_memory.sh` - Standard startup script
203 | - `scripts/legacy_intel_mac/run_mcp_memory_foreground.sh` - Foreground mode with debugging
204 | - `scripts/legacy_intel_mac/start_memory_for_claude.sh` - Claude-optimized startup
205 | 
206 | ### Usage Examples
207 | 
208 | ```bash
209 | # For foreground mode (shows all output, can be stopped with Ctrl+C)
210 | ./scripts/legacy_intel_mac/run_mcp_memory_foreground.sh
211 | 
212 | # For background mode (runs in background, logs to file)
213 | ./scripts/legacy_intel_mac/run_mcp_memory.sh
214 | 
215 | # For Claude Desktop integration
216 | ./scripts/legacy_intel_mac/start_memory_for_claude.sh
217 | ```
218 | 
219 | ## Performance Optimization
220 | 
221 | ### For Legacy Intel Macs
222 | 
223 | 1. **Use SQLite-vec Backend**: Lighter weight than ChromaDB
224 | 2. **ONNX Runtime**: CPU-optimized inference
225 | 3. **Memory Management**: Limited model loading and caching
226 | 4. **Smaller Models**: Use compact sentence transformer models
227 | 
228 | ```bash
229 | # Optimization settings
230 | export MCP_MEMORY_BATCH_SIZE=16
231 | export MCP_MEMORY_CACHE_SIZE=100
232 | export MCP_MEMORY_MODEL_CACHE_SIZE=1
233 | ```
234 | 
235 | ### For Modern Intel Macs
236 | 
237 | 1. **CPU Optimization**: Multi-threaded processing
238 | 2. **Intelligent Caching**: Larger cache sizes
239 | 3. **Better Models**: Higher quality embeddings
240 | 
241 | ```bash
242 | # Performance tuning
243 | export MCP_MEMORY_BATCH_SIZE=32
244 | export MCP_MEMORY_CACHE_SIZE=1000
245 | export MCP_MEMORY_MODEL_CACHE_SIZE=3
246 | ```
247 | 
248 | ## Troubleshooting
249 | 
250 | ### Common Issues
251 | 
252 | #### 1. NumPy Compatibility Errors
253 | 
254 | **Symptom**: 
255 | ```
256 | AttributeError: module 'numpy' has no attribute 'float'
257 | ```
258 | 
259 | **Solution**:
260 | ```bash
261 | pip uninstall -y numpy
262 | pip install numpy==1.25.2
263 | ```
264 | 
265 | #### 2. PyTorch Installation Issues
266 | 
267 | **Symptom**: PyTorch fails to install or import
268 | 
269 | **Solution**:
270 | ```bash
271 | # For legacy Macs - use CPU-only PyTorch
272 | pip uninstall torch torchvision torchaudio
273 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
274 | 
275 | # Set fallback environment variable
276 | export PYTORCH_ENABLE_MPS_FALLBACK=1
277 | ```
278 | 
279 | #### 3. ChromaDB Installation Failures
280 | 
281 | **Symptom**: ChromaDB dependency issues on legacy hardware
282 | 
283 | **Solution**: Switch to SQLite-vec backend:
284 | ```bash
285 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
286 | python install.py --storage-backend sqlite_vec
287 | ```
288 | 
289 | #### 4. Memory Issues
290 | 
291 | **Symptom**: Out of memory errors during embedding generation
292 | 
293 | **Solution**: Reduce batch size and enable memory optimization:
294 | ```bash
295 | export MCP_MEMORY_BATCH_SIZE=8
296 | export MCP_MEMORY_MAX_MEMORY_MB=1024
297 | export MCP_MEMORY_LOW_MEMORY_MODE=true
298 | ```
299 | 
300 | ### Diagnostic Commands
301 | 
302 | #### System Information
303 | 
304 | ```bash
305 | # Check macOS version
306 | sw_vers
307 | 
308 | # Check available memory
309 | system_profiler SPMemoryDataType | grep Size
310 | 
311 | # Check CPU information
312 | sysctl -n machdep.cpu.brand_string
313 | 
314 | # Check Python version and location
315 | python --version
316 | which python
317 | ```
318 | 
319 | #### Environment Verification
320 | 
321 | ```bash
322 | # Check virtual environment
323 | echo $VIRTUAL_ENV
324 | 
325 | # Verify key packages
326 | python -c "import torch; print(f'PyTorch: {torch.__version__}')"
327 | python -c "import sentence_transformers; print('SentenceTransformers: OK')"
328 | python -c "import sqlite3; print('SQLite3: OK')"
329 | 
330 | # Test ONNX runtime (for legacy Macs)
331 | python -c "import onnxruntime; print(f'ONNX Runtime: {onnxruntime.__version__}')"
332 | ```
333 | 
334 | #### Server Testing
335 | 
336 | ```bash
337 | # Test server startup
338 | python scripts/verify_environment.py
339 | 
340 | # Test memory operations
341 | python -c "
342 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecStorage
343 | storage = SqliteVecStorage()
344 | print('Storage backend: OK')
345 | "
346 | 
347 | # Test embedding generation
348 | python -c "
349 | from sentence_transformers import SentenceTransformer
350 | model = SentenceTransformer('all-MiniLM-L6-v2')
351 | embedding = model.encode(['test'])
352 | print(f'Embedding generated: {len(embedding[0])} dimensions')
353 | "
354 | ```
355 | 
356 | ## Homebrew Integration
357 | 
358 | For Intel Macs with Homebrew-installed PyTorch, see the dedicated [Homebrew Integration Guide](../integration/homebrew.md).
359 | 
360 | ## Performance Benchmarks
361 | 
362 | ### Typical Performance (Legacy Intel Mac)
363 | 
364 | - **Memory Storage**: ~100ms per memory
365 | - **Search Operations**: ~200ms for 100 memories
366 | - **Embedding Generation**: ~500ms for short text
367 | - **Memory Usage**: ~200MB baseline
368 | 
369 | ### Typical Performance (Modern Intel Mac)
370 | 
371 | - **Memory Storage**: ~50ms per memory
372 | - **Search Operations**: ~100ms for 1000 memories
373 | - **Embedding Generation**: ~200ms for short text
374 | - **Memory Usage**: ~400MB baseline
375 | 
376 | ## Related Documentation
377 | 
378 | - [Installation Guide](../installation/master-guide.md) - General installation instructions
379 | - [Homebrew Integration](../integration/homebrew.md) - Homebrew PyTorch setup
380 | - [Troubleshooting](../troubleshooting/general.md) - macOS-specific troubleshooting
381 | - [Performance Tuning](../implementation/performance.md) - Performance optimization guide
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/web/api/sync.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Sync management endpoints for hybrid backend.
 17 | 
 18 | Provides status monitoring and manual sync triggering for hybrid storage mode.
 19 | """
 20 | 
 21 | from typing import Dict, Any, TYPE_CHECKING
 22 | from datetime import datetime, timezone
 23 | 
 24 | from fastapi import APIRouter, HTTPException, Depends
 25 | from pydantic import BaseModel
 26 | 
 27 | from ...storage.base import MemoryStorage
 28 | from ..dependencies import get_storage
 29 | from ...config import OAUTH_ENABLED
 30 | 
 31 | # OAuth authentication imports (conditional)
 32 | if OAUTH_ENABLED or TYPE_CHECKING:
 33 |     from ..oauth.middleware import require_read_access, require_write_access, AuthenticationResult
 34 | else:
 35 |     # Provide type stubs when OAuth is disabled
 36 |     AuthenticationResult = None
 37 |     require_read_access = None
 38 |     require_write_access = None
 39 | 
 40 | router = APIRouter()
 41 | 
 42 | 
 43 | class SyncStatusResponse(BaseModel):
 44 |     """Sync status response model."""
 45 |     is_hybrid: bool
 46 |     is_running: bool
 47 |     is_paused: bool
 48 |     last_sync_time: float
 49 |     operations_pending: int
 50 |     operations_processed: int
 51 |     operations_failed: int
 52 |     sync_interval_seconds: int
 53 |     time_since_last_sync_seconds: float
 54 |     next_sync_eta_seconds: float
 55 |     status: str  # 'synced', 'syncing', 'pending', 'error'
 56 | 
 57 | 
 58 | class SyncForceResponse(BaseModel):
 59 |     """Force sync response model."""
 60 |     success: bool
 61 |     message: str
 62 |     operations_synced: int
 63 |     memories_pulled: int
 64 |     time_taken_seconds: float
 65 |     timestamp: str
 66 | 
 67 | 
 68 | @router.get("/sync/status", response_model=SyncStatusResponse)
 69 | async def get_sync_status(
 70 |     storage: MemoryStorage = Depends(get_storage),
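    # When OAUTH_ENABLED is False, the dependency expression below evaluates to
    # None, so no authentication requirement is applied to this endpoint.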
 71 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
 72 | ):
 73 |     """
 74 |     Get current sync status for hybrid backend.
 75 | 
 76 |     Returns sync state, pending operations, last sync time, and health metrics.
 77 |     Only available when using hybrid storage backend.
 78 |     """
 79 |     # Check if storage supports sync (hybrid mode only)
 80 |     if not hasattr(storage, 'get_sync_status'):
 81 |         return SyncStatusResponse(
 82 |             is_hybrid=False,
 83 |             is_running=False,
 84 |             is_paused=False,
 85 |             last_sync_time=0,
 86 |             operations_pending=0,
 87 |             operations_processed=0,
 88 |             operations_failed=0,
 89 |             sync_interval_seconds=0,
 90 |             time_since_last_sync_seconds=0,
 91 |             next_sync_eta_seconds=0,
 92 |             status='not_hybrid'
 93 |         )
 94 | 
 95 |     try:
 96 |         # Get sync status from hybrid backend
 97 |         sync_status = await storage.get_sync_status()
 98 | 
 99 |         # Calculate time since last sync
100 |         import time
101 |         current_time = time.time()
102 |         last_sync = sync_status.get('last_sync_time', 0)
103 |         time_since_sync = current_time - last_sync if last_sync > 0 else 0
104 | 
105 |         # Calculate ETA for next sync
106 |         sync_interval = sync_status.get('sync_interval', 300)
107 |         next_sync_eta = max(0, sync_interval - time_since_sync)
108 | 
109 |         # Determine status
110 |         is_running = sync_status.get('is_running', False)
111 |         pending_ops = sync_status.get('pending_operations', 0)
112 |         actively_syncing = sync_status.get('actively_syncing', False)  # True only during active sync
113 | 
114 |         if actively_syncing:
115 |             status = 'syncing'
116 |         elif pending_ops > 0:
117 |             status = 'pending'
118 |         elif sync_status.get('operations_failed', 0) > 0:
119 |             status = 'error'
120 |         else:
121 |             status = 'synced'
122 | 
123 |         return SyncStatusResponse(
124 |             is_hybrid=True,
125 |             is_running=is_running,
126 |             is_paused=sync_status.get('is_paused', not is_running),
127 |             last_sync_time=last_sync,
128 |             operations_pending=pending_ops,
129 |             operations_processed=sync_status.get('operations_processed', 0),
130 |             operations_failed=sync_status.get('operations_failed', 0),
131 |             sync_interval_seconds=sync_interval,
132 |             time_since_last_sync_seconds=time_since_sync,
133 |             next_sync_eta_seconds=next_sync_eta,
134 |             status=status
135 |         )
136 | 
137 |     except Exception as e:
138 |         raise HTTPException(status_code=500, detail=f"Failed to get sync status: {str(e)}")
139 | 
140 | 
141 | @router.post("/sync/force", response_model=SyncForceResponse)
142 | async def force_sync(
143 |     storage: MemoryStorage = Depends(get_storage),
144 |     user: AuthenticationResult = Depends(require_write_access) if OAUTH_ENABLED else None
145 | ):
146 |     """
147 |     Manually trigger immediate bi-directional sync with Cloudflare.
148 | 
149 |     Performs BOTH directions:
150 |     1. PULL: Download new memories FROM Cloudflare TO local SQLite
151 |     2. PUSH: Upload pending operations FROM local TO Cloudflare
152 | 
153 |     This ensures complete synchronization between both backends.
154 |     Only available when using hybrid storage backend.
155 |     """
156 |     # Check if storage supports force sync (hybrid mode only)
157 |     if not hasattr(storage, 'force_sync'):
158 |         raise HTTPException(
159 |             status_code=404,
160 |             detail="Manual sync only available in hybrid mode"
161 |         )
162 | 
163 |     try:
164 |         import time
165 |         start_time = time.time()
166 | 
167 |         # Step 1: Pull FROM Cloudflare TO local (if method exists)
168 |         memories_pulled = 0
169 |         pull_message = ""
170 |         pull_result = None
171 |         if hasattr(storage, 'force_pull_sync'):
172 |             pull_result = await storage.force_pull_sync()
173 |             memories_pulled = pull_result.get('memories_pulled', 0)
174 |             pull_message = pull_result.get('message', '')
175 | 
176 |         # Step 2: Push FROM local TO Cloudflare (existing behavior)
177 |         push_result = await storage.force_sync()
178 |         operations_synced = push_result.get('operations_synced', 0)
179 |         push_message = push_result.get('message', 'Sync completed')
180 | 
181 |         # Check success flags from both operations
182 |         pull_success = pull_result.get('success', True) if pull_result else True
183 |         push_success = push_result.get('success', False)
184 |         overall_success = pull_success and push_success
185 | 
186 |         time_taken = time.time() - start_time
187 | 
188 |         # Combine messages
189 |         if memories_pulled > 0 and operations_synced > 0:
190 |             combined_message = f"Pulled {memories_pulled} from Cloudflare, pushed {operations_synced} to Cloudflare"
191 |         elif memories_pulled > 0:
192 |             combined_message = f"Pulled {memories_pulled} from Cloudflare"
193 |         elif operations_synced > 0:
194 |             combined_message = f"Pushed {operations_synced} to Cloudflare"
195 |         else:
196 |             combined_message = "No changes to sync (already synchronized)"
197 | 
198 |         return SyncForceResponse(
199 |             success=overall_success,
200 |             message=combined_message,
201 |             operations_synced=operations_synced,
202 |             memories_pulled=memories_pulled,
203 |             time_taken_seconds=round(time_taken, 3),
204 |             timestamp=datetime.now(timezone.utc).isoformat()
205 |         )
206 | 
207 |     except Exception as e:
208 |         raise HTTPException(
209 |             status_code=500,
210 |             detail=f"Failed to force sync: {str(e)}"
211 |         )
212 | 
213 | 
214 | class SyncPauseResponse(BaseModel):
215 |     """Pause/resume sync response model."""
216 |     success: bool
217 |     message: str
218 |     is_paused: bool
219 |     timestamp: str
220 | 
221 | 
222 | @router.post("/sync/pause", response_model=SyncPauseResponse)
223 | async def pause_sync(
224 |     storage: MemoryStorage = Depends(get_storage),
225 |     user: AuthenticationResult = Depends(require_write_access) if OAUTH_ENABLED else None
226 | ):
227 |     """
228 |     Pause background sync operations.
229 | 
230 |     Pauses the background sync service to allow safe database operations.
231 |     Sync will resume when resume_sync is called.
232 |     Only available when using hybrid storage backend.
233 |     """
234 |     # Check if storage supports pause/resume (hybrid mode only)
235 |     if not hasattr(storage, 'pause_sync'):
236 |         raise HTTPException(
237 |             status_code=404,
238 |             detail="Pause sync only available in hybrid mode"
239 |         )
240 | 
241 |     try:
242 |         result = await storage.pause_sync()
243 | 
244 |         return SyncPauseResponse(
245 |             success=result.get('success', True),
246 |             message=result.get('message', 'Sync paused'),
247 |             is_paused=True,
248 |             timestamp=datetime.now(timezone.utc).isoformat()
249 |         )
250 | 
251 |     except Exception as e:
252 |         raise HTTPException(
253 |             status_code=500,
254 |             detail=f"Failed to pause sync: {str(e)}"
255 |         )
256 | 
257 | 
258 | @router.post("/sync/resume", response_model=SyncPauseResponse)
259 | async def resume_sync(
260 |     storage: MemoryStorage = Depends(get_storage),
261 |     user: AuthenticationResult = Depends(require_write_access) if OAUTH_ENABLED else None
262 | ):
263 |     """
264 |     Resume background sync operations.
265 | 
266 |     Resumes the background sync service after it was paused.
267 |     Only available when using hybrid storage backend.
268 |     """
269 |     # Check if storage supports pause/resume (hybrid mode only)
270 |     if not hasattr(storage, 'resume_sync'):
271 |         raise HTTPException(
272 |             status_code=404,
273 |             detail="Resume sync only available in hybrid mode"
274 |         )
275 | 
276 |     try:
277 |         result = await storage.resume_sync()
278 | 
279 |         return SyncPauseResponse(
280 |             success=result.get('success', True),
281 |             message=result.get('message', 'Sync resumed'),
282 |             is_paused=False,
283 |             timestamp=datetime.now(timezone.utc).isoformat()
284 |         )
285 | 
286 |     except Exception as e:
287 |         raise HTTPException(
288 |             status_code=500,
289 |             detail=f"Failed to resume sync: {str(e)}"
290 |         )
291 | 
```

--------------------------------------------------------------------------------
/tests/integration/test_bridge_integration.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Integration Tests for HTTP-MCP Bridge
  3 |  * 
  4 |  * These tests verify the bridge works correctly with a real server
  5 |  * or a mock server that accurately simulates real behavior.
  6 |  */
  7 | 
  8 | const assert = require('assert');
  9 | const http = require('http');
 10 | const https = require('https');
 11 | const path = require('path');
 12 | const HTTPMCPBridge = require(path.join(__dirname, '../../examples/http-mcp-bridge.js'));
 13 | const { mockResponses, createMockResponse } = require(path.join(__dirname, '../bridge/mock_responses.js'));
 14 | 
 15 | describe('Bridge-Server Integration', () => {
 16 |     let bridge;
 17 |     let testServer;
 18 |     let serverPort;
 19 |     
 20 |     before(async () => {
 21 |         // Create a test server that mimics real API behavior
 22 |         await startTestServer();
 23 |     });
 24 |     
 25 |     after(async () => {
 26 |         if (testServer) {
 27 |             await new Promise(resolve => testServer.close(resolve));
 28 |         }
 29 |     });
 30 |     
 31 |     beforeEach(() => {
 32 |         bridge = new HTTPMCPBridge();
 33 |         bridge.endpoint = `http://localhost:${serverPort}/api`;
 34 |         bridge.apiKey = 'test-api-key';
 35 |     });
 36 |     
 37 |     async function startTestServer() {
 38 |         return new Promise((resolve) => {
 39 |             testServer = http.createServer((req, res) => {
 40 |                 let body = '';
 41 |                 
 42 |                 req.on('data', chunk => {
 43 |                     body += chunk.toString();
 44 |                 });
 45 |                 
 46 |                 req.on('end', () => {
 47 |                     handleRequest(req, res, body);
 48 |                 });
 49 |             });
 50 |             
 51 |             testServer.listen(0, 'localhost', () => {
 52 |                 serverPort = testServer.address().port;
 53 |                 console.log(`Test server started on port ${serverPort}`);
 54 |                 resolve();
 55 |             });
 56 |         });
 57 |     }
 58 |     
 59 |     function handleRequest(req, res, body) {
 60 |         const url = req.url;
 61 |         const method = req.method;
 62 |         
 63 |         // Verify API key
 64 |         if (req.headers.authorization !== 'Bearer test-api-key') {
 65 |             res.writeHead(401, { 'Content-Type': 'application/json' });
 66 |             res.end(JSON.stringify({ detail: 'Unauthorized' }));
 67 |             return;
 68 |         }
 69 |         
 70 |         // Route requests
 71 |         if (url === '/api/health' && method === 'GET') {
 72 |             const response = mockResponses.health.healthy;
 73 |             res.writeHead(response.status, { 'Content-Type': 'application/json' });
 74 |             res.end(JSON.stringify(response.body));
 75 |         } else if (url === '/api/memories' && method === 'POST') {
 76 |             try {
 77 |                 const data = JSON.parse(body);
 78 |                 
 79 |                 // Simulate duplicate detection
 80 |                 if (data.content === 'duplicate-content') {
 81 |                     const response = mockResponses.memories.duplicate;
 82 |                     res.writeHead(response.status, { 'Content-Type': 'application/json' });
 83 |                     res.end(JSON.stringify(response.body));
 84 |                 } else {
 85 |                     const response = mockResponses.memories.createSuccess;
 86 |                     res.writeHead(response.status, { 'Content-Type': 'application/json' });
 87 |                     res.end(JSON.stringify(response.body));
 88 |                 }
 89 |             } catch (e) {
 90 |                 res.writeHead(400, { 'Content-Type': 'application/json' });
 91 |                 res.end(JSON.stringify({ detail: 'Invalid JSON' }));
 92 |             }
 93 |         } else if (url.startsWith('/api/search') && method === 'GET') {
 94 |             const response = mockResponses.search.withResults;
 95 |             res.writeHead(response.status, { 'Content-Type': 'application/json' });
 96 |             res.end(JSON.stringify(response.body));
 97 |         } else if (url === '/health' && method === 'GET') {
 98 |             // This is the WRONG endpoint - should return 404
 99 |             res.writeHead(404, { 'Content-Type': 'application/json' });
100 |             res.end(JSON.stringify({ detail: 'Not Found' }));
101 |         } else {
102 |             res.writeHead(404, { 'Content-Type': 'application/json' });
103 |             res.end(JSON.stringify({ detail: 'Not Found' }));
104 |         }
105 |     }
106 |     
107 |     describe('Critical Bug Scenarios', () => {
108 |         it('should use /api/health not /health for health checks', async () => {
109 |             const result = await bridge.checkHealth();
110 |             assert.strictEqual(result.status, 'healthy');
111 |             assert.strictEqual(result.backend, 'sqlite_vec');
112 |         });
113 |         
114 |         it('should handle HTTP 200 with success field for memory storage', async () => {
115 |             const result = await bridge.storeMemory({
116 |                 content: 'Test memory content',
117 |                 metadata: { tags: ['test'] }
118 |             });
119 |             
120 |             assert.strictEqual(result.success, true);
121 |             assert.strictEqual(result.message, 'Memory stored successfully');
122 |         });
123 |         
124 |         it('should handle duplicate detection with HTTP 200 and success=false', async () => {
125 |             const result = await bridge.storeMemory({
126 |                 content: 'duplicate-content',
127 |                 metadata: { tags: ['test'] }
128 |             });
129 |             
130 |             assert.strictEqual(result.success, false);
131 |             assert.strictEqual(result.message, 'Duplicate content detected');
132 |         });
133 |         
134 |         it('should construct URLs correctly with /api base path', async () => {
135 |             // This would have failed with the old URL construction bug
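            // (Resolving an absolute path like '/health' against a base URL such as
            // 'http://host:port/api' yields 'http://host:port/health', silently
            // dropping the /api prefix — the kind of mistake this test guards against.)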
136 |             const result = await bridge.retrieveMemory({
137 |                 query: 'test',
138 |                 n_results: 5
139 |             });
140 |             
141 |             assert(Array.isArray(result.memories));
142 |             assert(result.memories.length > 0);
143 |         });
144 |     });
145 |     
146 |     describe('End-to-End MCP Protocol Flow', () => {
147 |         it('should handle complete MCP session', async () => {
148 |             // 1. Initialize
149 |             let response = await bridge.processRequest({
150 |                 method: 'initialize',
151 |                 params: {},
152 |                 id: 1
153 |             });
154 |             assert.strictEqual(response.result.protocolVersion, '2024-11-05');
155 |             
156 |             // 2. Get tools list
157 |             response = await bridge.processRequest({
158 |                 method: 'tools/list',
159 |                 params: {},
160 |                 id: 2
161 |             });
162 |             assert(response.result.tools.length > 0);
163 |             
164 |             // 3. Store a memory
165 |             response = await bridge.processRequest({
166 |                 method: 'tools/call',
167 |                 params: {
168 |                     name: 'store_memory',
169 |                     arguments: {
170 |                         content: 'Integration test memory',
171 |                         metadata: { tags: ['test', 'integration'] }
172 |                     }
173 |                 },
174 |                 id: 3
175 |             });
176 |             const result = JSON.parse(response.result.content[0].text);
177 |             assert.strictEqual(result.success, true);
178 |             
179 |             // 4. Check health
180 |             response = await bridge.processRequest({
181 |                 method: 'tools/call',
182 |                 params: {
183 |                     name: 'check_database_health',
184 |                     arguments: {}
185 |                 },
186 |                 id: 4
187 |             });
188 |             const health = JSON.parse(response.result.content[0].text);
189 |             assert.strictEqual(health.status, 'healthy');
190 |         });
191 |     });
192 |     
193 |     describe('Error Recovery', () => {
194 |         it('should handle server unavailability gracefully', async () => {
195 |             // Point to non-existent server (using port 9999 instead of 99999 which is invalid)
196 |             bridge.endpoint = 'http://localhost:9999/api';
197 | 
198 |             const result = await bridge.checkHealth();
199 |             assert.strictEqual(result.status, 'error');
200 |             // The error message should indicate connection failure or invalid URL
201 |             assert(result.error && (
202 |                 result.error.includes('ECONNREFUSED') ||
203 |                 result.error.includes('EADDRNOTAVAIL') ||
204 |                 result.error.includes('connect') ||
205 |                 result.error.includes('ENOTFOUND') ||
206 |                 result.error.includes('Invalid URL') || // This can happen with invalid ports
207 |                 result.error.includes('ETIMEDOUT')
208 |             ), `Expected connection error but got: ${result.error}`);
209 |         });
210 |         
211 |         it('should handle malformed responses', async () => {
212 |             // Create a server that returns invalid JSON
213 |             const badServer = http.createServer((req, res) => {
214 |                 res.writeHead(200, { 'Content-Type': 'application/json' });
215 |                 res.end('This is not JSON');
216 |             });
217 |             
218 |             await new Promise(resolve => {
219 |                 badServer.listen(0, 'localhost', resolve);
220 |             });
221 |             
222 |             const badPort = badServer.address().port;
223 |             bridge.endpoint = `http://localhost:${badPort}/api`;
224 |             
225 |             const result = await bridge.checkHealth();
226 |             assert.strictEqual(result.status, 'error');
227 |             
228 |             await new Promise(resolve => badServer.close(resolve));
229 |         });
230 |     });
231 |     
232 |     describe('Authentication', () => {
233 |         it('should include API key in requests', async () => {
234 |             bridge.apiKey = 'test-api-key';
235 |             const result = await bridge.checkHealth();
236 |             assert.strictEqual(result.status, 'healthy');
237 |         });
238 |         
239 |         it('should handle authentication failures', async () => {
240 |             bridge.apiKey = 'wrong-api-key';
241 |             const result = await bridge.checkHealth();
242 |             assert.strictEqual(result.status, 'unhealthy');
243 |         });
244 |     });
245 | });
246 | 
247 | // Run tests if this file is executed directly
248 | if (require.main === module) {
249 |     // Simple test runner for development
250 |     const Mocha = require('mocha');
251 |     const mocha = new Mocha();
252 |     mocha.addFile(__filename);
253 |     mocha.run(failures => {
254 |         process.exit(failures ? 1 : 0);
255 |     });
256 | }
```

--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Main CI/CD Pipeline
  2 | 
  3 | on:
  4 |   push:
  5 |     branches:
  6 |       - main
  7 |   pull_request:
  8 |     branches:
  9 |       - main
 10 | 
 11 | jobs:
 12 |   # First, try to create a release
 13 |   release:
 14 |     if: github.event_name == 'push' && github.ref == 'refs/heads/main'
 15 |     runs-on: ubuntu-latest
 16 |     concurrency: release
 17 |     permissions:
 18 |       id-token: write
 19 |       contents: write
 20 |     outputs:
 21 |       released: ${{ steps.release.outputs.released }}
 22 |       version: ${{ steps.release.outputs.version }}
 23 |       tag: ${{ steps.release.outputs.tag }}
 24 | 
 25 |     steps:
 26 |     - uses: actions/checkout@v4
 27 |       with:
 28 |         fetch-depth: 0
 29 | 
 30 |     - name: Set up Python
 31 |       uses: actions/setup-python@v4
 32 |       with:
 33 |         python-version: '3.9'
 34 | 
 35 |     - name: Install dependencies
 36 |       run: |
 37 |         python -m pip install --upgrade pip
 38 |         python -m pip install build hatchling python-semantic-release
 39 | 
 40 |     - name: Build package
 41 |       run: python -m build
 42 | 
 43 |     - name: Python Semantic Release
 44 |       id: release
 45 |       run: |
 46 |         set -e
 47 |         
 48 |         # Run semantic-release to determine next version without pushing
 49 |         export GIT_COMMITTER_NAME="github-actions[bot]"
 50 |         export GIT_COMMITTER_EMAIL="github-actions[bot]@users.noreply.github.com"
 51 |         
 52 |         echo "=== DEBUG: Starting semantic-release process ==="
 53 |         
 54 |         # Capture current version (read directly from file)
 55 |         CURRENT_VERSION=$(grep -E "^__version__" src/mcp_memory_service/__init__.py | cut -d'"' -f2 || echo "0.0.0")
 56 |         echo "DEBUG: Current version from file: $CURRENT_VERSION"
 57 |         
 58 |         # Check git log for recent commits
 59 |         echo "DEBUG: Recent commits:"
 60 |         git log --oneline -5
 61 |         
 62 |         # Check for existing tags
 63 |         echo "DEBUG: Existing tags:"
 64 |         git tag -l | tail -5 || echo "No tags found"
 65 |         
 66 |         # Show git status
 67 |         echo "DEBUG: Git status:"
 68 |         git status --porcelain
 69 |         
 70 |         # Always try to run semantic-release and capture the version change
 71 |         BEFORE_VERSION="$CURRENT_VERSION"
 72 |         echo "DEBUG: Version before semantic-release: $BEFORE_VERSION"
 73 |         
 74 |         # Run semantic-release with verbose output for debugging
 75 |         echo "DEBUG: Running semantic-release..."
 76 |         if semantic-release version --no-push --no-vcs-release --verbose; then
 77 |           echo "DEBUG: semantic-release completed successfully"
 78 |         else
 79 |           echo "DEBUG: semantic-release failed or no release needed"
 80 |         fi
 81 |         
 82 |         # Capture the version after semantic-release
 83 |         AFTER_VERSION=$(grep -E "^__version__" src/mcp_memory_service/__init__.py | cut -d'"' -f2 || echo "0.0.0")
 84 |         echo "DEBUG: Version after semantic-release: $AFTER_VERSION"
 85 |         
 86 |         # Show git status after semantic-release
 87 |         echo "DEBUG: Git status after semantic-release:"
 88 |         git status --porcelain
 89 |         
 90 |         # Check if version changed
 91 |         if [ "$BEFORE_VERSION" != "$AFTER_VERSION" ]; then
 92 |           echo "✅ Version changed from $BEFORE_VERSION to $AFTER_VERSION"
 93 |           
 94 |           # Write to GITHUB_OUTPUT with clean environment
 95 |           {
 96 |             echo "released=true"
 97 |             echo "version=$AFTER_VERSION" 
 98 |             echo "tag=v$AFTER_VERSION"
 99 |           } >> $GITHUB_OUTPUT
100 |           
101 |           # Create tag manually
102 |           git tag "v$AFTER_VERSION"
103 |           echo "✅ Tag v$AFTER_VERSION created locally"
104 | 
105 |           # Push version bump and tag to remote
106 |           git push origin main
107 |           git push origin "v$AFTER_VERSION"
108 |           echo "✅ Pushed version bump and tag v$AFTER_VERSION to remote"
109 | 
110 |           echo "DEBUG: Contents written to GITHUB_OUTPUT:"
111 |           echo "released=true"
112 |           echo "version=$AFTER_VERSION"
113 |           echo "tag=v$AFTER_VERSION"
114 |         else
115 |           echo "❌ No release needed (version unchanged: $BEFORE_VERSION)"
116 |           
117 |           # Write to GITHUB_OUTPUT with clean environment
118 |           {
119 |             echo "released=false"
120 |             echo "version=$CURRENT_VERSION"
121 |             echo "tag="
122 |           } >> $GITHUB_OUTPUT
123 |           
124 |           echo "DEBUG: Contents written to GITHUB_OUTPUT:"
125 |           echo "released=false"
126 |           echo "version=$CURRENT_VERSION"
127 |           echo "tag="
128 |         fi
129 |         
130 |         echo "=== DEBUG: Final GITHUB_OUTPUT contents ==="
131 |         cat $GITHUB_OUTPUT
132 | 
133 |   # Test uvx compatibility
134 |   test-uvx-compatibility:
135 |     runs-on: ubuntu-latest
136 |     name: Test uvx compatibility
137 |     
138 |     steps:
139 |     - name: Checkout repository
140 |       uses: actions/checkout@v4
141 | 
142 |     - name: Set up Python
143 |       uses: actions/setup-python@v4
144 |       with:
145 |         python-version: '3.11'
146 | 
147 |     - name: Cache HuggingFace models
148 |       uses: actions/cache@v3
149 |       with:
150 |         path: ~/.cache/huggingface
151 |         key: ${{ runner.os }}-huggingface-models-${{ hashFiles('**/pyproject.toml') }}
152 |         restore-keys: |
153 |           ${{ runner.os }}-huggingface-models-
154 | 
155 |     - name: Install uv
156 |       run: |
157 |         curl -LsSf https://astral.sh/uv/install.sh | sh
158 |         source $HOME/.cargo/env
159 |         echo "$HOME/.cargo/bin" >> $GITHUB_PATH
160 | 
161 |     - name: Test uvx compatibility
162 |       run: |
163 |         # Create virtual environment with uv
164 |         uv venv
165 | 
166 |         # Install the package
167 |         uv pip install -e .
168 | 
169 |         # Install test dependencies
170 |         uv pip install pytest pytest-asyncio
171 | 
172 |         # Pre-download HuggingFace embedding model to populate cache
173 |         source .venv/bin/activate
174 |         python -c "from sentence_transformers import SentenceTransformer; print('Downloading embedding model...'); SentenceTransformer('all-MiniLM-L6-v2'); print('✓ Model cached')"
175 | 
176 |         # Run tests
177 |         source .venv/bin/activate
178 |         python -m pytest tests/ -v || echo "✓ Tests completed"
179 |         
180 |         # Build wheel for uvx testing
181 |         uv build
182 |         
183 |         # Test package structure compatibility
184 |         echo "✓ Package structure compatible with uvx"
185 | 
186 |   # Test Docker build
187 |   test-docker-build:
188 |     runs-on: ubuntu-latest
189 |     name: Test Docker build
190 |     
191 |     steps:
192 |     - name: Checkout repository
193 |       uses: actions/checkout@v4
194 | 
195 |     - name: Set up Docker Buildx
196 |       uses: docker/setup-buildx-action@v3
197 | 
198 |     - name: Build Docker image
199 |       uses: docker/build-push-action@v5
200 |       with:
201 |         context: .
202 |         file: ./tools/docker/Dockerfile
203 |         platforms: linux/amd64
204 |         push: false
205 |         load: true
206 |         tags: mcp-memory-service:test
207 |         cache-from: type=gha
208 |         cache-to: type=gha,mode=max
209 | 
210 |     - name: Test Docker image
211 |       run: |
212 |         # Debug: List all docker images
213 |         echo "Available Docker images:"
214 |         docker images
215 | 
216 |         # Test image can be created (override entrypoint to run python directly)
217 |         docker run --rm --entrypoint="" mcp-memory-service:test python -c "print('✓ Docker image works')"
218 | 
219 |         # Test that the server can show help (disable OAuth to avoid JWT key requirement)
220 |         docker run --rm -e MCP_OAUTH_ENABLED=false mcp-memory-service:test --help > /dev/null && echo "✓ Server help works"
221 | 
222 |   # Publish to Docker Hub (only after release)
223 |   publish-docker-hub:
224 |     needs: [release, test-docker-build]
225 |     if: needs.release.outputs.released == 'true'
226 |     runs-on: ubuntu-latest
227 |     permissions:
228 |       contents: read
229 |       packages: write
230 |       id-token: write
231 |       attestations: write
232 | 
233 |     steps:
234 |     - name: Checkout repository
235 |       uses: actions/checkout@v4
236 | 
237 |     - name: Set up Docker Buildx
238 |       uses: docker/setup-buildx-action@v3
239 |       with:
240 |         driver: docker-container  # Use container driver for multi-platform builds
241 | 
242 |     - name: Log in to Docker Hub
243 |       uses: docker/login-action@v3
244 |       with:
245 |         registry: docker.io
246 |         username: ${{ secrets.DOCKER_USERNAME }}
247 |         password: ${{ secrets.DOCKER_PASSWORD }}
248 | 
249 |     - name: Extract metadata
250 |       id: meta
251 |       uses: docker/metadata-action@v5
252 |       with:
253 |         images: docker.io/doobidoo/mcp-memory-service
254 |         tags: |
255 |           type=raw,value=latest
256 |           type=raw,value=${{ needs.release.outputs.version }}
257 |           type=semver,pattern={{version}},value=${{ needs.release.outputs.tag }}
258 |           type=semver,pattern={{major}}.{{minor}},value=${{ needs.release.outputs.tag }}
259 | 
260 |     - name: Build and push Docker image
261 |       uses: docker/build-push-action@v5
262 |       with:
263 |         context: .
264 |         file: ./tools/docker/Dockerfile
265 |         platforms: linux/amd64,linux/arm64
266 |         push: true
267 |         tags: ${{ steps.meta.outputs.tags }}
268 |         labels: ${{ steps.meta.outputs.labels }}
269 |         cache-from: type=gha
270 |         cache-to: type=gha,mode=max
271 | 
272 |   # Publish to GitHub Container Registry
273 |   publish-ghcr:
274 |     needs: [test-uvx-compatibility, test-docker-build]
275 |     if: github.event_name == 'push' && github.ref == 'refs/heads/main'
276 |     runs-on: ubuntu-latest
277 |     name: Publish to GHCR
278 |     permissions:
279 |       contents: read
280 |       packages: write
281 |     
282 |     steps:
283 |     - name: Checkout repository
284 |       uses: actions/checkout@v4
285 | 
286 |     - name: Set up Docker Buildx
287 |       uses: docker/setup-buildx-action@v3
288 |       with:
289 |         driver: docker-container  # Use container driver for multi-platform builds
290 | 
291 |     - name: Log in to GitHub Container Registry
292 |       uses: docker/login-action@v3
293 |       with:
294 |         registry: ghcr.io
295 |         username: ${{ github.actor }}
296 |         password: ${{ secrets.GITHUB_TOKEN }}
297 | 
298 |     - name: Extract metadata
299 |       id: meta
300 |       uses: docker/metadata-action@v5
301 |       with:
302 |         images: ghcr.io/doobidoo/mcp-memory-service
303 |         tags: |
304 |           type=ref,event=branch
305 |           type=raw,value=latest,enable={{is_default_branch}}
306 | 
307 |     - name: Build and push Docker image
308 |       uses: docker/build-push-action@v5
309 |       with:
310 |         context: .
311 |         file: ./tools/docker/Dockerfile
312 |         platforms: linux/amd64,linux/arm64
313 |         push: true
314 |         tags: ${{ steps.meta.outputs.tags }}
315 |         labels: ${{ steps.meta.outputs.labels }}
316 |         cache-from: type=gha
317 |         cache-to: type=gha,mode=max
```

--------------------------------------------------------------------------------
/archive/docs-root-cleanup-2025-08-23/DOCUMENTATION_CLEANUP_PLAN.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Documentation Cleanup Plan
  2 | 
  3 | **Date**: 2025-08-23  
  4 | **Phase**: Repository Documentation Consolidation  
  5 | **Goal**: Remove 75+ redundant files, keep essential docs, improve maintainability
  6 | 
  7 | ## 📊 Summary
  8 | 
  9 | **Before Cleanup:**
 10 | - **87 markdown files** (1MB+ documentation)
 11 | - **Massive redundancy** - 6 installation guides, 5 Claude integration files
 12 | - **Poor user experience** - overwhelming choice, unclear paths
 13 | - **High maintenance burden** - updating requires changing 6+ files
 14 | 
 15 | **After Cleanup:**
 16 | - **4 essential repository files** (README, CLAUDE, CHANGELOG, CONTRIBUTING)
 17 | - **Comprehensive wiki** with consolidated guides
 18 | - **Single source of truth** for each topic
 19 | - **90% reduction** in repository documentation
 20 | 
 21 | ## 🚀 Files to Keep in Repository
 22 | 
 23 | ### ✅ Essential Repository Files (Keep)
 24 | - `README.md` ✅ **DONE** - Streamlined with wiki links
 25 | - `CLAUDE.md` ✅ **KEEP** - Claude Code development guidance  
 26 | - `CHANGELOG.md` ✅ **KEEP** - Version history
 27 | - `CONTRIBUTING.md` ✅ **KEEP** - Development guidelines (if exists)
 28 | 
 29 | ### ✅ Wiki Files Created (Consolidated)
 30 | - `Installation-Guide.md` ✅ **DONE** - Consolidated from 6+ installation files
 31 | - `Platform-Setup-Guide.md` ✅ **DONE** - Merged platform-specific guides
 32 | - `Integration-Guide.md` ✅ **DONE** - Combined all Claude/IDE integration docs
 33 | - `Home.md` ✅ **UPDATED** - Added links to new consolidated guides
 34 | 
 35 | ## 🗂️ Files to Remove (Safe to Delete)
 36 | 
 37 | ### 📦 Installation Guide Redundancy (5 files → 1 wiki page)
 38 | ```bash
 39 | # These are now consolidated in Installation-Guide.md
 40 | docs/guides/service-installation.md          # 10KB - service installation
 41 | docs/installation/complete-setup-guide.md    # 7.7KB - complete setup  
 42 | docs/installation/master-guide.md            # 5KB - hardware-specific
 43 | docs/installation/distributed-sync.md        # 11KB - installation + sync
 44 | docs/guides/claude-desktop-setup.md          # 3.4KB - Claude Desktop setup
 45 | ```
 46 | 
 47 | ### 🖥️ Platform Setup Redundancy (5 files → 1 wiki page)
 48 | ```bash
 49 | # These are now consolidated in Platform-Setup-Guide.md
 50 | docs/platforms/windows.md                    # 11KB - Windows setup
 51 | docs/guides/windows-setup.md                 # 3.9KB - Windows (shorter)
 52 | docs/platforms/ubuntu.md                     # 12.8KB - Linux setup
 53 | docs/guides/UBUNTU_SETUP.md                  # 5.9KB - Linux (different)
 54 | docs/platforms/macos-intel.md                # 9.8KB - macOS Intel
 55 | ```
 56 | 
 57 | ### 🔗 Integration Guide Redundancy (5 files → 1 wiki page)
 58 | ```bash
 59 | # These are now consolidated in Integration-Guide.md  
 60 | docs/guides/claude-code-integration.md       # 10.6KB - Claude Code
 61 | docs/guides/claude-code-quickstart.md        # 3.9KB - Quick start
 62 | docs/guides/claude-code-compatibility.md     # 3.8KB - Compatibility
 63 | docs/guides/claude_integration.md            # 2.5KB - Basic integration
 64 | docs/guides/mcp-client-configuration.md      # 10KB - MCP client config
 65 | ```
 66 | 
 67 | ### 🏗️ Development Artifacts (Should be archived, not in user docs)
 68 | ```bash
 69 | # Development session files - move to archive or delete
 70 | docs/sessions/MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md  # 12KB
 71 | docs/development/CLEANUP_PLAN.md                        # 6KB
 72 | docs/development/CLEANUP_SUMMARY.md                     # 3.6KB  
 73 | docs/development/CLEANUP_README.md                      # 1KB
 74 | docs/development/TIMESTAMP_FIX_SUMMARY.md              # 3.4KB
 75 | docs/development/test-results.md                       # 4.2KB
 76 | docs/development/mcp-milestone.md                      # 2.9KB
 77 | SESSION_MEMORY_2025-08-11.md                          # 4.7KB (root)
 78 | CLAUDE_PERSONALIZED.md                                # 10.7KB (root)
 79 | ```
 80 | 
 81 | ### 📚 Root Documentation Redundancy (Move to wiki or delete)
 82 | ```bash
 83 | # These can be moved to wiki or deleted as they're covered elsewhere
 84 | AWESOME_LIST_SUBMISSION.md                   # 5.6KB - submission doc
 85 | CLOUDFLARE_IMPLEMENTATION.md                 # 5.7KB - now in wiki  
 86 | LITESTREAM_SETUP_GUIDE.md                   # 6.5KB - can move to wiki
 87 | PYTORCH_DOWNLOAD_FIX.md                     # 2.7KB - troubleshooting
 88 | ROADMAP.md                                  # 5.8KB - can move to wiki
 89 | SPONSORS.md                                 # 4.9KB - can keep or move
 90 | ```
 91 | 
 92 | ### 🔧 Duplicate Technical Docs (Consolidate or remove)
 93 | ```bash
 94 | # These have overlapping content with wiki pages
 95 | docs/guides/authentication.md               # 13KB - auth guide
 96 | docs/guides/distributed-sync.md             # 14KB - sync setup
 97 | docs/guides/mdns-service-discovery.md       # 9.9KB - mDNS setup
 98 | docs/guides/migration.md                    # 10.6KB - migration guide
 99 | docs/deployment/database-synchronization.md # 12.9KB - DB sync
100 | docs/deployment/multi-client-server.md      # 23.3KB - multi-client
101 | ```
102 | 
103 | ### 📁 Miscellaneous Cleanup
104 | ```bash
105 | # Various guides that can be consolidated or removed
106 | docs/guides/commands-vs-mcp-server.md       # 6.9KB - covered in wiki
107 | docs/guides/invocation_guide.md             # 12.9KB - usage guide
108 | docs/guides/scripts.md                      # 2KB - script docs
109 | docs/LM_STUDIO_COMPATIBILITY.md             # 4.6KB - compatibility 
110 | docs/ide-compatability.md                   # 5KB - IDE compatibility
111 | docs/integrations.md                        # 1.8KB - integrations
112 | docs/architecture.md                        # 9.9KB - can move to wiki
113 | ```
114 | 
115 | ## 📋 Safe Cleanup Commands
116 | 
117 | ### Phase 1: Create Archive Directory
118 | ```bash
119 | mkdir -p archive/docs-removed-2025-08-23
120 | ```
121 | 
122 | ### Phase 2: Move (Don't Delete) Redundant Files
123 | ```bash
124 | # Installation redundancy
125 | mv docs/guides/service-installation.md archive/docs-removed-2025-08-23/
126 | mv docs/installation/complete-setup-guide.md archive/docs-removed-2025-08-23/
127 | mv docs/installation/master-guide.md archive/docs-removed-2025-08-23/
128 | mv docs/installation/distributed-sync.md archive/docs-removed-2025-08-23/
129 | mv docs/guides/claude-desktop-setup.md archive/docs-removed-2025-08-23/
130 | 
131 | # Platform redundancy  
132 | mv docs/platforms/windows.md archive/docs-removed-2025-08-23/
133 | mv docs/guides/windows-setup.md archive/docs-removed-2025-08-23/
134 | mv docs/platforms/ubuntu.md archive/docs-removed-2025-08-23/
135 | mv docs/guides/UBUNTU_SETUP.md archive/docs-removed-2025-08-23/
136 | mv docs/platforms/macos-intel.md archive/docs-removed-2025-08-23/
137 | 
138 | # Integration redundancy
139 | mv docs/guides/claude-code-integration.md archive/docs-removed-2025-08-23/
140 | mv docs/guides/claude-code-quickstart.md archive/docs-removed-2025-08-23/
141 | mv docs/guides/claude-code-compatibility.md archive/docs-removed-2025-08-23/
142 | mv docs/guides/claude_integration.md archive/docs-removed-2025-08-23/
143 | mv docs/guides/mcp-client-configuration.md archive/docs-removed-2025-08-23/
144 | 
145 | # Development artifacts
146 | mv docs/sessions/ archive/docs-removed-2025-08-23/
147 | mv docs/development/ archive/docs-removed-2025-08-23/
148 | mv SESSION_MEMORY_2025-08-11.md archive/docs-removed-2025-08-23/
149 | mv CLAUDE_PERSONALIZED.md archive/docs-removed-2025-08-23/
150 | ```
151 | 
152 | ### Phase 3: Remove Empty Directories
153 | ```bash
154 | # Remove empty directories
155 | find docs/ -type d -empty -delete
156 | ```
157 | 
158 | ### Phase 4: Update README-ORIGINAL-BACKUP  
159 | ```bash
160 | # Keep backup for reference but add note
161 | echo -e "\n\n---\n**NOTE**: This file was replaced with a streamlined version on 2025-08-23. See README.md for the current version and the wiki for comprehensive documentation." >> README-ORIGINAL-BACKUP.md
162 | ```
163 | 
164 | ## 🔍 Verification Steps
165 | 
166 | ### Before Cleanup
167 | ```bash
168 | # Count files before
169 | find docs/ -name "*.md" | wc -l
170 | find . -maxdepth 1 -name "*.md" | wc -l
171 | ```
172 | 
173 | ### After Cleanup  
174 | ```bash
175 | # Verify counts after
176 | find docs/ -name "*.md" | wc -l    # Should be much smaller
177 | find . -maxdepth 1 -name "*.md" | wc -l  # Should be ~4 essential files
178 | 
179 | # Verify wiki links work
180 | # Check that wiki pages exist and have content
181 | ls -la ../mcp-memory-service.wiki/Installation-Guide.md
182 | ls -la ../mcp-memory-service.wiki/Platform-Setup-Guide.md  
183 | ls -la ../mcp-memory-service.wiki/Integration-Guide.md
184 | ```
185 | 
186 | ### Test User Experience
187 | ```bash
188 | # Test that essential info is still accessible
189 | # 1. README.md should have clear quick start
190 | # 2. Wiki links should work  
191 | # 3. Installation should be straightforward
192 | # 4. No broken internal links
193 | ```
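
A minimal sketch for check 4 above (illustrative only; the script assumes Python 3 and repository-relative links, and is not part of the repository):

```python
#!/usr/bin/env python3
"""Rough internal-link check: flag relative .md links whose targets are missing."""
import re
from pathlib import Path

LINK_RE = re.compile(r"\[[^\]]+\]\(([^)#]+\.md)\)")

for md_file in Path(".").rglob("*.md"):
    for target in LINK_RE.findall(md_file.read_text(encoding="utf-8")):
        if target.startswith("http"):
            continue  # external links are out of scope here
        if not (md_file.parent / target).resolve().exists():
            print(f"BROKEN: {md_file} -> {target}")
```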
194 | 
195 | ## 🎯 Expected Results
196 | 
197 | ### Quantitative Improvements
198 | - **File count**: 87 → ~15 markdown files (83% reduction)
199 | - **Repository size**: ~1MB docs → ~100KB essential docs
200 | - **Maintenance burden**: 6 installation guides → 1 wiki page
201 | - **User confusion**: Multiple paths → Clear single source
202 | 
203 | ### Qualitative Improvements  
204 | - **Better discoverability**: Clear wiki structure vs scattered files
205 | - **Easier maintenance**: Update once vs updating 6+ files
206 | - **Improved UX**: Single path vs choice paralysis
207 | - **Cleaner repository**: Focus on code vs documentation chaos
208 | - **Professional appearance**: Organized vs overwhelming
209 | 
210 | ## 🛡️ Safety Measures
211 | 
212 | ### Backup Strategy
213 | - ✅ All removed files moved to `archive/` directory (not deleted)
214 | - ✅ Original README preserved as `README-ORIGINAL-BACKUP.md`
215 | - ✅ Git history preserves all removed content
216 | - ✅ Wiki contains consolidated content from all removed files
217 | 
218 | ### Rollback Plan
219 | If any issues arise:
220 | 1. **Individual files**: `mv archive/docs-removed-2025-08-23/filename.md docs/path/`
221 | 2. **Full rollback**: `mv README.md README-streamlined.md && mv README-ORIGINAL-BACKUP.md README.md`
222 | 3. **Git rollback**: `git checkout HEAD~1 -- docs/` (if committed)
223 | 
224 | ### Testing Plan
225 | 1. **Link verification**: All wiki links functional
226 | 2. **Content verification**: No essential information lost  
227 | 3. **User journey testing**: Installation → integration → usage
228 | 4. **Community feedback**: Monitor for missing information requests
229 | 
230 | ## ✅ Success Criteria
231 | 
232 | - [ ] Repository has 4 essential markdown files (README, CLAUDE, CHANGELOG, CONTRIBUTING)
233 | - [ ] Wiki contains 3 comprehensive consolidated guides
234 | - [ ] No essential information is lost or inaccessible
235 | - [ ] All links function correctly
236 | - [ ] User feedback is positive (reduced confusion)
237 | - [ ] Maintenance burden significantly reduced
238 | 
239 | ---
240 | 
241 | **This plan ensures safe, systematic cleanup while preserving all information and providing better user experience.**
```

--------------------------------------------------------------------------------
/docs/guides/mdns-service-discovery.md:
--------------------------------------------------------------------------------

```markdown
  1 | # mDNS Service Discovery Guide
  2 | 
  3 | This guide covers the automatic service discovery feature introduced in MCP Memory Service v2.1.0, which uses mDNS (Multicast DNS) to enable zero-configuration networking.
  4 | 
  5 | ## Overview
  6 | 
  7 | mDNS service discovery allows MCP Memory Service instances to:
  8 | - **Automatically advertise** themselves on the local network
  9 | - **Auto-discover** available services without manual configuration
 10 | - **Prioritize secure connections** (HTTPS over HTTP)
 11 | - **Validate service health** before establishing connections
 12 | 
 13 | ## Quick Start
 14 | 
 15 | ### 1. Start Server with mDNS
 16 | 
 17 | ```bash
 18 | # Basic setup (mDNS enabled by default)
 19 | python scripts/run_http_server.py
 20 | 
 21 | # With HTTPS (auto-generates certificates)
 22 | export MCP_HTTPS_ENABLED=true
 23 | python scripts/run_http_server.py
 24 | 
 25 | # Custom service name
 26 | export MCP_MDNS_SERVICE_NAME="Team Memory Service"
 27 | python scripts/run_http_server.py
 28 | ```
 29 | 
 30 | ### 2. Configure Client for Auto-Discovery
 31 | 
 32 | **Claude Desktop Configuration:**
 33 | 
 34 | ```json
 35 | {
 36 |   "mcpServers": {
 37 |     "memory": {
 38 |       "command": "node",
 39 |       "args": ["/path/to/mcp-memory-service/examples/http-mcp-bridge.js"],
 40 |       "env": {
 41 |         "MCP_MEMORY_AUTO_DISCOVER": "true",
 42 |         "MCP_MEMORY_PREFER_HTTPS": "true",
 43 |         "MCP_MEMORY_API_KEY": "your-api-key"
 44 |       }
 45 |     }
 46 |   }
 47 | }
 48 | ```
 49 | 
 50 | That's it! The client will automatically find and connect to available services.
 51 | 
 52 | ## Configuration Reference
 53 | 
 54 | ### Server Configuration
 55 | 
 56 | | Environment Variable | Default | Description |
 57 | |---------------------|---------|-------------|
 58 | | `MCP_MDNS_ENABLED` | `true` | Enable/disable mDNS advertisement |
 59 | | `MCP_MDNS_SERVICE_NAME` | `"MCP Memory Service"` | Display name for the service |
 60 | | `MCP_MDNS_SERVICE_TYPE` | `"_mcp-memory._tcp.local."` | RFC-compliant service type |
 61 | | `MCP_MDNS_DISCOVERY_TIMEOUT` | `5` | Discovery timeout in seconds |
 62 | 
 63 | ### Client Configuration
 64 | 
 65 | | Environment Variable | Default | Description |
 66 | |---------------------|---------|-------------|
 67 | | `MCP_MEMORY_AUTO_DISCOVER` | `false` | Enable automatic service discovery |
 68 | | `MCP_MEMORY_PREFER_HTTPS` | `true` | Prefer HTTPS services over HTTP |
 69 | | `MCP_HTTP_ENDPOINT` | (none) | Manual fallback endpoint |
 70 | | `MCP_MEMORY_API_KEY` | (none) | API key for authentication |
 71 | 
 72 | ## HTTPS Integration
 73 | 
 74 | ### Automatic Certificate Generation
 75 | 
 76 | The server can automatically generate self-signed certificates for development:
 77 | 
 78 | ```bash
 79 | export MCP_HTTPS_ENABLED=true
 80 | python scripts/run_http_server.py
 81 | ```
 82 | 
 83 | Output:
 84 | ```
 85 | Generating self-signed certificate for HTTPS...
 86 | Generated self-signed certificate: /tmp/mcp-memory-certs/cert.pem
 87 | WARNING: This is a development certificate. Use proper certificates in production.
 88 | Starting MCP Memory Service HTTPS server on 0.0.0.0:8000
 89 | mDNS service advertisement started
 90 | ```
 91 | 
 92 | ### Custom Certificates
 93 | 
 94 | For production deployments:
 95 | 
 96 | ```bash
 97 | export MCP_HTTPS_ENABLED=true
 98 | export MCP_SSL_CERT_FILE="/path/to/your/cert.pem"
 99 | export MCP_SSL_KEY_FILE="/path/to/your/key.pem"
100 | python scripts/run_http_server.py
101 | ```
102 | 
103 | ## Service Discovery Process
104 | 
105 | ### Client Discovery Flow
106 | 
107 | 1. **Discovery Phase**: Client broadcasts mDNS query for `_mcp-memory._tcp.local.`
108 | 2. **Response Collection**: Collects responses from all available services
109 | 3. **Service Prioritization**: Sorts services by:
110 |    - HTTPS preference (if `MCP_MEMORY_PREFER_HTTPS=true`)
111 |    - Health check results
112 |    - Response time
113 |    - Port preference
114 | 4. **Health Validation**: Tests endpoints with `/api/health` calls
115 | 5. **Connection**: Connects to the best available service
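
The prioritization above can be sketched with the `DiscoveryClient` API shown under Service Monitoring below; the ranking key here is illustrative, not the exact implementation:

```python
from mcp_memory_service.discovery import DiscoveryClient

async def pick_best_service(prefer_https: bool = True):
    """Rank healthy services: HTTPS first (optionally), then lowest latency."""
    client = DiscoveryClient()
    candidates = await client.find_services_with_health()
    healthy = [(svc, health) for svc, health in candidates if health.healthy]
    if not healthy:
        return None
    healthy.sort(key=lambda pair: (
        prefer_https and not pair[0].url.startswith("https"),  # HTTPS sorts first
        pair[1].response_time_ms,                              # then fastest response
    ))
    return healthy[0][0]
```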
116 | 
117 | ### Server Advertisement
118 | 
119 | The server advertises with the following metadata:
120 | - **Service Type**: `_mcp-memory._tcp.local.`
121 | - **Properties**:
122 |   - `api_version`: Server version
123 |   - `https`: Whether HTTPS is enabled
124 |   - `auth_required`: Whether API key is required
125 |   - `api_path`: API base path (`/api`)
126 |   - `sse_path`: SSE endpoint path (`/api/events`)
127 |   - `docs_path`: Documentation path (`/api/docs`)
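
For illustration, the advertisement looks roughly like this with the python-zeroconf package (the package choice, address, and literal values are assumptions; the server handles this internally):

```python
import socket
from zeroconf import ServiceInfo, Zeroconf

info = ServiceInfo(
    "_mcp-memory._tcp.local.",
    "MCP Memory Service._mcp-memory._tcp.local.",
    addresses=[socket.inet_aton("192.168.1.10")],  # placeholder LAN address
    port=8000,
    properties={
        "api_version": "x.y.z",  # placeholder version
        "https": "true",
        "auth_required": "true",
        "api_path": "/api",
        "sse_path": "/api/events",
        "docs_path": "/api/docs",
    },
)
zc = Zeroconf()
zc.register_service(info)  # call zc.unregister_service(info) and zc.close() on shutdown
```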
128 | 
129 | ## Network Requirements
130 | 
131 | ### Firewall Configuration
132 | 
133 | Ensure mDNS traffic is allowed:
134 | 
135 | ```bash
136 | # Linux (UFW)
137 | sudo ufw allow 5353/udp
138 | 
139 | # Linux (iptables)
140 | sudo iptables -A INPUT -p udp --dport 5353 -j ACCEPT
141 | 
142 | # macOS/Windows: mDNS typically allowed by default
143 | ```
144 | 
145 | ### Network Topology
146 | 
147 | mDNS works on:
148 | - ✅ Local Area Networks (LAN)
149 | - ✅ WiFi networks
150 | - ✅ VPN networks (if multicast is supported)
151 | - ❌ Across different subnets (without mDNS relay)
152 | - ❌ Internet (by design - local network only)
153 | 
154 | ## Troubleshooting
155 | 
156 | ### Common Issues
157 | 
158 | #### No Services Discovered
159 | 
160 | **Symptoms:**
161 | ```
162 | Attempting to discover MCP Memory Service via mDNS...
163 | No MCP Memory Services discovered
164 | Using default endpoint: http://localhost:8000/api
165 | ```
166 | 
167 | **Solutions:**
168 | 1. Verify server is running with mDNS enabled:
169 |    ```bash
170 |    grep "mDNS service advertisement started" server.log
171 |    ```
172 | 
173 | 2. Check network connectivity:
174 |    ```bash
175 |    ping 224.0.0.251  # mDNS multicast address
176 |    ```
177 | 
178 | 3. Verify firewall allows mDNS:
179 |    ```bash
180 |    sudo ufw status | grep 5353
181 |    ```
182 | 
183 | #### Discovery Timeout
184 | 
185 | **Symptoms:**
186 | ```
187 | Discovery failed: Request timeout
188 | ```
189 | 
190 | **Solutions:**
191 | 1. Increase discovery timeout:
192 |    ```bash
193 |    export MCP_MDNS_DISCOVERY_TIMEOUT=10
194 |    ```
195 | 
196 | 2. Check network latency
197 | 3. Verify multicast is working on network
198 | 
199 | #### Wrong Service Selected
200 | 
201 | **Symptoms:**
202 | Client connects to HTTP instead of HTTPS service.
203 | 
204 | **Solutions:**
205 | 1. Force HTTPS preference (client bridge):
206 |    ```bash
207 |    export MCP_MEMORY_PREFER_HTTPS=true
208 |    ```
209 | 
210 | 2. Use manual endpoint override (client bridge):
211 |    ```bash
212 |    export MCP_MEMORY_AUTO_DISCOVER=false
213 |    export MCP_HTTP_ENDPOINT="https://preferred-server:8000/api"
214 |    ```
215 | 
216 | ### Debug Mode
217 | 
218 | Enable detailed logging:
219 | 
220 | **Server:**
221 | ```bash
222 | export LOG_LEVEL=DEBUG
223 | python scripts/run_http_server.py
224 | ```
225 | 
226 | **Client:**
227 | ```bash
228 | # Redirect stderr to see discovery details
229 | node examples/http-mcp-bridge.js 2>discovery.log
230 | ```
231 | 
232 | ### Manual Discovery Testing
233 | 
234 | Test mDNS discovery manually:
235 | 
236 | **macOS:**
237 | ```bash
238 | # Browse for services
239 | dns-sd -B _mcp-memory._tcp
240 | 
241 | # Resolve specific service
242 | dns-sd -L "MCP Memory Service" _mcp-memory._tcp
243 | ```
244 | 
245 | **Linux:**
246 | ```bash
247 | # Browse for services
248 | avahi-browse -t _mcp-memory._tcp
249 | 
250 | # Resolve specific service
251 | avahi-resolve-host-name hostname.local
252 | ```
253 | 
254 | ## Advanced Usage
255 | 
256 | ### Multiple Service Environments
257 | 
258 | Deploy multiple services with different names:
259 | 
260 | ```bash
261 | # Development server
262 | export MCP_MDNS_SERVICE_NAME="Dev Memory Service"
263 | export MCP_HTTP_PORT=8000
264 | python scripts/run_http_server.py &
265 | 
266 | # Staging server
267 | export MCP_MDNS_SERVICE_NAME="Staging Memory Service"
268 | export MCP_HTTP_PORT=8001
269 | python scripts/run_http_server.py &
270 | ```
271 | 
272 | Clients will discover both and can select based on preferences.
273 | 
274 | ### Load Balancing
275 | 
276 | With multiple identical services, clients automatically distribute load by:
277 | 1. Health check response times
278 | 2. Connection success rates
279 | 3. Round-robin selection among healthy services
280 | 
281 | ### Service Monitoring
282 | 
283 | Monitor discovered services programmatically:
284 | 
285 | ```python
286 | import asyncio
287 | from mcp_memory_service.discovery import DiscoveryClient
288 | 
289 | async def monitor_services():
290 |     client = DiscoveryClient()
291 |     services = await client.find_services_with_health()
292 |     
293 |     for service, health in services:
294 |         print(f"Service: {service.name} at {service.url}")
295 |         print(f"Health: {'✅' if health.healthy else '❌'}")
296 |         print(f"Response time: {health.response_time_ms:.1f}ms")
297 |         print()
298 | 
299 | asyncio.run(monitor_services())
300 | ```
301 | 
302 | ## Security Considerations
303 | 
304 | ### Network Security
305 | 
306 | 1. **Local Network Only**: mDNS is designed for local networks and doesn't route across the internet
307 | 2. **Network Segmentation**: Use VLANs to isolate service discovery if needed
308 | 3. **Firewall Rules**: Restrict mDNS to trusted network segments
309 | 
310 | ### Authentication
311 | 
312 | Always use API keys even with mDNS:
313 | 
314 | ```bash
315 | # Server
316 | export MCP_API_KEY="$(openssl rand -base64 32)"
317 | 
318 | # Client (Node bridge)
319 | export MCP_MEMORY_API_KEY="same-key-as-server"
320 | ```
321 | 
322 | ### Encryption
323 | 
324 | Enable HTTPS for encrypted communication:
325 | 
326 | ```bash
327 | export MCP_HTTPS_ENABLED=true
328 | export MCP_MEMORY_PREFER_HTTPS=true  # client bridge preference
329 | ```
330 | 
331 | ## Best Practices
332 | 
333 | ### Development
334 | 
335 | - Use auto-generated certificates for development
336 | - Enable debug logging for troubleshooting
337 | - Use descriptive service names for multi-developer environments
338 | 
339 | ### Production
340 | 
341 | - Use proper SSL certificates from trusted CAs
342 | - Implement network segmentation
343 | - Monitor service discovery logs
344 | - Set appropriate discovery timeouts for network conditions
345 | 
346 | ### Team Collaboration
347 | 
348 | - Establish naming conventions for services
349 | - Document service discovery configuration
350 | - Use consistent API key management
351 | - Test discovery across different network conditions
352 | 
353 | ## Integration Examples
354 | 
355 | ### Docker Compose
356 | 
357 | ```yaml
358 | version: '3.8'
359 | services:
360 |   mcp-memory:
361 |     build: .
362 |     ports:
363 |       - "8000:8000"
364 |     environment:
365 |       - MCP_HTTPS_ENABLED=true
366 |       - MCP_MDNS_ENABLED=true
367 |       - MCP_MDNS_SERVICE_NAME=Docker Memory Service
368 |       - MCP_API_KEY=your-secure-key
369 |     networks:
370 |       - mcp-network
371 | 
372 | networks:
373 |   mcp-network:
374 |     driver: bridge
375 | ```
376 | 
377 | ### Kubernetes
378 | 
379 | ```yaml
380 | apiVersion: apps/v1
381 | kind: Deployment
382 | metadata:
383 |   name: mcp-memory-service
384 | spec:
385 |   replicas: 1
386 |   selector:
387 |     matchLabels:
388 |       app: mcp-memory
389 |   template:
390 |     metadata:
391 |       labels:
392 |         app: mcp-memory
393 |     spec:
394 |       hostNetwork: true  # Required for mDNS
395 |       containers:
396 |       - name: mcp-memory
397 |         image: mcp-memory-service:latest
398 |         env:
399 |         - name: MCP_MDNS_ENABLED
400 |           value: "true"
401 |         - name: MCP_HTTPS_ENABLED
402 |           value: "true"
403 |         ports:
404 |         - containerPort: 8000
405 | ```
406 | 
407 | ## Conclusion
408 | 
409 | mDNS service discovery significantly simplifies MCP Memory Service deployment by eliminating manual endpoint configuration. Combined with automatic HTTPS support, it provides a secure, zero-configuration solution for local network deployments.
410 | 
411 | For more information, see:
412 | - [Multi-Client Server Deployment Guide](../deployment/multi-client-server.md)
413 | - [General Troubleshooting](../troubleshooting/general.md)
414 | 
```

--------------------------------------------------------------------------------
/archive/docs-removed-2025-08-23/service-installation.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cross-Platform Service Installation Guide
  2 | 
  3 | This guide provides instructions for installing MCP Memory Service as a native service on Windows, macOS, and Linux systems. The service will automatically start when your system boots or when you log in.
  4 | 
  5 | ## Overview
  6 | 
  7 | The MCP Memory Service can be installed as a system service to:
  8 | - Start automatically on boot/login
  9 | - Run in the background without a terminal window
 10 | - Restart automatically if it crashes
 11 | - Integrate with system service management tools
 12 | 
 13 | ## Quick Start
 14 | 
 15 | ### Universal Installation
 16 | 
 17 | Use the cross-platform installer that automatically detects your operating system:
 18 | 
 19 | ```bash
 20 | # Clone the repository
 21 | git clone https://github.com/doobidoo/mcp-memory-service.git
 22 | cd mcp-memory-service
 23 | 
 24 | # Install dependencies (if not already done)
 25 | pip install -e .
 26 | 
 27 | # Install as a service
 28 | python install_service.py
 29 | ```
 30 | 
 31 | The installer will:
 32 | 1. Detect your operating system
 33 | 2. Check dependencies
 34 | 3. Generate a secure API key
 35 | 4. Install the appropriate service type
 36 | 5. Provide platform-specific management commands
 37 | 
 38 | ### Service Management
 39 | 
 40 | After installation, you can manage the service using:
 41 | 
 42 | ```bash
 43 | # Check service status
 44 | python install_service.py --status
 45 | 
 46 | # Start the service
 47 | python install_service.py --start
 48 | 
 49 | # Stop the service
 50 | python install_service.py --stop
 51 | 
 52 | # Uninstall the service
 53 | python install_service.py --uninstall
 54 | ```
 55 | 
 56 | ## Platform-Specific Instructions
 57 | 
 58 | ### Windows
 59 | 
 60 | #### Installation
 61 | 
 62 | ```powershell
 63 | # Run as Administrator
 64 | python install_service.py
 65 | 
 66 | # Or install directly
 67 | python scripts/install_windows_service.py
 68 | ```
 69 | 
 70 | This creates a Windows Service that:
 71 | - Runs under the current user account
 72 | - Starts automatically on system boot
 73 | - Can be managed via Services console or `net` commands
 74 | 
 75 | #### Management Commands
 76 | 
 77 | ```powershell
 78 | # Using Windows commands
 79 | net start MCPMemoryService    # Start service
 80 | net stop MCPMemoryService     # Stop service
 81 | sc query MCPMemoryService     # Check status
 82 | 
 83 | # Using convenience scripts
 84 | .\scripts\windows\start_service.bat
 85 | .\scripts\windows\stop_service.bat
 86 | .\scripts\windows\service_status.bat
 87 | ```
 88 | 
 89 | #### Requirements
 90 | 
 91 | - Administrator privileges for installation
 92 | - Python 3.10 or newer
 93 | - `pywin32` package (auto-installed if missing)
 94 | 
 95 | ### macOS
 96 | 
 97 | #### Installation
 98 | 
 99 | ```bash
100 | # Install as user LaunchAgent (default)
101 | python install_service.py
102 | 
103 | # Install as system LaunchDaemon (requires sudo)
104 | sudo python install_service.py --system
105 | 
106 | # Or install directly
107 | python scripts/install_macos_service.py
108 | ```
109 | 
110 | This creates a LaunchAgent/LaunchDaemon that:
111 | - Runs on login (user) or boot (system)
112 | - Restarts automatically if it crashes
113 | - Integrates with macOS launchd system
114 | 
115 | #### Management Commands
116 | 
117 | ```bash
118 | # Using launchctl
119 | launchctl load ~/Library/LaunchAgents/com.mcp.memory-service.plist
120 | launchctl unload ~/Library/LaunchAgents/com.mcp.memory-service.plist
121 | launchctl list | grep com.mcp.memory-service
122 | 
123 | # Using convenience scripts
124 | ./scripts/macos/start_service.sh
125 | ./scripts/macos/stop_service.sh
126 | ./scripts/macos/service_status.sh
127 | ```
128 | 
129 | #### Viewing Logs
130 | 
131 | - Check Console.app for service logs
132 | - Or tail the log files directly:
133 |   ```bash
134 |   tail -f ~/.mcp_memory_service/logs/mcp-memory-service.log
135 |   ```
136 | 
137 | ### Linux
138 | 
139 | #### Installation
140 | 
141 | ```bash
142 | # Install as user service (default)
143 | python install_service.py
144 | 
145 | # Install as system service (requires sudo)
146 | sudo python install_service.py --system
147 | 
148 | # Or install directly
149 | python scripts/install_linux_service.py
150 | ```
151 | 
152 | This creates a systemd service that:
153 | - Runs on login (user) or boot (system)
154 | - Integrates with systemd and journald
155 | - Supports automatic restart and resource limits
156 | 
157 | #### Management Commands
158 | 
159 | ```bash
160 | # For user service
161 | systemctl --user start mcp-memory
162 | systemctl --user stop mcp-memory
163 | systemctl --user status mcp-memory
164 | journalctl --user -u mcp-memory -f
165 | 
166 | # For system service
167 | sudo systemctl start mcp-memory
168 | sudo systemctl stop mcp-memory
169 | sudo systemctl status mcp-memory
170 | sudo journalctl -u mcp-memory -f
171 | 
172 | # Using convenience scripts
173 | ./scripts/linux/start_service.sh
174 | ./scripts/linux/stop_service.sh
175 | ./scripts/linux/service_status.sh
176 | ./scripts/linux/view_logs.sh
177 | ```
178 | 
179 | ## Configuration
180 | 
181 | ### Service Configuration
182 | 
183 | All platforms store configuration in:
184 | - **Config directory**: `~/.mcp_memory_service/`
185 | - **Config file**: `~/.mcp_memory_service/service_config.json`
186 | - **Log directory**: `~/.mcp_memory_service/logs/`
187 | 
188 | ### Environment Variables
189 | 
190 | The service inherits these environment variables:
191 | - `MCP_MEMORY_STORAGE_BACKEND`: Storage backend (default: `sqlite_vec`)
192 | - `MCP_HTTP_ENABLED`: Enable HTTP interface (default: `true`)
193 | - `MCP_HTTP_PORT`: HTTP port (default: `8000`)
194 | - `MCP_HTTPS_ENABLED`: Enable HTTPS (default: `true`)
195 | - `MCP_MDNS_ENABLED`: Enable mDNS discovery (default: `true`)
196 | - `MCP_CONSOLIDATION_ENABLED`: Enable memory consolidation (default: `true`)
197 | - `MCP_API_KEY`: API key for HTTP authentication (optional, auto-generated during install)
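
For illustration, resolution of these variables follows the usual environment-with-defaults pattern (a sketch, not the service's actual configuration module):

```python
import os

storage_backend = os.environ.get("MCP_MEMORY_STORAGE_BACKEND", "sqlite_vec")
http_enabled = os.environ.get("MCP_HTTP_ENABLED", "true").lower() == "true"
http_port = int(os.environ.get("MCP_HTTP_PORT", "8000"))
https_enabled = os.environ.get("MCP_HTTPS_ENABLED", "true").lower() == "true"
mdns_enabled = os.environ.get("MCP_MDNS_ENABLED", "true").lower() == "true"
consolidation = os.environ.get("MCP_CONSOLIDATION_ENABLED", "true").lower() == "true"
api_key = os.environ.get("MCP_API_KEY")  # optional; auto-generated at install time
```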
198 | 
199 | ### API Key
200 | 
201 | The installer automatically generates a secure API key for HTTP authentication. This key protects your memory service from unauthorized access when running as a service.
202 | 
203 | #### Finding Your API Key
204 | 
205 | You can find your API key in several ways:
206 | 
207 | 1. **Installation Output**: The key is displayed during installation
208 | 2. **Config File**: Located in `~/.mcp_memory_service/service_config.json`
209 | 3. **Status Command**: Run `python install_service.py --status`
210 | 4. **Service File**: Check the systemd/service configuration file directly
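
A minimal sketch for option 2, reading the key from the config file (the `api_key` field name is an assumption; inspect the file if it differs):

```python
import json
from pathlib import Path

config_path = Path.home() / ".mcp_memory_service" / "service_config.json"
config = json.loads(config_path.read_text())
print(config.get("api_key", "<field not found - inspect the file manually>"))
```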
211 | 
212 | #### Generating a New API Key
213 | 
214 | To generate a new secure API key:
215 | 
216 | ```bash
217 | # Generate a 32-byte base64 encoded key
218 | openssl rand -base64 32
219 | 
220 | # Or generate a hex key
221 | openssl rand -hex 32
222 | ```
223 | 
224 | #### Updating the API Key
225 | 
226 | To change your API key after installation:
227 | 
228 | 1. **Stop the service**:
229 |    ```bash
230 |    python install_service.py --stop
231 |    ```
232 | 
233 | 2. **Edit the service configuration**:
234 |    - **Linux**: Edit `/etc/systemd/system/mcp-memory.service` or `~/.config/systemd/user/mcp-memory.service`
235 |    - **macOS**: Edit `~/Library/LaunchAgents/com.mcp.memory-service.plist`
236 |    - **Windows**: Use `sc config` or Services Manager
237 | 
238 | 3. **Update the environment variable**:
239 |    ```bash
240 |    # Find the line with MCP_API_KEY and replace the value
241 |    Environment=MCP_API_KEY=your-new-api-key-here
242 |    ```
243 | 
244 | 4. **Reload and restart the service**:
245 |    ```bash
246 |    # Linux (system service)
247 |    sudo systemctl daemon-reload
248 |    sudo systemctl restart mcp-memory
249 |    
250 |    # Linux (user service)
251 |    systemctl --user daemon-reload
252 |    systemctl --user restart mcp-memory
253 |    
254 |    # macOS
255 |    launchctl unload ~/Library/LaunchAgents/com.mcp.memory-service.plist
256 |    launchctl load ~/Library/LaunchAgents/com.mcp.memory-service.plist
257 |    
258 |    # Or use the installer
259 |    python install_service.py --restart
260 |    ```
261 | 
262 | #### Security Best Practices
263 | 
264 | - **Keep it Secret**: Never share your API key in logs, emails, or version control
265 | - **Regular Rotation**: Consider rotating your API key periodically
266 | - **Secure Storage**: Ensure proper file permissions on configuration files
267 | - **Environment-Specific Keys**: Use different keys for development, staging, and production
268 | 
269 | ## User vs System Services
270 | 
271 | ### User Services
272 | - **Pros**: No admin privileges required, runs in user context
273 | - **Cons**: Only runs when user is logged in
274 | - **Best for**: Desktop systems, development
275 | 
276 | ### System Services
277 | - **Pros**: Runs independently of user login, available to all users
278 | - **Cons**: Requires admin privileges, runs as specific user
279 | - **Best for**: Servers, shared systems
280 | 
281 | ## Troubleshooting
282 | 
283 | ### Service Won't Start
284 | 
285 | 1. **Check dependencies**:
286 |    ```bash
287 |    python scripts/verify_environment.py
288 |    ```
289 | 
290 | 2. **Check logs**:
291 |    - Windows: Event Viewer → Windows Logs → Application
292 |    - macOS: Console.app or `~/.mcp_memory_service/logs/`
293 |    - Linux: `journalctl -u mcp-memory` or `journalctl --user -u mcp-memory`
294 | 
295 | 3. **Verify configuration**:
296 |    ```bash
297 |    cat ~/.mcp_memory_service/service_config.json
298 |    ```
299 | 
300 | ### Permission Errors
301 | 
302 | - **Windows**: Run as Administrator
303 | - **macOS/Linux**: Use `sudo` for system services
304 | - Check file ownership in `~/.mcp_memory_service/`
305 | 
306 | ### Port Already in Use
307 | 
308 | If port 8000 is already in use:
309 | 1. Change the port in environment variables
310 | 2. Reinstall the service
311 | 3. Or stop the conflicting service
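
To check quickly whether something is already listening on the port, a minimal sketch (equivalently, `lsof -i :8000` on macOS/Linux):

```python
import socket

def port_in_use(port: int, host: str = "127.0.0.1") -> bool:
    """Return True if a TCP connection to host:port succeeds."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1)
        return s.connect_ex((host, port)) == 0

print(port_in_use(8000))
```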
312 | 
313 | ### Service Not Found
314 | 
315 | - Ensure the service was installed successfully
316 | - Check the correct service name:
317 |   - Windows: `MCPMemoryService`
318 |   - macOS: `com.mcp.memory-service`
319 |   - Linux: `mcp-memory`
320 | 
321 | ## Uninstalling
322 | 
323 | To completely remove the service:
324 | 
325 | ```bash
326 | # Uninstall service
327 | python install_service.py --uninstall
328 | 
329 | # Remove configuration (optional)
330 | rm -rf ~/.mcp_memory_service/
331 | 
332 | # Remove the repository (optional)
333 | cd ..
334 | rm -rf mcp-memory-service/
335 | ```
336 | 
337 | ## Advanced Usage
338 | 
339 | ### Custom Service Names
340 | 
341 | For multiple instances, you can modify the service name in the platform-specific installer scripts before installation.
342 | 
343 | ### Custom Startup Commands
344 | 
345 | Edit the service configuration after installation:
346 | 1. Stop the service
347 | 2. Edit `~/.mcp_memory_service/service_config.json`
348 | 3. Modify the `command` array
349 | 4. Restart the service
350 | 
351 | ### Integration with Claude Desktop
352 | 
353 | After service installation, update your Claude Desktop configuration:
354 | 
355 | ```json
356 | {
357 |   "mcpServers": {
358 |     "memory": {
359 |       "url": "http://localhost:8000/mcp",
360 |       "headers": {
361 |         "Authorization": "Bearer YOUR_API_KEY_HERE"
362 |       }
363 |     }
364 |   }
365 | }
366 | ```
367 | 
368 | Replace `YOUR_API_KEY_HERE` with the API key from the installation.
369 | 
370 | ## Security Considerations
371 | 
372 | - The API key is stored in plain text in the configuration file
373 | - Ensure proper file permissions on configuration files
374 | - Use system services with caution on shared systems
375 | - Consider firewall rules if exposing the service beyond localhost
376 | 
377 | ## Getting Help
378 | 
379 | If you encounter issues:
380 | 1. Check the troubleshooting section above
381 | 2. Review platform-specific documentation in `docs/platforms/`
382 | 3. Check logs for detailed error messages
383 | 4. Open an issue on GitHub with:
384 |    - Your operating system and version
385 |    - Python version
386 |    - Error messages from logs
387 |    - Steps to reproduce the issue
```

--------------------------------------------------------------------------------
/docs/integration/homebrew.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Homebrew PyTorch Integration Guide
  2 | 
  3 | This guide covers the integration of MCP Memory Service with Homebrew-installed PyTorch, providing a lightweight solution for systems with complex Python environments or limited resources.
  4 | 
  5 | ## Overview
  6 | 
  7 | The Homebrew PyTorch integration allows MCP Memory Service to use system-installed PyTorch via Homebrew, avoiding package conflicts and reducing installation complexity. This solution uses SQLite-vec as the storage backend with ONNX runtime for CPU-optimized embeddings.
  8 | 
  9 | ### Key Components
 10 | 
 11 | - **SQLite-vec**: Lightweight vector database backend
 12 | - **ONNX Runtime**: CPU-optimized inference engine
 13 | - **Subprocess Isolation**: Process isolation to avoid import conflicts
 14 | - **Custom Integration Layer**: Bridge between MCP protocol and Homebrew environment
 15 | 
 16 | ## Quick Start
 17 | 
 18 | ### Prerequisites
 19 | 
 20 | - Homebrew installed
 21 | - Python 3.10+
 22 | - PyTorch via Homebrew: `brew install pytorch`
 23 | 
 24 | ### Installation
 25 | 
 26 | ```bash
 27 | git clone https://github.com/doobidoo/mcp-memory-service.git
 28 | cd mcp-memory-service
 29 | 
 30 | # Install with Homebrew PyTorch integration
 31 | python install.py --use-homebrew-pytorch --storage-backend sqlite_vec
 32 | ```
 33 | 
 34 | ### Running the Service
 35 | 
 36 | ```bash
 37 | # Using the provided script
 38 | ./scripts/run_with_homebrew_pytorch.sh
 39 | 
 40 | # Or manually
 41 | python scripts/homebrew/homebrew_server.py
 42 | ```
 43 | 
 44 | ### Claude Desktop Configuration
 45 | 
 46 | Add to your Claude Desktop config:
 47 | 
 48 | ```json
 49 | {
 50 |   "mcpServers": {
 51 |     "memory": {
 52 |       "command": "/path/to/mcp-memory-service/scripts/run_with_homebrew_pytorch.sh",
 53 |       "env": {
 54 |         "MCP_MEMORY_USE_HOMEBREW_PYTORCH": "true",
 55 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec"
 56 |       }
 57 |     }
 58 |   }
 59 | }
 60 | ```
 61 | 
 62 | ## Detailed Setup Instructions
 63 | 
 64 | ### 1. Verify Homebrew PyTorch Installation
 65 | 
 66 | ```bash
 67 | # Check if PyTorch is installed via Homebrew
 68 | brew list | grep pytorch
 69 | 
 70 | # Verify PyTorch accessibility
 71 | python -c "import torch; print(f'PyTorch version: {torch.__version__}')"
 72 | ```
 73 | 
 74 | ### 2. Install MCP Memory Service
 75 | 
 76 | ```bash
 77 | # Clone repository
 78 | git clone https://github.com/doobidoo/mcp-memory-service.git
 79 | cd mcp-memory-service
 80 | 
 81 | # Create virtual environment
 82 | python -m venv venv
 83 | source venv/bin/activate  # On Windows: venv\Scripts\activate
 84 | 
 85 | # Install with Homebrew integration
 86 | python install.py --use-homebrew-pytorch --skip-pytorch
 87 | ```
 88 | 
 89 | ### 3. Configure Environment Variables
 90 | 
 91 | ```bash
 92 | export MCP_MEMORY_USE_HOMEBREW_PYTORCH=true
 93 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 94 | export MCP_MEMORY_USE_ONNX=true
 95 | export MCP_MEMORY_SQLITE_PATH="$HOME/.mcp_memory_sqlite/memory.db"
 96 | ```
 97 | 
 98 | ### 4. Test Installation
 99 | 
100 | ```bash
101 | # Run diagnostic tests
102 | python scripts/verify_environment.py
103 | 
104 | # Test Homebrew PyTorch detection
105 | python -c "from src.mcp_memory_service.integrations.homebrew.embeddings import HomebrewPyTorchEmbeddings; print('Homebrew integration working')"
106 | 
107 | # Test server startup
108 | python scripts/homebrew/homebrew_server.py --test
109 | ```
110 | 
111 | ## Technical Implementation
112 | 
113 | ### Architecture Overview
114 | 
115 | The integration uses a subprocess-based architecture to isolate the Homebrew PyTorch environment from the MCP server process:
116 | 
117 | ```
118 | MCP Server Process
119 |     ↓ (subprocess call)
120 | Homebrew PyTorch Process
121 |     ↓ (file-based exchange)
122 | Embedding Results
123 | ```
124 | 
125 | ### Module Override Patterns
126 | 
127 | The integration implements several technical patterns for compatibility:
128 | 
129 | #### 1. Runtime Class Replacement
130 | 
131 | ```python
132 | # Override storage backend selection
133 | def get_storage_backend():
134 |     if os.getenv('MCP_MEMORY_USE_HOMEBREW_PYTORCH'):
135 |         return SqliteVecStorage
136 |     return ChromaStorage
137 | ```
138 | 
139 | #### 2. Subprocess Execution Pattern
140 | 
141 | ```python
142 | def generate_embeddings_subprocess(texts, model_name):
143 |     """Generate embeddings using subprocess isolation (module-level json, subprocess, sys imports assumed)"""
144 |     script = f"""
145 | import json, sys
146 | sys.path.insert(0, '/opt/homebrew/lib/python3.11/site-packages')
147 | import torch
148 | from sentence_transformers import SentenceTransformer
149 | 
150 | model = SentenceTransformer('{model_name}')
151 | embeddings = model.encode({texts!r})
152 | print(json.dumps(embeddings.tolist()))
153 | """
154 |     
155 |     result = subprocess.run([
156 |         sys.executable, '-c', script
157 |     ], capture_output=True, text=True, env=homebrew_env)  # homebrew_env: os.environ copy exposing Homebrew site-packages
158 |     
159 |     return json.loads(result.stdout)
160 | ```
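161 | 
162 | The snippet above assumes a `homebrew_env` mapping. A minimal way to build one (the Apple Silicon path is shown; Intel Macs use `/usr/local/...`):
163 | 
164 | ```python
165 | import os
166 | 
167 | # Inherit the parent environment, but point PYTHONPATH at the Homebrew
168 | # site-packages so `import torch` resolves there in the child process.
169 | homebrew_env = dict(os.environ)
170 | homebrew_env["PYTHONPATH"] = "/opt/homebrew/lib/python3.11/site-packages"
171 | 
172 | embeddings = generate_embeddings_subprocess(["hello world"], "all-MiniLM-L6-v2")
173 | print(f"got {len(embeddings)} embedding(s)")
174 | ```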
161 | 
162 | #### 3. MCP Protocol Compliance
163 | 
164 | ```python
165 | from contextlib import redirect_stderr
166 | from io import StringIO
167 | 
168 | def wrap_mcp_handler(handler_func):
169 |     """Wrapper to ensure MCP protocol compliance."""
167 |     async def wrapper(*args, **kwargs):
168 |         try:
169 |             # Redirect stderr to prevent protocol pollution
170 |             with redirect_stderr(StringIO()):
171 |                 result = await handler_func(*args, **kwargs)
172 |             return result
173 |         except Exception as e:
174 |             # Convert to MCP-compliant error format
175 |             return {"error": str(e)}
176 |     return wrapper
177 | ```
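178 | 
179 | Usage is a plain decoration step. The handler below is hypothetical and stands in for the server's real MCP tool handlers:
180 | 
181 | ```python
182 | # Hypothetical handler, wrapped so stray stderr output from the PyTorch
183 | # stack cannot leak into the stdio channel the MCP client is parsing.
184 | async def handle_store_memory(content: str):
185 |     return {"stored": True, "content": content}
186 | 
187 | handle_store_memory = wrap_mcp_handler(handle_store_memory)
188 | ```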
178 | 
179 | ### Environment Detection
180 | 
181 | The system automatically detects Homebrew PyTorch availability:
182 | 
183 | ```python
184 | import os
185 | 
186 | def detect_homebrew_pytorch():
187 |     """Detect if Homebrew PyTorch is available."""
186 |     homebrew_paths = [
187 |         '/opt/homebrew/lib/python3.11/site-packages',
188 |         '/usr/local/lib/python3.11/site-packages'
189 |     ]
190 |     
191 |     for path in homebrew_paths:
192 |         torch_path = os.path.join(path, 'torch')
193 |         if os.path.exists(torch_path):
194 |             return path
195 |     return None
196 | ```
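197 | 
198 | A sketch of how the detection helper and the `MCP_MEMORY_HOMEBREW_PYTHON_PATH` override (documented in the variable reference below) could compose:
199 | 
200 | ```python
201 | import os
202 | 
203 | # An explicit override wins; otherwise fall back to auto-detection.
204 | site_packages = os.getenv("MCP_MEMORY_HOMEBREW_PYTHON_PATH") or detect_homebrew_pytorch()
205 | if site_packages is None:
206 |     raise RuntimeError("Homebrew PyTorch not found - try `brew install pytorch`")
207 | ```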
197 | 
198 | ## Troubleshooting
199 | 
200 | ### Diagnostic Commands
201 | 
202 | #### Check Environment Status
203 | 
204 | ```bash
205 | # Verify Homebrew PyTorch detection
206 | python -c "
207 | import os
208 | import sys
209 | print('Homebrew paths:')
210 | for path in ['/opt/homebrew/lib/python3.11/site-packages', '/usr/local/lib/python3.11/site-packages']:
211 |     exists = os.path.exists(os.path.join(path, 'torch'))
212 |     print(f'  {path}: {\"✓\" if exists else \"✗\"}')"
213 | 
214 | # Check environment variables
215 | env | grep MCP_MEMORY
216 | ```
217 | 
218 | #### Service Health Check
219 | 
220 | ```bash
221 | # Test server startup
222 | python scripts/homebrew/homebrew_server.py --health-check
223 | 
224 | # Check database connectivity
225 | python -c "
226 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecStorage
227 | storage = SqliteVecStorage()
228 | print('Database connection: ✓')
229 | "
230 | ```
231 | 
232 | #### Advanced Debugging
233 | 
234 | ```bash
235 | # Enable debug logging
236 | export LOG_LEVEL=DEBUG
237 | export MCP_MEMORY_DEBUG=true
238 | 
239 | # Run with subprocess tracing (Linux; strace is unavailable on macOS - use dtruss there)
240 | strace -e trace=execve -f python scripts/homebrew/homebrew_server.py 2>&1 | grep execve
241 | 
242 | # Database analysis
243 | sqlite3 ~/.mcp_memory_sqlite/memory.db ".schema"
244 | ```
245 | 
246 | ### Common Issues and Solutions
247 | 
248 | #### 1. Import Conflicts
249 | 
250 | **Symptom**: `ImportError` or version conflicts
251 | **Solution**: Ensure virtual environment isolation:
252 | 
253 | ```bash
254 | # Check current Python path
255 | python -c "import sys; print('\\n'.join(sys.path))"
256 | 
257 | # Reset virtual environment
258 | deactivate
259 | rm -rf venv
260 | python -m venv venv --clear
261 | source venv/bin/activate
262 | ```
263 | 
264 | #### 2. Subprocess Communication Failures
265 | 
266 | **Symptom**: Embedding generation timeouts or empty results
267 | **Solution**: Test subprocess execution manually:
268 | 
269 | ```bash
270 | # Test subprocess isolation
271 | python -c "
272 | import subprocess
273 | import sys
274 | result = subprocess.run([sys.executable, '-c', 'import torch; print(torch.__version__)'], 
275 |                        capture_output=True, text=True)
276 | print(f'stdout: {result.stdout}')
277 | print(f'stderr: {result.stderr}')
278 | print(f'returncode: {result.returncode}')
279 | "
280 | ```
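281 | 
282 | If timeouts are the failure mode, a guard like the following (a sketch, not the shipped code) surfaces the hang instead of blocking indefinitely:
283 | 
284 | ```python
285 | import subprocess
286 | import sys
287 | 
288 | try:
289 |     result = subprocess.run(
290 |         [sys.executable, "-c", "import torch; print(torch.__version__)"],
291 |         capture_output=True, text=True, timeout=60,  # fail fast instead of hanging
292 |     )
293 |     print(result.stdout or result.stderr)
294 | except subprocess.TimeoutExpired:
295 |     print("Embedding subprocess exceeded 60s - check PYTHONPATH and the model cache")
296 | ```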
281 | 
282 | #### 3. Storage Backend Issues
283 | 
284 | **Symptom**: Database creation or access errors
285 | **Solution**: Check SQLite-vec installation and permissions:
286 | 
287 | ```bash
288 | # Verify SQLite-vec availability
289 | python -c "import sqlite_vec; print('SQLite-vec available')"
290 | 
291 | # Check database permissions
292 | ls -la ~/.mcp_memory_sqlite/
293 | chmod 755 ~/.mcp_memory_sqlite/
294 | ```
295 | 
296 | #### 4. MCP Protocol Errors
297 | 
298 | **Symptom**: Claude Desktop connection failures
299 | **Solution**: Verify protocol compliance:
300 | 
301 | ```bash
302 | # Test MCP protocol directly
303 | echo '{"jsonrpc": "2.0", "id": 1, "method": "initialize", "params": {}}' | \
304 |   python scripts/homebrew/homebrew_server.py --stdin
305 | ```
306 | 
307 | ### Environment Variables Reference
308 | 
309 | | Variable | Default | Description |
310 | |----------|---------|-------------|
311 | | `MCP_MEMORY_USE_HOMEBREW_PYTORCH` | `false` | Enable Homebrew PyTorch integration |
312 | | `MCP_MEMORY_STORAGE_BACKEND` | `auto` | Storage backend to use; set to `sqlite_vec` for this integration |
313 | | `MCP_MEMORY_USE_ONNX` | `false` | Use ONNX runtime for inference |
314 | | `MCP_MEMORY_SQLITE_PATH` | `~/.mcp_memory_sqlite/memory.db` | SQLite-vec database file path |
315 | | `MCP_MEMORY_HOMEBREW_PYTHON_PATH` | `auto-detect` | Override Homebrew Python path |
316 | | `MCP_MEMORY_DEBUG` | `false` | Enable debug logging |
317 | 
318 | ## Performance Considerations
319 | 
320 | ### Memory Usage
321 | 
322 | The Homebrew integration is optimized for systems with limited memory:
323 | 
324 | - **Subprocess isolation**: Prevents memory leaks in the main process
325 | - **On-demand loading**: Models loaded only when needed
326 | - **SQLite-vec efficiency**: Minimal memory footprint for vector storage
327 | 
328 | ### CPU Optimization
329 | 
330 | - **ONNX runtime**: CPU-optimized inference
331 | - **Batch processing**: Efficient handling of multiple embeddings
332 | - **Caching**: Avoid redundant model loading (see the sketch below)
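333 | 
334 | A minimal sketch of the caching and batching ideas above (illustrative, not the service's actual code):
335 | 
336 | ```python
337 | from functools import lru_cache
338 | from sentence_transformers import SentenceTransformer
339 | 
340 | @lru_cache(maxsize=2)
341 | def get_model(model_name: str) -> SentenceTransformer:
342 |     # Model construction is expensive; cache instances so repeated
343 |     # embedding calls reuse the already-initialized model.
344 |     return SentenceTransformer(model_name)
345 | 
346 | def embed_batch(texts, model_name="all-MiniLM-L6-v2"):
347 |     # Encoding a batch amortizes tokenizer and inference overhead.
348 |     return get_model(model_name).encode(texts, batch_size=32).tolist()
349 | ```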
333 | 
334 | ## Advanced Configuration
335 | 
336 | ### Custom Model Selection
337 | 
338 | ```bash
339 | export MCP_MEMORY_SENTENCE_TRANSFORMER_MODEL="all-MiniLM-L6-v2"
340 | export MCP_MEMORY_ONNX_MODEL_PATH="/path/to/custom/model.onnx"
341 | ```
342 | 
343 | ### Multi-Client Setup
344 | 
345 | For shared access across multiple MCP clients:
346 | 
347 | ```bash
348 | # Install with multi-client support
349 | python install.py --use-homebrew-pytorch --multi-client
350 | 
351 | # Configure shared database location
352 | export MCP_MEMORY_SQLITE_PATH="/shared/mcp_memory/memory.db"
353 | ```
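354 | 
355 | Concurrent access to a shared SQLite database generally relies on WAL journaling. A quick, illustrative check that the shared file is in WAL mode:
356 | 
357 | ```python
358 | import sqlite3
359 | 
360 | # WAL journaling is what lets multiple clients read while one writes.
361 | conn = sqlite3.connect("/shared/mcp_memory/memory.db")
362 | print(conn.execute("PRAGMA journal_mode").fetchone()[0])  # expect 'wal'
363 | conn.close()
364 | ```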
354 | 
355 | ## Development and Contributions
356 | 
357 | ### Testing the Integration
358 | 
359 | ```bash
360 | # Run integration tests
361 | pytest tests/homebrew/ -v
362 | 
363 | # Run performance benchmarks
364 | python tests/performance/test_homebrew_performance.py
365 | ```
366 | 
367 | ### Adding New Features
368 | 
369 | When extending the Homebrew integration:
370 | 
371 | 1. Follow the subprocess isolation pattern
372 | 2. Maintain MCP protocol compliance
373 | 3. Add comprehensive error handling
374 | 4. Update environment variable documentation
375 | 5. Include diagnostic commands for troubleshooting
376 | 
377 | ## Related Documentation
378 | 
379 | - [Installation Guide](../installation/master-guide.md) - General installation instructions
380 | - [Storage Backends](../guides/STORAGE_BACKENDS.md) - SQLite-vec configuration
381 | - [Troubleshooting](../troubleshooting/general.md) - General troubleshooting guide
382 | - [macOS Intel Setup](../platforms/macos-intel.md) - Platform-specific considerations
383 | 
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/lm_studio_compat.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | LM Studio compatibility patch for MCP Memory Service.
  3 | 
  4 | This module provides a monkey patch to handle LM Studio's non-standard
  5 | 'notifications/cancelled' message that isn't part of the standard MCP protocol.
  6 | """
  7 | 
  8 | import logging
  9 | import sys
 10 | import platform
 11 | from typing import Any, Union
 12 | from pydantic import BaseModel, Field
 13 | 
 14 | logger = logging.getLogger(__name__)
 15 | 
 16 | def add_windows_timeout_handling():
 17 |     """
 18 |     Add Windows-specific timeout and error handling.
 19 |     """
 20 |     if platform.system() != "Windows":
 21 |         return
 22 |     
 23 |     try:
 24 |         # Add better timeout handling for Windows
 25 |         import signal
 27 |         
 28 |         def timeout_handler(signum, frame):
 29 |             logger.warning("Server received timeout signal - attempting graceful shutdown")
 30 |             print("Server timeout detected - shutting down gracefully", file=sys.stderr, flush=True)
 31 |             sys.exit(0)
 32 |         
 33 |         # Only set up signal handlers if they're available
 34 |         if hasattr(signal, 'SIGTERM'):
 35 |             signal.signal(signal.SIGTERM, timeout_handler)
 36 |         if hasattr(signal, 'SIGINT'):
 37 |             signal.signal(signal.SIGINT, timeout_handler)
 38 |             
 39 |         logger.info("Added Windows-specific timeout handling")
 40 |         
 41 |     except Exception as e:
 42 |         logger.debug(f"Could not set up Windows timeout handling: {e}")
 43 |         # Not critical, continue without it
 44 | 
 45 | def create_cancelled_notification_class():
 46 |     """Create a proper CancelledNotification class if it doesn't exist."""
 47 |     from pydantic import BaseModel
 48 |     
 49 |     class CancelledNotificationParams(BaseModel):
 50 |         """Parameters for cancelled notification."""
 51 |         requestId: Any = Field(default=None, alias="requestId")
 52 |         reason: str = Field(default="Operation cancelled")
 53 |     
 54 |     class CancelledNotification(BaseModel):
 55 |         """Cancelled notification that matches MCP expectations."""
 56 |         method: str = Field(default="notifications/cancelled")
 57 |         params: CancelledNotificationParams = Field(default_factory=CancelledNotificationParams)
 58 |         
 59 |         @property
 60 |         def root(self):
 61 |             """Provide root attribute for compatibility."""
 62 |             return self
 63 |     
 64 |     return CancelledNotification
 65 | 
 66 | def patch_mcp_for_lm_studio():
 67 |     """
 68 |     Apply monkey patches to MCP library to handle LM Studio's non-standard notifications.
 69 |     This new approach patches at multiple levels to ensure the cancelled notification is handled.
 70 |     """
 71 |     success = False
 72 |     
 73 |     try:
 74 |         import mcp.shared.session as session_module
 75 |         from pydantic_core import ValidationError
 76 |         
 77 |         # Create or get the CancelledNotification class
 78 |         if hasattr(session_module, 'CancelledNotification'):
 79 |             CancelledNotification = session_module.CancelledNotification
 80 |         else:
 81 |             CancelledNotification = create_cancelled_notification_class()
 82 |             session_module.CancelledNotification = CancelledNotification
 83 |             logger.info("Created CancelledNotification class")
 84 |         
 85 |         # Patch 1: Override ClientNotification to handle cancelled notifications
 86 |         if hasattr(session_module, 'ClientNotification'):
 87 |             original_client_notification = session_module.ClientNotification
 88 |             
 89 |             # Store the original __or__ operator if it exists
 90 |             original_or = getattr(original_client_notification, '__or__', None)
 91 |             
 92 |             # Create a new union type that includes CancelledNotification.
 93 |             # (Currently informational only - the patched model_validate
 94 |             # below performs the actual interception.)
 95 |             if original_or:
 96 |                 # Add CancelledNotification to the union
 97 |                 PatchedClientNotification = Union[original_client_notification, CancelledNotification]
 98 |             else:
 99 |                 PatchedClientNotification = original_client_notification
 98 |             
 99 |             # Store original model_validate
100 |             original_validate = original_client_notification.model_validate
101 |             
102 |             # Create patched validation function with correct classmethod signature
103 |             @classmethod
104 |             def patched_validate(cls, obj, *args, **kwargs):
105 |                 """Enhanced validation that handles cancelled notifications."""
106 |                 logger.debug(f"Patched validate called with: {type(obj)} - {obj}")
107 |                 
108 |                 if isinstance(obj, dict):
109 |                     method = obj.get('method', '')
110 |                     if method == 'notifications/cancelled':
111 |                         params = obj.get('params', {})
112 |                         logger.info(f"[PATCH] PATCH INTERCEPTED cancelled notification: {params.get('reason', 'Unknown')}")
113 |                         # Return a proper CancelledNotification instance with structured params
114 |                         notification = CancelledNotification()
115 |                         if params:
116 |                             notification.params.requestId = params.get('requestId')
117 |                             notification.params.reason = params.get('reason', 'Operation cancelled')
118 |                         return notification
119 |                 
120 |                 # Try original validation
121 |                 try:
122 |                     return original_validate.__func__(cls, obj, *args, **kwargs)
123 |                 except ValidationError as e:
124 |                     # If it's a cancelled notification error, handle it
125 |                     if 'notifications/cancelled' in str(e):
126 |                         logger.info("[PATCH] PATCH CAUGHT cancelled notification in validation error")
127 |                         notification = CancelledNotification()
128 |                         if isinstance(obj, dict) and 'params' in obj:
129 |                             params = obj['params']
130 |                             notification.params.requestId = params.get('requestId')
131 |                             notification.params.reason = params.get('reason', 'Operation cancelled')
132 |                         return notification
133 |                     raise
134 |             
135 |             # Apply the patched validation
136 |             original_client_notification.model_validate = patched_validate
137 |             logger.info("[PATCH] Applied NEW LM Studio patch to ClientNotification.model_validate v2.0")
138 |             success = True
139 |         
140 |         # Patch 2: Patch BaseSession to handle errors at the session level
141 |         from mcp.shared.session import BaseSession
142 |         
143 |         if hasattr(BaseSession, '_handle_notification'):
144 |             original_handle = BaseSession._handle_notification
145 |             
146 |             async def patched_handle_notification(self, notification):
147 |                 """Handle notifications including cancelled ones."""
148 |                 # Check if this is a CancelledNotification
149 |                 if hasattr(notification, 'method') and notification.method == 'notifications/cancelled':
150 |                     logger.info("Handling cancelled notification - ignoring")
151 |                     return  # Just ignore it
152 |                 
153 |                 # Otherwise handle normally
154 |                 return await original_handle(self, notification)
155 |             
156 |             BaseSession._handle_notification = patched_handle_notification
157 |             logger.info("[PATCH] Applied NEW patch to BaseSession._handle_notification v2.0")
158 |             success = True
159 |         
160 |         # Patch 3: As a last resort, patch the session's _receive_loop
161 |         if hasattr(BaseSession, '_receive_loop'):
162 |             original_loop = BaseSession._receive_loop
163 |             
164 |             async def patched_loop(self):
165 |                 """Robust receive loop that continues on cancelled notifications."""
166 |                 try:
167 |                     return await original_loop(self)
168 |                 except Exception as e:
169 |                     # Check for the specific error pattern
170 |                     error_str = str(e)
171 |                     if ('notifications/cancelled' in error_str or
172 |                         ('ValidationError' in str(type(e).__name__) and 
173 |                          'literal_error' in error_str)):
174 |                         logger.info("Suppressed cancelled notification error in receive loop")
175 |                         # Don't propagate the error - this prevents the TaskGroup from failing
176 |                         return None
177 |                     # Re-raise other errors
178 |                     raise
179 |             
180 |             BaseSession._receive_loop = patched_loop
181 |             logger.info("[PATCH] Applied NEW fallback patch to BaseSession._receive_loop v2.0")
182 |             success = True
183 |         
184 |     except ImportError as e:
185 |         logger.warning(f"Could not import MCP modules: {e}")
186 |         return patch_alternative_approach()
187 |     except Exception as e:
188 |         logger.error(f"Error applying LM Studio compatibility patch: {e}")
189 |     
190 |     if not success:
191 |         logger.warning("Primary patching failed, trying alternative approach")
192 |         return patch_alternative_approach()
193 |     
194 |     return success
195 | 
196 | 
197 | def patch_alternative_approach():
198 |     """
199 |     Alternative patching approach that modifies validation at a lower level.
200 |     """
201 |     try:
202 |         # Try to patch pydantic validation directly. Note: in compiled
203 |         # pydantic-core builds, ValidationError is a Rust-backed type and
204 |         # assigning to __init__ may raise TypeError; the except clause
205 |         # below treats that as a patch failure.
206 |         from pydantic_core import ValidationError
207 |         
208 |         original_validation_error = ValidationError.__init__
207 |         
208 |         def patched_validation_error_init(self, *args, **kwargs):
209 |             """Suppress cancelled notification validation errors."""
210 |             # Check if this is about cancelled notifications
211 |             if args and 'notifications/cancelled' in str(args[0]):
212 |                 logger.info("Suppressed ValidationError for cancelled notification")
213 |                 # Create a minimal error that won't cause issues
214 |                 self.errors = []
215 |                 return
216 |             
217 |             # Otherwise initialize normally
218 |             original_validation_error(self, *args, **kwargs)
219 |         
220 |         ValidationError.__init__ = patched_validation_error_init
221 |         logger.info("[PATCH] Applied NEW alternative patch to ValidationError v2.0")
222 |         return True
223 |         
224 |     except Exception as e:
225 |         logger.error(f"Alternative patch failed: {e}")
226 |         return False
```
Page 14/47