This is page 22 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── amp-bridge.md
│ │ ├── amp-pr-automator.md
│ │ ├── code-quality-guard.md
│ │ ├── gemini-pr-automator.md
│ │ └── github-release-manager.md
│ ├── settings.local.json.backup
│ └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ ├── feature_request.yml
│ │ └── performance_issue.yml
│ ├── pull_request_template.md
│ └── workflows
│ ├── bridge-tests.yml
│ ├── CACHE_FIX.md
│ ├── claude-code-review.yml
│ ├── claude.yml
│ ├── cleanup-images.yml.disabled
│ ├── dev-setup-validation.yml
│ ├── docker-publish.yml
│ ├── LATEST_FIXES.md
│ ├── main-optimized.yml.disabled
│ ├── main.yml
│ ├── publish-and-test.yml
│ ├── README_OPTIMIZATION.md
│ ├── release-tag.yml.disabled
│ ├── release.yml
│ ├── roadmap-review-reminder.yml
│ ├── SECRET_CONDITIONAL_FIX.md
│ └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│ ├── .gitignore
│ └── reports
│ └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│ ├── deployment
│ │ ├── deploy_fastmcp_fixed.sh
│ │ ├── deploy_http_with_mcp.sh
│ │ └── deploy_mcp_v4.sh
│ ├── deployment-configs
│ │ ├── empty_config.yml
│ │ └── smithery.yaml
│ ├── development
│ │ └── test_fastmcp.py
│ ├── docs-removed-2025-08-23
│ │ ├── authentication.md
│ │ ├── claude_integration.md
│ │ ├── claude-code-compatibility.md
│ │ ├── claude-code-integration.md
│ │ ├── claude-code-quickstart.md
│ │ ├── claude-desktop-setup.md
│ │ ├── complete-setup-guide.md
│ │ ├── database-synchronization.md
│ │ ├── development
│ │ │ ├── autonomous-memory-consolidation.md
│ │ │ ├── CLEANUP_PLAN.md
│ │ │ ├── CLEANUP_README.md
│ │ │ ├── CLEANUP_SUMMARY.md
│ │ │ ├── dream-inspired-memory-consolidation.md
│ │ │ ├── hybrid-slm-memory-consolidation.md
│ │ │ ├── mcp-milestone.md
│ │ │ ├── multi-client-architecture.md
│ │ │ ├── test-results.md
│ │ │ └── TIMESTAMP_FIX_SUMMARY.md
│ │ ├── distributed-sync.md
│ │ ├── invocation_guide.md
│ │ ├── macos-intel.md
│ │ ├── master-guide.md
│ │ ├── mcp-client-configuration.md
│ │ ├── multi-client-server.md
│ │ ├── service-installation.md
│ │ ├── sessions
│ │ │ └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│ │ ├── UBUNTU_SETUP.md
│ │ ├── ubuntu.md
│ │ ├── windows-setup.md
│ │ └── windows.md
│ ├── docs-root-cleanup-2025-08-23
│ │ ├── AWESOME_LIST_SUBMISSION.md
│ │ ├── CLOUDFLARE_IMPLEMENTATION.md
│ │ ├── DOCUMENTATION_ANALYSIS.md
│ │ ├── DOCUMENTATION_CLEANUP_PLAN.md
│ │ ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│ │ ├── LITESTREAM_SETUP_GUIDE.md
│ │ ├── lm_studio_system_prompt.md
│ │ ├── PYTORCH_DOWNLOAD_FIX.md
│ │ └── README-ORIGINAL-BACKUP.md
│ ├── investigations
│ │ └── MACOS_HOOKS_INVESTIGATION.md
│ ├── litestream-configs-v6.3.0
│ │ ├── install_service.sh
│ │ ├── litestream_master_config_fixed.yml
│ │ ├── litestream_master_config.yml
│ │ ├── litestream_replica_config_fixed.yml
│ │ ├── litestream_replica_config.yml
│ │ ├── litestream_replica_simple.yml
│ │ ├── litestream-http.service
│ │ ├── litestream.service
│ │ └── requirements-cloudflare.txt
│ ├── release-notes
│ │ └── release-notes-v7.1.4.md
│ └── setup-development
│ ├── README.md
│ ├── setup_consolidation_mdns.sh
│ ├── STARTUP_SETUP_GUIDE.md
│ └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│ ├── memory-context.md
│ ├── memory-health.md
│ ├── memory-ingest-dir.md
│ ├── memory-ingest.md
│ ├── memory-recall.md
│ ├── memory-search.md
│ ├── memory-store.md
│ ├── README.md
│ └── session-start.md
├── claude-hooks
│ ├── config.json
│ ├── config.template.json
│ ├── CONFIGURATION.md
│ ├── core
│ │ ├── memory-retrieval.js
│ │ ├── mid-conversation.js
│ │ ├── session-end.js
│ │ ├── session-start.js
│ │ └── topic-change.js
│ ├── debug-pattern-test.js
│ ├── install_claude_hooks_windows.ps1
│ ├── install_hooks.py
│ ├── memory-mode-controller.js
│ ├── MIGRATION.md
│ ├── README-NATURAL-TRIGGERS.md
│ ├── README-phase2.md
│ ├── README.md
│ ├── simple-test.js
│ ├── statusline.sh
│ ├── test-adaptive-weights.js
│ ├── test-dual-protocol-hook.js
│ ├── test-mcp-hook.js
│ ├── test-natural-triggers.js
│ ├── test-recency-scoring.js
│ ├── tests
│ │ ├── integration-test.js
│ │ ├── phase2-integration-test.js
│ │ ├── test-code-execution.js
│ │ ├── test-cross-session.json
│ │ ├── test-session-tracking.json
│ │ └── test-threading.json
│ ├── utilities
│ │ ├── adaptive-pattern-detector.js
│ │ ├── context-formatter.js
│ │ ├── context-shift-detector.js
│ │ ├── conversation-analyzer.js
│ │ ├── dynamic-context-updater.js
│ │ ├── git-analyzer.js
│ │ ├── mcp-client.js
│ │ ├── memory-client.js
│ │ ├── memory-scorer.js
│ │ ├── performance-manager.js
│ │ ├── project-detector.js
│ │ ├── session-tracker.js
│ │ ├── tiered-conversation-monitor.js
│ │ └── version-checker.js
│ └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│ ├── amp-cli-bridge.md
│ ├── api
│ │ ├── code-execution-interface.md
│ │ ├── memory-metadata-api.md
│ │ ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_REPORT.md
│ │ └── tag-standardization.md
│ ├── architecture
│ │ ├── search-enhancement-spec.md
│ │ └── search-examples.md
│ ├── architecture.md
│ ├── archive
│ │ └── obsolete-workflows
│ │ ├── load_memory_context.md
│ │ └── README.md
│ ├── assets
│ │ └── images
│ │ ├── dashboard-v3.3.0-preview.png
│ │ ├── memory-awareness-hooks-example.png
│ │ ├── project-infographic.svg
│ │ └── README.md
│ ├── CLAUDE_CODE_QUICK_REFERENCE.md
│ ├── cloudflare-setup.md
│ ├── deployment
│ │ ├── docker.md
│ │ ├── dual-service.md
│ │ ├── production-guide.md
│ │ └── systemd-service.md
│ ├── development
│ │ ├── ai-agent-instructions.md
│ │ ├── code-quality
│ │ │ ├── phase-2a-completion.md
│ │ │ ├── phase-2a-handle-get-prompt.md
│ │ │ ├── phase-2a-index.md
│ │ │ ├── phase-2a-install-package.md
│ │ │ └── phase-2b-session-summary.md
│ │ ├── code-quality-workflow.md
│ │ ├── dashboard-workflow.md
│ │ ├── issue-management.md
│ │ ├── pr-review-guide.md
│ │ ├── refactoring-notes.md
│ │ ├── release-checklist.md
│ │ └── todo-tracker.md
│ ├── docker-optimized-build.md
│ ├── document-ingestion.md
│ ├── DOCUMENTATION_AUDIT.md
│ ├── enhancement-roadmap-issue-14.md
│ ├── examples
│ │ ├── analysis-scripts.js
│ │ ├── maintenance-session-example.md
│ │ ├── memory-distribution-chart.jsx
│ │ └── tag-schema.json
│ ├── first-time-setup.md
│ ├── glama-deployment.md
│ ├── guides
│ │ ├── advanced-command-examples.md
│ │ ├── chromadb-migration.md
│ │ ├── commands-vs-mcp-server.md
│ │ ├── mcp-enhancements.md
│ │ ├── mdns-service-discovery.md
│ │ ├── memory-consolidation-guide.md
│ │ ├── migration.md
│ │ ├── scripts.md
│ │ └── STORAGE_BACKENDS.md
│ ├── HOOK_IMPROVEMENTS.md
│ ├── hooks
│ │ └── phase2-code-execution-migration.md
│ ├── http-server-management.md
│ ├── ide-compatability.md
│ ├── IMAGE_RETENTION_POLICY.md
│ ├── images
│ │ └── dashboard-placeholder.md
│ ├── implementation
│ │ ├── health_checks.md
│ │ └── performance.md
│ ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│ ├── integration
│ │ ├── homebrew.md
│ │ └── multi-client.md
│ ├── integrations
│ │ ├── gemini.md
│ │ ├── groq-bridge.md
│ │ ├── groq-integration-summary.md
│ │ └── groq-model-comparison.md
│ ├── integrations.md
│ ├── legacy
│ │ └── dual-protocol-hooks.md
│ ├── LM_STUDIO_COMPATIBILITY.md
│ ├── maintenance
│ │ └── memory-maintenance.md
│ ├── mastery
│ │ ├── api-reference.md
│ │ ├── architecture-overview.md
│ │ ├── configuration-guide.md
│ │ ├── local-setup-and-run.md
│ │ ├── testing-guide.md
│ │ └── troubleshooting.md
│ ├── migration
│ │ └── code-execution-api-quick-start.md
│ ├── natural-memory-triggers
│ │ ├── cli-reference.md
│ │ ├── installation-guide.md
│ │ └── performance-optimization.md
│ ├── oauth-setup.md
│ ├── pr-graphql-integration.md
│ ├── quick-setup-cloudflare-dual-environment.md
│ ├── README.md
│ ├── remote-configuration-wiki-section.md
│ ├── research
│ │ ├── code-execution-interface-implementation.md
│ │ └── code-execution-interface-summary.md
│ ├── ROADMAP.md
│ ├── sqlite-vec-backend.md
│ ├── statistics
│ │ ├── charts
│ │ │ ├── activity_patterns.png
│ │ │ ├── contributors.png
│ │ │ ├── growth_trajectory.png
│ │ │ ├── monthly_activity.png
│ │ │ └── october_sprint.png
│ │ ├── data
│ │ │ ├── activity_by_day.csv
│ │ │ ├── activity_by_hour.csv
│ │ │ ├── contributors.csv
│ │ │ └── monthly_activity.csv
│ │ ├── generate_charts.py
│ │ └── REPOSITORY_STATISTICS.md
│ ├── technical
│ │ ├── development.md
│ │ ├── memory-migration.md
│ │ ├── migration-log.md
│ │ ├── sqlite-vec-embedding-fixes.md
│ │ └── tag-storage.md
│ ├── testing
│ │ └── regression-tests.md
│ ├── testing-cloudflare-backend.md
│ ├── troubleshooting
│ │ ├── cloudflare-api-token-setup.md
│ │ ├── cloudflare-authentication.md
│ │ ├── general.md
│ │ ├── hooks-quick-reference.md
│ │ ├── pr162-schema-caching-issue.md
│ │ ├── session-end-hooks.md
│ │ └── sync-issues.md
│ └── tutorials
│ ├── advanced-techniques.md
│ ├── data-analysis.md
│ └── demo-session-walkthrough.md
├── examples
│ ├── claude_desktop_config_template.json
│ ├── claude_desktop_config_windows.json
│ ├── claude-desktop-http-config.json
│ ├── config
│ │ └── claude_desktop_config.json
│ ├── http-mcp-bridge.js
│ ├── memory_export_template.json
│ ├── README.md
│ ├── setup
│ │ └── setup_multi_client_complete.py
│ └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│ ├── .claude
│ │ └── settings.local.json
│ ├── archive
│ │ └── check_missing_timestamps.py
│ ├── backup
│ │ ├── backup_memories.py
│ │ ├── backup_sqlite_vec.sh
│ │ ├── export_distributable_memories.sh
│ │ └── restore_memories.py
│ ├── benchmarks
│ │ ├── benchmark_code_execution_api.py
│ │ ├── benchmark_hybrid_sync.py
│ │ └── benchmark_server_caching.py
│ ├── database
│ │ ├── analyze_sqlite_vec_db.py
│ │ ├── check_sqlite_vec_status.py
│ │ ├── db_health_check.py
│ │ └── simple_timestamp_check.py
│ ├── development
│ │ ├── debug_server_initialization.py
│ │ ├── find_orphaned_files.py
│ │ ├── fix_mdns.sh
│ │ ├── fix_sitecustomize.py
│ │ ├── remote_ingest.sh
│ │ ├── setup-git-merge-drivers.sh
│ │ ├── uv-lock-merge.sh
│ │ └── verify_hybrid_sync.py
│ ├── hooks
│ │ └── pre-commit
│ ├── installation
│ │ ├── install_linux_service.py
│ │ ├── install_macos_service.py
│ │ ├── install_uv.py
│ │ ├── install_windows_service.py
│ │ ├── install.py
│ │ ├── setup_backup_cron.sh
│ │ ├── setup_claude_mcp.sh
│ │ └── setup_cloudflare_resources.py
│ ├── linux
│ │ ├── service_status.sh
│ │ ├── start_service.sh
│ │ ├── stop_service.sh
│ │ ├── uninstall_service.sh
│ │ └── view_logs.sh
│ ├── maintenance
│ │ ├── assign_memory_types.py
│ │ ├── check_memory_types.py
│ │ ├── cleanup_corrupted_encoding.py
│ │ ├── cleanup_memories.py
│ │ ├── cleanup_organize.py
│ │ ├── consolidate_memory_types.py
│ │ ├── consolidation_mappings.json
│ │ ├── delete_orphaned_vectors_fixed.py
│ │ ├── fast_cleanup_duplicates_with_tracking.sh
│ │ ├── find_all_duplicates.py
│ │ ├── find_cloudflare_duplicates.py
│ │ ├── find_duplicates.py
│ │ ├── memory-types.md
│ │ ├── README.md
│ │ ├── recover_timestamps_from_cloudflare.py
│ │ ├── regenerate_embeddings.py
│ │ ├── repair_malformed_tags.py
│ │ ├── repair_memories.py
│ │ ├── repair_sqlite_vec_embeddings.py
│ │ ├── repair_zero_embeddings.py
│ │ ├── restore_from_json_export.py
│ │ └── scan_todos.sh
│ ├── migration
│ │ ├── cleanup_mcp_timestamps.py
│ │ ├── legacy
│ │ │ └── migrate_chroma_to_sqlite.py
│ │ ├── mcp-migration.py
│ │ ├── migrate_sqlite_vec_embeddings.py
│ │ ├── migrate_storage.py
│ │ ├── migrate_tags.py
│ │ ├── migrate_timestamps.py
│ │ ├── migrate_to_cloudflare.py
│ │ ├── migrate_to_sqlite_vec.py
│ │ ├── migrate_v5_enhanced.py
│ │ ├── TIMESTAMP_CLEANUP_README.md
│ │ └── verify_mcp_timestamps.py
│ ├── pr
│ │ ├── amp_collect_results.sh
│ │ ├── amp_detect_breaking_changes.sh
│ │ ├── amp_generate_tests.sh
│ │ ├── amp_pr_review.sh
│ │ ├── amp_quality_gate.sh
│ │ ├── amp_suggest_fixes.sh
│ │ ├── auto_review.sh
│ │ ├── detect_breaking_changes.sh
│ │ ├── generate_tests.sh
│ │ ├── lib
│ │ │ └── graphql_helpers.sh
│ │ ├── quality_gate.sh
│ │ ├── resolve_threads.sh
│ │ ├── run_pyscn_analysis.sh
│ │ ├── run_quality_checks.sh
│ │ ├── thread_status.sh
│ │ └── watch_reviews.sh
│ ├── quality
│ │ ├── fix_dead_code_install.sh
│ │ ├── phase1_dead_code_analysis.md
│ │ ├── phase2_complexity_analysis.md
│ │ ├── README_PHASE1.md
│ │ ├── README_PHASE2.md
│ │ ├── track_pyscn_metrics.sh
│ │ └── weekly_quality_review.sh
│ ├── README.md
│ ├── run
│ │ ├── run_mcp_memory.sh
│ │ ├── run-with-uv.sh
│ │ └── start_sqlite_vec.sh
│ ├── run_memory_server.py
│ ├── server
│ │ ├── check_http_server.py
│ │ ├── check_server_health.py
│ │ ├── memory_offline.py
│ │ ├── preload_models.py
│ │ ├── run_http_server.py
│ │ ├── run_memory_server.py
│ │ ├── start_http_server.bat
│ │ └── start_http_server.sh
│ ├── service
│ │ ├── deploy_dual_services.sh
│ │ ├── install_http_service.sh
│ │ ├── mcp-memory-http.service
│ │ ├── mcp-memory.service
│ │ ├── memory_service_manager.sh
│ │ ├── service_control.sh
│ │ ├── service_utils.py
│ │ └── update_service.sh
│ ├── sync
│ │ ├── check_drift.py
│ │ ├── claude_sync_commands.py
│ │ ├── export_memories.py
│ │ ├── import_memories.py
│ │ ├── litestream
│ │ │ ├── apply_local_changes.sh
│ │ │ ├── enhanced_memory_store.sh
│ │ │ ├── init_staging_db.sh
│ │ │ ├── io.litestream.replication.plist
│ │ │ ├── manual_sync.sh
│ │ │ ├── memory_sync.sh
│ │ │ ├── pull_remote_changes.sh
│ │ │ ├── push_to_remote.sh
│ │ │ ├── README.md
│ │ │ ├── resolve_conflicts.sh
│ │ │ ├── setup_local_litestream.sh
│ │ │ ├── setup_remote_litestream.sh
│ │ │ ├── staging_db_init.sql
│ │ │ ├── stash_local_changes.sh
│ │ │ ├── sync_from_remote_noconfig.sh
│ │ │ └── sync_from_remote.sh
│ │ ├── README.md
│ │ ├── safe_cloudflare_update.sh
│ │ ├── sync_memory_backends.py
│ │ └── sync_now.py
│ ├── testing
│ │ ├── run_complete_test.py
│ │ ├── run_memory_test.sh
│ │ ├── simple_test.py
│ │ ├── test_cleanup_logic.py
│ │ ├── test_cloudflare_backend.py
│ │ ├── test_docker_functionality.py
│ │ ├── test_installation.py
│ │ ├── test_mdns.py
│ │ ├── test_memory_api.py
│ │ ├── test_memory_simple.py
│ │ ├── test_migration.py
│ │ ├── test_search_api.py
│ │ ├── test_sqlite_vec_embeddings.py
│ │ ├── test_sse_events.py
│ │ ├── test-connection.py
│ │ └── test-hook.js
│ ├── utils
│ │ ├── claude_commands_utils.py
│ │ ├── generate_personalized_claude_md.sh
│ │ ├── groq
│ │ ├── groq_agent_bridge.py
│ │ ├── list-collections.py
│ │ ├── memory_wrapper_uv.py
│ │ ├── query_memories.py
│ │ ├── smithery_wrapper.py
│ │ ├── test_groq_bridge.sh
│ │ └── uv_wrapper.py
│ └── validation
│ ├── check_dev_setup.py
│ ├── check_documentation_links.py
│ ├── diagnose_backend_config.py
│ ├── validate_configuration_complete.py
│ ├── validate_memories.py
│ ├── validate_migration.py
│ ├── validate_timestamp_integrity.py
│ ├── verify_environment.py
│ ├── verify_pytorch_windows.py
│ └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│ └── mcp_memory_service
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── operations.py
│ │ ├── sync_wrapper.py
│ │ └── types.py
│ ├── backup
│ │ ├── __init__.py
│ │ └── scheduler.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── ingestion.py
│ │ ├── main.py
│ │ └── utils.py
│ ├── config.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── associations.py
│ │ ├── base.py
│ │ ├── clustering.py
│ │ ├── compression.py
│ │ ├── consolidator.py
│ │ ├── decay.py
│ │ ├── forgetting.py
│ │ ├── health.py
│ │ └── scheduler.py
│ ├── dependency_check.py
│ ├── discovery
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── mdns_service.py
│ ├── embeddings
│ │ ├── __init__.py
│ │ └── onnx_embeddings.py
│ ├── ingestion
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chunker.py
│ │ ├── csv_loader.py
│ │ ├── json_loader.py
│ │ ├── pdf_loader.py
│ │ ├── registry.py
│ │ ├── semtools_loader.py
│ │ └── text_loader.py
│ ├── lm_studio_compat.py
│ ├── mcp_server.py
│ ├── models
│ │ ├── __init__.py
│ │ └── memory.py
│ ├── server.py
│ ├── services
│ │ ├── __init__.py
│ │ └── memory_service.py
│ ├── storage
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloudflare.py
│ │ ├── factory.py
│ │ ├── http_client.py
│ │ ├── hybrid.py
│ │ └── sqlite_vec.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── exporter.py
│ │ ├── importer.py
│ │ └── litestream_config.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── content_splitter.py
│ │ ├── db_utils.py
│ │ ├── debug.py
│ │ ├── document_processing.py
│ │ ├── gpu_detection.py
│ │ ├── hashing.py
│ │ ├── http_server_manager.py
│ │ ├── port_detection.py
│ │ ├── system_detection.py
│ │ └── time_parser.py
│ └── web
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── analytics.py
│ │ ├── backup.py
│ │ ├── consolidation.py
│ │ ├── documents.py
│ │ ├── events.py
│ │ ├── health.py
│ │ ├── manage.py
│ │ ├── mcp.py
│ │ ├── memories.py
│ │ ├── search.py
│ │ └── sync.py
│ ├── app.py
│ ├── dependencies.py
│ ├── oauth
│ │ ├── __init__.py
│ │ ├── authorization.py
│ │ ├── discovery.py
│ │ ├── middleware.py
│ │ ├── models.py
│ │ ├── registration.py
│ │ └── storage.py
│ ├── sse.py
│ └── static
│ ├── app.js
│ ├── index.html
│ ├── README.md
│ ├── sse_test.html
│ └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── test_compact_types.py
│ │ └── test_operations.py
│ ├── bridge
│ │ ├── mock_responses.js
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ └── test_http_mcp_bridge.js
│ ├── conftest.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_associations.py
│ │ ├── test_clustering.py
│ │ ├── test_compression.py
│ │ ├── test_consolidator.py
│ │ ├── test_decay.py
│ │ └── test_forgetting.py
│ ├── contracts
│ │ └── api-specification.yml
│ ├── integration
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ ├── test_api_key_fallback.py
│ │ ├── test_api_memories_chronological.py
│ │ ├── test_api_tag_time_search.py
│ │ ├── test_api_with_memory_service.py
│ │ ├── test_bridge_integration.js
│ │ ├── test_cli_interfaces.py
│ │ ├── test_cloudflare_connection.py
│ │ ├── test_concurrent_clients.py
│ │ ├── test_data_serialization_consistency.py
│ │ ├── test_http_server_startup.py
│ │ ├── test_mcp_memory.py
│ │ ├── test_mdns_integration.py
│ │ ├── test_oauth_basic_auth.py
│ │ ├── test_oauth_flow.py
│ │ ├── test_server_handlers.py
│ │ └── test_store_memory.py
│ ├── performance
│ │ ├── test_background_sync.py
│ │ └── test_hybrid_live.py
│ ├── README.md
│ ├── smithery
│ │ └── test_smithery.py
│ ├── sqlite
│ │ └── simple_sqlite_vec_test.py
│ ├── test_client.py
│ ├── test_content_splitting.py
│ ├── test_database.py
│ ├── test_hybrid_cloudflare_limits.py
│ ├── test_hybrid_storage.py
│ ├── test_memory_ops.py
│ ├── test_semantic_search.py
│ ├── test_sqlite_vec_storage.py
│ ├── test_time_parser.py
│ ├── test_timestamp_preservation.py
│ ├── timestamp
│ │ ├── test_hook_vs_manual_storage.py
│ │ ├── test_issue99_final_validation.py
│ │ ├── test_search_retrieval_inconsistency.py
│ │ ├── test_timestamp_issue.py
│ │ └── test_timestamp_simple.py
│ └── unit
│ ├── conftest.py
│ ├── test_cloudflare_storage.py
│ ├── test_csv_loader.py
│ ├── test_fastapi_dependencies.py
│ ├── test_import.py
│ ├── test_json_loader.py
│ ├── test_mdns_simple.py
│ ├── test_mdns.py
│ ├── test_memory_service.py
│ ├── test_memory.py
│ ├── test_semtools_loader.py
│ ├── test_storage_interface_compatibility.py
│ └── test_tag_time_filtering.py
├── tools
│ ├── docker
│ │ ├── DEPRECATED.md
│ │ ├── docker-compose.http.yml
│ │ ├── docker-compose.pythonpath.yml
│ │ ├── docker-compose.standalone.yml
│ │ ├── docker-compose.uv.yml
│ │ ├── docker-compose.yml
│ │ ├── docker-entrypoint-persistent.sh
│ │ ├── docker-entrypoint-unified.sh
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile
│ │ ├── Dockerfile.glama
│ │ ├── Dockerfile.slim
│ │ ├── README.md
│ │ └── test-docker-modes.sh
│ └── README.md
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/claude-hooks/utilities/memory-client.js:
--------------------------------------------------------------------------------
```javascript
1 | /**
2 | * Unified Memory Client
3 | * Supports both HTTP and MCP protocols with automatic fallback
4 | */
5 |
6 | const https = require('https');
7 | const http = require('http');
8 | const { MCPClient } = require('./mcp-client');
9 |
10 | class MemoryClient {
11 | constructor(config) {
12 | this.config = config;
13 | this.protocol = config.protocol || 'auto';
14 | this.preferredProtocol = config.preferredProtocol || 'mcp';
15 | this.fallbackEnabled = config.fallbackEnabled !== false;
16 | this.httpConfig = config.http || {};
17 | this.mcpConfig = config.mcp || {};
18 |
19 | // Connection state
20 | this.activeProtocol = null;
21 | this.httpAvailable = null;
22 | this.mcpAvailable = null;
23 | this.mcpClient = null;
24 |
25 | // Cache successful connections
26 | this.connectionCache = new Map();
27 | }
28 |
29 | /**
30 | * Initialize connection using the configured protocol
31 | */
32 | async connect() {
33 | if (this.protocol === 'http') {
34 | return this.connectHTTP();
35 | } else if (this.protocol === 'mcp') {
36 | return this.connectMCP();
37 | } else {
38 | // Auto mode: try preferred first, then fallback
39 | return this.connectAuto();
40 | }
41 | }
42 |
43 | /**
44 | * Auto-connect: try preferred protocol first, fallback if needed
45 | */
46 | async connectAuto() {
47 | const protocols = this.preferredProtocol === 'mcp' ? ['mcp', 'http'] : ['http', 'mcp'];
48 |
49 | for (const protocol of protocols) {
50 | try {
51 | if (protocol === 'mcp') {
52 | await this.connectMCP();
53 | this.activeProtocol = 'mcp';
54 | return { protocol: 'mcp', client: this.mcpClient };
55 | } else {
56 | await this.connectHTTP();
57 | this.activeProtocol = 'http';
58 | return { protocol: 'http', client: null };
59 | }
60 | } catch (error) {
61 | if (!this.fallbackEnabled || protocols.length === 1) {
62 | throw error;
63 | }
64 | // Continue to try next protocol
65 | continue;
66 | }
67 | }
68 |
69 | throw new Error('Failed to connect using any available protocol');
70 | }
71 |
72 | /**
73 | * Connect using MCP protocol
74 | */
75 | async connectMCP() {
76 | if (this.mcpClient) {
77 | return this.mcpClient;
78 | }
79 |
80 | this.mcpClient = new MCPClient(
81 | this.mcpConfig.serverCommand,
82 | {
83 | workingDir: this.mcpConfig.serverWorkingDir,
84 | connectionTimeout: this.mcpConfig.connectionTimeout || 5000,
85 | toolCallTimeout: this.mcpConfig.toolCallTimeout || 10000
86 | }
87 | );
88 |
89 | // Handle MCP client errors gracefully
90 | this.mcpClient.on('error', (error) => {
91 | this.mcpAvailable = false;
92 | });
93 |
94 | await this.mcpClient.connect();
95 | this.mcpAvailable = true;
96 | this.activeProtocol = 'mcp';
97 | return this.mcpClient;
98 | }
99 |
100 | /**
101 | * Connect using HTTP protocol
102 | */
103 | async connectHTTP() {
104 | // Test HTTP connection with a simple health check
105 | const healthResult = await this.queryHealthHTTP();
106 | if (!healthResult.success) {
107 | throw new Error(`HTTP connection failed: ${healthResult.error}`);
108 | }
109 | this.httpAvailable = true;
110 | this.activeProtocol = 'http';
111 | return true;
112 | }
113 |
114 | /**
115 | * Query health status using active protocol
116 | */
117 | async getHealthStatus() {
118 | if (this.activeProtocol === 'mcp' && this.mcpClient) {
119 | return this.mcpClient.getHealthStatus();
120 | } else if (this.activeProtocol === 'http') {
121 | return this.queryHealthHTTP();
122 | } else {
123 | throw new Error('No active connection available');
124 | }
125 | }
126 |
127 | /**
128 | * Query health via HTTP with automatic HTTPS → HTTP fallback
129 | */
130 | async queryHealthHTTP() {
131 | const healthPath = this.httpConfig.useDetailedHealthCheck ?
132 | '/api/health/detailed' : '/api/health';
133 |
134 | // Parse the configured endpoint to extract protocol, host, and port
135 | let endpointUrl;
136 | try {
137 | endpointUrl = new URL(this.httpConfig.endpoint);
138 | } catch (error) {
139 | return { success: false, error: `Invalid endpoint URL: ${this.httpConfig.endpoint}` };
140 | }
141 |
142 | // Try with configured protocol first
143 | const result = await this._attemptHealthCheck(endpointUrl, healthPath);
144 |
145 | // If HTTPS failed, try HTTP fallback on same host:port
146 | if (!result.success && endpointUrl.protocol === 'https:') {
147 | const httpUrl = new URL(endpointUrl);
148 | httpUrl.protocol = 'http:';
149 | return this._attemptHealthCheck(httpUrl, healthPath);
150 | }
151 |
152 | return result;
153 | }
154 |
155 | /**
156 | * Attempt health check with specific protocol/host/port
157 | * @private
158 | */
159 | async _attemptHealthCheck(baseUrl, healthPath) {
160 | return new Promise((resolve) => {
161 | try {
162 | const url = new URL(healthPath, baseUrl);
163 |
164 | const requestOptions = {
165 | hostname: url.hostname,
166 | port: url.port || (url.protocol === 'https:' ? 8443 : 8889),
167 | path: url.pathname,
168 | method: 'GET',
169 | headers: {
170 | 'X-API-Key': this.httpConfig.apiKey,
171 | 'Accept': 'application/json'
172 | },
173 | timeout: this.httpConfig.healthCheckTimeout || 3000,
174 | rejectUnauthorized: false // Allow self-signed certificates
175 | };
176 |
177 | const protocol = url.protocol === 'https:' ? https : http;
178 | const req = protocol.request(requestOptions, (res) => {
179 | let data = '';
180 | res.on('data', (chunk) => data += chunk);
181 | res.on('end', () => {
182 | try {
183 | if (res.statusCode === 200) {
184 | const healthData = JSON.parse(data);
185 | resolve({ success: true, data: healthData });
186 | } else {
187 | resolve({ success: false, error: `HTTP ${res.statusCode}`, fallback: true });
188 | }
189 | } catch (parseError) {
190 | resolve({ success: false, error: 'Invalid JSON response', fallback: true });
191 | }
192 | });
193 | });
194 |
195 | req.on('error', (error) => {
196 | resolve({ success: false, error: error.message, fallback: true });
197 | });
198 |
199 | req.on('timeout', () => {
200 | req.destroy();
201 | resolve({ success: false, error: 'Health check timeout', fallback: true });
202 | });
203 |
204 | req.end();
205 |
206 | } catch (error) {
207 | resolve({ success: false, error: error.message, fallback: true });
208 | }
209 | });
210 | }
211 |
212 | /**
213 | * Query memories using active protocol
214 | */
215 | async queryMemories(query, limit = 10) {
216 | if (this.activeProtocol === 'mcp' && this.mcpClient) {
217 | return this.mcpClient.queryMemories(query, limit);
218 | } else if (this.activeProtocol === 'http') {
219 | return this.queryMemoriesHTTP(query, limit);
220 | } else {
221 | throw new Error('No active connection available');
222 | }
223 | }
224 |
225 | /**
226 | * Query memories by time using active protocol
227 | */
228 | async queryMemoriesByTime(timeQuery, limit = 10, semanticQuery = null) {
229 | if (this.activeProtocol === 'mcp' && this.mcpClient) {
230 | // TODO: Update MCP client to support semantic query parameter
231 | return this.mcpClient.queryMemoriesByTime(timeQuery, limit);
232 | } else if (this.activeProtocol === 'http') {
233 | return this.queryMemoriesByTimeHTTP(timeQuery, limit, semanticQuery);
234 | } else {
235 | throw new Error('No active connection available');
236 | }
237 | }
238 |
239 | /**
240 | * Private helper: Perform HTTP POST request to API
241 | * @private
242 | */
243 | _performApiPost(path, payload) {
244 | return new Promise((resolve) => {
245 | const url = new URL(path, this.httpConfig.endpoint);
246 | const postData = JSON.stringify(payload);
247 |
248 | const options = {
249 | hostname: url.hostname,
250 | port: url.port || (url.protocol === 'https:' ? 8443 : 8889),
251 | path: url.pathname,
252 | method: 'POST',
253 | headers: {
254 | 'Content-Type': 'application/json',
255 | 'Content-Length': Buffer.byteLength(postData),
256 | 'X-API-Key': this.httpConfig.apiKey
257 | },
258 | rejectUnauthorized: false // Allow self-signed certificates
259 | };
260 |
261 | const protocol = url.protocol === 'https:' ? https : http;
262 | const req = protocol.request(options, (res) => {
263 | let data = '';
264 | res.on('data', (chunk) => data += chunk);
265 | res.on('end', () => {
266 | try {
267 | const response = JSON.parse(data);
268 | // REST API returns { results: [{memory: {...}, similarity_score: ...}] }
269 | if (response.results && Array.isArray(response.results)) {
270 | // Extract memory objects from results and preserve similarity_score
271 | const memories = response.results
272 | .filter(result => result && result.memory)
273 | .map(result => {
274 | const memory = { ...result.memory };
275 |
276 | // FIX: API returns Unix timestamps in SECONDS, but JavaScript Date expects MILLISECONDS
277 | // Convert created_at and updated_at from seconds to milliseconds
278 | if (memory.created_at && typeof memory.created_at === 'number') {
279 | // Only convert if value looks like seconds (< year 2100 in milliseconds = 4102444800000)
280 | if (memory.created_at < 4102444800) {
281 | memory.created_at = memory.created_at * 1000;
282 | }
283 | }
284 | if (memory.updated_at && typeof memory.updated_at === 'number') {
285 | if (memory.updated_at < 4102444800) {
286 | memory.updated_at = memory.updated_at * 1000;
287 | }
288 | }
289 |
290 | return {
291 | ...memory,
292 | similarity_score: result.similarity_score
293 | };
294 | });
295 | resolve(memories);
296 | } else {
297 | resolve([]);
298 | }
299 | } catch (parseError) {
300 | console.warn('[Memory Client] HTTP parse error:', parseError.message);
301 | resolve([]);
302 | }
303 | });
304 | });
305 |
306 | req.on('error', (error) => {
307 | console.warn('[Memory Client] HTTP network error:', error.message);
308 | resolve([]);
309 | });
310 |
311 | req.write(postData);
312 | req.end();
313 | });
314 | }
315 |
316 | /**
317 | * Query memories via HTTP REST API
318 | */
319 | async queryMemoriesHTTP(query, limit = 10) {
320 | return this._performApiPost('/api/search', {
321 | query: query,
322 | n_results: limit
323 | });
324 | }
325 |
326 | /**
327 | * Query memories by time via HTTP REST API
328 | */
329 | async queryMemoriesByTimeHTTP(timeQuery, limit = 10, semanticQuery = null) {
330 | const payload = {
331 | query: timeQuery,
332 | n_results: limit
333 | };
334 |
335 | // Add semantic query if provided for relevance filtering
336 | if (semanticQuery) {
337 | payload.semantic_query = semanticQuery;
338 | }
339 |
340 | return this._performApiPost('/api/search/by-time', payload);
341 | }
342 |
343 | /**
344 | * Get connection status and available protocols
345 | */
346 | getConnectionInfo() {
347 | return {
348 | activeProtocol: this.activeProtocol,
349 | httpAvailable: this.httpAvailable,
350 | mcpAvailable: this.mcpAvailable,
351 | fallbackEnabled: this.fallbackEnabled,
352 | preferredProtocol: this.preferredProtocol
353 | };
354 | }
355 |
356 | /**
357 | * Disconnect from active protocol
358 | */
359 | async disconnect() {
360 | if (this.mcpClient) {
361 | try {
362 | await this.mcpClient.disconnect();
363 | } catch (error) {
364 | // Ignore cleanup errors
365 | }
366 | this.mcpClient = null;
367 | }
368 |
369 | this.activeProtocol = null;
370 | this.httpAvailable = null;
371 | this.mcpAvailable = null;
372 | this.connectionCache.clear();
373 | }
374 | }
375 |
376 | module.exports = { MemoryClient };
```
--------------------------------------------------------------------------------
/scripts/testing/test_sqlite_vec_embeddings.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Diagnostic script to test SQLite-vec embedding functionality.

This script performs comprehensive tests to identify and diagnose issues
with the embedding pipeline in the MCP Memory Service.
"""

import asyncio
import os
import sys
import logging
import tempfile
import traceback
from datetime import datetime

# Make the repository root importable so `from src.mcp_memory_service ...` works.
# BUG FIX: this script lives at scripts/testing/, so two dirname() calls only
# reached scripts/ -- three are required to land on the repository root.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from src.mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
from src.mcp_memory_service.models.memory import Memory
from src.mcp_memory_service.utils.hashing import generate_content_hash

# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
31 |
class EmbeddingDiagnostics:
    """Diagnostic test suite for SQLite-vec embedding functionality.

    Runs a sequence of async checks (dependencies, storage initialization,
    embedding generation, storage, search, integrity, edge cases) and
    accumulates outcomes in ``self.test_results`` for a final summary.
    """

    def __init__(self, db_path=None):
        """Prepare a diagnostics run.

        Args:
            db_path: SQLite database path to test against. If None, a fresh
                temporary file is created (and left on disk for inspection).
        """
        if db_path is None:
            # BUG FIX: tempfile.mktemp() is deprecated and race-prone -- another
            # process could claim the returned name before we open it. mkstemp()
            # creates the file atomically; close the fd since SQLite reopens the
            # database by path. An empty file is a valid, uninitialized SQLite db.
            fd, db_path = tempfile.mkstemp(suffix='.db')
            os.close(fd)
        self.db_path = db_path
        self.storage = None        # set by test_storage_initialization
        self.test_results = []     # ("SUCCESS", msg) or ("ERROR", msg, error)

    async def run_all_tests(self):
        """Run all diagnostic tests, isolating failures per test."""
        print("\n" + "="*60)
        print("SQLite-vec Embedding Diagnostics")
        print("="*60 + "\n")

        tests = [
            self.test_dependencies,
            self.test_storage_initialization,
            self.test_embedding_generation,
            self.test_memory_storage,
            self.test_semantic_search,
            self.test_database_integrity,
            self.test_edge_cases
        ]

        for test in tests:
            try:
                await test()
            except Exception as e:
                # A crashing test is recorded but does not abort the suite.
                self.log_error(f"{test.__name__} failed", e)

        self.print_summary()

    async def test_dependencies(self):
        """Test 1: Check required dependencies (sqlite-vec, sentence-transformers, torch)."""
        print("\n[TEST 1] Checking dependencies...")

        # Check sqlite-vec
        try:
            import sqlite_vec
            self.log_success("sqlite-vec is installed")
        except ImportError:
            self.log_error("sqlite-vec is NOT installed", "pip install sqlite-vec")

        # Check sentence-transformers
        try:
            from sentence_transformers import SentenceTransformer
            self.log_success("sentence-transformers is installed")
        except ImportError:
            self.log_error("sentence-transformers is NOT installed", "pip install sentence-transformers")

        # Check torch
        try:
            import torch
            device = "cuda" if torch.cuda.is_available() else "cpu"
            self.log_success(f"torch is installed (device: {device})")
        except ImportError:
            self.log_error("torch is NOT installed", "pip install torch")

    async def test_storage_initialization(self):
        """Test 2: Initialize the storage backend and verify its embedding model."""
        print("\n[TEST 2] Initializing storage...")

        try:
            self.storage = SqliteVecMemoryStorage(self.db_path)
            await self.storage.initialize()
            self.log_success(f"Storage initialized at {self.db_path}")

            # Check embedding model
            if self.storage.embedding_model:
                self.log_success(f"Embedding model loaded: {self.storage.embedding_model_name}")
                self.log_success(f"Embedding dimension: {self.storage.embedding_dimension}")
            else:
                self.log_error("Embedding model NOT loaded", None)

        except Exception as e:
            self.log_error("Storage initialization failed", e)

    async def test_embedding_generation(self):
        """Test 3: Generate embeddings and validate their dimensions."""
        print("\n[TEST 3] Testing embedding generation...")

        if not self.storage:
            self.log_error("Storage not initialized", "Previous test failed")
            return

        test_texts = [
            "The quick brown fox jumps over the lazy dog",
            "Machine learning is transforming how we process information",
            "SQLite is a lightweight embedded database"
        ]

        for text in test_texts:
            try:
                embedding = self.storage._generate_embedding(text)

                # Validate embedding: must be non-empty and match the model dimension.
                if not embedding:
                    self.log_error(f"Empty embedding for: {text[:30]}...", None)
                elif len(embedding) != self.storage.embedding_dimension:
                    self.log_error(
                        f"Dimension mismatch for: {text[:30]}...",
                        f"Expected {self.storage.embedding_dimension}, got {len(embedding)}"
                    )
                else:
                    self.log_success(f"Generated embedding for: {text[:30]}... (dim: {len(embedding)})")

            except Exception as e:
                self.log_error(f"Embedding generation failed for: {text[:30]}...", e)

    async def test_memory_storage(self):
        """Test 4: Store sample memories (with embeddings) through the backend."""
        print("\n[TEST 4] Testing memory storage...")

        if not self.storage:
            self.log_error("Storage not initialized", "Previous test failed")
            return

        test_memories = [
            Memory(
                content="Python is a versatile programming language",
                content_hash=generate_content_hash("Python is a versatile programming language"),
                tags=["programming", "python"],
                memory_type="reference"
            ),
            Memory(
                content="The Eiffel Tower is located in Paris, France",
                content_hash=generate_content_hash("The Eiffel Tower is located in Paris, France"),
                tags=["geography", "landmarks"],
                memory_type="fact"
            ),
            Memory(
                content="Machine learning models can learn patterns from data",
                content_hash=generate_content_hash("Machine learning models can learn patterns from data"),
                tags=["ml", "ai"],
                memory_type="concept"
            )
        ]

        stored_count = 0
        for memory in test_memories:
            try:
                success, message = await self.storage.store(memory)
                if success:
                    self.log_success(f"Stored: {memory.content[:40]}...")
                    stored_count += 1
                else:
                    self.log_error(f"Failed to store: {memory.content[:40]}...", message)
            except Exception as e:
                self.log_error(f"Storage exception for: {memory.content[:40]}...", e)

        print(f"\nStored {stored_count}/{len(test_memories)} memories successfully")

    async def test_semantic_search(self):
        """Test 5: Run semantic search queries and show the top match per query."""
        print("\n[TEST 5] Testing semantic search...")

        if not self.storage:
            self.log_error("Storage not initialized", "Previous test failed")
            return

        # (query, expected minimum result count) pairs; the minimum is informational.
        test_queries = [
            ("programming languages", 2),
            ("tourist attractions in Europe", 2),
            ("artificial intelligence and data", 2),
            ("random unrelated query xyz123", 1)
        ]

        for query, expected_min in test_queries:
            try:
                results = await self.storage.retrieve(query, n_results=5)

                if not results:
                    self.log_error(f"No results for query: '{query}'", "Semantic search returned empty")
                else:
                    self.log_success(f"Found {len(results)} results for: '{query}'")

                    # Show top result
                    if results:
                        top_result = results[0]
                        print(f"  Top match: {top_result.memory.content[:50]}...")
                        print(f"  Relevance: {top_result.relevance_score:.3f}")

            except Exception as e:
                self.log_error(f"Search failed for: '{query}'", e)

    async def test_database_integrity(self):
        """Test 6: Verify memories/embeddings row counts match and no orphans exist."""
        print("\n[TEST 6] Checking database integrity...")

        if not self.storage or not self.storage.conn:
            self.log_error("Storage not initialized", "Previous test failed")
            return

        try:
            # Check memory count
            cursor = self.storage.conn.execute('SELECT COUNT(*) FROM memories')
            memory_count = cursor.fetchone()[0]

            # Check embedding count
            cursor = self.storage.conn.execute('SELECT COUNT(*) FROM memory_embeddings')
            embedding_count = cursor.fetchone()[0]

            print(f"  Memories table: {memory_count} rows")
            print(f"  Embeddings table: {embedding_count} rows")

            if memory_count != embedding_count:
                self.log_error(
                    "Row count mismatch",
                    f"Memories: {memory_count}, Embeddings: {embedding_count}"
                )
            else:
                self.log_success("Database row counts match")

            # Check for embeddings whose memory row no longer exists
            # (embeddings are joined to memories via rowid).
            cursor = self.storage.conn.execute('''
                SELECT COUNT(*) FROM memory_embeddings e
                WHERE NOT EXISTS (
                    SELECT 1 FROM memories m WHERE m.id = e.rowid
                )
            ''')
            orphaned = cursor.fetchone()[0]

            if orphaned > 0:
                self.log_error("Found orphaned embeddings", f"Count: {orphaned}")
            else:
                self.log_success("No orphaned embeddings found")

        except Exception as e:
            self.log_error("Database integrity check failed", e)

    async def test_edge_cases(self):
        """Test 7: Exercise empty and very long content inputs."""
        print("\n[TEST 7] Testing edge cases...")

        if not self.storage:
            self.log_error("Storage not initialized", "Previous test failed")
            return

        # Test empty content: storing it should fail or raise.
        try:
            empty_memory = Memory(
                content="",
                content_hash=generate_content_hash(""),
                tags=["empty"]
            )
            success, message = await self.storage.store(empty_memory)
            if success:
                self.log_error("Stored empty content", "Should have failed")
            else:
                self.log_success("Correctly rejected empty content")
        except Exception as e:
            self.log_success(f"Correctly raised exception for empty content: {type(e).__name__}")

        # Test very long content: storing it should succeed.
        try:
            long_content = "x" * 10000
            long_memory = Memory(
                content=long_content,
                content_hash=generate_content_hash(long_content),
                tags=["long"]
            )
            success, message = await self.storage.store(long_memory)
            if success:
                self.log_success("Handled long content")
            else:
                self.log_error("Failed on long content", message)
        except Exception as e:
            self.log_error("Exception on long content", e)

    def log_success(self, message):
        """Record and print a successful test result."""
        print(f"  ✓ {message}")
        self.test_results.append(("SUCCESS", message))

    def log_error(self, message, error):
        """Record and print a failed test result.

        Args:
            message: Human-readable failure description.
            error: Exception instance, informational string, or None.
        """
        print(f"  ✗ {message}")
        if error:
            if isinstance(error, Exception):
                print(f"    Error: {type(error).__name__}: {str(error)}")
            else:
                print(f"    Info: {error}")
        self.test_results.append(("ERROR", message, error))

    def print_summary(self):
        """Print a summary of successes, failures, and the failed test names."""
        print("\n" + "="*60)
        print("Test Summary")
        print("="*60)

        success_count = sum(1 for r in self.test_results if r[0] == "SUCCESS")
        error_count = sum(1 for r in self.test_results if r[0] == "ERROR")

        print(f"\nTotal tests: {len(self.test_results)}")
        print(f"Successful: {success_count}")
        print(f"Failed: {error_count}")

        if error_count > 0:
            print("\nFailed tests:")
            for result in self.test_results:
                if result[0] == "ERROR":
                    print(f"  - {result[1]}")

        print("\n" + "="*60)
336 |
337 |
async def main():
    """Run the diagnostics suite against an optional CLI-provided database path."""
    db_path = None
    if len(sys.argv) > 1:
        db_path = sys.argv[1]
        if not os.path.exists(db_path):
            # Missing path is not fatal -- a new database will be created.
            print(f"Warning: Database file does not exist: {db_path}")
            print("Creating new database for testing...")

    await EmbeddingDiagnostics(db_path).run_all_tests()
349 |
350 |
# Script entry point: run the full async diagnostics suite in a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
```
--------------------------------------------------------------------------------
/docs/development/code-quality-workflow.md:
--------------------------------------------------------------------------------
```markdown
1 | # Code Quality Workflow Documentation
2 |
3 | > **Version**: 1.0.0
4 | > **Last Updated**: November 2025
5 | > **Status**: Active
6 |
7 | ## Overview
8 |
9 | This document describes the comprehensive code quality workflow for the MCP Memory Service project, integrating LLM-based analysis (Groq/Gemini) with static analysis (pyscn) for multi-layer quality assurance.
10 |
11 | ## Table of Contents
12 |
13 | - [Quality Strategy](#quality-strategy)
14 | - [Layer 1: Pre-commit Checks](#layer-1-pre-commit-checks)
15 | - [Layer 2: PR Quality Gates](#layer-2-pr-quality-gates)
16 | - [Layer 3: Periodic Reviews](#layer-3-periodic-reviews)
17 | - [pyscn Integration](#pyscn-integration)
18 | - [Health Score Thresholds](#health-score-thresholds)
19 | - [Troubleshooting](#troubleshooting)
20 | - [Appendix](#appendix)
21 |
22 | ## Quality Strategy
23 |
24 | ### Three-Layer Approach
25 |
26 | The workflow uses three complementary layers to ensure code quality:
27 |
28 | ```
29 | Layer 1: Pre-commit → Fast (<5s) → Every commit
30 | Layer 2: PR Gate → Moderate (30s) → PR creation
31 | Layer 3: Periodic → Deep (60s) → Weekly review
32 | ```
33 |
34 | ### Tool Selection
35 |
36 | | Tool | Purpose | Speed | Blocking | When |
37 | |------|---------|-------|----------|------|
38 | | **Groq API** | LLM complexity checks | <5s | Yes (>8) | Pre-commit |
39 | | **Gemini CLI** | LLM fallback | ~3s | Yes (>8) | Pre-commit |
40 | | **pyscn** | Static analysis | 30-60s | Yes (<50) | PR + weekly |
41 | | **code-quality-guard** | Manual review | Variable | No | On-demand |
42 |
43 | ## Layer 1: Pre-commit Checks
44 |
45 | ### Purpose
46 |
47 | Catch quality issues before they enter the codebase.
48 |
49 | ### Checks Performed
50 |
51 | 1. **Development Environment Validation**
52 | - Verify editable install (`pip install -e .`)
53 | - Check version consistency (source vs installed)
54 | - Prevent stale package issues
55 |
56 | 2. **Complexity Analysis** (Groq/Gemini)
57 | - Rate functions 1-10
58 | - Block if any function >8
59 | - Warn if any function =7
60 |
61 | 3. **Security Scanning**
62 | - SQL injection (raw SQL queries)
63 | - XSS (unescaped HTML)
64 | - Command injection (shell=True)
65 | - Hardcoded secrets
66 |
67 | ### Usage
68 |
69 | **Installation:**
70 | ```bash
71 | ln -s ../../scripts/hooks/pre-commit .git/hooks/pre-commit
72 | chmod +x .git/hooks/pre-commit
73 | ```
74 |
75 | **Configuration:**
76 | ```bash
77 | # Primary LLM: Groq (fast, simple auth)
78 | export GROQ_API_KEY="your-groq-api-key"
79 |
80 | # Fallback: Gemini CLI
81 | npm install -g @google/generative-ai-cli
82 | ```
83 |
84 | **Example Output:**
85 | ```
86 | Running pre-commit quality checks...
87 |
88 | ✓ Using Groq API (fast mode)
89 |
90 | Verifying development environment...
91 | ✓ Development environment OK
92 |
93 | === Checking: src/mcp_memory_service/storage/sqlite_vec.py ===
94 | Checking complexity...
95 | ⚠️ High complexity detected (score 7)
96 | initialize: Score 7 - Multiple nested conditions and error handling paths
97 |
98 | Checking for security issues...
99 | ✓ No security issues
100 |
101 | === Pre-commit Check Summary ===
102 |
103 | ⚠️ HIGH COMPLEXITY WARNING
104 |
105 | Some functions have high complexity (score 7).
106 | Consider refactoring to improve maintainability.
107 |
108 | Continue with commit anyway? (y/n)
109 | ```
110 |
111 | ### Thresholds
112 |
113 | - **Block**: Complexity >8, any security issues
114 | - **Warn**: Complexity =7
115 | - **Pass**: Complexity <7, no security issues
116 |
117 | ## Layer 2: PR Quality Gates
118 |
119 | ### Purpose
120 |
121 | Comprehensive checks before code review and merge.
122 |
123 | ### Standard Checks
124 |
125 | Run automatically on PR creation:
126 |
127 | ```bash
128 | bash scripts/pr/quality_gate.sh <PR_NUMBER>
129 | ```
130 |
131 | **Checks:**
132 | 1. Code complexity (Gemini CLI)
133 | 2. Security vulnerabilities
134 | 3. Test coverage (code files vs test files)
135 | 4. Breaking changes detection
136 |
137 | **Duration:** ~10-30 seconds
138 |
139 | ### Comprehensive Checks (with pyscn)
140 |
141 | Optional deep analysis:
142 |
143 | ```bash
144 | bash scripts/pr/quality_gate.sh <PR_NUMBER> --with-pyscn
145 | ```
146 |
147 | **Additional Checks:**
148 | - Cyclomatic complexity scoring
149 | - Dead code detection
150 | - Code duplication analysis
151 | - Coupling metrics (CBO)
152 | - Architecture violations
153 |
154 | **Duration:** ~30-60 seconds
155 |
156 | ### Example Output
157 |
158 | **Standard Checks:**
159 | ```
160 | === PR Quality Gate for #123 ===
161 |
162 | Fetching changed files...
163 | Changed Python files:
164 | src/mcp_memory_service/storage/hybrid.py
165 | tests/test_hybrid_storage.py
166 |
167 | === Check 1: Code Complexity ===
168 | Analyzing: src/mcp_memory_service/storage/hybrid.py
169 | ✓ Complexity OK
170 |
171 | === Check 2: Security Vulnerabilities ===
172 | Scanning: src/mcp_memory_service/storage/hybrid.py
173 | ✓ No security issues
174 |
175 | === Check 3: Test Coverage ===
176 | Code files changed: 1
177 | Test files changed: 1
178 | ✓ Test coverage OK
179 |
180 | === Check 4: Breaking Changes ===
181 | No API changes detected
182 | ✓ No breaking changes
183 |
184 | === Quality Gate Summary ===
185 |
186 | ✅ ALL CHECKS PASSED
187 |
188 | Quality Gate Results:
189 | - Code complexity: ✅ OK
190 | - Security scan: ✅ OK
191 | - Test coverage: ✅ OK
192 | - Breaking changes: ✅ None detected
193 | ```
194 |
195 | **Comprehensive Checks (with pyscn):**
196 | ```
197 | === Check 5: pyscn Comprehensive Analysis ===
198 | Running pyscn static analysis...
199 |
200 | 📊 Overall Health Score: 68/100
201 |
202 | Quality Metrics:
203 | - Complexity: 45/100 (Avg: 8.2, Max: 15)
204 | - Dead Code: 75/100 (12 issues)
205 | - Duplication: 40/100 (4.2% duplication)
206 |
207 | ⚠️ WARNING - Health score: 68 (threshold: 50)
208 |
209 | ✓ pyscn analysis completed
210 | ```
211 |
212 | ### Thresholds
213 |
214 | - **Block PR**: Security issues, health score <50
215 | - **Warn**: Complexity >7, health score 50-69
216 | - **Pass**: No security issues, health score ≥70
217 |
218 | ## Layer 3: Periodic Reviews
219 |
220 | ### Purpose
221 |
222 | Track quality trends, detect regressions, plan refactoring.
223 |
224 | ### Metrics Tracking
225 |
226 | **Run manually or via cron:**
227 | ```bash
228 | bash scripts/quality/track_pyscn_metrics.sh
229 | ```
230 |
231 | **Frequency:** Weekly or after major changes
232 |
233 | **Stored Data:**
234 | - Health score over time
235 | - Complexity metrics (avg, max)
236 | - Duplication percentage
237 | - Dead code issues
238 | - Architecture violations
239 |
240 | **Output:**
241 | - CSV file: `.pyscn/history/metrics.csv`
242 | - HTML report: `.pyscn/reports/analyze_*.html`
243 |
244 | **Example Output:**
245 | ```
246 | === pyscn Metrics Tracking ===
247 |
248 | Running pyscn analysis (this may take 30-60 seconds)...
249 | ✓ Analysis complete
250 |
251 | === Metrics Extracted ===
252 | Health Score: 68/100
253 | Complexity: 45/100 (Avg: 8.2, Max: 15)
254 | Dead Code: 75/100 (12 issues)
255 | Duplication: 40/100 (4.2%)
256 | Coupling: 100/100
257 | Dependencies: 90/100
258 | Architecture: 80/100
259 |
260 | ✓ Metrics saved to .pyscn/history/metrics.csv
261 |
262 | === Comparison to Previous Run ===
263 | Previous: 70/100 (2025-11-16)
264 | Current: 68/100 (2025-11-23)
265 | Change: -2 points
266 |
267 | ⚠️ Regression: -2 points
268 |
269 | === Trend Summary ===
270 | Total measurements: 5
271 | Average health score: 69/100
272 | Highest: 72/100
273 | Lowest: 65/100
274 | ```
275 |
276 | ### Weekly Review
277 |
278 | **Run manually or via cron:**
279 | ```bash
280 | bash scripts/quality/weekly_quality_review.sh [--create-issue]
281 | ```
282 |
283 | **Features:**
284 | - Compare current vs last week's metrics
285 | - Generate markdown trend report
286 | - Identify regressions (>5% health score drop)
287 | - Optionally create GitHub issue for significant regressions
288 |
289 | **Output:** `docs/development/quality-review-YYYYMMDD.md`
290 |
291 | **Example Report:**
292 | ```markdown
293 | # Weekly Quality Review - November 23, 2025
294 |
295 | ## Summary
296 |
297 | **Overall Trend:** ➡️ Stable
298 |
299 | | Metric | Previous | Current | Change |
300 | |--------|----------|---------|--------|
301 | | Health Score | 70/100 | 68/100 | -2 |
302 | | Complexity | 48/100 | 45/100 | -3 |
303 | | Duplication | 42/100 | 40/100 | -2 |
304 |
305 | ## Status
306 |
307 | ### 🟡 Action Required
308 |
309 | Health score 50-69 indicates refactoring should be planned:
310 | - Schedule quality improvements within 2 weeks
311 | - Monitor trends for regressions
312 | - Address new issues proactively
313 |
314 | ## Observations
315 |
316 | - ⚠️ Complexity score decreased - New complex code introduced
317 | - ⚠️ Code duplication increased - Review for consolidation opportunities
318 | ```
319 |
320 | ## pyscn Integration
321 |
322 | ### Installation
323 |
324 | ```bash
325 | pip install pyscn
326 | ```
327 |
328 | **Repository:** https://github.com/ludo-technologies/pyscn
329 |
330 | ### Capabilities
331 |
332 | 1. **Cyclomatic Complexity**
333 | - Function-level scoring (1-100)
334 | - Average, maximum, high-risk functions
335 | - Detailed complexity breakdown
336 |
337 | 2. **Dead Code Detection**
338 | - Unreachable code after returns
339 | - Unused imports
340 | - Unused variables/functions
341 |
342 | 3. **Clone Detection**
343 | - Exact duplicates
344 | - Near-exact duplicates (>90% similarity)
345 | - Clone groups and fragments
346 |
347 | 4. **Coupling Metrics (CBO)**
348 | - Coupling Between Objects
349 | - High-coupling classes
350 | - Average coupling score
351 |
352 | 5. **Dependency Analysis**
353 | - Module dependencies
354 | - Circular dependency detection
355 | - Dependency depth
356 |
357 | 6. **Architecture Validation**
358 | - Layered architecture compliance
359 | - Layer violation detection
360 | - Cross-layer dependencies
361 |
362 | ### Usage
363 |
364 | **Full Analysis:**
365 | ```bash
366 | pyscn analyze .
367 | ```
368 |
369 | **View Report:**
370 | ```bash
371 | open .pyscn/reports/analyze_*.html
372 | ```
373 |
374 | **JSON Output:**
375 | ```bash
376 | pyscn analyze . --format json > /tmp/metrics.json
377 | ```
378 |
379 | ### Report Interpretation
380 |
381 | **Health Score Breakdown:**
382 |
383 | | Component | Score | Grade | Interpretation |
384 | |-----------|-------|-------|----------------|
385 | | **Complexity** | 40/100 | Poor | 28 high-risk functions (>7), avg 9.5 |
386 | | **Dead Code** | 70/100 | Fair | 27 issues, 2 critical |
387 | | **Duplication** | 30/100 | Poor | 6.0% duplication, 18 clone groups |
388 | | **Coupling** | 100/100 | Excellent | Avg CBO 1.5, 0 high-coupling |
389 | | **Dependencies** | 85/100 | Good | 0 cycles, depth 7 |
390 | | **Architecture** | 75/100 | Good | 58 violations, 75.5% compliance |
391 |
392 | **Example: Complexity Report**
393 |
394 | ```
395 | Top 5 High-Complexity Functions:
396 | 1. install.py::main() - Complexity: 62, Nesting: 6
397 | 2. config.py::__main__() - Complexity: 42, Nesting: 0
398 | 3. sqlite_vec.py::initialize() - Complexity: 38, Nesting: 10
399 | 4. oauth/authorization.py::token() - Complexity: 35, Nesting: 4
400 | 5. install.py::install_package() - Complexity: 33, Nesting: 4
401 | ```
402 |
403 | **Action:** Refactor functions with complexity >10 using:
404 | - Extract method refactoring
405 | - Strategy pattern for conditionals
406 | - Helper functions for complex operations
407 |
408 | ## Health Score Thresholds
409 |
410 | ### Release Blocker (<50)
411 |
412 | **Status:** 🔴 **Cannot merge or release**
413 |
414 | **Required Actions:**
415 | 1. Review full pyscn report
416 | 2. Identify top 5 complexity hotspots
417 | 3. Create refactoring tasks
418 | 4. Schedule immediate refactoring sprint
419 | 5. Track progress in issue #240
420 |
421 | **Timeline:** Must resolve before any merges
422 |
423 | ### Action Required (50-69)
424 |
425 | **Status:** 🟡 **Plan refactoring within 2 weeks**
426 |
427 | **Recommended Actions:**
428 | 1. Analyze complexity trends
429 | 2. Create project board for tracking
430 | 3. Allocate 20% sprint capacity to quality
431 | 4. Review duplication for consolidation
432 | 5. Remove dead code
433 |
434 | **Timeline:** 2-week improvement plan
435 |
436 | ### Good (70-84)
437 |
438 | **Status:** ✅ **Monitor trends, continue development**
439 |
440 | **Maintenance:**
441 | - Monthly quality reviews
442 | - Track complexity trends
443 | - Keep health score above 70
444 | - Address new issues proactively
445 |
446 | ### Excellent (85+)
447 |
448 | **Status:** 🎯 **Maintain current standards**
449 |
450 | **Best Practices:**
451 | - Document quality patterns
452 | - Share refactoring techniques
453 | - Mentor team members
454 | - Celebrate wins
455 |
456 | ## Troubleshooting
457 |
458 | ### Common Issues
459 |
460 | **Issue:** pyscn not found
461 | ```bash
462 | # Solution
463 | pip install pyscn
464 | ```
465 |
466 | **Issue:** Pre-commit hook not running
467 | ```bash
468 | # Solution
469 | chmod +x .git/hooks/pre-commit
470 | ls -la .git/hooks/pre-commit # Verify symlink
471 | ```
472 |
473 | **Issue:** Groq API errors
474 | ```bash
475 | # Solution 1: Check API key
476 | echo $GROQ_API_KEY # Should not be empty
477 |
478 | # Solution 2: Test Groq connection
479 | curl https://api.groq.com/openai/v1/models \
480 | -H "Authorization: Bearer $GROQ_API_KEY"
481 |
482 | # Solution 3: Fall back to Gemini
483 | unset GROQ_API_KEY # Temporarily disable Groq
484 | ```
485 |
486 | **Issue:** pyscn analysis too slow
487 | ```bash
488 | # Solution: Run on specific directories
489 | pyscn analyze src/ # Exclude tests, scripts
490 | pyscn analyze --exclude "tests/*,scripts/*"
491 | ```
492 |
493 | **Issue:** False positive security warnings
494 | ```bash
495 | # Solution: Review and whitelist
496 | # Add comment explaining why code is safe
497 | # Example:
498 | # SAFE: User input sanitized via parameterized query
499 | ```
500 |
501 | ### Performance Tuning
502 |
503 | **Pre-commit Hooks:**
504 | - Use Groq API (200-300ms vs Gemini 2-3s)
505 | - Analyze only staged files
506 | - Skip checks if no Python files
507 |
508 | **PR Quality Gates:**
509 | - Run standard checks first (fast)
510 | - Use `--with-pyscn` for comprehensive analysis
511 | - Cache pyscn reports for repeated checks
512 |
513 | **Periodic Reviews:**
514 | - Schedule during off-hours (cron)
515 | - Use JSON output for scripting
516 | - Archive old reports (keep last 30 days)
517 |
518 | ## Appendix
519 |
520 | ### Script Reference
521 |
522 | | Script | Purpose | Usage |
523 | |--------|---------|-------|
524 | | `scripts/hooks/pre-commit` | Pre-commit quality checks | Auto-runs on `git commit` |
525 | | `scripts/pr/quality_gate.sh` | PR quality gates | `bash scripts/pr/quality_gate.sh <PR>` |
526 | | `scripts/pr/run_pyscn_analysis.sh` | pyscn PR analysis | `bash scripts/pr/run_pyscn_analysis.sh --pr <PR>` |
527 | | `scripts/quality/track_pyscn_metrics.sh` | Metrics tracking | `bash scripts/quality/track_pyscn_metrics.sh` |
528 | | `scripts/quality/weekly_quality_review.sh` | Weekly review | `bash scripts/quality/weekly_quality_review.sh` |
529 |
530 | ### Configuration Files
531 |
532 | | File | Purpose |
533 | |------|---------|
534 | | `.pyscn/.gitignore` | Ignore pyscn reports and history |
535 | | `.pyscn/history/metrics.csv` | Historical quality metrics |
536 | | `.pyscn/reports/*.html` | pyscn HTML reports |
537 | | `.claude/agents/code-quality-guard.md` | Code quality agent specification |
538 |
539 | ### Related Documentation
540 |
541 | - [CLAUDE.md](../../CLAUDE.md) - Project conventions and workflows
542 | - [`.claude/agents/code-quality-guard.md`](../../.claude/agents/code-quality-guard.md) - Agent workflows
543 | - [scripts/README.md](../../scripts/README.md) - Script documentation
544 | - [Issue #240](https://github.com/doobidoo/mcp-memory-service/issues/240) - Quality improvements tracking
545 |
546 | ### External Resources
547 |
548 | - [pyscn GitHub](https://github.com/ludo-technologies/pyscn) - pyscn documentation
549 | - [Groq API Docs](https://console.groq.com/docs) - Groq API reference
550 | - [Gemini CLI](https://www.npmjs.com/package/@google/generative-ai-cli) - Gemini CLI docs
551 |
552 | ---
553 |
554 | **Document Version History:**
555 |
556 | - v1.0.0 (2025-11-24): Initial comprehensive documentation with pyscn integration
557 |
```
--------------------------------------------------------------------------------
/scripts/migration/migrate_to_cloudflare.py:
--------------------------------------------------------------------------------
```python
#!/usr/bin/env python3
"""
Migration script for moving data to Cloudflare backend.
Supports migration from SQLite-vec and ChromaDB backends.
"""

import asyncio
import json
import logging
import os
import sys
import time
from pathlib import Path
from typing import List, Dict, Any, Optional
import argparse

# Make the in-repo package importable. BUG FIX: this script lives at
# scripts/migration/, so Path(__file__).parent.parent is scripts/, not the
# repository root -- 'src' must be resolved three levels up.
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent / 'src'))

from mcp_memory_service.models.memory import Memory
from mcp_memory_service.utils.hashing import generate_content_hash

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
29 |
30 |
31 | class DataMigrator:
32 | """Handles migration of data to Cloudflare backend."""
33 |
    def __init__(self):
        """Initialize the migrator with no storage backends attached.

        NOTE(review): neither attribute is assigned by the export methods
        visible in this file, which operate on local storage instances --
        confirm whether these placeholders are used elsewhere.
        """
        # Placeholder for a source backend (SQLite-vec or ChromaDB).
        self.source_storage = None
        # Placeholder for the CloudflareStorage target.
        self.cloudflare_storage = None
37 |
38 | async def export_from_sqlite_vec(self, sqlite_path: str) -> List[Dict[str, Any]]:
39 | """Export data from SQLite-vec backend."""
40 | logger.info(f"Exporting data from SQLite-vec: {sqlite_path}")
41 |
42 | try:
43 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
44 |
45 | storage = SqliteVecMemoryStorage(sqlite_path)
46 | await storage.initialize()
47 |
48 | # Get all memories
49 | memories = []
50 | stats = await storage.get_stats()
51 | total_memories = stats.get('total_memories', 0)
52 |
53 | logger.info(f"Found {total_memories} memories to export")
54 |
55 | # Get recent memories in batches
56 | batch_size = 100
57 | exported_count = 0
58 |
59 | while exported_count < total_memories:
60 | batch = await storage.get_recent_memories(batch_size)
61 | if not batch:
62 | break
63 |
64 | for memory in batch:
65 | memory_data = {
66 | 'content': memory.content,
67 | 'content_hash': memory.content_hash,
68 | 'tags': memory.tags,
69 | 'memory_type': memory.memory_type,
70 | 'metadata': memory.metadata,
71 | 'created_at': memory.created_at,
72 | 'created_at_iso': memory.created_at_iso,
73 | 'updated_at': memory.updated_at,
74 | 'updated_at_iso': memory.updated_at_iso
75 | }
76 | memories.append(memory_data)
77 | exported_count += 1
78 |
79 | logger.info(f"Exported {exported_count}/{total_memories} memories")
80 |
81 | # Break if we got fewer memories than batch size
82 | if len(batch) < batch_size:
83 | break
84 |
85 | logger.info(f"Successfully exported {len(memories)} memories from SQLite-vec")
86 | return memories
87 |
88 | except Exception as e:
89 | logger.error(f"Failed to export from SQLite-vec: {e}")
90 | raise
91 |
92 | async def export_from_chroma(self, chroma_path: str) -> List[Dict[str, Any]]:
93 | """Export data from ChromaDB backend."""
94 | logger.info(f"Exporting data from ChromaDB: {chroma_path}")
95 |
96 | try:
97 | from mcp_memory_service.storage.chroma import ChromaMemoryStorage
98 |
99 | storage = ChromaMemoryStorage(chroma_path, preload_model=False)
100 | await storage.initialize()
101 |
102 | # Get all memories
103 | memories = []
104 | stats = await storage.get_stats()
105 | total_memories = stats.get('total_memories', 0)
106 |
107 | logger.info(f"Found {total_memories} memories to export")
108 |
109 | # Get recent memories
110 | recent_memories = await storage.get_recent_memories(total_memories)
111 |
112 | for memory in recent_memories:
113 | memory_data = {
114 | 'content': memory.content,
115 | 'content_hash': memory.content_hash,
116 | 'tags': memory.tags,
117 | 'memory_type': memory.memory_type,
118 | 'metadata': memory.metadata,
119 | 'created_at': memory.created_at,
120 | 'created_at_iso': memory.created_at_iso,
121 | 'updated_at': memory.updated_at,
122 | 'updated_at_iso': memory.updated_at_iso
123 | }
124 | memories.append(memory_data)
125 |
126 | logger.info(f"Successfully exported {len(memories)} memories from ChromaDB")
127 | return memories
128 |
129 | except Exception as e:
130 | logger.error(f"Failed to export from ChromaDB: {e}")
131 | raise
132 |
133 | async def import_to_cloudflare(self, memories: List[Dict[str, Any]]) -> bool:
134 | """Import data to Cloudflare backend."""
135 | logger.info(f"Importing {len(memories)} memories to Cloudflare backend")
136 |
137 | try:
138 | # Initialize Cloudflare storage
139 | from mcp_memory_service.storage.cloudflare import CloudflareStorage
140 |
141 | # Get configuration from environment
142 | api_token = os.getenv('CLOUDFLARE_API_TOKEN')
143 | account_id = os.getenv('CLOUDFLARE_ACCOUNT_ID')
144 | vectorize_index = os.getenv('CLOUDFLARE_VECTORIZE_INDEX')
145 | d1_database_id = os.getenv('CLOUDFLARE_D1_DATABASE_ID')
146 | r2_bucket = os.getenv('CLOUDFLARE_R2_BUCKET')
147 |
148 | if not all([api_token, account_id, vectorize_index, d1_database_id]):
149 | raise ValueError("Missing required Cloudflare environment variables")
150 |
151 | storage = CloudflareStorage(
152 | api_token=api_token,
153 | account_id=account_id,
154 | vectorize_index=vectorize_index,
155 | d1_database_id=d1_database_id,
156 | r2_bucket=r2_bucket
157 | )
158 |
159 | await storage.initialize()
160 |
161 | # Import memories in batches
162 | batch_size = 10 # Smaller batches for Cloudflare API limits
163 | imported_count = 0
164 | failed_count = 0
165 |
166 | for i in range(0, len(memories), batch_size):
167 | batch = memories[i:i + batch_size]
168 |
169 | for memory_data in batch:
170 | try:
171 | # Convert to Memory object
172 | memory = Memory(
173 | content=memory_data['content'],
174 | content_hash=memory_data['content_hash'],
175 | tags=memory_data.get('tags', []),
176 | memory_type=memory_data.get('memory_type'),
177 | metadata=memory_data.get('metadata', {}),
178 | created_at=memory_data.get('created_at'),
179 | created_at_iso=memory_data.get('created_at_iso'),
180 | updated_at=memory_data.get('updated_at'),
181 | updated_at_iso=memory_data.get('updated_at_iso')
182 | )
183 |
184 | # Store in Cloudflare
185 | success, message = await storage.store(memory)
186 |
187 | if success:
188 | imported_count += 1
189 | logger.debug(f"Imported memory: {memory.content_hash[:16]}...")
190 | else:
191 | failed_count += 1
192 | logger.warning(f"Failed to import memory {memory.content_hash[:16]}: {message}")
193 |
194 | except Exception as e:
195 | failed_count += 1
196 | logger.error(f"Error importing memory: {e}")
197 |
198 | # Progress update
199 | processed = min(i + batch_size, len(memories))
200 | logger.info(f"Progress: {processed}/{len(memories)} processed, {imported_count} imported, {failed_count} failed")
201 |
202 | # Rate limiting - small delay between batches
203 | await asyncio.sleep(0.5)
204 |
205 | # Final cleanup
206 | await storage.close()
207 |
208 | logger.info(f"Migration completed: {imported_count} imported, {failed_count} failed")
209 | return failed_count == 0
210 |
211 | except Exception as e:
212 | logger.error(f"Failed to import to Cloudflare: {e}")
213 | raise
214 |
215 | async def export_to_file(self, source_backend: str, source_path: str, output_file: str) -> bool:
216 | """Export data from source backend to JSON file."""
217 | try:
218 | if source_backend == 'sqlite_vec':
219 | memories = await self.export_from_sqlite_vec(source_path)
220 | elif source_backend == 'chroma':
221 | memories = await self.export_from_chroma(source_path)
222 | else:
223 | raise ValueError(f"Unsupported source backend: {source_backend}")
224 |
225 | # Save to JSON file
226 | export_data = {
227 | 'source_backend': source_backend,
228 | 'source_path': source_path,
229 | 'export_timestamp': time.time(),
230 | 'total_memories': len(memories),
231 | 'memories': memories
232 | }
233 |
234 | with open(output_file, 'w', encoding='utf-8') as f:
235 | json.dump(export_data, f, indent=2, ensure_ascii=False)
236 |
237 | logger.info(f"Exported {len(memories)} memories to {output_file}")
238 | return True
239 |
240 | except Exception as e:
241 | logger.error(f"Export failed: {e}")
242 | return False
243 |
244 | async def import_from_file(self, input_file: str) -> bool:
245 | """Import data from JSON file to Cloudflare backend."""
246 | try:
247 | with open(input_file, 'r', encoding='utf-8') as f:
248 | export_data = json.load(f)
249 |
250 | memories = export_data.get('memories', [])
251 | logger.info(f"Loaded {len(memories)} memories from {input_file}")
252 |
253 | return await self.import_to_cloudflare(memories)
254 |
255 | except Exception as e:
256 | logger.error(f"Import failed: {e}")
257 | return False
258 |
259 | async def migrate_direct(self, source_backend: str, source_path: str) -> bool:
260 | """Direct migration from source backend to Cloudflare."""
261 | try:
262 | # Export data
263 | if source_backend == 'sqlite_vec':
264 | memories = await self.export_from_sqlite_vec(source_path)
265 | elif source_backend == 'chroma':
266 | memories = await self.export_from_chroma(source_path)
267 | else:
268 | raise ValueError(f"Unsupported source backend: {source_backend}")
269 |
270 | # Import to Cloudflare
271 | return await self.import_to_cloudflare(memories)
272 |
273 | except Exception as e:
274 | logger.error(f"Direct migration failed: {e}")
275 | return False
276 |
277 |
async def main():
    """Parse CLI arguments and run the requested migration command.

    Commands:
        export  -- dump a source backend to a JSON file
        import  -- load a JSON file into the Cloudflare backend
        migrate -- export + import in one step

    Exits with status 0 on success, 1 on failure or interruption.
    """
    parser = argparse.ArgumentParser(description='Migrate data to Cloudflare backend')

    subparsers = parser.add_subparsers(dest='command', help='Migration commands')

    # Export command
    export_parser = subparsers.add_parser('export', help='Export data to JSON file')
    export_parser.add_argument('--source', choices=['sqlite_vec', 'chroma'], required=True,
                               help='Source backend type')
    export_parser.add_argument('--source-path', required=True,
                               help='Path to source database')
    export_parser.add_argument('--output', required=True,
                               help='Output JSON file path')

    # Import command
    import_parser = subparsers.add_parser('import', help='Import data from JSON file')
    import_parser.add_argument('--input', required=True,
                               help='Input JSON file path')

    # Direct migration command
    migrate_parser = subparsers.add_parser('migrate', help='Direct migration to Cloudflare')
    migrate_parser.add_argument('--source', choices=['sqlite_vec', 'chroma'], required=True,
                               help='Source backend type')
    migrate_parser.add_argument('--source-path', required=True,
                               help='Path to source database')

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        return

    migrator = DataMigrator()

    try:
        # Default to failure so an unexpected command value can never leave
        # `success` unbound (argparse restricts commands, but be defensive).
        success = False
        if args.command == 'export':
            success = await migrator.export_to_file(
                args.source, args.source_path, args.output
            )
        elif args.command == 'import':
            success = await migrator.import_from_file(args.input)
        elif args.command == 'migrate':
            success = await migrator.migrate_direct(args.source, args.source_path)

        if success:
            logger.info("Migration completed successfully!")
            sys.exit(0)
        else:
            logger.error("Migration failed!")
            sys.exit(1)

    except KeyboardInterrupt:
        logger.info("Migration cancelled by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Migration error: {e}")
        sys.exit(1)
336 |
337 |
# Script entry point: run the async CLI under a fresh asyncio event loop.
if __name__ == '__main__':
    asyncio.run(main())
```
--------------------------------------------------------------------------------
/claude-hooks/core/topic-change.js:
--------------------------------------------------------------------------------
```javascript
1 | /**
2 | * Claude Code Topic Change Hook
3 | * Monitors conversation flow and dynamically loads relevant memories when topics evolve
4 | * Phase 2: Intelligent Context Updates
5 | */
6 |
7 | const fs = require('fs').promises;
8 | const path = require('path');
9 | const https = require('https');
10 |
11 | // Import utilities
12 | const { analyzeConversation, detectTopicChanges } = require('../utilities/conversation-analyzer');
13 | const { scoreMemoryRelevance } = require('../utilities/memory-scorer');
14 | const { formatMemoriesForContext } = require('../utilities/context-formatter');
15 |
// Global state for conversation tracking.
// Module-level singleton: persists across onTopicChange() calls within one
// process; re-created by initializeTopicTracking()/resetTopicTracking().
let conversationState = {
    previousAnalysis: null,        // last analyzeConversation() result
    loadedMemoryHashes: new Set(), // content hashes already injected (dedup)
    sessionContext: null,          // project/session info set at init
    topicChangeCount: 0            // significant topic shifts handled so far
};
23 |
/**
 * Load hook configuration from ../config.json, falling back to built-in
 * defaults when the file is missing or unparseable.
 * NOTE: the default endpoint/apiKey are development placeholders.
 */
async function loadConfig() {
    try {
        const configPath = path.join(__dirname, '../config.json');
        return JSON.parse(await fs.readFile(configPath, 'utf8'));
    } catch (error) {
        console.warn('[Topic Change Hook] Using default configuration:', error.message);
        return {
            memoryService: {
                endpoint: 'https://10.0.1.30:8443',
                apiKey: 'test-key-123',
                maxMemoriesPerSession: 8
            },
            hooks: {
                topicChange: {
                    enabled: true,
                    timeout: 5000,
                    priority: 'low',
                    minSignificanceScore: 0.3,
                    maxMemoriesPerUpdate: 3
                }
            }
        };
    }
}
52 |
/**
 * Query the memory service (MCP JSON-RPC over HTTPS) for topic-specific memories.
 *
 * Resolves with an array of memory objects. On any transport, timeout, or
 * service error it resolves with an empty array rather than rejecting, so a
 * failing memory service can never break the hook.
 *
 * @param {string} endpoint - Base URL of the memory service (e.g. https://host:8443)
 * @param {string} apiKey - Bearer token for the Authorization header
 * @param {string} query - Free-text search query
 * @param {object} [options] - { limit?: number, excludeHashes?: string[] }
 */
async function queryMemoryService(endpoint, apiKey, query, options = {}) {
    return new Promise((resolve, reject) => {
        const {
            limit = 5,
            excludeHashes = []
        } = options;

        // JSON-RPC 2.0 request for the MCP "retrieve_memory" tool.
        const url = new URL('/mcp', endpoint);
        const postData = JSON.stringify({
            jsonrpc: '2.0',
            id: Date.now(),
            method: 'tools/call',
            params: {
                name: 'retrieve_memory',
                arguments: {
                    query: query,
                    limit: limit
                }
            }
        });

        const requestOptions = {
            hostname: url.hostname,
            port: url.port,
            path: url.pathname,
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${apiKey}`,
                'Content-Length': Buffer.byteLength(postData)
            },
            // SECURITY NOTE(review): TLS certificate verification is disabled —
            // presumably to accept the local service's self-signed cert.
            // Confirm this endpoint is always a trusted host; otherwise this
            // allows man-in-the-middle interception of the API key.
            rejectUnauthorized: false,
            timeout: 5000
        };

        const req = https.request(requestOptions, (res) => {
            let data = '';

            res.on('data', (chunk) => {
                data += chunk;
            });

            res.on('end', () => {
                try {
                    const response = JSON.parse(data);

                    if (response.error) {
                        console.error('[Topic Change Hook] Memory service error:', response.error);
                        resolve([]);
                        return;
                    }

                    // Parse memory results from response
                    const memories = parseMemoryResults(response.result);

                    // Filter out already loaded memories
                    const filteredMemories = memories.filter(memory =>
                        !excludeHashes.includes(memory.content_hash)
                    );

                    console.log(`[Topic Change Hook] Retrieved ${filteredMemories.length} new memories for topic query`);
                    resolve(filteredMemories);

                } catch (parseError) {
                    console.error('[Topic Change Hook] Failed to parse memory response:', parseError.message);
                    resolve([]);
                }
            });
        });

        // Network-level failures degrade to "no memories" rather than rejecting.
        req.on('error', (error) => {
            console.error('[Topic Change Hook] Memory service request failed:', error.message);
            resolve([]);
        });

        req.on('timeout', () => {
            console.error('[Topic Change Hook] Memory service request timed out');
            req.destroy();
            resolve([]);
        });

        req.write(postData);
        req.end();
    });
}
141 |
/**
 * Parse memory results out of an MCP tool-call response.
 *
 * The service embeds a Python-repr-style payload inside result.content[0].text;
 * this extracts the `'results': [...]` array from that text.
 * Returns [] on any shape mismatch or parse failure.
 */
function parseMemoryResults(result) {
    try {
        if (result && result.content && result.content[0] && result.content[0].text) {
            const text = result.content[0].text;

            // Try to extract results array from the response text
            const resultsMatch = text.match(/'results':\s*(\[[\s\S]*?\])/);
            if (resultsMatch) {
                // SECURITY NOTE(review): eval() executes whatever the service
                // returned. Even if the endpoint is trusted, stored memory
                // *content* can contain attacker-influenced text — replace
                // this with a real parser (JSON or a safe literal parser).
                const resultsArray = eval(resultsMatch[1]);
                return resultsArray || [];
            }
        }
        return [];
    } catch (error) {
        console.error('[Topic Change Hook] Error parsing memory results:', error.message);
        return [];
    }
}
164 |
/**
 * Build prioritized memory-search queries from a conversation analysis.
 * Combines new topics, a changed intent, and high-confidence entities,
 * then returns at most the top three by weight.
 */
function generateTopicQueries(analysis, changes) {
    const queries = [];

    // One query per newly detected topic.
    for (const topic of changes.newTopics) {
        queries.push({ query: topic.name, weight: topic.confidence, type: 'topic' });
    }

    // One query for the current intent, when the intent shifted.
    if (changes.changedIntents && analysis.intent) {
        queries.push({
            query: analysis.intent.name,
            weight: analysis.intent.confidence,
            type: 'intent'
        });
    }

    // Up to two strong (confidence > 0.7) entities.
    const topEntities = analysis.entities
        .filter(entity => entity.confidence > 0.7)
        .slice(0, 2);
    for (const entity of topEntities) {
        queries.push({ query: entity.name, weight: entity.confidence, type: 'entity' });
    }

    // Highest-weight queries first; keep at most three.
    queries.sort((a, b) => b.weight - a.weight);
    return queries.slice(0, 3);
}
206 |
/**
 * Render a human-readable context-update message for newly loaded memories.
 * Returns null when there is nothing to report.
 */
function formatContextUpdate(memories, analysis, changes) {
    if (memories.length === 0) {
        return null;
    }

    const lines = ['\n🧠 **Dynamic Memory Context Update**\n\n'];

    // Explain why context is being updated.
    if (changes.newTopics.length > 0) {
        lines.push(`**New topics detected:** ${changes.newTopics.map(t => t.name).join(', ')}\n\n`);
    }
    if (changes.changedIntents) {
        lines.push(`**Conversation focus shifted:** ${analysis.intent.name}\n\n`);
    }

    // At most three memories, each truncated to 120 characters.
    lines.push('**Additional relevant context:**\n');
    memories.slice(0, 3).forEach((memory, index) => {
        const snippet = memory.content.length > 120
            ? memory.content.substring(0, 120) + '...'
            : memory.content;
        lines.push(`${index + 1}. ${snippet}\n`);
        if (memory.tags && memory.tags.length > 0) {
            lines.push(`   *Tags: ${memory.tags.slice(0, 3).join(', ')}*\n`);
        }
        lines.push('\n');
    });

    lines.push('---\n');
    return lines.join('');
}
245 |
/**
 * Main topic change detection and processing.
 *
 * Pipeline: analyze the conversation text → detect topic/intent shifts against
 * the previous analysis → build search queries → fetch new memories from the
 * service → score and select the best → emit a context update. All errors are
 * caught and logged; the hook never throws to the caller.
 *
 * @param {object} context - Conversation context; may provide
 *   `conversationText` (string) and an `onContextUpdate(message)` callback.
 */
async function onTopicChange(context) {
    console.log('[Topic Change Hook] Analyzing conversation for topic changes...');

    try {
        const config = await loadConfig();

        // Check if topic change hook is enabled
        if (!config.hooks?.topicChange?.enabled) {
            console.log('[Topic Change Hook] Hook is disabled, skipping');
            return;
        }

        const {
            minSignificanceScore = 0.3,
            maxMemoriesPerUpdate = 3
        } = config.hooks.topicChange;

        // Analyze current conversation
        const currentAnalysis = analyzeConversation(context.conversationText || '', {
            extractTopics: true,
            extractEntities: true,
            detectIntent: true,
            minTopicConfidence: 0.3
        });

        // Detect topic changes relative to the previous turn's analysis
        const changes = detectTopicChanges(conversationState.previousAnalysis, currentAnalysis);

        // Only proceed if significant topic change detected; always advance
        // previousAnalysis so the next call compares against the latest turn.
        if (!changes.hasTopicShift || changes.significanceScore < minSignificanceScore) {
            console.log(`[Topic Change Hook] No significant topic change detected (score: ${changes.significanceScore.toFixed(2)})`);
            conversationState.previousAnalysis = currentAnalysis;
            return;
        }

        console.log(`[Topic Change Hook] Significant topic change detected (score: ${changes.significanceScore.toFixed(2)})`);
        console.log(`[Topic Change Hook] New topics: ${changes.newTopics.map(t => t.name).join(', ')}`);

        // Generate search queries for new topics
        const queries = generateTopicQueries(currentAnalysis, changes);

        if (queries.length === 0) {
            console.log('[Topic Change Hook] No actionable queries generated');
            conversationState.previousAnalysis = currentAnalysis;
            return;
        }

        // Query memory service for each topic (sequentially); already-loaded
        // memories are excluded by content hash to avoid duplicates.
        const allMemories = [];
        for (const queryObj of queries) {
            const memories = await queryMemoryService(
                config.memoryService.endpoint,
                config.memoryService.apiKey,
                queryObj.query,
                {
                    limit: 2,
                    excludeHashes: Array.from(conversationState.loadedMemoryHashes)
                }
            );

            // Add query context to memories
            memories.forEach(memory => {
                memory.queryContext = queryObj;
            });

            allMemories.push(...memories);
        }

        if (allMemories.length === 0) {
            console.log('[Topic Change Hook] No new relevant memories found');
            conversationState.previousAnalysis = currentAnalysis;
            return;
        }

        // Score memories for relevance against the project/session context
        const projectContext = conversationState.sessionContext || { name: 'unknown' };
        const scoredMemories = scoreMemoryRelevance(allMemories, projectContext, {
            includeConversationContext: true,
            conversationAnalysis: currentAnalysis
        });

        // Select top memories for context update
        const selectedMemories = scoredMemories
            .filter(memory => memory.relevanceScore > 0.3)
            .slice(0, maxMemoriesPerUpdate);

        if (selectedMemories.length === 0) {
            console.log('[Topic Change Hook] No high-relevance memories found');
            conversationState.previousAnalysis = currentAnalysis;
            return;
        }

        // Track loaded memories so later updates don't repeat them
        selectedMemories.forEach(memory => {
            conversationState.loadedMemoryHashes.add(memory.content_hash);
        });

        // Format context update
        const contextUpdate = formatContextUpdate(selectedMemories, currentAnalysis, changes);

        if (contextUpdate) {
            // In a real implementation, this would inject the context into the conversation
            console.log('[Topic Change Hook] Context update generated:');
            console.log(contextUpdate);

            // For now, we'll simulate the context injection
            if (context.onContextUpdate && typeof context.onContextUpdate === 'function') {
                context.onContextUpdate(contextUpdate);
            }
        }

        // Update conversation state
        conversationState.previousAnalysis = currentAnalysis;
        conversationState.topicChangeCount++;

        console.log(`[Topic Change Hook] Topic change processing completed (${conversationState.topicChangeCount} changes total)`);

    } catch (error) {
        console.error('[Topic Change Hook] Error processing topic change:', error.message);
    }
}
371 |
/**
 * Initialize topic change tracking for a new session.
 * Replaces the module-level conversation state wholesale.
 * @param {object} sessionContext - Session context information
 */
function initializeTopicTracking(sessionContext) {
    console.log('[Topic Change Hook] Initializing topic tracking for new session');

    conversationState = {
        previousAnalysis: null,
        topicChangeCount: 0,
        loadedMemoryHashes: new Set(),
        sessionContext
    };
}
386 |
/**
 * Reset topic tracking to a pristine state (no session context).
 */
function resetTopicTracking() {
    console.log('[Topic Change Hook] Resetting topic tracking state');
    conversationState = {
        previousAnalysis: null,
        sessionContext: null,
        topicChangeCount: 0,
        loadedMemoryHashes: new Set()
    };
}
399 |
/**
 * Snapshot of the current topic-tracking state, for diagnostics.
 */
function getTopicTrackingStats() {
    const state = conversationState;
    return {
        topicChangeCount: state.topicChangeCount,
        loadedMemoriesCount: state.loadedMemoryHashes.size,
        hasSessionContext: Boolean(state.sessionContext),
        lastAnalysis: state.previousAnalysis
    };
}
411 |
// Public API: the hook entry point plus session-lifecycle helpers.
module.exports = {
    onTopicChange,
    initializeTopicTracking,
    resetTopicTracking,
    getTopicTrackingStats
};
```
--------------------------------------------------------------------------------
/scripts/installation/install_macos_service.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | macOS LaunchAgent installer for MCP Memory Service.
4 | Creates and manages LaunchAgent plist files for automatic service startup.
5 | """
6 | import os
7 | import sys
8 | import json
9 | import plistlib
10 | import argparse
11 | import subprocess
12 | from pathlib import Path
13 |
14 | # Add parent directory to path for imports
15 | sys.path.insert(0, str(Path(__file__).parent.parent))
16 |
17 | try:
18 | from scripts.service_utils import (
19 | get_project_root, get_service_paths, get_service_environment,
20 | generate_api_key, save_service_config, load_service_config,
21 | check_dependencies, get_service_command, print_service_info
22 | )
23 | except ImportError as e:
24 | print(f"Error importing service utilities: {e}")
25 | print("Please ensure you're running this from the project directory")
26 | sys.exit(1)
27 |
28 |
# launchd job label (reverse-DNS; must match the plist filename) and the
# human-readable name used in console output and generated scripts.
SERVICE_LABEL = "com.mcp.memory-service"
SERVICE_NAME = "MCP Memory Service"
31 |
32 |
def get_launchd_paths(user_level=True):
    """Return (directory, plist path) for the LaunchAgent/LaunchDaemon file.

    User-level agents live under ~/Library/LaunchAgents; system-level
    daemons live under /Library/LaunchDaemons (root-owned).
    """
    if user_level:
        base = Path.home() / "Library" / "LaunchAgents"
    else:
        base = Path("/Library/LaunchDaemons")
    return base, base / f"{SERVICE_LABEL}.plist"
45 |
46 |
def create_plist(api_key, user_level=True):
    """Create the LaunchAgent/LaunchDaemon plist configuration.

    Args:
        api_key: API key injected into the service environment as MCP_API_KEY.
        user_level: True for a user LaunchAgent, False for a system
            LaunchDaemon (adds UserName/GroupName keys).

    Returns:
        dict suitable for plistlib.dump().
    """
    paths = get_service_paths()
    command = get_service_command()
    environment = get_service_environment()
    environment['MCP_API_KEY'] = api_key

    # Create plist dictionary
    plist_dict = {
        'Label': SERVICE_LABEL,
        'ProgramArguments': command,
        'EnvironmentVariables': environment,
        'WorkingDirectory': str(paths['project_root']),
        'RunAtLoad': True,  # start automatically at login/boot
        'KeepAlive': {
            # Restart only after crashes; a clean exit stays stopped.
            'SuccessfulExit': False,
            'Crashed': True
        },
        'StandardOutPath': str(paths['log_dir'] / 'mcp-memory-service.log'),
        'StandardErrorPath': str(paths['log_dir'] / 'mcp-memory-service.error.log'),
        'ProcessType': 'Interactive' if user_level else 'Background',
    }

    # Add user/group for system-level daemon
    if not user_level:
        # NOTE(review): falls back to 'nobody' when USER is unset — confirm
        # that is the intended account for a system daemon.
        plist_dict['UserName'] = os.environ.get('USER', 'nobody')
        plist_dict['GroupName'] = 'staff'

    return plist_dict
76 |
77 |
def create_shell_scripts():
    """Create convenience shell scripts (start/stop/status/uninstall).

    Writes four executable bash scripts under scripts/macos that wrap the
    equivalent launchctl commands for the user-level LaunchAgent.

    Returns:
        Path to the directory containing the generated scripts.
    """
    paths = get_service_paths()
    scripts_dir = paths['scripts_dir'] / 'macos'
    # NOTE(review): assumes paths['scripts_dir'] already exists (no parents=True).
    scripts_dir.mkdir(exist_ok=True)

    # Start script: loads the LaunchAgent plist.
    start_script = scripts_dir / 'start_service.sh'
    with open(start_script, 'w') as f:
        f.write(f'''#!/bin/bash
echo "Starting {SERVICE_NAME}..."
launchctl load ~/Library/LaunchAgents/{SERVICE_LABEL}.plist
if [ $? -eq 0 ]; then
    echo "✅ Service started successfully!"
else
    echo "❌ Failed to start service"
fi
''')
    start_script.chmod(0o755)  # make executable

    # Stop script: unloads the LaunchAgent plist.
    stop_script = scripts_dir / 'stop_service.sh'
    with open(stop_script, 'w') as f:
        f.write(f'''#!/bin/bash
echo "Stopping {SERVICE_NAME}..."
launchctl unload ~/Library/LaunchAgents/{SERVICE_LABEL}.plist
if [ $? -eq 0 ]; then
    echo "✅ Service stopped successfully!"
else
    echo "❌ Failed to stop service"
fi
''')
    stop_script.chmod(0o755)

    # Status script: greps `launchctl list` for the service label.
    status_script = scripts_dir / 'service_status.sh'
    with open(status_script, 'w') as f:
        f.write(f'''#!/bin/bash
echo "{SERVICE_NAME} Status:"
echo "-" | tr '-' '='
launchctl list | grep {SERVICE_LABEL}
if [ $? -eq 0 ]; then
    echo ""
    echo "Service is loaded. PID shown above (- means not running)"
else
    echo "Service is not loaded"
fi
''')
    status_script.chmod(0o755)

    # Uninstall script: confirms, then unloads and deletes the plist.
    uninstall_script = scripts_dir / 'uninstall_service.sh'
    with open(uninstall_script, 'w') as f:
        f.write(f'''#!/bin/bash
echo "This will uninstall {SERVICE_NAME}."
read -p "Are you sure? (y/N): " confirm
if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
    exit 0
fi

echo "Stopping service..."
launchctl unload ~/Library/LaunchAgents/{SERVICE_LABEL}.plist 2>/dev/null

echo "Removing service files..."
rm -f ~/Library/LaunchAgents/{SERVICE_LABEL}.plist

echo "✅ Service uninstalled"
''')
    uninstall_script.chmod(0o755)

    return scripts_dir
149 |
150 |
def install_service(user_level=True):
    """Install the macOS LaunchAgent (user) or LaunchDaemon (system).

    Generates an API key, persists the service configuration, writes the
    launchd plist, loads it with ``launchctl load -w``, and (for user-level
    installs) creates convenience management scripts.

    Args:
        user_level: True for a per-user LaunchAgent, False for a
            system-wide LaunchDaemon (requires root).

    Returns:
        True on success; exits the process with status 1 on failure.
    """
    service_type = "LaunchAgent" if user_level else "LaunchDaemon"

    # Check for root if system-level
    if not user_level and os.geteuid() != 0:
        print("\n❌ ERROR: System-level LaunchDaemon requires root privileges")
        print("Please run with sudo or use --user for user-level installation")
        sys.exit(1)

    print(f"\n🔍 Checking dependencies...")
    deps_ok, deps_msg = check_dependencies()
    if not deps_ok:
        print(f"❌ {deps_msg}")
        sys.exit(1)
    print(f"✅ {deps_msg}")

    # Generate API key
    api_key = generate_api_key()
    print(f"\n🔑 Generated API key: {api_key}")

    # Create service configuration
    config = {
        'service_label': SERVICE_LABEL,
        'api_key': api_key,
        'command': get_service_command(),
        'environment': get_service_environment(),
        'user_level': user_level
    }

    # Save configuration
    config_file = save_service_config(config)
    print(f"💾 Saved configuration to: {config_file}")

    # Get plist paths
    plist_dir, plist_file = get_launchd_paths(user_level)

    # Create plist directory if it doesn't exist
    plist_dir.mkdir(parents=True, exist_ok=True)

    # Create plist
    print(f"\n📝 Creating {service_type} plist...")
    plist_dict = create_plist(api_key, user_level)

    # Write plist file (binary mode: plistlib emits XML bytes)
    with open(plist_file, 'wb') as f:
        plistlib.dump(plist_dict, f)

    # Set proper permissions; system daemons must additionally be root-owned.
    # (Previously both branches of an if/else ran the same chmod.)
    os.chmod(plist_file, 0o644)
    if not user_level:
        os.chown(plist_file, 0, 0)  # root:wheel

    print(f"✅ Created plist at: {plist_file}")

    # Load the service
    print(f"\n🚀 Loading {service_type}...")
    result = subprocess.run([
        'launchctl', 'load', '-w', str(plist_file)
    ], capture_output=True, text=True)

    if result.returncode != 0:
        if "already loaded" in result.stderr:
            print("ℹ️ Service was already loaded, reloading...")
            # Unload first
            subprocess.run(['launchctl', 'unload', str(plist_file)], capture_output=True)
            # Load again
            subprocess.run(['launchctl', 'load', '-w', str(plist_file)], capture_output=True)
        else:
            print(f"❌ Failed to load service: {result.stderr}")
            print("\n💡 Try checking Console.app for detailed error messages")
            sys.exit(1)

    print(f"✅ {service_type} loaded successfully!")

    # Create convenience scripts
    if user_level:
        scripts_dir = create_shell_scripts()
        print(f"\n📁 Created management scripts in: {scripts_dir}")

    # Print service information
    paths = get_service_paths()
    platform_info = {
        'Start Service': f'launchctl load -w {plist_file}',
        'Stop Service': f'launchctl unload {plist_file}',
        'Service Status': f'launchctl list | grep {SERVICE_LABEL}',
        'View Logs': f'tail -f {paths["log_dir"] / "mcp-memory-service.log"}',
        'Uninstall': f'python "{Path(__file__)}" --uninstall'
    }

    print_service_info(api_key, platform_info)

    # Additional macOS-specific tips
    print("\n📌 macOS Tips:")
    print("  • Check Console.app for detailed service logs")
    print("  • Service will start automatically on login/boot")
    print("  • Use Activity Monitor to verify the process is running")

    return True
252 |
253 |
def uninstall_service(user_level=True):
    """Uninstall the macOS LaunchAgent/LaunchDaemon.

    Unloads the service if loaded, removes the plist file, and deletes the
    saved service configuration when it belongs to this service.

    Args:
        user_level: True for the per-user LaunchAgent, False for the
            system-wide LaunchDaemon (requires root).
    """
    service_type = "LaunchAgent" if user_level else "LaunchDaemon"

    # Check for root if system-level
    if not user_level and os.geteuid() != 0:
        print("\n❌ ERROR: System-level LaunchDaemon requires root privileges")
        print("Please run with sudo")
        sys.exit(1)

    print(f"\n🗑️ Uninstalling {SERVICE_NAME} {service_type}...")

    # Get plist paths
    plist_dir, plist_file = get_launchd_paths(user_level)

    if plist_file.exists():
        # Unload the service (ignore errors if it was not loaded)
        print("⏹️ Stopping service...")
        subprocess.run([
            'launchctl', 'unload', str(plist_file)
        ], capture_output=True)

        # Remove the plist file
        print("🗑️ Removing plist file...")
        plist_file.unlink()

        print(f"✅ {service_type} uninstalled successfully!")
    else:
        print(f"ℹ️ {service_type} is not installed")

    # Clean up configuration
    config = load_service_config()
    if config and config.get('service_label') == SERVICE_LABEL:
        print("🧹 Cleaning up configuration...")
        config_file = get_service_paths()['config_dir'] / 'service_config.json'
        # Guard against a stale config entry whose file was already removed;
        # the unconditional unlink() raised FileNotFoundError in that case.
        if config_file.exists():
            config_file.unlink()
290 |
291 |
def start_service(user_level=True):
    """Start (load) the macOS service via launchctl."""
    _, plist_file = get_launchd_paths(user_level)

    if not plist_file.exists():
        print(f"❌ Service is not installed. Run without --start to install first.")
        sys.exit(1)

    print(f"\n▶️ Starting {SERVICE_NAME}...")

    result = subprocess.run(
        ['launchctl', 'load', str(plist_file)],
        capture_output=True, text=True
    )

    if result.returncode != 0:
        # launchctl reports a benign error when the job is already loaded.
        if "already loaded" in result.stderr:
            print("ℹ️ Service is already running")
        else:
            print(f"❌ Failed to start service: {result.stderr}")
    else:
        print("✅ Service started successfully!")
313 |
314 |
def stop_service(user_level=True):
    """Stop (unload) the macOS service via launchctl."""
    _, plist_file = get_launchd_paths(user_level)

    print(f"\n⏹️ Stopping {SERVICE_NAME}...")

    result = subprocess.run(
        ['launchctl', 'unload', str(plist_file)],
        capture_output=True, text=True
    )

    if result.returncode == 0:
        print("✅ Service stopped successfully!")
    else:
        print(f"ℹ️ Service may not be running: {result.stderr}")
329 |
330 |
def service_status(user_level=True):
    """Print installation, launchd, configuration, and recent-log status.

    Args:
        user_level: True to inspect the user LaunchAgent, False for the
            system LaunchDaemon.
    """
    print(f"\n📊 {SERVICE_NAME} Status:")
    print("-" * 40)

    # Check if plist exists
    plist_dir, plist_file = get_launchd_paths(user_level)
    if not plist_file.exists():
        print("❌ Service is not installed")
        return

    # Check launchctl list
    result = subprocess.run([
        'launchctl', 'list'
    ], capture_output=True, text=True)

    # `launchctl list` rows are "PID Status Label"; PID is '-' when the job
    # is loaded but not currently running.
    service_found = False
    for line in result.stdout.splitlines():
        if SERVICE_LABEL in line:
            service_found = True
            parts = line.split()
            if len(parts) >= 3:
                pid = parts[0]
                status = parts[1]  # last exit status of the job
                if pid != '-':
                    print(f"✅ Service is RUNNING (PID: {pid})")
                else:
                    print(f"⏹️ Service is STOPPED (last exit: {status})")
            break

    if not service_found:
        print("⏹️ Service is not loaded")

    # Show configuration
    config = load_service_config()
    if config:
        print(f"\n📋 Configuration:")
        print(f"  Service Label: {SERVICE_LABEL}")
        # NOTE(review): prints the API key in clear text to the console.
        print(f"  API Key: {config.get('api_key', 'Not set')}")
        print(f"  Type: {'User LaunchAgent' if user_level else 'System LaunchDaemon'}")
        print(f"  Plist: {plist_file}")

    # Show recent logs
    paths = get_service_paths()
    log_file = paths['log_dir'] / 'mcp-memory-service.log'
    if log_file.exists():
        print(f"\n📜 Recent logs from {log_file}:")
        result = subprocess.run([
            'tail', '-n', '10', str(log_file)
        ], capture_output=True, text=True)
        if result.stdout:
            print(result.stdout)
383 |
384 |
def main():
    """Parse command-line flags and dispatch to the requested service action.

    With no action flag, the default behavior is to install the service.
    """
    parser = argparse.ArgumentParser(
        description="macOS LaunchAgent installer for MCP Memory Service"
    )

    # Service level
    parser.add_argument('--user', action='store_true', default=True,
                        help='Install as user LaunchAgent (default)')
    parser.add_argument('--system', action='store_true',
                        help='Install as system LaunchDaemon (requires sudo)')

    # Actions
    parser.add_argument('--uninstall', action='store_true', help='Uninstall the service')
    parser.add_argument('--start', action='store_true', help='Start the service')
    parser.add_argument('--stop', action='store_true', help='Stop the service')
    parser.add_argument('--status', action='store_true', help='Check service status')
    parser.add_argument('--restart', action='store_true', help='Restart the service')

    args = parser.parse_args()

    # --system overrides the (always-true) --user default.
    user_level = not args.system

    # First matching action wins, mirroring the original if/elif chain.
    dispatch = (
        (args.uninstall, lambda: uninstall_service(user_level)),
        (args.start, lambda: start_service(user_level)),
        (args.stop, lambda: stop_service(user_level)),
        (args.status, lambda: service_status(user_level)),
        (args.restart, lambda: (stop_service(user_level), start_service(user_level))),
    )
    for requested, action in dispatch:
        if requested:
            action()
            break
    else:
        # Default action is to install
        install_service(user_level)


if __name__ == '__main__':
    main()
```
--------------------------------------------------------------------------------
/claude-hooks/install_claude_hooks_windows.ps1:
--------------------------------------------------------------------------------
```
1 | # Claude Code Memory Awareness Hooks - Windows Installation Script v2.2.0
2 | # Installs hooks into Claude Code hooks directory for automatic memory awareness
3 | # Enhanced Output Control and Session Management
4 |
# Script parameters: mutually exclusive operating modes.
param(
    [switch]$Uninstall,   # Remove previously installed hooks
    [switch]$Test,        # Run the installed integration tests only
    [switch]$Help         # Print usage information and exit
)

# Stop on the first error so partial installations do not go unnoticed.
$ErrorActionPreference = "Stop"
12 |
13 | # Configuration - Detect proper Claude Code hooks directory
function Get-ClaudeHooksDirectory {
    # Resolve the Claude Code hooks directory, preferring an existing location.
    # Primary location: User profile (updated to match actual Claude Code directory structure)
    $primaryPath = "$env:USERPROFILE\.claude\hooks"

    # Alternative locations to check
    $alternativePaths = @(
        "$env:APPDATA\.claude\hooks",
        "$env:LOCALAPPDATA\.claude\hooks"
    )

    # If primary path already exists, use it
    if (Test-Path $primaryPath) {
        return $primaryPath
    }

    # Check if Claude Code is installed and can tell us the hooks directory
    # NOTE(review): this regex scrape of `claude --help` output is heuristic -
    # verify it still matches the current CLI help text.
    try {
        $claudeHelp = claude --help 2>$null
        if ($claudeHelp -match "hooks.*directory.*(\S+)") {
            $detectedPath = $matches[1]
            if ($detectedPath -and (Test-Path (Split-Path -Parent $detectedPath) -ErrorAction SilentlyContinue)) {
                return $detectedPath
            }
        }
    } catch {
        # Claude CLI not available or failed
    }

    # Check alternative locations
    foreach ($altPath in $alternativePaths) {
        if (Test-Path $altPath) {
            return $altPath
        }
    }

    # Default to primary path (will be created if needed)
    return $primaryPath
}
52 |
$CLAUDE_HOOKS_DIR = Get-ClaudeHooksDirectory

# Script is now in the claude-hooks directory itself
$SCRIPT_DIR = $PSScriptRoot
$SOURCE_DIR = $SCRIPT_DIR
# BUGFIX: $REPO_ROOT was printed below but never assigned, so the
# "Repository root" debug line always showed an empty value. The repository
# root is the parent of the claude-hooks directory this script lives in.
$REPO_ROOT = Split-Path -Parent $SCRIPT_DIR

$dateStr = Get-Date -Format "yyyyMMdd-HHmmss"
$BACKUP_DIR = "$env:USERPROFILE\.claude\hooks-backup-$dateStr"

# Colored console helpers used throughout the installer.
# (Write-Error intentionally shadows the built-in cmdlet for uniform output.)
function Write-Info { Write-Host "[INFO]" -ForegroundColor Green -NoNewline; Write-Host " $args" }
function Write-Warn { Write-Host "[WARN]" -ForegroundColor Yellow -NoNewline; Write-Host " $args" }
function Write-Error { Write-Host "[ERROR]" -ForegroundColor Red -NoNewline; Write-Host " $args" }

# Debug: Display resolved paths
Write-Info "Script location: $SCRIPT_DIR"
Write-Info "Repository root: $REPO_ROOT"
Write-Info "Source hooks directory: $SOURCE_DIR"
Write-Info "Target hooks directory: $CLAUDE_HOOKS_DIR"
71 |
72 | # Show help
if ($Help) {
    # Print usage and exit without touching the filesystem.
    Write-Host @"
Claude Code Memory Awareness Hooks - Windows Installation

Usage: .\install_claude_hooks_windows.ps1 [options]

Options:
  -Help       Show this help message
  -Uninstall  Remove installed hooks
  -Test       Run tests only

Examples:
  .\install_claude_hooks_windows.ps1              # Install hooks
  .\install_claude_hooks_windows.ps1 -Uninstall   # Remove hooks
  .\install_claude_hooks_windows.ps1 -Test        # Test installation
"@
    exit 0
}

# Header banner shown for every non-help invocation.
Write-Host ""
Write-Host "Claude Code Memory Awareness Hooks Installation v2.2.0 (Windows)" -ForegroundColor Cyan
Write-Host "================================================================" -ForegroundColor Cyan
Write-Host ""
98 | # Check if Claude Code is installed
function Test-ClaudeCode {
    # Warn (and optionally abort) when the Claude Code CLI is not on PATH.
    $claudePath = Get-Command claude -ErrorAction SilentlyContinue
    if ($claudePath) {
        Write-Info "Claude Code CLI found: $($claudePath.Source)"
        return
    }

    Write-Warn "Claude Code CLI not found in PATH"
    Write-Warn "Please ensure Claude Code is installed and accessible"
    $response = Read-Host "Continue anyway? (Y/N)"
    if ($response -ne "Y" -and $response -ne "y") {
        exit 1
    }
}
112 |
113 | # Validate source directory exists
function Test-SourceDirectory {
    # Fail fast when the script is not run from a complete claude-hooks checkout.
    Write-Info "Validating source directory..."

    if (-not (Test-Path $SOURCE_DIR)) {
        Write-Error "Source hooks directory not found: $SOURCE_DIR"
        Write-Error "Please ensure you are running this script from the mcp-memory-service repository"
        Write-Error "Expected repository structure:"
        Write-Error "  mcp-memory-service/"
        Write-Error "    scripts/"
        Write-Error "      install_claude_hooks_windows.ps1  (This script)"
        Write-Error "    claude-hooks/"
        Write-Error "      core/"
        Write-Error "      utilities/"
        Write-Error "      config.json"
        exit 1
    }

    # Check for required subdirectories
    $requiredDirs = @("core", "utilities", "tests")
    foreach ($dir in $requiredDirs) {
        $dirPath = Join-Path $SOURCE_DIR $dir
        if (-not (Test-Path $dirPath)) {
            Write-Error "Missing required directory: $dirPath"
            Write-Error "The claude-hooks directory appears to be incomplete"
            exit 1
        }
    }

    Write-Info "Source directory validation passed"
}
144 |
145 | # Create Claude Code hooks directory if it does not exist
function New-HooksDirectory {
    # Ensure the target hooks directory exists and is writable before copying.
    if (-not (Test-Path $CLAUDE_HOOKS_DIR)) {
        Write-Info "Creating Claude Code hooks directory: $CLAUDE_HOOKS_DIR"
        try {
            New-Item -ItemType Directory -Path $CLAUDE_HOOKS_DIR -Force | Out-Null
            Write-Info "Successfully created hooks directory"
        } catch {
            Write-Error "Failed to create hooks directory: $CLAUDE_HOOKS_DIR"
            Write-Error "Error: $($_.Exception.Message)"
            Write-Error ""
            Write-Error "Possible solutions:"
            Write-Error "  1. Run PowerShell as Administrator"
            Write-Error "  2. Check if the parent directory exists and is writable"
            Write-Error "  3. Manually create the directory: $CLAUDE_HOOKS_DIR"
            exit 1
        }
    } else {
        Write-Info "Claude Code hooks directory exists: $CLAUDE_HOOKS_DIR"
    }

    # Test write access by round-tripping a throwaway file.
    $testFile = Join-Path $CLAUDE_HOOKS_DIR "write-test.tmp"
    try {
        "test" | Out-File -FilePath $testFile -Force
        Remove-Item -Path $testFile -Force
        Write-Info "Write access confirmed for hooks directory"
    } catch {
        Write-Error "No write access to hooks directory: $CLAUDE_HOOKS_DIR"
        Write-Error "Please check permissions or run as Administrator"
        exit 1
    }
}
178 |
179 | # Backup existing hooks if they exist
function Backup-ExistingHooks {
    # Copy any previously installed hooks aside before they are overwritten.
    $markers = @(
        "$CLAUDE_HOOKS_DIR\core",
        "$CLAUDE_HOOKS_DIR\utilities",
        "$CLAUDE_HOOKS_DIR\config.json"
    )

    $hasExisting = $false
    foreach ($marker in $markers) {
        if (Test-Path $marker) {
            $hasExisting = $true
        }
    }

    if (-not $hasExisting) {
        return
    }

    Write-Info "Backing up existing hooks to: $BACKUP_DIR"
    New-Item -ItemType Directory -Path $BACKUP_DIR -Force | Out-Null
    Copy-Item -Path "$CLAUDE_HOOKS_DIR\*" -Destination $BACKUP_DIR -Recurse -Force -ErrorAction SilentlyContinue
    Write-Info "Backup created successfully"
}
196 |
197 | # Install hook files
function Install-Hooks {
    # Copy hook scripts, utilities, tests, and docs into the hooks directory.
    Write-Info "Installing memory awareness hooks..."

    # Create necessary directories
    New-Item -ItemType Directory -Path "$CLAUDE_HOOKS_DIR\core" -Force | Out-Null
    New-Item -ItemType Directory -Path "$CLAUDE_HOOKS_DIR\utilities" -Force | Out-Null
    New-Item -ItemType Directory -Path "$CLAUDE_HOOKS_DIR\tests" -Force | Out-Null

    # Copy core hooks
    Copy-Item -Path "$SOURCE_DIR\core\*" -Destination "$CLAUDE_HOOKS_DIR\core\" -Recurse -Force
    Write-Info "Installed core hooks (session-start, session-end, topic-change)"

    # Copy utilities
    Copy-Item -Path "$SOURCE_DIR\utilities\*" -Destination "$CLAUDE_HOOKS_DIR\utilities\" -Recurse -Force
    Write-Info "Installed utility modules"

    # Copy tests
    Copy-Item -Path "$SOURCE_DIR\tests\*" -Destination "$CLAUDE_HOOKS_DIR\tests\" -Recurse -Force
    Write-Info "Installed test suite"

    # Copy documentation and configuration
    Copy-Item -Path "$SOURCE_DIR\README.md" -Destination "$CLAUDE_HOOKS_DIR\" -Force
    Copy-Item -Path "$SOURCE_DIR\config.template.json" -Destination "$CLAUDE_HOOKS_DIR\" -Force
    Write-Info "Installed documentation and templates"
}
223 |
224 | # Install or update configuration
function Install-Config {
    # Install the default config on first run; never clobber an existing one.
    $configFile = "$CLAUDE_HOOKS_DIR\config.json"

    if (Test-Path $configFile) {
        Write-Info "Configuration file already exists - not overwriting"
        Write-Info "  Compare with config.template.json for new options"
        return
    }

    # First installation - use default config
    Copy-Item -Path "$SOURCE_DIR\config.json" -Destination $configFile -Force
    Write-Info "Installed default configuration"
    Write-Warn "Please update config.json with your memory service endpoint and API key"
}
238 |
239 | # Test installation
function Test-Installation {
    # Verify all required files landed, Node.js is available, and the
    # integration tests pass. Returns $true/$false for the caller to gate on.
    Write-Info "Testing installation..."

    # Check if required files exist
    $requiredFiles = @(
        "core\session-start.js",
        "core\session-end.js",
        "utilities\project-detector.js",
        "utilities\memory-scorer.js",
        "utilities\context-formatter.js",
        "config.json",
        "README.md"
    )

    $missingFiles = @()
    foreach ($file in $requiredFiles) {
        if (-not (Test-Path "$CLAUDE_HOOKS_DIR\$file")) {
            $missingFiles += $file
        }
    }

    if ($missingFiles.Count -gt 0) {
        Write-Error "Installation incomplete - missing files:"
        foreach ($file in $missingFiles) {
            Write-Host "  - $file"
        }
        return $false
    }

    # Test Node.js availability (hooks are JavaScript and need node to run)
    $nodeVersion = node --version 2>$null
    if (-not $nodeVersion) {
        Write-Warn "Node.js not found - hooks require Node.js to function"
        Write-Warn "Please install Node.js version 14 or higher"
    } else {
        Write-Info "Node.js available: $nodeVersion"
    }

    # Run integration test; $LASTEXITCODE carries the native node exit code.
    if (Test-Path "$CLAUDE_HOOKS_DIR\tests\integration-test.js") {
        Write-Info "Running integration tests..."
        Push-Location $CLAUDE_HOOKS_DIR
        try {
            $testResult = node tests\integration-test.js 2>&1
            if ($LASTEXITCODE -eq 0) {
                Write-Info "Integration tests passed"
            } else {
                Write-Warn "Some integration tests failed - check configuration"
                Write-Host $testResult
            }
        } finally {
            Pop-Location
        }
    }

    return $true
}
297 |
298 | # Display post-installation instructions
function Show-PostInstallInstructions {
    # Summarize what was installed and what the user must configure next.
    Write-Host ""
    Write-Host "Installation Complete!" -ForegroundColor Green
    Write-Host "=====================" -ForegroundColor Green
    Write-Host ""
    Write-Host "Next Steps:" -ForegroundColor Yellow
    Write-Host ""
    Write-Host "1. Configure your memory service endpoint:"
    Write-Host "   Edit: $CLAUDE_HOOKS_DIR\config.json"
    Write-Host "   Update endpoint and apiKey values"
    Write-Host ""
    Write-Host "2. Test the hooks:"
    Write-Host "   cd $CLAUDE_HOOKS_DIR"
    Write-Host "   node tests\integration-test.js"
    Write-Host ""
    Write-Host "3. Start using Claude Code:"
    Write-Host "   The hooks will automatically activate on session start/end"
    Write-Host ""
    Write-Host "Installation Details:" -ForegroundColor Cyan
    Write-Host "  Hooks Directory: $CLAUDE_HOOKS_DIR"
    if (Test-Path $BACKUP_DIR) {
        Write-Host "  Backup Directory: $BACKUP_DIR"
    }
    Write-Host ""

    # Try to read and display current configuration
    $configPath = Join-Path $CLAUDE_HOOKS_DIR "config.json"
    if (Test-Path $configPath) {
        try {
            $config = Get-Content $configPath | ConvertFrom-Json
            Write-Host "Configuration:" -ForegroundColor Cyan
            Write-Host "  Memory Service: $($config.memoryService.endpoint)"
            Write-Host "  Max Memories: $($config.memoryService.maxMemoriesPerSession)"
        } catch {
            Write-Warn "Could not read configuration file"
        }
    }
    Write-Host ""
    $readmePath = Join-Path $CLAUDE_HOOKS_DIR "README.md"
    Write-Host "For troubleshooting, see: $readmePath"
}
340 |
341 | # Uninstall function
function Uninstall-Hooks {
    # Remove installed hook files after interactive confirmation.
    if (-not (Test-Path $CLAUDE_HOOKS_DIR)) {
        Write-Info "No hooks found to uninstall"
        return
    }

    $response = Read-Host "Remove all Claude Code memory awareness hooks? (Y/N)"
    if ($response -ne "Y" -and $response -ne "y") {
        return
    }

    foreach ($dir in @("core", "utilities", "tests")) {
        Remove-Item -Path "$CLAUDE_HOOKS_DIR\$dir" -Recurse -Force -ErrorAction SilentlyContinue
    }
    foreach ($file in @("config.json", "config.template.json", "README.md")) {
        Remove-Item -Path "$CLAUDE_HOOKS_DIR\$file" -Force -ErrorAction SilentlyContinue
    }
    Write-Info "Hooks uninstalled successfully"
}
358 |
359 | # Test only function
function Test-Only {
    # Run the installed integration tests without (re)installing anything.
    $testScript = "$CLAUDE_HOOKS_DIR\tests\integration-test.js"
    if (-not (Test-Path $testScript)) {
        Write-Error "Tests not found - please install first"
        exit 1
    }

    Push-Location $CLAUDE_HOOKS_DIR
    try {
        node tests\integration-test.js
    } finally {
        Pop-Location
    }
}
373 |
374 | # Main execution
try {
    # Dispatch by mode switch; default (no switch) performs a full install.
    if ($Uninstall) {
        Uninstall-Hooks
    } elseif ($Test) {
        Test-Only
    } else {
        # Main installation process
        Test-SourceDirectory
        Test-ClaudeCode
        New-HooksDirectory
        Backup-ExistingHooks
        Install-Hooks
        Install-Config
        if (Test-Installation) {
            Show-PostInstallInstructions
        }
    }
} catch {
    # $ErrorActionPreference = "Stop" routes any failure here.
    Write-Host "ERROR: Installation failed" -ForegroundColor Red
    Write-Host $_.Exception.Message
    exit 1
}
```
--------------------------------------------------------------------------------
/src/mcp_memory_service/web/oauth/authorization.py:
--------------------------------------------------------------------------------
```python
1 | # Copyright 2024 Heinrich Krupp
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | OAuth 2.1 Authorization Server implementation for MCP Memory Service.
17 |
18 | Implements OAuth 2.1 authorization code flow and token endpoints.
19 | """
20 |
21 | import time
22 | import logging
23 | import base64
24 | from typing import Optional, Tuple
25 | from urllib.parse import urlencode
26 | from fastapi import APIRouter, HTTPException, status, Form, Query, Request
27 | from fastapi.responses import RedirectResponse
28 | from jose import jwt
29 |
30 | from ...config import (
31 | OAUTH_ISSUER,
32 | OAUTH_ACCESS_TOKEN_EXPIRE_MINUTES,
33 | OAUTH_AUTHORIZATION_CODE_EXPIRE_MINUTES,
34 | get_jwt_algorithm,
35 | get_jwt_signing_key
36 | )
37 | from .models import TokenResponse
38 | from .storage import oauth_storage
39 |
40 | logger = logging.getLogger(__name__)
41 |
42 | router = APIRouter()
43 |
44 |
def parse_basic_auth(authorization_header: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
    """
    Parse HTTP Basic authentication header.

    Returns:
        Tuple of (client_id, client_secret) or (None, None) if not valid
    """
    failure = (None, None)

    # Must be present and use the Basic scheme.
    if not authorization_header or not authorization_header.startswith('Basic '):
        return failure

    try:
        # Strip the 'Basic ' prefix, then base64-decode the remainder.
        raw = base64.b64decode(authorization_header[6:]).decode('utf-8')
    except Exception as e:
        logger.debug(f"Failed to parse Basic auth header: {e}")
        return failure

    # Credentials are 'username:password'; only the first ':' separates them,
    # so the secret may itself contain colons.
    client_id, sep, client_secret = raw.partition(':')
    if not sep:
        return failure

    return client_id, client_secret
74 |
75 |
def create_access_token(client_id: str, scope: Optional[str] = None) -> tuple[str, int]:
    """
    Create a JWT access token for the given client.

    Uses RS256 with RSA key pair if available, otherwise falls back to HS256.

    Args:
        client_id: OAuth client the token is issued to (becomes the 'sub' claim).
        scope: Space-separated scope string; defaults to "read write" when omitted.

    Returns:
        Tuple of (token, expires_in_seconds)
    """
    expires_in = OAUTH_ACCESS_TOKEN_EXPIRE_MINUTES * 60
    expire_time = time.time() + expires_in

    # Standard JWT claims; 'aud' ties the token to this service.
    payload = {
        "iss": OAUTH_ISSUER,
        "sub": client_id,
        "aud": "mcp-memory-service",
        "exp": expire_time,
        "iat": time.time(),
        "scope": scope or "read write"
    }

    # Algorithm/key selection (RS256 vs HS256 fallback) is centralized in config.
    algorithm = get_jwt_algorithm()
    signing_key = get_jwt_signing_key()

    logger.debug(f"Creating JWT token with algorithm: {algorithm}")
    token = jwt.encode(payload, signing_key, algorithm=algorithm)
    return token, expires_in
103 |
104 |
async def validate_redirect_uri(client_id: str, redirect_uri: Optional[str]) -> str:
    """Validate redirect URI against registered client.

    Resolves the effective redirect URI: a missing value falls back to the
    client's first registered URI, and an explicit value must match one of
    the registered URIs exactly.

    Raises:
        HTTPException: 400 with an OAuth error body when validation fails.
    """
    def _bad_request(error: str, description: str) -> HTTPException:
        # All failures here are 400s with an OAuth-style error payload.
        return HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={"error": error, "error_description": description}
        )

    client = await oauth_storage.get_client(client_id)
    if not client:
        raise _bad_request("invalid_client", "Invalid client_id")

    if not redirect_uri:
        # If no redirect_uri provided, use the first registered one
        if not client.redirect_uris:
            raise _bad_request(
                "invalid_request",
                "redirect_uri is required when client has no registered redirect URIs"
            )
        return client.redirect_uris[0]

    # Validate that the redirect_uri is registered
    if redirect_uri not in client.redirect_uris:
        raise _bad_request(
            "invalid_redirect_uri",
            "redirect_uri not registered for this client"
        )

    return redirect_uri
140 |
141 |
@router.get("/authorize")
async def authorize(
    response_type: str = Query(..., description="OAuth response type"),
    client_id: str = Query(..., description="OAuth client identifier"),
    redirect_uri: Optional[str] = Query(None, description="Redirection URI"),
    scope: Optional[str] = Query(None, description="Requested scope"),
    state: Optional[str] = Query(None, description="Opaque value for CSRF protection")
):
    """
    OAuth 2.1 Authorization endpoint.

    Implements the authorization code flow. For MVP, this auto-approves
    all requests without user interaction.

    Returns:
        RedirectResponse to the validated redirect_uri carrying either the
        authorization code (plus ``state``) or an OAuth error.
    """
    logger.info(f"Authorization request: client_id={client_id}, response_type={response_type}")

    try:
        # Validate response_type
        if response_type != "code":
            error_params = {
                "error": "unsupported_response_type",
                "error_description": "Only 'code' response type is supported"
            }
            if state:
                error_params["state"] = state

            # If we have a redirect_uri, redirect with error
            # NOTE(review): this redirects to a redirect_uri that has NOT yet
            # been validated against the client's registration - a potential
            # open-redirect vector; confirm against the OAuth 2.0 Security BCP.
            if redirect_uri:
                error_url = f"{redirect_uri}?{urlencode(error_params)}"
                return RedirectResponse(url=error_url)
            else:
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=error_params)

        # Validate client and redirect_uri
        validated_redirect_uri = await validate_redirect_uri(client_id, redirect_uri)

        # Generate authorization code
        auth_code = oauth_storage.generate_authorization_code()

        # Store authorization code (valid for the configured lifetime only)
        await oauth_storage.store_authorization_code(
            code=auth_code,
            client_id=client_id,
            redirect_uri=validated_redirect_uri,
            scope=scope,
            expires_in=OAUTH_AUTHORIZATION_CODE_EXPIRE_MINUTES * 60
        )

        # Build redirect URL with authorization code
        redirect_params = {"code": auth_code}
        if state:
            redirect_params["state"] = state

        redirect_url = f"{validated_redirect_uri}?{urlencode(redirect_params)}"

        logger.info(f"Authorization granted for client_id={client_id}")
        return RedirectResponse(url=redirect_url)

    except HTTPException:
        # Re-raise HTTP exceptions
        raise
    except Exception as e:
        logger.error(f"Authorization error: {e}")

        error_params = {
            "error": "server_error",
            "error_description": "Internal server error"
        }
        if state:
            error_params["state"] = state

        # NOTE(review): same unvalidated-redirect concern as above.
        if redirect_uri:
            error_url = f"{redirect_uri}?{urlencode(error_params)}"
            return RedirectResponse(url=error_url)
        else:
            raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=error_params)
218 |
219 |
async def _handle_authorization_code_grant(
    final_client_id: str,
    final_client_secret: str,
    code: Optional[str],
    redirect_uri: Optional[str]
) -> TokenResponse:
    """Handle OAuth authorization_code grant type.

    Validates the request parameters, authenticates the client, consumes the
    authorization code, and issues a JWT access token.

    Raises:
        HTTPException: 400 (invalid_request/invalid_grant) or 401 (invalid_client).
    """
    if not code:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_request",
                "error_description": "Missing required parameter: code"
            }
        )

    if not final_client_id:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_request",
                "error_description": "Missing required parameter: client_id"
            }
        )

    # Authenticate client
    if not await oauth_storage.authenticate_client(final_client_id, final_client_secret or ""):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail={
                "error": "invalid_client",
                "error_description": "Client authentication failed"
            }
        )

    # Get and consume authorization code
    code_data = await oauth_storage.get_authorization_code(code)
    if not code_data:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_grant",
                "error_description": "Invalid or expired authorization code"
            }
        )

    # Validate client_id matches the client the code was issued to
    if code_data["client_id"] != final_client_id:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_grant",
                "error_description": "Authorization code was issued to a different client"
            }
        )

    # Validate redirect_uri if provided
    if redirect_uri and code_data["redirect_uri"] != redirect_uri:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_grant",
                "error_description": "redirect_uri does not match the one used in authorization request"
            }
        )

    # Create access token carrying the scope granted at authorization time
    access_token, expires_in = create_access_token(final_client_id, code_data["scope"])

    # Store access token for validation
    await oauth_storage.store_access_token(
        token=access_token,
        client_id=final_client_id,
        scope=code_data["scope"],
        expires_in=expires_in
    )

    logger.info(f"Access token issued for client_id={final_client_id}")
    return TokenResponse(
        access_token=access_token,
        token_type="Bearer",
        expires_in=expires_in,
        scope=code_data["scope"]
    )


async def _handle_client_credentials_grant(
    final_client_id: str,
    final_client_secret: str
) -> TokenResponse:
    """Handle OAuth client_credentials grant type.

    Raises:
        HTTPException: 400 (invalid_request) or 401 (invalid_client).
    """
    if not final_client_id or not final_client_secret:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_request",
                "error_description": "Missing required parameters: client_id and client_secret"
            }
        )

    # Authenticate client
    if not await oauth_storage.authenticate_client(final_client_id, final_client_secret):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail={
                "error": "invalid_client",
                "error_description": "Client authentication failed"
            }
        )

    # Create access token (no per-code scope for this grant; use the default)
    access_token, expires_in = create_access_token(final_client_id, "read write")

    # Store access token
    await oauth_storage.store_access_token(
        token=access_token,
        client_id=final_client_id,
        scope="read write",
        expires_in=expires_in
    )

    logger.info(f"Client credentials token issued for client_id={final_client_id}")
    return TokenResponse(
        access_token=access_token,
        token_type="Bearer",
        expires_in=expires_in,
        scope="read write"
    )


# BUGFIX: this route decorator was previously attached to the
# _handle_authorization_code_grant helper above, which registered the helper's
# internal signature as POST /token and left this public endpoint unregistered.
@router.post("/token", response_model=TokenResponse)
async def token(
    request: Request,
    grant_type: str = Form(..., description="OAuth grant type"),
    code: Optional[str] = Form(None, description="Authorization code"),
    redirect_uri: Optional[str] = Form(None, description="Redirection URI"),
    client_id: Optional[str] = Form(None, description="OAuth client identifier"),
    client_secret: Optional[str] = Form(None, description="OAuth client secret")
):
    """
    OAuth 2.1 Token endpoint.

    Exchanges authorization codes for access tokens.
    Supports both authorization_code and client_credentials grant types.
    Supports both client_secret_post (form data) and client_secret_basic (HTTP Basic auth).
    """
    # Extract client credentials from either HTTP Basic auth or form data
    auth_header = request.headers.get('authorization')
    basic_client_id, basic_client_secret = parse_basic_auth(auth_header)

    # Use Basic auth credentials if available, otherwise fall back to form data
    final_client_id = basic_client_id or client_id
    final_client_secret = basic_client_secret or client_secret

    auth_method = "client_secret_basic" if basic_client_id else "client_secret_post"
    logger.info(f"Token request: grant_type={grant_type}, client_id={final_client_id}, auth_method={auth_method}")

    try:
        if grant_type == "authorization_code":
            return await _handle_authorization_code_grant(
                final_client_id, final_client_secret, code, redirect_uri
            )
        elif grant_type == "client_credentials":
            return await _handle_client_credentials_grant(
                final_client_id, final_client_secret
            )
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail={
                    "error": "unsupported_grant_type",
                    "error_description": f"Grant type '{grant_type}' is not supported"
                }
            )

    except HTTPException:
        # Re-raise HTTP exceptions
        raise
    except Exception as e:
        logger.error(f"Token endpoint error: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={
                "error": "server_error",
                "error_description": "Internal server error"
            }
        )
```
--------------------------------------------------------------------------------
/scripts/maintenance/repair_zero_embeddings.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Enhanced repair script to fix zero vector embeddings in SQLite-vec databases.
4 |
5 | This script detects and repairs embeddings that are all zeros (invalid) and
6 | regenerates them with proper sentence transformer embeddings.
7 | """
8 |
9 | import asyncio
10 | import os
11 | import sys
12 | import sqlite3
13 | import logging
14 | import numpy as np
15 | from typing import List, Tuple
16 |
17 | # Add parent directory to path
18 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
19 |
20 | try:
21 | import sqlite_vec
22 | from sqlite_vec import serialize_float32
23 | SQLITE_VEC_AVAILABLE = True
24 | except ImportError:
25 | SQLITE_VEC_AVAILABLE = False
26 |
27 | try:
28 | from sentence_transformers import SentenceTransformer
29 | SENTENCE_TRANSFORMERS_AVAILABLE = True
30 | except ImportError:
31 | SENTENCE_TRANSFORMERS_AVAILABLE = False
32 |
33 | # Configure logging
34 | logging.basicConfig(
35 | level=logging.INFO,
36 | format='%(asctime)s - %(levelname)s - %(message)s'
37 | )
38 | logger = logging.getLogger(__name__)
39 |
40 |
class ZeroEmbeddingRepair:
    """Detect and repair zero-vector (invalid) embeddings in a SQLite-vec database.

    Typical flow (see run_repair): check dependencies, connect, analyze,
    load the sentence-transformer model, regenerate invalid embeddings,
    then verify semantic search returns sane similarity scores.
    """

    def __init__(self, db_path: str, model_name: str = "all-MiniLM-L6-v2"):
        """Store configuration; no I/O happens until run_repair()/connect_database()."""
        self.db_path = db_path
        self.model_name = model_name
        self.conn = None   # sqlite3.Connection, set by connect_database()
        self.model = None  # SentenceTransformer, set by load_model()
        self.embedding_dimension = 384  # default for all-MiniLM-L6-v2; corrected in load_model()

    def check_dependencies(self):
        """Return True if sqlite-vec and sentence-transformers are importable."""
        print("Checking dependencies...")

        if not SQLITE_VEC_AVAILABLE:
            print("❌ sqlite-vec not installed. Run: pip install sqlite-vec")
            return False

        if not SENTENCE_TRANSFORMERS_AVAILABLE:
            print("❌ sentence-transformers not installed. Run: pip install sentence-transformers torch")
            return False

        print("✅ All dependencies available")
        return True

    def connect_database(self):
        """Open the SQLite database and load the sqlite-vec extension.

        Raises:
            FileNotFoundError: if the database file does not exist.
        """
        print(f"\nConnecting to database: {self.db_path}")

        if not os.path.exists(self.db_path):
            raise FileNotFoundError(f"Database not found: {self.db_path}")

        self.conn = sqlite3.connect(self.db_path)
        self.conn.enable_load_extension(True)
        sqlite_vec.load(self.conn)
        # Re-disable extension loading immediately after use (security hygiene).
        self.conn.enable_load_extension(False)

        print("✅ Connected to database")

    @staticmethod
    def _is_invalid_embedding(rowid, embedding_blob) -> bool:
        """Return True if the stored embedding is missing, unparseable, or all zeros.

        Shared by analyze_database() and regenerate_zero_embeddings() so the
        two passes classify blobs identically.
        """
        if not embedding_blob:
            return True
        try:
            embedding_array = np.frombuffer(embedding_blob, dtype=np.float32)
        except (ValueError, TypeError) as e:
            # Unparseable blobs are treated as invalid so they get regenerated.
            logger.warning(f"Failed to parse embedding for rowid {rowid}: {e}")
            return True
        return bool(np.allclose(embedding_array, 0))

    def analyze_database(self) -> dict:
        """Analyze database health and return a summary dict.

        The returned dict contains row counts, the number of
        missing/zero/valid embeddings, the declared embedding dimension,
        and a human-readable list of issues (empty when healthy).
        """
        print("\nAnalyzing database...")

        analysis = {
            "memory_count": 0,
            "embedding_count": 0,
            "missing_embeddings": 0,
            "zero_embeddings": 0,
            "valid_embeddings": 0,
            "embedding_dimension": None,
            "issues": []
        }

        # Count memories
        cursor = self.conn.execute("SELECT COUNT(*) FROM memories")
        analysis["memory_count"] = cursor.fetchone()[0]

        # Count embeddings
        cursor = self.conn.execute("SELECT COUNT(*) FROM memory_embeddings")
        analysis["embedding_count"] = cursor.fetchone()[0]

        # Parse the declared vector dimension (e.g. "FLOAT[384]") from the schema.
        cursor = self.conn.execute("""
            SELECT sql FROM sqlite_master
            WHERE type='table' AND name='memory_embeddings'
        """)
        schema = cursor.fetchone()
        if schema:
            import re
            # Bug fix: the previous pattern r'FLOAT\\[(\\d+)\\]' doubled the
            # backslashes inside a raw string, so it never matched and the
            # dimension was always reported as None.
            match = re.search(r'FLOAT\[(\d+)\]', schema[0])
            if match:
                analysis["embedding_dimension"] = int(match.group(1))

        # Find memories without embeddings
        cursor = self.conn.execute("""
            SELECT COUNT(*) FROM memories m
            WHERE NOT EXISTS (
                SELECT 1 FROM memory_embeddings e WHERE e.rowid = m.id
            )
        """)
        analysis["missing_embeddings"] = cursor.fetchone()[0]

        # Classify every stored embedding as valid or zero/invalid.
        print("  Checking for zero vector embeddings...")
        cursor = self.conn.execute("""
            SELECT e.rowid, e.content_embedding, m.content
            FROM memory_embeddings e
            INNER JOIN memories m ON m.id = e.rowid
        """)

        zero_count = 0
        valid_count = 0

        # Iterate the cursor directly instead of fetchall() so we never hold
        # every embedding blob in memory at once.
        for rowid, embedding_blob, content in cursor:
            if self._is_invalid_embedding(rowid, embedding_blob):
                zero_count += 1
                logger.debug(f"Zero embedding found for memory {rowid}: {content[:50]}...")
            else:
                valid_count += 1

        analysis["zero_embeddings"] = zero_count
        analysis["valid_embeddings"] = valid_count

        # Identify issues
        if analysis["memory_count"] != analysis["embedding_count"]:
            analysis["issues"].append(
                f"Mismatch: {analysis['memory_count']} memories vs {analysis['embedding_count']} embeddings"
            )

        if analysis["missing_embeddings"] > 0:
            analysis["issues"].append(
                f"Missing embeddings: {analysis['missing_embeddings']} memories have no embeddings"
            )

        if analysis["zero_embeddings"] > 0:
            analysis["issues"].append(
                f"Zero vector embeddings: {analysis['zero_embeddings']} embeddings are all zeros (invalid)"
            )

        print(f"  Memories: {analysis['memory_count']}")
        print(f"  Embeddings: {analysis['embedding_count']}")
        print(f"  Missing embeddings: {analysis['missing_embeddings']}")
        print(f"  Zero embeddings: {analysis['zero_embeddings']}")
        print(f"  Valid embeddings: {analysis['valid_embeddings']}")
        print(f"  Embedding dimension: {analysis['embedding_dimension']}")

        if analysis["issues"]:
            print("\n⚠️ Issues found:")
            for issue in analysis["issues"]:
                print(f"  - {issue}")
        else:
            print("\n✅ No issues found")

        return analysis

    def load_model(self):
        """Load the sentence-transformer model and record its true output dimension."""
        print(f"\nLoading embedding model: {self.model_name}")

        self.model = SentenceTransformer(self.model_name)

        # Probe with a dummy sentence: the actual dimension may differ from the
        # 384 default if a different model name was supplied.
        test_embedding = self.model.encode(["test"], convert_to_numpy=True)
        self.embedding_dimension = test_embedding.shape[1]

        print(f"✅ Model loaded (dimension: {self.embedding_dimension})")

    def regenerate_zero_embeddings(self, analysis: dict) -> int:
        """Regenerate all invalid (zero/unparseable/missing) embeddings.

        Args:
            analysis: the dict returned by analyze_database().

        Returns:
            Number of embeddings successfully rewritten and committed.
        """
        if analysis["zero_embeddings"] == 0:
            return 0

        print(f"\nRegenerating {analysis['zero_embeddings']} zero vector embeddings...")

        # Collect (rowid, content) for every invalid embedding.
        cursor = self.conn.execute("""
            SELECT e.rowid, e.content_embedding, m.content
            FROM memory_embeddings e
            INNER JOIN memories m ON m.id = e.rowid
        """)
        zero_embeddings = [
            (rowid, content)
            for rowid, embedding_blob, content in cursor.fetchall()
            if self._is_invalid_embedding(rowid, embedding_blob)
        ]

        fixed_count = 0

        for rowid, content in zero_embeddings:
            try:
                # Generate new embedding
                embedding = self.model.encode([content], convert_to_numpy=True)[0]

                # Only accept embeddings that are non-zero and finite.
                if not np.allclose(embedding, 0) and np.isfinite(embedding).all():
                    self.conn.execute(
                        "UPDATE memory_embeddings SET content_embedding = ? WHERE rowid = ?",
                        (serialize_float32(embedding), rowid)
                    )

                    fixed_count += 1

                    # Show progress
                    if fixed_count % 10 == 0:
                        print(f"  ... {fixed_count}/{len(zero_embeddings)} embeddings regenerated")

                else:
                    logger.error(f"Generated invalid embedding for memory {rowid}")

            except Exception as e:
                # Best-effort repair: log and continue with the next memory.
                logger.error(f"Failed to regenerate embedding for memory {rowid}: {e}")

        self.conn.commit()
        print(f"✅ Regenerated {fixed_count} embeddings")

        return fixed_count

    def verify_search(self) -> bool:
        """Run a test vector query and check the distances look reasonable.

        Returns True when the search returns rows and at least one distance
        is below 1.0 (i.e. similarity = 1 - distance is above zero).
        """
        print("\nTesting semantic search with similarity scores...")

        try:
            # Generate a test query embedding
            test_query = "test embedding verification"
            query_embedding = self.model.encode([test_query], convert_to_numpy=True)[0]

            # KNN query against the vec0 virtual table; join back for content.
            cursor = self.conn.execute("""
                SELECT m.content, e.distance
                FROM memories m
                INNER JOIN (
                    SELECT rowid, distance
                    FROM memory_embeddings
                    WHERE content_embedding MATCH ?
                    ORDER BY distance
                    LIMIT 3
                ) e ON m.id = e.rowid
                ORDER BY e.distance
            """, (serialize_float32(query_embedding),))

            results = cursor.fetchall()

            if not results:
                print("❌ No results returned")
                return False

            print("✅ Semantic search working with results:")
            for i, (content, distance) in enumerate(results, 1):
                similarity = max(0.0, 1.0 - distance)
                print(f"  {i}. Distance: {distance:.6f}, Similarity: {similarity:.6f}")
                print(f"     Content: {content[:60]}...")

            # If every distance is >= 1.0, every similarity clamps to 0.0,
            # which means the embeddings are still effectively unusable.
            distances = [result[1] for result in results]
            if all(d >= 1.0 for d in distances):
                print("⚠️ All distances are >= 1.0, similarities will be 0.0")
                return False

            print("✅ Found reasonable similarity scores")
            return True

        except Exception as e:
            print(f"❌ Semantic search failed: {e}")
            return False

    def run_repair(self):
        """Run the full repair workflow and print a summary.

        Always closes the database connection on exit, even on failure.
        """
        # Bug fix: these banner/summary strings previously used "\\n" (a
        # literal backslash + n), which printed "\n" instead of a newline.
        print("\n" + "=" * 60)
        print("SQLite-vec Zero Embedding Repair Tool")
        print("=" * 60)

        try:
            # Check dependencies
            if not self.check_dependencies():
                return

            # Connect to database
            self.connect_database()

            # Analyze current state
            analysis = self.analyze_database()

            if not analysis["issues"]:
                print("\n✅ Database appears healthy, no repair needed")
                return

            # Load model
            self.load_model()

            # Fix zero embeddings
            fixed = self.regenerate_zero_embeddings(analysis)

            # Verify search works
            search_working = self.verify_search()

            # Re-analyze
            print("\nRe-analyzing database after repair...")
            new_analysis = self.analyze_database()

            print("\n" + "=" * 60)
            print("Repair Summary")
            print("=" * 60)
            print(f"Fixed {fixed} zero vector embeddings")
            print(f"Search working: {'✅ Yes' if search_working else '❌ No'}")

            if new_analysis["issues"]:
                print("\n⚠️ Some issues remain:")
                for issue in new_analysis["issues"]:
                    print(f"  - {issue}")
            else:
                print("\n✅ All issues resolved!")

        except Exception as e:
            print(f"\n❌ Repair failed: {e}")
            logger.exception("Repair failed")

        finally:
            if self.conn:
                self.conn.close()
364 |
365 |
def main():
    """CLI entry point: validate arguments, then run the repair tool.

    Exits with status 1 (after printing usage) when no database path is given.
    """
    if len(sys.argv) < 2:
        # Bug fix: the usage text previously used "\\n" (literal backslash + n),
        # which printed "\n" instead of a blank line.
        print("Usage: python repair_zero_embeddings.py <database_path>")
        print("\nExample:")
        print("  python repair_zero_embeddings.py ~/.local/share/mcp-memory/sqlite_vec.db")
        print("\nThis tool will:")
        print("  - Check for zero vector embeddings (invalid)")
        print("  - Regenerate proper embeddings using sentence-transformers")
        print("  - Verify semantic search functionality with similarity scores")
        sys.exit(1)

    db_path = sys.argv[1]

    repair = ZeroEmbeddingRepair(db_path)
    repair.run_repair()
382 |
383 |
384 | if __name__ == "__main__":
385 | main()
```
--------------------------------------------------------------------------------
/scripts/validation/diagnose_backend_config.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Diagnostic script to troubleshoot backend configuration issues.
4 | This helps identify why Cloudflare backend might not be working.
5 | """
6 |
7 | import os
8 | import sys
9 | from pathlib import Path
10 |
11 | # Add src to path for imports
12 | src_path = Path(__file__).parent.parent.parent / "src"
13 | sys.path.insert(0, str(src_path))
14 |
def print_separator(title):
    """Print *title* framed above and below by 60-character '=' rules."""
    rule = "=" * 60
    print(f"\n{rule}\n {title}\n{rule}")
19 |
def print_status(status, message):
    """Print *message* prefixed by a plain-text tag derived from *status*.

    Unknown status values fall back to the [INFO] tag.
    """
    tags = {"success": "[OK]", "warning": "[WARN]", "error": "[ERROR]"}
    print(f"{tags.get(status, '[INFO]')} {message}")
30 |
def check_env_file():
    """Report whether a project-level .env file exists and echo its contents.

    Lines that look like credentials (TOKEN/PASSWORD/SECRET) have their
    values masked. Returns True if the file exists, False otherwise.
    """
    print_separator("ENVIRONMENT FILE CHECK")

    env_file = Path(__file__).parent.parent.parent / ".env"

    if not env_file.exists():
        print_status("error", f"No .env file found at: {env_file}")
        return False

    print_status("success", f".env file found at: {env_file}")
    print("\n.env file contents:")
    with open(env_file, 'r') as f:
        for i, line in enumerate(f.readlines(), 1):
            # Mask anything that looks like a credential assignment.
            sensitive = any(marker in line for marker in ('TOKEN', 'PASSWORD', 'SECRET'))
            if sensitive and '=' in line:
                key, _ = line.split('=', 1)
                print(f"  {i:2d}: {key}=***MASKED***")
            else:
                print(f"  {i:2d}: {line.rstrip()}")
    return True
57 |
def check_environment_variables():
    """Inspect backend-related environment variables after loading .env.

    Prints the storage backend plus every Cloudflare variable (tokens
    masked), and returns True only when all REQUIRED Cloudflare variables
    are present.
    """
    print_separator("ENVIRONMENT VARIABLES CHECK")

    # Best effort: pull variables from the project .env before inspecting.
    try:
        from dotenv import load_dotenv
        env_file = Path(__file__).parent.parent.parent / ".env"
        if env_file.exists():
            load_dotenv(env_file)
            print_status("success", f"Loaded .env file from: {env_file}")
        else:
            print_status("info", "No .env file to load")
    except ImportError:
        print_status("warning", "dotenv not available, skipping .env file loading")

    # Core configuration
    backend = os.getenv('MCP_MEMORY_STORAGE_BACKEND', 'NOT SET')
    print(f"\nCore Configuration:")
    print(f"  MCP_MEMORY_STORAGE_BACKEND: {backend}")

    # Cloudflare variables and whether each is mandatory.
    cloudflare_vars = {
        'CLOUDFLARE_API_TOKEN': 'REQUIRED',
        'CLOUDFLARE_ACCOUNT_ID': 'REQUIRED',
        'CLOUDFLARE_VECTORIZE_INDEX': 'REQUIRED',
        'CLOUDFLARE_D1_DATABASE_ID': 'REQUIRED',
        'CLOUDFLARE_R2_BUCKET': 'OPTIONAL',
        'CLOUDFLARE_EMBEDDING_MODEL': 'OPTIONAL',
        'CLOUDFLARE_LARGE_CONTENT_THRESHOLD': 'OPTIONAL',
        'CLOUDFLARE_MAX_RETRIES': 'OPTIONAL',
        'CLOUDFLARE_BASE_DELAY': 'OPTIONAL'
    }

    print(f"\nCloudflare Configuration:")
    missing_required = []
    for var, requirement in cloudflare_vars.items():
        value = os.getenv(var)
        if not value:
            if requirement == 'REQUIRED':
                print_status("error", f"{var}: NOT SET ({requirement})")
                missing_required.append(var)
            else:
                print_status("warning", f"{var}: NOT SET ({requirement})")
            continue
        # Never echo full tokens to the console.
        display_value = f"{value[:8]}***MASKED***" if 'TOKEN' in var else value
        print_status("success", f"{var}: {display_value} ({requirement})")

    if missing_required:
        print_status("error", f"Missing required Cloudflare variables: {', '.join(missing_required)}")
        return False
    print_status("success", "All required Cloudflare variables are set")
    return True
117 |
def test_config_import():
    """Try importing backend settings from mcp_memory_service.config.

    Returns the configured backend name on success, or None when the import
    fails (the config module raises SystemExit if required Cloudflare
    variables are missing).
    """
    print_separator("CONFIGURATION MODULE TEST")

    try:
        print("Attempting to import config module...")
        from mcp_memory_service.config import (
            STORAGE_BACKEND,
            CLOUDFLARE_API_TOKEN,
            CLOUDFLARE_ACCOUNT_ID,
            CLOUDFLARE_VECTORIZE_INDEX,
            CLOUDFLARE_D1_DATABASE_ID
        )
    except SystemExit as e:
        print_status("error", f"Config import failed with SystemExit: {e}")
        print("  This means required Cloudflare variables are missing")
        return None
    except Exception as e:
        print_status("error", f"Config import failed with error: {e}")
        return None

    print_status("success", "Config import successful")
    print(f"  Configured Backend: {STORAGE_BACKEND}")
    print(f"  API Token Set: {'YES' if CLOUDFLARE_API_TOKEN else 'NO'}")
    print(f"  Account ID: {CLOUDFLARE_ACCOUNT_ID}")
    print(f"  Vectorize Index: {CLOUDFLARE_VECTORIZE_INDEX}")
    print(f"  D1 Database ID: {CLOUDFLARE_D1_DATABASE_ID}")

    return STORAGE_BACKEND
148 |
def test_storage_creation():
    """Instantiate the configured storage backend.

    Returns the storage instance on success, or None when the backend is
    unknown or construction raises.
    """
    print_separator("STORAGE BACKEND CREATION TEST")

    try:
        from mcp_memory_service.config import STORAGE_BACKEND
        print(f"Attempting to create {STORAGE_BACKEND} storage...")

        if STORAGE_BACKEND == 'cloudflare':
            from mcp_memory_service.storage.cloudflare import CloudflareStorage
            from mcp_memory_service.config import (
                CLOUDFLARE_API_TOKEN,
                CLOUDFLARE_ACCOUNT_ID,
                CLOUDFLARE_VECTORIZE_INDEX,
                CLOUDFLARE_D1_DATABASE_ID,
                CLOUDFLARE_R2_BUCKET,
                CLOUDFLARE_EMBEDDING_MODEL,
                CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
                CLOUDFLARE_MAX_RETRIES,
                CLOUDFLARE_BASE_DELAY
            )

            instance = CloudflareStorage(
                api_token=CLOUDFLARE_API_TOKEN,
                account_id=CLOUDFLARE_ACCOUNT_ID,
                vectorize_index=CLOUDFLARE_VECTORIZE_INDEX,
                d1_database_id=CLOUDFLARE_D1_DATABASE_ID,
                r2_bucket=CLOUDFLARE_R2_BUCKET,
                embedding_model=CLOUDFLARE_EMBEDDING_MODEL,
                large_content_threshold=CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
                max_retries=CLOUDFLARE_MAX_RETRIES,
                base_delay=CLOUDFLARE_BASE_DELAY
            )
            print_status("success", "CloudflareStorage instance created successfully")
            print(f"  Storage class: {instance.__class__.__name__}")
            return instance

        if STORAGE_BACKEND == 'sqlite_vec':
            from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
            from mcp_memory_service.config import SQLITE_VEC_PATH
            instance = SqliteVecMemoryStorage(SQLITE_VEC_PATH)
            print_status("success", "SqliteVecMemoryStorage instance created successfully")
            print(f"  Storage class: {instance.__class__.__name__}")
            print(f"  Database path: {SQLITE_VEC_PATH}")
            return instance

        print_status("error", f"Unknown storage backend: {STORAGE_BACKEND}")
        return None

    except Exception as e:
        print_status("error", f"Storage creation failed: {e}")
        import traceback
        print(f"Full traceback:")
        traceback.print_exc()
        return None
205 |
def _verify_token_endpoint(endpoint_url, endpoint_type, api_token, requests):
    """Verify a Cloudflare API token against one verification endpoint.

    Args:
        endpoint_url: URL to test the token against.
        endpoint_type: Human-readable endpoint description for log output.
        api_token: The Cloudflare API token to verify.
        requests: The (already imported) requests module.

    Returns:
        tuple: (success: bool, result: dict or None)
    """
    print(f"\nTesting {endpoint_type} token verification...")
    try:
        response = requests.get(
            endpoint_url,
            headers={"Authorization": f"Bearer {api_token}"},
            timeout=10
        )

        # Non-200 means the endpoint rejected the request outright.
        if response.status_code != 200:
            print_status("error", f"{endpoint_type} verification failed: HTTP {response.status_code}")
            print(f"  Response: {response.text}")
            return False, None

        data = response.json()
        if not data.get("success"):
            print_status("error", f"{endpoint_type} verification failed")
            for error in data.get("errors", []):
                print(f"  Error {error.get('code')}: {error.get('message')}")
            return False, None

        result = data.get("result", {})
        print_status("success", f"{endpoint_type} verification successful")
        print(f"  Token ID: {result.get('id', 'N/A')}")
        print(f"  Status: {result.get('status', 'N/A')}")
        print(f"  Expires: {result.get('expires_on', 'N/A')}")
        return True, result

    except Exception as e:
        # Network errors, timeouts, JSON decode failures, etc.
        print_status("error", f"{endpoint_type} verification error: {e}")
        return False, None
250 |
251 |
def test_cloudflare_token():
    """Verify the Cloudflare API token against both verification endpoints.

    Trying the account-scoped endpoint and the generic user endpoint helps
    identify whether the token is scoped or global. Returns True if either
    endpoint accepts the token.
    """
    print_separator("CLOUDFLARE TOKEN VERIFICATION")

    api_token = os.getenv('CLOUDFLARE_API_TOKEN')
    account_id = os.getenv('CLOUDFLARE_ACCOUNT_ID')

    if not api_token:
        print_status("error", "CLOUDFLARE_API_TOKEN not set, skipping token verification")
        return False

    if not account_id:
        print_status("warning", "CLOUDFLARE_ACCOUNT_ID not set, cannot test account-scoped endpoint")

    try:
        import requests
    except ImportError:
        print_status("warning", "requests not available, skipping token verification")
        return False

    token_verified = False

    # Account-scoped endpoint (recommended for scoped tokens).
    if account_id:
        url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/tokens/verify"
        ok, _ = _verify_token_endpoint(url, "account-scoped", api_token, requests)
        token_verified = token_verified or ok

    # Generic user endpoint (works for global/legacy tokens).
    ok, _ = _verify_token_endpoint(
        "https://api.cloudflare.com/client/v4/user/tokens/verify",
        "generic user", api_token, requests
    )
    token_verified = token_verified or ok

    # Explain which endpoint matches which token type.
    print("\nTOKEN VERIFICATION GUIDANCE:")
    if account_id:
        print("✅ For account-scoped tokens (recommended), use:")
        print(f" curl \"https://api.cloudflare.com/client/v4/accounts/{account_id}/tokens/verify\" \\")
        print(f" -H \"Authorization: Bearer YOUR_TOKEN\"")
    print("✅ For global tokens (legacy), use:")
    print(" curl \"https://api.cloudflare.com/client/v4/user/tokens/verify\" \\")
    print(" -H \"Authorization: Bearer YOUR_TOKEN\"")
    print("❌ Common mistake: Using wrong endpoint for token type")
    print("📖 See docs/troubleshooting/cloudflare-authentication.md for details")

    return token_verified
300 |
def main():
    """Run all diagnostic checks in order and print a consolidated summary."""
    print("MCP Memory Service Backend Configuration Diagnostics")
    print("=" * 60)

    # Step 1: .env presence / contents
    check_env_file()

    # Step 2: environment variables
    cloudflare_ready = check_environment_variables()

    # Step 3: Cloudflare token verification
    token_valid = test_cloudflare_token()

    # Step 4: config module import
    configured_backend = test_config_import()

    # Step 5: storage creation (only if the config loaded)
    storage = test_storage_creation() if configured_backend else None

    print_separator("DIAGNOSTIC SUMMARY")

    if configured_backend == 'cloudflare' and cloudflare_ready and token_valid and storage:
        print_status("success", "Cloudflare backend should be working correctly")
        print(f"  Configuration loaded: {configured_backend}")
        print(f"  Required variables set: {cloudflare_ready}")
        print(f"  Token verification: {'PASSED' if token_valid else 'NOT TESTED'}")
        print(f"  Storage instance created: {storage.__class__.__name__}")
    elif configured_backend == 'sqlite_vec' and storage:
        print_status("success", "SQLite-vec backend is working")
        print(f"  Configuration loaded: {configured_backend}")
        print(f"  Storage instance created: {storage.__class__.__name__}")
        if cloudflare_ready:
            # Credentials exist but the backend selection ignores them.
            print_status("warning", "Cloudflare variables are set but backend is sqlite_vec")
            print("  Check MCP_MEMORY_STORAGE_BACKEND environment variable")
    else:
        print_status("error", "Backend configuration has issues")
        print(f"  Configured backend: {configured_backend or 'FAILED TO LOAD'}")
        print(f"  Cloudflare variables ready: {cloudflare_ready}")
        print(f"  Storage created: {'YES' if storage else 'NO'}")

    print("\nTROUBLESHOOTING STEPS:")
    if not cloudflare_ready:
        print("  1. Set missing Cloudflare environment variables")
        print("  2. Create .env file with Cloudflare credentials")
    if not token_valid:
        print("  3. Verify Cloudflare API token is valid and has correct permissions")
        print("  4. Use account-scoped verification endpoint (see docs/troubleshooting/cloudflare-authentication.md)")
    if not configured_backend:
        print("  5. Fix environment variable loading issues")
    if configured_backend and not storage:
        print("  6. Check Cloudflare credentials and connectivity")
357 |
358 | if __name__ == "__main__":
359 | main()
```