This is page 27 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/docs/sqlite-vec-backend.md:
--------------------------------------------------------------------------------

```markdown
  1 | # SQLite-vec Backend Guide
  2 | 
  3 | ## Overview
  4 | 
  5 | The MCP Memory Service now supports SQLite-vec as an alternative storage backend. SQLite-vec provides a lightweight, high-performance vector database solution that offers several advantages over ChromaDB:
  6 | 
  7 | - **Lightweight**: Single file database with no external dependencies
  8 | - **Fast**: Optimized vector operations with efficient indexing
  9 | - **Portable**: Easy to backup, copy, and share memory databases
 10 | - **Reliable**: Built on SQLite's proven reliability and ACID compliance
 11 | - **Memory Efficient**: Lower memory footprint for smaller memory collections
 12 | 
 13 | ## Installation
 14 | 
 15 | ### Prerequisites
 16 | 
 17 | The sqlite-vec backend requires the `sqlite-vec` Python package:
 18 | 
 19 | ```bash
 20 | # Install sqlite-vec
 21 | pip install sqlite-vec
 22 | 
 23 | # Or with UV (recommended)
 24 | uv add sqlite-vec
 25 | ```
 26 | 
 27 | ### Verification
 28 | 
 29 | You can verify sqlite-vec is available by running:
 30 | 
 31 | ```python
 32 | try:
 33 |     import sqlite_vec
 34 |     print("✅ sqlite-vec is available")
 35 | except ImportError:
 36 |     print("❌ sqlite-vec is not installed")
 37 | ```
 38 | 
 39 | ## Configuration
 40 | 
 41 | ### Environment Variables
 42 | 
 43 | To use the sqlite-vec backend, set the storage backend environment variable:
 44 | 
 45 | ```bash
 46 | # Primary configuration
 47 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 48 | 
 49 | # Optional: Custom database path
 50 | export MCP_MEMORY_SQLITE_PATH=/path/to/your/memory.db
 51 | ```
 52 | 
 53 | ### Platform-Specific Setup
 54 | 
 55 | #### macOS (Bash/Zsh)
 56 | ```bash
 57 | # Add to ~/.bashrc or ~/.zshrc
 58 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 59 | export MCP_MEMORY_SQLITE_PATH="$HOME/Library/Application Support/mcp-memory/sqlite_vec.db"
 60 | ```
 61 | 
 62 | #### Windows (PowerShell)
 63 | ```powershell
 64 | # Add to PowerShell profile
 65 | $env:MCP_MEMORY_STORAGE_BACKEND = "sqlite_vec"
 66 | $env:MCP_MEMORY_SQLITE_PATH = "$env:LOCALAPPDATA\mcp-memory\sqlite_vec.db"
 67 | ```
 68 | 
 69 | #### Windows (Command Prompt)
 70 | ```cmd
 71 | set MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 72 | set MCP_MEMORY_SQLITE_PATH=%LOCALAPPDATA%\mcp-memory\sqlite_vec.db
 73 | ```
 74 | 
 75 | #### Linux
 76 | ```bash
 77 | # Add to ~/.bashrc
 78 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
 79 | export MCP_MEMORY_SQLITE_PATH="$HOME/.local/share/mcp-memory/sqlite_vec.db"
 80 | ```
 81 | 
 82 | ### Claude Desktop Configuration
 83 | 
 84 | Update your Claude Desktop MCP configuration:
 85 | 
 86 | ```json
 87 | {
 88 |   "mcpServers": {
 89 |     "memory": {
 90 |       "command": "uv",
 91 |       "args": ["--directory", "/path/to/mcp-memory-service", "run", "memory"],
 92 |       "env": {
 93 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec"
 94 |       }
 95 |     }
 96 |   }
 97 | }
 98 | ```
 99 | 
100 | ## Migration from ChromaDB
101 | 
102 | ### Automatic Migration
103 | 
104 | Use the provided migration script for easy migration:
105 | 
106 | ```bash
107 | # Simple migration with default paths
108 | python scripts/migration/migrate_to_sqlite_vec.py
109 | 
110 | # Custom migration
111 | python scripts/migration/migrate_storage.py \
112 |   --from chroma \
113 |   --to sqlite_vec \
114 |   --source-path /path/to/chroma_db \
115 |   --target-path /path/to/sqlite_vec.db \
116 |   --backup
117 | ```
118 | 
119 | ### Manual Migration Steps
120 | 
121 | 1. **Stop the MCP Memory Service**
122 |    ```bash
123 |    # Stop Claude Desktop or any running instances
124 |    ```
125 | 
126 | 2. **Create a backup** (recommended)
127 |    ```bash
128 |    python scripts/migration/migrate_storage.py \
129 |      --from chroma \
130 |      --to sqlite_vec \
131 |      --source-path ~/.local/share/mcp-memory/chroma_db \
132 |      --target-path ~/.local/share/mcp-memory/sqlite_vec.db \
133 |      --backup \
134 |      --backup-path memory_backup.json
135 |    ```
136 | 
137 | 3. **Set environment variables**
138 |    ```bash
139 |    export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
140 |    ```
141 | 
142 | 4. **Restart Claude Desktop**
143 | 
144 | ### Migration Verification
145 | 
146 | After migration, verify your memories are accessible:
147 | 
148 | ```bash
149 | # Test the new backend
150 | python scripts/validation/verify_environment.py
151 | 
152 | # Check database statistics
153 | python -c "
154 | import asyncio
155 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
156 | 
157 | async def check_stats():
158 |     storage = SqliteVecMemoryStorage('path/to/your/db')
159 |     await storage.initialize()
160 |     stats = storage.get_stats()
161 |     print(f'Total memories: {stats[\"total_memories\"]}')
162 |     print(f'Database size: {stats[\"database_size_mb\"]} MB')
163 |     storage.close()
164 | 
165 | asyncio.run(check_stats())
166 | "
167 | ```
168 | 
169 | ## Performance Characteristics
170 | 
171 | ### Memory Usage
172 | 
173 | | Collection Size | ChromaDB RAM | SQLite-vec RAM | Difference |
174 | |----------------|--------------|----------------|------------|
175 | | 1,000 memories | ~200 MB | ~50 MB | -75% |
176 | | 10,000 memories | ~800 MB | ~200 MB | -75% |
177 | | 100,000 memories | ~4 GB | ~1 GB | -75% |
178 | 
179 | ### Query Performance
180 | 
181 | - **Semantic Search**: Similar performance to ChromaDB for most use cases
182 | - **Tag Search**: Faster due to SQL indexing
183 | - **Metadata Queries**: Significantly faster with SQL WHERE clauses
184 | - **Startup Time**: 2-3x faster initialization
185 | 
186 | ### Storage Characteristics
187 | 
188 | - **Database File**: Single `.db` file (easy backup/restore)
189 | - **Disk Usage**: ~30% smaller than ChromaDB for same data
190 | - **Concurrent Access**: SQLite-level locking (single writer, multiple readers)
191 | 
192 | ## Advanced Configuration
193 | 
194 | ### Custom Embedding Models
195 | 
196 | ```python
197 | # Initialize with custom model
198 | storage = SqliteVecMemoryStorage(
199 |     db_path="memory.db",
200 |     embedding_model="all-mpnet-base-v2"  # Higher quality, slower
201 | )
202 | ```
203 | 
204 | ### Multi-Client Access Configuration
205 | 
206 | SQLite-vec supports advanced multi-client access through **two complementary approaches**:
207 | 
208 | 1. **Phase 1: WAL Mode** - Direct SQLite access with Write-Ahead Logging
209 | 2. **Phase 2: HTTP Coordination** - Automatic HTTP server coordination for seamless multi-client access
210 | 
211 | #### Phase 1: WAL Mode (Default)
212 | 
213 | The backend automatically enables WAL mode with these default settings:
214 | - **WAL Mode**: Enables multiple readers + single writer
215 | - **Busy Timeout**: 5 seconds (prevents immediate lock errors)  
216 | - **Synchronous**: NORMAL (balanced performance/safety)
217 | 
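For reference, these defaults correspond roughly to the SQLite pragmas below. A minimal illustrative sketch using Python's built-in `sqlite3` module (not the service's actual initialization code):

    import sqlite3

    conn = sqlite3.connect("sqlite_vec.db")
    # Write-Ahead Logging: multiple readers can coexist with a single writer
    conn.execute("PRAGMA journal_mode=WAL")
    # Wait up to 5 seconds for a lock instead of failing immediately
    conn.execute("PRAGMA busy_timeout=5000")
    # NORMAL synchronous: balanced durability vs. write performance in WAL mode
    conn.execute("PRAGMA synchronous=NORMAL")
    conn.close()
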
218 | #### Phase 2: HTTP Server Auto-Detection (Advanced)
219 | 
220 | The system automatically detects the optimal coordination mode:
221 | 
222 | **Auto-Detection Modes:**
223 | - **`http_client`**: Existing HTTP server detected → Connect as client
224 | - **`http_server`**: No server found, port available → Start HTTP server
225 | - **`direct`**: Port in use by other service → Fall back to WAL mode
226 | 
227 | **Coordination Flow:**
228 | 1. Check if MCP Memory Service HTTP server is running
229 | 2. If found → Use HTTP client to connect to existing server
230 | 3. If not found and port available → Auto-start HTTP server (optional)
231 | 4. If port busy → Fall back to direct SQLite with WAL mode
232 | 
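A rough sketch of this decision logic is shown below. This is illustrative only: the actual implementation lives in `src/mcp_memory_service/utils/port_detection.py`, and the function name here is hypothetical.

    import os
    import socket
    import urllib.request

    def sketch_detect_coordination_mode(host: str = "localhost", port: int = 8000) -> str:
        """Illustrative only: pick a coordination mode for the SQLite-vec backend."""
        try:
            socket.create_connection((host, port), timeout=0.5).close()
            port_in_use = True
        except OSError:
            port_in_use = False

        if port_in_use:
            try:
                # Probe the health endpoint to see whether it is our HTTP server
                urllib.request.urlopen(f"http://{host}:{port}/health", timeout=1)
                return "http_client"  # connect to the existing MCP Memory Service server
            except Exception:
                return "direct"       # port taken by another service -> WAL mode
        if os.environ.get("MCP_HTTP_ENABLED", "").lower() == "true":
            return "http_server"      # port free -> auto-start the HTTP server
        return "direct"               # default: direct SQLite access with WAL mode
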
233 | #### Custom SQLite Pragmas
234 | 
235 | You can customize SQLite behavior using environment variables:
236 | 
237 | ```bash
238 | # Recommended configuration (v8.9.0+) - For concurrent HTTP + MCP access
239 | export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=15000,cache_size=20000"
240 | 
241 | # Example configurations for different scenarios:
242 | # High concurrency setup (longer timeout)
243 | export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=30000,cache_size=20000,wal_autocheckpoint=1000"
244 | 
245 | # Performance optimized (use with caution - trades safety for speed)
246 | export MCP_MEMORY_SQLITE_PRAGMAS="synchronous=NORMAL,temp_store=MEMORY,cache_size=50000,busy_timeout=15000"
247 | 
248 | # Conservative/safe mode (maximum data safety)
249 | export MCP_MEMORY_SQLITE_PRAGMAS="synchronous=FULL,busy_timeout=60000,cache_size=20000"
250 | ```
251 | 
252 | #### HTTP Coordination Configuration
253 | 
254 | Enable automatic HTTP server coordination for optimal multi-client access:
255 | 
256 | ```bash
257 | # Enable HTTP server auto-start
258 | export MCP_HTTP_ENABLED=true
259 | 
260 | # Configure HTTP server settings (optional)
261 | export MCP_HTTP_PORT=8000
262 | export MCP_HTTP_HOST=localhost
263 | 
264 | # Combine with SQLite-vec backend
265 | export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
266 | ```
267 | 
268 | **Coordination Modes Explained:**
269 | 
270 | 1. **Automatic Mode (Recommended)**
271 |    ```bash
272 |    # No configuration needed - auto-detects best mode
273 |    export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
274 |    ```
275 | 
276 | 2. **Forced HTTP Client Mode**
277 |    ```bash
278 |    # Always connect to existing server (fails if none running)
279 |    export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
280 |    export MCP_HTTP_ENABLED=false
281 |    # Requires running: python scripts/server/run_http_server.py
282 |    ```
283 | 
284 | 3. **Direct WAL Mode Only**
285 |    ```bash
286 |    # Disable HTTP coordination entirely
287 |    export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
288 | export MCP_HTTP_ENABLED=false
290 |    ```
291 | 
292 | #### Multi-Client Claude Desktop Configuration
293 | 
294 | **Option 1: Automatic Coordination (Recommended)**
295 | ```json
296 | {
297 |   "mcpServers": {
298 |     "memory": {
299 |       "command": "uv",
300 |       "args": ["--directory", "/path/to/mcp-memory-service", "run", "memory"],
301 |       "env": {
302 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec",
303 |         "MCP_HTTP_ENABLED": "true"
304 |       }
305 |     }
306 |   }
307 | }
308 | ```
309 | 
310 | **Option 2: Manual HTTP Server + Client Mode**
311 | ```json
312 | {
313 |   "mcpServers": {
314 |     "memory": {
315 |       "command": "uv", 
316 |       "args": ["--directory", "/path/to/mcp-memory-service", "run", "memory"],
317 |       "env": {
318 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec",
319 |         "MCP_HTTP_ENABLED": "false"
320 |       }
321 |     }
322 |   }
323 | }
324 | ```
325 | *Note: Requires manually running `python scripts/server/run_http_server.py` first*
326 | 
327 | **Option 3: WAL Mode Only (Simple)**
328 | ```json
329 | {
330 |   "mcpServers": {
331 |     "memory": {
332 |       "command": "uv",
333 |       "args": ["--directory", "/path/to/mcp-memory-service", "run", "memory"],
334 |       "env": {
335 |         "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec",
336 |         "MCP_MEMORY_SQLITE_PRAGMAS": "busy_timeout=10000"
337 |       }
338 |     }
339 |   }
340 | }
341 | ```
342 | 
343 | ### Database Optimization
344 | 
345 | ```bash
346 | # Optimize database periodically
347 | python -c "
348 | import asyncio
349 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
350 | 
351 | async def optimize():
352 |     storage = SqliteVecMemoryStorage('path/to/db')
353 |     await storage.initialize()
354 |     
355 |     # Clean up duplicates
356 |     count, msg = await storage.cleanup_duplicates()
357 |     print(f'Cleaned up {count} duplicates')
358 |     
359 |     # Vacuum database
360 |     storage.conn.execute('VACUUM')
361 |     print('Database vacuumed')
362 |     
363 |     storage.close()
364 | 
365 | asyncio.run(optimize())
366 | "
367 | ```
368 | 
369 | ### Backup and Restore
370 | 
371 | ```bash
372 | # Create backup
373 | python scripts/migration/migrate_storage.py \
374 |   --from sqlite_vec \
375 |   --to sqlite_vec \
376 |   --source-path memory.db \
377 |   --target-path backup.db
378 | 
379 | # Or simple file copy
380 | cp memory.db memory_backup.db
381 | 
382 | # Restore from JSON backup
383 | python scripts/migration/migrate_storage.py \
384 |   --restore backup.json \
385 |   --to sqlite_vec \
386 |   --target-path restored_memory.db
387 | ```
388 | 
389 | ## Troubleshooting
390 | 
391 | ### Common Issues
392 | 
393 | #### 1. sqlite-vec Not Found
394 | ```
395 | ImportError: No module named 'sqlite_vec'
396 | ```
397 | **Solution**: Install sqlite-vec package
398 | ```bash
399 | pip install sqlite-vec
400 | # or
401 | uv add sqlite-vec
402 | ```
403 | 
404 | #### 2. Database Lock Errors
405 | ```
406 | sqlite3.OperationalError: database is locked
407 | ```
408 | 
409 | **✅ Fixed in v8.9.0** - Proper SQLite pragmas are now configured automatically by the installer
410 | 
411 | **For Single Client Issues:**
412 | ```bash
413 | # Kill existing processes
414 | pkill -f "mcp-memory-service"
415 | # Restart Claude Desktop
416 | ```
417 | 
418 | **For Multi-Client Setup (Claude Desktop + Claude Code + HTTP Server):**
419 | ```bash
420 | # v8.9.0+ Solution: Configure recommended pragma values
421 | export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=15000,cache_size=20000"
422 | 
423 | # Restart all services to apply changes
424 | # Note: Installer automatically sets these for hybrid/sqlite_vec backends
425 | 
426 | # If issues persist, try longer timeout:
427 | export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=30000,cache_size=20000"
428 | 
429 | # Check for stale lock files (rare)
430 | ls -la /path/to/your/database-wal
431 | ls -la /path/to/your/database-shm
432 | 
433 | # If stale locks exist (no active processes), remove them
434 | rm /path/to/your/database-wal
435 | rm /path/to/your/database-shm
436 | 
437 | # Restart all MCP clients
438 | ```
439 | 
440 | **Prevention Tips:**
441 | - Always use WAL mode (enabled by default)
442 | - Configure appropriate busy timeouts for your use case
443 | - Ensure proper shutdown of MCP clients
444 | - Use connection retry logic (built-in)
445 | 
446 | #### 3. HTTP Coordination Issues
447 | ```
448 | Failed to initialize HTTP client storage: Connection refused
449 | ```
450 | **Solutions:**
451 | 
452 | **Auto-Detection Problems:**
453 | ```bash
454 | # Check if HTTP server auto-start is working
455 | export LOG_LEVEL=DEBUG
456 | export MCP_HTTP_ENABLED=true
457 | 
458 | # Check coordination mode detection
459 | python -c "
460 | import asyncio
461 | from src.mcp_memory_service.utils.port_detection import detect_server_coordination_mode
462 | print(asyncio.run(detect_server_coordination_mode()))
463 | "
464 | ```
465 | 
466 | **Manual HTTP Server Setup:**
467 | ```bash
468 | # Start HTTP server manually in separate terminal
469 | python scripts/server/run_http_server.py
470 | 
471 | # Then start MCP clients (they'll auto-detect the running server)
472 | ```
473 | 
474 | **Port Conflicts:**
475 | ```bash
476 | # Check what's using the port
477 | netstat -an | grep :8000  # Linux/macOS
478 | netstat -an | findstr :8000  # Windows
479 | 
480 | # Use different port
481 | export MCP_HTTP_PORT=8001
482 | ```
483 | 
484 | **Fallback to WAL Mode:**
485 | ```bash
486 | # Force WAL mode if HTTP coordination fails
487 | export MCP_HTTP_ENABLED=false
487 | export MCP_HTTP_ENABLED=false
490 | 
491 | #### 4. Permission Errors
492 | ```
493 | PermissionError: [Errno 13] Permission denied
494 | ```
495 | **Solution**: Check database file permissions
496 | ```bash
497 | # Fix permissions
498 | chmod 644 /path/to/sqlite_vec.db
499 | chmod 755 /path/to/directory
500 | ```
501 | 
502 | #### 5. Migration Failures
503 | ```
504 | Migration failed: No memories found
505 | ```
506 | **Solution**: Verify source path and initialize if needed
507 | ```bash
508 | # Check source exists
509 | ls -la /path/to/chroma_db
510 | # Use absolute paths in migration
511 | ```
512 | 
513 | ### Debug Mode
514 | 
515 | Enable debug logging for troubleshooting:
516 | 
517 | ```bash
518 | export LOG_LEVEL=DEBUG
519 | export DEBUG_MODE=1
520 | # Run your MCP client
521 | ```
522 | 
523 | ### Health Checks
524 | 
525 | ```python
526 | # Check backend health
527 | import asyncio
528 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
529 | 
530 | async def health_check():
531 |     storage = SqliteVecMemoryStorage('path/to/db')
532 |     await storage.initialize()
533 |     
534 |     stats = storage.get_stats()
535 |     print(f"Backend: {stats['backend']}")
536 |     print(f"Total memories: {stats['total_memories']}")
537 |     print(f"Database size: {stats['database_size_mb']} MB")
538 |     print(f"Embedding model: {stats['embedding_model']}")
539 |     
540 |     storage.close()
541 | 
542 | asyncio.run(health_check())
543 | ```
544 | 
545 | ## Comparison: ChromaDB vs SQLite-vec
546 | 
547 | | Feature | ChromaDB | SQLite-vec | Winner |
548 | |---------|----------|------------|--------|
549 | | Setup Complexity | Medium | Low | SQLite-vec |
550 | | Memory Usage | High | Low | SQLite-vec |
551 | | Query Performance | Excellent | Very Good | ChromaDB |
552 | | Portability | Poor | Excellent | SQLite-vec |
553 | | Backup/Restore | Complex | Simple | SQLite-vec |
554 | | Concurrent Access | Good | Excellent (HTTP + WAL) | SQLite-vec |
555 | | Multi-Client Support | Good | Excellent (HTTP + WAL) | SQLite-vec |
556 | | Ecosystem | Rich | Growing | ChromaDB |
557 | | Reliability | Good | Excellent | SQLite-vec |
558 | 
559 | ## Best Practices
560 | 
561 | ### When to Use SQLite-vec
562 | 
563 | ✅ **Use SQLite-vec when:**
564 | - Memory collections < 100,000 entries
565 | - Multi-client access needed (Claude Desktop + Claude Code + others)
566 | - Seamless setup and coordination required (auto-detection)
567 | - Portability and backup simplicity are important
568 | - Limited system resources
569 | - Simple deployment requirements
570 | - Want both HTTP and direct access capabilities
571 | 
572 | ### When to Use ChromaDB
573 | 
574 | ✅ **Use ChromaDB when:**
575 | - Memory collections > 100,000 entries
576 | - Heavy concurrent usage
577 | - Maximum query performance is critical
578 | - Rich ecosystem features needed
579 | - Distributed setups
580 | 
581 | ### Multi-Client Coordination Tips
582 | 
583 | 1. **Automatic Mode (Recommended)**
584 |    ```bash
585 |    # Let the system choose the best coordination method
586 |    export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
587 |    export MCP_HTTP_ENABLED=true
588 |    ```
589 | 
590 | 2. **Monitoring Coordination Mode**
591 |    ```bash
592 |    # Check which mode is being used
593 |    export LOG_LEVEL=INFO
594 |    # Look for "Detected coordination mode: ..." in logs
595 |    ```
596 | 
597 | 3. **HTTP Server Management**
598 |    ```bash
599 |    # Manual server control
600 |    python scripts/server/run_http_server.py  # Start manually
601 |    
602 |    # Check server health
603 |    curl http://localhost:8000/health
604 |    ```
605 | 
606 | 4. **Fallback Strategy**
607 |    ```bash
608 |    # If HTTP coordination fails, system falls back to WAL mode
609 |    # No manual intervention needed - fully automatic
610 |    ```
611 | 
612 | ### Performance Tips
613 | 
614 | 1. **Regular Optimization**
615 |    ```bash
616 |    # Run monthly
617 |    python scripts/optimize_sqlite_vec.py
618 |    ```
619 | 
620 | 2. **Batch Operations**
621 |    ```python
622 |    # Store memories in batches for better performance
623 |    for batch in chunk_memories(all_memories, 100):
624 |        for memory in batch:
625 |            await storage.store(memory)
626 |    ```
627 | 
628 | 3. **Index Maintenance**
629 |    ```sql
630 |    -- Rebuild indexes periodically
631 |    REINDEX;
632 |    VACUUM;
633 |    ```
634 | 
635 | ## API Reference
636 | 
637 | The sqlite-vec backend implements the same `MemoryStorage` interface as ChromaDB:
638 | 
639 | ```python
640 | # All standard operations work identically
641 | await storage.store(memory)
642 | results = await storage.retrieve(query, n_results=5)
643 | memories = await storage.search_by_tag(["tag1", "tag2"])
644 | success, msg = await storage.delete(content_hash)
645 | success, msg = await storage.update_memory_metadata(hash, updates)
646 | ```
647 | 
648 | See the main API documentation for complete method signatures.
649 | 
650 | ## Contributing
651 | 
652 | To contribute to sqlite-vec backend development:
653 | 
654 | 1. Run tests: `pytest tests/test_sqlite_vec_storage.py`
655 | 2. Check performance: `python tests/performance/test_sqlite_vec_perf.py`
656 | 3. Add features following the `MemoryStorage` interface
657 | 4. Update this documentation
658 | 
659 | ## Support
660 | 
661 | For sqlite-vec backend issues:
662 | 
663 | 1. Check [sqlite-vec documentation](https://github.com/asg017/sqlite-vec)
664 | 2. Review this guide's troubleshooting section
665 | 3. Open an issue on the [MCP Memory Service repository](https://github.com/user/mcp-memory-service/issues)
```

--------------------------------------------------------------------------------
/scripts/migration/legacy/migrate_chroma_to_sqlite.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Migration script to move data from ChromaDB to SQLite-vec.
 18 | 
 19 | This script reads all memories from your existing ChromaDB installation
 20 | and migrates them to the new SQLite-vec backend, preserving all metadata,
 21 | tags, and timestamps (embeddings are regenerated by the target backend).
 22 | """
 23 | 
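# Illustrative programmatic usage of the functions defined below (hypothetical example):
#
#     stats = MigrationStats()
#     ok = asyncio.run(migrate_memories(CHROMA_PATH, "/path/to/sqlite_vec.db", stats))
#     stats.print_summary()
#     asyncio.run(verify_migration("/path/to/sqlite_vec.db", stats.migrated_successfully))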
 24 | import asyncio
 25 | import os
 26 | import sys
 27 | import logging
 28 | from pathlib import Path
 29 | from typing import List, Dict, Any, Optional, Union
 30 | from datetime import datetime
 31 | import re
 32 | 
 33 | # Add project root to path (this script lives under scripts/migration/legacy/)
 34 | project_root = Path(__file__).resolve().parents[3]
 35 | sys.path.insert(0, str(project_root / "src"))
 36 | 
 37 | from mcp_memory_service.storage.chroma import ChromaMemoryStorage
 38 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 39 | from mcp_memory_service.models.memory import Memory
 40 | from mcp_memory_service.config import CHROMA_PATH, EMBEDDING_MODEL_NAME
 41 | from mcp_memory_service.utils.hashing import generate_content_hash
 42 | 
 43 | logger = logging.getLogger(__name__)
 44 | 
 45 | def safe_timestamp_convert(timestamp: Union[str, float, int, None]) -> float:
 46 |     """Safely convert various timestamp formats to float."""
 47 |     if timestamp is None:
 48 |         return datetime.now().timestamp()
 49 |     
 50 |     if isinstance(timestamp, (int, float)):
 51 |         return float(timestamp)
 52 |     
 53 |     if isinstance(timestamp, str):
 54 |         # Try to parse ISO format strings
 55 |         if 'T' in timestamp or '-' in timestamp:
 56 |             try:
 57 |                 # Handle ISO format with or without 'Z'
 58 |                 timestamp_str = timestamp.rstrip('Z')
 59 |                 dt = datetime.fromisoformat(timestamp_str)
 60 |                 return dt.timestamp()
 61 |             except ValueError:
 62 |                 pass
 63 |         
 64 |         # Try to parse as float string
 65 |         try:
 66 |             return float(timestamp)
 67 |         except ValueError:
 68 |             pass
 69 |     
 70 |     # Fallback to current time
 71 |     logger.warning(f"Could not parse timestamp '{timestamp}', using current time")
 72 |     return datetime.now().timestamp()
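# Example conversions (illustrative):
#   safe_timestamp_convert(1717243200)             -> 1717243200.0
#   safe_timestamp_convert("1717243200.5")         -> 1717243200.5
#   safe_timestamp_convert("2024-06-01T12:00:00Z") -> epoch seconds (parsed as naive local time)
#   safe_timestamp_convert(None)                   -> current time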
 73 | 
 74 | def extract_memory_data_directly(chroma_storage) -> List[Dict[str, Any]]:
 75 |     """Extract memory data directly from ChromaDB without using Memory objects."""
 76 |     try:
 77 |         # Access the ChromaDB collection directly
 78 |         collection = chroma_storage.collection
 79 |         
 80 |         # Get all data from the collection
 81 |         results = collection.get(
 82 |             include=['documents', 'metadatas']
 83 |         )
 84 |         
 85 |         memories = []
 86 |         for i, doc_id in enumerate(results['ids']):
 87 |             try:
 88 |                 # Extract basic data
 89 |                 content = results['documents'][i] if i < len(results['documents']) else ""
 90 |                 metadata = results['metadatas'][i] if i < len(results['metadatas']) else {}
 91 |                 
 92 |                 # Extract and validate tags from metadata
 93 |                 raw_tags = metadata.get('tags', metadata.get('tags_str', []))
 94 |                 tags = []
 95 |                 if isinstance(raw_tags, str):
 96 |                     # Handle comma-separated string or single tag
 97 |                     if ',' in raw_tags:
 98 |                         tags = [tag.strip() for tag in raw_tags.split(',') if tag.strip()]
 99 |                     elif raw_tags.strip():
100 |                         tags = [raw_tags.strip()]
101 |                 elif isinstance(raw_tags, list):
102 |                     # Validate each tag in list
103 |                     tags = [str(tag).strip() for tag in raw_tags if tag and str(tag).strip()]
104 |                 else:
105 |                     logger.warning(f"Unknown tag format for memory {i}: {type(raw_tags)}")
106 |                     tags = []
107 |                 
108 |                 # Extract timestamps with flexible conversion
109 |                 created_at = safe_timestamp_convert(metadata.get('created_at'))
110 |                 updated_at = safe_timestamp_convert(metadata.get('updated_at', created_at))
111 |                 
112 |                 # Extract other metadata
113 |                 memory_type = metadata.get('memory_type', 'imported')
114 |                 
115 |                 # Create clean metadata dict (remove special fields)
116 |                 clean_metadata = {k: v for k, v in metadata.items() 
117 |                                 if k not in ['tags', 'tags_str', 'created_at', 'updated_at', 'memory_type']}
118 |                 
119 |                 # Generate proper content hash instead of using ChromaDB ID
120 |                 proper_content_hash = generate_content_hash(content)
121 |                 
122 |                 memory_data = {
123 |                     'content': content,
124 |                     'tags': tags,
125 |                     'memory_type': memory_type,
126 |                     'metadata': clean_metadata,
127 |                     'created_at': created_at,
128 |                     'updated_at': updated_at,
129 |                     'content_hash': proper_content_hash  # Use proper SHA256 hash
130 |                 }
131 |                 
132 |                 memories.append(memory_data)
133 |                 
134 |             except Exception as e:
135 |                 logger.warning(f"Failed to extract memory {i}: {e}")
136 |                 continue
137 |         
138 |         logger.info(f"Successfully extracted {len(memories)} memories from ChromaDB")
139 |         return memories
140 |         
141 |     except Exception as e:
142 |         logger.error(f"Failed to extract data from ChromaDB: {e}")
143 |         return []
144 | 
145 | class MigrationStats:
146 |     """Track migration statistics."""
147 |     def __init__(self):
148 |         self.total_memories = 0
149 |         self.migrated_successfully = 0
150 |         self.failed_migrations = 0
151 |         self.duplicates_skipped = 0
152 |         self.start_time = datetime.now()
153 |         self.errors: List[str] = []
154 | 
155 |     def add_error(self, error: str):
156 |         self.errors.append(error)
157 |         self.failed_migrations += 1
158 | 
159 |     def print_summary(self):
160 |         duration = datetime.now() - self.start_time
161 |         print("\n" + "="*60)
162 |         print("MIGRATION SUMMARY")
163 |         print("="*60)
164 |         print(f"Total memories found:     {self.total_memories}")
165 |         print(f"Successfully migrated:    {self.migrated_successfully}")
166 |         print(f"Duplicates skipped:       {self.duplicates_skipped}")
167 |         print(f"Failed migrations:        {self.failed_migrations}")
168 |         print(f"Migration duration:       {duration.total_seconds():.2f} seconds")
169 |         
170 |         if self.errors:
171 |             print(f"\nErrors encountered ({len(self.errors)}):")
172 |             for i, error in enumerate(self.errors[:5], 1):  # Show first 5 errors
173 |                 print(f"  {i}. {error}")
174 |             if len(self.errors) > 5:
175 |                 print(f"  ... and {len(self.errors) - 5} more errors")
176 |         else:
177 |             print("\nMigration completed without errors!")
178 | 
179 | 
180 | async def check_chroma_data(chroma_path: str) -> int:
181 |     """Check if ChromaDB data exists and count memories."""
182 |     print(f"Checking ChromaDB data at: {chroma_path}")
183 |     
184 |     try:
185 |         chroma_storage = ChromaMemoryStorage(
186 |             path=chroma_path
187 |         )
188 |         
189 |         # Extract memories directly to avoid data corruption issues
190 |         memories = extract_memory_data_directly(chroma_storage)
191 |         memory_count = len(memories)
192 |         
193 |         print(f"Found {memory_count} memories in ChromaDB")
194 |         return memory_count
195 |         
196 |     except Exception as e:
197 |         print(f"Error accessing ChromaDB: {e}")
198 |         print("Make sure ChromaDB data exists and is accessible")
199 |         return -1
200 | 
201 | 
202 | async def migrate_memories(
203 |     chroma_path: str, 
204 |     sqlite_path: str, 
205 |     stats: MigrationStats,
206 |     batch_size: int = 50,
207 |     skip_duplicates: bool = True
208 | ) -> bool:
209 |     """Migrate all memories from ChromaDB to SQLite-vec."""
210 |     
211 |     chroma_storage = None
212 |     sqlite_storage = None
213 |     
214 |     try:
215 |         # Initialize ChromaDB storage (source)
216 |         print("Connecting to ChromaDB...")
217 |         chroma_storage = ChromaMemoryStorage(
218 |             path=chroma_path
219 |         )
220 |         
221 |         # Initialize SQLite-vec storage (destination)
222 |         print("Connecting to SQLite-vec...")
223 |         sqlite_storage = SqliteVecMemoryStorage(
224 |             db_path=sqlite_path,
225 |             embedding_model=EMBEDDING_MODEL_NAME
226 |         )
227 |         await sqlite_storage.initialize()
228 |         
229 |         # Extract all memories directly from ChromaDB
230 |         print("Extracting all memories from ChromaDB...")
231 |         all_memories = extract_memory_data_directly(chroma_storage)
232 |         stats.total_memories = len(all_memories)
233 |         
234 |         if stats.total_memories == 0:
235 |             print("No memories found in ChromaDB")
236 |             return True
237 |         
238 |         print(f"Found {stats.total_memories} memories to migrate")
239 |         
240 |         # Migrate in batches
241 |         for i in range(0, stats.total_memories, batch_size):
242 |             batch = all_memories[i:i + batch_size]
243 |             batch_num = (i // batch_size) + 1
244 |             total_batches = (stats.total_memories + batch_size - 1) // batch_size
245 |             
246 |             print(f"Processing batch {batch_num}/{total_batches} ({len(batch)} memories)...")
247 |             
248 |             for memory_data in batch:
249 |                 try:
250 |                     # Check if memory already exists in SQLite-vec (if skipping duplicates)
251 |                     if skip_duplicates:
252 |                         try:
253 |                             # Use a more efficient duplicate check
254 |                             cursor = sqlite_storage.conn.execute(
255 |                                 "SELECT 1 FROM memories WHERE content_hash = ? LIMIT 1",
256 |                                 (memory_data['content_hash'],)
257 |                             )
258 |                             if cursor.fetchone():
259 |                                 stats.duplicates_skipped += 1
260 |                                 continue
261 |                         except Exception:
262 |                             # Fallback to retrieve method if direct query fails
263 |                             existing = await sqlite_storage.retrieve(memory_data['content'], n_results=1)
264 |                             if existing and any(m.memory.content_hash == memory_data['content_hash'] for m in existing):
265 |                                 stats.duplicates_skipped += 1
266 |                                 continue
267 |                     
268 |                     # Create Memory object for SQLite-vec storage
269 |                     memory_obj = Memory(
270 |                         content=memory_data['content'],
271 |                         tags=memory_data['tags'],
272 |                         metadata=memory_data['metadata'],
273 |                         created_at=memory_data['created_at'],
274 |                         updated_at=memory_data['updated_at'],
275 |                         content_hash=memory_data['content_hash']
276 |                     )
277 |                     
278 |                     # Store memory in SQLite-vec
279 |                     success, message = await sqlite_storage.store(memory_obj)
280 |                     if not success:
281 |                         raise Exception(f"Storage failed: {message}")
282 |                     stats.migrated_successfully += 1
283 |                     
284 |                 except Exception as e:
285 |                     error_msg = f"Failed to migrate memory {memory_data['content_hash'][:12]}...: {str(e)}"
286 |                     stats.add_error(error_msg)
287 |                     logger.error(error_msg)
288 |             
289 |             # Progress update with percentage
290 |             migrated_so_far = stats.migrated_successfully + stats.duplicates_skipped + stats.failed_migrations
291 |             percentage = (migrated_so_far / stats.total_memories * 100) if stats.total_memories > 0 else 0
292 |             print(f"Batch {batch_num}/{total_batches} complete. Progress: {migrated_so_far}/{stats.total_memories} ({percentage:.1f}%)")
293 |         
294 |         return True
295 |         
296 |     except Exception as e:
297 |         error_msg = f"Critical migration error: {str(e)}"
298 |         stats.add_error(error_msg)
299 |         logger.error(error_msg)
300 |         return False
301 |         
302 |     finally:
303 |         # Clean up connections
304 |         if sqlite_storage:
305 |             sqlite_storage.close()
306 | 
307 | 
308 | async def verify_migration(sqlite_path: str, expected_count: int) -> bool:
309 |     """Verify that the migration was successful."""
310 |     print("Verifying migration results...")
311 |     
312 |     try:
313 |         sqlite_storage = SqliteVecMemoryStorage(
314 |             db_path=sqlite_path,
315 |             embedding_model=EMBEDDING_MODEL_NAME
316 |         )
317 |         await sqlite_storage.initialize()
318 |         
319 |         # Count memories in SQLite-vec
320 |         all_memories = await sqlite_storage.retrieve("", n_results=10000)
321 |         actual_count = len(all_memories)
322 |         
323 |         sqlite_storage.close()
324 |         
325 |         print(f"Verification: Expected {expected_count}, Found {actual_count}")
326 |         
327 |         if actual_count >= expected_count:
328 |             print("Migration verification passed!")
329 |             return True
330 |         else:
331 |             print("Migration verification failed - some memories may be missing")
332 |             return False
333 |             
334 |     except Exception as e:
335 |         print(f"Verification error: {e}")
336 |         return False
337 | 
338 | 
339 | def print_banner():
340 |     """Print migration banner."""
341 |     print("="*60)
342 |     print("MCP Memory Service - ChromaDB to SQLite-vec Migration")
343 |     print("="*60)
344 |     print("This script migrates all your memories from ChromaDB to SQLite-vec.")
345 |     print("Your original ChromaDB data will not be modified.")
346 |     print()
347 | 
348 | 
349 | async def main():
350 |     """Main migration function."""
351 |     print_banner()
352 |     
353 |     # Parse command-line arguments
354 |     import argparse
355 |     parser = argparse.ArgumentParser(description='Migrate ChromaDB to SQLite-vec')
356 |     parser.add_argument('--chroma-path', help='Path to ChromaDB data directory')
357 |     parser.add_argument('--sqlite-path', help='Path for SQLite-vec database')
358 |     parser.add_argument('--batch-size', type=int, default=50, help='Batch size for migration')
359 |     parser.add_argument('--verbose', action='store_true', help='Enable verbose logging')
360 |     args = parser.parse_args()
361 |     
362 |     # Setup logging
363 |     log_level = logging.DEBUG if args.verbose else logging.INFO
364 |     logging.basicConfig(
365 |         level=log_level,
366 |         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
367 |     )
368 |     
369 |     # Configuration with environment variable and argument support
370 |     chroma_path = args.chroma_path or os.environ.get('MCP_MEMORY_CHROMA_PATH', CHROMA_PATH)
371 |     
372 |     # Allow custom SQLite path via argument or environment variable
373 |     sqlite_path = args.sqlite_path or os.environ.get('MCP_MEMORY_SQLITE_PATH')
374 |     if not sqlite_path:
375 |         # Default to same directory as ChromaDB
376 |         chroma_dir = os.path.dirname(chroma_path) if os.path.dirname(chroma_path) else os.getcwd()
377 |         sqlite_path = os.path.join(chroma_dir, 'sqlite_vec_migrated.db')
378 |     
379 |     # Use batch size from arguments
380 |     batch_size = args.batch_size
381 |     
382 |     print(f"ChromaDB source: {chroma_path}")
383 |     print(f"SQLite-vec destination: {sqlite_path}")
384 |     print()
385 |     
386 |     # Check if ChromaDB data exists
387 |     memory_count = await check_chroma_data(chroma_path)
388 |     if memory_count < 0:
389 |         return 1
390 |     
391 |     if memory_count == 0:
392 |         print("No memories to migrate. Migration complete!")
393 |         return 0
394 |     
395 |     # Confirm migration
396 |     print(f"About to migrate {memory_count} memories from ChromaDB to SQLite-vec")
397 |     print(f"Destination file: {sqlite_path}")
398 |     
399 |     try:
400 |         response = input("\nProceed with migration? (y/N): ").strip().lower()
401 |         if response != 'y':
402 |             print("Migration cancelled by user")
403 |             return 1
404 |     except EOFError:
405 |         # Auto-proceed in non-interactive environment
406 |         print("\nAuto-proceeding with migration in non-interactive environment...")
407 |         response = 'y'
408 |     
409 |     # Perform migration
410 |     stats = MigrationStats()
411 |     success = await migrate_memories(chroma_path, sqlite_path, stats, batch_size=batch_size)
412 |     
413 |     if success:
414 |         # Verify migration
415 |         await verify_migration(sqlite_path, stats.migrated_successfully)
416 |     
417 |     # Print summary
418 |     stats.print_summary()
419 |     
420 |     if success and stats.failed_migrations == 0:
421 |         print("\nMigration completed successfully!")
422 |         print("\nNext steps:")
423 |         print(f"   1. Update your environment: export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec")
424 |         print(f"   2. Update database path: export MCP_MEMORY_SQLITE_PATH={sqlite_path}")
425 |         print(f"   3. Restart MCP Memory Service")
426 |         print(f"   4. Test that your memories are accessible")
427 |         print(f"   5. (Optional) Backup your old ChromaDB data: {chroma_path}")
428 |         return 0
429 |     else:
430 |         print("\nMigration completed with errors. Please review the summary above.")
431 |         return 1
432 | 
433 | 
434 | if __name__ == "__main__":
435 |     sys.exit(asyncio.run(main()))
```
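
A minimal sketch of driving the migration helpers above programmatically instead of through the interactive prompt. It assumes `MigrationStats`, `check_chroma_data`, `migrate_memories`, and `verify_migration` are in scope (i.e. the code runs inside, or imports from, the script above); the paths are placeholders.

```python
import asyncio


async def run_migration(chroma_path: str, sqlite_path: str) -> int:
    """Hypothetical non-interactive wrapper around the functions defined above."""
    stats = MigrationStats()

    count = await check_chroma_data(chroma_path)
    if count < 0:
        return 1  # source database missing or unreadable
    if count == 0:
        return 0  # nothing to migrate

    ok = await migrate_memories(chroma_path, sqlite_path, stats, batch_size=100)
    if ok:
        await verify_migration(sqlite_path, stats.migrated_successfully)

    stats.print_summary()
    return 0 if ok and stats.failed_migrations == 0 else 1


# Placeholder paths; these mirror MCP_MEMORY_CHROMA_PATH / MCP_MEMORY_SQLITE_PATH.
exit_code = asyncio.run(run_migration("/tmp/chroma_db", "/tmp/sqlite_vec_migrated.db"))
```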

--------------------------------------------------------------------------------
/claude-hooks/utilities/tiered-conversation-monitor.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Tiered Conversation Monitor
  3 |  * Performance-aware semantic analysis with multiple processing levels
  4 |  */
  5 | 
  6 | const { PerformanceManager } = require('./performance-manager');
  7 | 
  8 | class TieredConversationMonitor {
  9 |     constructor(config = {}, performanceManager = null) {
 10 |         this.config = config;
 11 |         this.performanceManager = performanceManager || new PerformanceManager(config.performance);
 12 | 
 13 |         // Conversation state
 14 |         this.conversationHistory = [];
 15 |         this.currentTopics = new Set();
 16 |         this.contextWindow = config.contextWindow || 10; // Number of recent messages to analyze
 17 | 
 18 |         // Topic tracking
 19 |         this.topicWeights = new Map();
 20 |         this.semanticCache = new Map();
 21 | 
 22 |         // Performance-based configuration
 23 |         this.tierConfig = {
 24 |             instant: {
 25 |                 enabled: true,
 26 |                 methods: ['simplePatternMatch', 'cacheCheck'],
 27 |                 maxLatency: 50
 28 |             },
 29 |             fast: {
 30 |                 enabled: true,
 31 |                 methods: ['topicExtraction', 'lightweightSemantic'],
 32 |                 maxLatency: 150
 33 |             },
 34 |             intensive: {
 35 |                 enabled: false, // Default off for performance
 36 |                 methods: ['deepSemanticAnalysis', 'fullContextAnalysis'],
 37 |                 maxLatency: 500
 38 |             }
 39 |         };
 40 | 
 41 |         this.updateTierConfiguration();
 42 |     }
 43 | 
 44 |     /**
 45 |      * Update tier configuration based on performance manager
 46 |      */
 47 |     updateTierConfiguration() {
 48 |         if (!this.performanceManager) return;
 49 | 
 50 |         const profile = this.performanceManager.performanceBudget;
 51 | 
 52 |         this.tierConfig.instant.enabled = profile.enabledTiers.includes('instant');
 53 |         this.tierConfig.fast.enabled = profile.enabledTiers.includes('fast');
 54 |         this.tierConfig.intensive.enabled = profile.enabledTiers.includes('intensive');
 55 |     }
 56 | 
 57 |     /**
 58 |      * Analyze user message with tiered approach
 59 |      */
 60 |     async analyzeMessage(message, context = {}) {
 61 |         const analysis = {
 62 |             topics: [],
 63 |             semanticShift: 0,
 64 |             triggerProbability: 0,
 65 |             processingTier: 'none',
 66 |             confidence: 0
 67 |         };
 68 | 
 69 |         // Tier 1: Instant processing (< 50ms)
 70 |         if (this.tierConfig.instant.enabled) {
 71 |             const timing = this.performanceManager.startTiming('instant_analysis', 'instant');
 72 | 
 73 |             try {
 74 |                 const instantResults = await this.instantAnalysis(message, context);
 75 |                 analysis.topics.push(...instantResults.topics);
 76 |                 analysis.triggerProbability = Math.max(analysis.triggerProbability, instantResults.triggerProbability);
 77 |                 analysis.processingTier = 'instant';
 78 | 
 79 |                 this.performanceManager.endTiming(timing);
 80 | 
 81 |                 // If instant analysis is confident enough, return early
 82 |                 if (instantResults.confidence > 0.8 || !this.tierConfig.fast.enabled) {
 83 |                     analysis.confidence = instantResults.confidence;
 84 |                     return analysis;
 85 |                 }
 86 |             } catch (error) {
 87 |                 console.warn('[Monitor] Instant analysis failed:', error.message);
 88 |             }
 89 |         }
 90 | 
 91 |         // Tier 2: Fast processing (< 150ms)
 92 |         if (this.tierConfig.fast.enabled && this.performanceManager.shouldRunHook('fast_analysis', 'fast')) {
 93 |             const timing = this.performanceManager.startTiming('fast_analysis', 'fast');
 94 | 
 95 |             try {
 96 |                 const fastResults = await this.fastAnalysis(message, context);
 97 | 
 98 |                 // Merge results with priority to fast analysis
 99 |                 analysis.topics = this.mergeTopics(analysis.topics, fastResults.topics);
100 |                 analysis.semanticShift = fastResults.semanticShift;
101 |                 analysis.triggerProbability = Math.max(analysis.triggerProbability, fastResults.triggerProbability);
102 |                 analysis.processingTier = 'fast';
103 |                 analysis.confidence = fastResults.confidence;
104 | 
105 |                 this.performanceManager.endTiming(timing);
106 | 
107 |                 // If fast analysis is confident, return
108 |                 if (fastResults.confidence > 0.7 || !this.tierConfig.intensive.enabled) {
109 |                     return analysis;
110 |                 }
111 |             } catch (error) {
112 |                 console.warn('[Monitor] Fast analysis failed:', error.message);
113 |             }
114 |         }
115 | 
116 |         // Tier 3: Intensive processing (< 500ms) - only when needed
117 |         if (this.tierConfig.intensive.enabled &&
118 |             this.performanceManager.shouldRunHook('intensive_analysis', 'intensive') &&
119 |             analysis.triggerProbability > 0.3) {
120 | 
121 |             const timing = this.performanceManager.startTiming('intensive_analysis', 'intensive');
122 | 
123 |             try {
124 |                 const intensiveResults = await this.intensiveAnalysis(message, context);
125 | 
126 |                 // Use intensive results as authoritative
127 |                 analysis.topics = intensiveResults.topics;
128 |                 analysis.semanticShift = intensiveResults.semanticShift;
129 |                 analysis.triggerProbability = intensiveResults.triggerProbability;
130 |                 analysis.confidence = intensiveResults.confidence;
131 |                 analysis.processingTier = 'intensive';
132 | 
133 |                 this.performanceManager.endTiming(timing);
134 |             } catch (error) {
135 |                 console.warn('[Monitor] Intensive analysis failed:', error.message);
136 |             }
137 |         }
138 | 
139 |         // Update conversation history
140 |         this.updateConversationHistory(message, analysis);
141 | 
142 |         return analysis;
143 |     }
144 | 
145 |     /**
146 |      * Instant analysis: Pattern matching and cache checks
147 |      */
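    // Illustrative expectation (not part of the original file), assuming no cached
    // entry for the message: for "What did we decide about the API?", the trigger
    // pattern /what (did|do) we (decide|choose|do)/i fires and the tech pattern
    // matches "api", so the result is roughly
    // { topics: ['memory-request', 'api'], triggerProbability: 0.8, confidence: 0.8 }.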
148 |     async instantAnalysis(message, context) {
149 |         const cacheKey = this.generateCacheKey(message);
150 | 
151 |         // Check cache first
152 |         if (this.semanticCache.has(cacheKey)) {
153 |             const cached = this.semanticCache.get(cacheKey);
154 |             // Update last used timestamp
155 |             cached.lastUsed = Date.now();
156 |             this.semanticCache.set(cacheKey, cached);
157 |             return { ...cached, confidence: 0.9 }; // High confidence for cached results
158 |         }
159 | 
160 |         // Simple pattern matching for common triggers
161 |         const triggerPatterns = [
162 |             /what (did|do) we (decide|choose|do)/i,
163 |             /remind me (about|how|what)/i,
164 |             /similar to (what|how) we/i,
165 |             /like we (discussed|did|decided)/i,
166 |             /according to (our|previous)/i,
167 |             /remember when we/i,
168 |             /last time we/i
169 |         ];
170 | 
171 |         let triggerProbability = 0;
172 |         const topics = [];
173 | 
174 |         // Check for explicit memory trigger patterns
175 |         for (const pattern of triggerPatterns) {
176 |             if (pattern.test(message)) {
177 |                 triggerProbability = Math.max(triggerProbability, 0.8);
178 |                 topics.push('memory-request');
179 |                 break;
180 |             }
181 |         }
182 | 
183 |         // Extract obvious topics (technology names, frameworks)
184 |         const techPatterns = [
185 |             /\b(react|vue|angular|node|python|java|docker|kubernetes)\b/i,
186 |             /\b(api|database|frontend|backend|ui|ux)\b/i,
187 |             /\b(authentication|oauth|security|performance)\b/i
188 |         ];
189 | 
190 |         for (const pattern of techPatterns) {
191 |             const matches = message.match(pattern);
192 |             if (matches) {
193 |                 topics.push(...matches.map(m => m.toLowerCase()));
194 |                 triggerProbability = Math.max(triggerProbability, 0.4);
195 |             }
196 |         }
197 | 
198 |         const result = {
199 |             topics: [...new Set(topics)], // Remove duplicates
200 |             triggerProbability,
201 |             confidence: triggerProbability > 0.5 ? 0.8 : 0.4,
202 |             lastUsed: Date.now()
203 |         };
204 | 
205 |         // Cache result
206 |         this.semanticCache.set(cacheKey, result);
207 |         this.cleanCache();
208 | 
209 |         return result;
210 |     }
211 | 
212 |     /**
213 |      * Fast analysis: Lightweight semantic processing
214 |      */
215 |     async fastAnalysis(message, context) {
216 |         // Tokenize and extract key phrases
217 |         const tokens = this.tokenizeMessage(message);
218 |         const keyPhrases = this.extractKeyPhrases(tokens);
219 | 
220 |         // Analyze topic shift from recent history
221 |         const semanticShift = this.calculateSemanticShift(keyPhrases);
222 | 
223 |         // Calculate trigger probability based on context and content
224 |         let triggerProbability = 0;
225 | 
226 |         // Check for question patterns that suggest memory need
227 |         if (this.isQuestionPattern(message)) {
228 |             triggerProbability += 0.3;
229 |         }
230 | 
231 |         // Check for reference to past work
232 |         if (this.referencesPastWork(message)) {
233 |             triggerProbability += 0.4;
234 |         }
235 | 
236 |         // Check for topic complexity
237 |         if (keyPhrases.length > 3) {
238 |             triggerProbability += 0.2;
239 |         }
240 | 
241 |         // Semantic shift indicates topic change
242 |         if (semanticShift > 0.5) {
243 |             triggerProbability += 0.3;
244 |         }
245 | 
246 |         return {
247 |             topics: keyPhrases,
248 |             semanticShift,
249 |             triggerProbability: Math.min(triggerProbability, 1.0),
250 |             confidence: 0.7
251 |         };
252 |     }
253 | 
254 |     /**
255 |      * Intensive analysis: Deep semantic understanding
256 |      */
257 |     async intensiveAnalysis(message, context) {
258 |         // This would integrate with more sophisticated NLP if available
259 |         // For now, enhance the fast analysis with deeper processing
260 | 
261 |         const fastResult = await this.fastAnalysis(message, context);
262 | 
263 |         // Analyze conversation context for better topic understanding
264 |         const contextTopics = this.analyzeConversationContext();
265 |         const mergedTopics = this.mergeTopics(fastResult.topics, contextTopics);
266 | 
267 |         // More sophisticated semantic shift calculation
268 |         const enhancedSemanticShift = this.calculateEnhancedSemanticShift(message, context);
269 | 
270 |         // Advanced trigger probability with context weighting
271 |         let enhancedTriggerProbability = fastResult.triggerProbability;
272 | 
273 |         // Weight based on conversation history
274 |         if (this.conversationHistory.length > 5) {
275 |             const historyWeight = this.calculateHistoryRelevance(message);
276 |             enhancedTriggerProbability += historyWeight * 0.2;
277 |         }
278 | 
279 |         // Project context relevance
280 |         if (context.projectContext) {
281 |             const projectRelevance = this.calculateProjectRelevance(message, context.projectContext);
282 |             enhancedTriggerProbability += projectRelevance * 0.3;
283 |         }
284 | 
285 |         return {
286 |             topics: mergedTopics,
287 |             semanticShift: enhancedSemanticShift,
288 |             triggerProbability: Math.min(enhancedTriggerProbability, 1.0),
289 |             confidence: 0.9
290 |         };
291 |     }
292 | 
293 |     /**
294 |      * Helper methods for analysis
295 |      */
296 | 
297 |     tokenizeMessage(message) {
298 |         return message.toLowerCase()
299 |             .replace(/[^\w\s]/g, ' ')
300 |             .split(/\s+/)
301 |             .filter(token => token.length > 2);
302 |     }
303 | 
304 |     extractKeyPhrases(tokens) {
305 |         // Simple key phrase extraction
306 |         const technicalTerms = new Set([
307 |             'react', 'vue', 'angular', 'node', 'python', 'java', 'javascript',
308 |             'api', 'database', 'frontend', 'backend', 'authentication', 'oauth',
309 |             'docker', 'kubernetes', 'security', 'performance', 'architecture',
310 |             'component', 'service', 'endpoint', 'middleware', 'framework'
311 |         ]);
312 | 
313 |         return tokens.filter(token => technicalTerms.has(token));
314 |     }
315 | 
316 |     calculateSemanticShift(currentTopics) {
317 |         if (this.currentTopics.size === 0) {
318 |             this.currentTopics = new Set(currentTopics);
319 |             return 0;
320 |         }
321 | 
322 |         const intersection = new Set([...currentTopics].filter(x => this.currentTopics.has(x)));
323 |         const union = new Set([...currentTopics, ...this.currentTopics]);
324 | 
325 |         // Prevent division by zero when both sets are empty
326 |         if (union.size === 0) {
327 |             this.currentTopics = new Set(currentTopics);
328 |             return 0;
329 |         }
330 | 
331 |         const similarity = intersection.size / union.size;
332 |         const shift = 1 - similarity;
333 | 
334 |         // Update current topics
335 |         this.currentTopics = new Set(currentTopics);
336 | 
337 |         return shift;
338 |     }
339 | 
340 |     isQuestionPattern(message) {
341 |         const questionPatterns = [
342 |             /^(what|how|why|when|where|which|who)/i,
343 |             /\?$/,
344 |             /^(can|could|would|should|do|does|did|is|are|was|were)/i
345 |         ];
346 | 
347 |         return questionPatterns.some(pattern => pattern.test(message.trim()));
348 |     }
349 | 
350 |     referencesPastWork(message) {
351 |         const pastWorkPatterns = [
352 |             /\b(previous|earlier|before|last time|remember|recall)\b/i,
353 |             /\b(we (did|used|chose|decided|implemented))\b/i,
354 |             /\b(our (approach|solution|decision|choice))\b/i
355 |         ];
356 | 
357 |         return pastWorkPatterns.some(pattern => pattern.test(message));
358 |     }
359 | 
360 |     mergeTopics(topics1, topics2) {
361 |         return [...new Set([...topics1, ...topics2])];
362 |     }
363 | 
364 |     analyzeConversationContext() {
365 |         // Analyze recent conversation for recurring topics
366 |         const recentMessages = this.conversationHistory.slice(-this.contextWindow);
367 |         const allTopics = recentMessages.flatMap(msg => msg.analysis?.topics || []);
368 | 
369 |         // Count topic frequency
370 |         const topicCounts = {};
371 |         allTopics.forEach(topic => {
372 |             topicCounts[topic] = (topicCounts[topic] || 0) + 1;
373 |         });
374 | 
375 |         // Return topics mentioned more than once
376 |         return Object.entries(topicCounts)
377 |             .filter(([topic, count]) => count > 1)
378 |             .map(([topic]) => topic);
379 |     }
380 | 
381 |     calculateEnhancedSemanticShift(message, context) {
382 |         // Enhanced semantic shift with context weighting
383 |         const basicShift = this.calculateSemanticShift(this.extractKeyPhrases(this.tokenizeMessage(message)));
384 | 
385 |         // Weight by message length and complexity
386 |         const lengthWeight = Math.min(message.length / 500, 1.0);
387 |         const complexityWeight = (message.match(/\b(implement|architecture|design|strategy|approach)\b/gi) || []).length * 0.1;
388 | 
389 |         return Math.min(basicShift + lengthWeight * 0.2 + complexityWeight, 1.0);
390 |     }
391 | 
392 |     calculateHistoryRelevance(message) {
393 |         // Calculate how relevant current message is to conversation history
394 |         if (this.conversationHistory.length === 0) return 0;
395 | 
396 |         const messageTopics = new Set(this.extractKeyPhrases(this.tokenizeMessage(message)));
397 |         const historyTopics = new Set(
398 |             this.conversationHistory
399 |                 .flatMap(msg => msg.analysis?.topics || [])
400 |         );
401 | 
402 |         const intersection = new Set([...messageTopics].filter(x => historyTopics.has(x)));
403 |         return intersection.size / Math.max(messageTopics.size, 1);
404 |     }
405 | 
406 |     calculateProjectRelevance(message, projectContext) {
407 |         if (!projectContext) return 0;
408 | 
409 |         const messageTokens = this.tokenizeMessage(message);
410 |         const projectTerms = [
411 |             projectContext.name?.toLowerCase(),
412 |             projectContext.language?.toLowerCase(),
413 |             ...(projectContext.frameworks || []).map(f => f.toLowerCase())
414 |         ].filter(Boolean);
415 | 
416 |         const relevantTerms = messageTokens.filter(token =>
417 |             projectTerms.some(term => term.includes(token) || token.includes(term))
418 |         );
419 | 
420 |         return relevantTerms.length / Math.max(messageTokens.length, 1);
421 |     }
422 | 
423 |     updateConversationHistory(message, analysis) {
424 |         this.conversationHistory.push({
425 |             message,
426 |             analysis,
427 |             timestamp: Date.now()
428 |         });
429 | 
430 |         // Keep only recent history
431 |         if (this.conversationHistory.length > this.contextWindow * 2) {
432 |             this.conversationHistory.splice(0, this.conversationHistory.length - this.contextWindow);
433 |         }
434 |     }
435 | 
436 |     generateCacheKey(message) {
437 |         // Generate cache key from message content
438 |         return message.toLowerCase().replace(/[^\w]/g, '').substring(0, 50);
439 |     }
440 | 
441 |     cleanCache() {
442 |         // Clean cache if it gets too large
443 |         if (this.semanticCache.size > 100) {
444 |             const entries = Array.from(this.semanticCache.entries());
445 |             entries.sort((a, b) => (b[1].lastUsed || 0) - (a[1].lastUsed || 0));
446 | 
447 |             // Keep only the 50 most recently used entries
448 |             this.semanticCache.clear();
449 |             entries.slice(0, 50).forEach(([key, value]) => {
450 |                 this.semanticCache.set(key, value);
451 |             });
452 |         }
453 |     }
454 | 
455 |     /**
456 |      * Get current performance status
457 |      */
458 |     getPerformanceStatus() {
459 |         return {
460 |             tierConfig: this.tierConfig,
461 |             cacheSize: this.semanticCache.size,
462 |             historyLength: this.conversationHistory.length,
463 |             currentTopics: Array.from(this.currentTopics),
464 |             performanceReport: this.performanceManager.getPerformanceReport()
465 |         };
466 |     }
467 | 
468 |     /**
469 |      * Update performance profile
470 |      */
471 |     updatePerformanceProfile(profileName) {
472 |         this.performanceManager.switchProfile(profileName);
473 |         this.updateTierConfiguration();
474 |     }
475 | }
476 | 
477 | module.exports = { TieredConversationMonitor };
```

--------------------------------------------------------------------------------
/scripts/maintenance/assign_memory_types.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Intelligent Memory Type Assignment Script
 18 | 
 19 | Assigns appropriate types to untyped memories using:
 20 | 1. Tag-based inference (highest confidence)
 21 | 2. Content pattern matching (medium confidence)
 22 | 3. Metadata analysis (context hints)
 23 | 4. Fallback to "note" (lowest confidence)
 24 | 
 25 | Usage:
 26 |     python assign_memory_types.py --dry-run      # Preview assignments
 27 |     python assign_memory_types.py --verbose      # Detailed logging
 28 |     python assign_memory_types.py --show-reasoning  # Show inference logic
 29 |     python assign_memory_types.py                # Execute assignments
 30 | """
 31 | 
 32 | import sys
 33 | import os
 34 | import re
 35 | import json
 36 | import sqlite3
 37 | import argparse
 38 | import logging
 39 | from pathlib import Path
 40 | from datetime import datetime
 41 | from typing import Dict, List, Tuple, Optional, Set
 42 | from collections import defaultdict, Counter
 43 | import shutil
 44 | 
 45 | # Add parent directory to path
 46 | sys.path.insert(0, str(Path(__file__).parent.parent.parent))
 47 | 
 48 | from src.mcp_memory_service.config import SQLITE_VEC_PATH
 49 | 
 50 | # Configure logging
 51 | logging.basicConfig(
 52 |     level=logging.INFO,
 53 |     format='%(asctime)s - %(levelname)s - %(message)s',
 54 | )
 55 | logger = logging.getLogger(__name__)
 56 | 
 57 | # ============================================================================
 58 | # TYPE INFERENCE RULES
 59 | # ============================================================================
 60 | 
 61 | # Priority 1: Tag-based inference (highest confidence)
 62 | TAG_TO_TYPE: Dict[str, str] = {
 63 |     # Activity indicators
 64 |     "session-consolidation": "session",
 65 |     "session-summary": "session",
 66 |     "session-end": "session",
 67 |     "session-start": "session",
 68 |     "development-session": "session",
 69 |     "work-session": "session",
 70 | 
 71 |     # Troubleshooting
 72 |     "troubleshooting": "troubleshooting",
 73 |     "debug": "troubleshooting",
 74 |     "debugging": "troubleshooting",
 75 |     "diagnostic": "troubleshooting",
 76 |     "investigation": "troubleshooting",
 77 | 
 78 |     # Fixes
 79 |     "bug-fix": "fix",
 80 |     "bugfix": "fix",
 81 |     "fix": "fix",
 82 |     "patch": "fix",
 83 |     "hotfix": "fix",
 84 |     "correction": "fix",
 85 | 
 86 |     # Releases and deployments
 87 |     "release": "release",
 88 |     "release-notes": "release",
 89 |     "version": "release",
 90 |     "deployment": "deployment",
 91 |     "deploy": "deployment",
 92 |     "production": "deployment",
 93 | 
 94 |     # Features
 95 |     "feature": "feature",
 96 |     "enhancement": "feature",
 97 |     "improvement": "feature",
 98 |     "new-feature": "feature",
 99 | 
100 |     # Configuration
101 |     "configuration": "configuration",
102 |     "config": "configuration",
103 |     "setup": "configuration",
104 |     "settings": "configuration",
105 |     "environment": "configuration",
106 | 
107 |     # Documentation
108 |     "documentation": "documentation",
109 |     "docs": "documentation",
110 |     "readme": "documentation",
111 |     "changelog": "documentation",
112 | 
113 |     # Guides
114 |     "guide": "guide",
115 |     "tutorial": "guide",
116 |     "how-to": "guide",
117 |     "walkthrough": "guide",
118 |     "instructions": "guide",
119 | 
120 |     # Reference
121 |     "reference": "reference",
122 |     "knowledge-base": "reference",
123 |     "cheat-sheet": "reference",
124 |     "quick-reference": "reference",
125 | 
126 |     # Milestones
127 |     "milestone": "milestone",
128 |     "achievement": "achievement",
129 |     "completion": "milestone",
130 |     "accomplished": "achievement",
131 | 
132 |     # Analysis
133 |     "analysis": "analysis",
134 |     "research": "analysis",
135 |     "findings": "analysis",
136 |     "investigation": "analysis",
137 |     "report": "analysis",
138 | 
139 |     # Implementation
140 |     "implementation": "implementation",
141 |     "development": "implementation",
142 |     "coding": "implementation",
143 |     "integration": "implementation",
144 | 
145 |     # Testing
146 |     "test": "test",
147 |     "testing": "test",
148 |     "validation": "test",
149 |     "qa": "test",
150 | 
151 |     # Architecture
152 |     "architecture": "architecture",
153 |     "design": "architecture",
154 |     "design-pattern": "architecture",
155 |     "technical-design": "architecture",
156 | 
157 |     # Infrastructure
158 |     "infrastructure": "infrastructure",
159 |     "devops": "infrastructure",
160 |     "ci-cd": "infrastructure",
161 |     "automation": "infrastructure",
162 | 
163 |     # Process
164 |     "process": "process",
165 |     "workflow": "process",
166 |     "procedure": "process",
167 |     "best-practices": "process",
168 | 
169 |     # Security
170 |     "security": "security",
171 |     "auth": "security",
172 |     "authentication": "security",
173 |     "authorization": "security",
174 | 
175 |     # Status
176 |     "status": "status",
177 |     "update": "status",
178 |     "progress": "status",
179 | }
180 | 
181 | # Priority 2: Content pattern matching (medium confidence)
182 | CONTENT_PATTERNS: Dict[str, List[str]] = {
183 |     "fix": [
184 |         r"\bfixed\b.*\bbug\b",
185 |         r"\bresolved\b.*\b(issue|problem)\b",
186 |         r"\brepair(ed|ing)\b",
187 |         r"\bhotfix\b",
188 |         r"\bpatch(ed|ing)?\b.*\b(bug|issue)\b",
189 |     ],
190 |     "troubleshooting": [
191 |         r"\berror\b.*\boccurred\b",
192 |         r"\btroubleshooting\b",
193 |         r"\bdiagnos(is|tic|ing)\b",
194 |         r"\bdebugging\b",
195 |         r"\binvestigat(ed|ing)\b.*\b(issue|problem|error)\b",
196 |         r"\bfail(ed|ure)\b.*\banalys",
197 |     ],
198 |     "implementation": [
199 |         r"\bimplemented\b",
200 |         r"\bcreated\b.*\b(function|class|module|component)\b",
201 |         r"\badded\b.*\b(feature|functionality)\b",
202 |         r"\bdevelop(ed|ing)\b",
203 |         r"\bbuilt\b.*\b(system|service|tool)\b",
204 |     ],
205 |     "guide": [
206 |         r"^(How to|Step-by-step|Guide:|Tutorial:)",
207 |         r"\binstructions?\b.*\b(follow|complete|execute)\b",
208 |         r"\bprocedure\b",
209 |         r"\bstep \d+",
210 |         r"\bwalkthrough\b",
211 |     ],
212 |     "configuration": [
213 |         r"\bconfigur(e|ed|ation|ing)\b",
214 |         r"\bsetup\b",
215 |         r"\.env\b",
216 |         r"\bsettings?\b",
217 |         r"\benvironment variables?\b",
218 |         r"\binstallation\b",
219 |     ],
220 |     "analysis": [
221 |         r"\banalysis\b.*\b(shows?|reveals?|indicates?)\b",
222 |         r"\bfindings?\b",
223 |         r"\bresults?\b.*\b(show|demonstrate|indicate)\b",
224 |         r"\bresearch\b",
225 |         r"\binvestigation\b.*\bresults?\b",
226 |     ],
227 |     "session": [
228 |         r"\bsession\b.*(summary|recap|notes)\b",
229 |         r"\bwork session\b",
230 |         r"\bdevelopment session\b",
231 |         r"\btopics? (discussed|covered)\b",
232 |     ],
233 |     "release": [
234 |         r"\b(version|v)\d+\.\d+",
235 |         r"\breleas(e|ed|ing)\b",
236 |         r"\bchangelog\b",
237 |         r"\brelease notes\b",
238 |     ],
239 |     "documentation": [
240 |         r"\bdocument(ation|ed|ing)\b",
241 |         r"\bREADME\b",
242 |         r"\bAPI documentation\b",
243 |         r"\breference (manual|guide)\b",
244 |     ],
245 |     "milestone": [
246 |         r"\b(completed|finished|accomplished)\b.*\b(project|milestone|phase)\b",
247 |         r"\bmilestone\b.*\breached\b",
248 |         r"\bdeliverable\b",
249 |     ],
250 | }
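# Illustrative example (not in the original file): content such as
# "Implemented the new sync component" carries no recognizable tag, but it matches
# the r"\bimplemented\b" pattern above, so infer_type() returns
# ("implementation", "Content pattern: ...", 2).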
251 | 
252 | # Priority 3: Metadata type hints
253 | METADATA_TYPE_HINTS: Set[str] = {
254 |     "session-summary",
255 |     "troubleshooting-session",
256 |     "feature-summary",
257 |     "code-review",
258 |     "release-notes",
259 | }
260 | 
261 | 
262 | # ============================================================================
263 | # TYPE INFERENCE ENGINE
264 | # ============================================================================
265 | 
266 | class TypeInferenceEngine:
267 |     """Infer memory types based on tags, content, and metadata."""
268 | 
269 |     def __init__(self, show_reasoning: bool = False):
270 |         self.show_reasoning = show_reasoning
271 |         self.inference_stats = Counter()
272 | 
273 |     def infer_type(self, content: str, tags: List[str], metadata: Optional[Dict]) -> Tuple[str, str, int]:
274 |         """
275 |         Infer memory type.
276 | 
277 |         Returns:
278 |             (inferred_type, reasoning, confidence_score)
279 |             confidence_score: 3=high, 2=medium, 1=low
280 |         """
281 |         # Priority 1: Tag-based inference (confidence=3)
282 |         for tag in tags:
283 |             tag_clean = tag.lower().strip()
284 |             if tag_clean in TAG_TO_TYPE:
285 |                 inferred_type = TAG_TO_TYPE[tag_clean]
286 |                 reasoning = f"Tag match: '{tag}' → '{inferred_type}'"
287 |                 self.inference_stats["tag_match"] += 1
288 |                 return (inferred_type, reasoning, 3)
289 | 
290 |         # Priority 2: Content pattern matching (confidence=2)
291 |         for memory_type, patterns in CONTENT_PATTERNS.items():
292 |             for pattern in patterns:
293 |                 if re.search(pattern, content, re.IGNORECASE | re.MULTILINE):
294 |                     reasoning = f"Content pattern: '{pattern[:30]}...' → '{memory_type}'"
295 |                     self.inference_stats["pattern_match"] += 1
296 |                     return (memory_type, reasoning, 2)
297 | 
298 |         # Priority 3: Metadata hints (confidence=2)
299 |         if metadata:
300 |             metadata_type = metadata.get("type", "")
301 |             if metadata_type in METADATA_TYPE_HINTS:
302 |                 # Extract base type from hyphenated metadata type
303 |                 base_type = metadata_type.split("-")[0]
304 |                 if base_type in ["session", "troubleshooting", "feature", "release"]:
305 |                     reasoning = f"Metadata hint: type='{metadata_type}' → '{base_type}'"
306 |                     self.inference_stats["metadata_hint"] += 1
307 |                     return (base_type, reasoning, 2)
308 | 
309 |         # Priority 4: Fallback to "note" (confidence=1)
310 |         reasoning = "Fallback: No specific indicators → 'note'"
311 |         self.inference_stats["fallback"] += 1
312 |         return ("note", reasoning, 1)
313 | 
314 |     def get_stats(self) -> Dict[str, int]:
315 |         """Get inference statistics."""
316 |         return dict(self.inference_stats)
317 | 
318 | 
319 | # ============================================================================
320 | # DATABASE OPERATIONS
321 | # ============================================================================
322 | 
323 | def create_backup(db_path: str) -> str:
324 |     """Create a timestamped backup of the database."""
325 |     backup_path = f"{db_path}.backup-{datetime.now().strftime('%Y%m%d_%H%M%S')}"
326 |     shutil.copy2(db_path, backup_path)
327 |     logger.info(f"✅ Backup created: {backup_path}")
328 |     return backup_path
329 | 
330 | 
331 | def analyze_untyped_memories(db_path: str) -> Tuple[int, int]:
332 |     """Count untyped memories and total memories."""
333 |     conn = sqlite3.connect(db_path)
334 |     cursor = conn.cursor()
335 | 
336 |     cursor.execute("SELECT COUNT(*) FROM memories")
337 |     total = cursor.fetchone()[0]
338 | 
339 |     cursor.execute("SELECT COUNT(*) FROM memories WHERE memory_type = '' OR memory_type IS NULL")
340 |     untyped = cursor.fetchone()[0]
341 | 
342 |     conn.close()
343 |     return untyped, total
344 | 
345 | 
346 | def get_untyped_memories(db_path: str) -> List[Tuple[str, str, str, str]]:
347 |     """
348 |     Get all untyped memories.
349 | 
350 |     Returns:
351 |         List of (content_hash, content, tags_str, metadata_str)
352 |     """
353 |     conn = sqlite3.connect(db_path)
354 |     cursor = conn.cursor()
355 | 
356 |     cursor.execute("""
357 |         SELECT content_hash, content, tags, metadata
358 |         FROM memories
359 |         WHERE memory_type = '' OR memory_type IS NULL
360 |     """)
361 | 
362 |     results = cursor.fetchall()
363 |     conn.close()
364 |     return results
365 | 
366 | 
367 | def assign_types(db_path: str, assignments: Dict[str, str], dry_run: bool = False) -> int:
368 |     """
369 |     Assign types to memories.
370 | 
371 |     Args:
372 |         db_path: Database path
373 |         assignments: {content_hash: inferred_type}
374 |         dry_run: If True, don't actually update
375 | 
376 |     Returns:
377 |         Number of memories updated
378 |     """
379 |     if dry_run:
380 |         logger.info(f"[DRY RUN] Would update {len(assignments)} memories")
381 |         return len(assignments)
382 | 
383 |     conn = sqlite3.connect(db_path)
384 |     cursor = conn.cursor()
385 | 
386 |     updated = 0
387 |     for content_hash, memory_type in assignments.items():
388 |         cursor.execute(
389 |             "UPDATE memories SET memory_type = ? WHERE content_hash = ?",
390 |             (memory_type, content_hash)
391 |         )
392 |         updated += cursor.rowcount
393 | 
394 |     conn.commit()
395 |     conn.close()
396 | 
397 |     logger.info(f"✅ Updated {updated} memories")
398 |     return updated
399 | 
400 | 
401 | # ============================================================================
402 | # MAIN SCRIPT
403 | # ============================================================================
404 | 
405 | def main():
406 |     parser = argparse.ArgumentParser(
407 |         description="Intelligently assign types to untyped memories",
408 |         formatter_class=argparse.RawDescriptionHelpFormatter,
409 |         epilog="""
410 | Examples:
411 |   # Preview assignments
412 |   python assign_memory_types.py --dry-run
413 | 
414 |   # Show detailed reasoning
415 |   python assign_memory_types.py --dry-run --show-reasoning
416 | 
417 |   # Execute assignments
418 |   python assign_memory_types.py
419 | 
420 |   # Verbose logging
421 |   python assign_memory_types.py --verbose
422 | """
423 |     )
424 |     parser.add_argument(
425 |         '--dry-run',
426 |         action='store_true',
427 |         help='Preview assignments without modifying database'
428 |     )
429 |     parser.add_argument(
430 |         '--show-reasoning',
431 |         action='store_true',
432 |         help='Show inference reasoning for each memory'
433 |     )
434 |     parser.add_argument(
435 |         '--verbose', '-v',
436 |         action='store_true',
437 |         help='Enable verbose logging'
438 |     )
439 |     parser.add_argument(
440 |         '--db-path',
441 |         type=str,
442 |         default=SQLITE_VEC_PATH,
443 |         help=f'Path to SQLite database (default: {SQLITE_VEC_PATH})'
444 |     )
445 | 
446 |     args = parser.parse_args()
447 | 
448 |     # Set logging level
449 |     if args.verbose:
450 |         logging.getLogger().setLevel(logging.DEBUG)
451 | 
452 |     # Check database exists
453 |     if not os.path.exists(args.db_path):
454 |         logger.error(f"❌ Database not found: {args.db_path}")
455 |         sys.exit(1)
456 | 
457 |     logger.info("=" * 80)
458 |     logger.info("🤖 Intelligent Memory Type Assignment")
459 |     logger.info("=" * 80)
460 |     logger.info(f"Database: {args.db_path}")
461 |     logger.info(f"Mode: {'DRY RUN (preview only)' if args.dry_run else 'EXECUTE (will modify)'}")
462 |     logger.info("")
463 | 
464 |     # Analyze current state
465 |     logger.info("📊 Analyzing database...")
466 |     untyped_count, total_count = analyze_untyped_memories(args.db_path)
467 | 
468 |     logger.info(f"Total memories: {total_count}")
469 |     logger.info(f"Untyped memories: {untyped_count} ({untyped_count/total_count*100:.1f}%)")
470 |     logger.info("")
471 | 
472 |     if untyped_count == 0:
473 |         logger.info("✅ No untyped memories found! Database is clean.")
474 |         return
475 | 
476 |     # Initialize inference engine
477 |     engine = TypeInferenceEngine(show_reasoning=args.show_reasoning)
478 | 
479 |     # Get untyped memories
480 |     logger.info("🔍 Retrieving untyped memories...")
481 |     untyped_memories = get_untyped_memories(args.db_path)
482 |     logger.info(f"Retrieved {len(untyped_memories)} untyped memories")
483 |     logger.info("")
484 | 
485 |     # Infer types
486 |     logger.info("🧠 Inferring types...")
487 |     assignments = {}
488 |     type_distribution = Counter()
489 |     confidence_distribution = Counter()
490 | 
491 |     for content_hash, content, tags_str, metadata_str in untyped_memories:
492 |         # Parse tags and metadata
493 |         tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
494 |         metadata = None
495 |         if metadata_str:
496 |             try:
497 |                 metadata = json.loads(metadata_str)
498 |             except json.JSONDecodeError:
499 |                 pass
500 | 
501 |         # Infer type
502 |         inferred_type, reasoning, confidence = engine.infer_type(content, tags, metadata)
503 | 
504 |         # Store assignment
505 |         assignments[content_hash] = inferred_type
506 |         type_distribution[inferred_type] += 1
507 |         confidence_distribution[confidence] += 1
508 | 
509 |         # Show reasoning if requested
510 |         if args.show_reasoning:
511 |             logger.info(f"{content_hash[:8]}... → {inferred_type} (conf={confidence})")
512 |             logger.info(f"  Reason: {reasoning}")
513 |             logger.info(f"  Tags: {tags[:3]}{'...' if len(tags) > 3 else ''}")
514 |             logger.info(f"  Preview: {content[:100]}...")
515 |             logger.info("")
516 | 
517 |     # Display statistics
518 |     logger.info("")
519 |     logger.info("=" * 80)
520 |     logger.info("📈 Inference Statistics")
521 |     logger.info("=" * 80)
522 | 
523 |     logger.info("\nInference Methods:")
524 |     for method, count in engine.get_stats().items():
525 |         logger.info(f"  {method}: {count}")
526 | 
527 |     logger.info("\nConfidence Distribution:")
528 |     logger.info(f"  High (tag match): {confidence_distribution[3]}")
529 |     logger.info(f"  Medium (pattern/metadata): {confidence_distribution[2]}")
530 |     logger.info(f"  Low (fallback): {confidence_distribution[1]}")
531 | 
532 |     logger.info("\nType Distribution:")
533 |     for memory_type, count in type_distribution.most_common():
534 |         logger.info(f"  {memory_type}: {count}")
535 | 
536 |     logger.info("")
537 |     logger.info("=" * 80)
538 | 
539 |     # Create backup and execute if not dry-run
540 |     if not args.dry_run:
541 |         logger.info("")
542 |         logger.info("💾 Creating backup...")
543 |         backup_path = create_backup(args.db_path)
544 | 
545 |         logger.info("")
546 |         logger.info("✍️  Assigning types...")
547 |         updated = assign_types(args.db_path, assignments, dry_run=False)
548 | 
549 |         logger.info("")
550 |         logger.info("=" * 80)
551 |         logger.info("✅ Type assignment completed successfully!")
552 |         logger.info(f"   Backup saved to: {backup_path}")
553 |         logger.info("=" * 80)
554 |     else:
555 |         logger.info("")
556 |         logger.info("⚠️  This was a DRY RUN - no changes were made")
557 |         logger.info("   Run without --dry-run to apply assignments")
558 |         logger.info("=" * 80)
559 | 
560 | 
561 | if __name__ == "__main__":
562 |     main()
563 | 
```
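
A small hedged sketch of exercising `TypeInferenceEngine.infer_type()` on its own, which makes the priority order (tags → content patterns → metadata → fallback) easy to sanity-check. The import assumes the script's directory is on `sys.path`; the sample contents and tags are invented.

```python
from assign_memory_types import TypeInferenceEngine  # assumes scripts/maintenance is on sys.path

engine = TypeInferenceEngine(show_reasoning=True)

# Tag-based inference wins first: "bug-fix" is in TAG_TO_TYPE, so confidence is 3.
print(engine.infer_type("Fixed the off-by-one error in pagination", ["bug-fix"], None))
# -> ('fix', "Tag match: 'bug-fix' → 'fix'", 3)

# No recognizable tags, so content patterns are tried next; the leading "How to"
# matches the guide pattern, giving confidence 2.
print(engine.infer_type("How to configure the staging environment", [], None))
# -> ('guide', "Content pattern: ...", 2)

# Neither tags, patterns, nor metadata match, so the fallback applies.
print(engine.infer_type("Random thought about lunch", [], None))
# -> ('note', "Fallback: No specific indicators → 'note'", 1)

print(engine.get_stats())  # {'tag_match': 1, 'pattern_match': 1, 'fallback': 1}
```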

--------------------------------------------------------------------------------
/tests/consolidation/test_clustering.py:
--------------------------------------------------------------------------------

```python
  1 | """Unit tests for the semantic clustering engine."""
  2 | 
  3 | import pytest
  4 | import numpy as np
  5 | from datetime import datetime, timedelta
  6 | 
  7 | from mcp_memory_service.consolidation.clustering import SemanticClusteringEngine
  8 | from mcp_memory_service.consolidation.base import MemoryCluster
  9 | from mcp_memory_service.models.memory import Memory
 10 | 
 11 | 
 12 | @pytest.mark.unit
 13 | class TestSemanticClusteringEngine:
 14 |     """Test the semantic clustering system."""
 15 |     
 16 |     @pytest.fixture
 17 |     def clustering_engine(self, consolidation_config):
 18 |         return SemanticClusteringEngine(consolidation_config)
 19 |     
 20 |     @pytest.mark.asyncio
 21 |     async def test_basic_clustering(self, clustering_engine, sample_memories):
 22 |         """Test basic clustering functionality."""
 23 |         # Use memories with embeddings
 24 |         memories_with_embeddings = [m for m in sample_memories if m.embedding]
 25 |         
 26 |         clusters = await clustering_engine.process(memories_with_embeddings)
 27 |         
 28 |         assert isinstance(clusters, list)
 29 |         assert all(isinstance(cluster, MemoryCluster) for cluster in clusters)
 30 |         
 31 |         for cluster in clusters:
 32 |             assert len(cluster.memory_hashes) >= clustering_engine.min_cluster_size
 33 |             assert isinstance(cluster.cluster_id, str)
 34 |             assert isinstance(cluster.centroid_embedding, list)
 35 |             assert len(cluster.centroid_embedding) > 0
 36 |             assert 0 <= cluster.coherence_score <= 1
 37 |             assert isinstance(cluster.created_at, datetime)
 38 |             assert isinstance(cluster.theme_keywords, list)
 39 |     
 40 |     @pytest.mark.asyncio 
 41 |     async def test_clustering_with_similar_embeddings(self, clustering_engine):
 42 |         """Test clustering with known similar embeddings."""
 43 |         base_time = datetime.now().timestamp()
 44 |         
 45 |         # Create memories with similar embeddings (should cluster together)
 46 |         similar_memories = []
 47 |         base_embedding = [0.5, 0.4, 0.6, 0.3, 0.7]
 48 |         
 49 |         for i in range(6):  # Create enough for min_cluster_size
 50 |             # Add small variations to base embedding
 51 |             embedding = []
 52 |             for val in base_embedding * 64:  # 320-dim
 53 |                 noise = np.random.normal(0, 0.05)  # Small noise
 54 |                 embedding.append(max(0, min(1, val + noise)))
 55 |             
 56 |             memory = Memory(
 57 |                 content=f"Similar content about programming topic {i}",
 58 |                 content_hash=f"similar_{i}",
 59 |                 tags=["programming", "similar"],
 60 |                 embedding=embedding,
 61 |                 created_at=base_time - (i * 3600)
 62 |             )
 63 |             similar_memories.append(memory)
 64 |         
 65 |         # Add some different memories
 66 |         for i in range(3):
 67 |             different_embedding = [0.1, 0.9, 0.2, 0.8, 0.1] * 64
 68 |             memory = Memory(
 69 |                 content=f"Different content about weather {i}",
 70 |                 content_hash=f"different_{i}",
 71 |                 tags=["weather", "different"],
 72 |                 embedding=different_embedding,
 73 |                 created_at=base_time - (i * 3600)
 74 |             )
 75 |             similar_memories.append(memory)
 76 |         
 77 |         clusters = await clustering_engine.process(similar_memories)
 78 |         
 79 |         # Should find at least one cluster
 80 |         assert len(clusters) >= 1
 81 |         
 82 |         # Check that similar memories are clustered together
 83 |         if clusters:
 84 |             # Find cluster with similar memories
 85 |             similar_cluster = None
 86 |             for cluster in clusters:
 87 |                 if any("similar" in hash_id for hash_id in cluster.memory_hashes):
 88 |                     similar_cluster = cluster
 89 |                     break
 90 |             
 91 |             if similar_cluster:
 92 |                 # Should contain multiple similar memories
 93 |                 similar_hashes = [h for h in similar_cluster.memory_hashes if "similar" in h]
 94 |                 assert len(similar_hashes) >= clustering_engine.min_cluster_size
 95 |     
 96 |     @pytest.mark.asyncio
 97 |     async def test_insufficient_memories(self, clustering_engine):
 98 |         """Test handling of insufficient memories for clustering."""
 99 |         # Create too few memories
100 |         few_memories = []
101 |         for i in range(2):  # Less than min_cluster_size
102 |             memory = Memory(
103 |                 content=f"Content {i}",
104 |                 content_hash=f"hash_{i}",
105 |                 tags=["test"],
106 |                 embedding=[0.5] * 320,
107 |                 created_at=datetime.now().timestamp()
108 |             )
109 |             few_memories.append(memory)
110 |         
111 |         clusters = await clustering_engine.process(few_memories)
112 |         assert clusters == []
113 |     
114 |     @pytest.mark.asyncio
115 |     async def test_memories_without_embeddings(self, clustering_engine):
116 |         """Test handling of memories without embeddings."""
117 |         memories_no_embeddings = []
118 |         for i in range(5):
119 |             memory = Memory(
120 |                 content=f"Content {i}",
121 |                 content_hash=f"hash_{i}",
122 |                 tags=["test"],
123 |                 embedding=None,  # No embedding
124 |                 created_at=datetime.now().timestamp()
125 |             )
126 |             memories_no_embeddings.append(memory)
127 |         
128 |         clusters = await clustering_engine.process(memories_no_embeddings)
129 |         assert clusters == []
130 |     
131 |     @pytest.mark.asyncio
132 |     async def test_theme_keyword_extraction(self, clustering_engine):
133 |         """Test extraction of theme keywords from clusters."""
134 |         # Create memories with common themes
135 |         themed_memories = []
136 |         base_embedding = [0.5, 0.5, 0.5, 0.5, 0.5] * 64
137 |         
138 |         for i in range(5):
139 |             memory = Memory(
140 |                 content=f"Python programming tutorial {i} about functions and classes",
141 |                 content_hash=f"python_{i}",
142 |                 tags=["python", "programming", "tutorial"],
143 |                 embedding=[val + np.random.normal(0, 0.02) for val in base_embedding],
144 |                 created_at=datetime.now().timestamp()
145 |             )
146 |             themed_memories.append(memory)
147 |         
148 |         clusters = await clustering_engine.process(themed_memories)
149 |         
150 |         if clusters:
151 |             cluster = clusters[0]
152 |             # Should extract relevant theme keywords
153 |             assert len(cluster.theme_keywords) > 0
154 |             
155 |             # Should include frequent tags
156 |             common_tags = {"python", "programming", "tutorial"}
157 |             found_tags = set(cluster.theme_keywords).intersection(common_tags)
158 |             assert len(found_tags) > 0
159 |     
160 |     @pytest.mark.asyncio
161 |     async def test_cluster_metadata(self, clustering_engine, sample_memories):
162 |         """Test that cluster metadata is properly populated."""
163 |         memories_with_embeddings = [m for m in sample_memories if m.embedding]
164 |         
165 |         if len(memories_with_embeddings) >= clustering_engine.min_cluster_size:
166 |             clusters = await clustering_engine.process(memories_with_embeddings)
167 |             
168 |             for cluster in clusters:
169 |                 assert 'algorithm' in cluster.metadata
170 |                 assert 'cluster_size' in cluster.metadata
171 |                 assert 'average_memory_age' in cluster.metadata
172 |                 assert 'tag_distribution' in cluster.metadata
173 |                 
174 |                 assert cluster.metadata['cluster_size'] == len(cluster.memory_hashes)
175 |                 assert isinstance(cluster.metadata['average_memory_age'], float)
176 |                 assert isinstance(cluster.metadata['tag_distribution'], dict)
177 |     
178 |     @pytest.mark.asyncio
179 |     async def test_simple_clustering_fallback(self, clustering_engine):
180 |         """Test simple clustering algorithm fallback."""
181 |         # Force simple clustering algorithm
182 |         original_algorithm = clustering_engine.algorithm
183 |         clustering_engine.algorithm = 'simple'
184 |         
185 |         try:
186 |             # Create memories with known similarity patterns
187 |             similar_memories = []
188 |             
189 |             # Group 1: High similarity
190 |             base1 = [0.8, 0.7, 0.9, 0.8, 0.7] * 64
191 |             for i in range(4):
192 |                 embedding = [val + np.random.normal(0, 0.01) for val in base1]
193 |                 memory = Memory(
194 |                     content=f"Group 1 content {i}",
195 |                     content_hash=f"group1_{i}",
196 |                     tags=["group1"],
197 |                     embedding=embedding,
198 |                     created_at=datetime.now().timestamp()
199 |                 )
200 |                 similar_memories.append(memory)
201 |             
202 |             # Group 2: Different but internally similar  
203 |             base2 = [0.2, 0.3, 0.1, 0.2, 0.3] * 64
204 |             for i in range(4):
205 |                 embedding = [val + np.random.normal(0, 0.01) for val in base2]
206 |                 memory = Memory(
207 |                     content=f"Group 2 content {i}",
208 |                     content_hash=f"group2_{i}",
209 |                     tags=["group2"],
210 |                     embedding=embedding,
211 |                     created_at=datetime.now().timestamp()
212 |                 )
213 |                 similar_memories.append(memory)
214 |             
215 |             clusters = await clustering_engine.process(similar_memories)
216 |             
217 |             # Simple algorithm should still find clusters
218 |             assert isinstance(clusters, list)
219 |             
220 |         finally:
221 |             clustering_engine.algorithm = original_algorithm
222 |     
223 |     @pytest.mark.asyncio
224 |     async def test_merge_similar_clusters(self, clustering_engine):
225 |         """Test merging of similar clusters."""
226 |         # Create two similar clusters
227 |         cluster1 = MemoryCluster(
228 |             cluster_id="cluster1",
229 |             memory_hashes=["hash1", "hash2"],
230 |             centroid_embedding=[0.5, 0.5, 0.5] * 107,  # ~320 dim
231 |             coherence_score=0.8,
232 |             created_at=datetime.now(),
233 |             theme_keywords=["python", "programming"]
234 |         )
235 |         
236 |         cluster2 = MemoryCluster(
237 |             cluster_id="cluster2", 
238 |             memory_hashes=["hash3", "hash4"],
239 |             centroid_embedding=[0.52, 0.48, 0.51] * 107,  # Similar to cluster1
240 |             coherence_score=0.7,
241 |             created_at=datetime.now(),
242 |             theme_keywords=["python", "coding"]
243 |         )
244 |         
245 |         # Very different cluster
246 |         cluster3 = MemoryCluster(
247 |             cluster_id="cluster3",
248 |             memory_hashes=["hash5", "hash6"],
249 |             centroid_embedding=[0.1, 0.9, 0.1] * 107,  # Very different
250 |             coherence_score=0.6,
251 |             created_at=datetime.now(),
252 |             theme_keywords=["weather", "forecast"]
253 |         )
254 |         
255 |         clusters = [cluster1, cluster2, cluster3]
256 |         merged_clusters = await clustering_engine.merge_similar_clusters(
257 |             clusters, similarity_threshold=0.9
258 |         )
259 |         
260 |         # Should merge similar clusters
261 |         assert len(merged_clusters) <= len(clusters)
262 |         
263 |         # Check that merged cluster contains memories from both original clusters
264 |         if len(merged_clusters) < len(clusters):
265 |             # Find the merged cluster (should have more memories)
266 |             merged = max(merged_clusters, key=lambda c: len(c.memory_hashes))
267 |             assert len(merged.memory_hashes) >= 4  # Combined from cluster1 and cluster2
268 |     
269 |     @pytest.mark.asyncio
270 |     async def test_coherence_score_calculation(self, clustering_engine):
271 |         """Test coherence score calculation for clusters."""
272 |         # Create tightly clustered memories
273 |         tight_memories = []
274 |         base_embedding = [0.5, 0.5, 0.5, 0.5, 0.5] * 64
275 |         
276 |         for i in range(5):
277 |             # Very similar embeddings (high coherence)
278 |             embedding = [val + np.random.normal(0, 0.01) for val in base_embedding]
279 |             memory = Memory(
280 |                 content=f"Tight cluster content {i}",
281 |                 content_hash=f"tight_{i}",
282 |                 tags=["tight"],
283 |                 embedding=embedding,
284 |                 created_at=datetime.now().timestamp()
285 |             )
286 |             tight_memories.append(memory)
287 |         
288 |         # Create loosely clustered memories
289 |         loose_memories = []
290 |         for i in range(5):
291 |             # More varied embeddings (lower coherence)
292 |             embedding = [val + np.random.normal(0, 0.1) for val in base_embedding]
293 |             memory = Memory(
294 |                 content=f"Loose cluster content {i}",
295 |                 content_hash=f"loose_{i}",
296 |                 tags=["loose"],
297 |                 embedding=embedding,
298 |                 created_at=datetime.now().timestamp()
299 |             )
300 |             loose_memories.append(memory)
301 |         
302 |         tight_clusters = await clustering_engine.process(tight_memories)
303 |         loose_clusters = await clustering_engine.process(loose_memories)
304 |         
305 |         # Tight clusters should have higher coherence scores
306 |         if tight_clusters and loose_clusters:
307 |             tight_coherence = tight_clusters[0].coherence_score
308 |             loose_coherence = loose_clusters[0].coherence_score
309 |             
310 |             # This may not always be true due to randomness, but generally should be
311 |             # Just check that coherence scores are in valid range
312 |             assert 0 <= tight_coherence <= 1
313 |             assert 0 <= loose_coherence <= 1
314 |     
315 |     @pytest.mark.asyncio
316 |     async def test_algorithm_fallback_handling(self, clustering_engine):
317 |         """Test handling of different clustering algorithms."""
318 |         memories = []
319 |         base_embedding = [0.5, 0.4, 0.6, 0.3, 0.7] * 64
320 |         
321 |         for i in range(8):  # Enough for clustering
322 |             embedding = [val + np.random.normal(0, 0.05) for val in base_embedding]
323 |             memory = Memory(
324 |                 content=f"Test content {i}",
325 |                 content_hash=f"test_{i}",
326 |                 tags=["test"],
327 |                 embedding=embedding,
328 |                 created_at=datetime.now().timestamp()
329 |             )
330 |             memories.append(memory)
331 |         
332 |         # Test different algorithms
333 |         algorithms = ['simple', 'dbscan', 'hierarchical']
334 |         
335 |         for algorithm in algorithms:
336 |             original_algorithm = clustering_engine.algorithm
337 |             clustering_engine.algorithm = algorithm
338 |             
339 |             try:
340 |                 clusters = await clustering_engine.process(memories)
341 |                 
342 |                 # All algorithms should return valid clusters
343 |                 assert isinstance(clusters, list)
344 |                 for cluster in clusters:
345 |                     assert isinstance(cluster, MemoryCluster)
346 |                     assert cluster.metadata['algorithm'] in [algorithm, f"{algorithm}_merged"]
347 |                     
348 |             finally:
349 |                 clustering_engine.algorithm = original_algorithm
350 |     
351 |     @pytest.mark.asyncio
352 |     async def test_empty_input_handling(self, clustering_engine):
353 |         """Test handling of empty input."""
354 |         clusters = await clustering_engine.process([])
355 |         assert clusters == []
356 |     
357 |     @pytest.mark.asyncio
358 |     async def test_average_age_calculation(self, clustering_engine):
359 |         """Test average age calculation in cluster metadata."""
360 |         now = datetime.now()
361 |         memories = []
362 |         
363 |         # Create memories with known ages
364 |         ages = [1, 3, 5, 7, 9]  # days ago
365 |         for i, age in enumerate(ages):
366 |             memory = Memory(
367 |                 content=f"Content {i}",
368 |                 content_hash=f"age_test_{i}",
369 |                 tags=["age_test"],
370 |                 embedding=[0.5 + i*0.01] * 320,  # Slightly different embeddings
371 |                 created_at=(now - timedelta(days=age)).timestamp()
372 |             )
373 |             memories.append(memory)
374 |         
375 |         clusters = await clustering_engine.process(memories)
376 |         
377 |         if clusters:
378 |             cluster = clusters[0]
379 |             avg_age = cluster.metadata['average_memory_age']
380 |             
381 |             # Average age should be approximately the mean of our test ages
382 |             expected_avg = sum(ages) / len(ages)
383 |             assert abs(avg_age - expected_avg) < 1  # Within 1 day tolerance
384 |     
385 |     @pytest.mark.asyncio
386 |     async def test_tag_distribution_analysis(self, clustering_engine):
387 |         """Test tag distribution analysis in clusters."""
388 |         memories = []
389 |         base_embedding = [0.5] * 320
390 |         
391 |         # Create memories with specific tag patterns
392 |         tag_patterns = [
393 |             ["python", "programming"],
394 |             ["python", "tutorial"],
395 |             ["programming", "guide"],
396 |             ["python", "programming"],  # Duplicate pattern
397 |             ["tutorial", "guide"]
398 |         ]
399 |         
400 |         for i, tags in enumerate(tag_patterns):
401 |             memory = Memory(
402 |                 content=f"Content {i}",
403 |                 content_hash=f"tag_test_{i}",
404 |                 tags=tags,
405 |                 embedding=[val + i*0.01 for val in base_embedding],
406 |                 created_at=datetime.now().timestamp()
407 |             )
408 |             memories.append(memory)
409 |         
410 |         clusters = await clustering_engine.process(memories)
411 |         
412 |         if clusters:
413 |             cluster = clusters[0]
414 |             tag_dist = cluster.metadata['tag_distribution']
415 |             
416 |             # Should count tag frequencies correctly  
417 |             assert isinstance(tag_dist, dict)
418 |             assert tag_dist.get("python", 0) >= 2  # Appears multiple times
419 |             assert tag_dist.get("programming", 0) >= 2  # Appears multiple times
```

--------------------------------------------------------------------------------
/scripts/migration/migrate_storage.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Storage Migration Tool for MCP Memory Service
 18 | 
 19 | This script helps migrate memory data between different storage backends
 20 | (ChromaDB and sqlite-vec).
 21 | 
 22 | Usage:
 23 |     python scripts/migrate_storage.py --from chroma --to sqlite_vec
 24 |     python scripts/migrate_storage.py --from sqlite_vec --to chroma --backup
 25 | """
 26 | 
 27 | import argparse
 28 | import asyncio
 29 | import json
 30 | import logging
 31 | import os
 32 | import sys
 33 | import tempfile
 34 | from datetime import datetime
 35 | from pathlib import Path
 36 | from typing import List, Dict, Any
 37 | 
 38 | # Add the src directory to the path
 39 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
 40 | 
 41 | from mcp_memory_service.models.memory import Memory
 42 | from mcp_memory_service.storage.base import MemoryStorage
 43 | from mcp_memory_service.storage.chroma import ChromaMemoryStorage
 44 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 45 | 
 46 | # Configure logging
 47 | logging.basicConfig(
 48 |     level=logging.INFO,
 49 |     format='%(asctime)s - %(levelname)s - %(message)s'
 50 | )
 51 | logger = logging.getLogger(__name__)
 52 | 
 53 | 
 54 | class MigrationTool:
 55 |     """Tool for migrating memory data between storage backends."""
 56 |     
 57 |     def __init__(self):
 58 |         self.source_storage = None
 59 |         self.target_storage = None
 60 |     
 61 |     async def export_memories(self, storage: MemoryStorage) -> List[Dict[str, Any]]:
 62 |         """Export all memories from a storage backend."""
 63 |         logger.info("Exporting memories from source storage...")
 64 |         
 65 |         exported_memories = []
 66 |         
 67 |         try:
 68 |             # For ChromaDB, we need to get all memories via a broad search
 69 |             if hasattr(storage, 'collection') and storage.collection:
 70 |                 # Get all memories from ChromaDB
 71 |                 results = storage.collection.get()
 72 |                 
 73 |                 if results and results.get("ids"):
 74 |                     for i, memory_id in enumerate(results["ids"]):
 75 |                         try:
 76 |                             metadata = results["metadatas"][i] if results.get("metadatas") else {}
 77 |                             document = results["documents"][i] if results.get("documents") else ""
 78 |                             embedding = results["embeddings"][i] if results.get("embeddings") else None
 79 |                             
 80 |                             # Convert metadata to memory format
 81 |                             memory_data = {
 82 |                                 "content": document,
 83 |                                 "content_hash": metadata.get("content_hash", ""),
 84 |                                 "tags": metadata.get("tags_str", "").split(",") if metadata.get("tags_str") else [],
 85 |                                 "memory_type": metadata.get("type"),
 86 |                                 "metadata": {k: v for k, v in metadata.items() 
 87 |                                            if k not in ["content_hash", "tags_str", "type", 
 88 |                                                       "timestamp", "timestamp_float", "timestamp_str",
 89 |                                                       "created_at", "created_at_iso", "updated_at", "updated_at_iso"]},
 90 |                                 "embedding": embedding,
 91 |                                 "created_at": metadata.get("created_at"),
 92 |                                 "created_at_iso": metadata.get("created_at_iso"),
 93 |                                 "updated_at": metadata.get("updated_at"),
 94 |                                 "updated_at_iso": metadata.get("updated_at_iso")
 95 |                             }
 96 |                             
 97 |                             exported_memories.append(memory_data)
 98 |                             
 99 |                         except Exception as e:
100 |                             logger.warning(f"Failed to export memory {memory_id}: {e}")
101 |                             continue
102 |             
103 |             elif hasattr(storage, 'conn') and storage.conn:
104 |                 # Get all memories from SQLite-vec
105 |                 cursor = storage.conn.execute('''
106 |                     SELECT content_hash, content, tags, memory_type, metadata,
107 |                            created_at, updated_at, created_at_iso, updated_at_iso
108 |                     FROM memories
109 |                     ORDER BY created_at
110 |                 ''')
111 |                 
112 |                 for row in cursor.fetchall():
113 |                     try:
114 |                         content_hash, content, tags_str, memory_type, metadata_str = row[:5]
115 |                         created_at, updated_at, created_at_iso, updated_at_iso = row[5:]
116 |                         
117 |                         # Parse tags and metadata
118 |                         tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
119 |                         metadata = json.loads(metadata_str) if metadata_str else {}
120 |                         
121 |                         memory_data = {
122 |                             "content": content,
123 |                             "content_hash": content_hash,
124 |                             "tags": tags,
125 |                             "memory_type": memory_type,
126 |                             "metadata": metadata,
127 |                             "embedding": None,  # Will be regenerated on import
128 |                             "created_at": created_at,
129 |                             "created_at_iso": created_at_iso,
130 |                             "updated_at": updated_at,
131 |                             "updated_at_iso": updated_at_iso
132 |                         }
133 |                         
134 |                         exported_memories.append(memory_data)
135 |                         
136 |                     except Exception as e:
137 |                         logger.warning(f"Failed to export memory: {e}")
138 |                         continue
139 |             
140 |             logger.info(f"Exported {len(exported_memories)} memories")
141 |             return exported_memories
142 |             
143 |         except Exception as e:
144 |             logger.error(f"Failed to export memories: {e}")
145 |             raise
146 |     
147 |     async def import_memories(self, storage: MemoryStorage, memories: List[Dict[str, Any]]) -> int:
148 |         """Import memories into a storage backend."""
149 |         logger.info(f"Importing {len(memories)} memories to target storage...")
150 |         
151 |         imported_count = 0
152 |         failed_count = 0
153 |         
154 |         for memory_data in memories:
155 |             try:
156 |                 # Create Memory object
157 |                 memory = Memory(
158 |                     content=memory_data["content"],
159 |                     content_hash=memory_data["content_hash"],
160 |                     tags=memory_data.get("tags", []),
161 |                     memory_type=memory_data.get("memory_type"),
162 |                     metadata=memory_data.get("metadata", {}),
163 |                     embedding=memory_data.get("embedding"),
164 |                     created_at=memory_data.get("created_at"),
165 |                     created_at_iso=memory_data.get("created_at_iso"),
166 |                     updated_at=memory_data.get("updated_at"),
167 |                     updated_at_iso=memory_data.get("updated_at_iso")
168 |                 )
169 |                 
170 |                 # Store the memory
171 |                 success, message = await storage.store(memory)
172 |                 
173 |                 if success:
174 |                     imported_count += 1
175 |                     if imported_count % 100 == 0:
176 |                         logger.info(f"Imported {imported_count} memories...")
177 |                 else:
178 |                     failed_count += 1
179 |                     logger.warning(f"Failed to import memory {memory_data['content_hash']}: {message}")
180 |                     
181 |             except Exception as e:
182 |                 failed_count += 1
183 |                 logger.warning(f"Failed to import memory: {e}")
184 |                 continue
185 |         
186 |         logger.info(f"Import complete: {imported_count} successful, {failed_count} failed")
187 |         return imported_count
188 |     
189 |     async def create_backup(self, memories: List[Dict[str, Any]], backup_path: str) -> str:
190 |         """Create a JSON backup of exported memories."""
191 |         backup_data = {
192 |             "version": "1.0",
193 |             "timestamp": datetime.now().isoformat(),
194 |             "total_memories": len(memories),
195 |             "memories": memories
196 |         }
197 |         
198 |         os.makedirs(os.path.dirname(backup_path) or '.', exist_ok=True)
199 |         
200 |         with open(backup_path, 'w') as f:
201 |             json.dump(backup_data, f, indent=2)
202 |         
203 |         logger.info(f"Created backup at: {backup_path}")
204 |         return backup_path
205 |     
206 |     async def load_backup(self, backup_path: str) -> List[Dict[str, Any]]:
207 |         """Load memories from a JSON backup file."""
208 |         with open(backup_path, 'r') as f:
209 |             backup_data = json.load(f)
210 |         
211 |         memories = backup_data.get("memories", [])
212 |         logger.info(f"Loaded {len(memories)} memories from backup: {backup_path}")
213 |         return memories
214 |     
215 |     async def migrate(self, from_backend: str, to_backend: str, 
216 |                      source_path: str, target_path: str,
217 |                      create_backup: bool = False, backup_path: str = None) -> bool:
218 |         """Perform migration between storage backends."""
219 |         try:
220 |             logger.info(f"Starting migration from {from_backend} to {to_backend}")
221 |             
222 |             # Initialize source storage
223 |             if from_backend == 'chroma':
224 |                 self.source_storage = ChromaMemoryStorage(source_path)
225 |             elif from_backend == 'sqlite_vec':
226 |                 self.source_storage = SqliteVecMemoryStorage(source_path)
227 |             else:
228 |                 raise ValueError(f"Unsupported source backend: {from_backend}")
229 |             
230 |             await self.source_storage.initialize()
231 |             logger.info(f"Initialized source storage ({from_backend})")
232 |             
233 |             # Export memories
234 |             memories = await self.export_memories(self.source_storage)
235 |             
236 |             if not memories:
237 |                 logger.warning("No memories found in source storage")
238 |                 return False
239 |             
240 |             # Create backup if requested
241 |             if create_backup:
242 |                 if not backup_path:
243 |                     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
244 |                     backup_path = f"memory_backup_{from_backend}_to_{to_backend}_{timestamp}.json"
245 |                 
246 |                 await self.create_backup(memories, backup_path)
247 |             
248 |             # Initialize target storage
249 |             if to_backend == 'chroma':
250 |                 self.target_storage = ChromaMemoryStorage(target_path)
251 |             elif to_backend == 'sqlite_vec':
252 |                 self.target_storage = SqliteVecMemoryStorage(target_path)
253 |             else:
254 |                 raise ValueError(f"Unsupported target backend: {to_backend}")
255 |             
256 |             await self.target_storage.initialize()
257 |             logger.info(f"Initialized target storage ({to_backend})")
258 |             
259 |             # Import memories
260 |             imported_count = await self.import_memories(self.target_storage, memories)
261 |             
262 |             logger.info(f"Migration completed successfully: {imported_count} memories migrated")
263 |             return True
264 |             
265 |         except Exception as e:
266 |             logger.error(f"Migration failed: {e}")
267 |             return False
268 |         
269 |         finally:
270 |             # Clean up connections
271 |             if self.source_storage and hasattr(self.source_storage, 'close'):
272 |                 self.source_storage.close()
273 |             if self.target_storage and hasattr(self.target_storage, 'close'):
274 |                 self.target_storage.close()
275 | 
276 | 
277 | async def main():
278 |     """Main entry point for the migration tool."""
279 |     parser = argparse.ArgumentParser(
280 |         description="Migrate memory data between storage backends",
281 |         formatter_class=argparse.RawDescriptionHelpFormatter,
282 |         epilog="""
283 | Examples:
284 |   # Migrate from ChromaDB to sqlite-vec
285 |   python scripts/migrate_storage.py --from chroma --to sqlite_vec \\
286 |     --source-path /path/to/chroma_db --target-path /path/to/sqlite_vec.db
287 | 
288 |   # Migrate with backup
289 |   python scripts/migrate_storage.py --from chroma --to sqlite_vec \\
290 |     --source-path /path/to/chroma_db --target-path /path/to/sqlite_vec.db \\
291 |     --backup --backup-path backup.json
292 | 
293 |   # Restore from backup
294 |   python scripts/migrate_storage.py --restore backup.json \\
295 |     --to sqlite_vec --target-path /path/to/sqlite_vec.db
296 |         """
297 |     )
298 |     
299 |     parser.add_argument('--from', dest='from_backend', choices=['chroma', 'sqlite_vec'],
300 |                        help='Source storage backend')
301 |     parser.add_argument('--to', dest='to_backend', choices=['chroma', 'sqlite_vec'],
302 |                        required=True, help='Target storage backend')
303 |     parser.add_argument('--source-path', help='Path to source storage')
304 |     parser.add_argument('--target-path', required=True, help='Path to target storage')
305 |     parser.add_argument('--backup', action='store_true', 
306 |                        help='Create backup before migration')
307 |     parser.add_argument('--backup-path', help='Custom backup file path')
308 |     parser.add_argument('--restore', help='Restore from backup file instead of migrating')
309 |     parser.add_argument('--dry-run', action='store_true', 
310 |                        help='Show what would be migrated without actually doing it')
311 |     parser.add_argument('--verbose', '-v', action='store_true',
312 |                        help='Enable verbose logging')
313 |     
314 |     args = parser.parse_args()
315 |     
316 |     if args.verbose:
317 |         logging.getLogger().setLevel(logging.DEBUG)
318 |     
319 |     # Validate arguments
320 |     if not args.restore and not args.from_backend:
321 |         parser.error("--from is required unless using --restore")
322 |     
323 |     if not args.restore and not args.source_path:
324 |         parser.error("--source-path is required unless using --restore")
325 |     
326 |     if args.from_backend == args.to_backend:
327 |         parser.error("Source and target backends cannot be the same")
328 |     
329 |     migration_tool = MigrationTool()
330 |     
331 |     try:
332 |         if args.restore:
333 |             # Restore from backup
334 |             logger.info(f"Restoring from backup: {args.restore}")
335 |             
336 |             if not os.path.exists(args.restore):
337 |                 logger.error(f"Backup file not found: {args.restore}")
338 |                 return 1
339 |             
340 |             memories = await migration_tool.load_backup(args.restore)
341 |             
342 |             if args.dry_run:
343 |                 logger.info(f"DRY RUN: Would restore {len(memories)} memories to {args.to_backend}")
344 |                 return 0
345 |             
346 |             # Initialize target storage
347 |             if args.to_backend == 'chroma':
348 |                 target_storage = ChromaMemoryStorage(args.target_path)
349 |             else:
350 |                 target_storage = SqliteVecMemoryStorage(args.target_path)
351 |             
352 |             await target_storage.initialize()
353 |             imported_count = await migration_tool.import_memories(target_storage, memories)
354 |             
355 |             if hasattr(target_storage, 'close'):
356 |                 target_storage.close()
357 |             
358 |             logger.info(f"Restoration completed: {imported_count} memories restored")
359 |             
360 |         else:
361 |             # Regular migration
362 |             if args.dry_run:
363 |                 logger.info(f"DRY RUN: Would migrate from {args.from_backend} to {args.to_backend}")
364 |                 
365 |                 # Initialize source storage and count memories
366 |                 if args.from_backend == 'chroma':
367 |                     source_storage = ChromaMemoryStorage(args.source_path)
368 |                 else:
369 |                     source_storage = SqliteVecMemoryStorage(args.source_path)
370 |                 
371 |                 await source_storage.initialize()
372 |                 memories = await migration_tool.export_memories(source_storage)
373 |                 
374 |                 if hasattr(source_storage, 'close'):
375 |                     source_storage.close()
376 |                 
377 |                 logger.info(f"DRY RUN: Found {len(memories)} memories to migrate")
378 |                 return 0
379 |             
380 |             # Perform actual migration
381 |             success = await migration_tool.migrate(
382 |                 from_backend=args.from_backend,
383 |                 to_backend=args.to_backend,
384 |                 source_path=args.source_path,
385 |                 target_path=args.target_path,
386 |                 create_backup=args.backup,
387 |                 backup_path=args.backup_path
388 |             )
389 |             
390 |             if not success:
391 |                 logger.error("Migration failed")
392 |                 return 1
393 |         
394 |         logger.info("Operation completed successfully")
395 |         return 0
396 |         
397 |     except KeyboardInterrupt:
398 |         logger.info("Operation cancelled by user")
399 |         return 1
400 |     except Exception as e:
401 |         logger.error(f"Operation failed: {e}")
402 |         return 1
403 | 
404 | 
405 | if __name__ == "__main__":
406 |     sys.exit(asyncio.run(main()))
```
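
A minimal sketch of driving the migration tool above programmatically rather than through its CLI. It assumes `scripts/migration` is on `sys.path` (or that you run the snippet from that directory); the storage paths are placeholders.

```python
# Hypothetical programmatic use of MigrationTool; paths below are placeholders.
import asyncio

from migrate_storage import MigrationTool  # assumes scripts/migration is importable


async def run_migration() -> None:
    tool = MigrationTool()
    ok = await tool.migrate(
        from_backend="chroma",
        to_backend="sqlite_vec",
        source_path="/path/to/chroma_db",
        target_path="/path/to/sqlite_vec.db",
        create_backup=True,   # also writes a timestamped JSON backup
        backup_path=None,     # let the tool choose a default file name
    )
    print("Migration succeeded" if ok else "Migration failed")


if __name__ == "__main__":
    asyncio.run(run_migration())
```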

--------------------------------------------------------------------------------
/docs/cloudflare-setup.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Cloudflare Backend Setup Guide
  2 | 
  3 | ## Overview
  4 | 
  5 | The MCP Memory Service supports native Cloudflare integration using Vectorize for vector storage, D1 for metadata, and optional R2 for large content. This provides:
  6 | 
  7 | - **Vectorize**: Vector database for semantic search (768-dimensional embeddings)
  8 | - **D1**: SQLite database for metadata storage
  9 | - **Workers AI**: Embedding generation (@cf/baai/bge-base-en-v1.5)
 10 | - **R2** (optional): Object storage for large content
 11 | 
 12 | This setup provides global distribution, automatic scaling, and cost-effective pay-per-use pricing.
 13 | 
 14 | ## 🚀 Quick Start
 15 | 
 16 | For users who want to get started immediately:
 17 | 
 18 | ### Prerequisites
 19 | 1. **Cloudflare Account**: You need a Cloudflare account with Workers/D1/Vectorize access
 20 | 2. **API Token**: Create an API token with these permissions:
 21 |    - **Vectorize Edit** (for creating and managing vector indexes)
 22 |    - **D1 Edit** (for creating and managing databases)
 23 |    - **R2 Edit** (optional, for large content storage)
 24 |    - **Workers AI Read** (for embedding generation)
 25 | 
 26 | ### Quick Setup Commands
 27 | 
 28 | ```bash
 29 | # 1. Install dependencies
 30 | pip install "httpx>=0.24.0"
 31 | 
 32 | # 2. Create Cloudflare resources (requires wrangler CLI)
 33 | wrangler vectorize create mcp-memory-index --dimensions=768 --metric=cosine
 34 | wrangler d1 create mcp-memory-db
 35 | wrangler r2 bucket create mcp-memory-content  # Optional
 36 | 
 37 | # 3. Configure environment
 38 | export MCP_MEMORY_STORAGE_BACKEND=cloudflare
 39 | export CLOUDFLARE_API_TOKEN="your-api-token"
 40 | export CLOUDFLARE_ACCOUNT_ID="your-account-id"
 41 | export CLOUDFLARE_VECTORIZE_INDEX="mcp-memory-index"
 42 | export CLOUDFLARE_D1_DATABASE_ID="your-d1-database-id"
 43 | export CLOUDFLARE_R2_BUCKET="mcp-memory-content"  # Optional
 44 | 
 45 | # 4. Test and start
 46 | python -m src.mcp_memory_service.server
 47 | 
 48 | # Alternative startup methods:
 49 | # uv run memory server          # Modern CLI (recommended)
 50 | # python scripts/run_memory_server.py  # Direct script execution
 51 | ```
 52 | 
 53 | > **⚠️ Important**: Cloudflare backend uses Workers AI for embedding generation, so do NOT use `scripts/memory_offline.py` which sets offline mode. Use the standard startup methods above instead.
 54 | 
 55 | ## Prerequisites
 56 | 
 57 | 1. **Cloudflare Account**: Sign up at [cloudflare.com](https://www.cloudflare.com/)
 58 | 2. **Cloudflare Services**: Access to Vectorize, D1, and optionally R2
 59 | 3. **API Token**: With appropriate permissions
 60 | 
 61 | ## Step 1: Create Cloudflare Resources
 62 | 
 63 | ### 1.1 Create Vectorize Index
 64 | 
 65 | ```bash
 66 | # Install Wrangler CLI
 67 | npm install -g wrangler
 68 | 
 69 | # Login to Cloudflare
 70 | wrangler login
 71 | 
 72 | # Create Vectorize index (768 dimensions for BGE embeddings)
 73 | wrangler vectorize create mcp-memory-index --dimensions=768 --metric=cosine
 74 | ```
 75 | 
 76 | ### 1.2 Create D1 Database
 77 | 
 78 | ```bash
 79 | # Create D1 database
 80 | wrangler d1 create mcp-memory-db
 81 | 
 82 | # Note the database ID from the output
 83 | ```
 84 | 
 85 | ### 1.3 Create R2 Bucket (Optional)
 86 | 
 87 | ```bash
 88 | # Create R2 bucket for large content storage
 89 | wrangler r2 bucket create mcp-memory-content
 90 | ```
 91 | 
 92 | ## Step 2: Configure API Token
 93 | 
 94 | ### 2.1 Create API Token
 95 | 
 96 | 1. Go to [Cloudflare Dashboard → My Profile → API Tokens](https://dash.cloudflare.com/profile/api-tokens)
 97 | 2. Click "Create Token"
 98 | 3. Use "Custom Token" template
 99 | 4. Configure permissions:
100 |    - **Account**: `Read` (to access account resources)
101 |    - **Vectorize**: `Edit` (to manage vector operations)
102 |    - **D1**: `Edit` (to manage database operations)
103 |    - **R2**: `Edit` (if using R2 for large content)
104 |    - **Workers AI**: `Read` (for embedding generation)
105 | 
106 | ### 2.2 Get Account ID
107 | 
108 | 1. Go to [Cloudflare Dashboard](https://dash.cloudflare.com/)
109 | 2. Select your domain or go to overview
110 | 3. Copy the Account ID from the right sidebar
111 | 
112 | ### 2.3 Manual Resource Creation (Alternative)
113 | 
114 | If you prefer manual creation via the Cloudflare Dashboard or encounter authentication issues:
115 | 
116 | **Create Vectorize Index via Dashboard:**
117 | 1. Go to [Cloudflare Dashboard → Vectorize](https://dash.cloudflare.com/vectorize)
118 | 2. Click "Create Index"
119 | 3. Name: `mcp-memory-index`
120 | 4. Dimensions: `768`
121 | 5. Metric: `cosine`
122 | 
123 | **Create D1 Database via Dashboard:**
124 | 1. Go to [Cloudflare Dashboard → D1](https://dash.cloudflare.com/d1)
125 | 2. Click "Create Database"
126 | 3. Name: `mcp-memory-db`
127 | 4. Copy the Database ID from the overview page
128 | 
129 | **Create R2 Bucket via Dashboard (Optional):**
130 | 1. Go to [Cloudflare Dashboard → R2](https://dash.cloudflare.com/r2)
131 | 2. Click "Create Bucket"
132 | 3. Name: `mcp-memory-content`
133 | 4. Choose region closest to your location
134 | 
135 | **Alternative API Creation:**
136 | ```bash
137 | # Create Vectorize index via API
138 | curl -X POST "https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/vectorize/indexes" \
139 |   -H "Authorization: Bearer YOUR_API_TOKEN" \
140 |   -H "Content-Type: application/json" \
141 |   -d '{
142 |     "name": "mcp-memory-index",
143 |     "config": {
144 |       "dimensions": 768,
145 |       "metric": "cosine"
146 |     }
147 |   }'
148 | 
149 | # Create D1 database via API
150 | curl -X POST "https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/d1/database" \
151 |   -H "Authorization: Bearer YOUR_API_TOKEN" \
152 |   -H "Content-Type: application/json" \
153 |   -d '{
154 |     "name": "mcp-memory-db"
155 |   }'
156 | 
157 | # Create R2 bucket via API (optional)
158 | curl -X POST "https://api.cloudflare.com/client/v4/accounts/YOUR_ACCOUNT_ID/r2/buckets" \
159 |   -H "Authorization: Bearer YOUR_API_TOKEN" \
160 |   -H "Content-Type: application/json" \
161 |   -d '{
162 |     "name": "mcp-memory-content"
163 |   }'
164 | ```
165 | 
166 | ## Step 3: Configure Environment Variables
167 | 
168 | Set the following environment variables:
169 | 
170 | ```bash
171 | # Required Configuration
172 | export MCP_MEMORY_STORAGE_BACKEND=cloudflare
173 | export CLOUDFLARE_API_TOKEN="your-api-token-here"
174 | export CLOUDFLARE_ACCOUNT_ID="your-account-id-here"
175 | export CLOUDFLARE_VECTORIZE_INDEX="mcp-memory-index"
176 | export CLOUDFLARE_D1_DATABASE_ID="your-d1-database-id"
177 | 
178 | # Optional Configuration
179 | export CLOUDFLARE_R2_BUCKET="mcp-memory-content"  # For large content
180 | export CLOUDFLARE_EMBEDDING_MODEL="@cf/baai/bge-base-en-v1.5"  # Default
181 | export CLOUDFLARE_LARGE_CONTENT_THRESHOLD="1048576"  # 1MB threshold
182 | export CLOUDFLARE_MAX_RETRIES="3"  # API retry attempts
183 | export CLOUDFLARE_BASE_DELAY="1.0"  # Retry delay in seconds
184 | ```
185 | 
186 | ### Configuration File Example
187 | 
188 | Create a `.env` file in your project root:
189 | 
190 | ```env
191 | # Cloudflare Backend Configuration
192 | MCP_MEMORY_STORAGE_BACKEND=cloudflare
193 | 
194 | # Required Cloudflare Settings
195 | CLOUDFLARE_API_TOKEN=your-api-token-here
196 | CLOUDFLARE_ACCOUNT_ID=your-account-id-here
197 | CLOUDFLARE_VECTORIZE_INDEX=mcp-memory-index
198 | CLOUDFLARE_D1_DATABASE_ID=your-d1-database-id
199 | 
200 | # Optional Settings
201 | CLOUDFLARE_R2_BUCKET=mcp-memory-content
202 | CLOUDFLARE_EMBEDDING_MODEL=@cf/baai/bge-base-en-v1.5
203 | CLOUDFLARE_LARGE_CONTENT_THRESHOLD=1048576
204 | CLOUDFLARE_MAX_RETRIES=3
205 | CLOUDFLARE_BASE_DELAY=1.0
206 | 
207 | # Logging
208 | LOG_LEVEL=INFO
209 | ```
210 | 
211 | ## Step 4: Install Dependencies
212 | 
213 | The Cloudflare backend requires additional dependencies:
214 | 
215 | ```bash
216 | # Install additional requirements
217 | pip install -r requirements-cloudflare.txt
218 | 
219 | # Or install manually
220 | pip install "httpx>=0.24.0"
221 | ```
222 | 
223 | ## Step 5: Initialize and Test
224 | 
225 | ### 5.1 Start the Service
226 | 
227 | ```bash
228 | # Start MCP Memory Service with Cloudflare backend
229 | python -m src.mcp_memory_service.server
230 | ```
231 | 
232 | ### 5.2 Verify Configuration
233 | 
234 | The service will automatically:
235 | 1. Initialize the D1 database schema
236 | 2. Verify access to the Vectorize index
237 | 3. Check R2 bucket access (if configured)
238 | 
239 | Look for these success messages in the logs:
240 | ```
241 | INFO:mcp_memory_service.config:Using Cloudflare backend with:
242 | INFO:mcp_memory_service.config:  Vectorize Index: mcp-memory-index
243 | INFO:mcp_memory_service.config:  D1 Database: your-d1-database-id
244 | INFO:mcp_memory_service.server:Created Cloudflare storage with Vectorize index: mcp-memory-index
245 | INFO:mcp_memory_service.storage.cloudflare:Cloudflare storage backend initialized successfully
246 | ```
247 | 
248 | ### 5.3 Test Basic Operations
249 | 
250 | **Option A: Comprehensive Test Suite**
251 | ```bash
252 | # Run comprehensive automated tests
253 | python scripts/test_cloudflare_backend.py
254 | ```
255 | 
256 | **Option B: Manual API Testing**
257 | ```bash
258 | # Store a test memory
259 | curl -X POST http://localhost:8000/api/memories \
260 |   -H "Content-Type: application/json" \
261 |   -d '{
262 |     "content": "This is a test memory for Cloudflare backend",
263 |     "tags": ["test", "cloudflare"]
264 |   }'
265 | 
266 | # Search memories
267 | curl -X POST http://localhost:8000/api/memories/search \
268 |   -H "Content-Type: application/json" \
269 |   -d '{
270 |     "query": "test memory",
271 |     "n_results": 5
272 |   }'
273 | 
274 | # Get statistics
275 | curl http://localhost:8000/api/stats
276 | ```
277 | 
278 | **Option C: Automated Resource Setup**
279 | ```bash
280 | # Set up Cloudflare resources automatically
281 | python scripts/setup_cloudflare_resources.py
282 | ```
283 | 
284 | ## Architecture Details
285 | 
286 | ### Data Flow
287 | 
288 | 1. **Content Storage**:
289 |    - Small content (<1MB): Stored directly in D1
290 |    - Large content (>1MB): Stored in R2, referenced in D1
291 | 
292 | 2. **Vector Processing**:
293 |    - Content → Workers AI → Embedding Vector
294 |    - Vector stored in Vectorize with metadata
295 |    - Semantic search via Vectorize similarity
296 | 
297 | 3. **Metadata Management**:
298 |    - Memory metadata stored in D1 SQLite
299 |    - Tags stored in relational tables
300 |    - Full ACID compliance for data integrity
301 | 
302 | ### Performance Optimizations
303 | 
304 | - **Connection Pooling**: Reused HTTP connections
305 | - **Embedding Caching**: 1000-entry LRU cache
306 | - **Batch Operations**: Bulk vector operations
307 | - **Smart Retries**: Exponential backoff for rate limits
308 | - **Async Operations**: Non-blocking I/O throughout
309 | 
310 | ### Security Features
311 | 
312 | - **API Key Security**: Never logged or exposed
313 | - **Input Validation**: SQL injection prevention
314 | - **Rate Limiting**: Built-in protection
315 | - **Secure Headers**: Proper HTTP security
316 | 
317 | ## Migration from Other Backends
318 | 
319 | ### From SQLite-vec
320 | 
321 | ```bash
322 | # Export existing data
323 | python scripts/export_sqlite_vec.py --output cloudflare_export.json
324 | 
325 | # Switch to Cloudflare backend
326 | export MCP_MEMORY_STORAGE_BACKEND=cloudflare
327 | 
328 | # Import data
329 | python scripts/import_to_cloudflare.py --input cloudflare_export.json
330 | ```
331 | 
332 | ### From ChromaDB
333 | 
334 | ```bash
335 | # Export ChromaDB data
336 | python scripts/export_chroma.py --output cloudflare_export.json
337 | 
338 | # Switch to Cloudflare backend
339 | export MCP_MEMORY_STORAGE_BACKEND=cloudflare
340 | 
341 | # Import data
342 | python scripts/import_to_cloudflare.py --input cloudflare_export.json
343 | ```
344 | 
345 | ## Troubleshooting
346 | 
347 | ### Common Issues
348 | 
349 | #### 1. Authentication Errors (401)
350 | 
351 | ```
352 | ERROR: Missing required environment variables for Cloudflare backend: CLOUDFLARE_API_TOKEN
353 | ERROR: Unauthorized - Invalid API token
354 | ```
355 | 
356 | **Solution**: 
357 | - Verify all required environment variables are set
358 | - Check API token has correct permissions (Vectorize:Edit, D1:Edit, Workers AI:Read)
359 | - Ensure token is not expired
360 | - Verify account ID is correct
361 | 
362 | #### 2. Resource Not Found (404)
363 | 
364 | ```
365 | ValueError: Vectorize index 'mcp-memory-index' not found
366 | ValueError: D1 database not found
367 | ```
368 | 
369 | **Solution**: 
370 | - Create the Vectorize index or verify the index name is correct
371 | - Check that resources were created in the correct account
372 | - Confirm resource IDs/names match exactly
373 | - Use the D1 database ID (not the database name) for `CLOUDFLARE_D1_DATABASE_ID`
374 | 
375 | #### 3. Vector Storage Errors (400)
376 | 
377 | ```
378 | ValueError: Failed to store vector data
379 | HTTP 400: Invalid vector data format
380 | ```
381 | 
382 | **Solution**: 
383 | - Check vector dimensions (must be 768)
384 | - Verify NDJSON format for vector data
385 | - Ensure metadata values are properly serialized
386 | - Validate input data types
387 | 
388 | #### 4. D1 Database Access Issues
389 | 
390 | ```
391 | ValueError: Failed to initialize D1 schema
392 | HTTP 403: Insufficient permissions
393 | ```
394 | 
395 | **Solution**: 
396 | - Verify D1 database ID and API token permissions
397 | - Ensure database exists and is accessible
398 | - Check API token has D1:Edit permissions
399 | 
400 | #### 5. API Rate Limits (429)
401 | 
402 | ```
403 | Rate limited after 3 retries
404 | HTTP 429: Too Many Requests
405 | ```
406 | 
407 | **Solution**: 
408 | - Increase `CLOUDFLARE_MAX_RETRIES` or `CLOUDFLARE_BASE_DELAY` for more conservative retry behavior
409 | - Implement exponential backoff (already included)
410 | - Monitor API usage through Cloudflare dashboard
411 | - Consider implementing request caching for high-volume usage
412 | 
413 | ### Debug Mode
414 | 
415 | Enable detailed logging:
416 | 
417 | ```bash
418 | export LOG_LEVEL=DEBUG
419 | python -m src.mcp_memory_service.server --debug
420 | ```
421 | 
422 | ### Health Check
423 | 
424 | ```bash
425 | # Check backend health
426 | curl http://localhost:8000/api/health
427 | 
428 | # Get detailed statistics
429 | curl http://localhost:8000/api/stats
430 | ```
431 | 
432 | ## Limitations
433 | 
434 | ### Current Limitations
435 | 
436 | - **Embedding Model**: Fixed to Workers AI BGE model (768 dimensions)
437 | - **Content Size**: R2 storage recommended for content >1MB
438 | - **Rate Limits**: Subject to Cloudflare service limits
439 | - **Region**: Embedding generation uses Cloudflare's global network
440 | 
441 | ### Planned Improvements
442 | 
443 | - **Local Embedding Fallback**: For offline or restricted environments
444 | - **Custom Embedding Models**: Support for other embedding models
445 | - **Enhanced Caching**: Multi-level caching strategy
446 | - **Batch Import Tools**: Efficient migration utilities
447 | 
448 | ## 🔄 Multi-Machine Bidirectional Sync
449 | 
450 | **New in v6.13.7**: Cloudflare backend now supports seamless bidirectional sync between multiple machines, making it ideal for distributed teams or as a replacement for failed centralized servers.
451 | 
452 | ### Use Cases
453 | 
454 | 1. **Failed Server Recovery**: Replace a failed narrowbox/central server with Cloudflare
455 | 2. **Multi-Machine Development**: Sync memories across multiple development machines
456 | 3. **Team Collaboration**: Share memory context across team members
457 | 4. **Backup Strategy**: Cloudflare as primary with local sqlite_vec as backup
458 | 
459 | ### Architecture
460 | 
461 | ```
462 | ┌─────────────────┐    ┌─────────────────┐
463 | │   Machine A     │    │   Machine B     │
464 | │                 │    │                 │
465 | │ Claude Desktop  │    │ Claude Desktop  │
466 | │      ↕          │    │       ↕         │
467 | │ sqlite_vec      │    │ sqlite_vec      │
468 | │   (backup)      │    │   (backup)      │
469 | └─────────┬───────┘    └─────────┬───────┘
470 |           │                      │
471 |           └─────────┬────────────┘
472 |                     ↕
473 |           ┌─────────────────┐
474 |           │   Cloudflare    │
475 |           │                 │
476 |           │ D1 Database     │
477 |           │ Vectorize Index │
478 |           │ Workers AI      │
479 |           └─────────────────┘
480 | ```
481 | 
482 | ### Setup Process
483 | 
484 | #### 1. Initial Migration (from failed server)
485 | 
486 | ```bash
487 | # Export memories from existing machine
488 | memory export /path/to/export.json
489 | 
490 | # Set up Cloudflare environment
491 | export CLOUDFLARE_API_TOKEN="your-token"
492 | export CLOUDFLARE_ACCOUNT_ID="your-account"
493 | export CLOUDFLARE_D1_DATABASE_ID="your-d1-id"
494 | export CLOUDFLARE_VECTORIZE_INDEX="mcp-memory-index"
495 | export MCP_MEMORY_STORAGE_BACKEND="cloudflare"
496 | 
497 | # Import to Cloudflare
498 | python scripts/import_to_cloudflare.py /path/to/export.json
499 | ```
500 | 
501 | #### 2. Configure Each Machine
502 | 
503 | **Claude Desktop Configuration** (`claude_desktop_config.json`):
504 | ```json
505 | {
506 |   "mcpServers": {
507 |     "memory": {
508 |       "command": "/path/to/memory",
509 |       "args": ["server"],
510 |       "env": {
511 |         "MCP_MEMORY_STORAGE_BACKEND": "cloudflare",
512 |         "MCP_MEMORY_SQLITE_PATH": "/local/backup/path/sqlite_vec.db",
513 |         "CLOUDFLARE_API_TOKEN": "your-token",
514 |         "CLOUDFLARE_ACCOUNT_ID": "your-account",
515 |         "CLOUDFLARE_D1_DATABASE_ID": "your-d1-id",
516 |         "CLOUDFLARE_VECTORIZE_INDEX": "mcp-memory-index"
517 |       }
518 |     }
519 |   }
520 | }
521 | ```
522 | 
523 | #### 3. Verification Testing
524 | 
525 | Test bidirectional sync by storing and retrieving memories from each machine:
526 | 
527 | ```python
528 | # Test script for verification
529 | import asyncio
530 | from mcp_memory_service.storage.cloudflare import CloudflareStorage
531 | from mcp_memory_service.models.memory import Memory
532 | async def test_sync():
533 |     storage = CloudflareStorage(...)
534 |     await storage.initialize()
535 | 
536 |     # Store test memory
537 |     test_memory = Memory(content="Test from Machine A", tags=["sync-test"])
538 |     success, message = await storage.store(test_memory)
539 | 
540 |     # Verify from other machine
541 |     results = await storage.retrieve("Test from Machine A")
542 |     print(f"Sync verified: {len(results)} results found")
543 | ```
544 | 
545 | ### Important Notes
546 | 
547 | - **v6.13.7 Required**: This version fixes the critical Vectorize ID length issue
548 | - **Breaking Change**: Vector IDs changed format from v6.13.6 (removed "mem_" prefix)
549 | - **Backup Strategy**: Local sqlite_vec files are maintained for fallback
550 | - **Migration Time**: Allow extra time for initial memory migration to Cloudflare
551 | 
552 | ### Troubleshooting Sync Issues
553 | 
554 | #### Vector ID Length Error (Fixed in v6.13.7)
555 | ```
556 | Error: "id too long; max is 64 bytes, got 68 bytes"
557 | ```
558 | **Solution**: Update to v6.13.7 or later
559 | 
560 | #### Environment Variable Issues
561 | **Problem**: Memories not syncing between machines
562 | **Solution**:
563 | - Verify identical environment variables on all machines
564 | - Check Claude Desktop configuration matches exactly
565 | - Restart Claude Desktop after config changes
566 | 
567 | #### Sync Verification
568 | ```bash
569 | # Check memory count on each machine
570 | memory status
571 | 
572 | # Test cross-machine visibility
573 | memory retrieve "test query from other machine"
574 | ```
575 | 
576 | ## Support
577 | 
578 | For issues and questions:
579 | 
580 | 1. **Documentation**: Check this guide and API documentation
581 | 2. **GitHub Issues**: Report bugs at the project repository
582 | 3. **Cloudflare Support**: For Cloudflare service-specific issues
583 | 4. **Community**: Join the project Discord/community channels
584 | 
585 | ## Performance Benchmarks
586 | 
587 | ### Typical Performance
588 | 
589 | - **Storage**: ~200ms per memory (including embedding generation)
590 | - **Search**: ~100ms for semantic search (5 results)
591 | - **Batch Operations**: ~50ms per memory in batches of 100
592 | - **Global Latency**: <100ms from most global locations
593 | 
594 | ### Optimization Tips
595 | 
596 | 1. **Batch Operations**: Use bulk operations when possible
597 | 2. **Content Strategy**: Use R2 for large content
598 | 3. **Caching**: Enable embedding caching
599 | 4. **Connection Pooling**: Reuse HTTP connections
600 | 5. **Regional Deployment**: Deploy close to your users
601 | 
```
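
For readers tuning the retry settings covered in the guide above (Troubleshooting #5 and the `CLOUDFLARE_MAX_RETRIES` / `CLOUDFLARE_BASE_DELAY` variables), the sketch below shows one way a client could apply those values with exponential backoff. It is an illustration of the documented behaviour, not the service's actual implementation.

```python
# Illustrative retry helper driven by the documented environment variables.
# The backend's built-in retry logic may differ in detail.
import asyncio
import os

import httpx

MAX_RETRIES = int(os.environ.get("CLOUDFLARE_MAX_RETRIES", "3"))
BASE_DELAY = float(os.environ.get("CLOUDFLARE_BASE_DELAY", "1.0"))


async def call_with_backoff(client: httpx.AsyncClient, method: str, url: str, **kwargs) -> httpx.Response:
    """Retry 429/5xx responses, sleeping BASE_DELAY * 2**attempt between tries."""
    for attempt in range(MAX_RETRIES + 1):
        response = await client.request(method, url, **kwargs)
        if response.status_code not in (429, 500, 502, 503, 504):
            return response
        if attempt == MAX_RETRIES:
            break
        await asyncio.sleep(BASE_DELAY * (2 ** attempt))
    response.raise_for_status()
    return response
```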

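Similarly, the "Data Flow" section's size-based routing (small content to D1, large content to R2) can be pictured as a single threshold check against `CLOUDFLARE_LARGE_CONTENT_THRESHOLD`. This is a simplified sketch of the described behaviour, not the backend's actual code.

```python
# Size-based routing as described in "Data Flow"; simplified illustration only.
import os

LARGE_CONTENT_THRESHOLD = int(os.environ.get("CLOUDFLARE_LARGE_CONTENT_THRESHOLD", "1048576"))  # 1 MB


def storage_target(content: str) -> str:
    """Return 'r2' for content above the threshold, 'd1' otherwise."""
    return "r2" if len(content.encode("utf-8")) > LARGE_CONTENT_THRESHOLD else "d1"
```
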
--------------------------------------------------------------------------------
/claude-hooks/core/mid-conversation.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Mid-Conversation Memory Hook
  3 |  * Intelligently triggers memory awareness during conversations based on natural language patterns
  4 |  */
  5 | 
  6 | const { TieredConversationMonitor } = require('../utilities/tiered-conversation-monitor');
  7 | const { AdaptivePatternDetector } = require('../utilities/adaptive-pattern-detector');
  8 | const { PerformanceManager } = require('../utilities/performance-manager');
  9 | const { MemoryClient } = require('../utilities/memory-client');
 10 | const { scoreMemoryRelevance } = require('../utilities/memory-scorer');
 11 | const { formatMemoriesForContext } = require('../utilities/context-formatter');
 12 | 
 13 | class MidConversationHook {
 14 |     constructor(config = {}) {
 15 |         this.config = config;
 16 | 
 17 |         // Decision weighting constants
 18 |         this.TRIGGER_WEIGHTS = {
 19 |             PATTERN_CONFIDENCE: 0.6,
 20 |             CONVERSATION_CONTEXT: 0.4,
 21 |             SEMANTIC_SHIFT_BOOST: 0.2,
 22 |             QUESTION_PATTERN_BOOST: 0.1,
 23 |             PAST_WORK_BOOST: 0.15
 24 |         };
 25 | 
 26 |         this.THRESHOLD_VALUES = {
 27 |             CONVERSATION_PROBABILITY_MIN: 0.3,
 28 |             SEMANTIC_SHIFT_MIN: 0.6,
 29 |             SPEED_MODE_CONFIDENCE_MIN: 0.8,
 30 |             SPEED_MODE_REDUCTION: 0.8
 31 |         };
 32 | 
 33 |         // Initialize performance management
 34 |         this.performanceManager = new PerformanceManager(config.performance);
 35 | 
 36 |         // Initialize components with performance awareness
 37 |         this.conversationMonitor = new TieredConversationMonitor(
 38 |             config.conversationMonitor,
 39 |             this.performanceManager
 40 |         );
 41 | 
 42 |         this.patternDetector = new AdaptivePatternDetector(
 43 |             config.patternDetector,
 44 |             this.performanceManager
 45 |         );
 46 | 
 47 |         // Memory client for queries
 48 |         this.memoryClient = null;
 49 | 
 50 |         // Hook state - read from correct nested config paths
 51 |         const midConversationConfig = config.hooks?.midConversation || {};
 52 |         const naturalTriggersConfig = config.naturalTriggers || {};
 53 | 
 54 |         this.isEnabled = naturalTriggersConfig.enabled !== false;
 55 |         this.lastTriggerTime = 0;
 56 |         this.cooldownPeriod = naturalTriggersConfig.cooldownPeriod || 30000; // 30 seconds between triggers
 57 | 
 58 |         // Analytics
 59 |         this.analytics = {
 60 |             totalAnalyses: 0,
 61 |             triggersExecuted: 0,
 62 |             userAcceptanceRate: 0,
 63 |             averageLatency: 0,
 64 |             totalFeedback: 0
 65 |         };
 66 |     }
 67 | 
 68 |     /**
 69 |      * Analyze user message for memory trigger needs
 70 |      */
 71 |     async analyzeMessage(userMessage, context = {}) {
 72 |         if (!this.isEnabled) return null;
 73 | 
 74 |         const timing = this.performanceManager.startTiming('mid_conversation_analysis', 'fast');
 75 | 
 76 |         try {
 77 |             this.analytics.totalAnalyses++;
 78 | 
 79 |             // Check cooldown period
 80 |             if (Date.now() - this.lastTriggerTime < this.cooldownPeriod) {
 81 |                 return this.createResult('cooldown', 'Cooldown period active', 0);
 82 |             }
 83 | 
 84 |             // Phase 1: Conversation monitoring
 85 |             const conversationAnalysis = await this.conversationMonitor.analyzeMessage(userMessage, context);
 86 | 
 87 |             // Phase 2: Pattern detection
 88 |             const patternResults = await this.patternDetector.detectPatterns(userMessage, {
 89 |                 ...context,
 90 |                 conversationAnalysis
 91 |             });
 92 | 
 93 |             // Phase 3: Combined decision making
 94 |             const triggerDecision = this.makeTriggerDecision(conversationAnalysis, patternResults, context);
 95 | 
 96 |             // Update last trigger time if we're recommending a trigger
 97 |             if (triggerDecision.shouldTrigger) {
 98 |                 this.lastTriggerTime = Date.now();
 99 |             }
100 | 
101 |             // Record performance
102 |             const performanceResult = this.performanceManager.endTiming(timing);
103 |             this.analytics.averageLatency = this.updateAverageLatency(performanceResult.latency);
104 | 
105 |             return {
106 |                 shouldTrigger: triggerDecision.shouldTrigger,
107 |                 confidence: triggerDecision.confidence,
108 |                 reasoning: triggerDecision.reasoning,
109 |                 conversationAnalysis,
110 |                 patternResults,
111 |                 performance: performanceResult,
112 |                 timestamp: Date.now()
113 |             };
114 | 
115 |         } catch (error) {
116 |             console.error('[Mid-Conversation Hook] Analysis failed:', error.message);
117 |             this.performanceManager.endTiming(timing);
118 |             return this.createResult('error', `Analysis failed: ${error.message}`, 0);
119 |         }
120 |     }
121 | 
122 |     /**
123 |      * Execute memory retrieval and context injection
124 |      */
125 |     async executeMemoryTrigger(analysisResult, context = {}) {
126 |         if (!analysisResult.shouldTrigger) return null;
127 | 
128 |         const timing = this.performanceManager.startTiming('memory_trigger_execution', 'intensive');
129 | 
130 |         try {
131 |             // Initialize memory client if needed
132 |             if (!this.memoryClient) {
133 |                 this.memoryClient = new MemoryClient(this.config.memoryService || {});
134 |                 await this.memoryClient.connect();
135 |             }
136 | 
137 |             // Build enhanced query based on analysis
138 |             const memoryQuery = this.buildMemoryQuery(analysisResult, context);
139 | 
140 |             // Retrieve relevant memories
141 |             const memories = await this.queryMemories(memoryQuery);
142 | 
143 |             if (memories.length === 0) {
144 |                 return this.createResult('no_memories', 'No relevant memories found', analysisResult.confidence);
145 |             }
146 | 
147 |             // Score and format memories
148 |             const scoredMemories = scoreMemoryRelevance(memories, context.projectContext, {
149 |                 verbose: false,
150 |                 enhanceRecency: true
151 |             });
152 | 
153 |             const contextMessage = formatMemoriesForContext(
154 |                 scoredMemories.slice(0, this.config.maxMemoriesPerTrigger || 5),
155 |                 context.projectContext,
156 |                 {
157 |                     includeScore: false,
158 |                     groupByCategory: scoredMemories.length > 3,
159 |                     maxContentLength: 400,
160 |                     includeTimestamp: true
161 |                 }
162 |             );
163 | 
164 |             // Record successful trigger
165 |             this.analytics.triggersExecuted++;
166 | 
167 |             const performanceResult = this.performanceManager.endTiming(timing);
168 | 
169 |             return {
170 |                 success: true,
171 |                 contextMessage,
172 |                 memoriesFound: memories.length,
173 |                 memoriesUsed: Math.min(scoredMemories.length, this.config.maxMemoriesPerTrigger || 5),
174 |                 confidence: analysisResult.confidence,
175 |                 performance: performanceResult,
176 |                 triggerType: 'mid_conversation'
177 |             };
178 | 
179 |         } catch (error) {
180 |             console.error('[Mid-Conversation Hook] Memory trigger failed:', error.message);
181 |             this.performanceManager.endTiming(timing);
182 |             return this.createResult('execution_error', `Memory trigger failed: ${error.message}`, analysisResult.confidence);
183 |         }
184 |     }
185 | 
186 |     /**
187 |      * Make intelligent trigger decision based on all analyses
188 |      */
189 |     makeTriggerDecision(conversationAnalysis, patternResults, context) {
190 |         let confidence = 0;
191 |         const reasons = [];
192 | 
193 |         // Weight pattern detection heavily for explicit requests
194 |         if (patternResults.triggerRecommendation) {
195 |             confidence += patternResults.confidence * this.TRIGGER_WEIGHTS.PATTERN_CONFIDENCE;
196 |             reasons.push(`Pattern detection: ${patternResults.confidence.toFixed(2)} confidence`);
197 |         }
198 | 
199 |         // Add conversation context weighting
200 |         if (conversationAnalysis.triggerProbability > this.THRESHOLD_VALUES.CONVERSATION_PROBABILITY_MIN) {
201 |             confidence += conversationAnalysis.triggerProbability * this.TRIGGER_WEIGHTS.CONVERSATION_CONTEXT;
202 |             reasons.push(`Conversation analysis: ${conversationAnalysis.triggerProbability.toFixed(2)} probability`);
203 |         }
204 | 
205 |         // Boost for semantic shift (topic change)
206 |         if (conversationAnalysis.semanticShift > this.THRESHOLD_VALUES.SEMANTIC_SHIFT_MIN) {
207 |             confidence += this.TRIGGER_WEIGHTS.SEMANTIC_SHIFT_BOOST;
208 |             reasons.push(`Semantic shift detected: ${conversationAnalysis.semanticShift.toFixed(2)}`);
209 |         }
210 | 
211 |         // Context-specific adjustments
212 |         if (context.isQuestionPattern) {
213 |             confidence += this.TRIGGER_WEIGHTS.QUESTION_PATTERN_BOOST;
214 |             reasons.push('Question pattern detected');
215 |         }
216 | 
217 |         if (context.mentionsPastWork) {
218 |             confidence += this.TRIGGER_WEIGHTS.PAST_WORK_BOOST;
219 |             reasons.push('References past work');
220 |         }
221 | 
222 |         // Apply performance profile considerations
223 |         const profile = this.performanceManager.performanceBudget;
224 |         if (profile.maxLatency < 200 && confidence < this.THRESHOLD_VALUES.SPEED_MODE_CONFIDENCE_MIN) {
225 |             // In speed-focused mode, scale confidence down so only strong signals trigger
226 |             confidence *= this.THRESHOLD_VALUES.SPEED_MODE_REDUCTION;
227 |             reasons.push('Speed mode: confidence reduced to require a stronger signal');
228 |         }
229 | 
230 |         // Final decision threshold
231 |         const threshold = this.config.naturalTriggers?.triggerThreshold || 0.6;
232 |         const shouldTrigger = confidence >= threshold;
233 | 
234 |         return {
235 |             shouldTrigger,
236 |             confidence: Math.min(confidence, 1.0),
237 |             reasoning: reasons.join('; '),
238 |             threshold,
239 |             details: {
240 |                 conversationWeight: conversationAnalysis.triggerProbability * this.TRIGGER_WEIGHTS.CONVERSATION_CONTEXT,
241 |                 patternWeight: patternResults.confidence * this.TRIGGER_WEIGHTS.PATTERN_CONFIDENCE,
242 |                 contextAdjustments: confidence - (conversationAnalysis.triggerProbability * this.TRIGGER_WEIGHTS.CONVERSATION_CONTEXT + patternResults.confidence * this.TRIGGER_WEIGHTS.PATTERN_CONFIDENCE)
243 |             }
244 |         };
245 |     }
246 | 
247 |     /**
248 |      * Build optimized memory query based on analysis
249 |      */
250 |     buildMemoryQuery(analysisResult, context) {
251 |         const query = {
252 |             semanticQuery: '',
253 |             tags: [],
254 |             limit: this.config.maxMemoriesPerTrigger || 5,
255 |             timeFilter: 'last-month'
256 |         };
257 | 
258 |         // Extract key topics from conversation analysis
259 |         if (analysisResult.conversationAnalysis.topics.length > 0) {
260 |             query.semanticQuery += analysisResult.conversationAnalysis.topics.join(' ');
261 |         }
262 | 
263 |         // Add project context
264 |         if (context.projectContext) {
265 |             query.semanticQuery += ` ${context.projectContext.name}`;
266 |             query.tags.push(context.projectContext.name);
267 | 
268 |             if (context.projectContext.language) {
269 |                 query.tags.push(`language:${context.projectContext.language}`);
270 |             }
271 |         }
272 | 
273 |         // Add pattern-based context
274 |         for (const match of analysisResult.patternResults.matches) {
275 |             if (match.category === 'explicitMemoryRequests') {
276 |                 query.timeFilter = 'last-week'; // Recent memories for explicit requests
277 |             } else if (match.category === 'technicalDiscussions') {
278 |                 query.tags.push('architecture', 'decisions');
279 |             }
280 |         }
281 | 
282 |         // Ensure we have a meaningful query
283 |         if (!query.semanticQuery.trim()) {
284 |             query.semanticQuery = 'project context decisions';
285 |         }
286 | 
287 |         return query;
288 |     }
289 | 
290 |     /**
291 |      * Query memories using unified memory client
292 |      */
293 |     async queryMemories(query) {
294 |         try {
295 |             let memories = [];
296 | 
297 |             if (query.timeFilter) {
298 |                 const timeQuery = `${query.semanticQuery} ${query.timeFilter}`;
299 |                 memories = await this.memoryClient.queryMemoriesByTime(timeQuery, query.limit);
300 |             } else {
301 |                 memories = await this.memoryClient.queryMemories(query.semanticQuery, query.limit);
302 |             }
303 | 
304 |             return memories || [];
305 |         } catch (error) {
306 |             console.warn('[Mid-Conversation Hook] Memory query failed:', error.message);
307 |             return [];
308 |         }
309 |     }
310 | 
311 |     /**
312 |      * Handle user feedback on trigger quality
313 |      */
314 |     recordUserFeedback(analysisResult, wasHelpful, context = {}) {
315 |         // Update analytics
316 |         this.updateAcceptanceRate(wasHelpful);
317 | 
318 |         // Pass feedback to components for learning
319 |         this.patternDetector.recordUserFeedback(wasHelpful, analysisResult.patternResults, context);
320 |         this.performanceManager.recordUserFeedback(wasHelpful, {
321 |             latency: analysisResult.performance?.latency || 0
322 |         });
323 | 
324 |         // Log feedback for analysis
325 |         console.log(`[Mid-Conversation Hook] User feedback: ${wasHelpful ? 'helpful' : 'not helpful'} (confidence: ${analysisResult.confidence?.toFixed(2)})`);
326 |     }
327 | 
328 |     /**
329 |      * Update performance profile
330 |      */
331 |     updatePerformanceProfile(profileName) {
332 |         this.performanceManager.switchProfile(profileName);
333 |         this.conversationMonitor.updatePerformanceProfile(profileName);
334 | 
335 |         console.log(`[Mid-Conversation Hook] Switched to performance profile: ${profileName}`);
336 |     }
337 | 
338 |     /**
339 |      * Get hook status and analytics
340 |      */
341 |     getStatus() {
342 |         return {
343 |             enabled: this.isEnabled,
344 |             lastTriggerTime: this.lastTriggerTime,
345 |             cooldownRemaining: Math.max(0, this.cooldownPeriod - (Date.now() - this.lastTriggerTime)),
346 |             analytics: this.analytics,
347 |             performance: this.performanceManager.getPerformanceReport(),
348 |             conversationMonitor: this.conversationMonitor.getPerformanceStatus(),
349 |             patternDetector: this.patternDetector.getStatistics()
350 |         };
351 |     }
352 | 
353 |     /**
354 |      * Enable or disable the hook
355 |      */
356 |     setEnabled(enabled) {
357 |         this.isEnabled = enabled;
358 |         console.log(`[Mid-Conversation Hook] ${enabled ? 'Enabled' : 'Disabled'}`);
359 |     }
360 | 
361 |     /**
362 |      * Helper methods
363 |      */
364 | 
365 |     createResult(type, message, confidence) {
366 |         return {
367 |             shouldTrigger: false,
368 |             confidence,
369 |             reasoning: message,
370 |             type,
371 |             timestamp: Date.now()
372 |         };
373 |     }
374 | 
375 |     updateAverageLatency(newLatency) {
376 |         const alpha = 0.1; // Exponential moving average factor
377 |         return this.analytics.averageLatency * (1 - alpha) + newLatency * alpha;
378 |     }
379 | 
380 |     updateAcceptanceRate(wasPositive) {
381 |         // Increment feedback counter
382 |         this.analytics.totalFeedback++;
383 | 
384 |         const totalFeedback = this.analytics.totalFeedback;
385 |         if (totalFeedback === 1) {
386 |             // First feedback sets the initial rate
387 |             this.analytics.userAcceptanceRate = wasPositive ? 1.0 : 0.0;
388 |         } else {
389 |             // Update running average
390 |             const currentRate = this.analytics.userAcceptanceRate;
391 |             this.analytics.userAcceptanceRate = (currentRate * (totalFeedback - 1) + (wasPositive ? 1 : 0)) / totalFeedback;
392 |         }
393 |     }
394 | 
395 |     /**
396 |      * Cleanup resources
397 |      */
398 |     async cleanup() {
399 |         if (this.memoryClient) {
400 |             try {
401 |                 await this.memoryClient.disconnect();
402 |             } catch (error) {
403 |                 // Ignore cleanup errors
404 |             }
405 |             this.memoryClient = null;
406 |         }
407 |     }
408 | }
409 | 
410 | /**
411 |  * Global hook instance for state management
412 |  */
413 | let globalHookInstance = null;
414 | 
415 | /**
416 |  * Get or create the hook instance (singleton pattern)
417 |  */
418 | function getHookInstance(config) {
419 |     if (!globalHookInstance) {
420 |         globalHookInstance = new MidConversationHook(config || {});
421 |         console.log('[Mid-Conversation Hook] Created new hook instance');
422 |     }
423 |     return globalHookInstance;
424 | }
425 | 
426 | /**
427 |  * Reset hook instance (for testing or config changes)
428 |  */
429 | function resetHookInstance() {
430 |     if (globalHookInstance) {
431 |         globalHookInstance.cleanup().catch((error) => {
432 |             // Log cleanup errors during reset but don't throw
433 |             console.debug('[Mid-Conversation Hook] Cleanup error during reset:', error.message);
434 |         });
435 |         globalHookInstance = null;
436 |         console.log('[Mid-Conversation Hook] Reset hook instance');
437 |     }
438 | }
439 | 
440 | /**
441 |  * Hook function for Claude Code integration
442 |  */
443 | async function onMidConversation(context) {
444 |     // This would be called by Claude Code during conversation flow
445 |     // Implementation depends on how Claude Code exposes mid-conversation hooks
446 | 
447 |     const hook = getHookInstance(context.config);
448 | 
449 |     try {
450 |         // Analyze the current message
451 |         const analysis = await hook.analyzeMessage(context.userMessage, context);
452 | 
453 |         if (analysis && analysis.shouldTrigger) {
454 |             // Execute memory trigger
455 |             const result = await hook.executeMemoryTrigger(analysis, context);
456 | 
457 |             if (result && result.success && context.injectSystemMessage) {
458 |                 await context.injectSystemMessage(result.contextMessage);
459 |                 console.log(`[Mid-Conversation Hook] Injected ${result.memoriesUsed} memories (confidence: ${result.confidence.toFixed(2)})`);
460 |             }
461 |         }
462 | 
463 |     } catch (error) {
464 |         console.error('[Mid-Conversation Hook] Hook execution failed:', error.message);
465 |         // Don't cleanup on error - preserve state for next call
466 |     }
467 | }
468 | 
469 | module.exports = {
470 |     MidConversationHook,
471 |     onMidConversation,
472 |     getHookInstance,
473 |     resetHookInstance,
474 |     name: 'mid-conversation-memory',
475 |     version: '1.0.0',
476 |     description: 'Intelligent mid-conversation memory awareness with performance optimization',
477 |     trigger: 'mid-conversation',
478 |     handler: onMidConversation,
479 |     config: {
480 |         async: true,
481 |         timeout: 10000,
482 |         priority: 'high'
483 |     }
484 | };
```
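For reference, a minimal sketch of how the exported handler might be driven outside Claude Code, assuming the module is required directly from its own directory and that a memory service is reachable; the require path and the `memoryService` settings shown are assumptions for illustration, not part of the repository:

```javascript
// Hypothetical usage sketch (not repository code): builds a context object of the
// shape onMidConversation expects and logs any injected memory context.
const { onMidConversation, resetHookInstance } = require('./mid-conversation'); // assumed path

async function demo() {
    const context = {
        config: {
            naturalTriggers: { enabled: true, cooldownPeriod: 30000, triggerThreshold: 0.6 },
            memoryService: { endpoint: 'https://localhost:8443' }, // assumed config shape
            maxMemoriesPerTrigger: 5
        },
        userMessage: 'What did we decide about the storage backend last week?',
        projectContext: { name: 'mcp-memory-service', language: 'python' },
        isQuestionPattern: true,
        mentionsPastWork: true,
        // Claude Code would normally supply this callback; here we just print the result.
        injectSystemMessage: async (message) => console.log('[injected]\n' + message)
    };

    await onMidConversation(context);
    resetHookInstance(); // dispose of the singleton between runs
}

demo().catch(console.error);
```

Because `analyzeMessage` and `executeMemoryTrigger` catch their own errors, the sketch degrades gracefully when no memory service is running: the hook simply logs a warning and injects nothing.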