This is page 23 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/consolidation/associations.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """Creative association discovery engine for memory connections."""
 16 | 
 17 | import random
 18 | import numpy as np
 19 | from typing import List, Dict, Any, Optional, Tuple, Set
 20 | from itertools import combinations
 21 | from datetime import datetime
 22 | from dataclasses import dataclass
 23 | import re
 24 | 
 25 | from .base import ConsolidationBase, ConsolidationConfig, MemoryAssociation
 26 | from ..models.memory import Memory
 27 | 
 28 | @dataclass
 29 | class AssociationAnalysis:
 30 |     """Analysis results for a potential memory association."""
 31 |     memory1_hash: str
 32 |     memory2_hash: str
 33 |     similarity_score: float
 34 |     connection_reasons: List[str]
 35 |     shared_concepts: List[str]
 36 |     temporal_relationship: Optional[str]
 37 |     tag_overlap: List[str]
 38 |     confidence_score: float
 39 | 
 40 | class CreativeAssociationEngine(ConsolidationBase):
 41 |     """
 42 |     Discovers creative connections between seemingly unrelated memories.
 43 |     
 44 |     Similar to how dreams create unexpected associations, this engine randomly
 45 |     pairs memories to discover non-obvious connections in the "sweet spot"
 46 |     of moderate similarity (0.3-0.7 range).
 47 |     """
 48 |     
 49 |     def __init__(self, config: ConsolidationConfig):
 50 |         super().__init__(config)
 51 |         self.min_similarity = config.min_similarity
 52 |         self.max_similarity = config.max_similarity
 53 |         self.max_pairs_per_run = config.max_pairs_per_run
 54 |         
 55 |         # Compile regex patterns for concept extraction
 56 |         self._concept_patterns = {
 57 |             'urls': re.compile(r'https?://[^\s]+'),
 58 |             'emails': re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'),
 59 |             'dates': re.compile(r'\b\d{1,2}[/-]\d{1,2}[/-]\d{2,4}\b|\b\d{4}[/-]\d{1,2}[/-]\d{1,2}\b'),
 60 |             'numbers': re.compile(r'\b\d+\.?\d*\b'),
 61 |             'camelCase': re.compile(r'\b[a-z]+[A-Z][a-zA-Z]*\b'),
 62 |             'PascalCase': re.compile(r'\b[A-Z][a-z]*[A-Z][a-zA-Z]*\b'),
 63 |             'acronyms': re.compile(r'\b[A-Z]{2,}\b')
 64 |         }
 65 |     
 66 |     async def process(self, memories: List[Memory], **kwargs) -> List[MemoryAssociation]:
 67 |         """Discover creative associations between memories."""
 68 |         if not self._validate_memories(memories) or len(memories) < 2:
 69 |             return []
 70 |         
 71 |         # Get existing associations to avoid duplicates
 72 |         existing_associations = kwargs.get('existing_associations', set())
 73 |         
 74 |         # Sample memory pairs for analysis
 75 |         pairs = self._sample_memory_pairs(memories)
 76 |         
 77 |         associations = []
 78 |         for mem1, mem2 in pairs:
 79 |             # Skip if association already exists
 80 |             pair_key = tuple(sorted([mem1.content_hash, mem2.content_hash]))
 81 |             if pair_key in existing_associations:
 82 |                 continue
 83 |             
 84 |             # Calculate semantic similarity
 85 |             similarity = await self._calculate_semantic_similarity(mem1, mem2)
 86 |             
 87 |             # Check if similarity is in the "sweet spot" for creative connections
 88 |             if self.min_similarity <= similarity <= self.max_similarity:
 89 |                 analysis = await self._analyze_association(mem1, mem2, similarity)
 90 |                 
 91 |                 if analysis.confidence_score > 0.3:  # Minimum confidence threshold
 92 |                     association = await self._create_association_memory(analysis)
 93 |                     associations.append(association)
 94 |         
 95 |         self.logger.info(f"Discovered {len(associations)} creative associations from {len(pairs)} pairs")
 96 |         return associations
 97 |     
 98 |     def _sample_memory_pairs(self, memories: List[Memory]) -> List[Tuple[Memory, Memory]]:
 99 |         """Sample random pairs of memories for association discovery."""
100 |         # Calculate maximum possible pairs
101 |         total_possible = len(memories) * (len(memories) - 1) // 2
102 |         max_pairs = min(self.max_pairs_per_run, total_possible)
103 |         
104 |         if total_possible <= max_pairs:
105 |             # Return all possible pairs if total is manageable
106 |             return list(combinations(memories, 2))
107 |         else:
108 |             # Randomly sample pairs to prevent combinatorial explosion
109 |             all_pairs = list(combinations(memories, 2))
110 |             return random.sample(all_pairs, max_pairs)
111 |     
112 |     async def _calculate_semantic_similarity(self, mem1: Memory, mem2: Memory) -> float:
113 |         """Calculate semantic similarity between two memories using embeddings."""
114 |         if not mem1.embedding or not mem2.embedding:
115 |             # Fallback to text-based similarity if embeddings unavailable
116 |             return self._calculate_text_similarity(mem1.content, mem2.content)
117 |         
118 |         # Use cosine similarity for embeddings
119 |         embedding1 = np.array(mem1.embedding)
120 |         embedding2 = np.array(mem2.embedding)
121 |         
122 |         # Normalize embeddings
123 |         norm1 = np.linalg.norm(embedding1)
124 |         norm2 = np.linalg.norm(embedding2)
125 |         
126 |         if norm1 == 0 or norm2 == 0:
127 |             return 0.0
128 |         
129 |         # Calculate cosine similarity
130 |         similarity = np.dot(embedding1, embedding2) / (norm1 * norm2)
131 |         
132 |         # Convert to 0-1 range (cosine similarity can be -1 to 1)
133 |         return (similarity + 1) / 2
134 |     
135 |     def _calculate_text_similarity(self, text1: str, text2: str) -> float:
136 |         """Fallback text similarity using word overlap."""
137 |         words1 = set(text1.lower().split())
138 |         words2 = set(text2.lower().split())
139 |         
140 |         if not words1 or not words2:
141 |             return 0.0
142 |         
143 |         intersection = len(words1.intersection(words2))
144 |         union = len(words1.union(words2))
145 |         
146 |         return intersection / union if union > 0 else 0.0
147 |     
148 |     async def _analyze_association(
149 |         self, 
150 |         mem1: Memory, 
151 |         mem2: Memory, 
152 |         similarity: float
153 |     ) -> AssociationAnalysis:
154 |         """Analyze why two memories might be associated."""
155 |         connection_reasons = []
156 |         shared_concepts = []
157 |         tag_overlap = []
158 |         temporal_relationship = None
159 |         
160 |         # Analyze tag overlap
161 |         tags1 = set(mem1.tags)
162 |         tags2 = set(mem2.tags)
163 |         tag_overlap = list(tags1.intersection(tags2))
164 |         if tag_overlap:
165 |             connection_reasons.append("shared_tags")
166 |         
167 |         # Analyze temporal relationship
168 |         temporal_relationship = self._analyze_temporal_relationship(mem1, mem2)
169 |         if temporal_relationship:
170 |             connection_reasons.append("temporal_proximity")
171 |         
172 |         # Extract and compare concepts
173 |         concepts1 = self._extract_concepts(mem1.content)
174 |         concepts2 = self._extract_concepts(mem2.content)
175 |         shared_concepts = list(concepts1.intersection(concepts2))
176 |         if shared_concepts:
177 |             connection_reasons.append("shared_concepts")
178 |         
179 |         # Analyze content patterns
180 |         if self._has_similar_structure(mem1.content, mem2.content):
181 |             connection_reasons.append("similar_structure")
182 |         
183 |         if self._has_complementary_content(mem1.content, mem2.content):
184 |             connection_reasons.append("complementary_content")
185 |         
186 |         # Calculate confidence score based on multiple factors
187 |         confidence_score = self._calculate_confidence_score(
188 |             similarity, len(connection_reasons), len(shared_concepts), len(tag_overlap)
189 |         )
190 |         
191 |         return AssociationAnalysis(
192 |             memory1_hash=mem1.content_hash,
193 |             memory2_hash=mem2.content_hash,
194 |             similarity_score=similarity,
195 |             connection_reasons=connection_reasons,
196 |             shared_concepts=shared_concepts,
197 |             temporal_relationship=temporal_relationship,
198 |             tag_overlap=tag_overlap,
199 |             confidence_score=confidence_score
200 |         )
201 |     
202 |     def _extract_concepts(self, text: str) -> Set[str]:
203 |         """Extract key concepts from text using various patterns."""
204 |         concepts = set()
205 |         
206 |         # Extract different types of concepts
207 |         for concept_type, pattern in self._concept_patterns.items():
208 |             matches = pattern.findall(text)
209 |             concepts.update(matches)
210 |         
211 |         # Extract capitalized words (potential proper nouns)
212 |         capitalized_words = re.findall(r'\b[A-Z][a-z]+\b', text)
213 |         concepts.update(capitalized_words)
214 |         
215 |         # Extract quoted phrases
216 |         quoted_phrases = re.findall(r'"([^"]*)"', text)
217 |         concepts.update(quoted_phrases)
218 |         
219 |         # Extract common important words (filter out common stop words)
220 |         stop_words = {'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by'}
221 |         words = re.findall(r'\b\w{4,}\b', text.lower())  # Words with 4+ characters
222 |         important_words = [word for word in words if word not in stop_words]
223 |         concepts.update(important_words[:10])  # Limit to top 10 words
224 |         
225 |         return concepts
226 |     
227 |     def _analyze_temporal_relationship(self, mem1: Memory, mem2: Memory) -> Optional[str]:
228 |         """Analyze temporal relationship between memories."""
229 |         if not (mem1.created_at and mem2.created_at):
230 |             return None
231 |         
232 |         time_diff = abs(mem1.created_at - mem2.created_at)
233 |         days_diff = time_diff / (24 * 3600)  # Convert to days
234 |         
235 |         if days_diff < 1:
236 |             return "same_day"
237 |         elif days_diff < 7:
238 |             return "same_week"
239 |         elif days_diff < 30:
240 |             return "same_month"
241 |         elif days_diff < 365:
242 |             return "same_year"
243 |         else:
244 |             return "different_years"
245 |     
246 |     def _has_similar_structure(self, text1: str, text2: str) -> bool:
247 |         """Check if texts have similar structural patterns."""
248 |         # Check for similar formatting patterns
249 |         patterns = [
250 |             r'\n\s*[-*+]\s+',  # List items
251 |             r'\n\s*\d+\.\s+',  # Numbered lists
252 |             r'\n#{1,6}\s+',    # Headers
253 |             r'```[\s\S]*?```', # Code blocks
254 |             r'\[.*?\]\(.*?\)', # Links
255 |         ]
256 |         
257 |         for pattern in patterns:
258 |             matches1 = len(re.findall(pattern, text1))
259 |             matches2 = len(re.findall(pattern, text2))
260 |             
261 |             if matches1 > 0 and matches2 > 0:
262 |                 return True
263 |         
264 |         return False
265 |     
266 |     def _has_complementary_content(self, text1: str, text2: str) -> bool:
267 |         """Check if texts contain complementary information."""
268 |         # Look for question-answer patterns
269 |         has_question1 = bool(re.search(r'\?', text1))
270 |         has_question2 = bool(re.search(r'\?', text2))
271 |         
272 |         # If one has questions and the other doesn't, they might be complementary
273 |         if has_question1 != has_question2:
274 |             return True
275 |         
276 |         # Look for problem-solution patterns
277 |         problem_words = ['problem', 'issue', 'error', 'bug', 'fail', 'wrong']
278 |         solution_words = ['solution', 'fix', 'resolve', 'answer', 'correct', 'solve']
279 |         
280 |         has_problem1 = any(word in text1.lower() for word in problem_words)
281 |         has_solution1 = any(word in text1.lower() for word in solution_words)
282 |         has_problem2 = any(word in text2.lower() for word in problem_words)
283 |         has_solution2 = any(word in text2.lower() for word in solution_words)
284 |         
285 |         # Complementary if one focuses on problems, other on solutions
286 |         if (has_problem1 and has_solution2) or (has_solution1 and has_problem2):
287 |             return True
288 |         
289 |         return False
290 |     
291 |     def _calculate_confidence_score(
292 |         self,
293 |         similarity: float,
294 |         num_reasons: int,
295 |         num_shared_concepts: int,
296 |         num_shared_tags: int
297 |     ) -> float:
298 |         """Calculate confidence score for the association."""
299 |         base_score = similarity
300 |         
301 |         # Boost for multiple connection reasons
302 |         reason_boost = min(0.3, num_reasons * 0.1)
303 |         
304 |         # Boost for shared concepts
305 |         concept_boost = min(0.2, num_shared_concepts * 0.05)
306 |         
307 |         # Boost for shared tags
308 |         tag_boost = min(0.2, num_shared_tags * 0.1)
309 |         
310 |         total_score = base_score + reason_boost + concept_boost + tag_boost
311 |         
312 |         return min(1.0, total_score)
313 |     
314 |     async def _create_association_memory(self, analysis: AssociationAnalysis) -> MemoryAssociation:
315 |         """Create a memory association from analysis results."""
316 |         return MemoryAssociation(
317 |             source_memory_hashes=[analysis.memory1_hash, analysis.memory2_hash],
318 |             similarity_score=analysis.similarity_score,
319 |             connection_type=', '.join(analysis.connection_reasons),
320 |             discovery_method="creative_association",
321 |             discovery_date=datetime.now(),
322 |             metadata={
323 |                 "shared_concepts": analysis.shared_concepts,
324 |                 "temporal_relationship": analysis.temporal_relationship,
325 |                 "tag_overlap": analysis.tag_overlap,
326 |                 "confidence_score": analysis.confidence_score,
327 |                 "analysis_version": "1.0"
328 |             }
329 |         )
330 |     
331 |     async def filter_high_confidence_associations(
332 |         self,
333 |         associations: List[MemoryAssociation],
334 |         min_confidence: float = 0.5
335 |     ) -> List[MemoryAssociation]:
336 |         """Filter associations by confidence score."""
337 |         return [
338 |             assoc for assoc in associations
339 |             if assoc.metadata.get('confidence_score', 0) >= min_confidence
340 |         ]
341 |     
342 |     async def group_associations_by_type(
343 |         self,
344 |         associations: List[MemoryAssociation]
345 |     ) -> Dict[str, List[MemoryAssociation]]:
346 |         """Group associations by their connection type."""
347 |         groups = {}
348 |         for assoc in associations:
349 |             conn_type = assoc.connection_type
350 |             if conn_type not in groups:
351 |                 groups[conn_type] = []
352 |             groups[conn_type].append(assoc)
353 |         
354 |         return groups
```
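
A minimal usage sketch for the engine above (illustrative only): it assumes `ConsolidationConfig` can be constructed with defaults that expose `min_similarity`, `max_similarity` and `max_pairs_per_run`, and that the caller already holds a list of `Memory` objects loaded from storage.

```python
# Hedged usage sketch for CreativeAssociationEngine; the config defaults and the
# way memories are obtained are assumptions, not confirmed by this file alone.
import asyncio

from mcp_memory_service.consolidation.associations import CreativeAssociationEngine
from mcp_memory_service.consolidation.base import ConsolidationConfig


async def discover_associations(memories):
    config = ConsolidationConfig()  # assumed to default to the 0.3-0.7 "sweet spot"
    engine = CreativeAssociationEngine(config)

    # Pass previously discovered pairs so they are not re-created.
    associations = await engine.process(memories, existing_associations=set())

    # Keep only higher-confidence links and group them by connection type.
    strong = await engine.filter_high_confidence_associations(associations, min_confidence=0.5)
    grouped = await engine.group_associations_by_type(strong)
    for connection_type, items in grouped.items():
        print(f"{connection_type}: {len(items)} association(s)")
    return grouped

# asyncio.run(discover_associations(memories_loaded_from_storage))
```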

--------------------------------------------------------------------------------
/scripts/archive/check_missing_timestamps.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Check for memories without timestamps in the MCP memory service database.
  4 | This script analyzes the storage backend for entries missing timestamp data.
  5 | """
  6 | 
  7 | import sys
  8 | import os
  9 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))  # repo-root src/ (script lives in scripts/archive/)
 10 | 
 11 | import asyncio
 12 | import json
 13 | from typing import List, Dict, Any
 14 | from datetime import datetime
 15 | 
 16 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 17 | from mcp_memory_service.models.memory import Memory
 18 | 
 19 | class TimestampAnalyzer:
 20 |     """Analyze memory database for missing timestamp entries."""
 21 | 
 22 |     def __init__(self, storage_backend: str = "sqlite_vec", db_path: str = None):
 23 |         self.storage_backend = storage_backend
 24 |         self.db_path = db_path or os.path.expanduser("~/.mcp_memory_service/storage.db")
 25 |         self.storage = None
 26 | 
 27 |     async def setup(self):
 28 |         """Initialize storage backend."""
 29 |         print(f"=== Analyzing {self.storage_backend} database for timestamp issues ===")
 30 |         print(f"Database path: {self.db_path}")
 31 | 
 32 |         if not os.path.exists(self.db_path):
 33 |             print(f"❌ Database file not found: {self.db_path}")
 34 |             print("Possible locations:")
 35 |             common_paths = [
 36 |                 os.path.expanduser("~/.mcp_memory_service/storage.db"),
 37 |                 os.path.expanduser("~/.mcp_memory_service/memory.db"),
 38 |                 "storage.db",
 39 |                 "memory.db"
 40 |             ]
 41 |             for path in common_paths:
 42 |                 if os.path.exists(path):
 43 |                     print(f"  ✅ Found: {path}")
 44 |                 else:
 45 |                     print(f"  ❌ Not found: {path}")
 46 |             return False
 47 | 
 48 |         try:
 49 |             self.storage = SqliteVecMemoryStorage(
 50 |                 db_path=self.db_path,
 51 |                 embedding_model="all-MiniLM-L6-v2"
 52 |             )
 53 |             await self.storage.initialize()
 54 |             print("✅ Storage initialized successfully")
 55 |             return True
 56 |         except Exception as e:
 57 |             print(f"❌ Failed to initialize storage: {e}")
 58 |             return False
 59 | 
 60 |     async def get_all_memories(self) -> List[Memory]:
 61 |         """Retrieve all memories from the database."""
 62 |         try:
 63 |             # SQLite-Vec has a limit of 4096 for k in knn queries, so use direct database access
 64 |             return await self.get_memories_direct_query()
 65 |         except Exception as e:
 66 |             print(f"❌ Error with direct query, trying search approach: {e}")
 67 |             return await self.get_memories_via_search()
 68 | 
 69 |     async def get_memories_direct_query(self) -> List[Memory]:
 70 |         """Get all memories using direct database queries."""
 71 |         import sqlite3
 72 |         memories = []
 73 | 
 74 |         try:
 75 |             # Connect directly to SQLite database
 76 |             conn = sqlite3.connect(self.db_path)
 77 |             conn.row_factory = sqlite3.Row
 78 | 
 79 |             # Get all memory records
 80 |             cursor = conn.execute("""
 81 |                 SELECT id, content, content_hash, tags, memory_type,
 82 |                        created_at, created_at_iso, updated_at, updated_at_iso, metadata
 83 |                 FROM memories
 84 |                 ORDER BY created_at DESC
 85 |             """)
 86 | 
 87 |             rows = cursor.fetchall()
 88 |             print(f"📊 Found {len(rows)} memory records in database")
 89 | 
 90 |             for i, row in enumerate(rows):
 91 |                 try:
 92 |                     # Safely parse JSON fields
 93 |                     tags = []
 94 |                     if row['tags']:
 95 |                         try:
 96 |                             tags = json.loads(row['tags'])
 97 |                         except (json.JSONDecodeError, TypeError):
 98 |                             tags = []
 99 | 
100 |                     metadata = {}
101 |                     if row['metadata']:
102 |                         try:
103 |                             metadata = json.loads(row['metadata'])
104 |                         except (json.JSONDecodeError, TypeError):
105 |                             metadata = {}
106 | 
107 |                     # Reconstruct Memory object from database row
108 |                     memory_dict = {
109 |                         'content': row['content'] or '',
110 |                         'content_hash': row['content_hash'] or '',
111 |                         'tags': tags,
112 |                         'memory_type': row['memory_type'] or 'unknown',
113 |                         'created_at': row['created_at'],
114 |                         'created_at_iso': row['created_at_iso'],
115 |                         'updated_at': row['updated_at'],
116 |                         'updated_at_iso': row['updated_at_iso'],
117 |                         'metadata': metadata
118 |                     }
119 | 
120 |                     memory = Memory.from_dict(memory_dict)
121 |                     memories.append(memory)
122 | 
123 |                 except Exception as e:
124 |                     print(f"⚠️  Error processing memory {i+1}: {e}")
125 |                     # Continue processing other memories
126 |                     continue
127 | 
128 |             conn.close()
129 |             return memories
130 | 
131 |         except Exception as e:
132 |             print(f"❌ Direct query failed: {e}")
133 |             if 'conn' in locals():
134 |                 conn.close()
135 |             return []
136 | 
137 |     async def get_memories_via_search(self) -> List[Memory]:
138 |         """Get memories using search with smaller batches."""
139 |         memories = []
140 | 
141 |         try:
142 |             # Try different search approaches with smaller limits
143 |             search_queries = ["", "memory", "note", "session"]
144 | 
145 |             for query in search_queries:
146 |                 try:
147 |                     results = await self.storage.retrieve(query, n_results=1000)  # Well under 4096 limit
148 |                     batch_memories = [result.memory for result in results]
149 | 
150 |                     # Deduplicate based on content_hash
151 |                     existing_hashes = {m.content_hash for m in memories}
152 |                     new_memories = [m for m in batch_memories if m.content_hash not in existing_hashes]
153 |                     memories.extend(new_memories)
154 | 
155 |                     print(f"📊 Query '{query}': {len(batch_memories)} results, {len(new_memories)} new")
156 | 
157 |                 except Exception as e:
158 |                     print(f"⚠️  Query '{query}' failed: {e}")
159 |                     continue
160 | 
161 |             print(f"📊 Total unique memories retrieved: {len(memories)}")
162 |             return memories
163 | 
164 |         except Exception as e:
165 |             print(f"❌ All search approaches failed: {e}")
166 |             return []
167 | 
168 |     def analyze_timestamp_fields(self, memories: List[Memory]) -> Dict[str, Any]:
169 |         """Analyze timestamp fields across all memories."""
170 |         analysis = {
171 |             "total_memories": len(memories),
172 |             "missing_created_at": 0,
173 |             "missing_created_at_iso": 0,
174 |             "missing_both_timestamps": 0,
175 |             "invalid_timestamps": 0,
176 |             "problematic_memories": [],
177 |             "timestamp_formats": set(),
178 |             "timestamp_range": {"earliest": None, "latest": None}
179 |         }
180 | 
181 |         for memory in memories:
182 |             has_created_at = memory.created_at is not None
183 |             has_created_at_iso = memory.created_at_iso is not None
184 | 
185 |             # Track missing timestamp fields
186 |             if not has_created_at:
187 |                 analysis["missing_created_at"] += 1
188 | 
189 |             if not has_created_at_iso:
190 |                 analysis["missing_created_at_iso"] += 1
191 | 
192 |             if not has_created_at and not has_created_at_iso:
193 |                 analysis["missing_both_timestamps"] += 1
194 |                 analysis["problematic_memories"].append({
195 |                     "content_hash": memory.content_hash,
196 |                     "content_preview": memory.content[:100] + "..." if len(memory.content) > 100 else memory.content,
197 |                     "tags": memory.tags,
198 |                     "memory_type": memory.memory_type,
199 |                     "issue": "missing_both_timestamps"
200 |                 })
201 | 
202 |             # Track timestamp formats and ranges
203 |             if has_created_at_iso:
204 |                 analysis["timestamp_formats"].add(type(memory.created_at_iso).__name__)
205 | 
206 |             if has_created_at:
207 |                 try:
208 |                     if analysis["timestamp_range"]["earliest"] is None or memory.created_at < analysis["timestamp_range"]["earliest"]:
209 |                         analysis["timestamp_range"]["earliest"] = memory.created_at
210 |                     if analysis["timestamp_range"]["latest"] is None or memory.created_at > analysis["timestamp_range"]["latest"]:
211 |                         analysis["timestamp_range"]["latest"] = memory.created_at
212 |                 except:
213 |                     analysis["invalid_timestamps"] += 1
214 |                     analysis["problematic_memories"].append({
215 |                         "content_hash": memory.content_hash,
216 |                         "content_preview": memory.content[:100] + "..." if len(memory.content) > 100 else memory.content,
217 |                         "created_at": str(memory.created_at),
218 |                         "issue": "invalid_timestamp"
219 |                     })
220 | 
221 |         # Convert set to list for JSON serialization
222 |         analysis["timestamp_formats"] = list(analysis["timestamp_formats"])
223 | 
224 |         return analysis
225 | 
226 |     def print_analysis_report(self, analysis: Dict[str, Any]):
227 |         """Print a detailed analysis report."""
228 |         print("\n" + "="*70)
229 |         print("TIMESTAMP ANALYSIS REPORT")
230 |         print("="*70)
231 | 
232 |         total = analysis["total_memories"]
233 | 
234 |         print(f"\n📊 OVERVIEW:")
235 |         print(f"  Total memories analyzed: {total}")
236 |         print(f"  Missing created_at (float): {analysis['missing_created_at']}")
237 |         print(f"  Missing created_at_iso (ISO string): {analysis['missing_created_at_iso']}")
238 |         print(f"  Missing both timestamps: {analysis['missing_both_timestamps']}")
239 |         print(f"  Invalid timestamp values: {analysis['invalid_timestamps']}")
240 | 
241 |         if total > 0:
242 |             print(f"\n📈 PERCENTAGES:")
243 |             print(f"  Missing created_at: {analysis['missing_created_at']/total*100:.1f}%")
244 |             print(f"  Missing created_at_iso: {analysis['missing_created_at_iso']/total*100:.1f}%")
245 |             print(f"  Missing both: {analysis['missing_both_timestamps']/total*100:.1f}%")
246 |             print(f"  Invalid timestamps: {analysis['invalid_timestamps']/total*100:.1f}%")
247 | 
248 |         print(f"\n🕐 TIMESTAMP RANGE:")
249 |         if analysis["timestamp_range"]["earliest"] and analysis["timestamp_range"]["latest"]:
250 |             earliest = datetime.fromtimestamp(analysis["timestamp_range"]["earliest"])
251 |             latest = datetime.fromtimestamp(analysis["timestamp_range"]["latest"])
252 |             print(f"  Earliest: {earliest} ({analysis['timestamp_range']['earliest']})")
253 |             print(f"  Latest: {latest} ({analysis['timestamp_range']['latest']})")
254 |         else:
255 |             print("  No valid timestamps found")
256 | 
257 |         print(f"\n📝 TIMESTAMP FORMATS DETECTED:")
258 |         for fmt in analysis["timestamp_formats"]:
259 |             print(f"  - {fmt}")
260 | 
261 |         if analysis["problematic_memories"]:
262 |             print(f"\n⚠️  PROBLEMATIC MEMORIES ({len(analysis['problematic_memories'])}):")
263 |             for i, memory in enumerate(analysis["problematic_memories"][:10]):  # Show first 10
264 |                 print(f"  {i+1}. Issue: {memory['issue']}")
265 |                 print(f"     Content: {memory['content_preview']}")
266 |                 print(f"     Hash: {memory['content_hash']}")
267 |                 if 'tags' in memory:
268 |                     print(f"     Tags: {memory.get('tags', [])}")
269 |                 print()
270 | 
271 |             if len(analysis["problematic_memories"]) > 10:
272 |                 print(f"  ... and {len(analysis['problematic_memories']) - 10} more")
273 | 
274 |         # Health assessment
275 |         print(f"\n🏥 DATABASE HEALTH ASSESSMENT:")
276 |         if analysis["missing_both_timestamps"] == 0:
277 |             print("  ✅ EXCELLENT: All memories have at least one timestamp field")
278 |         elif analysis["missing_both_timestamps"] < total * 0.1:
279 |             print(f"  ⚠️  GOOD: Only {analysis['missing_both_timestamps']} memories missing all timestamps")
280 |         elif analysis["missing_both_timestamps"] < total * 0.5:
281 |             print(f"  ⚠️  CONCERNING: {analysis['missing_both_timestamps']} memories missing all timestamps")
282 |         else:
283 |             print(f"  ❌ CRITICAL: {analysis['missing_both_timestamps']} memories missing all timestamps")
284 | 
285 |         if analysis["missing_created_at"] > 0 or analysis["missing_created_at_iso"] > 0:
286 |             print("  💡 RECOMMENDATION: Run timestamp migration script to fix missing fields")
287 | 
288 |     async def run_analysis(self):
289 |         """Run the complete timestamp analysis."""
290 |         if not await self.setup():
291 |             return False
292 | 
293 |         memories = await self.get_all_memories()
294 |         if not memories:
295 |             print("⚠️  No memories found in database")
296 |             return False
297 | 
298 |         analysis = self.analyze_timestamp_fields(memories)
299 |         self.print_analysis_report(analysis)
300 | 
301 |         # Save detailed report to file
302 |         report_file = "timestamp_analysis_report.json"
303 |         with open(report_file, 'w') as f:
304 |             # Convert any datetime objects to strings for JSON serialization
305 |             json_analysis = analysis.copy()
306 |             if json_analysis["timestamp_range"]["earliest"]:
307 |                 json_analysis["timestamp_range"]["earliest_iso"] = datetime.fromtimestamp(json_analysis["timestamp_range"]["earliest"]).isoformat()
308 |             if json_analysis["timestamp_range"]["latest"]:
309 |                 json_analysis["timestamp_range"]["latest_iso"] = datetime.fromtimestamp(json_analysis["timestamp_range"]["latest"]).isoformat()
310 | 
311 |             json.dump(json_analysis, f, indent=2, default=str)
312 | 
313 |         print(f"\n📄 Detailed report saved to: {report_file}")
314 | 
315 |         return analysis["missing_both_timestamps"] == 0
316 | 
317 | async def main():
318 |     """Main analysis execution."""
319 |     import argparse
320 | 
321 |     parser = argparse.ArgumentParser(description="Check for memories without timestamps")
322 |     parser.add_argument("--db-path", help="Path to database file")
323 |     parser.add_argument("--storage", default="sqlite_vec", choices=["sqlite_vec"],
324 |                        help="Storage backend to analyze")
325 | 
326 |     args = parser.parse_args()
327 | 
328 |     analyzer = TimestampAnalyzer(
329 |         storage_backend=args.storage,
330 |         db_path=args.db_path
331 |     )
332 | 
333 |     success = await analyzer.run_analysis()
334 |     return 0 if success else 1
335 | 
336 | if __name__ == "__main__":
337 |     exit_code = asyncio.run(main())
338 |     sys.exit(exit_code)
```

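A minimal invocation sketch for the analysis script above. The script path and database location shown here are illustrative placeholders (not taken from the repository); the `--storage` and `--db-path` flags come from the argparse definition in the listing.

```bash
# Run the timestamp analysis against a sqlite-vec database (paths are examples only).
python path/to/timestamp_analysis_script.py --storage sqlite_vec --db-path ~/.local/share/mcp-memory/sqlite_vec.db

# The script prints the health report and writes timestamp_analysis_report.json to the
# current directory; exit code 0 means no memory is missing both timestamp fields.
echo "exit code: $?"
```
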
--------------------------------------------------------------------------------
/scripts/installation/install_windows_service.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Windows Service installer for MCP Memory Service.
  4 | Installs the service using Python's Windows service capabilities.
  5 | """
  6 | import os
  7 | import sys
  8 | import json
  9 | import argparse
 10 | import subprocess
 11 | from pathlib import Path
 12 | 
 13 | # Add parent directory to path for imports
 14 | sys.path.insert(0, str(Path(__file__).parent.parent))
 15 | 
 16 | try:
 17 |     from scripts.service_utils import (
 18 |         get_project_root, get_service_paths, get_service_environment,
 19 |         generate_api_key, save_service_config, load_service_config,
 20 |         check_dependencies, get_service_command, print_service_info,
 21 |         require_admin
 22 |     )
 23 | except ImportError as e:
 24 |     print(f"Error importing service utilities: {e}")
 25 |     print("Please ensure you're running this from the project directory")
 26 |     sys.exit(1)
 27 | 
 28 | 
 29 | SERVICE_NAME = "MCPMemoryService"
 30 | SERVICE_DISPLAY_NAME = "MCP Memory Service"
 31 | SERVICE_DESCRIPTION = "Semantic memory and persistent storage service for Claude Desktop"
 32 | 
 33 | 
 34 | def create_windows_service_script():
 35 |     """Create the Windows service wrapper script."""
 36 |     paths = get_service_paths()
 37 |     service_script = paths['scripts_dir'] / 'mcp_memory_windows_service.py'
 38 |     
 39 |     script_content = '''#!/usr/bin/env python3
 40 | """
 41 | Windows Service wrapper for MCP Memory Service.
 42 | This script runs as a Windows service and manages the MCP Memory server process.
 43 | """
 44 | import os
 45 | import sys
 46 | import time
 47 | import subprocess
 48 | import win32serviceutil
 49 | import win32service
 50 | import win32event
 51 | import servicemanager
 52 | import socket
 53 | import json
 54 | from pathlib import Path
 55 | 
 56 | 
 57 | class MCPMemoryService(win32serviceutil.ServiceFramework):
 58 |     _svc_name_ = "MCPMemoryService"
 59 |     _svc_display_name_ = "MCP Memory Service"
 60 |     _svc_description_ = "Semantic memory and persistent storage service for Claude Desktop"
 61 |     
 62 |     def __init__(self, args):
 63 |         win32serviceutil.ServiceFramework.__init__(self, args)
 64 |         self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
 65 |         socket.setdefaulttimeout(60)
 66 |         self.is_running = True
 67 |         self.process = None
 68 |         
 69 |     def SvcStop(self):
 70 |         self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
 71 |         win32event.SetEvent(self.hWaitStop)
 72 |         self.is_running = False
 73 |         
 74 |         # Stop the subprocess
 75 |         if self.process:
 76 |             self.process.terminate()
 77 |             try:
 78 |                 self.process.wait(timeout=10)
 79 |             except subprocess.TimeoutExpired:
 80 |                 self.process.kill()
 81 |                 
 82 |     def SvcDoRun(self):
 83 |         servicemanager.LogMsg(
 84 |             servicemanager.EVENTLOG_INFORMATION_TYPE,
 85 |             servicemanager.PYS_SERVICE_STARTED,
 86 |             (self._svc_name_, '')
 87 |         )
 88 |         self.main()
 89 |         
 90 |     def main(self):
 91 |         # Load service configuration
 92 |         config_dir = Path.home() / '.mcp_memory_service'
 93 |         config_file = config_dir / 'service_config.json'
 94 |         
 95 |         if not config_file.exists():
 96 |             servicemanager.LogErrorMsg("Service configuration not found")
 97 |             return
 98 |             
 99 |         with open(config_file, 'r') as f:
100 |             config = json.load(f)
101 |         
102 |         # Set up environment
103 |         env = os.environ.copy()
104 |         env.update(config['environment'])
105 |         
106 |         # Start the service process
107 |         try:
108 |             self.process = subprocess.Popen(
109 |                 config['command'],
110 |                 env=env,
111 |                 stdout=subprocess.PIPE,
112 |                 stderr=subprocess.PIPE,
113 |                 text=True
114 |             )
115 |             
116 |             servicemanager.LogMsg(
117 |                 servicemanager.EVENTLOG_INFORMATION_TYPE,
118 |                 0,
119 |                 "MCP Memory Service process started"
120 |             )
121 |             
122 |             # Monitor the process
123 |             while self.is_running:
124 |                 if self.process.poll() is not None:
125 |                     # Process died, log error and restart
126 |                     stdout, stderr = self.process.communicate()
127 |                     servicemanager.LogErrorMsg(
128 |                         f"Service process died unexpectedly. stderr: {stderr}"
129 |                     )
130 |                     
131 |                     # Wait a bit before restarting
132 |                     time.sleep(5)
133 |                     
134 |                     # Restart the process
135 |                     self.process = subprocess.Popen(
136 |                         config['command'],
137 |                         env=env,
138 |                         stdout=subprocess.PIPE,
139 |                         stderr=subprocess.PIPE,
140 |                         text=True
141 |                     )
142 |                     
143 |                 # Check if we should stop
144 |                 if win32event.WaitForSingleObject(self.hWaitStop, 1000) == win32event.WAIT_OBJECT_0:
145 |                     break
146 |                     
147 |         except Exception as e:
148 |             servicemanager.LogErrorMsg(f"Error in service: {str(e)}")
149 | 
150 | 
151 | if __name__ == '__main__':
152 |     if len(sys.argv) == 1:
153 |         servicemanager.Initialize()
154 |         servicemanager.PrepareToHostSingle(MCPMemoryService)
155 |         servicemanager.StartServiceCtrlDispatcher()
156 |     else:
157 |         win32serviceutil.HandleCommandLine(MCPMemoryService)
158 | '''
159 |     
160 |     with open(service_script, 'w') as f:
161 |         f.write(script_content)
162 |     
163 |     return service_script
164 | 
165 | 
166 | def create_batch_scripts():
167 |     """Create convenient batch scripts for service management."""
168 |     paths = get_service_paths()
169 |     scripts_dir = paths['scripts_dir'] / 'windows'
170 |     scripts_dir.mkdir(exist_ok=True)
171 |     
172 |     # Start service batch file
173 |     start_script = scripts_dir / 'start_service.bat'
174 |     with open(start_script, 'w') as f:
175 |         f.write(f'''@echo off
176 | echo Starting {SERVICE_DISPLAY_NAME}...
177 | net start {SERVICE_NAME}
178 | if %ERRORLEVEL% == 0 (
179 |     echo Service started successfully!
180 | ) else (
181 |     echo Failed to start service. Run as Administrator if needed.
182 | )
183 | pause
184 | ''')
185 |     
186 |     # Stop service batch file
187 |     stop_script = scripts_dir / 'stop_service.bat'
188 |     with open(stop_script, 'w') as f:
189 |         f.write(f'''@echo off
190 | echo Stopping {SERVICE_DISPLAY_NAME}...
191 | net stop {SERVICE_NAME}
192 | if %ERRORLEVEL% == 0 (
193 |     echo Service stopped successfully!
194 | ) else (
195 |     echo Failed to stop service. Run as Administrator if needed.
196 | )
197 | pause
198 | ''')
199 |     
200 |     # Status batch file
201 |     status_script = scripts_dir / 'service_status.bat'
202 |     with open(status_script, 'w') as f:
203 |         f.write(f'''@echo off
204 | echo Checking {SERVICE_DISPLAY_NAME} status...
205 | sc query {SERVICE_NAME}
206 | pause
207 | ''')
208 |     
209 |     # Uninstall batch file
210 |     uninstall_script = scripts_dir / 'uninstall_service.bat'
211 |     with open(uninstall_script, 'w') as f:
212 |         f.write(f'''@echo off
213 | echo This will uninstall {SERVICE_DISPLAY_NAME}.
214 | echo.
215 | set /p confirm="Are you sure? (Y/N): "
216 | if /i "%confirm%" neq "Y" exit /b
217 | 
218 | echo Stopping service...
219 | net stop {SERVICE_NAME} 2>nul
220 | 
221 | echo Uninstalling service...
222 | python "{paths['scripts_dir'] / 'install_windows_service.py'}" --uninstall
223 | 
224 | pause
225 | ''')
226 |     
227 |     return scripts_dir
228 | 
229 | 
230 | def install_service():
231 |     """Install the Windows service."""
232 |     # Check if pywin32 is installed
233 |     try:
234 |         import win32serviceutil
235 |         import win32service
236 |     except ImportError:
237 |         print("\n❌ ERROR: pywin32 is required for Windows service installation")
238 |         print("Please install it with: pip install pywin32")
239 |         sys.exit(1)
240 |     
241 |     # Require administrator privileges
242 |     require_admin("Administrator privileges are required to install Windows services")
243 |     
244 |     print("\n🔍 Checking dependencies...")
245 |     deps_ok, deps_msg = check_dependencies()
246 |     if not deps_ok:
247 |         print(f"❌ {deps_msg}")
248 |         sys.exit(1)
249 |     print(f"✅ {deps_msg}")
250 |     
251 |     # Generate API key
252 |     api_key = generate_api_key()
253 |     print(f"\n🔑 Generated API key: {api_key}")
254 |     
255 |     # Create service configuration
256 |     config = {
257 |         'service_name': SERVICE_NAME,
258 |         'api_key': api_key,
259 |         'command': get_service_command(),
260 |         'environment': get_service_environment()
261 |     }
262 |     config['environment']['MCP_API_KEY'] = api_key
263 |     
264 |     # Save configuration
265 |     config_file = save_service_config(config)
266 |     print(f"💾 Saved configuration to: {config_file}")
267 |     
268 |     # Create service wrapper script
269 |     print("\n📝 Creating service wrapper...")
270 |     service_script = create_windows_service_script()
271 |     
272 |     # Install the service using the wrapper
273 |     print(f"\n🚀 Installing {SERVICE_DISPLAY_NAME}...")
274 |     
275 |     try:
276 |         # First, try to stop and remove existing service
277 |         subprocess.run([
278 |             sys.executable, str(service_script), 'stop'
279 |         ], capture_output=True)
280 |         subprocess.run([
281 |             sys.executable, str(service_script), 'remove'
282 |         ], capture_output=True)
283 |         
284 |         # Install the service
285 |         result = subprocess.run([
286 |             sys.executable, str(service_script), 'install'
287 |         ], capture_output=True, text=True)
288 |         
289 |         if result.returncode != 0:
290 |             print(f"❌ Failed to install service: {result.stderr}")
291 |             sys.exit(1)
292 |             
293 |         # Configure service for automatic startup
294 |         subprocess.run([
295 |             'sc', 'config', SERVICE_NAME, 'start=', 'auto'
296 |         ], capture_output=True)
297 |         
298 |         # Set service description
299 |         subprocess.run([
300 |             'sc', 'description', SERVICE_NAME, SERVICE_DESCRIPTION
301 |         ], capture_output=True)
302 |         
303 |         print(f"✅ Service installed successfully!")
304 |         
305 |     except Exception as e:
306 |         print(f"❌ Error installing service: {e}")
307 |         sys.exit(1)
308 |     
309 |     # Create batch scripts
310 |     scripts_dir = create_batch_scripts()
311 |     print(f"\n📁 Created management scripts in: {scripts_dir}")
312 |     
313 |     # Print service information
314 |     platform_info = {
315 |         'Start Service': f'net start {SERVICE_NAME}',
316 |         'Stop Service': f'net stop {SERVICE_NAME}',
317 |         'Service Status': f'sc query {SERVICE_NAME}',
318 |         'Uninstall': f'python "{Path(__file__)}" --uninstall'
319 |     }
320 |     
321 |     print_service_info(api_key, platform_info)
322 |     
323 |     return True
324 | 
325 | 
326 | def uninstall_service():
327 |     """Uninstall the Windows service."""
328 |     require_admin("Administrator privileges are required to uninstall Windows services")
329 |     
330 |     print(f"\n🗑️  Uninstalling {SERVICE_DISPLAY_NAME}...")
331 |     
332 |     paths = get_service_paths()
333 |     service_script = paths['scripts_dir'] / 'mcp_memory_windows_service.py'
334 |     
335 |     if not service_script.exists():
336 |         # Try using sc command directly
337 |         result = subprocess.run([
338 |             'sc', 'delete', SERVICE_NAME
339 |         ], capture_output=True, text=True)
340 |         
341 |         if result.returncode == 0:
342 |             print("✅ Service uninstalled successfully!")
343 |         else:
344 |             print(f"❌ Failed to uninstall service: {result.stderr}")
345 |     else:
346 |         # Stop the service first
347 |         subprocess.run([
348 |             sys.executable, str(service_script), 'stop'
349 |         ], capture_output=True)
350 |         
351 |         # Remove the service
352 |         result = subprocess.run([
353 |             sys.executable, str(service_script), 'remove'
354 |         ], capture_output=True, text=True)
355 |         
356 |         if result.returncode == 0:
357 |             print("✅ Service uninstalled successfully!")
358 |         else:
359 |             print(f"❌ Failed to uninstall service: {result.stderr}")
360 | 
361 | 
362 | def start_service():
363 |     """Start the Windows service."""
364 |     print(f"\n▶️  Starting {SERVICE_DISPLAY_NAME}...")
365 |     
366 |     result = subprocess.run([
367 |         'net', 'start', SERVICE_NAME
368 |     ], capture_output=True, text=True)
369 |     
370 |     if result.returncode == 0:
371 |         print("✅ Service started successfully!")
372 |     else:
373 |         if "already been started" in result.stderr:
374 |             print("ℹ️  Service is already running")
375 |         else:
376 |             print(f"❌ Failed to start service: {result.stderr}")
377 |             print("\n💡 Try running as Administrator if you see access denied errors")
378 | 
379 | 
380 | def stop_service():
381 |     """Stop the Windows service."""
382 |     print(f"\n⏹️  Stopping {SERVICE_DISPLAY_NAME}...")
383 |     
384 |     result = subprocess.run([
385 |         'net', 'stop', SERVICE_NAME
386 |     ], capture_output=True, text=True)
387 |     
388 |     if result.returncode == 0:
389 |         print("✅ Service stopped successfully!")
390 |     else:
391 |         if "is not started" in result.stderr:
392 |             print("ℹ️  Service is not running")
393 |         else:
394 |             print(f"❌ Failed to stop service: {result.stderr}")
395 | 
396 | 
397 | def service_status():
398 |     """Check the Windows service status."""
399 |     print(f"\n📊 {SERVICE_DISPLAY_NAME} Status:")
400 |     print("-" * 40)
401 |     
402 |     result = subprocess.run([
403 |         'sc', 'query', SERVICE_NAME
404 |     ], capture_output=True, text=True)
405 |     
406 |     if result.returncode == 0:
407 |         # Parse the output
408 |         for line in result.stdout.splitlines():
409 |             if "STATE" in line:
410 |                 if "RUNNING" in line:
411 |                     print("✅ Service is RUNNING")
412 |                 elif "STOPPED" in line:
413 |                     print("⏹️  Service is STOPPED")
414 |                 else:
415 |                     print(f"ℹ️  {line.strip()}")
416 |             elif "SERVICE_NAME:" in line:
417 |                 print(f"Service Name: {SERVICE_NAME}")
418 |     else:
419 |         print("❌ Service is not installed")
420 |     
421 |     # Show configuration if available
422 |     config = load_service_config()
423 |     if config:
424 |         print(f"\n📋 Configuration:")
425 |         print(f"  API Key: {config.get('api_key', 'Not set')}")
426 |         print(f"  Config File: {get_service_paths()['config_dir'] / 'service_config.json'}")
427 | 
428 | 
429 | def main():
430 |     """Main entry point."""
431 |     parser = argparse.ArgumentParser(
432 |         description="Windows Service installer for MCP Memory Service"
433 |     )
434 |     
435 |     parser.add_argument('--uninstall', action='store_true', help='Uninstall the service')
436 |     parser.add_argument('--start', action='store_true', help='Start the service')
437 |     parser.add_argument('--stop', action='store_true', help='Stop the service')
438 |     parser.add_argument('--status', action='store_true', help='Check service status')
439 |     parser.add_argument('--restart', action='store_true', help='Restart the service')
440 |     
441 |     args = parser.parse_args()
442 |     
443 |     if args.uninstall:
444 |         uninstall_service()
445 |     elif args.start:
446 |         start_service()
447 |     elif args.stop:
448 |         stop_service()
449 |     elif args.status:
450 |         service_status()
451 |     elif args.restart:
452 |         stop_service()
453 |         start_service()
454 |     else:
455 |         # Default action is to install
456 |         install_service()
457 | 
458 | 
459 | if __name__ == '__main__':
460 |     main()
```

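A short usage sketch for the installer above, assuming it is run from an elevated (Administrator) prompt as `require_admin` requires; the flags are the ones defined in its argument parser.

```bash
# Install the service (the default action) and confirm it is registered.
python scripts/installation/install_windows_service.py
python scripts/installation/install_windows_service.py --status

# Later: restart or remove the service.
python scripts/installation/install_windows_service.py --restart
python scripts/installation/install_windows_service.py --uninstall
```
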
--------------------------------------------------------------------------------
/src/mcp_memory_service/web/api/mcp.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | MCP (Model Context Protocol) endpoints for Claude Code integration.
  3 | 
  4 | This module provides MCP protocol endpoints that allow Claude Code clients
  5 | to directly access memory operations using the MCP standard.
  6 | """
  7 | 
  8 | import asyncio
  9 | import json
 10 | import logging
 11 | from typing import Dict, List, Any, Optional, Union, TYPE_CHECKING
 12 | from fastapi import APIRouter, HTTPException, Request, Depends
 13 | from fastapi.responses import JSONResponse
 14 | from pydantic import BaseModel, ConfigDict
 15 | 
 16 | from ..dependencies import get_storage
 17 | from ...utils.hashing import generate_content_hash
 18 | from ...config import OAUTH_ENABLED
 19 | 
 20 | # Import OAuth dependencies only when needed
 21 | if OAUTH_ENABLED or TYPE_CHECKING:
 22 |     from ..oauth.middleware import require_read_access, require_write_access, AuthenticationResult
 23 | else:
 24 |     # Provide type stubs when OAuth is disabled
 25 |     AuthenticationResult = None
 26 |     require_read_access = None
 27 |     require_write_access = None
 28 | 
 29 | logger = logging.getLogger(__name__)
 30 | 
 31 | router = APIRouter(prefix="/mcp", tags=["mcp"])
 32 | 
 33 | 
 34 | class MCPRequest(BaseModel):
 35 |     """MCP protocol request structure."""
 36 |     jsonrpc: str = "2.0"
 37 |     id: Optional[Union[str, int]] = None
 38 |     method: str
 39 |     params: Optional[Dict[str, Any]] = None
 40 | 
 41 | 
 42 | class MCPResponse(BaseModel):
 43 |     """MCP protocol response structure.
 44 | 
 45 |     Note: JSON-RPC 2.0 spec requires that successful responses EXCLUDE the 'error'
 46 |     field entirely (not include it as null), and error responses EXCLUDE 'result'.
 47 |     The exclude_none config ensures proper compliance.
 48 |     """
 49 |     model_config = ConfigDict(exclude_none=True)
 50 | 
 51 |     jsonrpc: str = "2.0"
 52 |     id: Optional[Union[str, int]] = None
 53 |     result: Optional[Dict[str, Any]] = None
 54 |     error: Optional[Dict[str, Any]] = None
 55 | 
 56 | 
 57 | class MCPTool(BaseModel):
 58 |     """MCP tool definition."""
 59 |     name: str
 60 |     description: str
 61 |     inputSchema: Dict[str, Any]
 62 | 
 63 | 
 64 | # Define MCP tools available
 65 | MCP_TOOLS = [
 66 |     MCPTool(
 67 |         name="store_memory",
 68 |         description="Store a new memory with optional tags, metadata, and client information",
 69 |         inputSchema={
 70 |             "type": "object",
 71 |             "properties": {
 72 |                 "content": {"type": "string", "description": "The memory content to store"},
 73 |                 "tags": {"type": "array", "items": {"type": "string"}, "description": "Optional tags for the memory"},
 74 |                 "memory_type": {"type": "string", "description": "Optional memory type (e.g., 'note', 'reminder', 'fact')"},
 75 |                 "metadata": {"type": "object", "description": "Additional metadata for the memory"},
 76 |                 "client_hostname": {"type": "string", "description": "Client machine hostname for source tracking"}
 77 |             },
 78 |             "required": ["content"]
 79 |         }
 80 |     ),
 81 |     MCPTool(
 82 |         name="retrieve_memory", 
 83 |         description="Search and retrieve memories using semantic similarity",
 84 |         inputSchema={
 85 |             "type": "object",
 86 |             "properties": {
 87 |                 "query": {"type": "string", "description": "Search query for finding relevant memories"},
 88 |                 "limit": {"type": "integer", "description": "Maximum number of memories to return", "default": 10},
 89 |                 "similarity_threshold": {"type": "number", "description": "Minimum similarity score threshold (0.0-1.0)", "default": 0.7, "minimum": 0.0, "maximum": 1.0}
 90 |             },
 91 |             "required": ["query"]
 92 |         }
 93 |     ),
 94 |     MCPTool(
 95 |         name="recall_memory",
 96 |         description="Retrieve memories using natural language time expressions and optional semantic search",
 97 |         inputSchema={
 98 |             "type": "object",
 99 |             "properties": {
100 |                 "query": {"type": "string", "description": "Natural language query specifying the time frame or content to recall"},
101 |                 "n_results": {"type": "integer", "description": "Maximum number of results to return", "default": 5}
102 |             },
103 |             "required": ["query"]
104 |         }
105 |     ),
106 |     MCPTool(
107 |         name="search_by_tag",
108 |         description="Search memories by specific tags",
109 |         inputSchema={
110 |             "type": "object", 
111 |             "properties": {
112 |                 "tags": {"type": "array", "items": {"type": "string"}, "description": "Tags to search for"},
113 |                 "operation": {"type": "string", "enum": ["AND", "OR"], "description": "Tag search operation", "default": "AND"}
114 |             },
115 |             "required": ["tags"]
116 |         }
117 |     ),
118 |     MCPTool(
119 |         name="delete_memory",
120 |         description="Delete a specific memory by content hash",
121 |         inputSchema={
122 |             "type": "object",
123 |             "properties": {
124 |                 "content_hash": {"type": "string", "description": "Hash of the memory to delete"}
125 |             },
126 |             "required": ["content_hash"]
127 |         }
128 |     ),
129 |     MCPTool(
130 |         name="check_database_health",
131 |         description="Check the health and status of the memory database",
132 |         inputSchema={
133 |             "type": "object",
134 |             "properties": {}
135 |         }
136 |     ),
137 |     MCPTool(
138 |         name="list_memories",
139 |         description="List memories with pagination and optional filtering",
140 |         inputSchema={
141 |             "type": "object",
142 |             "properties": {
143 |                 "page": {"type": "integer", "description": "Page number (1-based)", "default": 1, "minimum": 1},
144 |                 "page_size": {"type": "integer", "description": "Number of memories per page", "default": 10, "minimum": 1, "maximum": 100},
145 |                 "tag": {"type": "string", "description": "Filter by specific tag"},
146 |                 "memory_type": {"type": "string", "description": "Filter by memory type"}
147 |             }
148 |         }
149 |     ),
150 | ]
151 | 
152 | 
153 | @router.post("/")
154 | @router.post("")
155 | async def mcp_endpoint(
156 |     request: MCPRequest,
157 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
158 | ):
159 |     """Main MCP protocol endpoint for processing MCP requests."""
160 |     try:
161 |         storage = get_storage()
162 | 
163 |         if request.method == "initialize":
164 |             response = MCPResponse(
165 |                 id=request.id,
166 |                 result={
167 |                     "protocolVersion": "2024-11-05",
168 |                     "capabilities": {
169 |                         "tools": {}
170 |                     },
171 |                     "serverInfo": {
172 |                         "name": "mcp-memory-service",
173 |                         "version": "4.1.1"
174 |                     }
175 |                 }
176 |             )
177 |             return JSONResponse(content=response.model_dump(exclude_none=True))
178 | 
179 |         elif request.method == "tools/list":
180 |             response = MCPResponse(
181 |                 id=request.id,
182 |                 result={
183 |                     "tools": [tool.model_dump() for tool in MCP_TOOLS]
184 |                 }
185 |             )
186 |             return JSONResponse(content=response.model_dump(exclude_none=True))
187 | 
188 |         elif request.method == "tools/call":
189 |             tool_name = request.params.get("name") if request.params else None
190 |             arguments = request.params.get("arguments", {}) if request.params else {}
191 | 
192 |             result = await handle_tool_call(storage, tool_name, arguments)
193 | 
194 |             response = MCPResponse(
195 |                 id=request.id,
196 |                 result={
197 |                     "content": [
198 |                         {
199 |                             "type": "text",
200 |                             "text": json.dumps(result)
201 |                         }
202 |                     ]
203 |                 }
204 |             )
205 |             return JSONResponse(content=response.model_dump(exclude_none=True))
206 | 
207 |         else:
208 |             response = MCPResponse(
209 |                 id=request.id,
210 |                 error={
211 |                     "code": -32601,
212 |                     "message": f"Method not found: {request.method}"
213 |                 }
214 |             )
215 |             return JSONResponse(content=response.model_dump(exclude_none=True))
216 | 
217 |     except Exception as e:
218 |         logger.error(f"MCP endpoint error: {e}")
219 |         response = MCPResponse(
220 |             id=request.id,
221 |             error={
222 |                 "code": -32603,
223 |                 "message": f"Internal error: {str(e)}"
224 |             }
225 |         )
226 |         return JSONResponse(content=response.model_dump(exclude_none=True))
227 | 
228 | 
229 | async def handle_tool_call(storage, tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
230 |     """Handle MCP tool calls and route to appropriate memory operations."""
231 |     
232 |     if tool_name == "store_memory":
233 |         from mcp_memory_service.models.memory import Memory
234 |         
235 |         content = arguments.get("content")
236 |         tags = arguments.get("tags", [])
237 |         memory_type = arguments.get("memory_type")
238 |         metadata = arguments.get("metadata", {})
239 |         client_hostname = arguments.get("client_hostname")
240 |         
241 |         # Ensure metadata is a dict
242 |         if isinstance(metadata, str):
243 |             try:
244 |                 metadata = json.loads(metadata)
245 |             except json.JSONDecodeError:
246 |                 metadata = {}
247 |         elif not isinstance(metadata, dict):
248 |             metadata = {}
249 |         
250 |         # Add client_hostname to metadata if provided
251 |         if client_hostname:
252 |             metadata["client_hostname"] = client_hostname
253 |         
254 |         content_hash = generate_content_hash(content, metadata)
255 |         
256 |         memory = Memory(
257 |             content=content,
258 |             content_hash=content_hash,
259 |             tags=tags,
260 |             memory_type=memory_type,
261 |             metadata=metadata
262 |         )
263 |         
264 |         success, message = await storage.store(memory)
265 |         
266 |         return {
267 |             "success": success,
268 |             "message": message,
269 |             "content_hash": memory.content_hash if success else None
270 |         }
271 |     
272 |     elif tool_name == "retrieve_memory":
273 |         query = arguments.get("query")
274 |         limit = arguments.get("limit", 10)
275 |         similarity_threshold = arguments.get("similarity_threshold", 0.0)
276 |         
277 |         # Get results from storage (no similarity filtering at storage level)
278 |         results = await storage.retrieve(query=query, n_results=limit)
279 |         
280 |         # Apply similarity threshold filtering (same as API implementation)
281 |         if similarity_threshold is not None:
282 |             results = [
283 |                 result for result in results
284 |                 if result.relevance_score and result.relevance_score >= similarity_threshold
285 |             ]
286 |         
287 |         return {
288 |             "results": [
289 |                 {
290 |                     "content": r.memory.content,
291 |                     "content_hash": r.memory.content_hash,
292 |                     "tags": r.memory.tags,
293 |                     "similarity_score": r.relevance_score,
294 |                     "created_at": r.memory.created_at_iso
295 |                 }
296 |                 for r in results
297 |             ],
298 |             "total_found": len(results)
299 |         }
300 | 
301 |     elif tool_name == "recall_memory":
302 |         query = arguments.get("query")
303 |         n_results = arguments.get("n_results", 5)
304 | 
305 |         # Use storage recall_memory method which handles time expressions
306 |         memories = await storage.recall_memory(query=query, n_results=n_results)
307 | 
308 |         return {
309 |             "results": [
310 |                 {
311 |                     "content": m.content,
312 |                     "content_hash": m.content_hash,
313 |                     "tags": m.tags,
314 |                     "created_at": m.created_at_iso
315 |                 }
316 |                 for m in memories
317 |             ],
318 |             "total_found": len(memories)
319 |         }
320 | 
321 |     elif tool_name == "search_by_tag":
322 |         tags = arguments.get("tags")
323 |         operation = arguments.get("operation", "AND")
324 |         
325 |         results = await storage.search_by_tags(tags=tags, operation=operation)
326 |         
327 |         return {
328 |             "results": [
329 |                 {
330 |                     "content": memory.content,
331 |                     "content_hash": memory.content_hash,
332 |                     "tags": memory.tags,
333 |                     "created_at": memory.created_at_iso
334 |                 }
335 |                 for memory in results
336 |             ],
337 |             "total_found": len(results)
338 |         }
339 |     
340 |     elif tool_name == "delete_memory":
341 |         content_hash = arguments.get("content_hash")
342 |         
343 |         success, message = await storage.delete(content_hash)
344 |         
345 |         return {
346 |             "success": success,
347 |             "message": message
348 |         }
349 |     
350 |     elif tool_name == "check_database_health":
351 |         stats = await storage.get_stats()
352 | 
353 |         return {
354 |             "status": "healthy",
355 |             "statistics": stats
356 |         }
357 |     
358 |     elif tool_name == "list_memories":
359 |         page = arguments.get("page", 1)
360 |         page_size = arguments.get("page_size", 10)
361 |         tag = arguments.get("tag")
362 |         memory_type = arguments.get("memory_type")
363 |         
364 |         # Calculate offset
365 |         offset = (page - 1) * page_size
366 | 
367 |         # Use database-level filtering for better performance
368 |         tags_list = [tag] if tag else None
369 |         memories = await storage.get_all_memories(
370 |             limit=page_size,
371 |             offset=offset,
372 |             memory_type=memory_type,
373 |             tags=tags_list
374 |         )
375 |         
376 |         return {
377 |             "memories": [
378 |                 {
379 |                     "content": memory.content,
380 |                     "content_hash": memory.content_hash,
381 |                     "tags": memory.tags,
382 |                     "memory_type": memory.memory_type,
383 |                     "metadata": memory.metadata,
384 |                     "created_at": memory.created_at_iso,
385 |                     "updated_at": memory.updated_at_iso
386 |                 }
387 |                 for memory in memories
388 |             ],
389 |             "page": page,
390 |             "page_size": page_size,
391 |             "total_found": len(memories)
392 |         }
393 |     
394 |     
395 |     else:
396 |         raise ValueError(f"Unknown tool: {tool_name}")
397 | 
398 | 
399 | @router.get("/tools")
400 | async def list_mcp_tools(
401 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
402 | ):
403 |     """List available MCP tools for discovery."""
404 |     return {
405 |         "tools": [tool.model_dump() for tool in MCP_TOOLS],
406 |         "protocol": "mcp",
407 |         "version": "1.0"
408 |     }
409 | 
410 | 
411 | @router.get("/health")
412 | async def mcp_health():
413 |     """MCP-specific health check."""
414 |     storage = get_storage()
415 |     stats = await storage.get_stats()
416 | 
417 |     return {
418 |         "status": "healthy",
419 |         "protocol": "mcp",
420 |         "tools_available": len(MCP_TOOLS),
421 |         "storage_backend": "sqlite-vec",
422 |         "statistics": stats
423 |     }
```

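A hedged request sketch against the `/mcp` endpoint above, assuming the FastAPI app is reachable at `http://localhost:8000` and that OAuth is disabled (host, port, and any auth header are deployment-specific assumptions). The JSON-RPC envelope and the `tools/call` parameters mirror the `MCPRequest` model and tool schemas in the listing.

```bash
# List the available MCP tools.
curl -s -X POST http://localhost:8000/mcp \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}'

# Call retrieve_memory; a successful reply carries result.content[0].text
# with the JSON-encoded search hits.
curl -s -X POST http://localhost:8000/mcp \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "id": 2, "method": "tools/call",
       "params": {"name": "retrieve_memory",
                  "arguments": {"query": "authentication decision", "limit": 5}}}'
```
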
--------------------------------------------------------------------------------
/src/mcp_memory_service/web/api/search.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Search endpoints for the HTTP interface.
 17 | 
 18 | Provides semantic search, tag-based search, and time-based recall functionality.
 19 | """
 20 | 
 21 | import logging
 22 | from typing import List, Optional, Dict, Any, TYPE_CHECKING
 23 | from datetime import datetime, timedelta, timezone
 24 | 
 25 | from fastapi import APIRouter, HTTPException, Depends, Query
 26 | from pydantic import BaseModel, Field
 27 | 
 28 | from ...storage.base import MemoryStorage
 29 | from ...models.memory import Memory, MemoryQueryResult
 30 | from ...config import OAUTH_ENABLED
 31 | from ...utils.time_parser import parse_time_expression
 32 | from ..dependencies import get_storage
 33 | from .memories import MemoryResponse, memory_to_response
 34 | from ..sse import sse_manager, create_search_completed_event
 35 | 
 36 | # Constants
 37 | _TIME_SEARCH_CANDIDATE_POOL_SIZE = 100  # Number of candidates to retrieve for time filtering (reduced for performance)
 38 | 
 39 | # OAuth authentication imports (conditional)
 40 | if OAUTH_ENABLED or TYPE_CHECKING:
 41 |     from ..oauth.middleware import require_read_access, AuthenticationResult
 42 | else:
 43 |     # Provide type stubs when OAuth is disabled
 44 |     AuthenticationResult = None
 45 |     require_read_access = None
 46 | 
 47 | router = APIRouter()
 48 | logger = logging.getLogger(__name__)
 49 | 
 50 | 
 51 | # Request Models
 52 | class SemanticSearchRequest(BaseModel):
 53 |     """Request model for semantic similarity search."""
 54 |     query: str = Field(..., description="The search query for semantic similarity")
 55 |     n_results: int = Field(default=10, ge=1, le=100, description="Maximum number of results to return")
 56 |     similarity_threshold: Optional[float] = Field(None, ge=0.0, le=1.0, description="Minimum similarity score")
 57 | 
 58 | 
 59 | class TagSearchRequest(BaseModel):
 60 |     """Request model for tag-based search."""
 61 |     tags: List[str] = Field(..., description="List of tags to search for (ANY match)")
 62 |     match_all: bool = Field(default=False, description="If true, memory must have ALL tags; if false, ANY tag")
 63 |     time_filter: Optional[str] = Field(None, description="Optional natural language time filter (e.g., 'last week', 'yesterday')")
 64 | 
 65 | 
 66 | class TimeSearchRequest(BaseModel):
 67 |     """Request model for time-based search."""
 68 |     query: str = Field(..., description="Natural language time query (e.g., 'last week', 'yesterday')")
 69 |     n_results: int = Field(default=10, ge=1, le=100, description="Maximum number of results to return")
 70 |     semantic_query: Optional[str] = Field(None, description="Optional semantic query for relevance filtering within time range")
 71 | 
 72 | 
 73 | # Response Models
 74 | class SearchResult(BaseModel):
 75 |     """Individual search result with similarity score."""
 76 |     memory: MemoryResponse
 77 |     similarity_score: Optional[float] = Field(None, description="Similarity score (0-1, higher is more similar)")
 78 |     relevance_reason: Optional[str] = Field(None, description="Why this result was included")
 79 | 
 80 | 
 81 | class SearchResponse(BaseModel):
 82 |     """Response model for search operations."""
 83 |     results: List[SearchResult]
 84 |     total_found: int
 85 |     query: str
 86 |     search_type: str
 87 |     processing_time_ms: Optional[float] = None
 88 | 
 89 | 
 90 | def memory_query_result_to_search_result(query_result: MemoryQueryResult) -> SearchResult:
 91 |     """Convert MemoryQueryResult to SearchResult format."""
 92 |     return SearchResult(
 93 |         memory=memory_to_response(query_result.memory),
 94 |         similarity_score=query_result.relevance_score,
 95 |         relevance_reason=f"Semantic similarity: {query_result.relevance_score:.3f}" if query_result.relevance_score else None
 96 |     )
 97 | 
 98 | 
 99 | def memory_to_search_result(memory: Memory, reason: str = None) -> SearchResult:
100 |     """Convert Memory to SearchResult format."""
101 |     return SearchResult(
102 |         memory=memory_to_response(memory),
103 |         similarity_score=None,
104 |         relevance_reason=reason
105 |     )
106 | 
107 | 
108 | @router.post("/search", response_model=SearchResponse, tags=["search"])
109 | async def semantic_search(
110 |     request: SemanticSearchRequest,
111 |     storage: MemoryStorage = Depends(get_storage),
112 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
113 | ):
114 |     """
115 |     Perform semantic similarity search on memory content.
116 |     
117 |     Uses vector embeddings to find memories with similar meaning to the query,
118 |     even if they don't share exact keywords.
119 |     """
120 |     import time
121 |     start_time = time.time()
122 |     
123 |     try:
124 |         # Perform semantic search using the storage layer
125 |         query_results = await storage.retrieve(
126 |             query=request.query,
127 |             n_results=request.n_results
128 |         )
129 |         
130 |         # Filter by similarity threshold if specified
131 |         if request.similarity_threshold is not None:
132 |             query_results = [
133 |                 result for result in query_results
134 |                 if result.relevance_score and result.relevance_score >= request.similarity_threshold
135 |             ]
136 |         
137 |         # Convert to search results
138 |         search_results = [
139 |             memory_query_result_to_search_result(result)
140 |             for result in query_results
141 |         ]
142 |         
143 |         processing_time = (time.time() - start_time) * 1000
144 |         
145 |         # Broadcast SSE event for search completion
146 |         try:
147 |             event = create_search_completed_event(
148 |                 query=request.query,
149 |                 search_type="semantic",
150 |                 results_count=len(search_results),
151 |                 processing_time_ms=processing_time
152 |             )
153 |             await sse_manager.broadcast_event(event)
154 |         except Exception as e:
155 |             logger.warning(f"Failed to broadcast search_completed event: {e}")
156 |         
157 |         return SearchResponse(
158 |             results=search_results,
159 |             total_found=len(search_results),
160 |             query=request.query,
161 |             search_type="semantic",
162 |             processing_time_ms=processing_time
163 |         )
164 |         
165 |     except Exception as e:
166 |         logger.error(f"Semantic search failed: {str(e)}")
167 |         raise HTTPException(status_code=500, detail="Search operation failed. Please try again.")
168 | 
169 | 
170 | @router.post("/search/by-tag", response_model=SearchResponse, tags=["search"])
171 | async def tag_search(
172 |     request: TagSearchRequest,
173 |     storage: MemoryStorage = Depends(get_storage),
174 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
175 | ):
176 |     """
177 |     Search memories by tags with optional time filtering.
178 | 
179 |     Finds memories that contain any of the specified tags (OR search) or
180 |     all of the specified tags (AND search) based on the match_all parameter.
181 | 
182 |     Optionally filters by time range using natural language expressions like
183 |     'last week', 'yesterday', 'this month', etc.
184 |     """
185 |     import time
186 |     start_time = time.time()
187 | 
188 |     try:
189 |         if not request.tags:
190 |             raise HTTPException(status_code=400, detail="At least one tag must be specified")
191 | 
192 |         # Parse time filter if provided
193 |         time_start = None
194 |         if request.time_filter:
195 |             start_ts, _ = parse_time_expression(request.time_filter)
196 |             time_start = start_ts if start_ts else None
197 | 
198 |         # Use the storage layer's tag search with optional time filtering
199 |         memories = await storage.search_by_tag(request.tags, time_start=time_start)
200 | 
201 |         # If match_all is True, filter to only memories that have ALL tags
202 |         if request.match_all and len(request.tags) > 1:
203 |             tag_set = set(request.tags)
204 |             memories = [
205 |                 memory for memory in memories
206 |                 if tag_set.issubset(set(memory.tags))
207 |             ]
208 | 
209 |         # Convert to search results
210 |         match_type = "ALL" if request.match_all else "ANY"
211 |         search_results = [
212 |             memory_to_search_result(
213 |                 memory,
214 |                 reason=f"Tags match ({match_type}): {', '.join(set(memory.tags) & set(request.tags))}"
215 |             )
216 |             for memory in memories
217 |         ]
218 | 
219 |         processing_time = (time.time() - start_time) * 1000
220 | 
221 |         # Build query string with time filter info if present
222 |         query_string = f"Tags: {', '.join(request.tags)} ({match_type})"
223 |         if request.time_filter:
224 |             query_string += f" | Time: {request.time_filter}"
225 | 
226 |         # Broadcast SSE event for search completion
227 |         try:
228 |             event = create_search_completed_event(
229 |                 query=query_string,
230 |                 search_type="tag",
231 |                 results_count=len(search_results),
232 |                 processing_time_ms=processing_time
233 |             )
234 |             await sse_manager.broadcast_event(event)
235 |         except Exception as e:
236 |             logger.warning(f"Failed to broadcast search_completed event: {e}")
237 | 
238 |         return SearchResponse(
239 |             results=search_results,
240 |             total_found=len(search_results),
241 |             query=query_string,
242 |             search_type="tag",
243 |             processing_time_ms=processing_time
244 |         )
245 | 
246 |     except HTTPException:
247 |         raise
248 |     except Exception as e:
249 |         raise HTTPException(status_code=500, detail=f"Tag search failed: {str(e)}")
250 | 
251 | 
252 | @router.post("/search/by-time", response_model=SearchResponse, tags=["search"])
253 | async def time_search(
254 |     request: TimeSearchRequest,
255 |     storage: MemoryStorage = Depends(get_storage),
256 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
257 | ):
258 |     """
259 |     Search memories by time-based queries.
260 |     
261 |     Supports natural language time expressions like 'yesterday', 'last week',
262 |     'this month', etc. Currently implements basic time filtering - full natural
263 |     language parsing can be enhanced later.
264 |     """
265 |     import time
266 |     start_time = time.time()
267 |     
268 |     try:
269 |         # Parse time query using robust time_parser
270 |         start_ts, end_ts = parse_time_expression(request.query)
271 | 
272 |         if start_ts is None and end_ts is None:
273 |             raise HTTPException(
274 |                 status_code=400,
275 |                 detail=f"Could not parse time query: '{request.query}'. Try 'yesterday', 'last week', 'this month', etc."
276 |             )
277 | 
278 |         # Retrieve memories within time range (with larger candidate pool if semantic query provided)
279 |         candidate_pool_size = _TIME_SEARCH_CANDIDATE_POOL_SIZE if request.semantic_query else request.n_results
280 |         query_results = await storage.recall(
281 |             query=request.semantic_query.strip() if request.semantic_query and request.semantic_query.strip() else None,
282 |             n_results=candidate_pool_size,
283 |             start_timestamp=start_ts,
284 |             end_timestamp=end_ts
285 |         )
286 | 
287 |         # If semantic query was provided, results are already ranked by relevance
288 |         # Otherwise, sort by recency (newest first)
289 |         if not (request.semantic_query and request.semantic_query.strip()):
290 |             query_results.sort(key=lambda r: r.memory.created_at or 0.0, reverse=True)
291 | 
292 |         # Limit results
293 |         filtered_memories = query_results[:request.n_results]
294 |         
295 |         # Convert to search results
296 |         search_results = [
297 |             memory_query_result_to_search_result(result)
298 |             for result in filtered_memories
299 |         ]
300 |         
301 |         # Update relevance reason for time-based results
302 |         for result in search_results:
303 |             result.relevance_reason = f"Time match: {request.query}"
304 |         
305 |         processing_time = (time.time() - start_time) * 1000
306 |         
307 |         return SearchResponse(
308 |             results=search_results,
309 |             total_found=len(search_results),
310 |             query=request.query,
311 |             search_type="time",
312 |             processing_time_ms=processing_time
313 |         )
314 |         
315 |     except HTTPException:
316 |         raise
317 |     except Exception as e:
318 |         raise HTTPException(status_code=500, detail=f"Time search failed: {str(e)}")
319 | 
320 | 
321 | @router.get("/search/similar/{content_hash}", response_model=SearchResponse, tags=["search"])
322 | async def find_similar(
323 |     content_hash: str,
324 |     n_results: int = Query(default=10, ge=1, le=100, description="Number of similar memories to find"),
325 |     storage: MemoryStorage = Depends(get_storage),
326 |     user: AuthenticationResult = Depends(require_read_access) if OAUTH_ENABLED else None
327 | ):
328 |     """
329 |     Find memories similar to a specific memory identified by its content hash.
330 |     
331 |     Uses the content of the specified memory as a search query to find
332 |     semantically similar memories.
333 |     """
334 |     import time
335 |     start_time = time.time()
336 |     
337 |     try:
338 |         # First, get the target memory by searching with its hash
339 |         # This is inefficient but works with current storage interface
340 |         target_results = await storage.retrieve(content_hash, n_results=1)
341 |         
342 |         if not target_results or target_results[0].memory.content_hash != content_hash:
343 |             raise HTTPException(status_code=404, detail="Memory not found")
344 |         
345 |         target_memory = target_results[0].memory
346 |         
347 |         # Use the target memory's content to find similar memories
348 |         similar_results = await storage.retrieve(
349 |             query=target_memory.content,
350 |             n_results=n_results + 1  # +1 because the original will be included
351 |         )
352 |         
353 |         # Filter out the original memory
354 |         filtered_results = [
355 |             result for result in similar_results
356 |             if result.memory.content_hash != content_hash
357 |         ][:n_results]
358 |         
359 |         # Convert to search results
360 |         search_results = [
361 |             memory_query_result_to_search_result(result)
362 |             for result in filtered_results
363 |         ]
364 |         
365 |         processing_time = (time.time() - start_time) * 1000
366 |         
367 |         return SearchResponse(
368 |             results=search_results,
369 |             total_found=len(search_results),
370 |             query=f"Similar to: {target_memory.content[:50]}...",
371 |             search_type="similar",
372 |             processing_time_ms=processing_time
373 |         )
374 |         
375 |     except HTTPException:
376 |         raise
377 |     except Exception as e:
378 |         raise HTTPException(status_code=500, detail=f"Similar search failed: {str(e)}")
379 | 
```

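A comparable sketch for the search routes above. The router itself declares no prefix, so the mount point is an assumption here (shown as `/api`, with `http://localhost:8000` as an illustrative host); the request fields match `SemanticSearchRequest` and `TagSearchRequest` in the listing.

```bash
# Semantic search with a similarity cutoff.
curl -s -X POST http://localhost:8000/api/search \
  -H "Content-Type: application/json" \
  -d '{"query": "database migration plan", "n_results": 5, "similarity_threshold": 0.7}'

# Tag search requiring all tags, limited to the last week.
curl -s -X POST http://localhost:8000/api/search/by-tag \
  -H "Content-Type: application/json" \
  -d '{"tags": ["architecture", "auth"], "match_all": true, "time_filter": "last week"}'
```
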
--------------------------------------------------------------------------------
/docs/natural-memory-triggers/cli-reference.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Natural Memory Triggers v7.1.3 - CLI Reference
  2 | 
  3 | Complete reference for the CLI management system that provides real-time configuration and monitoring of Natural Memory Triggers without requiring file edits or Claude Code restarts.
  4 | 
  5 | ## Overview
  6 | 
  7 | The CLI controller (`memory-mode-controller.js`) is the primary interface for managing Natural Memory Triggers. It provides:
  8 | 
  9 | - ✅ **Real-time configuration** changes without restart
 10 | - ✅ **Performance monitoring** and metrics
 11 | - ✅ **Profile management** for different workflows
 12 | - ✅ **Sensitivity tuning** for trigger frequency
 13 | - ✅ **System diagnostics** and health checks
 14 | 
 15 | ## Command Syntax
 16 | 
 17 | ```bash
 18 | node ~/.claude/hooks/memory-mode-controller.js <command> [options] [arguments]
 19 | ```
 20 | 
 21 | ## Core Commands
 22 | 
 23 | ### `status` - System Status and Information
 24 | 
 25 | Display current system status, configuration, and performance metrics.
 26 | 
 27 | ```bash
 28 | node memory-mode-controller.js status
 29 | ```
 30 | 
 31 | **Output Example:**
 32 | ```
 33 | 📊 Memory Hook Status
 34 | Current Profile: balanced
 35 | Description: Moderate latency, smart memory triggers
 36 | Natural Triggers: enabled
 37 | Sensitivity: 0.6
 38 | Cooldown Period: 30000ms
 39 | Max Memories per Trigger: 5
 40 | Performance: 145ms avg latency, 2 degradation events
 41 | Cache Size: 12 entries
 42 | Conversation History: 8 messages
 43 | ```
 44 | 
 45 | **Options:**
 46 | - `--verbose` - Show detailed performance metrics
 47 | - `--json` - Output in JSON format for scripting
 48 | 
 49 | ```bash
 50 | node memory-mode-controller.js status --verbose
 51 | node memory-mode-controller.js status --json
 52 | ```
 53 | 
 54 | ### `profiles` - List Available Performance Profiles
 55 | 
 56 | Display all available performance profiles with descriptions and configurations.
 57 | 
 58 | ```bash
 59 | node memory-mode-controller.js profiles
 60 | ```
 61 | 
 62 | **Output:**
 63 | ```
 64 | 📋 Available Performance Profiles
 65 | 
 66 | 🏃 speed_focused
 67 |   Max Latency: 100ms
 68 |   Enabled Tiers: instant
 69 |   Description: Fastest response, minimal memory awareness
 70 | 
 71 | ⚖️ balanced (current)
 72 |   Max Latency: 200ms
 73 |   Enabled Tiers: instant, fast
 74 |   Description: Moderate latency, smart memory triggers
 75 | 
 76 | 🧠 memory_aware
 77 |   Max Latency: 500ms
 78 |   Enabled Tiers: instant, fast, intensive
 79 |   Description: Full memory awareness, accept higher latency
 80 | 
 81 | 🤖 adaptive
 82 |   Max Latency: auto-adjusting
 83 |   Enabled Tiers: dynamic
 84 |   Description: Auto-adjust based on performance and user preferences
 85 | ```
 86 | 
 87 | ### `profile` - Switch Performance Profile
 88 | 
 89 | Change the active performance profile for different workflow requirements.
 90 | 
 91 | ```bash
 92 | node memory-mode-controller.js profile <profile_name>
 93 | ```
 94 | 
 95 | **Available Profiles:**
 96 | 
 97 | #### `speed_focused` - Maximum Speed
 98 | ```bash
 99 | node memory-mode-controller.js profile speed_focused
100 | ```
101 | - **Latency**: < 100ms
102 | - **Tiers**: Instant only (pattern matching, cache checks)
103 | - **Use Case**: Quick coding sessions, pair programming
104 | - **Trade-off**: Minimal memory awareness for maximum speed
105 | 
106 | #### `balanced` - Recommended Default
107 | ```bash
108 | node memory-mode-controller.js profile balanced
109 | ```
110 | - **Latency**: < 200ms
111 | - **Tiers**: Instant + Fast (semantic analysis)
112 | - **Use Case**: General development work, most productive for daily use
113 | - **Trade-off**: Good balance of speed and context awareness
114 | 
115 | #### `memory_aware` - Maximum Context
116 | ```bash
117 | node memory-mode-controller.js profile memory_aware
118 | ```
119 | - **Latency**: < 500ms
120 | - **Tiers**: All tiers (deep semantic understanding)
121 | - **Use Case**: Complex projects, architectural decisions, research
122 | - **Trade-off**: Maximum context awareness, higher latency acceptable
123 | 
124 | #### `adaptive` - Machine Learning
125 | ```bash
126 | node memory-mode-controller.js profile adaptive
127 | ```
128 | - **Latency**: Auto-adjusting based on usage patterns
129 | - **Tiers**: Dynamic selection based on user feedback
130 | - **Use Case**: Users who want the system to learn automatically
131 | - **Trade-off**: Requires learning period but becomes highly personalized
132 | 
133 | ### `sensitivity` - Adjust Trigger Sensitivity
134 | 
135 | Control how often Natural Memory Triggers activate by adjusting the confidence threshold.
136 | 
137 | ```bash
138 | node memory-mode-controller.js sensitivity <value>
139 | ```
140 | 
141 | **Sensitivity Values:**
142 | - `0.0` - Maximum triggers (activates on any potential memory-seeking pattern)
143 | - `0.4` - High sensitivity (more triggers, useful for research/architecture work)
144 | - `0.6` - Balanced (recommended default)
145 | - `0.8` - Low sensitivity (fewer triggers, high-confidence only)
146 | - `1.0` - Minimum triggers (only explicit memory requests)
147 | 
148 | **Examples:**
149 | ```bash
150 | # More triggers for architecture work
151 | node memory-mode-controller.js sensitivity 0.4
152 | 
153 | # Balanced triggers (recommended)
154 | node memory-mode-controller.js sensitivity 0.6
155 | 
156 | # Fewer triggers for focused coding
157 | node memory-mode-controller.js sensitivity 0.8
158 | ```
159 | 
160 | ## System Management Commands
161 | 
162 | ### `enable` - Enable Natural Memory Triggers
163 | 
164 | Activate the Natural Memory Triggers system.
165 | 
166 | ```bash
167 | node memory-mode-controller.js enable
168 | ```
169 | 
170 | **Output:**
171 | ```
172 | ✅ Natural Memory Triggers enabled
173 | Current sensitivity: 0.6
174 | Active profile: balanced
175 | Ready to detect memory-seeking patterns
176 | ```
177 | 
178 | ### `disable` - Disable Natural Memory Triggers
179 | 
180 | Temporarily disable the Natural Memory Triggers system without uninstalling.
181 | 
182 | ```bash
183 | node memory-mode-controller.js disable
184 | ```
185 | 
186 | **Output:**
187 | ```
188 | ⏸️ Natural Memory Triggers disabled
189 | Manual memory commands still available
190 | Use 'enable' to reactivate triggers
191 | ```
192 | 
193 | ### `reset` - Reset to Default Settings
194 | 
195 | Reset all configuration to default values.
196 | 
197 | ```bash
198 | node memory-mode-controller.js reset
199 | ```
200 | 
201 | **What gets reset:**
202 | - Performance profile → `balanced`
203 | - Sensitivity → `0.6`
204 | - Natural triggers → `enabled`
205 | - Cooldown period → `30000ms`
206 | - Max memories per trigger → `5`
207 | 
208 | **Confirmation prompt:**
209 | ```
210 | ⚠️ This will reset all Natural Memory Triggers settings to defaults.
211 | Are you sure? (y/N): y
212 | ✅ Settings reset to defaults
213 | ```
214 | 
215 | **Options:**
216 | - `--force` - Skip confirmation prompt
217 | 
218 | ```bash
219 | node memory-mode-controller.js reset --force
220 | ```
221 | 
222 | ## Testing and Diagnostics
223 | 
224 | ### `test` - Test Trigger Detection
225 | 
226 | Test the trigger detection system with a specific query to see how it would be processed.
227 | 
228 | ```bash
229 | node memory-mode-controller.js test "your test query"
230 | ```
231 | 
232 | **Example:**
233 | ```bash
234 | node memory-mode-controller.js test "What did we decide about authentication?"
235 | ```
236 | 
237 | **Output:**
238 | ```
239 | 🧪 Testing Natural Memory Triggers
240 | 
241 | Query: "What did we decide about authentication?"
242 | Processing tiers: instant → fast → intensive
243 | 
244 | Tier 1 (Instant): 42ms
245 |   - Pattern match: ✅ "what...decide" detected
246 |   - Cache check: ❌ No cached result
247 |   - Confidence: 0.85
248 | 
249 | Tier 2 (Fast): 127ms
250 |   - Key phrases: ["decide", "authentication"]
251 |   - Topic shift: 0.2 (moderate)
252 |   - Question pattern: ✅ Detected
253 |   - Confidence: 0.78
254 | 
255 | Memory Query Generated:
256 |   - Type: recent-development
257 |   - Query: "authentication decision approach implementation"
258 |   - Weight: 1.0
259 | 
260 | Result: Would trigger memory retrieval (confidence 0.85 > threshold 0.6)
261 | ```
262 | 
263 | ### `metrics` - Performance Metrics
264 | 
265 | Display detailed performance metrics and system health information.
266 | 
267 | ```bash
268 | node memory-mode-controller.js metrics
269 | ```
270 | 
271 | **Output:**
272 | ```
273 | 📊 Natural Memory Triggers Performance Metrics
274 | 
275 | System Performance:
276 |   - Active Profile: balanced
277 |   - Average Latency: 145ms
278 |   - Degradation Events: 2
279 |   - User Tolerance: 0.7
280 | 
281 | Tier Performance:
282 |   - Instant Tier: 47ms avg (120 calls)
283 |   - Fast Tier: 142ms avg (89 calls)
284 |   - Intensive Tier: 387ms avg (23 calls)
285 | 
286 | Trigger Statistics:
287 |   - Total Triggers: 45
288 |   - Success Rate: 89%
289 |   - False Positives: 5%
290 |   - User Satisfaction: 87%
291 | 
292 | Cache Performance:
293 |   - Cache Size: 15 entries
294 |   - Hit Rate: 34%
295 |   - Average Hit Time: 3ms
296 | 
297 | Memory Service:
298 |   - Connection Status: ✅ Connected
299 |   - Average Response: 89ms
300 |   - Error Rate: 0%
301 | ```
302 | 
303 | ### `health` - System Health Check
304 | 
305 | Perform comprehensive health check of all system components.
306 | 
307 | ```bash
308 | node memory-mode-controller.js health
309 | ```
310 | 
311 | **Output:**
312 | ```
313 | 🏥 Natural Memory Triggers Health Check
314 | 
315 | Core Components:
316 |   ✅ TieredConversationMonitor loaded
317 |   ✅ PerformanceManager initialized
318 |   ✅ GitAnalyzer functional
319 |   ✅ MCP Client connected
320 | 
321 | Configuration:
322 |   ✅ config.json syntax valid
323 |   ✅ naturalTriggers section present
324 |   ✅ performance profiles configured
325 |   ✅ memory service endpoint accessible
326 | 
327 | Dependencies:
328 |   ✅ Node.js version compatible (v18.17.0)
329 |   ✅ Required packages available
330 |   ✅ File permissions correct
331 | 
332 | Memory Service Integration:
333 |   ✅ Connection established
334 |   ✅ Authentication valid
335 |   ✅ API responses normal
336 |   ⚠️ High response latency (245ms)
337 | 
338 | Git Integration:
339 |   ✅ Repository detected
340 |   ✅ Recent commits available
341 |   ✅ Changelog found
342 |   ❌ Branch name unavailable
343 | 
344 | Recommendations:
345 |   - Consider optimizing memory service for faster responses
346 |   - Check git configuration for branch detection
347 | ```
348 | 
349 | ## Advanced Commands
350 | 
351 | ### `config` - Configuration Management
352 | 
353 | View and modify configuration settings directly through the CLI.
354 | 
355 | ```bash
356 | # View current configuration
357 | node memory-mode-controller.js config show
358 | 
359 | # Get specific setting
360 | node memory-mode-controller.js config get naturalTriggers.triggerThreshold
361 | 
362 | # Set specific setting
363 | node memory-mode-controller.js config set naturalTriggers.cooldownPeriod 45000
364 | ```
365 | 
366 | ### `cache` - Cache Management
367 | 
368 | Manage the semantic analysis cache.
369 | 
370 | ```bash
371 | # View cache statistics
372 | node memory-mode-controller.js cache stats
373 | 
374 | # Clear cache
375 | node memory-mode-controller.js cache clear
376 | 
377 | # Show cache contents (debug)
378 | node memory-mode-controller.js cache show
379 | ```
380 | 
381 | **Cache Stats Output:**
382 | ```
383 | 💾 Semantic Cache Statistics
384 | 
385 | Size: 18/50 entries
386 | Memory Usage: 2.4KB
387 | Hit Rate: 34% (89/260 requests)
388 | Average Hit Time: 2.8ms
389 | Last Cleanup: 15 minutes ago
390 | 
391 | Most Accessed Patterns:
392 |   1. "what did we decide" (12 hits)
393 |   2. "how did we implement" (8 hits)
394 |   3. "similar to what we" (6 hits)
395 | ```
396 | 
397 | ### `export` - Export Configuration and Metrics
398 | 
399 | Export system configuration and performance data for backup or analysis.
400 | 
401 | ```bash
402 | # Export configuration
403 | node memory-mode-controller.js export config > my-config-backup.json
404 | 
405 | # Export metrics
406 | node memory-mode-controller.js export metrics > performance-report.json
407 | 
408 | # Export full system state
409 | node memory-mode-controller.js export all > system-state.json
410 | ```
411 | 
412 | ### `import` - Import Configuration
413 | 
414 | Import previously exported configuration.
415 | 
416 | ```bash
417 | node memory-mode-controller.js import config my-config-backup.json
418 | ```
419 | 
420 | ## Scripting and Automation
421 | 
422 | ### JSON Output Mode
423 | 
424 | Most commands support `--json` flag for machine-readable output:
425 | 
426 | ```bash
427 | # Get status in JSON format
428 | node memory-mode-controller.js status --json
429 | 
430 | # Example output:
431 | {
432 |   "profile": "balanced",
433 |   "enabled": true,
434 |   "sensitivity": 0.6,
435 |   "performance": {
436 |     "avgLatency": 145,
437 |     "degradationEvents": 2
438 |   },
439 |   "cache": {
440 |     "size": 12,
441 |     "hitRate": 0.34
442 |   }
443 | }
444 | ```
445 | 
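For example, a small wrapper can consume the JSON output and react to it. The sketch below (Python, though any language with a JSON parser works) assumes the controller is invoked from its installation directory as in the examples above, that the field names match the sample output, and uses a purely illustrative 300ms latency threshold:

```python
#!/usr/bin/env python3
"""Sketch: consume `status --json` from a script and react to it."""
import json
import subprocess

# Run the controller and parse its machine-readable output
result = subprocess.run(
    ["node", "memory-mode-controller.js", "status", "--json"],
    capture_output=True, text=True, check=True,
)
status = json.loads(result.stdout)

# Illustrative policy: fall back to the speed_focused profile when latency climbs
if status["performance"]["avgLatency"] > 300:
    subprocess.run(
        ["node", "memory-mode-controller.js", "profile", "speed_focused"],
        check=True,
    )
```
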
446 | ### Batch Operations
447 | 
448 | Run multiple commands in sequence:
449 | 
450 | ```bash
451 | # Setup for architecture work
452 | node memory-mode-controller.js profile memory_aware
453 | node memory-mode-controller.js sensitivity 0.4
454 | 
455 | # Daily development setup
456 | node memory-mode-controller.js profile balanced
457 | node memory-mode-controller.js sensitivity 0.6
458 | 
459 | # Quick coding setup
460 | node memory-mode-controller.js profile speed_focused
461 | node memory-mode-controller.js sensitivity 0.8
462 | ```
463 | 
464 | ### Environment Variables
465 | 
466 | Control CLI behavior with environment variables:
467 | 
468 | ```bash
469 | # Enable debug output
470 | export CLAUDE_HOOKS_DEBUG=true
471 | node memory-mode-controller.js status
472 | 
473 | # Disable colored output
474 | export NO_COLOR=1
475 | node memory-mode-controller.js status
476 | 
477 | # Set alternative config path
478 | export CLAUDE_HOOKS_CONFIG=/path/to/config.json
479 | node memory-mode-controller.js status
480 | ```
481 | 
482 | ## Error Handling and Debugging
483 | 
484 | ### Common Error Messages
485 | 
486 | #### `Configuration Error: Cannot read config file`
487 | **Cause**: Missing or corrupted configuration file
488 | **Solution**:
489 | ```bash
490 | # Check if config exists
491 | ls ~/.claude/hooks/config.json
492 | 
493 | # Validate JSON syntax
494 | cat ~/.claude/hooks/config.json | node -e "console.log(JSON.parse(require('fs').readFileSync(0, 'utf8')))"
495 | 
496 | # Reset to defaults if corrupted
497 | node memory-mode-controller.js reset --force
498 | ```
499 | 
500 | #### `Memory Service Connection Failed`
501 | **Cause**: MCP Memory Service not running or unreachable
502 | **Solution**:
503 | ```bash
504 | # Check memory service status
505 | curl -k https://localhost:8443/api/health
506 | 
507 | # Start memory service
508 | uv run memory server
509 | 
510 | # Check configuration
511 | node memory-mode-controller.js config get memoryService.endpoint
512 | ```
513 | 
514 | #### `Permission Denied`
515 | **Cause**: Incorrect file permissions
516 | **Solution**:
517 | ```bash
518 | # Fix permissions
519 | chmod +x ~/.claude/hooks/memory-mode-controller.js
520 | chmod 644 ~/.claude/hooks/config.json
521 | ```
522 | 
523 | ### Debug Mode
524 | 
525 | Enable verbose debugging:
526 | 
527 | ```bash
528 | export CLAUDE_HOOKS_DEBUG=true
529 | node memory-mode-controller.js status
530 | ```
531 | 
532 | **Debug Output Example:**
533 | ```
534 | [DEBUG] Loading configuration from ~/.claude/hooks/config.json
535 | [DEBUG] Configuration loaded successfully
536 | [DEBUG] Initializing TieredConversationMonitor
537 | [DEBUG] PerformanceManager initialized with profile: balanced
538 | [DEBUG] GitAnalyzer detecting repository context
539 | [DEBUG] MCP Client connecting to https://localhost:8443
540 | [DEBUG] Status command executed successfully
541 | ```
542 | 
543 | ## Integration Examples
544 | 
545 | ### Shell Aliases
546 | 
547 | Add to your `.bashrc` or `.zshrc`:
548 | 
549 | ```bash
550 | # Quick aliases for common operations
551 | alias nmt-status='node ~/.claude/hooks/memory-mode-controller.js status'
552 | alias nmt-balanced='node ~/.claude/hooks/memory-mode-controller.js profile balanced'
553 | alias nmt-speed='node ~/.claude/hooks/memory-mode-controller.js profile speed_focused'
554 | alias nmt-memory='node ~/.claude/hooks/memory-mode-controller.js profile memory_aware'
555 | alias nmt-metrics='node ~/.claude/hooks/memory-mode-controller.js metrics'
556 | ```
557 | 
558 | ### VS Code Integration
559 | 
560 | Create VS Code tasks (`.vscode/tasks.json`):
561 | 
562 | ```json
563 | {
564 |   "version": "2.0.0",
565 |   "tasks": [
566 |     {
567 |       "label": "NMT: Check Status",
568 |       "type": "shell",
569 |       "command": "node ~/.claude/hooks/memory-mode-controller.js status",
570 |       "group": "build",
571 |       "presentation": {
572 |         "echo": true,
573 |         "reveal": "always",
574 |         "focus": false,
575 |         "panel": "shared"
576 |       }
577 |     },
578 |     {
579 |       "label": "NMT: Switch to Memory Aware",
580 |       "type": "shell",
581 |       "command": "node ~/.claude/hooks/memory-mode-controller.js profile memory_aware",
582 |       "group": "build"
583 |     }
584 |   ]
585 | }
586 | ```
587 | 
588 | ### Automated Performance Monitoring
589 | 
590 | Monitor system performance with a cron job:
591 | 
592 | ```bash
593 | # Add to crontab (crontab -e)
594 | # Check metrics every hour and log to file
595 | 0 * * * * node ~/.claude/hooks/memory-mode-controller.js metrics --json >> ~/nmt-metrics.log 2>&1
596 | ```
597 | 
598 | ---
599 | 
600 | The CLI controller provides complete control over Natural Memory Triggers v7.1.3, enabling real-time optimization of your intelligent memory awareness system! 🚀
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/storage/base.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | MCP Memory Service
 17 | Copyright (c) 2024 Heinrich Krupp
 18 | Licensed under the MIT License. See LICENSE file in the project root for full license text.
 19 | """
 20 | import asyncio
 21 | from abc import ABC, abstractmethod
 22 | from typing import List, Optional, Dict, Any, Tuple
 23 | from datetime import datetime, timezone, timedelta
 24 | from ..models.memory import Memory, MemoryQueryResult
 25 | 
 26 | class MemoryStorage(ABC):
 27 |     """Abstract base class for memory storage implementations."""
 28 | 
 29 |     @property
 30 |     @abstractmethod
 31 |     def max_content_length(self) -> Optional[int]:
 32 |         """
 33 |         Maximum content length supported by this storage backend.
 34 | 
 35 |         Returns:
 36 |             Maximum number of characters allowed in memory content, or None for unlimited.
 37 |             This limit is based on the underlying embedding model's token limits.
 38 |         """
 39 |         pass
 40 | 
 41 |     @property
 42 |     @abstractmethod
 43 |     def supports_chunking(self) -> bool:
 44 |         """
 45 |         Whether this backend supports automatic content chunking.
 46 | 
 47 |         Returns:
 48 |             True if the backend can store chunked memories with linking metadata.
 49 |         """
 50 |         pass
 51 | 
 52 |     @abstractmethod
 53 |     async def initialize(self) -> None:
 54 |         """Initialize the storage backend."""
 55 |         pass
 56 |     
 57 |     @abstractmethod
 58 |     async def store(self, memory: Memory) -> Tuple[bool, str]:
 59 |         """Store a memory. Returns (success, message)."""
 60 |         pass
 61 | 
 62 |     async def store_batch(self, memories: List[Memory]) -> List[Tuple[bool, str]]:
 63 |         """
 64 |         Store multiple memories in a single operation.
 65 | 
 66 |         Default implementation calls store() for each memory concurrently using asyncio.gather.
 67 |         Override this method in concrete storage backends to provide true batch operations
 68 |         for improved performance (e.g., single database transaction, bulk network request).
 69 | 
 70 |         Args:
 71 |             memories: List of Memory objects to store
 72 | 
 73 |         Returns:
 74 |             A list of (success, message) tuples, one for each memory in the batch.
 75 |         """
 76 |         if not memories:
 77 |             return []
 78 | 
 79 |         results = await asyncio.gather(
 80 |             *(self.store(memory) for memory in memories),
 81 |             return_exceptions=True
 82 |         )
 83 | 
 84 |         # Process results to handle potential exceptions from gather
 85 |         final_results = []
 86 |         for res in results:
 87 |             if isinstance(res, Exception):
 88 |                 # If a store operation failed with an exception, record it as a failure
 89 |                 final_results.append((False, f"Failed to store memory: {res}"))
 90 |             else:
 91 |                 final_results.append(res)
 92 |         return final_results
 93 |     
 94 |     @abstractmethod
 95 |     async def retrieve(self, query: str, n_results: int = 5) -> List[MemoryQueryResult]:
 96 |         """Retrieve memories by semantic search."""
 97 |         pass
 98 |     
 99 |     @abstractmethod
100 |     async def search_by_tag(self, tags: List[str], time_start: Optional[float] = None) -> List[Memory]:
101 |         """Search memories by tags with optional time filtering.
102 | 
103 |         Args:
104 |             tags: List of tags to search for
105 |             time_start: Optional Unix timestamp (in seconds) to filter memories created after this time
106 | 
107 |         Returns:
108 |             List of Memory objects matching the tag criteria and time filter
109 |         """
110 |         pass
111 | 
112 |     @abstractmethod
113 |     async def search_by_tags(
114 |         self,
115 |         tags: List[str],
116 |         operation: str = "AND",
117 |         time_start: Optional[float] = None,
118 |         time_end: Optional[float] = None
119 |     ) -> List[Memory]:
120 |         """Search memories by tags with AND/OR semantics and time range filtering.
121 | 
122 |         Args:
123 |             tags: List of tag names to search for
124 |             operation: "AND" (all tags must match) or "OR" (any tag matches)
125 |             time_start: Optional Unix timestamp for inclusive range start
126 |             time_end: Optional Unix timestamp for inclusive range end
127 | 
128 |         Returns:
129 |             List of Memory objects matching the criteria
130 |         """
131 |         pass
132 | 
133 |     async def search_by_tag_chronological(self, tags: List[str], limit: Optional[int] = None, offset: int = 0) -> List[Memory]:
134 |         """
135 |         Search memories by tags with chronological ordering (newest first).
136 | 
137 |         Args:
138 |             tags: List of tags to search for
139 |             limit: Maximum number of memories to return (None for all)
140 |             offset: Number of memories to skip (for pagination)
141 | 
142 |         Returns:
143 |             List of Memory objects ordered by created_at DESC
144 |         """
145 |         # Default implementation: use search_by_tag then sort
146 |         memories = await self.search_by_tag(tags)
147 |         memories.sort(key=lambda m: m.created_at or 0, reverse=True)
148 | 
149 |         # Apply pagination
150 |         if offset > 0:
151 |             memories = memories[offset:]
152 |         if limit is not None:
153 |             memories = memories[:limit]
154 | 
155 |         return memories
156 |     
157 |     @abstractmethod
158 |     async def delete(self, content_hash: str) -> Tuple[bool, str]:
159 |         """Delete a memory by its hash."""
160 |         pass
161 | 
162 |     @abstractmethod
163 |     async def get_by_hash(self, content_hash: str) -> Optional[Memory]:
164 |         """
165 |         Get a memory by its content hash using direct O(1) lookup.
166 | 
167 |         Args:
168 |             content_hash: The content hash of the memory to retrieve
169 | 
170 |         Returns:
171 |             Memory object if found, None otherwise
172 |         """
173 |         pass
174 | 
175 |     @abstractmethod
176 |     async def delete_by_tag(self, tag: str) -> Tuple[int, str]:
177 |         """Delete memories by tag. Returns (count_deleted, message)."""
178 |         pass
179 | 
180 |     async def delete_by_tags(self, tags: List[str]) -> Tuple[int, str]:
181 |         """
182 |         Delete memories matching ANY of the given tags.
183 | 
184 |         Default implementation calls delete_by_tag for each tag sequentially.
185 |         Override in concrete implementations for better performance (e.g., single query with OR).
186 | 
187 |         Args:
188 |             tags: List of tags - memories matching ANY tag will be deleted
189 | 
190 |         Returns:
191 |             Tuple of (total_count_deleted, message)
192 |         """
193 |         if not tags:
194 |             return 0, "No tags provided"
195 | 
196 |         total_count = 0
197 |         errors = []
198 | 
199 |         for tag in tags:
200 |             try:
201 |                 count, message = await self.delete_by_tag(tag)
202 |                 total_count += count
203 |                 if "error" in message.lower() or "failed" in message.lower():
204 |                     errors.append(f"{tag}: {message}")
205 |             except Exception as e:
206 |                 errors.append(f"{tag}: {str(e)}")
207 | 
208 |         if errors:
209 |             error_summary = "; ".join(errors[:3])  # Limit error details
210 |             if len(errors) > 3:
211 |                 error_summary += f" (+{len(errors) - 3} more errors)"
212 |             return total_count, f"Deleted {total_count} memories with partial failures: {error_summary}"
213 | 
214 |         return total_count, f"Deleted {total_count} memories across {len(tags)} tag(s)"
215 | 
216 |     @abstractmethod
217 |     async def cleanup_duplicates(self) -> Tuple[int, str]:
218 |         """Remove duplicate memories. Returns (count_removed, message)."""
219 |         pass
220 |     
221 |     @abstractmethod
222 |     async def update_memory_metadata(self, content_hash: str, updates: Dict[str, Any], preserve_timestamps: bool = True) -> Tuple[bool, str]:
223 |         """
224 |         Update memory metadata without recreating the entire memory entry.
225 | 
226 |         Args:
227 |             content_hash: Hash of the memory to update
228 |             updates: Dictionary of metadata fields to update
229 |             preserve_timestamps: Whether to preserve original created_at timestamp
230 | 
231 |         Returns:
232 |             Tuple of (success, message)
233 | 
234 |         Note:
235 |             - Only metadata, tags, and memory_type can be updated
236 |             - Content and content_hash cannot be modified
237 |             - updated_at timestamp is always refreshed
238 |             - created_at is preserved unless preserve_timestamps=False
239 |         """
240 |         pass
241 | 
242 |     async def update_memory(self, memory: Memory) -> bool:
243 |         """
244 |         Update an existing memory with new metadata, tags, and memory_type.
245 | 
246 |         Args:
247 |             memory: Memory object with updated fields
248 | 
249 |         Returns:
250 |             True if update was successful, False otherwise
251 |         """
252 |         updates = {
253 |             'tags': memory.tags,
254 |             'metadata': memory.metadata,
255 |             'memory_type': memory.memory_type
256 |         }
257 |         success, _ = await self.update_memory_metadata(
258 |             memory.content_hash,
259 |             updates,
260 |             preserve_timestamps=True
261 |         )
262 |         return success
263 | 
264 |     async def update_memories_batch(self, memories: List[Memory]) -> List[bool]:
265 |         """
266 |         Update multiple memories in a batch operation.
267 | 
268 |         Default implementation calls update_memory() for each memory concurrently using asyncio.gather.
269 |         Override this method in concrete storage backends to provide true batch operations
270 |         for improved performance (e.g., single database transaction with multiple UPDATEs).
271 | 
272 |         Args:
273 |             memories: List of Memory objects with updated fields
274 | 
275 |         Returns:
276 |             List of success booleans, one for each memory in the batch
277 |         """
278 |         if not memories:
279 |             return []
280 | 
281 |         results = await asyncio.gather(
282 |             *(self.update_memory(memory) for memory in memories),
283 |             return_exceptions=True
284 |         )
285 | 
286 |         # Process results to handle potential exceptions from gather
287 |         final_results = []
288 |         for res in results:
289 |             if isinstance(res, Exception):
290 |                 final_results.append(False)
291 |             else:
292 |                 final_results.append(res)
293 |         return final_results
294 |     
295 |     async def get_stats(self) -> Dict[str, Any]:
296 |         """Get storage statistics. Override for specific implementations."""
297 |         return {
298 |             "total_memories": 0,
299 |             "storage_backend": self.__class__.__name__,
300 |             "status": "operational"
301 |         }
302 |     
303 |     async def get_all_tags(self) -> List[str]:
304 |         """Get all unique tags in the storage. Override for specific implementations."""
305 |         return []
306 |     
307 |     async def get_recent_memories(self, n: int = 10) -> List[Memory]:
308 |         """Get n most recent memories. Override for specific implementations."""
309 |         return []
310 |     
311 |     async def recall_memory(self, query: str, n_results: int = 5) -> List[Memory]:
312 |         """Recall memories based on natural language time expression. Override for specific implementations."""
313 |         # Default implementation just uses regular search
314 |         results = await self.retrieve(query, n_results)
315 |         return [r.memory for r in results]
316 |     
317 |     async def search(self, query: str, n_results: int = 5) -> List[MemoryQueryResult]:
318 |         """Search memories. Default implementation uses retrieve."""
319 |         return await self.retrieve(query, n_results)
320 |     
321 |     async def get_all_memories(self, limit: Optional[int] = None, offset: int = 0, memory_type: Optional[str] = None, tags: Optional[List[str]] = None) -> List[Memory]:
322 |         """
323 |         Get all memories in storage ordered by creation time (newest first).
324 | 
325 |         Args:
326 |             limit: Maximum number of memories to return (None for all)
327 |             offset: Number of memories to skip (for pagination)
328 |             memory_type: Optional filter by memory type
329 |             tags: Optional filter by tags (matches ANY of the provided tags)
330 | 
331 |         Returns:
332 |             List of Memory objects ordered by created_at DESC, optionally filtered by type and tags
333 |         """
334 |         return []
335 |     
336 |     async def count_all_memories(self, memory_type: Optional[str] = None, tags: Optional[List[str]] = None) -> int:
337 |         """
338 |         Get total count of memories in storage.
339 | 
340 |         Args:
341 |             memory_type: Optional filter by memory type
342 |             tags: Optional filter by tags (memories matching ANY of the tags)
343 | 
344 |         Returns:
345 |             Total number of memories, optionally filtered by type and/or tags
346 |         """
347 |         return 0
348 | 
349 |     async def count_memories_by_tag(self, tags: List[str]) -> int:
350 |         """
351 |         Count memories that match any of the given tags.
352 | 
353 |         Args:
354 |             tags: List of tags to search for
355 | 
356 |         Returns:
357 |             Number of memories matching any tag
358 |         """
359 |         # Default implementation: search then count
360 |         memories = await self.search_by_tag(tags)
361 |         return len(memories)
362 | 
363 |     async def get_memories_by_time_range(self, start_time: float, end_time: float) -> List[Memory]:
364 |         """Get memories within a time range. Override for specific implementations."""
365 |         return []
366 |     
367 |     async def get_memory_connections(self) -> Dict[str, int]:
368 |         """Get memory connection statistics. Override for specific implementations."""
369 |         return {}
370 | 
371 |     async def get_access_patterns(self) -> Dict[str, datetime]:
372 |         """Get memory access pattern statistics. Override for specific implementations."""
373 |         return {}
374 | 
375 |     async def get_memory_timestamps(self, days: Optional[int] = None) -> List[float]:
376 |         """
377 |         Get memory creation timestamps only, without loading full memory objects.
378 | 
379 |         This is an optimized method for analytics that only needs timestamps,
380 |         avoiding the overhead of loading full memory content and embeddings.
381 | 
382 |         Args:
383 |             days: Optional filter to only get memories from last N days
384 | 
385 |         Returns:
386 |             List of Unix timestamps (float) in descending order (newest first)
387 |         """
388 |         # Default implementation falls back to get_recent_memories
389 |         # Concrete backends should override with optimized SQL queries
390 |         n = 5000 if days is None else days * 100  # Rough estimate
391 |         memories = await self.get_recent_memories(n=n)
392 |         timestamps = [m.created_at for m in memories if m.created_at]
393 | 
394 |         # Filter by days if specified
395 |         if days is not None:
396 |             cutoff = datetime.now(timezone.utc) - timedelta(days=days)
397 |             cutoff_timestamp = cutoff.timestamp()
398 |             timestamps = [ts for ts in timestamps if ts >= cutoff_timestamp]
399 | 
400 |         return sorted(timestamps, reverse=True)
401 | 
```

--------------------------------------------------------------------------------
/docs/architecture.md:
--------------------------------------------------------------------------------

```markdown
  1 | # MCP Memory Service Architecture
  2 | 
  3 | ## Overview
  4 | 
  5 | MCP Memory Service is a Model Context Protocol server that provides semantic memory and persistent storage capabilities for AI assistants. It enables long-term memory storage with semantic search, time-based recall, and tag-based organization across conversations.
  6 | 
  7 | ## System Architecture
  8 | 
  9 | ```mermaid
 10 | graph TB
 11 |     subgraph "Client Layer"
 12 |         CC[Claude Desktop]
 13 |         LMS[LM Studio]
 14 |         VSC[VS Code MCP]
 15 |         GEN[Generic MCP Client]
 16 |     end
 17 | 
 18 |     subgraph "Protocol Layer"
 19 |         MCP[MCP Server Protocol]
 20 |         HTTP[HTTP API Server]
 21 |         WEB[Web Dashboard]
 22 |     end
 23 | 
 24 |     subgraph "Core Services"
 25 |         SRV[Memory Service Core]
 26 |         AUTH[Authentication]
 27 |         CACHE[Model Cache]
 28 |         EMB[Embedding Service]
 29 |     end
 30 | 
 31 |     subgraph "Storage Abstraction"
 32 |         ABS[Storage Interface]
 33 |         HYBRID[Hybrid Backend ⭐]
 34 |         CLOUDFLARE[Cloudflare Backend]
 35 |         SQLITE[SQLite-vec Backend]
 36 |         REMOTE[HTTP Client Backend]
 37 |         CHROMA[ChromaDB ⚠️ DEPRECATED]
 38 |     end
 39 | 
 40 |     subgraph "Infrastructure"
 41 |         DB[(Vector Database)]
 42 |         FS[(File System)]
 43 |         MDNS[mDNS Discovery]
 44 |     end
 45 | 
 46 |     CC --> MCP
 47 |     LMS --> MCP
 48 |     VSC --> MCP
 49 |     GEN --> MCP
 50 |     
 51 |     MCP --> SRV
 52 |     HTTP --> SRV
 53 |     WEB --> HTTP
 54 |     
 55 |     SRV --> AUTH
 56 |     SRV --> CACHE
 57 |     SRV --> EMB
 58 |     SRV --> ABS
 59 |     
 60 |     ABS --> HYBRID
 61 |     ABS --> CLOUDFLARE
 62 |     ABS --> SQLITE
 63 |     ABS --> REMOTE
 64 |     ABS --> CHROMA
 65 | 
 66 |     HYBRID --> SQLITE
 67 |     HYBRID --> CLOUDFLARE
 68 |     CLOUDFLARE --> DB
 69 |     SQLITE --> DB
 70 |     REMOTE --> HTTP
 71 |     CHROMA --> DB
 72 |     
 73 |     DB --> FS
 74 |     SRV --> MDNS
 75 | ```
 76 | 
 77 | ## Core Components
 78 | 
 79 | ### 1. Server Layer (`src/mcp_memory_service/server.py`)
 80 | 
 81 | The main server implementation that handles MCP protocol communication:
 82 | 
 83 | - **Protocol Handler**: Implements the MCP protocol specification
 84 | - **Request Router**: Routes incoming requests to appropriate handlers
 85 | - **Response Builder**: Constructs protocol-compliant responses
 86 | - **Client Detection**: Identifies and adapts to different MCP clients (Claude Desktop, LM Studio, etc.)
 87 | - **Logging System**: Client-aware logging with JSON compliance for Claude Desktop
 88 | 
 89 | Key responsibilities:
 90 | - Async request handling with proper error boundaries
 91 | - Global model and embedding cache management
 92 | - Lazy initialization of storage backends
 93 | - Tool registration and invocation
 94 | 
 95 | ### 2. Storage Abstraction Layer (`src/mcp_memory_service/storage/`)
 96 | 
 97 | Abstract interface that allows multiple storage backend implementations:
 98 | 
 99 | #### Base Interface (`storage/base.py`)
100 | ```python
101 | class MemoryStorage(ABC):
102 |     async def initialize(self) -> None:
103 |         """Initialize the storage backend."""
104 |         pass
105 | 
106 |     async def store(self, memory: Memory) -> Tuple[bool, str]:
107 |         """Store a memory object."""
108 |         pass
109 | 
110 |     async def retrieve(self, query: str, n_results: int) -> List[MemoryQueryResult]:
111 |         """Retrieve memories based on semantic similarity."""
112 |         pass
113 | 
114 |     async def search_by_tag(self, tags: List[str]) -> List[Memory]:
115 |         """Search memories by tags."""
116 |         pass
117 | 
118 |     async def delete(self, content_hash: str) -> Tuple[bool, str]:
119 |         """Delete a memory by content hash."""
120 |         pass
121 | 
122 |     async def recall_memory(self, query: str, n_results: int) -> List[Memory]:
123 |         """Recall memories using natural language time queries."""
124 |         pass
125 | ```
126 | 
127 | #### Hybrid Backend (`storage/hybrid.py`) ⭐ **RECOMMENDED**
128 | - **Production default** - Best performance with cloud synchronization
129 | - **Primary storage**: SQLite-vec for ultra-fast local reads (~5ms)
130 | - **Secondary storage**: Cloudflare for multi-device persistence and cloud backup
131 | - **Background sync**: Zero user-facing latency with async operation queue
132 | - **Graceful degradation**: Works offline, automatically syncs when cloud available
133 | - **Capacity monitoring**: Tracks Cloudflare limits and provides warnings
134 | - **Use cases**: Production deployments, multi-device users, cloud-backed local performance
135 | 
136 | #### Cloudflare Backend (`storage/cloudflare.py`)
137 | - Cloud-native storage using Cloudflare D1 (SQL) + Vectorize (vectors)
138 | - Global edge distribution for low-latency access worldwide
139 | - Serverless architecture with no infrastructure management
140 | - Automatic scaling and high availability
141 | - **Limits**: 10GB D1 database, 5M vectors in Vectorize
142 | - **Use cases**: Cloud-only deployments, serverless environments, no local storage
143 | 
144 | #### SQLite-vec Backend (`storage/sqlite_vec.py`)
145 | - Lightweight, fast local storage (5ms read latency)
146 | - Native SQLite with vec0 extension for vector similarity
147 | - ONNX Runtime embeddings (no PyTorch dependency)
148 | - Minimal memory footprint and dependencies
149 | - **Use cases**: Development, single-device deployments, or as primary in Hybrid backend
150 | 
151 | #### HTTP Client Backend (`storage/http_client.py`)
152 | - Remote storage via HTTP API for distributed architectures
153 | - Enables client-server deployments with centralized memory
154 | - Bearer token authentication with API key support
155 | - Automatic retry logic with exponential backoff
156 | - **Use cases**: Multi-client shared memory, remote MCP servers, load balancing
157 | 
158 | #### ChromaDB Backend (`storage/chroma.py`) ⚠️ **DEPRECATED**
159 | - **Status**: Deprecated since v5.x, removal planned for v6.0.0
160 | - **Migration path**: Switch to Hybrid backend for production
161 | - Original vector database backend with sentence transformer embeddings
162 | - Heavy dependencies (PyTorch, sentence-transformers, ~2GB download)
163 | - Slower performance (15ms vs 5ms for SQLite-vec)
164 | - Higher memory footprint and complexity
165 | - **Why deprecated**: Hybrid backend provides better performance with cloud sync
166 | - **Historical only**: Not recommended for new deployments
167 | 
168 | ### 3. Models Layer (`src/mcp_memory_service/models/`)
169 | 
170 | Data structures and validation:
171 | 
172 | ```python
173 | @dataclass
174 | class Memory:
175 |     id: str
176 |     content: str
177 |     content_hash: str
178 |     memory_type: str
179 |     tags: List[str]
180 |     metadata: MemoryMetadata
181 |     created_at: datetime
182 |     updated_at: datetime
183 | 
184 | @dataclass
185 | class MemoryMetadata:
186 |     source: Optional[str]
187 |     client_id: Optional[str]
188 |     session_id: Optional[str]
189 |     parent_memory_id: Optional[str]
190 |     child_memory_ids: List[str]
191 | ```
192 | 
193 | ### 4. Web Interface (`src/mcp_memory_service/web/`)
194 | 
195 | Modern web dashboard for memory management:
196 | 
197 | - **Frontend**: Responsive React-based UI
198 | - **API Routes**: RESTful endpoints for memory operations
199 | - **WebSocket Support**: Real-time updates
200 | - **Authentication**: API key-based authentication
201 | - **Health Monitoring**: System status and metrics
202 | 
203 | ### 5. Configuration Management (`src/mcp_memory_service/config.py`)
204 | 
205 | Environment-based configuration with sensible defaults:
206 | 
207 | - Storage backend selection
208 | - Model selection and caching
209 | - Platform-specific optimizations
210 | - Hardware acceleration detection (CUDA, MPS, DirectML, ROCm)
211 | - Network configuration (HTTP, HTTPS, mDNS)
212 | 
213 | ## Key Design Patterns
214 | 
215 | ### Async/Await Pattern
216 | All I/O operations use Python's async/await for non-blocking execution:
217 | ```python
218 | async def store_memory(self, content: str) -> Memory:
219 |     embedding = await self._generate_embedding(content)
220 |     memory = await self.storage.store(content, embedding)
221 |     return memory
222 | ```
223 | 
224 | ### Lazy Initialization
225 | Resources are initialized only when first needed:
226 | ```python
227 | async def _ensure_storage_initialized(self):
228 |     if self.storage is None:
229 |         self.storage = await create_storage_backend()
230 |     return self.storage
231 | ```
232 | 
233 | ### Global Caching Strategy
234 | Model and embedding caches are shared globally to reduce memory usage:
235 | ```python
236 | _MODEL_CACHE = {}
237 | _EMBEDDING_CACHE = LRUCache(maxsize=1000)
238 | ```
239 | 
240 | ### Platform Detection and Optimization
241 | Automatic detection and optimization for different platforms:
242 | - **macOS**: MPS acceleration for Apple Silicon
243 | - **Windows**: CUDA or DirectML
244 | - **Linux**: CUDA, ROCm, or CPU
245 | - **Fallback**: ONNX Runtime for compatibility
246 | 
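A rough sketch of this selection order (illustrative only; the real logic lives in the service's configuration and utility modules, and the DirectML/ROCm branches are omitted for brevity):

```python
def pick_device() -> str:
    """Pick an acceleration device in CUDA -> MPS -> CPU preference order (illustrative)."""
    try:
        import torch
        if torch.cuda.is_available():
            return "cuda"   # NVIDIA GPUs (Windows/Linux)
        mps = getattr(torch.backends, "mps", None)
        if mps is not None and mps.is_available():
            return "mps"    # Apple Silicon acceleration
    except ImportError:
        pass                # No PyTorch installed: fall back to ONNX Runtime on CPU
    return "cpu"
```
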
247 | ## MCP Protocol Operations
248 | 
249 | ### Core Memory Operations
250 | 
251 | | Operation | Description | Parameters |
252 | |-----------|-------------|------------|
253 | | `store_memory` | Store new memory with tags | content, tags, metadata |
254 | | `retrieve_memory` | Semantic search | query, n_results |
255 | | `recall_memory` | Time-based retrieval | time_expression, n_results |
256 | | `search_by_tag` | Tag-based search | tags[] |
257 | | `delete_memory` | Delete by hash | content_hash |
258 | | `delete_by_tags` | Bulk deletion | tags[] |
259 | 
260 | ### Utility Operations
261 | 
262 | | Operation | Description | Parameters |
263 | |-----------|-------------|------------|
264 | | `check_database_health` | Health status | - |
265 | | `optimize_db` | Database optimization | - |
266 | | `export_memories` | Export to JSON | output_path |
267 | | `import_memories` | Import from JSON | input_path |
268 | | `get_memory_stats` | Usage statistics | - |
269 | 
270 | ### Debug Operations
271 | 
272 | | Operation | Description | Parameters |
273 | |-----------|-------------|------------|
274 | | `debug_retrieve` | Detailed similarity scores | query, n_results |
275 | | `exact_match_retrieve` | Exact content matching | query |
276 | 
277 | ## Data Flow
278 | 
279 | ### Memory Storage Flow
280 | ```
281 | 1. Client sends store_memory request
282 | 2. Server validates and enriches metadata
283 | 3. Content is hashed for deduplication
284 | 4. Text is embedded using sentence transformers
285 | 5. Memory is stored in vector database
286 | 6. Confirmation returned to client
287 | ```
288 | 
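Step 3 relies on a deterministic content hash so that re-storing identical text is recognized as a duplicate. A minimal sketch of the idea (the service's actual hashing may normalize differently or fold in metadata):

```python
import hashlib

def content_hash(content: str) -> str:
    """Hash whitespace-normalized content so identical memories map to the same key."""
    normalized = " ".join(content.split())
    return hashlib.sha256(normalized.encode("utf-8")).hexdigest()
```
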
289 | ### Memory Retrieval Flow
290 | ```
291 | 1. Client sends retrieve_memory request
292 | 2. Query is embedded to vector representation
293 | 3. Vector similarity search performed
294 | 4. Results ranked by similarity score
295 | 5. Metadata enriched results returned
296 | ```
297 | 
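Steps 3-4 amount to comparing the query vector against stored vectors and keeping the best matches. A dependency-free sketch of cosine-similarity ranking (backends such as sqlite-vec and Vectorize do this natively and far more efficiently):

```python
import math

def cosine(a: list[float], b: list[float]) -> float:
    """Cosine similarity of two equal-length vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0

def rank(query_vec: list[float], candidates: dict[str, list[float]], n_results: int = 5):
    """Return the n_results most similar (content_hash, score) pairs, best first."""
    scored = [(h, cosine(query_vec, v)) for h, v in candidates.items()]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:n_results]
```
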
298 | ### Time-Based Recall Flow
299 | ```
300 | 1. Client sends recall_memory with time expression
301 | 2. Time parser extracts temporal boundaries
302 | 3. Semantic query combined with time filter
303 | 4. Filtered results returned chronologically
304 | ```
305 | 
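Step 2 turns a phrase such as "yesterday" or "last week" into a concrete timestamp window that is then applied as a filter on top of the semantic query. A toy parser covering only those two expressions (the service's real parser understands a much wider range of natural-language forms):

```python
from datetime import datetime, timedelta, timezone

def time_window(expression: str) -> tuple[float, float]:
    """Map a time expression to an inclusive (start, end) pair of Unix timestamps."""
    now = datetime.now(timezone.utc)
    today = now.replace(hour=0, minute=0, second=0, microsecond=0)
    if expression == "yesterday":
        start = today - timedelta(days=1)
        return start.timestamp(), today.timestamp()
    if expression == "last week":
        return (today - timedelta(days=7)).timestamp(), now.timestamp()
    raise ValueError(f"Unsupported time expression: {expression!r}")
```
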
306 | ## Performance Optimizations
307 | 
308 | ### Model Caching
309 | - Sentence transformer models cached globally
310 | - Single model instance shared across requests
311 | - Lazy loading on first use
312 | 
313 | ### Embedding Cache
314 | - LRU cache for frequently used embeddings
315 | - Configurable cache size
316 | - Cache hit tracking for optimization
317 | 
318 | ### Query Optimization
319 | - Batch processing for multiple operations
320 | - Connection pooling for database access
321 | - Async I/O for non-blocking operations
322 | 
323 | ### Platform-Specific Optimizations
324 | - Hardware acceleration auto-detection
325 | - Optimized tensor operations per platform
326 | - Fallback strategies for compatibility
327 | 
328 | ## Security Considerations
329 | 
330 | ### Authentication
331 | - API key-based authentication for HTTP endpoints
332 | - Bearer token support
333 | - Per-client authentication in multi-client mode
334 | 
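In practice a client authenticates by sending the configured API key as a bearer token on every HTTP request. A minimal sketch against the health endpoint (host, port, and the relaxed certificate handling mirror the self-signed-certificate examples elsewhere in these docs; adjust for your deployment):

```python
import json
import ssl
import urllib.request

API_KEY = "your-api-key"            # placeholder: use the key configured for the server
ctx = ssl.create_default_context()
ctx.check_hostname = False          # accept the local self-signed certificate
ctx.verify_mode = ssl.CERT_NONE

req = urllib.request.Request("https://localhost:8443/api/health")
req.add_header("Authorization", f"Bearer {API_KEY}")
with urllib.request.urlopen(req, context=ctx, timeout=10) as resp:
    print(json.loads(resp.read().decode("utf-8")))
```
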
335 | ### Data Privacy
336 | - Content hashing for deduplication
337 | - Optional encryption at rest
338 | - Client isolation in shared deployments
339 | 
340 | ### Network Security
341 | - HTTPS support with SSL/TLS
342 | - CORS configuration for web access
343 | - Rate limiting for API endpoints
344 | 
345 | ## Deployment Architectures
346 | 
347 | ### Production (Hybrid Backend) ⭐ **RECOMMENDED**
348 | - **Local performance**: SQLite-vec for 5ms read latency
349 | - **Cloud persistence**: Cloudflare for multi-device sync and backup
350 | - **Background sync**: Zero user-facing latency, async operation queue
351 | - **Offline capability**: Full functionality without internet, syncs when available
352 | - **Multi-device**: Access same memories across desktop, laptop, mobile
353 | - **Use cases**: Individual users, teams with personal instances, production deployments
354 | - **Setup**: `install.py --storage-backend hybrid` or set `MCP_MEMORY_STORAGE_BACKEND=hybrid`
355 | 
356 | ### Cloud-Only (Cloudflare Backend)
357 | - **Serverless deployment**: No local storage, pure cloud architecture
358 | - **Global edge**: Cloudflare's worldwide network for low latency
359 | - **Automatic scaling**: Handles traffic spikes without configuration
360 | - **Use cases**: Serverless environments, ephemeral containers, CI/CD systems
361 | - **Limits**: 10GB D1 database, 5M vectors in Vectorize
362 | - **Setup**: `install.py --storage-backend cloudflare` or set `MCP_MEMORY_STORAGE_BACKEND=cloudflare`
363 | 
364 | ### Development (SQLite-vec Backend)
365 | - **Lightweight**: Minimal dependencies, fast startup
366 | - **Local-only**: No cloud connectivity required
367 | - **Fast iteration**: 5ms read latency, no sync overhead
368 | - **Use cases**: Development, testing, single-device prototypes
369 | - **Setup**: `install.py --storage-backend sqlite_vec` or set `MCP_MEMORY_STORAGE_BACKEND=sqlite_vec`
370 | 
371 | ### Multi-Client Shared (HTTP Server)
372 | - **Centralized HTTP server** with shared memory pool
373 | - **Multiple clients** connect via API (Claude Desktop, VS Code, custom apps)
374 | - **Authentication**: API key-based access control
375 | - **Use cases**: Team collaboration, shared organizational memory
376 | - **Setup**: Enable HTTP server with `MCP_HTTP_ENABLED=true`, clients use HTTP Client backend
377 | 
378 | ### Legacy (ChromaDB Backend) ⚠️ **NOT RECOMMENDED**
379 | - **Deprecated**: Removal planned for v6.0.0
380 | - **Migration required**: Switch to Hybrid backend
381 | - Heavy dependencies, slower performance (15ms vs 5ms)
382 | - Only for existing deployments with migration path to Hybrid
383 | 
384 | ## Extension Points
385 | 
386 | ### Custom Storage Backends
387 | Implement the `MemoryStorage` abstract base class:
388 | ```python
389 | class CustomStorage(MemoryStorage):
390 |     async def store(self, memory: Memory) -> Tuple[bool, str]:
391 |         # Custom implementation
392 | ```
393 | 
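The full abstract surface is defined in `storage/base.py`: besides `store()`, a concrete backend must implement the retrieval, tag-search, deletion, and metadata-update methods plus the two capability properties. A compressed skeleton with bodies elided (signatures taken from the base class; optional helpers such as `store_batch()`, `get_all_memories()`, and `get_stats()` have default implementations and only need overriding for performance):

```python
from typing import Any, Dict, List, Optional, Tuple

from mcp_memory_service.models.memory import Memory, MemoryQueryResult
from mcp_memory_service.storage.base import MemoryStorage

class CustomStorage(MemoryStorage):
    @property
    def max_content_length(self) -> Optional[int]: ...
    @property
    def supports_chunking(self) -> bool: ...

    async def initialize(self) -> None: ...
    async def store(self, memory: Memory) -> Tuple[bool, str]: ...
    async def retrieve(self, query: str, n_results: int = 5) -> List[MemoryQueryResult]: ...
    async def search_by_tag(self, tags: List[str], time_start: Optional[float] = None) -> List[Memory]: ...
    async def search_by_tags(self, tags: List[str], operation: str = "AND",
                             time_start: Optional[float] = None,
                             time_end: Optional[float] = None) -> List[Memory]: ...
    async def delete(self, content_hash: str) -> Tuple[bool, str]: ...
    async def get_by_hash(self, content_hash: str) -> Optional[Memory]: ...
    async def delete_by_tag(self, tag: str) -> Tuple[int, str]: ...
    async def cleanup_duplicates(self) -> Tuple[int, str]: ...
    async def update_memory_metadata(self, content_hash: str, updates: Dict[str, Any],
                                     preserve_timestamps: bool = True) -> Tuple[bool, str]: ...
```
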
394 | ### Custom Embedding Models
395 | Replace the default sentence transformer:
396 | ```python
397 | EMBEDDING_MODEL = "your-model/name"
398 | ```
399 | 
400 | ### Protocol Extensions
401 | Add new operations via tool registration:
402 | ```python
403 | types.Tool(
404 |     name="custom_operation",
405 |     description="Custom memory operation",
406 |     inputSchema={
407 |         "type": "object",
408 |         "properties": {
409 |             "param1": {
410 |                 "type": "string",
411 |                 "description": "First parameter"
412 |             },
413 |             "param2": {
414 |                 "type": "integer",
415 |                 "description": "Second parameter",
416 |                 "default": 0
417 |             }
418 |         },
419 |         "required": ["param1"],
420 |         "additionalProperties": false
421 |     }
422 | )
423 | ```
424 | 
425 | ## Future Enhancements
426 | 
427 | ### Planned Features (See Issue #91)
428 | - **WFGY Semantic Firewall** - Enhanced memory reliability with 16 failure mode detection/recovery
429 | - **Ontology Foundation Layer** (Phase 0) - Controlled vocabulary, taxonomy, knowledge graph
430 | - Automatic memory consolidation
431 | - Semantic clustering
432 | - Memory importance scoring
433 | - Cross-conversation threading
434 | 
435 | ### Under Consideration
436 | - **Agentic RAG** for intelligent retrieval (see Discussion #86)
437 | - **Graph-based memory relationships** (ontology pipeline integration)
438 | - Memory compression strategies
439 | - Federated learning from memories
440 | - Real-time collaboration features
441 | - Advanced visualization tools
442 | 
443 | ## References
444 | 
445 | - [MCP Protocol Specification](https://modelcontextprotocol.io/docs)
446 | - [ChromaDB Documentation](https://docs.trychroma.com/)
447 | - [SQLite Vec Extension](https://github.com/asg017/sqlite-vec)
448 | - [Sentence Transformers](https://www.sbert.net/)
```

--------------------------------------------------------------------------------
/scripts/maintenance/find_duplicates.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Find and remove duplicate memories from the database.
  4 | Duplicates can occur when:
  5 | 1. Same content was ingested multiple times
  6 | 2. Re-ingestion after encoding fixes created duplicates
  7 | 3. Manual storage of similar content
  8 | """
  9 | 
 10 | import sqlite3
 11 | import json
 12 | import sys
 13 | import hashlib
 14 | import urllib.request
 15 | import urllib.parse
 16 | import ssl
 17 | from pathlib import Path
 18 | from collections import defaultdict
 19 | from datetime import datetime
 20 | 
 21 | def load_config():
 22 |     """Load configuration from Claude hooks config file."""
 23 |     config_path = Path.home() / '.claude' / 'hooks' / 'config.json'
 24 |     if config_path.exists():
 25 |         with open(config_path) as f:
 26 |             return json.load(f)
 27 |     return None
 28 | 
 29 | def get_memories_from_api(endpoint, api_key):
 30 |     """Retrieve all memories from the API endpoint using pagination."""
 31 |     try:
 32 |         # Create SSL context that allows self-signed certificates
 33 |         ssl_context = ssl.create_default_context()
 34 |         ssl_context.check_hostname = False
 35 |         ssl_context.verify_mode = ssl.CERT_NONE
 36 |         
 37 |         all_memories = []
 38 |         page = 1
 39 |         page_size = 100  # Use reasonable page size
 40 |         
 41 |         while True:
 42 |             # Create request for current page
 43 |             url = f"{endpoint}/api/memories?page={page}&page_size={page_size}"
 44 |             req = urllib.request.Request(url)
 45 |             req.add_header('Authorization', f'Bearer {api_key}')
 46 |             
 47 |             # Make request
 48 |             with urllib.request.urlopen(req, context=ssl_context, timeout=30) as response:
 49 |                 if response.status != 200:
 50 |                     print(f"❌ API request failed: {response.status}")
 51 |                     return []
 52 |                 
 53 |                 data = response.read().decode('utf-8')
 54 |                 api_response = json.loads(data)
 55 |             
 56 |             # Extract memories from this page
 57 |             page_memories = api_response.get('memories', [])
 58 |             total = api_response.get('total', 0)
 59 |             has_more = api_response.get('has_more', False)
 60 |             
 61 |             all_memories.extend(page_memories)
 62 |             print(f"Retrieved page {page}: {len(page_memories)} memories (total so far: {len(all_memories)}/{total})")
 63 |             
 64 |             if not has_more:
 65 |                 break
 66 |                 
 67 |             page += 1
 68 |         
 69 |         print(f"✅ Retrieved all {len(all_memories)} memories from API")
 70 |         
 71 |         # Convert API format to internal format
 72 |         converted_memories = []
 73 |         for mem in all_memories:
 74 |             converted_memories.append((
 75 |                 mem.get('content_hash', ''),
 76 |                 mem.get('content', ''),
 77 |                 json.dumps(mem.get('tags', [])),
 78 |                 mem.get('created_at', ''),
 79 |                 json.dumps(mem.get('metadata', {}))
 80 |             ))
 81 |         
 82 |         return converted_memories
 83 |         
 84 |     except Exception as e:
 85 |         print(f"❌ Error retrieving memories from API: {e}")
 86 |         return []
 87 | 
 88 | def content_similarity_hash(content):
 89 |     """Create a hash for content similarity detection."""
 90 |     # Normalize content for comparison
 91 |     normalized = content.strip().lower()
 92 |     # Remove extra whitespace
 93 |     normalized = ' '.join(normalized.split())
 94 |     return hashlib.sha256(normalized.encode('utf-8')).hexdigest()[:16]
 95 | 
 96 | def find_duplicates(memories_source, similarity_threshold=0.95):
 97 |     """
 98 |     Find duplicate memories from either database or API.
 99 |     
100 |     Args:
101 |         memories_source: Either a database path (str) or list of memories from API
102 |         similarity_threshold: Threshold for considering memories duplicates (0.0-1.0)
103 |     
104 |     Returns:
105 |         Dict of duplicate groups
106 |     """
107 |     if isinstance(memories_source, str):
108 |         # Database path provided
109 |         conn = sqlite3.connect(memories_source)
110 |         cursor = conn.cursor()
111 |         
112 |         print("Scanning for duplicate memories...")
113 |         
114 |         # Get all memories
115 |         cursor.execute("""
116 |             SELECT content_hash, content, tags, created_at, metadata
117 |             FROM memories 
118 |             ORDER BY created_at DESC
119 |         """)
120 |         
121 |         all_memories = cursor.fetchall()
122 |         conn.close()
123 |     else:
124 |         # API memories provided
125 |         print("Analyzing memories from API...")
126 |         all_memories = memories_source
127 |     
128 |     print(f"Found {len(all_memories)} total memories")
129 |     
130 |     # Group by content similarity
131 |     content_groups = defaultdict(list)
132 |     exact_content_groups = defaultdict(list)
133 |     
134 |     for memory in all_memories:
135 |         content_hash, content, tags_json, created_at, metadata_json = memory
136 |         
137 |         # Parse tags and metadata
138 |         try:
139 |             tags = json.loads(tags_json) if tags_json else []
140 |         except (json.JSONDecodeError, TypeError):
141 |             tags = []
142 |             
143 |         try:
144 |             metadata = json.loads(metadata_json) if metadata_json else {}
145 |         except (json.JSONDecodeError, TypeError):
146 |             metadata = {}
147 |         
148 |         # Exact content match
149 |         exact_hash = hashlib.sha256(content.encode('utf-8')).hexdigest()
150 |         exact_content_groups[exact_hash].append({
151 |             'hash': content_hash,
152 |             'content': content,
153 |             'tags': tags,
154 |             'created_at': created_at,
155 |             'metadata': metadata,
156 |             'content_length': len(content)
157 |         })
158 |         
159 |         # Similar content match (normalized)
160 |         similarity_hash = content_similarity_hash(content)
161 |         content_groups[similarity_hash].append({
162 |             'hash': content_hash,
163 |             'content': content,
164 |             'tags': tags,
165 |             'created_at': created_at,
166 |             'metadata': metadata,
167 |             'content_length': len(content)
168 |         })
169 |     
170 |     # Find actual duplicates (groups with > 1 memory)
171 |     exact_duplicates = {k: v for k, v in exact_content_groups.items() if len(v) > 1}
172 |     similar_duplicates = {k: v for k, v in content_groups.items() if len(v) > 1}
173 |     
174 |     return {
175 |         'exact': exact_duplicates,
176 |         'similar': similar_duplicates,
177 |         'total_memories': len(all_memories)
178 |     }
179 | 
180 | def analyze_duplicate_group(group):
181 |     """Analyze a group of duplicate memories to determine which to keep."""
182 |     if len(group) <= 1:
183 |         return None
184 |         
185 |     # Sort by creation date (newest first)
186 |     sorted_group = sorted(group, key=lambda x: x['created_at'], reverse=True)
187 |     
188 |     analysis = {
189 |         'group_size': len(group),
190 |         'recommended_keep': None,
191 |         'recommended_delete': [],
192 |         'reasons': []
193 |     }
194 |     
195 |     # Prefer memories with utf8-fixed tag (these are the corrected versions)
196 |     utf8_fixed = [m for m in sorted_group if 'utf8-fixed' in m['tags']]
197 |     if utf8_fixed:
198 |         analysis['recommended_keep'] = utf8_fixed[0]
199 |         analysis['recommended_delete'] = [m for m in sorted_group if m != utf8_fixed[0]]
200 |         analysis['reasons'].append('Keeping UTF8-fixed version')
201 |         return analysis
202 |     
203 |     # Prefer newer memories
204 |     analysis['recommended_keep'] = sorted_group[0]  # Newest
205 |     analysis['recommended_delete'] = sorted_group[1:]  # Older ones
206 |     analysis['reasons'].append('Keeping newest version')
207 |     
208 |     # Check for different tag sets
209 |     keep_tags = set(analysis['recommended_keep']['tags'])
210 |     for delete_mem in analysis['recommended_delete']:
211 |         delete_tags = set(delete_mem['tags'])
212 |         if delete_tags != keep_tags:
213 |             analysis['reasons'].append(f'Tag differences: {delete_tags - keep_tags}')
214 |     
215 |     return analysis
216 | 
217 | def remove_duplicates(db_path, duplicate_groups, dry_run=True):
218 |     """
219 |     Remove duplicate memories from the database.
220 |     
221 |     Args:
222 |         db_path: Path to the SQLite database
223 |         duplicate_groups: Dict of duplicate groups from find_duplicates()
224 |         dry_run: If True, only show what would be deleted
225 |     """
226 |     conn = sqlite3.connect(db_path) if db_path else None  # no connection for API-only analysis
227 |     cursor = conn.cursor() if conn else None
228 |     
229 |     total_to_delete = 0
230 |     deletion_plan = []
231 |     
232 |     print(f"\n{'DRY RUN - ' if dry_run else ''}Analyzing duplicate groups...")
233 |     
234 |     # Process exact duplicates first
235 |     print("\n=== EXACT DUPLICATES ===")
236 |     for content_hash, group in duplicate_groups['exact'].items():
237 |         analysis = analyze_duplicate_group(group)
238 |         if analysis:
239 |             total_to_delete += len(analysis['recommended_delete'])
240 |             deletion_plan.extend(analysis['recommended_delete'])
241 |             
242 |             print(f"\nDuplicate group: {len(group)} memories")
243 |             print(f"  Keep: {analysis['recommended_keep']['hash'][:20]}... ({analysis['recommended_keep']['created_at']})")
244 |             print(f"  Tags: {', '.join(analysis['recommended_keep']['tags'][:3])}")
245 |             print(f"  Delete: {len(analysis['recommended_delete'])} older versions")
246 |             for reason in analysis['reasons']:
247 |                 print(f"  Reason: {reason}")
248 |     
249 |     # Process similar duplicates (but not exact)
250 |     print("\n=== SIMILAR DUPLICATES ===")
251 |     processed_exact_hashes = set()
252 |     for group in duplicate_groups['exact'].values():
253 |         for mem in group:
254 |             processed_exact_hashes.add(mem['hash'])
255 |     
256 |     for similarity_hash, group in duplicate_groups['similar'].items():
257 |         # Skip if these are exact duplicates we already processed
258 |         group_hashes = {mem['hash'] for mem in group}
259 |         if group_hashes.issubset(processed_exact_hashes):
260 |             continue
261 |             
262 |         analysis = analyze_duplicate_group(group)
263 |         if analysis:
264 |             print(f"\nSimilar group: {len(group)} memories")
265 |             print(f"  Keep: {analysis['recommended_keep']['hash'][:20]}... ({analysis['recommended_keep']['created_at']})")
266 |             print(f"  Content preview: {analysis['recommended_keep']['content'][:100]}...")
267 |             print(f"  Would delete: {len(analysis['recommended_delete'])} similar versions")
268 |             # Don't auto-delete similar (only exact) in this version
269 |     
270 |     print(f"\n{'DRY RUN SUMMARY' if dry_run else 'DELETION SUMMARY'}:")
271 |     print(f"Total exact duplicates to delete: {total_to_delete}")
272 |     print(f"Current total memories: {duplicate_groups['total_memories']}")
273 |     print(f"After cleanup: {duplicate_groups['total_memories'] - total_to_delete}")
274 |     
275 |     if not dry_run and total_to_delete > 0:
276 |         print(f"\n{'='*50}")
277 |         print("DELETING DUPLICATE MEMORIES...")
278 |         
279 |         deleted_count = 0
280 |         for mem_to_delete in deletion_plan:
281 |             try:
282 |                 # Delete from memories table
283 |                 cursor.execute("DELETE FROM memories WHERE content_hash = ?", (mem_to_delete['hash'],))
284 |                 
285 |                 # Also try to delete from embeddings if it exists
286 |                 try:
287 |                     cursor.execute("DELETE FROM memory_embeddings WHERE rowid = ?", (mem_to_delete['hash'],))
288 |                 except sqlite3.Error:
289 |                     pass  # Embeddings table might use different structure
290 |                     
291 |                 deleted_count += 1
292 |                 if deleted_count % 10 == 0:
293 |                     print(f"  Deleted {deleted_count}/{total_to_delete}...")
294 |                     
295 |             except Exception as e:
296 |                 print(f"  Error deleting {mem_to_delete['hash'][:20]}: {e}")
297 |         
298 |         conn.commit()
299 |         print(f"✅ Successfully deleted {deleted_count} duplicate memories")
300 |         
301 |         # Verify final count
302 |         cursor.execute("SELECT COUNT(*) FROM memories")
303 |         final_count = cursor.fetchone()[0]
304 |         print(f"📊 Final memory count: {final_count}")
305 |     
306 |     elif dry_run and total_to_delete > 0:
307 |         print(f"\nTo actually delete these {total_to_delete} duplicates, run with --execute flag")
308 |     
309 |     if conn: conn.close()
310 |     return total_to_delete
311 | 
312 | def main():
313 |     """Main entry point."""
314 |     import argparse
315 |     
316 |     parser = argparse.ArgumentParser(description='Find and remove duplicate memories')
317 |     parser.add_argument('--db-path', type=str,
318 |                         help='Path to SQLite database (if not using API)')
319 |     parser.add_argument('--use-api', action='store_true',
320 |                         help='Use API endpoint from config instead of database')
321 |     parser.add_argument('--execute', action='store_true',
322 |                         help='Actually delete the duplicates (default is dry run)')
323 |     parser.add_argument('--similarity-threshold', type=float, default=0.95,
324 |                         help='Similarity threshold for duplicate detection (0.0-1.0)')
325 |     
326 |     args = parser.parse_args()
327 |     
328 |     # Try to load config first
329 |     config = load_config()
330 |     
331 |     if args.use_api or (not args.db_path and config):
332 |         if not config:
333 |             print("❌ No configuration found. Use --db-path for local database or ensure config exists.")
334 |             sys.exit(1)
335 |         
336 |         endpoint = config.get('memoryService', {}).get('endpoint')
337 |         api_key = config.get('memoryService', {}).get('apiKey')
338 |         
339 |         if not endpoint or not api_key:
340 |             print("❌ API endpoint or key not found in configuration")
341 |             sys.exit(1)
342 |         
343 |         print(f"🌐 Using API endpoint: {endpoint}")
344 |         
345 |         # Get memories from API
346 |         memories = get_memories_from_api(endpoint, api_key)
347 |         if not memories:
348 |             print("❌ Failed to retrieve memories from API")
349 |             sys.exit(1)
350 |         
351 |         # Find duplicates
352 |         duplicates = find_duplicates(memories, args.similarity_threshold)
353 |         
354 |         if not duplicates['exact'] and not duplicates['similar']:
355 |             print("✅ No duplicates found!")
356 |             return
357 |         
358 |         print("\nFound:")
359 |         print(f"  - {len(duplicates['exact'])} exact duplicate groups")
360 |         print(f"  - {len(duplicates['similar'])} similar content groups")
361 |         
362 |         if args.execute:
363 |             print("⚠️  API-based deletion not yet implemented. Use database path for deletion.")
364 |         else:
365 |             # Show analysis only
366 |             remove_duplicates(None, duplicates, dry_run=True)
367 |             
368 |     else:
369 |         # Use database path
370 |         db_path = args.db_path or str(Path.home() / '.local/share/mcp-memory/sqlite_vec.db')
371 |         
372 |         if not Path(db_path).exists():
373 |             print(f"❌ Database not found: {db_path}")
374 |             print("💡 Try --use-api to use the API endpoint from config instead")
375 |             sys.exit(1)
376 |         
377 |         # Find duplicates
378 |         duplicates = find_duplicates(db_path, args.similarity_threshold)
379 |         
380 |         if not duplicates['exact'] and not duplicates['similar']:
381 |             print("✅ No duplicates found!")
382 |             return
383 |         
384 |         print("\nFound:")
385 |         print(f"  - {len(duplicates['exact'])} exact duplicate groups")
386 |         print(f"  - {len(duplicates['similar'])} similar content groups")
387 |         
388 |         # Remove duplicates
389 |         remove_duplicates(db_path, duplicates, dry_run=not args.execute)
390 | 
391 | if __name__ == "__main__":
392 |     main()
```
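
For reference, here is a minimal standalone sketch of the grouping and keep/delete policy implemented above, using hypothetical in-memory rows rather than a real database (field names mirror what the script reads from SQLite):

```python
import hashlib
from collections import defaultdict

# Hypothetical sample rows; only the fields the dedup logic looks at.
memories = [
    {"hash": "aaa111", "content": "Fix UTF-8 handling", "tags": ["bug"], "created_at": "2024-05-01T10:00:00"},
    {"hash": "bbb222", "content": "Fix UTF-8 handling", "tags": ["bug", "utf8-fixed"], "created_at": "2024-05-02T09:00:00"},
    {"hash": "ccc333", "content": "Unrelated note", "tags": [], "created_at": "2024-05-03T08:00:00"},
]

# Group by SHA-256 of the raw content, as find_duplicates() does for exact matches.
groups = defaultdict(list)
for mem in memories:
    digest = hashlib.sha256(mem["content"].encode("utf-8")).hexdigest()
    groups[digest].append(mem)

# Same keep policy as analyze_duplicate_group(): prefer a 'utf8-fixed' copy,
# otherwise keep the newest and mark the rest for deletion.
for group in (g for g in groups.values() if len(g) > 1):
    fixed = [m for m in group if "utf8-fixed" in m["tags"]]
    keep = fixed[0] if fixed else max(group, key=lambda m: m["created_at"])
    delete = [m["hash"] for m in group if m is not keep]
    print(f"keep {keep['hash']}, delete {delete}")
```

As the argument parser above shows, running the script without flags performs a dry run; deletions against the SQLite file only happen when `--execute` is passed, and API-based deletion is not yet implemented.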

--------------------------------------------------------------------------------
/src/mcp_memory_service/utils/system_detection.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | System detection utilities for hardware compatibility.
 17 | Provides functions to detect hardware architecture, available accelerators,
 18 | and determine optimal configurations for different environments.
 19 | """
 20 | import os
 21 | import sys
 22 | import platform
 23 | import logging
 24 | import subprocess
 25 | from typing import Dict, Any
 26 | import json
 27 | 
 28 | logger = logging.getLogger(__name__)
 29 | 
 30 | # Hardware acceleration types
 31 | class AcceleratorType:
 32 |     NONE = "none"
 33 |     CUDA = "cuda"
 34 |     MPS = "mps"  # Apple Metal Performance Shaders
 35 |     CPU = "cpu"
 36 |     DIRECTML = "directml"  # DirectML for Windows
 37 |     ROCm = "rocm"  # AMD ROCm
 38 | 
 39 | class Architecture:
 40 |     X86_64 = "x86_64"
 41 |     ARM64 = "arm64"
 42 |     UNKNOWN = "unknown"
 43 | 
 44 | class SystemInfo:
 45 |     """Class to store and provide system information."""
 46 |     
 47 |     def __init__(self):
 48 |         self.os_name = platform.system().lower()
 49 |         self.os_version = platform.version()
 50 |         self.architecture = self._detect_architecture()
 51 |         self.python_version = platform.python_version()
 52 |         self.cpu_count = os.cpu_count() or 1
 53 |         self.memory_gb = self._get_system_memory()
 54 |         self.accelerator = self._detect_accelerator()
 55 |         self.is_rosetta = self._detect_rosetta()
 56 |         self.is_virtual_env = sys.prefix != sys.base_prefix
 57 |         
 58 |     def _detect_architecture(self) -> str:
 59 |         """Detect the system architecture."""
 60 |         arch = platform.machine().lower()
 61 |         
 62 |         if arch in ("x86_64", "amd64", "x64"):
 63 |             return Architecture.X86_64
 64 |         elif arch in ("arm64", "aarch64"):
 65 |             return Architecture.ARM64
 66 |         else:
 67 |             return Architecture.UNKNOWN
 68 |             
 69 |     def _get_system_memory(self) -> float:
 70 |         """Get the total system memory in GB."""
 71 |         try:
 72 |             if self.os_name == "linux":
 73 |                 with open('/proc/meminfo', 'r') as f:
 74 |                     for line in f:
 75 |                         if line.startswith('MemTotal:'):
 76 |                             # Extract the memory value (in kB)
 77 |                             memory_kb = int(line.split()[1])
 78 |                             return round(memory_kb / (1024 * 1024), 2)  # Convert to GB
 79 |                             
 80 |             elif self.os_name == "darwin":  # macOS
 81 |                 output = subprocess.check_output(['sysctl', '-n', 'hw.memsize']).decode('utf-8').strip()
 82 |                 memory_bytes = int(output)
 83 |                 return round(memory_bytes / (1024**3), 2)  # Convert to GB
 84 |                 
 85 |             elif self.os_name == "windows":
 86 |                 import ctypes
 87 |                 kernel32 = ctypes.windll.kernel32
 88 |                 c_ulonglong = ctypes.c_ulonglong
 89 |                 class MEMORYSTATUSEX(ctypes.Structure):
 90 |                     _fields_ = [
 91 |                         ('dwLength', ctypes.c_ulong),
 92 |                         ('dwMemoryLoad', ctypes.c_ulong),
 93 |                         ('ullTotalPhys', c_ulonglong),
 94 |                         ('ullAvailPhys', c_ulonglong),
 95 |                         ('ullTotalPageFile', c_ulonglong),
 96 |                         ('ullAvailPageFile', c_ulonglong),
 97 |                         ('ullTotalVirtual', c_ulonglong),
 98 |                         ('ullAvailVirtual', c_ulonglong),
 99 |                         ('ullAvailExtendedVirtual', c_ulonglong),
100 |                     ]
101 |                     
102 |                 memoryStatus = MEMORYSTATUSEX()
103 |                 memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
104 |                 kernel32.GlobalMemoryStatusEx(ctypes.byref(memoryStatus))
105 |                 return round(memoryStatus.ullTotalPhys / (1024**3), 2)  # Convert to GB
106 |                 
107 |         except Exception as e:
108 |             logger.warning(f"Failed to get system memory: {e}")
109 |             
110 |         # Default fallback
111 |         return 4.0  # Assume 4GB as a conservative default
112 |         
113 |     def _detect_accelerator(self) -> str:
114 |         """Detect available hardware acceleration."""
115 |         # Try to detect CUDA
116 |         if self._check_cuda_available():
117 |             return AcceleratorType.CUDA
118 |             
119 |         # Check for Apple MPS (Metal Performance Shaders)
120 |         if self.os_name == "darwin" and self.architecture == Architecture.ARM64:
121 |             if self._check_mps_available():
122 |                 return AcceleratorType.MPS
123 |                 
124 |         # Check for ROCm on Linux
125 |         if self.os_name == "linux" and self._check_rocm_available():
126 |             return AcceleratorType.ROCm
127 |             
128 |         # Check for DirectML on Windows
129 |         if self.os_name == "windows" and self._check_directml_available():
130 |             return AcceleratorType.DIRECTML
131 |             
132 |         # Default to CPU
133 |         return AcceleratorType.CPU
134 |         
135 |     def _check_cuda_available(self) -> bool:
136 |         """Check if CUDA is available."""
137 |         try:
138 |             # Try to import torch and check for CUDA
139 |             import torch
140 |             # Check if torch is properly installed with CUDA support
141 |             if hasattr(torch, 'cuda'):
142 |                 return torch.cuda.is_available()
143 |             else:
144 |                 logger.warning("PyTorch installed but appears broken (no cuda attribute)")
145 |                 return False
146 |         except (ImportError, AttributeError) as e:
147 |             logger.debug(f"CUDA check failed: {e}")
148 |             # If torch is not installed or broken, try to check for CUDA using environment
149 |             return 'CUDA_HOME' in os.environ or 'CUDA_PATH' in os.environ
150 |             
151 |     def _check_mps_available(self) -> bool:
152 |         """Check if Apple MPS is available."""
153 |         try:
154 |             import torch
155 |             if hasattr(torch, 'backends') and hasattr(torch.backends, 'mps'):
156 |                 return torch.backends.mps.is_available()
157 |             else:
158 |                 logger.warning("PyTorch installed but appears broken (no backends attribute)")
159 |                 return False
160 |         except (ImportError, AttributeError) as e:
161 |             logger.debug(f"MPS check failed: {e}")
162 |             # Check for Metal support using system profiler
163 |             try:
164 |                 output = subprocess.check_output(
165 |                     ['system_profiler', 'SPDisplaysDataType'], 
166 |                     stderr=subprocess.DEVNULL
167 |                 ).decode('utf-8')
168 |                 return 'Metal' in output
169 |             except (subprocess.SubprocessError, FileNotFoundError):
170 |                 return False
171 |                 
172 |     def _check_rocm_available(self) -> bool:
173 |         """Check if AMD ROCm is available."""
174 |         try:
175 |             # Check for ROCm environment
176 |             if 'ROCM_HOME' in os.environ or 'ROCM_PATH' in os.environ:
177 |                 return True
178 |                 
179 |             # Check if ROCm libraries are installed
180 |             try:
181 |                 output = subprocess.check_output(
182 |                     ['rocminfo'], 
183 |                     stderr=subprocess.DEVNULL
184 |                 ).decode('utf-8')
185 |                 return 'GPU Agent' in output
186 |             except (subprocess.SubprocessError, FileNotFoundError):
187 |                 return False
188 |                 
189 |         except Exception:
190 |             return False
191 |             
192 |     def _check_directml_available(self) -> bool:
193 |         """Check if DirectML is available on Windows."""
194 |         try:
195 |             # Check if DirectML package is installed
196 |             import pkg_resources
197 |             pkg_resources.get_distribution('torch-directml')
198 |             return True
199 |         except ImportError:
200 |             # pkg_resources not available
201 |             return False
202 |         except Exception:
203 |             # Any other error (including DistributionNotFound)
204 |             return False
205 |             
206 |     def _detect_rosetta(self) -> bool:
207 |         """Detect if running under Rosetta 2 on Apple Silicon."""
208 |         if self.os_name != "darwin" or self.architecture != Architecture.ARM64:
209 |             return False
210 |             
211 |         try:
212 |             # Check for Rosetta by examining the process
213 |             output = subprocess.check_output(
214 |                 ['sysctl', '-n', 'sysctl.proc_translated'], 
215 |                 stderr=subprocess.DEVNULL
216 |             ).decode('utf-8').strip()
217 |             return output == '1'
218 |         except (subprocess.SubprocessError, FileNotFoundError):
219 |             return False
220 |             
221 |     def get_optimal_batch_size(self) -> int:
222 |         """Determine optimal batch size based on hardware."""
223 |         # Start with a base batch size
224 |         if self.accelerator == AcceleratorType.CUDA:
225 |             # Scale based on available GPU memory (rough estimate)
226 |             try:
227 |                 import torch
228 |                 gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)  # GB
229 |                 if gpu_memory > 10:
230 |                     return 32
231 |                 elif gpu_memory > 6:
232 |                     return 16
233 |                 else:
234 |                     return 8
235 |             except Exception:
236 |                 return 8  # Default for CUDA
237 |         elif self.accelerator == AcceleratorType.MPS:
238 |             return 8  # Conservative for Apple Silicon
239 |         elif self.memory_gb > 16:
240 |             return 8  # Larger batch for systems with more RAM
241 |         elif self.memory_gb > 8:
242 |             return 4
243 |         else:
244 |             return 2  # Conservative for low-memory systems
245 |             
246 |     def get_optimal_model(self) -> str:
247 |         """Determine the optimal embedding model based on hardware capabilities."""
248 |         # Default model
249 |         default_model = 'all-MiniLM-L6-v2'
250 |         
251 |         # For very constrained environments, use an even smaller model
252 |         if self.memory_gb < 4:
253 |             return 'paraphrase-MiniLM-L3-v2'
254 |             
255 |         # For high-performance environments, consider a larger model
256 |         if (self.accelerator in [AcceleratorType.CUDA, AcceleratorType.MPS] and 
257 |                 self.memory_gb > 8):
258 |             return 'all-mpnet-base-v2'  # Better quality but more resource intensive
259 |             
260 |         return default_model
261 |         
262 |     def get_optimal_thread_count(self) -> int:
263 |         """Determine optimal thread count for parallel operations."""
264 |         # Use 75% of available cores, but at least 1
265 |         return max(1, int(self.cpu_count * 0.75))
266 |         
267 |     def to_dict(self) -> Dict[str, Any]:
268 |         """Convert system info to dictionary."""
269 |         return {
270 |             "os": self.os_name,
271 |             "os_version": self.os_version,
272 |             "architecture": self.architecture,
273 |             "python_version": self.python_version,
274 |             "cpu_count": self.cpu_count,
275 |             "memory_gb": self.memory_gb,
276 |             "accelerator": self.accelerator,
277 |             "is_rosetta": self.is_rosetta,
278 |             "is_virtual_env": self.is_virtual_env,
279 |             "optimal_batch_size": self.get_optimal_batch_size(),
280 |             "optimal_model": self.get_optimal_model(),
281 |             "optimal_thread_count": self.get_optimal_thread_count()
282 |         }
283 |         
284 |     def __str__(self) -> str:
285 |         """String representation of system info."""
286 |         return json.dumps(self.to_dict(), indent=2)
287 | 
288 | 
289 | def get_system_info() -> SystemInfo:
290 |     """Get system information singleton."""
291 |     if not hasattr(get_system_info, 'instance'):
292 |         get_system_info.instance = SystemInfo()
293 |     return get_system_info.instance
294 | 
295 | 
296 | def get_torch_device() -> str:
297 |     """Get the optimal PyTorch device based on system capabilities."""
298 |     system_info = get_system_info()
299 |     
300 |     try:
301 |         import torch
302 |         
303 |         if system_info.accelerator == AcceleratorType.CUDA and torch.cuda.is_available():
304 |             return "cuda"
305 |         elif (system_info.accelerator == AcceleratorType.MPS and 
306 |               hasattr(torch.backends, 'mps') and 
307 |               torch.backends.mps.is_available()):
308 |             return "mps"
309 |         else:
310 |             return "cpu"
311 |     except ImportError:
312 |         return "cpu"
313 | 
314 | 
315 | def get_optimal_embedding_settings() -> Dict[str, Any]:
316 |     """Get optimal settings for embedding operations."""
317 |     system_info = get_system_info()
318 |     
319 |     return {
320 |         "model_name": system_info.get_optimal_model(),
321 |         "batch_size": system_info.get_optimal_batch_size(),
322 |         "device": get_torch_device(),
323 |         "threads": system_info.get_optimal_thread_count()
324 |     }
325 | 
326 | 
327 | def print_system_diagnostics(client_type: str = 'lm_studio'):
328 |     """Print detailed system diagnostics for troubleshooting, conditionally based on client."""
329 |     # Only print for LM Studio to avoid JSON parsing errors in Claude Desktop
330 |     if client_type != 'lm_studio':
331 |         return
332 |         
333 |     system_info = get_system_info()
334 |     
335 |     print("\n=== System Diagnostics ===")
336 |     print(f"OS: {system_info.os_name} {system_info.os_version}")
337 |     print(f"Architecture: {system_info.architecture}")
338 |     print(f"Python: {system_info.python_version}")
339 |     print(f"CPU Cores: {system_info.cpu_count}")
340 |     print(f"Memory: {system_info.memory_gb:.2f} GB")
341 |     print(f"Accelerator: {system_info.accelerator}")
342 |     
343 |     if system_info.is_rosetta:
344 |         print("⚠️ Running under Rosetta 2 translation")
345 |         
346 |     print("\n=== Optimal Settings ===")
347 |     print(f"Embedding Model: {system_info.get_optimal_model()}")
348 |     print(f"Batch Size: {system_info.get_optimal_batch_size()}")
349 |     print(f"Thread Count: {system_info.get_optimal_thread_count()}")
350 |     print(f"PyTorch Device: {get_torch_device()}")
351 |     
352 |     # Additional PyTorch diagnostics if available
353 |     try:
354 |         import torch
355 |         print("\n=== PyTorch Diagnostics ===")
356 |         print(f"PyTorch Version: {torch.__version__}")
357 |         print(f"CUDA Available: {torch.cuda.is_available()}")
358 |         
359 |         if torch.cuda.is_available():
360 |             print(f"CUDA Version: {torch.version.cuda}")
361 |             print(f"GPU Device: {torch.cuda.get_device_name(0)}")
362 |             print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / (1024**3):.2f} GB")
363 |             
364 |         if hasattr(torch.backends, 'mps'):
365 |             print(f"MPS Available: {torch.backends.mps.is_available()}")
366 |             
367 |     except ImportError:
368 |         print("\nPyTorch not installed, skipping PyTorch diagnostics")
369 |         
370 |     print("\n=== Environment Variables ===")
371 |     for var in ['CUDA_HOME', 'CUDA_PATH', 'ROCM_HOME', 'PYTORCH_ENABLE_MPS_FALLBACK']:
372 |         if var in os.environ:
373 |             print(f"{var}: {os.environ[var]}")
```
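
A short consumer sketch for the helpers above; the import path follows the file location shown, and the commented-out sentence-transformers call is an illustrative assumption about how the detected settings could feed an embedding backend, not a confirmed part of the service:

```python
from mcp_memory_service.utils.system_detection import (
    get_optimal_embedding_settings,
    get_system_info,
)

info = get_system_info()                     # cached SystemInfo instance
settings = get_optimal_embedding_settings()  # model_name, batch_size, device, threads

print(info)       # JSON dump of detected OS, architecture, memory, accelerator
print(settings)

# Assumption: any embedding backend that accepts a model name and a device string
# could be configured from these settings, e.g. with sentence-transformers:
# from sentence_transformers import SentenceTransformer
# model = SentenceTransformer(settings["model_name"], device=settings["device"])
# vectors = model.encode(["hello world"], batch_size=settings["batch_size"])
```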
Page 23/47