This is page 26 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/docs/deployment/docker.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Docker Deployment Guide
  2 | 
  3 | This comprehensive guide covers deploying MCP Memory Service using Docker, including various configurations for different use cases and environments.
  4 | 
  5 | ## Overview
  6 | 
  7 | MCP Memory Service provides Docker support with multiple deployment configurations:
  8 | 
  9 | - **Standard Mode**: For MCP clients (Claude Desktop, VS Code, etc.)
 10 | - **Standalone Mode**: For testing and development (prevents boot loops)
 11 | - **HTTP/SSE Mode**: For web services and multi-client access
 12 | - **Production Mode**: For scalable server deployments
 13 | 
 14 | ## Prerequisites
 15 | 
 16 | - **Docker** 20.10+ installed on your system
 17 | - **Docker Compose** 2.0+ (recommended for simplified deployment)
 18 | - Basic knowledge of Docker concepts
 19 | - Sufficient disk space for Docker images and container volumes
 20 | 
 21 | ## Quick Start
 22 | 
 23 | ### Using Docker Compose (Recommended)
 24 | 
 25 | ```bash
 26 | # Clone the repository
 27 | git clone https://github.com/doobidoo/mcp-memory-service.git
 28 | cd mcp-memory-service
 29 | 
 30 | # Start with standard configuration
 31 | docker-compose up -d
 32 | 
 33 | # View logs
 34 | docker-compose logs -f
 35 | ```
 36 | 
 37 | This will:
 38 | - Build a Docker image for the Memory Service
 39 | - Create persistent volumes for the database and backups
 40 | - Start the service configured for MCP clients
 41 | 
 42 | ## Docker Compose Configurations
 43 | 
 44 | ### 1. Standard Configuration (`docker-compose.yml`)
 45 | 
 46 | **Best for**: MCP clients like Claude Desktop, VS Code with MCP extension
 47 | 
 48 | ```yaml
 49 | version: '3.8'
 50 | services:
 51 |   mcp-memory-service:
 52 |     build: .
 53 |     stdin_open: true
 54 |     tty: true
 55 |     volumes:
 56 |       - ./data/chroma_db:/app/chroma_db
 57 |       - ./data/backups:/app/backups
 58 |     environment:
 59 |       - MCP_MEMORY_STORAGE_BACKEND=chromadb
 60 |     restart: unless-stopped
 61 | ```
 62 | 
 63 | ```bash
 64 | # Deploy standard configuration
 65 | docker-compose up -d
 66 | ```
 67 | 
 68 | ### 2. Standalone Configuration (`docker-compose.standalone.yml`)
 69 | 
 70 | **Best for**: Testing, development, and preventing boot loops when no MCP client is connected
 71 | 
 72 | ```yaml
 73 | version: '3.8'
 74 | services:
 75 |   mcp-memory-service:
 76 |     build: .
 77 |     stdin_open: true
 78 |     tty: true
 79 |     ports:
 80 |       - "8000:8000"
 81 |     volumes:
 82 |       - ./data/chroma_db:/app/chroma_db
 83 |       - ./data/backups:/app/backups
 84 |     environment:
 85 |       - MCP_STANDALONE_MODE=1
 86 |       - MCP_HTTP_HOST=0.0.0.0
 87 |       - MCP_HTTP_PORT=8000
 88 |     restart: unless-stopped
 89 | ```
 90 | 
 91 | ```bash
 92 | # Deploy standalone configuration
 93 | docker-compose -f docker-compose.standalone.yml up -d
 94 | 
 95 | # Test connectivity
 96 | curl http://localhost:8000/health
 97 | ```
 98 | 
 99 | ### 3. UV Configuration (`docker-compose.uv.yml`)
100 | 
101 | **Best for**: Enhanced dependency management with UV package manager
102 | 
103 | ```yaml
104 | version: '3.8'
105 | services:
106 |   mcp-memory-service:
107 |     build: .
108 |     stdin_open: true
109 |     tty: true
110 |     ports:
111 |       - "8000:8000"
112 |     volumes:
113 |       - ./data/chroma_db:/app/chroma_db
114 |       - ./data/backups:/app/backups
115 |     environment:
116 |       - UV_ACTIVE=1
117 |       - MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
118 |     restart: unless-stopped
119 | ```
120 | 
121 | ### 4. Python Path Configuration (`docker-compose.pythonpath.yml`)
122 | 
123 | **Best for**: Custom Python path configurations and development mode
124 | 
125 | ```bash
126 | # Deploy with Python path configuration
127 | docker-compose -f docker-compose.pythonpath.yml up -d
128 | ```
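The actual `docker-compose.pythonpath.yml` lives under `tools/docker/`; as a rough sketch (illustrative only, not the shipped file), a custom Python path setup typically mounts the source tree and points `PYTHONPATH` at it:

```yaml
version: '3.8'
services:
  mcp-memory-service:
    build: .
    stdin_open: true
    tty: true
    volumes:
      - ./src:/app/src                  # live-mount source for development
      - ./data/chroma_db:/app/chroma_db
      - ./data/backups:/app/backups
    environment:
      - PYTHONPATH=/app/src             # matches the default listed under Environment Configuration
    restart: unless-stopped
```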
129 | 
130 | ## Manual Docker Commands
131 | 
132 | ### Basic Docker Deployment
133 | 
134 | ```bash
135 | # Build the Docker image
136 | docker build -t mcp-memory-service .
137 | 
138 | # Create directories for persistent storage
139 | mkdir -p ./data/chroma_db ./data/backups
140 | 
141 | # Run in standard mode (for MCP clients)
142 | docker run -d --name memory-service \
143 |   -v $(pwd)/data/chroma_db:/app/chroma_db \
144 |   -v $(pwd)/data/backups:/app/backups \
145 |   -e MCP_MEMORY_STORAGE_BACKEND=chromadb \
 146 |   --interactive --tty \
147 |   mcp-memory-service
148 | 
149 | # Run in standalone/HTTP mode
150 | docker run -d -p 8000:8000 --name memory-service \
151 |   -v $(pwd)/data/chroma_db:/app/chroma_db \
152 |   -v $(pwd)/data/backups:/app/backups \
153 |   -e MCP_STANDALONE_MODE=1 \
154 |   -e MCP_HTTP_HOST=0.0.0.0 \
155 |   -e MCP_HTTP_PORT=8000 \
 156 |   --interactive --tty \
157 |   mcp-memory-service
158 | ```
159 | 
160 | ### Using Specific Docker Images
161 | 
162 | ```bash
163 | # Use pre-built Glama deployment image
164 | docker run -d -p 8000:8000 \
165 |   -v $(pwd)/data:/app/data \
166 |   -e MCP_API_KEY=your-api-key \
167 |   --name memory-service \
168 |   mcp-memory-service:glama
169 | 
170 | # Use SQLite-vec optimized image
171 | docker run -d -p 8000:8000 \
172 |   -v $(pwd)/data:/app/data \
173 |   -e MCP_MEMORY_STORAGE_BACKEND=sqlite_vec \
174 |   --name memory-service \
175 |   mcp-memory-service:sqlite-vec
176 | ```
177 | 
178 | ## Environment Configuration
179 | 
180 | ### Core Environment Variables
181 | 
182 | | Variable | Default | Description |
183 | |----------|---------|-------------|
184 | | `MCP_MEMORY_STORAGE_BACKEND` | `chromadb` | Storage backend (chromadb, sqlite_vec) |
185 | | `MCP_HTTP_HOST` | `0.0.0.0` | HTTP server bind address |
186 | | `MCP_HTTP_PORT` | `8000` | HTTP server port |
187 | | `MCP_STANDALONE_MODE` | `false` | Enable standalone HTTP mode |
188 | | `MCP_API_KEY` | `none` | API key for authentication |
189 | 
190 | ### Docker-Specific Variables
191 | 
192 | | Variable | Default | Description |
193 | |----------|---------|-------------|
194 | | `DOCKER_CONTAINER` | `auto-detect` | Indicates running in Docker |
195 | | `UV_ACTIVE` | `false` | Use UV package manager |
196 | | `PYTHONPATH` | `/app/src` | Python module search path |
197 | 
198 | ### Storage Configuration
199 | 
200 | ```bash
201 | # ChromaDB backend
202 | docker run -d \
203 |   -e MCP_MEMORY_STORAGE_BACKEND=chromadb \
204 |   -e MCP_MEMORY_CHROMA_PATH=/app/chroma_db \
205 |   -v $(pwd)/data/chroma_db:/app/chroma_db \
206 |   mcp-memory-service
207 | 
208 | # SQLite-vec backend (recommended for containers)
209 | docker run -d \
210 |   -e MCP_MEMORY_STORAGE_BACKEND=sqlite_vec \
211 |   -e MCP_MEMORY_SQLITE_PATH=/app/sqlite_data/memory.db \
212 |   -v $(pwd)/data/sqlite_data:/app/sqlite_data \
213 |   mcp-memory-service
214 | ```
215 | 
216 | ## Production Deployment
217 | 
218 | ### Docker Swarm Deployment
219 | 
220 | ```yaml
221 | # docker-stack.yml
222 | version: '3.8'
223 | services:
224 |   mcp-memory-service:
225 |     image: mcp-memory-service:latest
226 |     ports:
227 |       - "8000:8000"
228 |     environment:
229 |       - MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
230 |       - MCP_HTTP_HOST=0.0.0.0
231 |       - MCP_API_KEY=REDACTED
232 |     volumes:
233 |       - memory_data:/app/data
234 |     secrets:
235 |       - api_key
236 |     deploy:
237 |       replicas: 3
238 |       restart_policy:
239 |         condition: on-failure
240 |         delay: 5s
241 |         max_attempts: 3
242 |       resources:
243 |         limits:
244 |           cpus: '1.0'
245 |           memory: 2G
246 |         reservations:
247 |           cpus: '0.5'
248 |           memory: 1G
249 | 
250 | volumes:
251 |   memory_data:
252 | 
253 | secrets:
254 |   api_key:
255 |     external: true
256 | ```
257 | 
258 | ```bash
259 | # Deploy to Docker Swarm
260 | docker stack deploy -c docker-stack.yml mcp-memory
261 | ```
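The stack file declares `api_key` as an external secret, so it must exist before deploying. A minimal sketch (the secret name must match the stack file):

```bash
# Create the external secret referenced by docker-stack.yml
openssl rand -hex 32 | docker secret create api_key -
```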
262 | 
263 | ### Kubernetes Deployment
264 | 
265 | ```yaml
266 | # k8s-deployment.yml
267 | apiVersion: apps/v1
268 | kind: Deployment
269 | metadata:
270 |   name: mcp-memory-service
271 | spec:
272 |   replicas: 3
273 |   selector:
274 |     matchLabels:
275 |       app: mcp-memory-service
276 |   template:
277 |     metadata:
278 |       labels:
279 |         app: mcp-memory-service
280 |     spec:
281 |       containers:
282 |       - name: mcp-memory-service
283 |         image: mcp-memory-service:latest
284 |         ports:
285 |         - containerPort: 8000
286 |         env:
287 |         - name: MCP_MEMORY_STORAGE_BACKEND
288 |           value: "sqlite_vec"
289 |         - name: MCP_HTTP_HOST
290 |           value: "0.0.0.0"
291 |         - name: MCP_API_KEY
292 |           valueFrom:
293 |             secretKeyRef:
294 |               name: mcp-api-key
295 |               key: api-key
296 |         volumeMounts:
297 |         - name: data-volume
298 |           mountPath: /app/data
299 |         resources:
300 |           limits:
301 |             cpu: 1000m
302 |             memory: 2Gi
303 |           requests:
304 |             cpu: 500m
305 |             memory: 1Gi
306 |       volumes:
307 |       - name: data-volume
308 |         persistentVolumeClaim:
309 |           claimName: mcp-memory-pvc
310 | ---
311 | apiVersion: v1
312 | kind: Service
313 | metadata:
314 |   name: mcp-memory-service
315 | spec:
316 |   selector:
317 |     app: mcp-memory-service
318 |   ports:
319 |   - port: 80
320 |     targetPort: 8000
321 |   type: LoadBalancer
322 | ```
 323 | ```
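The Deployment above mounts a `mcp-memory-pvc` claim that is not shown; a minimal sketch (storage size and access mode are assumptions, adjust for your cluster):

```yaml
# k8s-pvc.yml (illustrative)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mcp-memory-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
```

Note that with `replicas: 3` a `ReadWriteOnce` claim binds to a single node; shared access across replicas generally requires `ReadWriteMany` or per-replica volumes (for example via a StatefulSet).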
324 | ## Volume Management
325 | 
326 | ### Data Persistence
327 | 
328 | ```bash
329 | # Create named volumes
330 | docker volume create mcp_memory_data
331 | docker volume create mcp_memory_backups
332 | 
333 | # Use named volumes
334 | docker run -d \
335 |   -v mcp_memory_data:/app/data \
336 |   -v mcp_memory_backups:/app/backups \
337 |   mcp-memory-service
338 | 
339 | # Backup volumes
340 | docker run --rm \
341 |   -v mcp_memory_data:/data \
342 |   -v $(pwd)/backup:/backup \
343 |   alpine tar czf /backup/mcp_memory_$(date +%Y%m%d).tar.gz /data
344 | ```
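To restore such an archive into a named volume, reverse the operation (the archive name below is an example):

```bash
# Restore a backup archive into the named volume
docker run --rm \
  -v mcp_memory_data:/data \
  -v $(pwd)/backup:/backup \
  alpine tar xzf /backup/mcp_memory_20240115.tar.gz -C /
```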
345 | 
346 | ### Database Migration
347 | 
348 | ```bash
349 | # Export data from running container
 350 | docker exec memory-service python scripts/backup/backup_memories.py
351 | 
352 | # Import data to new container
353 | docker cp ./backup.json new-memory-service:/app/
 354 | docker exec new-memory-service python scripts/backup/restore_memories.py /app/backup.json
355 | ```
356 | 
357 | ## Monitoring and Logging
358 | 
359 | ### Container Health Checks
360 | 
361 | ```yaml
362 | # Add to docker-compose.yml
363 | services:
364 |   mcp-memory-service:
365 |     build: .
366 |     healthcheck:
367 |       test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
368 |       interval: 30s
369 |       timeout: 10s
370 |       retries: 3
371 |       start_period: 40s
372 | ```
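Once a health check is defined, Docker tracks its status, which you can query directly (adjust the container name to your deployment):

```bash
# Show the current health status reported by the health check
docker inspect --format='{{.State.Health.Status}}' memory-service
```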
373 | 
374 | ### Log Management
375 | 
376 | ```bash
377 | # View container logs
378 | docker-compose logs -f mcp-memory-service
379 | 
380 | # Configure log rotation
381 | docker-compose -f docker-compose.yml -f docker-compose.logging.yml up -d
382 | ```
383 | 
384 | ```yaml
385 | # docker-compose.logging.yml
386 | version: '3.8'
387 | services:
388 |   mcp-memory-service:
389 |     logging:
390 |       driver: "json-file"
391 |       options:
392 |         max-size: "10m"
393 |         max-file: "3"
394 | ```
395 | 
396 | ### Monitoring with Prometheus
397 | 
398 | ```yaml
399 | # docker-compose.monitoring.yml
400 | version: '3.8'
401 | services:
402 |   mcp-memory-service:
403 |     environment:
404 |       - MCP_MEMORY_ENABLE_METRICS=true
405 |       - MCP_MEMORY_METRICS_PORT=9090
406 |     ports:
407 |       - "9090:9090"
408 |   
409 |   prometheus:
410 |     image: prom/prometheus
411 |     ports:
412 |       - "9091:9090"
413 |     volumes:
414 |       - ./prometheus.yml:/etc/prometheus/prometheus.yml
415 | ```
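The Prometheus container mounts a `prometheus.yml` that is not shown here; a minimal scrape configuration matching the metrics port above might look like this (job name and interval are arbitrary):

```yaml
# prometheus.yml (minimal sketch)
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: mcp-memory-service
    static_configs:
      - targets: ['mcp-memory-service:9090']
```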
416 | 
417 | ## Troubleshooting
418 | 
419 | ### Common Docker Issues
420 | 
421 | #### 1. Container Boot Loop
422 | 
423 | **Symptom**: Container exits immediately with code 0
424 | 
425 | **Solution**: Use standalone mode or ensure proper TTY configuration:
426 | 
427 | ```yaml
428 | services:
429 |   mcp-memory-service:
430 |     stdin_open: true
431 |     tty: true
432 |     environment:
433 |       - MCP_STANDALONE_MODE=1
434 | ```
435 | 
436 | #### 2. Permission Issues
437 | 
438 | **Symptom**: Permission denied errors in container
439 | 
440 | **Solution**: Fix volume permissions:
441 | 
442 | ```bash
443 | # Set proper ownership
444 | sudo chown -R 1000:1000 ./data
445 | 
446 | # Or run with specific user
447 | docker run --user $(id -u):$(id -g) mcp-memory-service
448 | ```
449 | 
450 | #### 3. Storage Backend Issues
451 | 
452 | **Symptom**: Database initialization failures
453 | 
454 | **Solution**: Use SQLite-vec for containers:
455 | 
456 | ```bash
457 | docker run -d \
458 |   -e MCP_MEMORY_STORAGE_BACKEND=sqlite_vec \
459 |   -v $(pwd)/data:/app/data \
460 |   mcp-memory-service
461 | ```
462 | 
463 | #### 4. Network Connectivity
464 | 
465 | **Symptom**: Cannot connect to containerized service
466 | 
467 | **Solution**: Check port mapping and firewall:
468 | 
469 | ```bash
470 | # Test container networking
471 | docker exec memory-service netstat -tlnp
472 | 
473 | # Check port mapping
474 | docker port memory-service
475 | 
476 | # Test external connectivity
477 | curl http://localhost:8000/health
478 | ```
479 | 
480 | #### 5. Model Download Issues
481 | 
482 | **Symptom**: `OSError: We couldn't connect to 'https://huggingface.co'` when starting container
483 | 
484 | **Issue**: Container cannot download sentence-transformer models due to network restrictions
485 | 
486 | **Solutions**:
487 | 
488 | 1. **Pre-download models and mount cache (Recommended)**:
489 | 
490 | ```bash
491 | # Step 1: Download models on host machine first
492 | python -c "from sentence_transformers import SentenceTransformer; \
493 |           model = SentenceTransformer('all-MiniLM-L6-v2'); \
494 |           print('Model downloaded successfully')"
495 | 
496 | # Step 2: Run container with model cache mounted
497 | docker run -d --name memory-service \
498 |   -v ~/.cache/huggingface:/root/.cache/huggingface \
499 |   -v $(pwd)/data/chroma_db:/app/chroma_db \
500 |   -e MCP_MEMORY_STORAGE_BACKEND=chromadb \
501 |   mcp-memory-service
502 | ```
503 | 
504 | 2. **Configure proxy for Docker Desktop (Windows/Corporate networks)**:
505 | 
506 | ```bash
507 | # With proxy environment variables
508 | docker run -d --name memory-service \
509 |   -e HTTPS_PROXY=http://your-proxy:port \
510 |   -e HTTP_PROXY=http://your-proxy:port \
511 |   -e NO_PROXY=localhost,127.0.0.1 \
512 |   -v $(pwd)/data:/app/data \
513 |   mcp-memory-service
514 | ```
515 | 
516 | 3. **Use offline mode with pre-cached models**:
517 | 
518 | ```bash
519 | # Ensure models are in mounted volume, then run offline
520 | docker run -d --name memory-service \
521 |   -v ~/.cache/huggingface:/root/.cache/huggingface \
522 |   -e HF_HUB_OFFLINE=1 \
523 |   -e TRANSFORMERS_OFFLINE=1 \
524 |   -e HF_DATASETS_OFFLINE=1 \
525 |   -v $(pwd)/data:/app/data \
526 |   mcp-memory-service
527 | ```
528 | 
529 | 4. **Docker Compose with model cache**:
530 | 
531 | ```yaml
532 | # docker-compose.yml
533 | version: '3.8'
534 | services:
535 |   mcp-memory-service:
536 |     build: .
537 |     volumes:
538 |       # Mount model cache from host
539 |       - ${HOME}/.cache/huggingface:/root/.cache/huggingface
540 |       - ./data/chroma_db:/app/chroma_db
541 |       - ./data/backups:/app/backups
542 |     environment:
543 |       - MCP_MEMORY_STORAGE_BACKEND=chromadb
544 |       # Optional: force offline mode if models are pre-cached
545 |       # - HF_HUB_OFFLINE=1
546 |       # - TRANSFORMERS_OFFLINE=1
547 | ```
548 | 
549 | **Prevention**: Always mount the Hugging Face cache directory as a volume to persist models between container runs and avoid re-downloading.
550 | 
551 | ### Diagnostic Commands
552 | 
553 | #### Container Status
554 | 
555 | ```bash
556 | # Check container status
557 | docker ps -a
558 | 
559 | # View container logs
560 | docker logs memory-service
561 | 
562 | # Execute commands in container
563 | docker exec -it memory-service bash
564 | 
565 | # Check resource usage
566 | docker stats memory-service
567 | ```
568 | 
569 | #### Service Health
570 | 
571 | ```bash
572 | # Test HTTP endpoints
573 | curl http://localhost:8000/health
574 | curl http://localhost:8000/stats
575 | 
576 | # Check database connectivity
577 | docker exec memory-service python -c "
578 | from src.mcp_memory_service.storage.sqlite_vec import SqliteVecStorage
579 | storage = SqliteVecStorage()
580 | print('Database accessible')
581 | "
582 | ```
583 | 
584 | #### Model Cache Verification
585 | 
586 | ```bash
587 | # Check if models are cached on host
588 | ls -la ~/.cache/huggingface/hub/
589 | 
590 | # Verify model availability in container
591 | docker exec memory-service ls -la /root/.cache/huggingface/hub/
592 | 
593 | # Test model loading in container
594 | docker exec memory-service python -c "
595 | from sentence_transformers import SentenceTransformer
596 | try:
597 |     model = SentenceTransformer('all-MiniLM-L6-v2')
598 |     print('✅ Model loaded successfully')
599 | except Exception as e:
600 |     print(f'❌ Model loading failed: {e}')
601 | "
602 | ```
603 | 
604 | ## Security Considerations
605 | 
606 | ### API Key Authentication
607 | 
608 | ```bash
609 | # Generate secure API key
610 | API_KEY=$(openssl rand -hex 32)
611 | 
612 | # Use with Docker
613 | docker run -d \
614 |   -e MCP_API_KEY=$API_KEY \
615 |   -p 8000:8000 \
616 |   mcp-memory-service
617 | ```
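Clients then pass the key on each request. The search API examples in this repository use a Bearer header; assuming the same scheme applies to your deployment:

```bash
# Pass the API key as a Bearer token (header scheme as used in the search API examples)
curl -H "Authorization: Bearer $API_KEY" http://localhost:8000/health
```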
618 | 
619 | ### HTTPS Configuration
620 | 
621 | ```yaml
622 | # docker-compose.https.yml
623 | services:
624 |   mcp-memory-service:
625 |     environment:
626 |       - MCP_HTTPS_ENABLED=true
627 |       - MCP_HTTP_PORT=8443
628 |       - MCP_SSL_CERT_FILE=/app/certs/cert.pem
629 |       - MCP_SSL_KEY_FILE=/app/certs/key.pem
630 |     volumes:
631 |       - ./certs:/app/certs:ro
632 |     ports:
633 |       - "8443:8443"
634 | ```
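For local testing you can generate a self-signed certificate into the mounted `./certs` directory (file names match the compose file above):

```bash
# Generate a self-signed certificate for local HTTPS testing
mkdir -p certs
openssl req -x509 -newkey rsa:4096 -nodes \
  -keyout certs/key.pem -out certs/cert.pem \
  -days 365 -subj "/CN=localhost"
```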
635 | 
636 | ### Container Security
637 | 
638 | ```bash
639 | # Run with security options
640 | docker run -d \
641 |   --security-opt no-new-privileges:true \
642 |   --cap-drop ALL \
643 |   --cap-add NET_BIND_SERVICE \
644 |   --read-only \
645 |   --tmpfs /tmp \
646 |   mcp-memory-service
647 | ```
648 | 
649 | ## Performance Optimization
650 | 
651 | ### Resource Limits
652 | 
653 | ```yaml
654 | services:
655 |   mcp-memory-service:
656 |     deploy:
657 |       resources:
658 |         limits:
659 |           cpus: '2.0'
660 |           memory: 4G
661 |         reservations:
662 |           cpus: '1.0'
663 |           memory: 2G
664 | ```
665 | 
666 | ### Multi-Stage Builds
667 | 
668 | ```dockerfile
669 | # Optimized Dockerfile
670 | FROM python:3.11-slim as builder
671 | WORKDIR /app
672 | COPY requirements.txt .
673 | RUN pip install --user -r requirements.txt
674 | 
675 | FROM python:3.11-slim
676 | WORKDIR /app
677 | COPY --from=builder /root/.local /root/.local
678 | COPY . .
679 | ENV PATH=/root/.local/bin:$PATH
680 | CMD ["python", "src/mcp_memory_service/server.py"]
681 | ```
682 | 
683 | ## Development Workflow
684 | 
685 | ### Development with Docker
686 | 
687 | ```bash
688 | # Development with live reload
689 | docker-compose -f docker-compose.dev.yml up
690 | 
691 | # Run tests in container
692 | docker exec memory-service pytest tests/
693 | 
694 | # Debug with interactive shell
695 | docker exec -it memory-service bash
696 | ```
697 | 
698 | ### Building Custom Images
699 | 
700 | ```bash
701 | # Build with specific tag
702 | docker build -t mcp-memory-service:v1.2.3 .
703 | 
704 | # Build for multiple platforms
705 | docker buildx build --platform linux/amd64,linux/arm64 -t mcp-memory-service:latest .
706 | 
707 | # Push to registry
708 | docker push mcp-memory-service:latest
709 | ```
710 | 
711 | ## Related Documentation
712 | 
713 | - [Installation Guide](../installation/master-guide.md) - General installation instructions
714 | - [Multi-Client Setup](../integration/multi-client.md) - Multi-client configuration
715 | - [Ubuntu Setup](../platforms/ubuntu.md) - Ubuntu Docker deployment
716 | - [Windows Setup](../platforms/windows.md) - Windows Docker deployment
717 | - [Troubleshooting](../troubleshooting/general.md) - Docker-specific troubleshooting
718 | 
```

--------------------------------------------------------------------------------
/docs/architecture/search-examples.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Advanced Hybrid Search - Real-World Usage Examples
  2 | 
  3 | ## API Usage Examples
  4 | 
  5 | ### Example 1: Project Troubleshooting Scenario
  6 | 
  7 | **Scenario**: Developer needs to find all information about deployment issues in Project Alpha
  8 | 
  9 | **REST API Call:**
 10 | ```bash
 11 | curl -X POST "https://localhost:8443/api/search/advanced" \
 12 |   -H "Content-Type: application/json" \
 13 |   -H "Authorization: Bearer your-api-key" \
 14 |   -d '{
 15 |     "query": "Project Alpha database timeout deployment error",
 16 |     "search_mode": "hybrid",
 17 |     "n_results": 20,
 18 |     "consolidate_related": true,
 19 |     "include_context": true,
 20 |     "filters": {
 21 |       "memory_types": ["task", "decision", "note", "reference"],
 22 |       "tags": ["project-alpha", "deployment", "database"],
 23 |       "time_range": "last 2 weeks",
 24 |       "metadata_filters": {
 25 |         "priority": ["high", "critical"],
 26 |         "status": ["in-progress", "completed", "failed"]
 27 |       }
 28 |     },
 29 |     "ranking_options": {
 30 |       "semantic_weight": 0.5,
 31 |       "keyword_weight": 0.4,
 32 |       "recency_weight": 0.1,
 33 |       "boost_exact_matches": true
 34 |     }
 35 |   }'
 36 | ```
 37 | 
 38 | **Response:**
 39 | ```json
 40 | {
 41 |   "results": [
 42 |     {
 43 |       "primary_memory": {
 44 |         "content": "Project Alpha production deployment failed at 2024-01-15 10:30 AM. Database connection timeout after 15 seconds. Error: Connection pool exhausted. Impact: 500+ users affected. Rolling back to previous version.",
 45 |         "content_hash": "pa_deploy_error_20240115",
 46 |         "tags": ["project-alpha", "deployment", "database", "production", "error"],
 47 |         "memory_type": "task",
 48 |         "created_at_iso": "2024-01-15T10:35:00Z",
 49 |         "metadata": {
 50 |           "priority": "critical",
 51 |           "status": "in-progress",
 52 |           "project_id": "alpha-001",
 53 |           "environment": "production",
 54 |           "impact_level": "high"
 55 |         }
 56 |       },
 57 |       "similarity_score": 0.98,
 58 |       "relevance_reason": "Exact match: 'Project Alpha', 'database timeout', 'deployment error' + high semantic similarity",
 59 |       "consolidation": {
 60 |         "related_memories": [
 61 |           {
 62 |             "content": "DECISION: Increase database connection timeout from 15s to 45s for Project Alpha. Approved by Tech Lead. Implementation scheduled for next deployment window.",
 63 |             "content_hash": "pa_timeout_decision_20240115",
 64 |             "relationship": "solution",
 65 |             "similarity_score": 0.89,
 66 |             "memory_type": "decision",
 67 |             "relevance_reason": "Direct solution to the identified problem"
 68 |           },
 69 |           {
 70 |             "content": "Project Alpha database configuration: Connection pool size: 20, Timeout: 15s, Retry attempts: 3. Performance baseline established.",
 71 |             "content_hash": "pa_db_config_baseline",
 72 |             "relationship": "context",
 73 |             "similarity_score": 0.85,
 74 |             "memory_type": "reference",
 75 |             "relevance_reason": "Configuration context for troubleshooting"
 76 |           },
 77 |           {
 78 |             "content": "Post-deployment monitoring shows Project Alpha database connections stabilized after timeout increase. No further timeout errors in 48 hours.",
 79 |             "content_hash": "pa_monitor_success_20240117",
 80 |             "relationship": "follow_up",
 81 |             "similarity_score": 0.82,
 82 |             "memory_type": "note",
 83 |             "relevance_reason": "Follow-up results and validation"
 84 |           }
 85 |         ],
 86 |         "topic_cluster": "project-alpha-database-deployment",
 87 |         "consolidation_summary": "Database timeout deployment issue resolved by increasing connection timeout from 15s to 45s. Monitoring confirms successful resolution.",
 88 |         "timeline": [
 89 |           {
 90 |             "date": "2024-01-15T10:30:00Z",
 91 |             "event": "Deployment failure detected",
 92 |             "type": "problem"
 93 |           },
 94 |           {
 95 |             "date": "2024-01-15T14:00:00Z", 
 96 |             "event": "Solution decided and approved",
 97 |             "type": "solution"
 98 |           },
 99 |           {
100 |             "date": "2024-01-16T09:00:00Z",
101 |             "event": "Fix implemented and deployed",
102 |             "type": "implementation"
103 |           },
104 |           {
105 |             "date": "2024-01-17T10:00:00Z",
106 |             "event": "Success validated through monitoring",
107 |             "type": "validation"
108 |           }
109 |         ]
110 |       }
111 |     }
112 |   ],
113 |   "consolidated_topics": [
114 |     {
115 |       "topic": "Project Alpha Database Issues",
116 |       "memory_count": 8,
117 |       "key_themes": ["timeout", "connection pool", "performance", "monitoring"],
118 |       "timeline": "2024-01-10 to 2024-01-18",
119 |       "status": "resolved"
120 |     },
121 |     {
122 |       "topic": "Project Alpha Deployment Process",
123 |       "memory_count": 15,
124 |       "key_themes": ["rollback procedures", "deployment windows", "approval process"],
125 |       "timeline": "2024-01-01 to present",
126 |       "status": "ongoing"
127 |     }
128 |   ],
129 |   "search_intelligence": {
130 |     "query_analysis": {
131 |       "intent": "troubleshooting",
132 |       "entities": ["Project Alpha", "database timeout", "deployment error"],
133 |       "confidence": 0.94,
134 |       "suggested_filters": ["infrastructure", "database", "production"],
135 |       "query_type": "problem_resolution"
136 |     },
137 |     "recommendations": [
138 |       "Search for 'Project Alpha monitoring dashboard' for real-time metrics",
139 |       "Consider searching 'database performance optimization' for preventive measures",
140 |       "Review memories tagged with 'post-mortem' for similar incident analysis"
141 |     ],
142 |     "related_searches": [
143 |       "Project Alpha performance metrics",
144 |       "database connection pool tuning",
145 |       "deployment rollback procedures"
146 |     ]
147 |   },
148 |   "performance_metrics": {
149 |     "total_processing_time_ms": 87,
150 |     "semantic_search_time_ms": 34,
151 |     "keyword_search_time_ms": 12,
152 |     "consolidation_time_ms": 28,
153 |     "relationship_mapping_time_ms": 13
154 |   }
155 | }
156 | ```
157 | 
158 | ### Example 2: Knowledge Discovery Scenario
159 | 
160 | **Scenario**: Product manager wants to understand all decisions made about user authentication
161 | 
162 | **REST API Call:**
163 | ```bash
164 | curl -X POST "https://localhost:8443/api/search/advanced" \
165 |   -H "Content-Type: application/json" \
166 |   -H "Authorization: Bearer your-api-key" \
167 |   -d '{
168 |     "query": "user authentication security decisions",
169 |     "search_mode": "auto",
170 |     "n_results": 25,
171 |     "consolidate_related": true,
172 |     "include_context": true,
173 |     "filters": {
174 |       "memory_types": ["decision", "note"],
175 |       "time_range": "last 6 months",
176 |       "metadata_filters": {
177 |         "category": ["security", "architecture", "user-experience"]
178 |       }
179 |     }
180 |   }'
181 | ```
182 | 
183 | ## MCP API Usage Examples
184 | 
185 | ### Example 1: MCP Tool via HTTP Bridge
186 | 
187 | **Request:**
188 | ```http
189 | POST /api/mcp/tools/call
190 | Content-Type: application/json
191 | 
192 | {
193 |   "tool_name": "advanced_memory_search",
194 |   "arguments": {
195 |     "query": "API rate limiting implementation discussion",
196 |     "search_mode": "hybrid",
197 |     "consolidate_related": true,
198 |     "max_results": 15,
199 |     "filters": {
200 |       "memory_types": ["decision", "task", "reference"],
201 |       "tags": ["api", "rate-limiting", "performance"],
202 |       "time_range": "last month"
203 |     }
204 |   }
205 | }
206 | ```
207 | 
208 | **Response:**
209 | ```json
210 | {
211 |   "success": true,
212 |   "result": {
213 |     "search_results": [
214 |       {
215 |         "primary_content": "DECISION: Implement token bucket rate limiting for public API endpoints. Limit: 1000 requests/hour per API key. Burst capacity: 100 requests. Approved by architecture team.",
216 |         "content_hash": "api_rate_limit_decision_001",
217 |         "relevance_score": 0.95,
218 |         "memory_type": "decision",
219 |         "tags": ["api", "rate-limiting", "architecture", "approved"],
220 |         "created_at": "2024-01-10T14:30:00Z",
221 |         "consolidation": {
222 |           "related_content": [
223 |             {
224 |               "content": "Research: Token bucket vs sliding window rate limiting algorithms. Token bucket provides better burst handling for API scenarios.",
225 |               "relationship": "background_research",
226 |               "memory_type": "reference"
227 |             },
228 |             {
229 |               "content": "TASK: Implement rate limiting middleware in Express.js API server. Use redis for distributed rate limit storage. Due: 2024-01-20",
230 |               "relationship": "implementation_task", 
231 |               "memory_type": "task",
232 |               "status": "completed"
233 |             }
234 |           ],
235 |           "topic_summary": "API rate limiting decision with token bucket algorithm, researched and implemented successfully"
236 |         }
237 |       }
238 |     ],
239 |     "total_found": 8,
240 |     "consolidated_topics": [
241 |       {
242 |         "topic": "API Security & Performance",
243 |         "memory_count": 12,
244 |         "key_themes": ["rate limiting", "authentication", "caching", "monitoring"]
245 |       }
246 |     ],
247 |     "processing_time_ms": 45
248 |   }
249 | }
250 | ```
251 | 
252 | ### Example 2: Direct MCP Server Call (Claude Desktop)
253 | 
254 | **MCP Tool Definition:**
255 | ```json
256 | {
257 |   "name": "advanced_memory_search",
258 |   "description": "Search memories using hybrid semantic + keyword search with automatic content consolidation",
259 |   "inputSchema": {
260 |     "type": "object",
261 |     "properties": {
262 |       "query": {
263 |         "type": "string",
264 |         "description": "Search query combining keywords and concepts"
265 |       },
266 |       "search_mode": {
267 |         "type": "string",
268 |         "enum": ["hybrid", "semantic", "keyword", "auto"],
269 |         "default": "hybrid",
270 |         "description": "Search strategy to use"
271 |       },
272 |       "consolidate_related": {
273 |         "type": "boolean", 
274 |         "default": true,
275 |         "description": "Automatically group related memories and provide context"
276 |       },
277 |       "max_results": {
278 |         "type": "integer",
279 |         "default": 10,
280 |         "minimum": 1,
281 |         "maximum": 50,
282 |         "description": "Maximum number of results to return"
283 |       },
284 |       "filters": {
285 |         "type": "object",
286 |         "properties": {
287 |           "memory_types": {
288 |             "type": "array",
289 |             "items": {"type": "string"},
290 |             "description": "Filter by memory types: task, decision, note, reference"
291 |           },
292 |           "tags": {
293 |             "type": "array", 
294 |             "items": {"type": "string"},
295 |             "description": "Filter by specific tags"
296 |           },
297 |           "time_range": {
298 |             "type": "string",
299 |             "description": "Natural language time filter: 'last week', 'this month', etc."
300 |           },
301 |           "metadata_filters": {
302 |             "type": "object",
303 |             "description": "Filter by metadata fields and values"
304 |           }
305 |         }
306 |       }
307 |     },
308 |     "required": ["query"]
309 |   }
310 | }
311 | ```
312 | 
313 | **Example Usage in Claude Desktop:**
314 | 
315 | **User Query:** "Find all discussions about microservices architecture decisions"
316 | 
317 | **MCP Tool Call:**
318 | ```json
319 | {
320 |   "tool": "advanced_memory_search",
321 |   "arguments": {
322 |     "query": "microservices architecture decisions discussion",
323 |     "search_mode": "hybrid",
324 |     "consolidate_related": true,
325 |     "max_results": 20,
326 |     "filters": {
327 |       "memory_types": ["decision", "note", "reference"],
328 |       "tags": ["microservices", "architecture"],
329 |       "time_range": "last 3 months"
330 |     }
331 |   }
332 | }
333 | ```
334 | 
335 | **Claude's Response Based on Tool Result:**
336 | 
337 | "I found 15 memories related to microservices architecture decisions from the last 3 months. Here's what I discovered:
338 | 
339 | ## Key Architecture Decisions
340 | 
341 | **Primary Decision (Jan 15, 2024):**
342 | *DECISION: Adopt microservices architecture for the new customer platform. Services: User Management, Order Processing, Payment Gateway, Notification Service. Each service will have its own database and API. Approved by architecture committee.*
343 | 
344 | **Related Context:**
345 | - **Research Background**: Comparison study of monolith vs microservices showed 40% better scalability for our use case
346 | - **Implementation Plan**: 6-month phased migration starting with User Management service
347 | - **Success Metrics**: Service response time < 200ms, 99.9% uptime target
348 | 
349 | ## Consolidated Topics Found:
350 | 
351 | 1. **Service Communication** (8 memories)
352 |    - Event-driven architecture using RabbitMQ
353 |    - API gateway implementation with Kong
354 |    - Service discovery with Consul
355 | 
356 | 2. **Data Management** (4 memories)  
357 |    - Database per service pattern
358 |    - Event sourcing for audit trails
359 |    - Data consistency strategies
360 | 
361 | 3. **DevOps & Deployment** (3 memories)
362 |    - Docker containerization approach
363 |    - Kubernetes orchestration setup
364 |    - CI/CD pipeline modifications
365 | 
366 | The search found strong relationships between the architecture decisions and their implementation details, providing a complete picture of your microservices adoption journey."
367 | 
368 | ### Example 3: Project Consolidation MCP Tool
369 | 
370 | **MCP Tool Definition:**
371 | ```json
372 | {
373 |   "name": "consolidate_project_memories",
374 |   "description": "Automatically gather and organize all memories related to a specific project",
375 |   "inputSchema": {
376 |     "type": "object", 
377 |     "properties": {
378 |       "project_identifier": {
379 |         "type": "string",
380 |         "description": "Project name or identifier (e.g., 'Project Alpha', 'mobile-app-v2')"
381 |       },
382 |       "consolidation_depth": {
383 |         "type": "string",
384 |         "enum": ["shallow", "deep", "comprehensive"],
385 |         "default": "deep",
386 |         "description": "How extensively to search for related content"
387 |       },
388 |       "include_timeline": {
389 |         "type": "boolean",
390 |         "default": true,
391 |         "description": "Generate chronological timeline of project events"
392 |       }
393 |     },
394 |     "required": ["project_identifier"]
395 |   }
396 | }
397 | ```
398 | 
399 | **Usage Example:**
400 | ```json
401 | {
402 |   "tool": "consolidate_project_memories", 
403 |   "arguments": {
404 |     "project_identifier": "mobile app redesign",
405 |     "consolidation_depth": "comprehensive",
406 |     "include_timeline": true
407 |   }
408 | }
409 | ```
410 | 
411 | **Tool Response:**
412 | ```json
413 | {
414 |   "project_overview": {
415 |     "name": "Mobile App Redesign",
416 |     "total_memories": 47,
417 |     "date_range": "2023-11-01 to 2024-01-20",
418 |     "status": "in_progress",
419 |     "key_stakeholders": ["Product Team", "UX Design", "Mobile Dev Team"]
420 |   },
421 |   "timeline": [
422 |     {
423 |       "date": "2023-11-01",
424 |       "event": "Project kickoff and requirements gathering",
425 |       "type": "milestone",
426 |       "memories": 3
427 |     },
428 |     {
429 |       "date": "2023-11-15", 
430 |       "event": "UX wireframes and user research completed",
431 |       "type": "deliverable",
432 |       "memories": 8
433 |     },
434 |     {
435 |       "date": "2023-12-01",
436 |       "event": "Technical architecture decisions finalized", 
437 |       "type": "decision",
438 |       "memories": 5
439 |     }
440 |   ],
441 |   "key_decisions": [
442 |     {
443 |       "content": "DECISION: Use React Native for cross-platform development. Allows 80% code sharing between iOS/Android. Team already familiar with React.",
444 |       "impact": "high",
445 |       "date": "2023-11-20"
446 |     }
447 |   ],
448 |   "outstanding_issues": [
449 |     {
450 |       "content": "ISSUE: Performance concerns with large image galleries in React Native. Need optimization strategy.",
451 |       "priority": "high", 
452 |       "status": "open",
453 |       "assigned_to": "Mobile Dev Team"
454 |     }
455 |   ],
456 |   "related_projects": [
457 |     {
458 |       "name": "API v2 Migration",
459 |       "relationship": "dependency",
460 |       "status": "completed"
461 |     }
462 |   ]
463 | }
464 | ```
465 | 
466 | ## Claude Code Integration Examples
467 | 
468 | ### Example 1: Claude Code Slash Command
469 | 
470 | ```bash
471 | # Enhanced memory search with consolidation
472 | claude /memory-search-advanced "database performance optimization" --consolidate --filters="tags:database,performance;type:decision,reference"
473 | 
474 | # Quick project overview
475 | claude /memory-project-overview "Project Beta" --timeline --issues
476 | 
477 | # Intelligent search with auto-suggestions
478 | claude /memory-smart-search "user feedback login problems" --auto-expand --suggest-actions
479 | ```
480 | 
481 | ### Example 2: Claude Code Hook Integration
482 | 
483 | **Session Hook Usage:**
484 | ```javascript
485 | // .claude/hooks/memory-enhanced-search.js
486 | module.exports = {
487 |   name: "enhanced-memory-search",
488 |   description: "Automatically use hybrid search for memory queries",
489 |   trigger: "before_memory_search",
490 |   
491 |   async execute(context) {
492 |     // Automatically enhance memory searches with hybrid mode
493 |     if (context.tool === "retrieve_memory") {
494 |       context.arguments.search_mode = "hybrid";
495 |       context.arguments.consolidate_related = true;
496 |       context.arguments.include_context = true;
497 |     }
498 |     return context;
499 |   }
500 | };
501 | ```
502 | 
503 | These examples demonstrate how the Advanced Hybrid Search enhancement provides rich, contextual search capabilities through both the REST API and MCP interfaces, making it easy for users to find and understand related information in their memory store.
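504 | 
505 | As a further illustration, a session-start hook could pre-load consolidated project context by calling the `consolidate_project_memories` tool defined earlier. The sketch below assumes the same hypothetical hook API (`trigger`/`execute`) as Example 2 and an injected `callMemoryTool` helper; both are illustrative rather than part of the current codebase.
506 | 
507 | ```javascript
508 | // .claude/hooks/project-context-loader.js (illustrative sketch)
509 | module.exports = {
510 |   name: "project-context-loader",
511 |   description: "Pre-load consolidated project context at session start",
512 |   trigger: "session_start",
513 | 
514 |   async execute(context) {
515 |     // callMemoryTool is an assumed helper that forwards a tool call to the MCP server
516 |     const overview = await context.callMemoryTool("consolidate_project_memories", {
517 |       project_identifier: context.projectName,
518 |       consolidation_depth: "shallow",
519 |       include_timeline: false
520 |     });
521 | 
522 |     // Attach the overview so later prompts in the session can reference it
523 |     context.sessionMemory = overview;
524 |     return context;
525 |   }
526 | };
527 | ```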
```

--------------------------------------------------------------------------------
/tests/timestamp/test_hook_vs_manual_storage.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Test script to compare hook-generated vs manual memory storage for Issue #99.
  4 | This test validates timestamp handling, tag consistency, and discoverability
  5 | between different memory creation methods.
  6 | """
  7 | 
  8 | import sys
  9 | import os
 10 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
 11 | 
 12 | import asyncio
 13 | import time
 14 | import json
 15 | import tempfile
 16 | import httpx
 17 | from datetime import datetime, timedelta
 18 | from typing import Dict, List, Any, Optional
 19 | 
 20 | from mcp_memory_service.models.memory import Memory
 21 | from mcp_memory_service.utils.hashing import generate_content_hash
 22 | from mcp_memory_service.utils.time_parser import extract_time_expression
 23 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 24 | 
 25 | class HookVsManualStorageTest:
 26 |     """Test suite comparing hook-generated and manual memory storage."""
 27 | 
 28 |     def __init__(self, storage_backend: str = "sqlite_vec"):
 29 |         self.storage_backend = storage_backend
 30 |         self.storage = None
 31 |         self.test_memories_created = []
 32 | 
 33 |     async def setup(self):
 34 |         """Set up test environment and storage."""
 35 |         print(f"=== Setting up {self.storage_backend} storage for testing ===")
 36 | 
 37 |         if self.storage_backend == "sqlite_vec":
 38 |             # Create temporary database for testing
 39 |             self.temp_db = tempfile.NamedTemporaryFile(suffix=".db", delete=False)
 40 |             self.temp_db.close()
 41 | 
 42 |             self.storage = SqliteVecMemoryStorage(
 43 |                 db_path=self.temp_db.name,
 44 |                 embedding_model="all-MiniLM-L6-v2"
 45 |             )
 46 |             await self.storage.initialize()
 47 |             print(f"✅ SQLite-Vec storage initialized: {self.temp_db.name}")
 48 | 
 49 |     async def cleanup(self):
 50 |         """Clean up test environment."""
 51 |         # Note: SqliteVecMemoryStorage doesn't have a close() method
 52 |         self.storage = None
 53 | 
 54 |         if hasattr(self, 'temp_db') and os.path.exists(self.temp_db.name):
 55 |             os.unlink(self.temp_db.name)
 56 |             print("✅ Test database cleaned up")
 57 | 
 58 |     def create_hook_style_memory(self, content: str, project_context: Dict) -> Memory:
 59 |         """Create a memory as hooks would create it (with auto-generated tags)."""
 60 | 
 61 |         # Simulate hook behavior - generate tags like session-end.js does
 62 |         hook_tags = [
 63 |             'claude-code-session',
 64 |             'session-consolidation',
 65 |             project_context.get('name', 'unknown-project'),
 66 |             f"language:{project_context.get('language', 'unknown')}",
 67 |             *project_context.get('frameworks', [])[:2],  # Top 2 frameworks
 68 |         ]
 69 | 
 70 |         # Filter out None/empty tags
 71 |         hook_tags = [tag for tag in hook_tags if tag]
 72 | 
 73 |         memory = Memory(
 74 |             content=content,
 75 |             content_hash=generate_content_hash(content),
 76 |             tags=hook_tags,
 77 |             memory_type='session-summary',
 78 |             metadata={
 79 |                 'session_analysis': {
 80 |                     'topics': ['test-topic'],
 81 |                     'decisions_count': 1,
 82 |                     'insights_count': 1,
 83 |                     'confidence': 0.85
 84 |                 },
 85 |                 'project_context': project_context,
 86 |                 'generated_by': 'claude-code-session-end-hook',
 87 |                 'generated_at': datetime.now().isoformat()
 88 |             }
 89 |         )
 90 | 
 91 |         return memory
 92 | 
 93 |     def create_manual_memory(self, content: str, user_tags: List[str] = None) -> Memory:
 94 |         """Create a memory as manual /memory-store would create it."""
 95 | 
 96 |         # Manual memories typically have user-provided tags, not auto-generated ones
 97 |         manual_tags = user_tags or []
 98 | 
 99 |         memory = Memory(
100 |             content=content,
101 |             content_hash=generate_content_hash(content),
102 |             tags=manual_tags,
103 |             memory_type='note',
104 |             metadata={
105 |                 'created_by': 'manual-storage',
106 |                 'source': 'user-input'
107 |             }
108 |         )
109 | 
110 |         return memory
111 | 
112 |     async def test_timestamp_consistency(self):
113 |         """Test 1: Compare timestamp handling between hook and manual memories."""
114 |         print("\n🧪 Test 1: Timestamp Consistency")
115 |         print("-" * 50)
116 | 
117 |         # Create memories with slight time differences
118 |         base_time = time.time()
119 | 
120 |         project_context = {
121 |             'name': 'mcp-memory-service',
122 |             'language': 'python',
123 |             'frameworks': ['fastapi', 'chromadb']
124 |         }
125 | 
126 |         # Hook-style memory
127 |         hook_memory = self.create_hook_style_memory(
128 |             "Implemented timestamp standardization for Issue #99",
129 |             project_context
130 |         )
131 | 
132 |         # Manual memory created shortly after
133 |         time.sleep(0.1)  # Small delay to test precision
134 |         manual_memory = self.create_manual_memory(
135 |             "Fixed timestamp precision issue in memory storage",
136 |             ['timestamp-fix', 'issue-99', 'debugging']
137 |         )
138 | 
139 |         # Store both memories
140 |         await self.storage.store(hook_memory)
141 |         await self.storage.store(manual_memory)
142 | 
143 |         self.test_memories_created.extend([hook_memory.content_hash, manual_memory.content_hash])
144 | 
145 |         print(f"Hook memory timestamps:")
146 |         print(f"  created_at: {hook_memory.created_at}")
147 |         print(f"  created_at_iso: {hook_memory.created_at_iso}")
148 |         print(f"  Type check: {type(hook_memory.created_at)} / {type(hook_memory.created_at_iso)}")
149 | 
150 |         print(f"\nManual memory timestamps:")
151 |         print(f"  created_at: {manual_memory.created_at}")
152 |         print(f"  created_at_iso: {manual_memory.created_at_iso}")
153 |         print(f"  Type check: {type(manual_memory.created_at)} / {type(manual_memory.created_at_iso)}")
154 | 
155 |         # Check if both have proper timestamps
156 |         hook_has_timestamps = (hook_memory.created_at is not None and
157 |                               hook_memory.created_at_iso is not None)
158 |         manual_has_timestamps = (manual_memory.created_at is not None and
159 |                                manual_memory.created_at_iso is not None)
160 | 
161 |         print(f"\nTimestamp validation:")
162 |         print(f"  Hook memory has complete timestamps: {hook_has_timestamps}")
163 |         print(f"  Manual memory has complete timestamps: {manual_has_timestamps}")
164 | 
165 |         if hook_has_timestamps and manual_has_timestamps:
166 |             print("✅ Both memory types have consistent timestamp formats")
167 |         else:
168 |             print("❌ Timestamp inconsistency detected!")
169 | 
170 |         return {
171 |             'hook_has_timestamps': hook_has_timestamps,
172 |             'manual_has_timestamps': manual_has_timestamps,
173 |             'hook_memory': hook_memory,
174 |             'manual_memory': manual_memory
175 |         }
176 | 
177 |     async def test_tag_consistency(self):
178 |         """Test 2: Compare tag patterns between hook and manual memories."""
179 |         print("\n🧪 Test 2: Tag Consistency Analysis")
180 |         print("-" * 50)
181 | 
182 |         project_context = {
183 |             'name': 'test-project',
184 |             'language': 'typescript',
185 |             'frameworks': ['react', 'node']
186 |         }
187 | 
188 |         # Create hook memory
189 |         hook_memory = self.create_hook_style_memory(
190 |             "Testing tag consistency between storage methods",
191 |             project_context
192 |         )
193 | 
194 |         # Create manual memory with content-appropriate tags
195 |         manual_memory = self.create_manual_memory(
196 |             "Testing tag consistency between storage methods",
197 |             ['testing', 'tag-consistency', 'storage-methods', 'validation']
198 |         )
199 | 
200 |         await self.storage.store(hook_memory)
201 |         await self.storage.store(manual_memory)
202 | 
203 |         self.test_memories_created.extend([hook_memory.content_hash, manual_memory.content_hash])
204 | 
205 |         print(f"Hook memory tags: {hook_memory.tags}")
206 |         print(f"Manual memory tags: {manual_memory.tags}")
207 | 
208 |         # Analyze tag patterns
209 |         hook_has_auto_tags = any('claude-code' in tag for tag in hook_memory.tags)
210 |         manual_has_content_tags = len(manual_memory.tags) > 0 and not any('auto-generated' in tag for tag in manual_memory.tags)
211 | 
212 |         print(f"\nTag analysis:")
213 |         print(f"  Hook memory has auto-generated tags: {hook_has_auto_tags}")
214 |         print(f"  Manual memory has content-relevant tags: {manual_has_content_tags}")
215 |         print(f"  Hook tag count: {len(hook_memory.tags)}")
216 |         print(f"  Manual tag count: {len(manual_memory.tags)}")
217 | 
218 |         if hook_has_auto_tags and manual_has_content_tags:
219 |             print("✅ Tag patterns are appropriately different and content-relevant")
220 |         else:
221 |             print("❌ Tag pattern issues detected")
222 | 
223 |         return {
224 |             'hook_tags': hook_memory.tags,
225 |             'manual_tags': manual_memory.tags,
226 |             'hook_has_auto_tags': hook_has_auto_tags,
227 |             'manual_has_content_tags': manual_has_content_tags
228 |         }
229 | 
230 |     async def test_time_based_search_consistency(self):
231 |         """Test 3: Verify both memory types are discoverable in time-based searches."""
232 |         print("\n🧪 Test 3: Time-Based Search Discoverability")
233 |         print("-" * 50)
234 | 
235 |         # Create memories with known timestamps
236 |         current_time = time.time()
237 |         yesterday_time = current_time - (24 * 60 * 60)  # 24 hours ago
238 | 
239 |         # Create hook memory with specific timestamp
240 |         hook_memory = self.create_hook_style_memory(
241 |             "Hook memory created yesterday for search testing",
242 |             {'name': 'search-test', 'language': 'python', 'frameworks': []}
243 |         )
244 |         hook_memory.created_at = yesterday_time
245 |         hook_memory.created_at_iso = datetime.fromtimestamp(yesterday_time).isoformat() + "Z"
246 | 
247 |         # Create manual memory with specific timestamp
248 |         manual_memory = self.create_manual_memory(
249 |             "Manual memory created yesterday for search testing",
250 |             ['search-test', 'yesterday', 'discoverability']
251 |         )
252 |         manual_memory.created_at = yesterday_time + 100  # Slightly later
253 |         manual_memory.created_at_iso = datetime.fromtimestamp(yesterday_time + 100).isoformat() + "Z"
254 | 
255 |         await self.storage.store(hook_memory)
256 |         await self.storage.store(manual_memory)
257 | 
258 |         self.test_memories_created.extend([hook_memory.content_hash, manual_memory.content_hash])
259 | 
260 |         # Test time-based recall
261 |         query = "yesterday"
262 |         cleaned_query, (start_ts, end_ts) = extract_time_expression(query)
263 | 
264 |         if start_ts and end_ts:
265 |             print(f"Search range: {datetime.fromtimestamp(start_ts)} to {datetime.fromtimestamp(end_ts)}")
266 |             print(f"Hook memory timestamp: {datetime.fromtimestamp(hook_memory.created_at)}")
267 |             print(f"Manual memory timestamp: {datetime.fromtimestamp(manual_memory.created_at)}")
268 | 
269 |             # Check if memories fall within range
270 |             hook_in_range = start_ts <= hook_memory.created_at <= end_ts
271 |             manual_in_range = start_ts <= manual_memory.created_at <= end_ts
272 | 
273 |             print(f"\nTime range analysis:")
274 |             print(f"  Hook memory in range: {hook_in_range}")
275 |             print(f"  Manual memory in range: {manual_in_range}")
276 | 
277 |             if hook_in_range and manual_in_range:
278 |                 print("✅ Both memory types would be discoverable in time-based searches")
279 |                 return {'discoverability_consistent': True}
280 |             else:
281 |                 print("❌ Time-based search discoverability inconsistent")
282 |                 return {'discoverability_consistent': False}
283 |         else:
284 |             print("⚠️  Could not parse time expression for testing")
285 |             return {'discoverability_consistent': None}
286 | 
287 |     async def test_metadata_structure_comparison(self):
288 |         """Test 4: Compare metadata structure between hook and manual memories."""
289 |         print("\n🧪 Test 4: Metadata Structure Comparison")
290 |         print("-" * 50)
291 | 
292 |         # Create memories with different metadata patterns
293 |         hook_memory = self.create_hook_style_memory(
294 |             "Testing metadata structure consistency",
295 |             {'name': 'metadata-test', 'language': 'javascript', 'frameworks': ['express']}
296 |         )
297 | 
298 |         manual_memory = self.create_manual_memory(
299 |             "Testing metadata structure consistency",
300 |             ['metadata-test', 'structure-analysis']
301 |         )
302 | 
303 |         await self.storage.store(hook_memory)
304 |         await self.storage.store(manual_memory)
305 | 
306 |         self.test_memories_created.extend([hook_memory.content_hash, manual_memory.content_hash])
307 | 
308 |         # Analyze metadata structures
309 |         hook_metadata_keys = set(hook_memory.metadata.keys()) if hook_memory.metadata else set()
310 |         manual_metadata_keys = set(manual_memory.metadata.keys()) if manual_memory.metadata else set()
311 | 
312 |         print(f"Hook memory metadata keys: {sorted(hook_metadata_keys)}")
313 |         print(f"Manual memory metadata keys: {sorted(manual_metadata_keys)}")
314 | 
315 |         # Check for required fields
316 |         hook_has_timestamps = hasattr(hook_memory, 'created_at_iso') and hook_memory.created_at_iso is not None
317 |         manual_has_timestamps = hasattr(manual_memory, 'created_at_iso') and manual_memory.created_at_iso is not None
318 | 
319 |         print(f"\nMetadata analysis:")
320 |         print(f"  Hook memory has ISO timestamp: {hook_has_timestamps}")
321 |         print(f"  Manual memory has ISO timestamp: {manual_has_timestamps}")
322 |         print(f"  Hook metadata structure: {bool(hook_memory.metadata)}")
323 |         print(f"  Manual metadata structure: {bool(manual_memory.metadata)}")
324 | 
325 |         return {
326 |             'hook_metadata_keys': hook_metadata_keys,
327 |             'manual_metadata_keys': manual_metadata_keys,
328 |             'metadata_consistency': hook_has_timestamps and manual_has_timestamps
329 |         }
330 | 
331 |     async def run_all_tests(self):
332 |         """Run all tests and compile results."""
333 |         print("=" * 70)
334 |         print("MCP Memory Service: Hook vs Manual Storage Consistency Tests")
335 |         print("Testing for Issue #99 - Memory Storage Inconsistency")
336 |         print("=" * 70)
337 | 
338 |         try:
339 |             await self.setup()
340 | 
341 |             # Run individual tests
342 |             timestamp_results = await self.test_timestamp_consistency()
343 |             tag_results = await self.test_tag_consistency()
344 |             search_results = await self.test_time_based_search_consistency()
345 |             metadata_results = await self.test_metadata_structure_comparison()
346 | 
347 |             # Compile overall results
348 |             print("\n" + "=" * 70)
349 |             print("TEST SUMMARY")
350 |             print("=" * 70)
351 | 
352 |             tests_passed = 0
353 |             total_tests = 4
354 | 
355 |             if timestamp_results.get('hook_has_timestamps') and timestamp_results.get('manual_has_timestamps'):
356 |                 print("✅ PASS: Timestamp Consistency")
357 |                 tests_passed += 1
358 |             else:
359 |                 print("❌ FAIL: Timestamp Consistency")
360 | 
361 |             if tag_results.get('hook_has_auto_tags') and tag_results.get('manual_has_content_tags'):
362 |                 print("✅ PASS: Tag Pattern Appropriateness")
363 |                 tests_passed += 1
364 |             else:
365 |                 print("❌ FAIL: Tag Pattern Issues")
366 | 
367 |             if search_results.get('discoverability_consistent'):
368 |                 print("✅ PASS: Time-Based Search Discoverability")
369 |                 tests_passed += 1
370 |             elif search_results.get('discoverability_consistent') is False:
371 |                 print("❌ FAIL: Time-Based Search Discoverability")
372 |             else:
373 |                 print("⚠️  SKIP: Time-Based Search Test (parsing issue)")
374 | 
375 |             if metadata_results.get('metadata_consistency'):
376 |                 print("✅ PASS: Metadata Structure Consistency")
377 |                 tests_passed += 1
378 |             else:
379 |                 print("❌ FAIL: Metadata Structure Consistency")
380 | 
381 |             print(f"\nOverall Result: {tests_passed}/{total_tests} tests passed")
382 | 
383 |             if tests_passed == total_tests:
384 |                 print("🎉 All tests passed! No storage inconsistency detected.")
385 |                 return True
386 |             else:
387 |                 print("⚠️  Storage inconsistencies detected - Issue #99 confirmed.")
388 |                 return False
389 | 
390 |         finally:
391 |             await self.cleanup()
392 | 
393 | async def main():
394 |     """Main test execution."""
395 |     test_suite = HookVsManualStorageTest("sqlite_vec")
396 |     success = await test_suite.run_all_tests()
397 |     return 0 if success else 1
398 | 
399 | if __name__ == "__main__":
400 |     exit_code = asyncio.run(main())
401 |     sys.exit(exit_code)
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/cli/ingestion.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | CLI commands for document ingestion.
 17 | """
 18 | 
 19 | import asyncio
 20 | import logging
 21 | import sys
 22 | import time
 23 | from pathlib import Path
 24 | from typing import List, Optional
 25 | 
 26 | import click
 27 | 
 28 | from ..ingestion import get_loader_for_file, is_supported_file, SUPPORTED_FORMATS
 29 | from ..models.memory import Memory
 30 | from ..utils import create_memory_from_chunk, _process_and_store_chunk
 31 | from ..utils.hashing import generate_content_hash  # used when building chunk memories below
 32 | logger = logging.getLogger(__name__)
 33 | 
 34 | 
 35 | def add_ingestion_commands(cli_group):
 36 |     """Add ingestion commands to a Click CLI group."""
 37 |     cli_group.add_command(ingest_document)
 38 |     cli_group.add_command(ingest_directory)
 39 |     cli_group.add_command(list_formats)
 40 | 
 41 | 
 42 | @click.command()
 43 | @click.argument('file_path', type=click.Path(exists=True, path_type=Path))
 44 | @click.option('--tags', '-t', multiple=True, help='Tags to apply to all memories (can be used multiple times)')
 45 | @click.option('--chunk-size', '-c', default=1000, help='Target size for text chunks in characters')
 46 | @click.option('--chunk-overlap', '-o', default=200, help='Characters to overlap between chunks')
 47 | @click.option('--memory-type', '-m', default='document', help='Type label for created memories')
 48 | @click.option('--storage-backend', '-s', default='sqlite_vec',
 49 |               type=click.Choice(['sqlite_vec', 'sqlite-vec', 'cloudflare', 'hybrid']), help='Storage backend to use')
 50 | @click.option('--verbose', '-v', is_flag=True, help='Enable verbose output')
 51 | def ingest_document(file_path: Path, tags: tuple, chunk_size: int, chunk_overlap: int,
 52 |                    memory_type: str, storage_backend: str, verbose: bool):
 53 |     """
 54 |     Ingest a single document file into the memory database.
 55 |     
 56 |     Supports multiple formats including PDF, text, and Markdown files.
 57 |     The document will be parsed, chunked intelligently, and stored as multiple memories.
 58 |     
 59 |     Examples:
 60 |         memory ingest-document manual.pdf --tags documentation,manual
 61 |         memory ingest-document README.md --chunk-size 500 --verbose
 62 |         memory ingest-document data.txt --memory-type reference --tags important
 63 |     """
 64 |     if verbose:
 65 |         logging.basicConfig(level=logging.INFO)
 66 |         click.echo(f"📄 Processing document: {file_path}")
 67 |     
 68 |     async def run_ingestion():
 69 |         from .utils import get_storage
 70 |         
 71 |         try:
 72 |             # Initialize storage
 73 |             storage = await get_storage(storage_backend)
 74 |             
 75 |             # Get appropriate document loader
 76 |             loader = get_loader_for_file(file_path)
 77 |             if loader is None:
 78 |                 click.echo(f"❌ Error: Unsupported file format: {file_path.suffix}", err=True)
 79 |                 return False
 80 |             
 81 |             # Configure loader
 82 |             loader.chunk_size = chunk_size
 83 |             loader.chunk_overlap = chunk_overlap
 84 |             
 85 |             if verbose:
 86 |                 click.echo(f"🔧 Using loader: {loader.__class__.__name__}")
 87 |                 click.echo(f"⚙️  Chunk size: {chunk_size}, Overlap: {chunk_overlap}")
 88 |             
 89 |             start_time = time.time()
 90 |             chunks_processed = 0
 91 |             chunks_stored = 0
 92 |             errors = []
 93 |             
 94 |             # Extract and store chunks
 95 |             with click.progressbar(length=0, label='Processing chunks') as bar:
 96 |                 async for chunk in loader.extract_chunks(file_path):
 97 |                     chunks_processed += 1
 98 |                     bar.length = chunks_processed
 99 |                     bar.update(1)
100 |                     
101 |                     try:
102 |                         # Combine CLI tags with chunk metadata tags
103 |                         all_tags = list(tags)
104 |                         if chunk.metadata.get('tags'):
105 |                             # Handle tags from chunk metadata (can be string or list)
106 |                             chunk_tags = chunk.metadata['tags']
107 |                             if isinstance(chunk_tags, str):
108 |                                 # Split comma-separated string into list
109 |                                 chunk_tags = [tag.strip() for tag in chunk_tags.split(',') if tag.strip()]
110 |                             all_tags.extend(chunk_tags)
111 |                         
112 |                         # Create memory object
113 |                         memory = Memory(
114 |                             content=chunk.content,
115 |                             content_hash=generate_content_hash(chunk.content, chunk.metadata),
116 |                             tags=list(set(all_tags)),  # Remove duplicates
117 |                             memory_type=memory_type,
118 |                             metadata=chunk.metadata
119 |                         )
120 |                         
121 |                         # Store the memory
122 |                         success, error = await storage.store(memory)
123 |                         if success:
124 |                             chunks_stored += 1
125 |                         else:
126 |                             errors.append(f"Chunk {chunk.chunk_index}: {error}")
127 |                             if verbose:
128 |                                 click.echo(f"⚠️  Error storing chunk {chunk.chunk_index}: {error}")
129 |                                 
130 |                     except Exception as e:
131 |                         errors.append(f"Chunk {chunk.chunk_index}: {str(e)}")
132 |                         if verbose:
133 |                             click.echo(f"⚠️  Exception in chunk {chunk.chunk_index}: {str(e)}")
134 |             
135 |             processing_time = time.time() - start_time
136 |             success_rate = (chunks_stored / chunks_processed * 100) if chunks_processed > 0 else 0
137 |             
138 |             # Display results
139 |             click.echo(f"\n✅ Document ingestion completed: {file_path.name}")
140 |             click.echo(f"📄 Chunks processed: {chunks_processed}")
141 |             click.echo(f"💾 Chunks stored: {chunks_stored}")
142 |             click.echo(f"⚡ Success rate: {success_rate:.1f}%")
143 |             click.echo(f"⏱️  Processing time: {processing_time:.2f} seconds")
144 |             
145 |             if errors:
146 |                 click.echo(f"⚠️  Errors encountered: {len(errors)}")
147 |                 if verbose:
148 |                     for error in errors[:5]:  # Show first 5 errors
149 |                         click.echo(f"   - {error}")
150 |                     if len(errors) > 5:
151 |                         click.echo(f"   ... and {len(errors) - 5} more errors")
152 |             
153 |             return chunks_stored > 0
154 |             
155 |         except Exception as e:
156 |             click.echo(f"❌ Error ingesting document: {str(e)}", err=True)
157 |             if verbose:
158 |                 import traceback
159 |                 click.echo(traceback.format_exc(), err=True)
160 |             return False
161 |         finally:
162 |             if 'storage' in locals():
163 |                 await storage.close()
164 |     
165 |     success = asyncio.run(run_ingestion())
166 |     sys.exit(0 if success else 1)
167 | 
168 | 
169 | @click.command()
170 | @click.argument('directory_path', type=click.Path(exists=True, file_okay=False, path_type=Path))
171 | @click.option('--tags', '-t', multiple=True, help='Tags to apply to all memories (can be used multiple times)')
172 | @click.option('--recursive', '-r', is_flag=True, default=True, help='Process subdirectories recursively')
173 | @click.option('--extensions', '-e', multiple=True, help='File extensions to process (default: all supported)')
174 | @click.option('--chunk-size', '-c', default=1000, help='Target size for text chunks in characters')
175 | @click.option('--max-files', default=100, help='Maximum number of files to process')
176 | @click.option('--storage-backend', '-s', default='sqlite_vec',
177 |               type=click.Choice(['sqlite_vec', 'sqlite-vec', 'cloudflare', 'hybrid']), help='Storage backend to use')
178 | @click.option('--verbose', '-v', is_flag=True, help='Enable verbose output')
179 | @click.option('--dry-run', is_flag=True, help='Show what would be processed without storing')
180 | def ingest_directory(directory_path: Path, tags: tuple, recursive: bool, extensions: tuple,
181 |                     chunk_size: int, max_files: int, storage_backend: str, verbose: bool, dry_run: bool):
182 |     """
183 |     Batch ingest all supported documents from a directory.
184 |     
185 |     Recursively processes all supported file types in the directory,
186 |     creating memories with consistent tagging and metadata.
187 |     
188 |     Examples:
189 |         memory ingest-directory ./docs --tags knowledge-base --recursive
190 |         memory ingest-directory ./manuals --extensions pdf,md --max-files 50
191 |         memory ingest-directory ./content --dry-run --verbose
192 |     """
193 |     if verbose:
194 |         logging.basicConfig(level=logging.INFO)
195 |         click.echo(f"📁 Processing directory: {directory_path}")
196 |     
197 |     async def run_batch_ingestion():
198 |         from .utils import get_storage
199 |         
200 |         try:
201 |             # Initialize storage (unless dry run)
202 |             storage = None if dry_run else await get_storage(storage_backend)
203 |             
204 |             # Determine file extensions to process
205 |             if extensions:
206 |                 file_extensions = [ext.lstrip('.') for ext in extensions]
207 |             else:
208 |                 file_extensions = list(SUPPORTED_FORMATS.keys())
209 |             
210 |             if verbose:
211 |                 click.echo(f"🔍 Looking for extensions: {', '.join(file_extensions)}")
212 |                 click.echo(f"📊 Max files: {max_files}, Recursive: {recursive}")
213 |             
214 |             # Find all supported files
215 |             all_files = []
216 |             for ext in file_extensions:
217 |                 ext_pattern = f"*.{ext.lstrip('.')}"
218 |                 if recursive:
219 |                     files = list(directory_path.rglob(ext_pattern))
220 |                 else:
221 |                     files = list(directory_path.glob(ext_pattern))
222 |                 all_files.extend(files)
223 |             
224 |             # Remove duplicates and filter supported files
225 |             unique_files = []
226 |             seen = set()
227 |             for file_path in all_files:
228 |                 if file_path not in seen and is_supported_file(file_path):
229 |                     unique_files.append(file_path)
230 |                     seen.add(file_path)
231 |             
232 |             # Limit number of files
233 |             files_to_process = unique_files[:max_files]
234 |             
235 |             if not files_to_process:
236 |                 click.echo(f"❌ No supported files found in directory: {directory_path}")
237 |                 return False
238 |             
239 |             click.echo(f"📋 Found {len(files_to_process)} files to process")
240 |             
241 |             if dry_run:
242 |                 click.echo("🔍 DRY RUN - Files that would be processed:")
243 |                 for file_path in files_to_process:
244 |                     click.echo(f"   📄 {file_path}")
245 |                 return True
246 |             
247 |             start_time = time.time()
248 |             total_chunks_processed = 0
249 |             total_chunks_stored = 0
250 |             files_processed = 0
251 |             files_failed = 0
252 |             all_errors = []
253 |             
254 |             # Process each file with progress bar
255 |             with click.progressbar(files_to_process, label='Processing files') as files_bar:
256 |                 for file_path in files_bar:
257 |                     try:
258 |                         if verbose:
259 |                             click.echo(f"\n🔄 Processing: {file_path.name}")
260 |                         
261 |                         # Get appropriate document loader
262 |                         loader = get_loader_for_file(file_path)
263 |                         if loader is None:
264 |                             all_errors.append(f"{file_path.name}: Unsupported format")
265 |                             files_failed += 1
266 |                             continue
267 |                         
268 |                         # Configure loader
269 |                         loader.chunk_size = chunk_size
270 |                         
271 |                         file_chunks_processed = 0
272 |                         file_chunks_stored = 0
273 |                         
274 |                         # Extract and store chunks from this file
275 |                         async for chunk in loader.extract_chunks(file_path):
276 |                             file_chunks_processed += 1
277 |                             total_chunks_processed += 1
278 |                             
279 |                             # Process and store the chunk
280 |                             success, error = await _process_and_store_chunk(
281 |                                 chunk,
282 |                                 storage,
283 |                                 file_path.name,
284 |                                 base_tags=list(tags),
285 |                                 context_tags={
286 |                                     "source_dir": directory_path.name,
287 |                                     "file_type": file_path.suffix.lstrip('.')
288 |                                 }
289 |                             )
290 |                             
291 |                             if success:
292 |                                 file_chunks_stored += 1
293 |                                 total_chunks_stored += 1
294 |                             else:
295 |                                 all_errors.append(error)
296 |                         
297 |                         if file_chunks_stored > 0:
298 |                             files_processed += 1
299 |                             if verbose:
300 |                                 click.echo(f"   ✅ {file_chunks_stored}/{file_chunks_processed} chunks stored")
301 |                         else:
302 |                             files_failed += 1
303 |                             if verbose:
304 |                                 click.echo(f"   ❌ No chunks stored")
305 |                                 
306 |                     except Exception as e:
307 |                         files_failed += 1
308 |                         all_errors.append(f"{file_path.name}: {str(e)}")
309 |                         if verbose:
310 |                             click.echo(f"   ❌ Error: {str(e)}")
311 |             
312 |             processing_time = time.time() - start_time
313 |             success_rate = (total_chunks_stored / total_chunks_processed * 100) if total_chunks_processed > 0 else 0
314 |             
315 |             # Display results
316 |             click.echo(f"\n✅ Directory ingestion completed: {directory_path.name}")
317 |             click.echo(f"📁 Files processed: {files_processed}/{len(files_to_process)}")
318 |             click.echo(f"📄 Total chunks processed: {total_chunks_processed}")
319 |             click.echo(f"💾 Total chunks stored: {total_chunks_stored}")
320 |             click.echo(f"⚡ Success rate: {success_rate:.1f}%")
321 |             click.echo(f"⏱️  Processing time: {processing_time:.2f} seconds")
322 |             
323 |             if files_failed > 0:
324 |                 click.echo(f"❌ Files failed: {files_failed}")
325 |             
326 |             if all_errors:
327 |                 click.echo(f"⚠️  Total errors: {len(all_errors)}")
328 |                 if verbose:
329 |                     error_limit = 10
330 |                     for error in all_errors[:error_limit]:
331 |                         click.echo(f"   - {error}")
332 |                     if len(all_errors) > error_limit:
333 |                         click.echo(f"   ... and {len(all_errors) - error_limit} more errors")
334 |             
335 |             return total_chunks_stored > 0
336 |             
337 |         except Exception as e:
338 |             click.echo(f"❌ Error in batch ingestion: {str(e)}", err=True)
339 |             if verbose:
340 |                 import traceback
341 |                 click.echo(traceback.format_exc(), err=True)
342 |             return False
343 |         finally:
344 |             if storage:
345 |                 await storage.close()
346 |     
347 |     success = asyncio.run(run_batch_ingestion())
348 |     sys.exit(0 if success else 1)
349 | 
350 | 
351 | @click.command()
352 | def list_formats() -> None:
353 |     """
354 |     List all supported document formats for ingestion.
355 |     
356 |     Shows file extensions and descriptions of supported document types.
357 |     """
358 |     click.echo("📋 Supported document formats for ingestion:\n")
359 |     
360 |     for ext, description in SUPPORTED_FORMATS.items():
361 |         click.echo(f"  📄 .{ext:<8} - {description}")
362 |     
363 |     click.echo(f"\n✨ Total: {len(SUPPORTED_FORMATS)} supported formats")
364 |     click.echo("\nExamples:")
365 |     click.echo("  memory ingest-document manual.pdf")
366 |     click.echo("  memory ingest-directory ./docs --extensions pdf,md")
```

--------------------------------------------------------------------------------
/claude-hooks/utilities/git-analyzer.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Git Context Analyzer
  3 |  * Analyzes git repository history and changelog to provide development context for memory retrieval
  4 |  */
  5 | 
  6 | const fs = require('fs').promises;
  7 | const path = require('path');
  8 | const { execSync } = require('child_process');
  9 | 
 10 | /**
 11 |  * Get recent commit history with detailed information
 12 |  */
 13 | async function getRecentCommits(workingDir, options = {}) {
 14 |     try {
 15 |         const {
 16 |             days = 14,
 17 |             maxCommits = 20,
 18 |             includeMerges = false
 19 |         } = options;
 20 |         
 21 |         // Build git log command
 22 |         let gitCommand = `git log --pretty=format:"%H|%aI|%s|%an" --max-count=${maxCommits}`;
 23 |         
 24 |         if (!includeMerges) {
 25 |             gitCommand += ' --no-merges';
 26 |         }
 27 |         
 28 |         // Add time filter
 29 |         const sinceDate = new Date();
 30 |         sinceDate.setDate(sinceDate.getDate() - days);
 31 |         gitCommand += ` --since="${sinceDate.toISOString()}"`;
 32 |         
 33 |         const output = execSync(gitCommand, { 
 34 |             cwd: path.resolve(workingDir), 
 35 |             encoding: 'utf8',
 36 |             timeout: 10000
 37 |         });
 38 |         
 39 |         if (!output.trim()) {
 40 |             return [];
 41 |         }
 42 |         
 43 |         const commits = output.trim().split('\n').map(line => {
 44 |             const [hash, date, message, author] = line.split('|');
 45 |             return {
 46 |                 hash: hash?.substring(0, 8),
 47 |                 fullHash: hash,
 48 |                 date: new Date(date),
 49 |                 message: message || '',
 50 |                 author: author || '',
 51 |                 daysSinceCommit: Math.floor((new Date() - new Date(date)) / (1000 * 60 * 60 * 24))
 52 |             };
 53 |         });
 54 |         
 55 |         // Get file changes for recent commits (last 5 commits for performance)
 56 |         const recentCommits = commits.slice(0, Math.min(5, commits.length));
 57 |         for (const commit of recentCommits) {
 58 |             try {
 59 |                 const filesOutput = execSync(`git show --name-only --pretty="" ${commit.fullHash}`, {
 60 |                     cwd: path.resolve(workingDir),
 61 |                     encoding: 'utf8',
 62 |                     timeout: 5000
 63 |                 });
 64 |                 commit.files = filesOutput.trim().split('\n').filter(f => f.length > 0);
 65 |             } catch (error) {
 66 |                 commit.files = [];
 67 |             }
 68 |         }
 69 |         
 70 |         return commits;
 71 |         
 72 |     } catch (error) {
 73 |         // Silently fail for non-git directories
 74 |         return [];
 75 |     }
 76 | }
 77 | 
 78 | /**
 79 |  * Parse CHANGELOG.md for recent entries
 80 |  */
 81 | async function parseChangelog(workingDir) {
 82 |     try {
 83 |         let changelogPath = path.join(workingDir, 'CHANGELOG.md');  // 'let' because it may be reassigned to an alternative file below
 84 |         
 85 |         try {
 86 |             await fs.access(changelogPath);
 87 |         } catch {
 88 |             // Try alternative locations
 89 |             const altPaths = ['changelog.md', 'HISTORY.md', 'RELEASES.md'];
 90 |             let found = false;
 91 |             for (const altPath of altPaths) {
 92 |                 try {
 93 |                     await fs.access(path.join(workingDir, altPath));
 94 |                     changelogPath = path.join(workingDir, altPath);
 95 |                     found = true;
 96 |                     break;
 97 |                 } catch {}
 98 |             }
 99 |             if (!found) return null;
100 |         }
101 |         
102 |         const content = await fs.readFile(changelogPath, 'utf8');
103 |         
104 |         // Parse changelog entries (assuming standard markdown format)
105 |         const entries = [];
106 |         const lines = content.split('\n');
107 |         let currentVersion = null;
108 |         let currentDate = null;
109 |         let currentChanges = [];
110 |         
111 |         for (const line of lines) {
112 |             // Match version headers: ## [1.0.0] - 2024-08-25 or ## v1.0.0
113 |             const versionMatch = line.match(/^##\s*\[?v?([^\]]+)\]?\s*-?\s*(.*)$/);
114 |             if (versionMatch) {
115 |                 // Save previous entry
116 |                 if (currentVersion && currentChanges.length > 0) {
117 |                     entries.push({
118 |                         version: currentVersion,
119 |                         date: currentDate,
120 |                         changes: currentChanges.slice(),
121 |                         raw: currentChanges.join('\n')
122 |                     });
123 |                 }
124 |                 
125 |                 currentVersion = versionMatch[1];
126 |                 currentDate = versionMatch[2] || null;
127 |                 currentChanges = [];
128 |                 continue;
129 |             }
130 |             
131 |             // Collect changes under current version
132 |             if (currentVersion && line.trim()) {
133 |                 // Skip section headers like "### Added", "### Fixed"
134 |                 if (!line.match(/^###\s/)) {
135 |                     currentChanges.push(line.trim());
136 |                 }
137 |             }
138 |         }
139 |         
140 |         // Don't forget the last entry
141 |         if (currentVersion && currentChanges.length > 0) {
142 |             entries.push({
143 |                 version: currentVersion,
144 |                 date: currentDate,
145 |                 changes: currentChanges.slice(),
146 |                 raw: currentChanges.join('\n')
147 |             });
148 |         }
149 |         
150 |         // Return only recent entries (last 3 versions or entries from last 30 days)
151 |         const cutoffDate = new Date();
152 |         cutoffDate.setDate(cutoffDate.getDate() - 30);
153 |         
154 |         const recentEntries = entries.slice(0, 3).filter(entry => {
155 |             if (!entry.date) return true; // Include entries without dates
156 |             try {
157 |                 const entryDate = new Date(entry.date);
158 |                 return entryDate >= cutoffDate;
159 |             } catch {
160 |                 return true; // Include entries with unparseable dates
161 |             }
162 |         });
163 |         
164 |         return recentEntries.length > 0 ? recentEntries : null;
165 |         
166 |     } catch (error) {
167 |         // Silently fail if changelog not found or not readable
168 |         return null;
169 |     }
170 | }
171 | 
172 | /**
173 |  * Extract development keywords from git history and changelog
174 |  */
175 | function extractDevelopmentKeywords(commits = [], changelogEntries = null) {
176 |     const keywords = new Set();
177 |     const themes = new Set();
178 |     const filePatterns = new Set();
179 |     
180 |     // Extract from commit messages
181 |     commits.forEach(commit => {
182 |         const message = commit.message.toLowerCase();
183 |         
184 |         // Extract action keywords (feat, fix, refactor, etc.)
185 |         const actionMatch = message.match(/^(feat|fix|refactor|docs|test|chore|improve|add|update|enhance)([:(]|\s)/);
186 |         if (actionMatch) {
187 |             keywords.add(actionMatch[1]);
188 |         }
189 |         
190 |         // Extract key technical terms (avoid very common words)
191 |         // Expanded to capture more development-specific keywords
192 |         const technicalTerms = message.match(/\b(hook|memory|context|retrieval|phase|query|storage|backend|session|git|recent|scoring|config|timestamp|parsing|sort|sorting|date|age|dashboard|analytics|footer|layout|async|sync|bugfix|release|version|embedding|consolidation|stats|display|grid|css|api|endpoint|server|http|mcp|client|protocol)\b/g);
193 |         if (technicalTerms) {
194 |             technicalTerms.forEach(term => keywords.add(term));
195 |         }
196 | 
197 |         // Extract version numbers (v8.5.12, v8.5.13, etc.)
198 |         const versionMatch = message.match(/v?\d+\.\d+\.\d+/g);
199 |         if (versionMatch) {
200 |             versionMatch.forEach(version => keywords.add(version));
201 |         }
202 |         
203 |         // Extract file-based themes
204 |         if (commit.files) {
205 |             commit.files.forEach(file => {
206 |                 const basename = path.basename(file, path.extname(file));
207 |                 if (basename.length > 2) {
208 |                     filePatterns.add(basename);
209 |                 }
210 |                 
211 |                 // Extract directory themes
212 |                 const dir = path.dirname(file);
213 |                 if (dir !== '.' && dir !== '/' && !dir.startsWith('.')) {
214 |                     themes.add(dir.split('/')[0]); // First directory level
215 |                 }
216 |             });
217 |         }
218 |     });
219 |     
220 |     // Extract from changelog entries
221 |     if (changelogEntries) {
222 |         changelogEntries.forEach(entry => {
223 |             const text = entry.raw.toLowerCase();
224 |             
225 |             // Extract technical keywords (expanded for better coverage)
226 |             const changelogTerms = text.match(/\b(added|fixed|improved|enhanced|updated|removed|deprecated|breaking|feature|bug|performance|security|bugfix|release|dashboard|hooks|timestamp|parsing|sorting|analytics|footer|async|sync|embedding|consolidation|memory|retrieval|scoring)\b/g);
227 |             if (changelogTerms) {
228 |                 changelogTerms.forEach(term => keywords.add(term));
229 |             }
230 | 
231 |             // Extract version numbers from changelog
232 |             const changelogVersions = text.match(/v?\d+\.\d+\.\d+/g);
233 |             if (changelogVersions) {
234 |                 changelogVersions.forEach(version => keywords.add(version));
235 |             }
236 |             
237 |             // Extract version-specific themes
238 |             if (entry.version) {
239 |                 themes.add(`v${entry.version}`);
240 |                 themes.add(`version-${entry.version}`);
241 |             }
242 |         });
243 |     }
244 |     
245 |     return {
246 |         keywords: Array.from(keywords).slice(0, 20), // Increased from 15 to capture more relevant terms
247 |         themes: Array.from(themes).slice(0, 12),     // Increased from 10
248 |         filePatterns: Array.from(filePatterns).slice(0, 12), // Increased from 10
249 |         recentCommitMessages: commits.slice(0, 5).map(c => c.message)
250 |     };
251 | }
252 | 
253 | /**
254 |  * Build git-aware search queries
255 |  */
256 | function buildGitContextQuery(projectContext, gitContext, userMessage = '') {
257 |     try {
258 |         const queries = [];
259 |         const baseProject = projectContext.name || 'project';
260 |         
261 |         // Query 1: Recent development focus
262 |         if (gitContext.keywords.length > 0) {
263 |             const devKeywords = gitContext.keywords.slice(0, 8).join(' ');
264 |             const recentQuery = `${baseProject} recent development ${devKeywords}`;
265 |             queries.push({
266 |                 type: 'recent-development',
267 |                 semanticQuery: userMessage ? `${recentQuery} ${userMessage}` : recentQuery,
268 |                 weight: 1.0,
269 |                 source: 'git-commits'
270 |             });
271 |         }
272 |         
273 |         // Query 2: File-based context
274 |         if (gitContext.filePatterns.length > 0) {
275 |             const fileContext = gitContext.filePatterns.slice(0, 5).join(' ');
276 |             const fileQuery = `${baseProject} ${fileContext} implementation changes`;
277 |             queries.push({
278 |                 type: 'file-context',
279 |                 semanticQuery: userMessage ? `${fileQuery} ${userMessage}` : fileQuery,
280 |                 weight: 0.8,
281 |                 source: 'git-files'
282 |             });
283 |         }
284 |         
285 |         // Query 3: Version/theme context
286 |         if (gitContext.themes.length > 0) {
287 |             const themeContext = gitContext.themes.slice(0, 5).join(' ');
288 |             const themeQuery = `${baseProject} ${themeContext} features decisions`;
289 |             queries.push({
290 |                 type: 'theme-context', 
291 |                 semanticQuery: userMessage ? `${themeQuery} ${userMessage}` : themeQuery,
292 |                 weight: 0.6,
293 |                 source: 'git-themes'
294 |             });
295 |         }
296 |         
297 |         // Query 4: Commit message context (most recent)
298 |         if (gitContext.recentCommitMessages.length > 0) {
299 |             const recentMessage = gitContext.recentCommitMessages[0];
300 |             const commitQuery = `${baseProject} ${recentMessage}`;
301 |             queries.push({
302 |                 type: 'commit-context',
303 |                 semanticQuery: userMessage ? `${commitQuery} ${userMessage}` : commitQuery,
304 |                 weight: 0.9,
305 |                 source: 'recent-commit'
306 |             });
307 |         }
308 |         
309 |         return queries;
310 |         
311 |     } catch (error) {
312 |         // Return empty queries on error
313 |         return [];
314 |     }
315 | }
316 | 
317 | /**
318 |  * Get current git branch information
319 |  */
320 | function getCurrentGitInfo(workingDir) {
321 |     try {
322 |         const branch = execSync('git rev-parse --abbrev-ref HEAD', {
323 |             cwd: path.resolve(workingDir),
324 |             encoding: 'utf8',
325 |             timeout: 3000
326 |         }).trim();
327 |         
328 |         const lastCommit = execSync('git log -1 --pretty=format:"%h %s"', {
329 |             cwd: path.resolve(workingDir),
330 |             encoding: 'utf8',
331 |             timeout: 3000
332 |         }).trim();
333 |         
334 |         const hasChanges = execSync('git status --porcelain', {
335 |             cwd: path.resolve(workingDir),
336 |             encoding: 'utf8',
337 |             timeout: 3000
338 |         }).trim().length > 0;
339 |         
340 |         return {
341 |             branch,
342 |             lastCommit,
343 |             hasUncommittedChanges: hasChanges,
344 |             isGitRepo: true
345 |         };
346 |         
347 |     } catch (error) {
348 |         return {
349 |             branch: null,
350 |             lastCommit: null,
351 |             hasUncommittedChanges: false,
352 |             isGitRepo: false
353 |         };
354 |     }
355 | }
356 | 
357 | /**
358 |  * Main function to analyze git context for memory retrieval
359 |  */
360 | async function analyzeGitContext(workingDir, options = {}) {
361 |     try {
362 |         const {
363 |             commitLookback = 14,
364 |             maxCommits = 20,
365 |             includeChangelog = true,
366 |             verbose = false
367 |         } = options;
368 |         
369 |         // Get basic git info
370 |         const gitInfo = getCurrentGitInfo(workingDir);
371 |         if (!gitInfo.isGitRepo) {
372 |             return null;
373 |         }
374 |         
375 |         // Get recent commits
376 |         const commits = await getRecentCommits(workingDir, {
377 |             days: commitLookback,
378 |             maxCommits
379 |         });
380 |         
381 |         // Parse changelog if enabled
382 |         const changelogEntries = includeChangelog ? await parseChangelog(workingDir) : null;
383 |         
384 |         // Extract development context
385 |         const developmentKeywords = extractDevelopmentKeywords(commits, changelogEntries);
386 |         
387 |         const context = {
388 |             gitInfo,
389 |             commits: commits.slice(0, 10), // Limit for performance
390 |             changelogEntries,
391 |             developmentKeywords,
392 |             analysisTimestamp: new Date().toISOString(),
393 |             repositoryActivity: {
394 |                 recentCommitCount: commits.length,
395 |                 activeDays: Math.max(1, Math.min(commitLookback, commits.length > 0 ? commits[0].daysSinceCommit : commitLookback)),
396 |                 hasChangelog: changelogEntries !== null,
397 |                 developmentIntensity: commits.length > 5 ? 'high' : commits.length > 2 ? 'medium' : 'low'
398 |             }
399 |         };
400 |         
401 |         if (verbose) {
402 |             console.log(`[Git Analyzer] Analyzed ${commits.length} commits, ${changelogEntries?.length || 0} changelog entries`);
403 |             console.log(`[Git Analyzer] Keywords: ${developmentKeywords.keywords.join(', ')}`);
404 |         }
405 |         
406 |         return context;
407 |         
408 |     } catch (error) {
409 |         if (options.verbose) {
410 |             console.warn(`[Git Analyzer] Error analyzing context: ${error.message}`);
411 |         }
412 |         return null;
413 |     }
414 | }
415 | 
416 | module.exports = {
417 |     analyzeGitContext,
418 |     getRecentCommits,
419 |     parseChangelog,
420 |     extractDevelopmentKeywords,
421 |     buildGitContextQuery,
422 |     getCurrentGitInfo
423 | };
424 | 
425 | // Direct execution support for testing
426 | if (require.main === module) {
427 |     // Test the git analyzer
428 |     analyzeGitContext(process.cwd(), { verbose: true })
429 |         .then(context => {
430 |             if (context) {
431 |                 console.log('\n=== GIT CONTEXT ANALYSIS ===');
432 |                 console.log(`Repository: ${context.gitInfo.branch} (${context.commits.length} recent commits)`);
433 |                 console.log(`Development keywords: ${context.developmentKeywords.keywords.join(', ')}`);
434 |                 console.log(`File patterns: ${context.developmentKeywords.filePatterns.join(', ')}`);
435 |                 console.log(`Themes: ${context.developmentKeywords.themes.join(', ')}`);
436 |                 
437 |                 if (context.changelogEntries) {
438 |                     console.log(`Changelog entries: ${context.changelogEntries.length}`);
439 |                     context.changelogEntries.forEach(entry => {
440 |                         console.log(`  - ${entry.version} (${entry.changes.length} changes)`);
441 |                     });
442 |                 }
443 |                 
444 |                 // Test query building
445 |                 const queries = buildGitContextQuery({ name: 'test-project' }, context.developmentKeywords);
446 |                 console.log(`\nGenerated ${queries.length} git-aware queries:`);
447 |                 queries.forEach((query, idx) => {
448 |                     console.log(`  ${idx + 1}. [${query.type}] ${query.semanticQuery}`);
449 |                 });
450 |                 
451 |             } else {
452 |                 console.log('No git context available');
453 |             }
454 |         })
455 |         .catch(error => console.error('Git analysis failed:', error));
456 | }
```
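
The module above returns weighted query objects rather than performing retrieval itself. As a minimal sketch of how a consumer (for example, a session-start hook) might use those weights, assuming the file is saved as `./git-analyzer.js` and that `queryMemoryService()` is a hypothetical helper returning `[{hash, content, relevance}]` (neither is defined by the module itself):

```javascript
// Minimal consumer sketch. Assumptions: the module above is saved as ./git-analyzer.js,
// and queryMemoryService(query) is a hypothetical helper returning [{hash, content, relevance}].
const { analyzeGitContext, buildGitContextQuery } = require('./git-analyzer');

async function gatherGitAwareMemories(workingDir, userMessage, queryMemoryService) {
    const context = await analyzeGitContext(workingDir);
    if (!context) return []; // not a git repo, or analysis failed

    const queries = buildGitContextQuery({ name: 'my-project' }, context.developmentKeywords, userMessage);

    // Score each returned memory by (relevance * query weight) and keep the best score per hash.
    const scored = new Map();
    for (const query of queries) {
        const results = await queryMemoryService(query.semanticQuery);
        for (const memory of results) {
            const score = (memory.relevance || 0) * query.weight;
            const existing = scored.get(memory.hash);
            if (!existing || existing.score < score) {
                scored.set(memory.hash, { ...memory, score, source: query.source });
            }
        }
    }
    return [...scored.values()].sort((a, b) => b.score - a.score);
}

module.exports = { gatherGitAwareMemories };
```

Scoring before de-duplication lets the higher-weighted recent-development and commit-context queries win ties against the broader theme queries.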

--------------------------------------------------------------------------------
/tests/timestamp/test_search_retrieval_inconsistency.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Test script to identify the exact root cause of Issue #99 search inconsistency.
  4 | Based on investigation, the issue appears to be in timestamp field mapping
  5 | between storage and retrieval in ChromaDB where different timestamp fields
  6 | are used for querying vs storing.
  7 | """
  8 | 
  9 | import sys
 10 | import os
 11 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
 12 | 
 13 | import asyncio
 14 | import json
 15 | import tempfile
 16 | import time
 17 | from datetime import datetime, timedelta
 18 | from typing import Dict, List, Any
 19 | 
 20 | from mcp_memory_service.models.memory import Memory
 21 | from mcp_memory_service.utils.hashing import generate_content_hash
 22 | from mcp_memory_service.utils.time_parser import extract_time_expression
 23 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 24 | 
 25 | class SearchRetrievalInconsistencyTest:
 26 |     """Test suite to identify search/retrieval timestamp inconsistencies."""
 27 | 
 28 |     def __init__(self):
 29 |         self.storage = None
 30 |         self.test_memories = []
 31 | 
 32 |     async def setup(self):
 33 |         """Set up test environment."""
 34 |         print("=== Setting up search/retrieval inconsistency test ===")
 35 | 
 36 |         self.temp_db = tempfile.NamedTemporaryFile(suffix=".db", delete=False)
 37 |         self.temp_db.close()
 38 | 
 39 |         self.storage = SqliteVecMemoryStorage(
 40 |             db_path=self.temp_db.name,
 41 |             embedding_model="all-MiniLM-L6-v2"
 42 |         )
 43 |         await self.storage.initialize()
 44 |         print(f"✅ Storage initialized: {self.temp_db.name}")
 45 | 
 46 |     async def cleanup(self):
 47 |         """Clean up test environment."""
 48 |         self.storage = None
 49 |         if hasattr(self, 'temp_db') and os.path.exists(self.temp_db.name):
 50 |             os.unlink(self.temp_db.name)
 51 |             print("✅ Test database cleaned up")
 52 | 
 53 |     async def create_test_memories_with_specific_timestamps(self):
 54 |         """Create test memories with carefully controlled timestamps."""
 55 |         print("\n🧪 Creating test memories with specific timestamps")
 56 |         print("-" * 60)
 57 | 
 58 |         # Calculate specific timestamps for testing
 59 |         now = time.time()
 60 |         yesterday_start = now - (24 * 60 * 60)  # 24 hours ago
 61 |         yesterday_middle = yesterday_start + (12 * 60 * 60)  # 12 hours into yesterday
 62 |         yesterday_end = yesterday_start + (23.5 * 60 * 60)  # Near end of yesterday
 63 | 
 64 |         test_cases = [
 65 |             {
 66 |                 "name": "hook_style_memory_yesterday",
 67 |                 "content": "Hook-generated memory from yesterday's development session",
 68 |                 "timestamp": yesterday_middle,
 69 |                 "tags": ["claude-code-session", "session-consolidation", "yesterday-work"],
 70 |                 "metadata": {
 71 |                     "generated_by": "claude-code-session-end-hook",
 72 |                     "generated_at": datetime.fromtimestamp(yesterday_middle).isoformat() + "Z",
 73 |                     "session_analysis": {"topics": ["development", "testing"]}
 74 |                 },
 75 |                 "memory_type": "session-summary"
 76 |             },
 77 |             {
 78 |                 "name": "manual_memory_yesterday",
 79 |                 "content": "Manual note stored yesterday about project progress",
 80 |                 "timestamp": yesterday_end,
 81 |                 "tags": ["manual-note", "project-progress", "yesterday"],
 82 |                 "metadata": {
 83 |                     "created_by": "manual-storage",
 84 |                     "source": "user-input"
 85 |                 },
 86 |                 "memory_type": "note"
 87 |             },
 88 |             {
 89 |                 "name": "hook_style_memory_today",
 90 |                 "content": "Hook-generated memory from today's session",
 91 |                 "timestamp": now - (2 * 60 * 60),  # 2 hours ago
 92 |                 "tags": ["claude-code-session", "session-consolidation", "today-work"],
 93 |                 "metadata": {
 94 |                     "generated_by": "claude-code-session-end-hook",
 95 |                     "generated_at": datetime.fromtimestamp(now - (2 * 60 * 60)).isoformat() + "Z"
 96 |                 },
 97 |                 "memory_type": "session-summary"
 98 |             },
 99 |             {
100 |                 "name": "manual_memory_today",
101 |                 "content": "Manual note stored today about urgent task",
102 |                 "timestamp": now - (1 * 60 * 60),  # 1 hour ago
103 |                 "tags": ["manual-note", "urgent-task", "today"],
104 |                 "metadata": {
105 |                     "created_by": "manual-storage",
106 |                     "source": "user-input"
107 |                 },
108 |                 "memory_type": "note"
109 |             }
110 |         ]
111 | 
112 |         stored_memories = []
113 | 
114 |         for case in test_cases:
115 |             # Create memory with specific timestamp
116 |             memory = Memory(
117 |                 content=case["content"],
118 |                 content_hash=generate_content_hash(case["content"]),
119 |                 tags=case["tags"],
120 |                 memory_type=case["memory_type"],
121 |                 metadata=case["metadata"],
122 |                 created_at=case["timestamp"],
123 |                 created_at_iso=datetime.fromtimestamp(case["timestamp"]).isoformat() + "Z"
124 |             )
125 | 
126 |             # Store the memory
127 |             success, message = await self.storage.store(memory)
128 |             if success:
129 |                 stored_memories.append({
130 |                     "name": case["name"],
131 |                     "memory": memory,
132 |                     "expected_timestamp": case["timestamp"]
133 |                 })
134 |                 print(f"✅ Stored {case['name']}: {datetime.fromtimestamp(case['timestamp'])}")
135 |             else:
136 |                 print(f"❌ Failed to store {case['name']}: {message}")
137 | 
138 |         self.test_memories = stored_memories
139 |         return stored_memories
140 | 
141 |     async def test_time_based_search_consistency(self):
142 |         """Test if time-based searches find all expected memories."""
143 |         print("\n🧪 Test 1: Time-Based Search Consistency")
144 |         print("-" * 60)
145 | 
146 |         # Test yesterday search
147 |         query = "yesterday"
148 |         cleaned_query, (start_ts, end_ts) = extract_time_expression(query)
149 | 
150 |         print(f"🔍 Testing query: '{query}'")
151 |         print(f"📅 Search range: {datetime.fromtimestamp(start_ts)} to {datetime.fromtimestamp(end_ts)}")
152 | 
153 |         # Check which memories should be found
154 |         expected_memories = []
155 |         for mem_info in self.test_memories:
156 |             if start_ts <= mem_info["expected_timestamp"] <= end_ts:
157 |                 expected_memories.append(mem_info["name"])
158 | 
159 |         print(f"📋 Expected to find memories: {expected_memories}")
160 | 
161 |         # Perform the search using retrieve (general search)
162 |         search_results = await self.storage.retrieve(query, n_results=10)
163 |         print(f"🔍 General retrieve found: {len(search_results)} memories")
164 | 
165 |         for result in search_results:
166 |             print(f"  - {result.memory.content[:50]}...")
167 | 
168 |         # Check if we found the expected memories
169 |         found_memories = []
170 |         for result in search_results:
171 |             for mem_info in self.test_memories:
172 |                 if result.memory.content == mem_info["memory"].content:
173 |                     found_memories.append(mem_info["name"])
174 |                     break
175 | 
176 |         print(f"📋 Actually found memories: {found_memories}")
177 | 
178 |         # Analysis
179 |         missing_memories = set(expected_memories) - set(found_memories)
180 |         unexpected_memories = set(found_memories) - set(expected_memories)
181 | 
182 |         search_analysis = {
183 |             "expected_count": len(expected_memories),
184 |             "found_count": len(found_memories),
185 |             "missing_memories": list(missing_memories),
186 |             "unexpected_memories": list(unexpected_memories),
187 |             "search_consistent": len(missing_memories) == 0 and len(unexpected_memories) == 0
188 |         }
189 | 
190 |         if search_analysis["search_consistent"]:
191 |             print("✅ Time-based search is consistent")
192 |         else:
193 |             print("❌ Time-based search inconsistency detected!")
194 |             if missing_memories:
195 |                 print(f"   Missing: {missing_memories}")
196 |             if unexpected_memories:
197 |                 print(f"   Unexpected: {unexpected_memories}")
198 | 
199 |         return search_analysis
200 | 
201 |     async def test_direct_timestamp_queries(self):
202 |         """Test direct timestamp-based queries to isolate the issue."""
203 |         print("\n🧪 Test 2: Direct Timestamp Query Analysis")
204 |         print("-" * 60)
205 | 
206 |         # Get yesterday's timestamp range
207 |         yesterday_query = "yesterday"
208 |         cleaned_query, (start_ts, end_ts) = extract_time_expression(yesterday_query)
209 | 
210 |         print(f"🕐 Yesterday range: {start_ts} to {end_ts}")
211 | 
212 |         # Check each stored memory's timestamp against the range
213 |         timestamp_analysis = {
214 |             "memories_in_range": [],
215 |             "memories_out_of_range": [],
216 |             "timestamp_precision_issues": []
217 |         }
218 | 
219 |         for mem_info in self.test_memories:
220 |             memory = mem_info["memory"]
221 |             expected_ts = mem_info["expected_timestamp"]
222 | 
223 |             print(f"\n📝 Analyzing {mem_info['name']}:")
224 |             print(f"   Expected timestamp: {expected_ts} ({datetime.fromtimestamp(expected_ts)})")
225 |             print(f"   Memory created_at: {memory.created_at}")
226 |             print(f"   Memory created_at_iso: {memory.created_at_iso}")
227 | 
228 |             # Check if memory should be in yesterday's range
229 |             in_range = start_ts <= expected_ts <= end_ts
230 |             actually_in_range = start_ts <= (memory.created_at or 0) <= end_ts
231 | 
232 |             if in_range:
233 |                 timestamp_analysis["memories_in_range"].append(mem_info["name"])
234 | 
235 |             if in_range != actually_in_range:
236 |                 timestamp_analysis["timestamp_precision_issues"].append({
237 |                     "memory": mem_info["name"],
238 |                     "expected_in_range": in_range,
239 |                     "actually_in_range": actually_in_range,
240 |                     "expected_timestamp": expected_ts,
241 |                     "stored_timestamp": memory.created_at
242 |                 })
243 | 
244 |             print(f"   Should be in yesterday range: {in_range}")
245 |             print(f"   Memory timestamp in range: {actually_in_range}")
246 | 
247 |         print(f"\n📊 Timestamp Analysis Summary:")
248 |         print(f"   Memories in yesterday range: {len(timestamp_analysis['memories_in_range'])}")
249 |         print(f"   Timestamp precision issues: {len(timestamp_analysis['timestamp_precision_issues'])}")
250 | 
251 |         return timestamp_analysis
252 | 
253 |     async def test_memory_serialization_fields(self):
254 |         """Test what timestamp fields are actually stored/retrieved."""
255 |         print("\n🧪 Test 3: Memory Serialization Fields Analysis")
256 |         print("-" * 60)
257 | 
258 |         if not self.test_memories:
259 |             print("⚠️  No test memories available for analysis")
260 |             return {}
261 | 
262 |         serialization_analysis = {
263 |             "memory_field_analysis": [],
264 |             "consistent_fields": True
265 |         }
266 | 
267 |         for mem_info in self.test_memories:
268 |             memory = mem_info["memory"]
269 | 
270 |             # Get the serialized dictionary representation
271 |             memory_dict = memory.to_dict()
272 | 
273 |             timestamp_fields = {
274 |                 "created_at": memory_dict.get("created_at"),
275 |                 "created_at_iso": memory_dict.get("created_at_iso"),
276 |                 "timestamp": memory_dict.get("timestamp"),
277 |                 "timestamp_float": memory_dict.get("timestamp_float"),
278 |                 "timestamp_str": memory_dict.get("timestamp_str"),
279 |                 "updated_at": memory_dict.get("updated_at"),
280 |                 "updated_at_iso": memory_dict.get("updated_at_iso")
281 |             }
282 | 
283 |             print(f"\n📝 {mem_info['name']} serialization fields:")
284 |             for field, value in timestamp_fields.items():
285 |                 if value is not None:
286 |                     if isinstance(value, float):
287 |                         dt_str = datetime.fromtimestamp(value).isoformat()
288 |                         print(f"   {field}: {value} ({dt_str})")
289 |                     else:
290 |                         print(f"   {field}: {value}")
291 |                 else:
292 |                     print(f"   {field}: None")
293 | 
294 |             analysis_entry = {
295 |                 "memory_name": mem_info["name"],
296 |                 "timestamp_fields": timestamp_fields,
297 |                 "has_all_required": all([
298 |                     timestamp_fields.get("created_at") is not None,
299 |                     timestamp_fields.get("created_at_iso") is not None,
300 |                     timestamp_fields.get("timestamp") is not None
301 |                 ])
302 |             }
303 | 
304 |             serialization_analysis["memory_field_analysis"].append(analysis_entry)
305 | 
306 |             if not analysis_entry["has_all_required"]:
307 |                 serialization_analysis["consistent_fields"] = False
308 | 
309 |         return serialization_analysis
310 | 
311 |     async def run_all_tests(self):
312 |         """Run comprehensive search/retrieval inconsistency analysis."""
313 |         print("=" * 70)
314 |         print("MCP Memory Service: Search/Retrieval Inconsistency Root Cause Analysis")
315 |         print("Issue #99 - Final Investigation Phase")
316 |         print("=" * 70)
317 | 
318 |         try:
319 |             await self.setup()
320 | 
321 |             # Create test data
322 |             await self.create_test_memories_with_specific_timestamps()
323 | 
324 |             # Run tests
325 |             search_test = await self.test_time_based_search_consistency()
326 |             timestamp_test = await self.test_direct_timestamp_queries()
327 |             serialization_test = await self.test_memory_serialization_fields()
328 | 
329 |             # Final analysis
330 |             print("\n" + "=" * 70)
331 |             print("FINAL ROOT CAUSE ANALYSIS")
332 |             print("=" * 70)
333 | 
334 |             tests_passed = 0
335 |             total_tests = 3
336 | 
337 |             # Search consistency
338 |             if search_test.get("search_consistent", False):
339 |                 print("✅ PASS: Time-based search is consistent")
340 |                 tests_passed += 1
341 |             else:
342 |                 print("❌ FAIL: Time-based search inconsistency confirmed")
343 |                 print(f"   Missing: {search_test.get('missing_memories', [])}")
344 |                 print(f"   Unexpected: {search_test.get('unexpected_memories', [])}")
345 | 
346 |             # Timestamp precision
347 |             precision_issues = timestamp_test.get("timestamp_precision_issues", [])
348 |             if len(precision_issues) == 0:
349 |                 print("✅ PASS: Timestamp precision is correct")
350 |                 tests_passed += 1
351 |             else:
352 |                 print("❌ FAIL: Timestamp precision issues detected")
353 |                 for issue in precision_issues:
354 |                     print(f"   {issue['memory']}: expected={issue['expected_in_range']}, actual={issue['actually_in_range']}")
355 | 
356 |             # Field serialization
357 |             if serialization_test.get("consistent_fields", False):
358 |                 print("✅ PASS: Memory serialization fields consistent")
359 |                 tests_passed += 1
360 |             else:
361 |                 print("❌ FAIL: Memory serialization field issues")
362 | 
363 |             print(f"\nOverall Result: {tests_passed}/{total_tests} tests passed")
364 | 
365 |             # Root cause determination
366 |             print("\n🎯 DEFINITIVE ROOT CAUSE:")
367 | 
368 |             if tests_passed == total_tests:
369 |                 print("• Storage and serialization are working correctly")
370 |                 print("• Issue #99 might be in a different storage backend or search implementation")
371 |                 print("• The problem could be client-side or in specific edge cases")
372 |             else:
373 |                 print("• CONFIRMED: Search/retrieval inconsistencies exist")
374 |                 if not search_test.get("search_consistent", False):
375 |                     print("  → Time-based search is not finding expected memories")
376 |                 if precision_issues:
377 |                     print("  → Timestamp precision/handling issues in queries")
378 |                 if not serialization_test.get("consistent_fields", False):
379 |                     print("  → Memory serialization field inconsistencies")
380 | 
381 |             print("\n💡 RECOMMENDED FIXES:")
382 |             if not search_test.get("search_consistent", False):
383 |                 print("• Review time-based search implementation in storage backends")
384 |                 print("• Ensure timestamp field mapping is consistent between store and query")
385 |             if precision_issues:
386 |                 print("• Fix timestamp precision handling in search queries")
387 |             if not serialization_test.get("consistent_fields", False):
388 |                 print("• Standardize timestamp field serialization across all memories")
389 | 
390 |             return tests_passed == total_tests
391 | 
392 |         finally:
393 |             await self.cleanup()
394 | 
395 | async def main():
396 |     """Main test execution."""
397 |     test_suite = SearchRetrievalInconsistencyTest()
398 |     success = await test_suite.run_all_tests()
399 |     return 0 if success else 1
400 | 
401 | if __name__ == "__main__":
402 |     exit_code = asyncio.run(main())
403 |     sys.exit(exit_code)
```

--------------------------------------------------------------------------------
/docs/natural-memory-triggers/performance-optimization.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Natural Memory Triggers v7.1.3 - Performance Optimization Guide
  2 | 
  3 | This guide provides comprehensive strategies for optimizing Natural Memory Triggers performance to achieve the best balance of speed, accuracy, and resource usage for your specific workflow.
  4 | 
  5 | ## Performance Overview
  6 | 
  7 | Natural Memory Triggers uses a sophisticated multi-tier architecture designed for optimal performance:
  8 | 
  9 | ### Performance Tiers
 10 | 
 11 | | Tier | Target Latency | Processing | Accuracy | Use Case |
 12 | |------|---------------|------------|-----------|----------|
 13 | | **Instant** | < 50ms | Pattern matching, cache checks | 85% | Common memory-seeking patterns |
 14 | | **Fast** | < 150ms | Lightweight semantic analysis | 90% | Topic shifts, question patterns |
 15 | | **Intensive** | < 500ms | Deep semantic understanding | 95% | Complex context analysis |
 16 | 
 17 | ### Real-World Benchmarks
 18 | 
 19 | **Production Performance Metrics:**
 20 | - ✅ **85%+ trigger accuracy** across all processing tiers
 21 | - ✅ **<50ms instant analysis** for cached and pattern-matched queries
 22 | - ✅ **<150ms fast analysis** for semantic topic detection
 23 | - ✅ **<5ms cache performance** with LRU management
 24 | - ✅ **Zero user-facing latency** with background processing
 25 | 
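
To make the tier escalation concrete, the following is a minimal illustrative sketch (not the shipped hook implementation; the tier analyzers, budgets, and thresholds are stand-in placeholders): each tier runs only while the confidence target is unmet and the remaining latency budget can still cover it.

```javascript
// Illustrative only – the tier analyzers below are stand-in stubs, not the shipped hook code.
const tiers = [
    { name: 'instant',   budgetMs: 50,  analyze: async msg => ({ confidence: /what did we|remember|decide/i.test(msg) ? 0.85 : 0.3 }) },
    { name: 'fast',      budgetMs: 150, analyze: async msg => ({ confidence: 0.78 }) }, // placeholder semantic pass
    { name: 'intensive', budgetMs: 500, analyze: async msg => ({ confidence: 0.92 }) }  // placeholder deep pass
];

async function analyzeWithTiers(message, maxLatencyMs = 200, confidenceTarget = 0.8) {
    const start = Date.now();
    let best = { tier: 'none', confidence: 0 };
    for (const tier of tiers) {
        // Skip tiers the remaining latency budget can no longer cover.
        if (Date.now() - start + tier.budgetMs > maxLatencyMs) break;
        const result = await tier.analyze(message);
        if (result.confidence > best.confidence) best = { tier: tier.name, ...result };
        if (best.confidence >= confidenceTarget) break; // confident enough, stop escalating
    }
    return best;
}

analyzeWithTiers('what did we decide about authentication?').then(console.log);
```
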
 26 | ## Performance Profiles
 27 | 
 28 | Choose the right profile based on your current workflow needs:
 29 | 
 30 | ### 🏃 Speed Focused Profile
 31 | 
 32 | Optimized for minimal latency with basic memory awareness.
 33 | 
 34 | ```bash
 35 | node memory-mode-controller.js profile speed_focused
 36 | ```
 37 | 
 38 | **Configuration:**
 39 | - **Max Latency**: 100ms
 40 | - **Enabled Tiers**: Instant only
 41 | - **Background Processing**: Disabled
 42 | - **Cache Aggressiveness**: High
 43 | 
 44 | **Best For:**
 45 | - Quick coding sessions
 46 | - Pair programming
 47 | - Time-sensitive development work
 48 | - Performance-critical environments
 49 | 
 50 | **Trade-offs:**
 51 | - Minimal memory awareness
 52 | - Only pattern-based detection
 53 | - No semantic analysis
 54 | - Reduced context accuracy
 55 | 
 56 | **Optimization Tips:**
 57 | ```bash
 58 | # Increase cache size for better hit rates
 59 | node memory-mode-controller.js config set performance.cacheSize 100
 60 | 
 61 | # Reduce cooldown for faster triggers
 62 | node memory-mode-controller.js config set naturalTriggers.cooldownPeriod 15000
 63 | 
 64 | # Lower memory limit for faster responses
 65 | node memory-mode-controller.js config set naturalTriggers.maxMemoriesPerTrigger 3
 66 | ```
 67 | 
 68 | ### ⚖️ Balanced Profile (Recommended)
 69 | 
 70 | Optimal balance of speed and context awareness for general development.
 71 | 
 72 | ```bash
 73 | node memory-mode-controller.js profile balanced
 74 | ```
 75 | 
 76 | **Configuration:**
 77 | - **Max Latency**: 200ms
 78 | - **Enabled Tiers**: Instant + Fast
 79 | - **Background Processing**: Enabled
 80 | - **Degradation Threshold**: 400ms
 81 | 
 82 | **Best For:**
 83 | - Daily development work
 84 | - General coding sessions
 85 | - Code reviews and debugging
 86 | - Most productive for regular use
 87 | 
 88 | **Optimization Tips:**
 89 | ```bash
 90 | # Fine-tune sensitivity for your preference
 91 | node memory-mode-controller.js sensitivity 0.6
 92 | 
 93 | # Monitor performance regularly
 94 | node memory-mode-controller.js metrics
 95 | 
 96 | # Adjust based on user satisfaction
 97 | node memory-mode-controller.js config set performance.autoAdjust true
 98 | ```
 99 | 
100 | ### 🧠 Memory Aware Profile
101 | 
102 | Maximum context awareness with acceptable higher latency.
103 | 
104 | ```bash
105 | node memory-mode-controller.js profile memory_aware
106 | ```
107 | 
108 | **Configuration:**
109 | - **Max Latency**: 500ms
110 | - **Enabled Tiers**: All (Instant + Fast + Intensive)
111 | - **Background Processing**: Enabled
112 | - **Context Analysis**: Deep semantic understanding
113 | 
114 | **Best For:**
115 | - Architectural decision sessions
116 | - Complex problem solving
117 | - Research and exploration work
118 | - When context quality is paramount
119 | 
120 | **Optimization Tips:**
121 | ```bash
122 | # Enable all analysis features
123 | node memory-mode-controller.js config set performance.enableFullAnalysis true
124 | 
125 | # Increase memory retrieval for better context
126 | node memory-mode-controller.js config set naturalTriggers.maxMemoriesPerTrigger 8
127 | 
128 | # Enable conversation context tracking
129 | node memory-mode-controller.js config set performance.trackConversationContext true
130 | ```
131 | 
132 | ### 🤖 Adaptive Profile
133 | 
134 | Machine learning-based optimization that learns your preferences.
135 | 
136 | ```bash
137 | node memory-mode-controller.js profile adaptive
138 | ```
139 | 
140 | **Configuration:**
141 | - **Max Latency**: Auto-adjusting (100ms - 800ms)
142 | - **Enabled Tiers**: Dynamic based on usage patterns
143 | - **User Feedback**: Tracks satisfaction and adjusts
144 | - **Learning Rate**: 0.05 (configurable)
145 | 
146 | **Optimization Process:**
147 | 1. **Learning Phase** (first 50 interactions): Collects usage data
148 | 2. **Adjustment Phase** (ongoing): Optimizes based on patterns
149 | 3. **Feedback Integration**: Incorporates user satisfaction signals
150 | 4. **Performance Tuning**: Adjusts tiers and thresholds automatically
151 | 
152 | **Monitoring Adaptive Learning:**
153 | ```bash
154 | # Check learning progress
155 | node memory-mode-controller.js metrics --learning
156 | 
157 | # View adaptation history
158 | node memory-mode-controller.js config get performance.adaptationHistory
159 | 
160 | # Reset learning data if needed
161 | node memory-mode-controller.js config set performance.resetLearning true
162 | ```
163 | 
164 | ## Performance Monitoring
165 | 
166 | ### Real-Time Metrics
167 | 
168 | Monitor system performance in real-time:
169 | 
170 | ```bash
171 | # Basic performance overview
172 | node memory-mode-controller.js status
173 | 
174 | # Detailed performance metrics
175 | node memory-mode-controller.js metrics
176 | 
177 | # Continuous monitoring (updates every 5 seconds)
178 | watch -n 5 "node ~/.claude/hooks/memory-mode-controller.js metrics"
179 | ```
180 | 
181 | **Key Metrics to Monitor:**
182 | 
183 | #### Response Time Metrics
184 | - **Average Latency**: Overall response time across all tiers
185 | - **Tier-Specific Latency**: Performance breakdown by processing tier
186 | - **Cache Hit Rate**: Percentage of requests served from cache
187 | - **Memory Service Latency**: Backend response times
188 | 
189 | #### Accuracy Metrics
190 | - **Trigger Accuracy**: Percentage of relevant memory retrievals
191 | - **False Positive Rate**: Percentage of irrelevant triggers
192 | - **User Satisfaction**: Adaptive feedback scoring
193 | - **Success Rate**: Overall system effectiveness
194 | 
195 | #### Resource Usage Metrics
196 | - **Cache Size**: Current semantic cache utilization
197 | - **Memory Usage**: Node.js heap and memory consumption
198 | - **CPU Usage**: Processing overhead (available with `--system` flag)
199 | - **Network I/O**: Memory service communication overhead
200 | 
201 | ### Performance Alerts
202 | 
203 | Set up automated performance monitoring:
204 | 
205 | ```bash
206 | # Create performance monitoring script
207 | cat > ~/nmt-monitor.sh << 'EOF'
208 | #!/bin/bash
209 | METRICS=$(node ~/.claude/hooks/memory-mode-controller.js metrics --json)
210 | AVG_LATENCY=$(echo "$METRICS" | jq '.performance.avgLatency // 0 | floor')
211 | 
212 | if [ "$AVG_LATENCY" -gt 300 ]; then
213 |     echo "⚠️ High latency detected: ${AVG_LATENCY}ms"
214 |     # Could trigger notifications, logging, or automatic optimization
215 | fi
216 | EOF
217 | 
218 | chmod +x ~/nmt-monitor.sh
219 | 
220 | # Add to crontab for regular monitoring
221 | (crontab -l ; echo "*/5 * * * * ~/nmt-monitor.sh") | crontab -
222 | ```
223 | 
224 | ## Cache Optimization
225 | 
226 | The semantic cache is crucial for performance. Optimize it based on your usage patterns:
227 | 
228 | ### Cache Configuration
229 | 
230 | ```bash
231 | # View current cache statistics
232 | node memory-mode-controller.js cache stats
233 | 
234 | # Adjust cache size based on memory availability
235 | node memory-mode-controller.js config set performance.cacheSize 75  # entries
236 | 
237 | # Configure cache cleanup behavior
238 | node memory-mode-controller.js config set performance.cacheCleanupThreshold 0.8
239 | ```
240 | 
241 | ### Cache Performance Analysis
242 | 
243 | ```bash
244 | # Analyze cache effectiveness
245 | node memory-mode-controller.js cache analyze
246 | 
247 | # Example output:
248 | Cache Performance Analysis:
249 |   Hit Rate: 42% (ideal: >30%)
250 |   Average Hit Time: 3.2ms
251 |   Average Miss Time: 147ms
252 |   Most Valuable Cached Patterns:
253 |     - "what did we decide": 15 hits, 180ms saved
254 |     - "how did we implement": 12 hits, 134ms saved
255 |     - "similar to what we": 8 hits, 98ms saved
256 | ```
257 | 
258 | ### Cache Optimization Strategies
259 | 
260 | #### High Hit Rate Strategy
261 | ```bash
262 | # Increase cache size for better retention
263 | node memory-mode-controller.js config set performance.cacheSize 100
264 | 
265 | # Increase pattern retention time
266 | node memory-mode-controller.js config set performance.cacheRetentionTime 3600000  # 1 hour
267 | ```
268 | 
269 | #### Memory-Conscious Strategy
270 | ```bash
271 | # Reduce cache size for lower memory usage
272 | node memory-mode-controller.js config set performance.cacheSize 25
273 | 
274 | # More aggressive cleanup
275 | node memory-mode-controller.js config set performance.cacheCleanupThreshold 0.6
276 | ```
277 | 
278 | ## Memory Service Optimization
279 | 
280 | Optimize communication with the MCP Memory Service:
281 | 
282 | ### Connection Configuration
283 | 
284 | ```bash
285 | # Adjust timeout settings for your environment
286 | node memory-mode-controller.js config set memoryService.timeout 5000
287 | 
288 | # Configure connection pooling (if available)
289 | node memory-mode-controller.js config set memoryService.connectionPool.maxConnections 3
290 | 
291 | # Enable keep-alive for persistent connections
292 | node memory-mode-controller.js config set memoryService.keepAlive true
293 | ```
294 | 
295 | ### Backend-Specific Optimization
296 | 
297 | #### SQLite-vec Backend
298 | ```bash
299 | # Optimize for local performance
300 | node memory-mode-controller.js config set memoryService.localOptimizations true
301 | node memory-mode-controller.js config set memoryService.timeout 3000
302 | ```
303 | 
304 | #### Cloudflare Backend
305 | ```bash
306 | # Optimize for network latency
307 | node memory-mode-controller.js config set memoryService.timeout 8000
308 | node memory-mode-controller.js config set memoryService.retryAttempts 2
309 | ```
310 | 
311 | #### ChromaDB Backend
312 | ```bash
313 | # Optimize for multi-client access
314 | node memory-mode-controller.js config set memoryService.timeout 6000
315 | node memory-mode-controller.js config set memoryService.batchRequests true
316 | ```
317 | 
318 | ## Git Integration Optimization
319 | 
320 | Optimize Git-aware context analysis for better performance:
321 | 
322 | ### Repository Analysis Configuration
323 | 
324 | ```bash
325 | # Limit commit analysis scope for performance
326 | node memory-mode-controller.js config set gitAnalysis.commitLookback 7  # days
327 | node memory-mode-controller.js config set gitAnalysis.maxCommits 10
328 | 
329 | # Cache git analysis results
330 | node memory-mode-controller.js config set gitAnalysis.cacheResults true
331 | node memory-mode-controller.js config set gitAnalysis.cacheExpiry 1800  # 30 minutes
332 | ```
333 | 
334 | ### Large Repository Optimization
335 | 
336 | For repositories with extensive history:
337 | 
338 | ```bash
339 | # Reduce analysis depth
340 | node memory-mode-controller.js config set gitAnalysis.maxCommits 5
341 | node memory-mode-controller.js config set gitAnalysis.commitLookback 3
342 | 
343 | # Skip changelog parsing for performance
344 | node memory-mode-controller.js config set gitAnalysis.includeChangelog false
345 | 
346 | # Use lightweight git operations
347 | node memory-mode-controller.js config set gitAnalysis.lightweight true
348 | ```
349 | 
350 | ## System Resource Optimization
351 | 
352 | ### Memory Usage Optimization
353 | 
354 | Monitor and optimize Node.js memory usage:
355 | 
356 | ```bash
357 | # Check current memory usage
358 | node --expose-gc -e "
359 | const used = process.memoryUsage();
360 | console.log('Memory usage:');
361 | for (let key in used) {
362 |   console.log(\`\${key}: \${Math.round(used[key] / 1024 / 1024 * 100) / 100} MB\`);
363 | }
364 | "
365 | 
366 | # Cap the Node.js heap size to keep memory usage bounded
367 | export NODE_OPTIONS="--max-old-space-size=512"
368 | node memory-mode-controller.js status
369 | ```
370 | 
371 | ### CPU Usage Optimization
372 | 
373 | #### Single-Core Optimization
374 | ```bash
375 | # Disable background processing for CPU-constrained environments
376 | node memory-mode-controller.js config set performance.backgroundProcessing false
377 | 
378 | # Reduce concurrent operations
379 | node memory-mode-controller.js config set performance.maxConcurrentAnalysis 1
380 | ```
381 | 
382 | #### Multi-Core Optimization
383 | ```bash
384 | # Enable parallel processing (if available)
385 | node memory-mode-controller.js config set performance.enableParallelProcessing true
386 | 
387 | # Increase concurrent analysis threads
388 | node memory-mode-controller.js config set performance.maxConcurrentAnalysis 3
389 | ```
390 | 
391 | ## Performance Troubleshooting
392 | 
393 | ### Common Performance Issues
394 | 
395 | #### High Latency
396 | 
397 | **Symptoms**: Response times consistently above target thresholds
398 | 
399 | **Diagnosis**:
400 | ```bash
401 | # Identify bottlenecks
402 | node memory-mode-controller.js metrics --breakdown
403 | 
404 | # Test memory service directly
405 | curl -w "@curl-format.txt" -k https://localhost:8443/api/health
406 | 
407 | # Check system resources
408 | top -p $(pgrep -f memory-mode-controller)
409 | ```
410 | 
411 | **Solutions**:
412 | 1. **Switch to faster profile**: `node memory-mode-controller.js profile speed_focused`
413 | 2. **Optimize cache**: Increase cache size and check hit rates
414 | 3. **Memory service optimization**: Check backend performance
415 | 4. **Reduce analysis depth**: Lower commit lookback and max commits
416 | 
417 | #### Cache Misses
418 | 
419 | **Symptoms**: Low cache hit rate (< 20%)
420 | 
421 | **Diagnosis**:
422 | ```bash
423 | node memory-mode-controller.js cache analyze
424 | ```
425 | 
426 | **Solutions**:
427 | 1. **Increase cache size**: `node memory-mode-controller.js config set performance.cacheSize 100`
428 | 2. **Adjust cache retention**: Increase cache cleanup threshold
429 | 3. **Pattern analysis**: Review most common missed patterns
430 | 
431 | #### Memory Service Timeouts
432 | 
433 | **Symptoms**: Frequent timeout errors in metrics
434 | 
435 | **Diagnosis**:
436 | ```bash
437 | # Test memory service responsiveness
438 | time curl -k https://localhost:8443/api/health
439 | 
440 | # Check service logs
441 | tail -f ~/Library/Logs/Claude/mcp-server-memory.log
442 | ```
443 | 
444 | **Solutions**:
445 | 1. **Increase timeout**: `node memory-mode-controller.js config set memoryService.timeout 10000`
446 | 2. **Check backend**: Switch to faster backend if available
447 | 3. **Network optimization**: Ensure local service deployment
448 | 
449 | ### Performance Profiling
450 | 
451 | #### Detailed Timing Analysis
452 | 
453 | Enable detailed timing for performance analysis:
454 | 
455 | ```bash
456 | # Enable timing instrumentation
457 | export CLAUDE_HOOKS_TIMING=true
458 | node memory-mode-controller.js test "What did we decide about authentication?"
459 | 
460 | # Example output with timing:
461 | 🧪 Testing Natural Memory Triggers [TIMING ENABLED]
462 | 
463 | Query: "What did we decide about authentication?"
464 | 
465 | [0ms] Starting analysis
466 | [2ms] Cache check: miss
467 | [7ms] Pattern analysis complete
468 | [45ms] Instant tier complete (confidence: 0.85)
469 | [147ms] Fast tier complete (confidence: 0.78)
470 | [389ms] Intensive tier complete (confidence: 0.92)
471 | [421ms] Memory query generated
472 | [567ms] Memory service response received
473 | [572ms] Analysis complete
474 | 
475 | Total Time: 572ms
476 | ```
477 | 
478 | #### Memory Profiling
479 | 
480 | Profile memory usage patterns:
481 | 
482 | ```bash
483 | # Generate memory profile
484 | node --inspect ~/.claude/hooks/memory-mode-controller.js status &
485 | # Open Chrome DevTools to chrome://inspect for memory analysis
486 | ```
487 | 
488 | ## Performance Best Practices
489 | 
490 | ### Workflow-Specific Optimization
491 | 
492 | #### Development Sessions
493 | ```bash
494 | # Morning setup for general development
495 | node memory-mode-controller.js profile balanced
496 | node memory-mode-controller.js sensitivity 0.6
497 | ```
498 | 
499 | #### Architecture Sessions
500 | ```bash
501 | # Setup for architecture work
502 | node memory-mode-controller.js profile memory_aware
503 | node memory-mode-controller.js sensitivity 0.4
504 | ```
505 | 
506 | #### Quick Fixes/Debugging
507 | ```bash
508 | # Setup for focused debugging
509 | node memory-mode-controller.js profile speed_focused
510 | node memory-mode-controller.js sensitivity 0.8
511 | ```
512 | 
513 | ### Maintenance Routines
514 | 
515 | #### Daily Maintenance
516 | ```bash
517 | # Check system health
518 | node memory-mode-controller.js health
519 | 
520 | # Review performance metrics
521 | node memory-mode-controller.js metrics
522 | 
523 | # Clear cache if hit rate is low
524 | if [ "$(node memory-mode-controller.js cache stats --json | jq '.hitRate < 0.2')" = "true" ]; then
525 |     node memory-mode-controller.js cache clear
526 | fi
527 | ```
528 | 
529 | #### Weekly Optimization
530 | ```bash
531 | # Export performance data for analysis
532 | node memory-mode-controller.js export metrics > weekly-metrics.json
533 | 
534 | # Review and adjust configuration based on usage patterns
535 | node memory-mode-controller.js metrics --recommendations
536 | 
537 | # Update adaptive learning if needed
538 | node memory-mode-controller.js config set performance.learningRate 0.1
539 | ```
540 | 
541 | ## Advanced Performance Features
542 | 
543 | ### Custom Performance Profiles
544 | 
545 | Create custom performance profiles for specific use cases:
546 | 
547 | ```bash
548 | # Create custom profile for code reviews
549 | node memory-mode-controller.js config set performance.profiles.code_review '{
550 |   "maxLatency": 250,
551 |   "enabledTiers": ["instant", "fast"],
552 |   "backgroundProcessing": true,
553 |   "degradeThreshold": 500,
554 |   "description": "Optimized for code review sessions"
555 | }'
556 | 
557 | # Activate custom profile
558 | node memory-mode-controller.js profile code_review
559 | ```
560 | 
561 | ### Performance Automation
562 | 
563 | Automate performance optimization based on context:
564 | 
565 | ```bash
566 | # Create context-aware performance script
567 | cat > ~/nmt-auto-optimize.sh << 'EOF'
568 | #!/bin/bash
569 | 
570 | # Check current time and adjust profile accordingly
571 | HOUR=$(date +%H)
572 | 
573 | if [ $HOUR -ge 9 ] && [ $HOUR -le 11 ]; then
574 |     # Morning: architecture work
575 |     node ~/.claude/hooks/memory-mode-controller.js profile memory_aware
576 | elif [ $HOUR -ge 14 ] && [ $HOUR -le 16 ]; then
577 |     # Afternoon: general development
578 |     node ~/.claude/hooks/memory-mode-controller.js profile balanced
579 | else
580 |     # Other times: speed focused
581 |     node ~/.claude/hooks/memory-mode-controller.js profile speed_focused
582 | fi
583 | EOF
584 | 
585 | chmod +x ~/nmt-auto-optimize.sh
586 | 
587 | # Add to login scripts or IDE startup
588 | ```
589 | 
590 | ---
591 | 
592 | **Natural Memory Triggers v7.1.3** provides extensive performance optimization capabilities to ensure optimal speed and accuracy for your specific development workflow! 🚀
```
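
The adaptive profile section above describes a feedback loop (learning rate 0.05, latency budget auto-adjusting between 100ms and 800ms) without showing its shape. As a minimal sketch under those stated parameters, with the class and method names (`AdaptiveLatencyTuner`, `recordFeedback`) invented for illustration rather than taken from the hook code:

```javascript
// Illustrative sketch of the kind of feedback loop the adaptive profile describes.
// Class, method, and field names are assumptions, not the shipped configuration schema.
class AdaptiveLatencyTuner {
    constructor({ learningRate = 0.05, minMs = 100, maxMs = 800, initialMs = 200 } = {}) {
        this.learningRate = learningRate;
        this.minMs = minMs;
        this.maxMs = maxMs;
        this.maxLatencyMs = initialMs;
    }

    // satisfaction in [0, 1]: 1 = retrieved memories were useful, 0 = they were noise.
    recordFeedback(satisfaction, observedLatencyMs) {
        // Useful results delivered within budget earn a slightly larger budget (deeper analysis);
        // noisy or slow results shrink it toward faster, shallower analysis.
        const direction = satisfaction >= 0.5 && observedLatencyMs <= this.maxLatencyMs ? 1 : -1;
        const step = this.learningRate * (this.maxMs - this.minMs);
        this.maxLatencyMs = Math.min(this.maxMs, Math.max(this.minMs, this.maxLatencyMs + direction * step));
        return this.maxLatencyMs;
    }
}

// Example: two satisfied rounds nudge the budget up, one noisy round pulls it back down.
const tuner = new AdaptiveLatencyTuner();
console.log(tuner.recordFeedback(0.9, 180)); // useful + fast  -> budget grows
console.log(tuner.recordFeedback(0.8, 210)); // useful again   -> grows further
console.log(tuner.recordFeedback(0.2, 600)); // noisy + slow   -> budget shrinks
```

With a 0.05 learning rate over a 700ms range, each feedback event moves the budget by 35ms, so a single noisy session cannot swing the profile by more than a few tens of milliseconds.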

--------------------------------------------------------------------------------
/src/mcp_memory_service/backup/scheduler.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Automatic backup scheduler for MCP Memory Service.
 17 | 
 18 | Provides scheduled database backups with configurable intervals and retention policies.
 19 | """
 20 | 
 21 | import asyncio
 22 | import shutil
 23 | import sqlite3
 24 | import logging
 25 | import time
 26 | from datetime import datetime, timezone, timedelta
 27 | from pathlib import Path
 28 | from typing import Dict, Any, List, Optional
 29 | 
 30 | from ..config import (
 31 |     BACKUPS_PATH,
 32 |     BACKUP_ENABLED,
 33 |     BACKUP_INTERVAL,
 34 |     BACKUP_RETENTION,
 35 |     BACKUP_MAX_COUNT,
 36 |     SQLITE_VEC_PATH
 37 | )
 38 | 
 39 | logger = logging.getLogger(__name__)
 40 | 
 41 | 
 42 | class BackupService:
 43 |     """Service for creating and managing database backups."""
 44 | 
 45 |     def __init__(self, backups_dir: str = None, db_path: str = None):
 46 |         """Initialize backup service.
 47 | 
 48 |         Args:
 49 |             backups_dir: Directory to store backups (defaults to BACKUPS_PATH)
 50 |             db_path: Path to database file (defaults to SQLITE_VEC_PATH)
 51 |         """
 52 |         self.backups_dir = Path(backups_dir or BACKUPS_PATH)
 53 |         # Determine database path with clear fallback logic
 54 |         db_path_str = db_path or SQLITE_VEC_PATH
 55 |         self.db_path = Path(db_path_str) if db_path_str else None
 56 |         self.last_backup_time: Optional[float] = None
 57 |         self.backup_count: int = 0
 58 |         self._lock = asyncio.Lock()  # Ensure thread-safe operations
 59 | 
 60 |         # Ensure backup directory exists
 61 |         self.backups_dir.mkdir(parents=True, exist_ok=True)
 62 | 
 63 |         # Load existing backup metadata
 64 |         self._load_backup_metadata()
 65 | 
 66 |         logger.info(f"BackupService initialized: backups_dir={self.backups_dir}, db_path={self.db_path}")
 67 | 
 68 |     def _load_backup_metadata(self):
 69 |         """Load metadata about existing backups."""
 70 |         backups = self.list_backups()
 71 |         self.backup_count = len(backups)
 72 |         if backups:
 73 |             # Get most recent backup time
 74 |             latest = backups[0]
 75 |             self.last_backup_time = latest.get('created_timestamp', 0)
 76 | 
 77 |     def _generate_backup_filename(self) -> str:
 78 |         """Generate a timestamped backup filename."""
 79 |         timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
 80 |         return f"memory_backup_{timestamp}.db"
 81 | 
 82 |     async def create_backup(self, description: str = None) -> Dict[str, Any]:
 83 |         """Create a new database backup.
 84 | 
 85 |         Args:
 86 |             description: Optional description for the backup
 87 | 
 88 |         Returns:
 89 |             Dict with backup details
 90 |         """
 91 |         if not self.db_path or not self.db_path.exists():
 92 |             return {
 93 |                 'success': False,
 94 |                 'error': f'Database file not found: {self.db_path}',
 95 |                 'timestamp': datetime.now(timezone.utc).isoformat()
 96 |             }
 97 | 
 98 |         async with self._lock:  # Ensure atomic operations
 99 |             try:
100 |                 start_time = time.time()
101 |                 created_at = datetime.now(timezone.utc)
102 | 
103 |                 # Generate backup filename
104 |                 backup_filename = self._generate_backup_filename()
105 |                 backup_path = self.backups_dir / backup_filename
106 | 
107 |                 # Use SQLite's native backup API for safe, consistent backups
108 |                 # This handles active database connections properly
109 |                 def _do_backup():
110 |                     source = sqlite3.connect(str(self.db_path))
111 |                     dest = sqlite3.connect(str(backup_path))
112 |                     try:
113 |                         source.backup(dest)
114 |                     finally:
115 |                         source.close()
116 |                         dest.close()
117 | 
118 |                 await asyncio.to_thread(_do_backup)
119 | 
120 |                 # Calculate backup duration (just the backup operation)
121 |                 backup_duration = time.time() - start_time
122 | 
123 |                 # Get backup size
124 |                 backup_size = backup_path.stat().st_size
125 | 
126 |                 # Update metadata
127 |                 self.last_backup_time = created_at.timestamp()
128 |                 self.backup_count += 1
129 | 
130 |                 logger.info(f"Created backup: {backup_filename} ({backup_size} bytes) in {backup_duration:.2f}s")
131 | 
132 |                 # Cleanup old backups (outside of duration calculation)
133 |                 await self.cleanup_old_backups()
134 | 
135 |                 return {
136 |                     'success': True,
137 |                     'filename': backup_filename,
138 |                     'path': str(backup_path),
139 |                     'size_bytes': backup_size,
140 |                     'description': description,
141 |                     'created_at': created_at.isoformat(),
142 |                     'duration_seconds': round(backup_duration, 3)
143 |                 }
144 | 
145 |             except Exception as e:
146 |                 logger.error(f"Failed to create backup: {e}")
147 |                 return {
148 |                     'success': False,
149 |                     'error': str(e),
150 |                     'timestamp': datetime.now(timezone.utc).isoformat()
151 |                 }
152 | 
153 |     def list_backups(self) -> List[Dict[str, Any]]:
154 |         """List all available backups.
155 | 
156 |         Returns:
157 |             List of backup info dicts, sorted by date (newest first)
158 |         """
159 |         backups = []
160 | 
161 |         try:
162 |             for backup_file in self.backups_dir.glob('memory_backup_*.db'):
163 |                 stat = backup_file.stat()
164 | 
165 |                 # Parse timestamp from filename
166 |                 try:
167 |                     timestamp_str = backup_file.stem.replace('memory_backup_', '')
168 |                     created_dt = datetime.strptime(timestamp_str, '%Y%m%d_%H%M%S')
169 |                     created_dt = created_dt.replace(tzinfo=timezone.utc)
170 |                 except ValueError:
171 |                     created_dt = datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc)
172 | 
173 |                 backups.append({
174 |                     'filename': backup_file.name,
175 |                     'path': str(backup_file),
176 |                     'size_bytes': stat.st_size,
177 |                     'created_at': created_dt.isoformat(),
178 |                     'created_timestamp': created_dt.timestamp(),
179 |                     'age_days': (datetime.now(timezone.utc) - created_dt).days
180 |                 })
181 | 
182 |             # Sort by creation time, newest first
183 |             backups.sort(key=lambda x: x['created_timestamp'], reverse=True)
184 | 
185 |         except Exception as e:
186 |             logger.error(f"Error listing backups: {e}")
187 | 
188 |         return backups
189 | 
190 |     async def cleanup_old_backups(self) -> Dict[str, Any]:
191 |         """Remove old backups based on retention policy.
192 | 
193 |         Removes backups that are:
194 |         - Older than BACKUP_RETENTION days
195 |         - Exceed BACKUP_MAX_COUNT
196 | 
197 |         Returns:
198 |             Dict with cleanup results
199 |         """
200 |         removed = []
201 |         errors = []
202 | 
203 |         try:
204 |             backups = self.list_backups()
205 |             retention_cutoff = datetime.now(timezone.utc) - timedelta(days=BACKUP_RETENTION)
206 | 
207 |             for i, backup in enumerate(backups):
208 |                 should_remove = False
209 |                 reason = ""
210 | 
211 |                 # Check if exceeds max count
212 |                 if i >= BACKUP_MAX_COUNT:
213 |                     should_remove = True
214 |                     reason = f"exceeds max count ({BACKUP_MAX_COUNT})"
215 | 
216 |                 # Check if older than retention period
217 |                 try:
218 |                     created_dt = datetime.fromisoformat(backup['created_at'].replace('Z', '+00:00'))
219 |                     if created_dt < retention_cutoff:
220 |                         should_remove = True
221 |                         reason = f"older than {BACKUP_RETENTION} days"
222 |                 except (ValueError, KeyError) as e:
223 |                     logger.warning(f"Could not parse timestamp for backup {backup.get('filename', 'unknown')}: {e}")
224 | 
225 |                 if should_remove:
226 |                     try:
227 |                         # Use asyncio.to_thread to avoid blocking the event loop
228 |                         await asyncio.to_thread(Path(backup['path']).unlink)
229 |                         removed.append({
230 |                             'filename': backup['filename'],
231 |                             'reason': reason
232 |                         })
233 |                         logger.info(f"Removed old backup: {backup['filename']} ({reason})")
234 |                     except Exception as e:
235 |                         errors.append({
236 |                             'filename': backup['filename'],
237 |                             'error': str(e)
238 |                         })
239 |                         logger.error(f"Failed to remove backup {backup['filename']}: {e}")
240 | 
241 |             # Update count more efficiently by subtracting removed count
242 |             self.backup_count = max(0, self.backup_count - len(removed))
243 | 
244 |         except Exception as e:
245 |             logger.error(f"Error during backup cleanup: {e}")
246 |             errors.append({'error': str(e)})
247 | 
248 |         return {
249 |             'removed_count': len(removed),
250 |             'removed': removed,
251 |             'errors': errors
252 |         }
253 | 
254 |     async def restore_backup(self, filename: str) -> Dict[str, Any]:
255 |         """Restore database from a backup.
256 | 
257 |         Args:
258 |             filename: Name of backup file to restore
259 | 
260 |         Returns:
261 |             Dict with restore results
262 |         """
263 |         backup_path = self.backups_dir / filename
264 | 
265 |         if not backup_path.exists():
266 |             return {
267 |                 'success': False,
268 |                 'error': f'Backup file not found: {filename}'
269 |             }
270 | 
271 |         if not self.db_path:
272 |             return {
273 |                 'success': False,
274 |                 'error': 'Database path not configured'
275 |             }
276 | 
277 |         try:
278 |             # Create a backup of current database first
279 |             if self.db_path.exists():
280 |                 current_backup = self.db_path.with_suffix('.db.pre_restore')
281 |                 # Use asyncio.to_thread to avoid blocking the event loop
282 |                 await asyncio.to_thread(shutil.copy2, str(self.db_path), str(current_backup))
283 |                 logger.info(f"Created pre-restore backup: {current_backup}")
284 | 
285 |             # Restore from backup
286 |             # Use asyncio.to_thread to avoid blocking the event loop
287 |             await asyncio.to_thread(shutil.copy2, str(backup_path), str(self.db_path))
288 | 
289 |             logger.info(f"Restored database from backup: {filename}")
290 | 
291 |             return {
292 |                 'success': True,
293 |                 'filename': filename,
294 |                 'restored_at': datetime.now(timezone.utc).isoformat()
295 |             }
296 | 
297 |         except Exception as e:
298 |             logger.error(f"Failed to restore backup: {e}")
299 |             return {
300 |                 'success': False,
301 |                 'error': str(e)
302 |             }
303 | 
304 |     def get_status(self) -> Dict[str, Any]:
305 |         """Get current backup service status.
306 | 
307 |         Returns:
308 |             Dict with backup service status
309 |         """
310 |         backups = self.list_backups()
311 |         total_size = sum(b['size_bytes'] for b in backups)
312 | 
313 |         # Calculate time since last backup
314 |         time_since_last = None
315 |         if self.last_backup_time:
316 |             time_since_last = time.time() - self.last_backup_time
317 | 
318 |         # Calculate next scheduled backup time
319 |         next_backup = self._calculate_next_backup_time()
320 | 
321 |         return {
322 |             'enabled': BACKUP_ENABLED,
323 |             'interval': BACKUP_INTERVAL,
324 |             'retention_days': BACKUP_RETENTION,
325 |             'max_count': BACKUP_MAX_COUNT,
326 |             'backup_count': len(backups),
327 |             'total_size_bytes': total_size,
328 |             'last_backup_time': self.last_backup_time,
329 |             'time_since_last_seconds': time_since_last,
330 |             'next_backup_at': next_backup.isoformat() if next_backup else None,
331 |             'backups_dir': str(self.backups_dir),
332 |             'db_path': str(self.db_path) if self.db_path else None
333 |         }
334 | 
335 |     def _calculate_next_backup_time(self) -> Optional[datetime]:
336 |         """Calculate the next scheduled backup time."""
337 |         if not BACKUP_ENABLED or not self.last_backup_time:
338 |             return None
339 | 
340 |         last_backup_dt = datetime.fromtimestamp(self.last_backup_time, tz=timezone.utc)
341 | 
342 |         if BACKUP_INTERVAL == 'hourly':
343 |             return last_backup_dt + timedelta(hours=1)
344 |         elif BACKUP_INTERVAL == 'daily':
345 |             return last_backup_dt + timedelta(days=1)
346 |         elif BACKUP_INTERVAL == 'weekly':
347 |             return last_backup_dt + timedelta(weeks=1)
348 | 
349 |         return None
350 | 
351 | 
352 | class BackupScheduler:
353 |     """Scheduler for automatic database backups."""
354 | 
355 |     def __init__(self, backup_service: BackupService = None):
356 |         """Initialize backup scheduler.
357 | 
358 |         Args:
359 |             backup_service: BackupService instance (creates one if not provided)
360 |         """
361 |         self.backup_service = backup_service or BackupService()
362 |         self.is_running = False
363 |         self._task: Optional[asyncio.Task] = None
364 | 
365 |         logger.info("BackupScheduler initialized")
366 | 
367 |     def _get_interval_seconds(self) -> int:
368 |         """Get backup interval in seconds."""
369 |         if BACKUP_INTERVAL == 'hourly':
370 |             return 3600
371 |         elif BACKUP_INTERVAL == 'daily':
372 |             return 86400
373 |         elif BACKUP_INTERVAL == 'weekly':
374 |             return 604800
375 |         return 86400  # Default to daily
376 | 
377 |     async def start(self):
378 |         """Start the backup scheduler."""
379 |         if self.is_running:
380 |             logger.warning("BackupScheduler already running")
381 |             return
382 | 
383 |         if not BACKUP_ENABLED:
384 |             logger.info("Backups disabled, scheduler not started")
385 |             return
386 | 
387 |         self.is_running = True
388 |         self._task = asyncio.create_task(self._schedule_loop())
389 |         logger.info(f"BackupScheduler started with {BACKUP_INTERVAL} interval")
390 | 
391 |     async def stop(self):
392 |         """Stop the backup scheduler."""
393 |         if not self.is_running:
394 |             return
395 | 
396 |         self.is_running = False
397 |         if self._task:
398 |             self._task.cancel()
399 |             try:
400 |                 await self._task
401 |             except asyncio.CancelledError:
402 |                 pass
403 | 
404 |         logger.info("BackupScheduler stopped")
405 | 
406 |     async def _schedule_loop(self):
407 |         """Main scheduling loop."""
408 |         interval_seconds = self._get_interval_seconds()
409 | 
410 |         while self.is_running:
411 |             try:
412 |                 # Check if it's time for a backup
413 |                 should_backup = False
414 | 
415 |                 if not self.backup_service.last_backup_time:
416 |                     # No previous backup, create one
417 |                     should_backup = True
418 |                 else:
419 |                     time_since_last = time.time() - self.backup_service.last_backup_time
420 |                     if time_since_last >= interval_seconds:
421 |                         should_backup = True
422 | 
423 |                 if should_backup:
424 |                     logger.info("Scheduled backup triggered")
425 |                     result = await self.backup_service.create_backup(
426 |                         description=f"Scheduled {BACKUP_INTERVAL} backup"
427 |                     )
428 |                     if result['success']:
429 |                         logger.info(f"Scheduled backup completed: {result['filename']}")
430 |                     else:
431 |                         logger.error(f"Scheduled backup failed: {result.get('error')}")
432 | 
433 |                 # Sleep for a check interval (every 5 minutes)
434 |                 await asyncio.sleep(300)
435 | 
436 |             except asyncio.CancelledError:
437 |                 break
438 |             except Exception as e:
439 |                 logger.error(f"Error in backup scheduler loop: {e}")
440 |                 await asyncio.sleep(60)  # Wait before retrying
441 | 
442 |     def get_status(self) -> Dict[str, Any]:
443 |         """Get scheduler status.
444 | 
445 |         Returns:
446 |             Dict with scheduler status and backup service status
447 |         """
448 |         status = self.backup_service.get_status()
449 |         status['scheduler_running'] = self.is_running
450 |         return status
451 | 
452 | 
453 | # Global backup service instance
454 | _backup_service: Optional[BackupService] = None
455 | _backup_scheduler: Optional[BackupScheduler] = None
456 | 
457 | 
458 | def get_backup_service() -> BackupService:
459 |     """Get or create the global backup service instance."""
460 |     global _backup_service
461 |     if _backup_service is None:
462 |         _backup_service = BackupService()
463 |     return _backup_service
464 | 
465 | 
466 | def get_backup_scheduler() -> BackupScheduler:
467 |     """Get or create the global backup scheduler instance."""
468 |     global _backup_scheduler
469 |     if _backup_scheduler is None:
470 |         _backup_scheduler = BackupScheduler(get_backup_service())
471 |     return _backup_scheduler
472 | 
```
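
For orientation, here is a minimal wiring sketch for the module above: it exercises the global accessors, the scheduler lifecycle, and the backup/listing calls defined in this file. The import path `mcp_memory_service.backup` is an assumption for illustration only; substitute the module's actual location in the package.

```python
# Editorial sketch, not repository code. Assumes the backup module lives at
# mcp_memory_service.backup (hypothetical path) and that the BACKUP_* settings
# are already configured through the usual environment variables.
import asyncio

from mcp_memory_service.backup import get_backup_service, get_backup_scheduler


async def main() -> None:
    service = get_backup_service()
    scheduler = get_backup_scheduler()

    # Start the periodic scheduler; it is a no-op when backups are disabled.
    await scheduler.start()

    # Trigger a one-off backup in addition to the schedule.
    result = await service.create_backup(description="manual backup before upgrade")
    if result["success"]:
        print("created", result["filename"])
    else:
        print("backup failed:", result.get("error"))

    # Inspect what the retention policy is currently holding on to.
    for info in service.list_backups():
        print(f"{info['filename']}  {info['size_bytes']} bytes  {info['age_days']} days old")

    await scheduler.stop()


if __name__ == "__main__":
    asyncio.run(main())
```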

--------------------------------------------------------------------------------
/scripts/installation/install_linux_service.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Linux systemd service installer for MCP Memory Service.
  4 | Creates and manages systemd service files for automatic service startup.
  5 | """
  6 | import os
  7 | import sys
  8 | import json
  9 | import argparse
 10 | import subprocess
 11 | from pathlib import Path
 12 | import pwd
 13 | import grp
 14 | 
 15 | # Add the project root to sys.path so the scripts package can be imported
 16 | sys.path.insert(0, str(Path(__file__).parent.parent.parent))
 17 | 
 18 | try:
 19 |     from scripts.service_utils import (
 20 |         get_project_root, get_service_paths, get_service_environment,
 21 |         generate_api_key, save_service_config, load_service_config,
 22 |         check_dependencies, get_service_command, print_service_info,
 23 |         require_admin
 24 |     )
 25 | except ImportError as e:
 26 |     print(f"Error importing service utilities: {e}")
 27 |     print("Please ensure you're running this from the project directory")
 28 |     sys.exit(1)
 29 | 
 30 | 
 31 | SERVICE_NAME = "mcp-memory"
 32 | SERVICE_DISPLAY_NAME = "MCP Memory Service"
 33 | SERVICE_DESCRIPTION = "MCP Memory Service with Consolidation and mDNS"
 34 | 
 35 | 
 36 | def get_systemd_paths(user_level=True):
 37 |     """Get the paths for systemd service files."""
 38 |     if user_level:
 39 |         # User-level systemd service
 40 |         service_dir = Path.home() / ".config" / "systemd" / "user"
 41 |         service_file = service_dir / f"{SERVICE_NAME}.service"
 42 |         systemctl_cmd = "systemctl --user"
 43 |     else:
 44 |         # System-level systemd service
 45 |         service_dir = Path("/etc/systemd/system")
 46 |         service_file = service_dir / f"{SERVICE_NAME}.service"
 47 |         systemctl_cmd = "sudo systemctl"
 48 |     
 49 |     return service_dir, service_file, systemctl_cmd
 50 | 
 51 | 
 52 | def create_systemd_service(api_key, user_level=True):
 53 |     """Create the systemd service unit file."""
 54 |     paths = get_service_paths()
 55 |     command = get_service_command()
 56 |     environment = get_service_environment()
 57 |     environment['MCP_API_KEY'] = api_key
 58 |     
 59 |     # Get current user info
 60 |     current_user = pwd.getpwuid(os.getuid())
 61 |     username = current_user.pw_name
 62 |     groupname = grp.getgrgid(current_user.pw_gid).gr_name
 63 |     
 64 |     # Build environment lines
 65 |     env_lines = []
 66 |     for key, value in environment.items():
 67 |         env_lines.append(f'Environment={key}={value}')
 68 |     
 69 |     # Create service content
 70 |     service_content = f'''[Unit]
 71 | Description={SERVICE_DESCRIPTION}
 72 | Documentation=https://github.com/doobidoo/mcp-memory-service
 73 | After=network.target network-online.target
 74 | Wants=network-online.target
 75 | 
 76 | [Service]
 77 | Type=simple
 78 | '''
 79 |     
 80 |     # Add user/group for system-level service
 81 |     if not user_level:
 82 |         service_content += f'''User={username}
 83 | Group={groupname}
 84 | '''
 85 |     
 86 |     service_content += f'''WorkingDirectory={paths['project_root']}
 87 | ExecStart={' '.join(command)}
 88 | {chr(10).join(env_lines)}
 89 | Restart=always
 90 | RestartSec=10
 91 | StandardOutput=journal
 92 | StandardError=journal
 93 | SyslogIdentifier={SERVICE_NAME}
 94 | '''
 95 |     
 96 |     # Add capabilities for binding to privileged ports (if using HTTPS on 443)
 97 |     if not user_level and environment.get('MCP_HTTP_PORT') == '443':
 98 |         service_content += '''AmbientCapabilities=CAP_NET_BIND_SERVICE
 99 | CapabilityBoundingSet=CAP_NET_BIND_SERVICE
100 | '''
101 |     
102 |     service_content += '''
103 | [Install]
104 | WantedBy='''
105 |     
106 |     if user_level:
107 |         service_content += 'default.target'
108 |     else:
109 |         service_content += 'multi-user.target'
110 |     
111 |     return service_content
112 | 
113 | 
114 | def create_shell_scripts():
115 |     """Create convenient shell scripts for service management."""
116 |     paths = get_service_paths()
117 |     scripts_dir = paths['scripts_dir'] / 'linux'
118 |     scripts_dir.mkdir(exist_ok=True)
119 |     
120 |     # Determine if user or system service based on existing installation
121 |     user_service_file = Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
122 |     system_service_file = Path(f"/etc/systemd/system/{SERVICE_NAME}.service")
123 |     
124 |     if user_service_file.exists():
125 |         systemctl = "systemctl --user"
126 |         sudo = ""
127 |     elif system_service_file.exists():
128 |         systemctl = "systemctl"
129 |         sudo = "sudo "
130 |     else:
131 |         # Default to user
132 |         systemctl = "systemctl --user"
133 |         sudo = ""
134 |     
135 |     # Start script
136 |     start_script = scripts_dir / 'start_service.sh'
137 |     with open(start_script, 'w') as f:
138 |         f.write(f'''#!/bin/bash
139 | echo "Starting {SERVICE_DISPLAY_NAME}..."
140 | {sudo}{systemctl} start {SERVICE_NAME}
141 | if [ $? -eq 0 ]; then
142 |     echo "✅ Service started successfully!"
143 | else
144 |     echo "❌ Failed to start service"
145 | fi
146 | ''')
147 |     start_script.chmod(0o755)
148 |     
149 |     # Stop script
150 |     stop_script = scripts_dir / 'stop_service.sh'
151 |     with open(stop_script, 'w') as f:
152 |         f.write(f'''#!/bin/bash
153 | echo "Stopping {SERVICE_DISPLAY_NAME}..."
154 | {sudo}{systemctl} stop {SERVICE_NAME}
155 | if [ $? -eq 0 ]; then
156 |     echo "✅ Service stopped successfully!"
157 | else
158 |     echo "❌ Failed to stop service"
159 | fi
160 | ''')
161 |     stop_script.chmod(0o755)
162 |     
163 |     # Status script
164 |     status_script = scripts_dir / 'service_status.sh'
165 |     with open(status_script, 'w') as f:
166 |         f.write(f'''#!/bin/bash
167 | echo "{SERVICE_DISPLAY_NAME} Status:"
168 | echo "============================================================"
169 | {sudo}{systemctl} status {SERVICE_NAME}
170 | ''')
171 |     status_script.chmod(0o755)
172 |     
173 |     # Logs script
174 |     logs_script = scripts_dir / 'view_logs.sh'
175 |     with open(logs_script, 'w') as f:
176 |         f.write(f'''#!/bin/bash
177 | echo "Viewing {SERVICE_DISPLAY_NAME} logs (press Ctrl+C to exit)..."
178 | {sudo}journalctl -u {SERVICE_NAME} -f
179 | ''')
180 |     logs_script.chmod(0o755)
181 |     
182 |     # Uninstall script
183 |     uninstall_script = scripts_dir / 'uninstall_service.sh'
184 |     with open(uninstall_script, 'w') as f:
185 |         f.write(f'''#!/bin/bash
186 | echo "This will uninstall {SERVICE_DISPLAY_NAME}."
187 | read -p "Are you sure? (y/N): " confirm
188 | if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
189 |     exit 0
190 | fi
191 | 
192 | echo "Stopping service..."
193 | {sudo}{systemctl} stop {SERVICE_NAME} 2>/dev/null
194 | {sudo}{systemctl} disable {SERVICE_NAME} 2>/dev/null
195 | 
196 | echo "Removing service files..."
197 | if [ -f "$HOME/.config/systemd/user/{SERVICE_NAME}.service" ]; then
198 |     rm -f "$HOME/.config/systemd/user/{SERVICE_NAME}.service"
199 |     systemctl --user daemon-reload
200 | else
201 |     sudo rm -f /etc/systemd/system/{SERVICE_NAME}.service
202 |     sudo systemctl daemon-reload
203 | fi
204 | 
205 | echo "✅ Service uninstalled"
206 | ''')
207 |     uninstall_script.chmod(0o755)
208 |     
209 |     return scripts_dir
210 | 
211 | 
212 | def install_service(user_level=True):
213 |     """Install the Linux systemd service."""
214 |     service_type = "user service" if user_level else "system service"
215 |     
216 |     # Check for root if system-level
217 |     if not user_level:
218 |         require_admin(f"System-level service installation requires root privileges")
219 |     
220 |     print(f"\n🔍 Checking dependencies...")
221 |     deps_ok, deps_msg = check_dependencies()
222 |     if not deps_ok:
223 |         print(f"❌ {deps_msg}")
224 |         sys.exit(1)
225 |     print(f"✅ {deps_msg}")
226 |     
227 |     # Generate API key
228 |     api_key = generate_api_key()
229 |     print(f"\n🔑 Generated API key: {api_key}")
230 |     
231 |     # Create service configuration
232 |     config = {
233 |         'service_name': SERVICE_NAME,
234 |         'api_key': api_key,
235 |         'command': get_service_command(),
236 |         'environment': get_service_environment(),
237 |         'user_level': user_level
238 |     }
239 |     
240 |     # Save configuration
241 |     config_file = save_service_config(config)
242 |     print(f"💾 Saved configuration to: {config_file}")
243 |     
244 |     # Get systemd paths
245 |     service_dir, service_file, systemctl_cmd = get_systemd_paths(user_level)
246 |     
247 |     # Create service directory if it doesn't exist
248 |     service_dir.mkdir(parents=True, exist_ok=True)
249 |     
250 |     # Create service file
251 |     print(f"\n📝 Creating systemd {service_type} file...")
252 |     service_content = create_systemd_service(api_key, user_level)
253 |     
254 |     # Write service file
255 |     if user_level:
256 |         with open(service_file, 'w') as f:
257 |             f.write(service_content)
258 |         os.chmod(service_file, 0o644)
259 |     else:
260 |         # Use sudo to write system service file
261 |         import tempfile
262 |         with tempfile.NamedTemporaryFile(mode='w', delete=False) as tmp:
263 |             tmp.write(service_content)
264 |             tmp_path = tmp.name
265 |         
266 |         subprocess.run(['sudo', 'cp', tmp_path, str(service_file)], check=True)
267 |         subprocess.run(['sudo', 'chmod', '644', str(service_file)], check=True)
268 |         os.unlink(tmp_path)
269 |     
270 |     print(f"✅ Created service file at: {service_file}")
271 |     
272 |     # Reload systemd
273 |     print("\n🔄 Reloading systemd daemon...")
274 |     if user_level:
275 |         subprocess.run(['systemctl', '--user', 'daemon-reload'], check=True)
276 |     else:
277 |         subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
278 |     
279 |     # Enable the service
280 |     print(f"\n🚀 Enabling {service_type}...")
281 |     cmd = systemctl_cmd.split() + ['enable', SERVICE_NAME]
282 |     result = subprocess.run(cmd, capture_output=True, text=True)
283 |     
284 |     if result.returncode != 0:
285 |         print(f"❌ Failed to enable service: {result.stderr}")
286 |         sys.exit(1)
287 |     
288 |     print(f"✅ Service enabled for automatic startup!")
289 |     
290 |     # Create convenience scripts
291 |     scripts_dir = create_shell_scripts()
292 |     print(f"\n📁 Created management scripts in: {scripts_dir}")
293 |     
294 |     # Print service information
295 |     platform_info = {
296 |         'Start Service': f'{systemctl_cmd} start {SERVICE_NAME}',
297 |         'Stop Service': f'{systemctl_cmd} stop {SERVICE_NAME}',
298 |         'Service Status': f'{systemctl_cmd} status {SERVICE_NAME}',
299 |         'View Logs': f'{"sudo " if not user_level else ""}journalctl {"--user " if user_level else ""}-u {SERVICE_NAME} -f',
300 |         'Uninstall': f'python "{Path(__file__)}" --uninstall'
301 |     }
302 |     
303 |     print_service_info(api_key, platform_info)
304 |     
305 |     # Additional Linux-specific tips
306 |     print("\n📌 Linux Tips:")
307 |     print(f"  • Service will start automatically on {'login' if user_level else 'boot'}")
308 |     print(f"  • Use journalctl to view detailed logs")
309 |     print(f"  • {'User services require you to be logged in' if user_level else 'System service runs independently'}")
310 |     
311 |     # Offer to start the service
312 |     print(f"\n▶️  To start the service now, run:")
313 |     print(f"  {systemctl_cmd} start {SERVICE_NAME}")
314 |     
315 |     return True
316 | 
317 | 
318 | def uninstall_service(user_level=None):
319 |     """Uninstall the Linux systemd service."""
320 |     # Auto-detect installation type if not specified
321 |     if user_level is None:
322 |         user_service_file = Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
323 |         system_service_file = Path(f"/etc/systemd/system/{SERVICE_NAME}.service")
324 |         
325 |         if user_service_file.exists():
326 |             user_level = True
327 |         elif system_service_file.exists():
328 |             user_level = False
329 |         else:
330 |             print("❌ Service is not installed")
331 |             return
332 |     
333 |     service_type = "user service" if user_level else "system service"
334 |     
335 |     # Check for root if system-level
336 |     if not user_level:
337 |         require_admin(f"System-level service removal requires root privileges")
338 |     
339 |     print(f"\n🗑️  Uninstalling {SERVICE_DISPLAY_NAME} {service_type}...")
340 |     
341 |     # Get systemd paths
342 |     service_dir, service_file, systemctl_cmd = get_systemd_paths(user_level)
343 |     
344 |     if service_file.exists() or (not user_level and Path(f"/etc/systemd/system/{SERVICE_NAME}.service").exists()):
345 |         # Stop the service
346 |         print("⏹️  Stopping service...")
347 |         cmd = systemctl_cmd.split() + ['stop', SERVICE_NAME]
348 |         subprocess.run(cmd, capture_output=True)
349 |         
350 |         # Disable the service
351 |         print("🔌 Disabling service...")
352 |         cmd = systemctl_cmd.split() + ['disable', SERVICE_NAME]
353 |         subprocess.run(cmd, capture_output=True)
354 |         
355 |         # Remove service file
356 |         print("🗑️  Removing service file...")
357 |         if user_level:
358 |             service_file.unlink()
359 |         else:
360 |             subprocess.run(['sudo', 'rm', '-f', str(service_file)], check=True)
361 |         
362 |         # Reload systemd
363 |         print("🔄 Reloading systemd daemon...")
364 |         if user_level:
365 |             subprocess.run(['systemctl', '--user', 'daemon-reload'], check=True)
366 |         else:
367 |             subprocess.run(['sudo', 'systemctl', 'daemon-reload'], check=True)
368 |         
369 |         print(f"✅ {service_type} uninstalled successfully!")
370 |     else:
371 |         print(f"ℹ️  {service_type} is not installed")
372 |     
373 |     # Clean up configuration
374 |     config = load_service_config()
375 |     if config and config.get('service_name') == SERVICE_NAME:
376 |         print("🧹 Cleaning up configuration...")
377 |         config_file = get_service_paths()['config_dir'] / 'service_config.json'
378 |         config_file.unlink()
379 | 
380 | 
381 | def start_service(user_level=None):
382 |     """Start the Linux service."""
383 |     # Auto-detect if not specified
384 |     if user_level is None:
385 |         user_service_file = Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
386 |         user_level = user_service_file.exists()
387 |     
388 |     service_dir, service_file, systemctl_cmd = get_systemd_paths(user_level)
389 |     
390 |     print(f"\n▶️  Starting {SERVICE_DISPLAY_NAME}...")
391 |     
392 |     cmd = systemctl_cmd.split() + ['start', SERVICE_NAME]
393 |     result = subprocess.run(cmd, capture_output=True, text=True)
394 |     
395 |     if result.returncode == 0:
396 |         print("✅ Service started successfully!")
397 |     else:
398 |         print(f"❌ Failed to start service: {result.stderr}")
399 |         print(f"\n💡 Check logs with: {systemctl_cmd} status {SERVICE_NAME}")
400 | 
401 | 
402 | def stop_service(user_level=None):
403 |     """Stop the Linux service."""
404 |     # Auto-detect if not specified
405 |     if user_level is None:
406 |         user_service_file = Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
407 |         user_level = user_service_file.exists()
408 |     
409 |     service_dir, service_file, systemctl_cmd = get_systemd_paths(user_level)
410 |     
411 |     print(f"\n⏹️  Stopping {SERVICE_DISPLAY_NAME}...")
412 |     
413 |     cmd = systemctl_cmd.split() + ['stop', SERVICE_NAME]
414 |     result = subprocess.run(cmd, capture_output=True, text=True)
415 |     
416 |     if result.returncode == 0:
417 |         print("✅ Service stopped successfully!")
418 |     else:
419 |         print(f"ℹ️  Service may not be running: {result.stderr}")
420 | 
421 | 
422 | def service_status(user_level=None):
423 |     """Check the Linux service status."""
424 |     # Auto-detect if not specified
425 |     if user_level is None:
426 |         user_service_file = Path.home() / ".config" / "systemd" / "user" / f"{SERVICE_NAME}.service"
427 |         system_service_file = Path(f"/etc/systemd/system/{SERVICE_NAME}.service")
428 |         
429 |         if user_service_file.exists():
430 |             user_level = True
431 |         elif system_service_file.exists():
432 |             user_level = False
433 |         else:
434 |             print(f"\n❌ {SERVICE_DISPLAY_NAME} is not installed")
435 |             return
436 |     
437 |     service_dir, service_file, systemctl_cmd = get_systemd_paths(user_level)
438 |     
439 |     print(f"\n📊 {SERVICE_DISPLAY_NAME} Status:")
440 |     print("-" * 60)
441 |     
442 |     # Get detailed status
443 |     cmd = systemctl_cmd.split() + ['status', SERVICE_NAME, '--no-pager']
444 |     subprocess.run(cmd)
445 |     
446 |     # Show configuration
447 |     config = load_service_config()
448 |     if config:
449 |         print(f"\n📋 Configuration:")
450 |         print(f"  Service Name: {SERVICE_NAME}")
451 |         print(f"  API Key: {config.get('api_key', 'Not set')}")
452 |         print(f"  Type: {'User Service' if user_level else 'System Service'}")
453 |         print(f"  Service File: {service_file}")
454 | 
455 | 
456 | def main():
457 |     """Main entry point."""
458 |     parser = argparse.ArgumentParser(
459 |         description="Linux systemd service installer for MCP Memory Service"
460 |     )
461 |     
462 |     # Service level
463 |     parser.add_argument('--user', action='store_true',
464 |                         help='Install as user service (default)')
465 |     parser.add_argument('--system', action='store_true',
466 |                         help='Install as system service (requires sudo)')
467 |     
468 |     # Actions
469 |     parser.add_argument('--uninstall', action='store_true', help='Uninstall the service')
470 |     parser.add_argument('--start', action='store_true', help='Start the service')
471 |     parser.add_argument('--stop', action='store_true', help='Stop the service')
472 |     parser.add_argument('--status', action='store_true', help='Check service status')
473 |     parser.add_argument('--restart', action='store_true', help='Restart the service')
474 |     
475 |     args = parser.parse_args()
476 |     
477 |     # Determine service level
478 |     if args.system and args.user:
479 |         print("❌ Cannot specify both --user and --system")
480 |         sys.exit(1)
481 |     
482 |     user_level = None  # Auto-detect for status/start/stop
483 |     if args.system:
484 |         user_level = False
485 |     elif args.user or not any([args.uninstall, args.start, args.stop, args.status, args.restart]):
486 |         user_level = True  # Default to user for installation
487 |     
488 |     if args.uninstall:
489 |         uninstall_service(user_level)
490 |     elif args.start:
491 |         start_service(user_level)
492 |     elif args.stop:
493 |         stop_service(user_level)
494 |     elif args.status:
495 |         service_status(user_level)
496 |     elif args.restart:
497 |         stop_service(user_level)
498 |         start_service(user_level)
499 |     else:
500 |         # Default action is to install
501 |         install_service(user_level)
502 | 
503 | 
504 | if __name__ == '__main__':
505 |     main()
```
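
As a usage sketch (editorial, not part of the repository), the typical install / start / status / uninstall cycle for this installer can be driven as shown below; the script path comes from the dump above and the flags from its argparse definition, and it is assumed to be run on a Linux host with systemd available.

```python
# Editorial sketch: drive the systemd installer above through its CLI.
# Only flags defined in the script's argument parser are used.
import subprocess

INSTALLER = "scripts/installation/install_linux_service.py"

# Install as a user-level service (the default) and enable it for login startup.
subprocess.run(["python", INSTALLER, "--user"], check=True)

# Start it, then show the unit's status and the saved configuration.
subprocess.run(["python", INSTALLER, "--start"], check=True)
subprocess.run(["python", INSTALLER, "--status"], check=True)

# Later, stop the unit, remove the service file, and clean up the config.
subprocess.run(["python", INSTALLER, "--uninstall"], check=True)
```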
Page 26/47