This is page 60 of 62. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.
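
For example, a minimal sketch for pulling down the whole export (assuming the standard `requests` package; the page count and URL pattern are taken from the note above):

```python
import requests

BASE_URL = "http://codebase.md/doobidoo/mcp-memory-service"

# Fetch all 62 pages with line numbers enabled and concatenate
# them into a single local markdown file for offline reading.
with open("mcp-memory-service-full.md", "w", encoding="utf-8") as out:
    for page in range(1, 63):
        resp = requests.get(BASE_URL, params={"lines": "true", "page": page})
        resp.raise_for_status()
        out.write(resp.text)
```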

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── commands
│   │   ├── README.md
│   │   ├── refactor-function
│   │   ├── refactor-function-prod
│   │   └── refactor-function.md
│   ├── consolidation-fix-handoff.md
│   ├── consolidation-hang-fix-summary.md
│   ├── directives
│   │   ├── agents.md
│   │   ├── code-quality-workflow.md
│   │   ├── consolidation-details.md
│   │   ├── development-setup.md
│   │   ├── hooks-configuration.md
│   │   ├── memory-first.md
│   │   ├── memory-tagging.md
│   │   ├── pr-workflow.md
│   │   ├── quality-system-details.md
│   │   ├── README.md
│   │   ├── refactoring-checklist.md
│   │   ├── storage-backends.md
│   │   └── version-management.md
│   ├── prompts
│   │   └── hybrid-cleanup-integration.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .coveragerc
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-branch-automation.yml
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── dockerfile-lint.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── publish-dual.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .metrics
│   ├── baseline_cc_install_hooks.txt
│   ├── baseline_mi_install_hooks.txt
│   ├── baseline_nesting_install_hooks.txt
│   ├── BASELINE_REPORT.md
│   ├── COMPLEXITY_COMPARISON.txt
│   ├── QUICK_REFERENCE.txt
│   ├── README.md
│   ├── REFACTORED_BASELINE.md
│   ├── REFACTORING_COMPLETION_REPORT.md
│   └── TRACKING_TABLE.md
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── ai-optimized-tool-descriptions.py
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── auto-capture-hook.js
│   │   ├── auto-capture-hook.ps1
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── permission-request.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-AUTO-CAPTURE.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-PERMISSION-REQUEST.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-permission-request.js
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── auto-capture-patterns.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-cache.json
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   ├── user-override-detector.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── COMMIT_MESSAGE.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── graph-database-design.md
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── demo-recording-script.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-280-post-mortem.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   ├── quality-system-configs.md
│   │   └── tag-schema.json
│   ├── features
│   │   └── association-quality-boost.md
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── memory-quality-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   ├── dashboard-placeholder.md
│   │   └── update-restart-demo.png
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LIGHTWEIGHT_ONNX_SETUP.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   ├── code-execution-api-quick-start.md
│   │   └── graph-migration-guide.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quality-system-ui-implementation.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── refactoring
│   │   └── phase-3-3-analysis.md
│   ├── releases
│   │   └── v8.72.0-testing.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── database-transfer-migration.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── memory-management.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   ├── tutorials
│   │   ├── advanced-techniques.md
│   │   ├── data-analysis.md
│   │   └── demo-session-walkthrough.md
│   ├── wiki-documentation-plan.md
│   └── wiki-Graph-Database-Architecture.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── IMPLEMENTATION_SUMMARY.md
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── PR_DESCRIPTION.md
├── pyproject-lite.toml
├── pyproject.toml
├── pytest.ini
├── README.md
├── release-notes-v8.61.0.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── ci
│   │   ├── check_dockerfile_args.sh
│   │   └── validate_imports.sh
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── add_project_tags.py
│   │   ├── apply_quality_boost_retroactively.py
│   │   ├── assign_memory_types.py
│   │   ├── auto_retag_memory_merge.py
│   │   ├── auto_retag_memory.py
│   │   ├── backfill_graph_table.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_association_memories_hybrid.py
│   │   ├── cleanup_association_memories.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_low_quality.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── delete_test_memories.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   ├── retag_valuable_memories.py
│   │   ├── scan_todos.sh
│   │   ├── soft_delete_test_memories.py
│   │   └── sync_status.py
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── pre_pr_check.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks_on_files.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── bulk_evaluate_onnx.py
│   │   ├── check_test_scores.py
│   │   ├── debug_deberta_scoring.py
│   │   ├── export_deberta_onnx.py
│   │   ├── fix_dead_code_install.sh
│   │   ├── migrate_to_deberta.py
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── rescore_deberta.py
│   │   ├── rescore_fallback.py
│   │   ├── reset_onnx_scores.py
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── memory_wrapper_cleanup.ps1
│   │   ├── memory_wrapper_cleanup.py
│   │   ├── memory_wrapper_cleanup.sh
│   │   ├── README_CLEANUP_WRAPPER.md
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── http_server_manager.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   ├── update_service.sh
│   │   └── windows
│   │       ├── add_watchdog_trigger.ps1
│   │       ├── install_scheduled_task.ps1
│   │       ├── manage_service.ps1
│   │       ├── run_http_server_background.ps1
│   │       ├── uninstall_scheduled_task.ps1
│   │       └── update_and_restart.ps1
│   ├── setup-lightweight.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── update_and_restart.sh
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── detect_platform.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── README_detect_platform.md
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── check_handler_coverage.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_graph_tools.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── _version.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── quality
│       │   ├── __init__.py
│       │   ├── ai_evaluator.py
│       │   ├── async_scorer.py
│       │   ├── config.py
│       │   ├── implicit_signals.py
│       │   ├── metadata_codec.py
│       │   ├── onnx_ranker.py
│       │   └── scorer.py
│       ├── server
│       │   ├── __init__.py
│       │   ├── __main__.py
│       │   ├── cache_manager.py
│       │   ├── client_detection.py
│       │   ├── environment.py
│       │   ├── handlers
│       │   │   ├── __init__.py
│       │   │   ├── consolidation.py
│       │   │   ├── documents.py
│       │   │   ├── graph.py
│       │   │   ├── memory.py
│       │   │   ├── quality.py
│       │   │   └── utility.py
│       │   └── logging_config.py
│       ├── server_impl.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── graph.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   ├── migrations
│       │   │   └── 008_add_graph_table.sql
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── directory_ingestion.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── health_check.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── quality_analytics.py
│       │   ├── startup_orchestrator.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── quality.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── i18n
│               │   ├── de.json
│               │   ├── en.json
│               │   ├── es.json
│               │   ├── fr.json
│               │   ├── ja.json
│               │   ├── ko.json
│               │   └── zh.json
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── TESTING_NOTES.md
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   ├── test_forgetting.py
│   │   └── test_graph_modes.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── conftest.py
│   │   ├── HANDLER_COVERAGE_REPORT.md
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_all_memory_handlers.py
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── storage
│   │   ├── conftest.py
│   │   └── test_graph_storage.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_deberta_quality.py
│   ├── test_fallback_quality.py
│   ├── test_graph_traversal.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_lightweight_onnx.py
│   ├── test_memory_ops.py
│   ├── test_memory_wrapper_cleanup.py
│   ├── test_quality_integration.py
│   ├── test_quality_system.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_imports.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       ├── test_tag_time_filtering.py
│       └── test_uv_no_pip_installer_fallback.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
├── uv.lock
└── verify_compression.sh
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/server_impl.py:
--------------------------------------------------------------------------------

```python
   1 | # Copyright 2024 Heinrich Krupp
   2 | #
   3 | # Licensed under the Apache License, Version 2.0 (the "License");
   4 | # you may not use this file except in compliance with the License.
   5 | # You may obtain a copy of the License at
   6 | #
   7 | #     http://www.apache.org/licenses/LICENSE-2.0
   8 | #
   9 | # Unless required by applicable law or agreed to in writing, software
  10 | # distributed under the License is distributed on an "AS IS" BASIS,
  11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 | # See the License for the specific language governing permissions and
  13 | # limitations under the License.
  14 | 
  15 | """
  16 | MCP Memory Service
  17 | Copyright (c) 2024 Heinrich Krupp
  18 | Licensed under the Apache License, Version 2.0. See LICENSE file in the project root for full license text.
  19 | """
  20 | # Standard library imports
  21 | import sys
  22 | import os
  23 | import time
  24 | import asyncio
  25 | import traceback
  26 | import json
  27 | import platform
  28 | import logging
  29 | from collections import deque
  30 | from typing import List, Dict, Any, Optional, Tuple
  31 | from datetime import datetime, timedelta
  32 | 
  33 | # Import from server package modules
  34 | from .server import (
  35 |     # Client Detection
  36 |     MCP_CLIENT,
  37 |     detect_mcp_client,
  38 |     # Logging
  39 |     DualStreamHandler,
  40 |     logger,
  41 |     # Environment
  42 |     setup_python_paths,
  43 |     check_uv_environment,
  44 |     check_version_consistency,
  45 |     configure_environment,
  46 |     configure_performance_environment,
  47 |     # Cache
  48 |     _STORAGE_CACHE,
  49 |     _MEMORY_SERVICE_CACHE,
  50 |     _CACHE_LOCK,
  51 |     _CACHE_STATS,
  52 |     _get_cache_lock,
  53 |     _get_or_create_memory_service,
  54 |     _log_cache_performance
  55 | )
  56 | 
  57 | # MCP protocol imports
  58 | from mcp.server.models import InitializationOptions
  59 | import mcp.types as types
  60 | from mcp.server import NotificationOptions, Server
  61 | import mcp.server.stdio
  62 | from mcp.types import Resource, Prompt
  63 | 
  64 | # Package imports
  65 | from . import __version__
  66 | from .lm_studio_compat import patch_mcp_for_lm_studio, add_windows_timeout_handling
  67 | from .dependency_check import run_dependency_check, get_recommended_timeout
  68 | from .config import (
  69 |     BACKUPS_PATH,
  70 |     SERVER_NAME,
  71 |     SERVER_VERSION,
  72 |     STORAGE_BACKEND,
  73 |     EMBEDDING_MODEL_NAME,
  74 |     SQLITE_VEC_PATH,
  75 |     CONSOLIDATION_ENABLED,
  76 |     CONSOLIDATION_CONFIG,
  77 |     CONSOLIDATION_SCHEDULE,
  78 |     INCLUDE_HOSTNAME,
  79 |     # Cloudflare configuration
  80 |     CLOUDFLARE_API_TOKEN,
  81 |     CLOUDFLARE_ACCOUNT_ID,
  82 |     CLOUDFLARE_VECTORIZE_INDEX,
  83 |     CLOUDFLARE_D1_DATABASE_ID,
  84 |     CLOUDFLARE_R2_BUCKET,
  85 |     CLOUDFLARE_EMBEDDING_MODEL,
  86 |     CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
  87 |     CLOUDFLARE_MAX_RETRIES,
  88 |     CLOUDFLARE_BASE_DELAY,
  89 |     # Hybrid backend configuration
  90 |     HYBRID_SYNC_INTERVAL,
  91 |     HYBRID_BATCH_SIZE,
  92 |     HYBRID_SYNC_ON_STARTUP
  93 | )
  94 | # Storage imports will be done conditionally in the server class
  95 | from .models.memory import Memory
  96 | from .utils.hashing import generate_content_hash
  97 | from .utils.document_processing import _process_and_store_chunk
  98 | from .utils.system_detection import (
  99 |     get_system_info,
 100 |     print_system_diagnostics,
 101 |     AcceleratorType
 102 | )
 103 | from .services.memory_service import MemoryService
 104 | from .utils.time_parser import extract_time_expression, parse_time_expression
 105 | 
 106 | # Consolidation system imports (conditional)
 107 | if CONSOLIDATION_ENABLED:
 108 |     from .consolidation.base import ConsolidationConfig
 109 |     from .consolidation.consolidator import DreamInspiredConsolidator
 110 |     from .consolidation.scheduler import ConsolidationScheduler
 111 | 
 112 | # Note: Logging is already configured in server.logging_config module
 113 | 
 114 | # Configure performance-critical module logging
 115 | if not os.getenv('DEBUG_MODE'):
 116 |     # Set higher log levels for performance-critical modules
 117 |     for module_name in ['sentence_transformers', 'transformers', 'torch', 'numpy']:
 118 |         logging.getLogger(module_name).setLevel(logging.WARNING)
 119 | 
 120 | class MemoryServer:
 121 |     def __init__(self):
 122 |         """Initialize the server with hardware-aware configuration."""
 123 |         self.server = Server(SERVER_NAME)
 124 |         self.system_info = get_system_info()
 125 |         
 126 |         # Initialize query time tracking
 127 |         self.query_times = deque(maxlen=50)  # Keep last 50 query times for averaging
 128 |         
 129 |         # Initialize progress tracking
 130 |         self.current_progress = {}  # Track ongoing operations
 131 |         
 132 |         # Initialize consolidation system (if enabled)
 133 |         self.consolidator = None
 134 |         self.consolidation_scheduler = None
 135 |         if CONSOLIDATION_ENABLED:
 136 |             try:
 137 |                 config = ConsolidationConfig(**CONSOLIDATION_CONFIG)
 138 |                 self.consolidator = None  # Will be initialized after storage
 139 |                 self.consolidation_scheduler = None  # Will be initialized after consolidator
 140 |                 logger.info("Consolidation system will be initialized after storage")
 141 |             except Exception as e:
 142 |                 logger.error(f"Failed to initialize consolidation config: {e}")
 143 |                 self.consolidator = None
 144 |                 self.consolidation_scheduler = None
 145 |         
 146 |         try:
 147 |             # Initialize paths
 148 |             logger.info(f"Creating directories if they don't exist...")
 149 |             os.makedirs(BACKUPS_PATH, exist_ok=True)
 150 | 
 151 |             # Log system diagnostics
 152 |             logger.info(f"Initializing on {platform.system()} {platform.machine()} with Python {platform.python_version()}")
 153 |             logger.info(f"Using accelerator: {self.system_info.accelerator}")
 154 | 
 155 |             # DEFER STORAGE INITIALIZATION - Initialize storage lazily when needed
 156 |             # This prevents hanging during server startup due to embedding model loading
 157 |             logger.info(f"Deferring {STORAGE_BACKEND} storage initialization to prevent hanging")
 158 |             if MCP_CLIENT == 'lm_studio':
 159 |                 print(f"Deferring {STORAGE_BACKEND} storage initialization to prevent startup hanging", file=sys.stdout, flush=True)
 160 |             self.storage = None
 161 |             self.memory_service = None
 162 |             self._storage_initialized = False
 163 | 
 164 |         except Exception as e:
 165 |             logger.error(f"Initialization error: {str(e)}")
 166 |             logger.error(traceback.format_exc())
 167 |             
 168 |             # Set storage to None to prevent any hanging
 169 |             self.storage = None
 170 |             self.memory_service = None
 171 |             self._storage_initialized = False
 172 |         
 173 |         # Register handlers
 174 |         self.register_handlers()
 175 |         logger.info("Server initialization complete")
 176 |         
 177 |         # Test handler registration with proper arguments
 178 |         try:
 179 |             logger.info("Testing handler registration...")
 180 |             capabilities = self.server.get_capabilities(
 181 |                 notification_options=NotificationOptions(),
 182 |                 experimental_capabilities={}
 183 |             )
 184 |             logger.info(f"Server capabilities: {capabilities}")
 185 |             if MCP_CLIENT == 'lm_studio':
 186 |                 print(f"Server capabilities registered successfully!", file=sys.stdout, flush=True)
 187 |         except Exception as e:
 188 |             logger.error(f"Handler registration test failed: {str(e)}")
 189 |             print(f"Handler registration issue: {str(e)}", file=sys.stderr, flush=True)
 190 |     
 191 |     def record_query_time(self, query_time_ms: float):
 192 |         """Record a query time for averaging."""
 193 |         self.query_times.append(query_time_ms)
 194 |         logger.debug(f"Recorded query time: {query_time_ms:.2f}ms")
 195 |     
 196 |     def get_average_query_time(self) -> float:
 197 |         """Get the average query time from recent operations."""
 198 |         if not self.query_times:
 199 |             return 0.0
 200 |         
 201 |         avg = sum(self.query_times) / len(self.query_times)
 202 |         logger.debug(f"Average query time: {avg:.2f}ms (from {len(self.query_times)} samples)")
 203 |         return round(avg, 2)
 204 |     
 205 |     async def send_progress_notification(self, operation_id: str, progress: float, message: str = None):
 206 |         """Send a progress notification for a long-running operation."""
 207 |         try:
 208 |             # Store progress for potential querying
 209 |             self.current_progress[operation_id] = {
 210 |                 "progress": progress,
 211 |                 "message": message or f"Operation {operation_id}: {progress:.0f}% complete",
 212 |                 "timestamp": datetime.now().isoformat()
 213 |             }
 214 |             
 215 |             # Send notification if server supports it
 216 |             if hasattr(self.server, 'send_progress_notification'):
 217 |                 await self.server.send_progress_notification(
 218 |                     progress=progress,
 219 |                     progress_token=operation_id,
 220 |                     message=message
 221 |                 )
 222 |             
 223 |             logger.debug(f"Progress {operation_id}: {progress:.0f}% - {message}")
 224 |             
 225 |             # Clean up completed operations
 226 |             if progress >= 100:
 227 |                 self.current_progress.pop(operation_id, None)
 228 |                 
 229 |         except Exception as e:
 230 |             logger.debug(f"Could not send progress notification: {e}")
 231 |     
 232 |     def get_operation_progress(self, operation_id: str) -> Optional[Dict[str, Any]]:
 233 |         """Get the current progress of an operation."""
 234 |         return self.current_progress.get(operation_id)
 235 |     
 236 |     async def _initialize_storage_with_timeout(self):
 237 |         """Initialize storage with timeout and caching optimization."""
 238 |         global _STORAGE_CACHE, _MEMORY_SERVICE_CACHE, _CACHE_STATS
 239 | 
 240 |         # Track call statistics
 241 |         _CACHE_STATS["total_calls"] += 1
 242 |         start_time = time.time()
 243 | 
 244 |         logger.info(f"🚀 EAGER INIT Call #{_CACHE_STATS['total_calls']}: Checking global cache...")
 245 | 
 246 |         # Acquire lock for thread-safe cache access
 247 |         cache_lock = _get_cache_lock()
 248 |         async with cache_lock:
 249 |             # Generate cache key for storage backend
 250 |             cache_key = f"{STORAGE_BACKEND}:{SQLITE_VEC_PATH}"
 251 | 
 252 |             # Check storage cache
 253 |             if cache_key in _STORAGE_CACHE:
 254 |                 self.storage = _STORAGE_CACHE[cache_key]
 255 |                 _CACHE_STATS["storage_hits"] += 1
 256 |                 logger.info(f"✅ Storage Cache HIT - Reusing {STORAGE_BACKEND} instance (key: {cache_key})")
 257 |                 self._storage_initialized = True
 258 | 
 259 |                 # Check memory service cache and log performance
 260 |                 self.memory_service = _get_or_create_memory_service(self.storage)
 261 |                 _log_cache_performance(start_time)
 262 | 
 263 |                 return True  # Cached initialization succeeded
 264 | 
 265 |         # Cache miss - proceed with initialization
 266 |         _CACHE_STATS["storage_misses"] += 1
 267 |         logger.info(f"❌ Storage Cache MISS - Initializing {STORAGE_BACKEND} instance...")
 268 | 
 269 |         try:
 270 |             logger.info(f"🚀 EAGER INIT: Starting {STORAGE_BACKEND} storage initialization...")
 271 |             logger.info(f"🔧 EAGER INIT: Environment check - STORAGE_BACKEND={STORAGE_BACKEND}")
 272 |             
 273 |             # Log all Cloudflare config values for debugging
 274 |             if STORAGE_BACKEND == 'cloudflare':
 275 |                 logger.info(f"🔧 EAGER INIT: Cloudflare config validation:")
 276 |                 logger.info(f"   API_TOKEN: {'SET' if CLOUDFLARE_API_TOKEN else 'NOT SET'}")
 277 |                 logger.info(f"   ACCOUNT_ID: {CLOUDFLARE_ACCOUNT_ID}")
 278 |                 logger.info(f"   VECTORIZE_INDEX: {CLOUDFLARE_VECTORIZE_INDEX}")
 279 |                 logger.info(f"   D1_DATABASE_ID: {CLOUDFLARE_D1_DATABASE_ID}")
 280 |                 logger.info(f"   R2_BUCKET: {CLOUDFLARE_R2_BUCKET}")
 281 |                 logger.info(f"   EMBEDDING_MODEL: {CLOUDFLARE_EMBEDDING_MODEL}")
 282 |             
 283 |             if STORAGE_BACKEND == 'sqlite_vec':
 284 |                 # Check for multi-client coordination mode
 285 |                 from .utils.port_detection import ServerCoordinator
 286 |                 coordinator = ServerCoordinator()
 287 |                 coordination_mode = await coordinator.detect_mode()
 288 |                 
 289 |                 logger.info(f"🔧 EAGER INIT: SQLite-vec - detected coordination mode: {coordination_mode}")
 290 |                 
 291 |                 if coordination_mode == "http_client":
 292 |                     # Use HTTP client to connect to existing server
 293 |                     from .storage.http_client import HTTPClientStorage
 294 |                     self.storage = HTTPClientStorage()
 295 |                     logger.info(f"✅ EAGER INIT: Using HTTP client storage")
 296 |                 elif coordination_mode == "http_server":
 297 |                     # Try to auto-start HTTP server for coordination
 298 |                     from .utils.http_server_manager import auto_start_http_server_if_needed
 299 |                     server_started = await auto_start_http_server_if_needed()
 300 |                     
 301 |                     if server_started:
 302 |                         # Wait a moment for the server to be ready, then use HTTP client
 303 |                         await asyncio.sleep(2)
 304 |                         from .storage.http_client import HTTPClientStorage
 305 |                         self.storage = HTTPClientStorage()
 306 |                         logger.info(f"✅ EAGER INIT: Started HTTP server and using HTTP client storage")
 307 |                     else:
 308 |                         # Fall back to direct SQLite-vec storage
 309 |                         from . import storage
 310 |                         import importlib
 311 |                         storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 312 |                         SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 313 |                         self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH, embedding_model=EMBEDDING_MODEL_NAME)
 314 |                         logger.info(f"✅ EAGER INIT: HTTP server auto-start failed, using direct SQLite-vec storage")
 315 |                 else:
 316 |                     # Import sqlite-vec storage module (supports dynamic class replacement)
 317 |                     from . import storage
 318 |                     import importlib
 319 |                     storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 320 |                     SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 321 |                     self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH, embedding_model=EMBEDDING_MODEL_NAME)
 322 |                     logger.info(f"✅ EAGER INIT: Using direct SQLite-vec storage at {SQLITE_VEC_PATH}")
 323 |             elif STORAGE_BACKEND == 'cloudflare':
 324 |                 # Initialize Cloudflare storage
 325 |                 logger.info(f"☁️  EAGER INIT: Importing CloudflareStorage...")
 326 |                 from .storage.cloudflare import CloudflareStorage
 327 |                 logger.info(f"☁️  EAGER INIT: Creating CloudflareStorage instance...")
 328 |                 self.storage = CloudflareStorage(
 329 |                     api_token=CLOUDFLARE_API_TOKEN,
 330 |                     account_id=CLOUDFLARE_ACCOUNT_ID,
 331 |                     vectorize_index=CLOUDFLARE_VECTORIZE_INDEX,
 332 |                     d1_database_id=CLOUDFLARE_D1_DATABASE_ID,
 333 |                     r2_bucket=CLOUDFLARE_R2_BUCKET,
 334 |                     embedding_model=CLOUDFLARE_EMBEDDING_MODEL,
 335 |                     large_content_threshold=CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 336 |                     max_retries=CLOUDFLARE_MAX_RETRIES,
 337 |                     base_delay=CLOUDFLARE_BASE_DELAY
 338 |                 )
 339 |                 logger.info(f"✅ EAGER INIT: CloudflareStorage instance created with index: {CLOUDFLARE_VECTORIZE_INDEX}")
 340 |             elif STORAGE_BACKEND == 'hybrid':
 341 |                 # Initialize Hybrid storage (SQLite-vec + Cloudflare)
 342 |                 logger.info(f"🔄 EAGER INIT: Using Hybrid storage...")
 343 |                 from .storage.hybrid import HybridMemoryStorage
 344 | 
 345 |                 # Prepare Cloudflare configuration dict
 346 |                 cloudflare_config = None
 347 |                 if all([CLOUDFLARE_API_TOKEN, CLOUDFLARE_ACCOUNT_ID, CLOUDFLARE_VECTORIZE_INDEX, CLOUDFLARE_D1_DATABASE_ID]):
 348 |                     cloudflare_config = {
 349 |                         'api_token': CLOUDFLARE_API_TOKEN,
 350 |                         'account_id': CLOUDFLARE_ACCOUNT_ID,
 351 |                         'vectorize_index': CLOUDFLARE_VECTORIZE_INDEX,
 352 |                         'd1_database_id': CLOUDFLARE_D1_DATABASE_ID,
 353 |                         'r2_bucket': CLOUDFLARE_R2_BUCKET,
 354 |                         'embedding_model': CLOUDFLARE_EMBEDDING_MODEL,
 355 |                         'large_content_threshold': CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 356 |                         'max_retries': CLOUDFLARE_MAX_RETRIES,
 357 |                         'base_delay': CLOUDFLARE_BASE_DELAY
 358 |                     }
 359 |                     logger.info(f"🔄 EAGER INIT: Cloudflare config prepared for hybrid storage")
 360 |                 else:
 361 |                     logger.warning("🔄 EAGER INIT: Incomplete Cloudflare config, hybrid will run in SQLite-only mode")
 362 | 
 363 |                 self.storage = HybridMemoryStorage(
 364 |                     sqlite_db_path=SQLITE_VEC_PATH,
 365 |                     embedding_model=EMBEDDING_MODEL_NAME,
 366 |                     cloudflare_config=cloudflare_config,
 367 |                     sync_interval=HYBRID_SYNC_INTERVAL or 300,
 368 |                     batch_size=HYBRID_BATCH_SIZE or 50
 369 |                 )
 370 |                 logger.info(f"✅ EAGER INIT: HybridMemoryStorage instance created")
 371 |             else:
 372 |                 # Unknown backend - should not reach here due to factory validation
 373 |                 logger.error(f"❌ EAGER INIT: Unknown storage backend: {STORAGE_BACKEND}")
 374 |                 raise ValueError(f"Unsupported storage backend: {STORAGE_BACKEND}")
 375 | 
 376 |             # Initialize the storage backend
 377 |             logger.info(f"🔧 EAGER INIT: Calling storage.initialize()...")
 378 |             await self.storage.initialize()
 379 |             logger.info(f"✅ EAGER INIT: storage.initialize() completed successfully")
 380 |             
 381 |             self._storage_initialized = True
 382 |             logger.info(f"🎉 EAGER INIT: {STORAGE_BACKEND} storage initialization successful")
 383 | 
 384 |             # Cache the newly initialized storage instance
 385 |             async with cache_lock:
 386 |                 _STORAGE_CACHE[cache_key] = self.storage
 387 |                 init_time = (time.time() - start_time) * 1000
 388 |                 _CACHE_STATS["initialization_times"].append(init_time)
 389 |                 logger.info(f"💾 Cached storage instance (key: {cache_key}, init_time: {init_time:.1f}ms)")
 390 | 
 391 |                 # Initialize and cache MemoryService
 392 |                 _CACHE_STATS["service_misses"] += 1
 393 |                 self.memory_service = MemoryService(self.storage)
 394 |                 storage_id = id(self.storage)
 395 |                 _MEMORY_SERVICE_CACHE[storage_id] = self.memory_service
 396 |                 logger.info(f"💾 Cached MemoryService instance (storage_id: {storage_id})")
 397 | 
 398 |             # Verify storage type
 399 |             storage_type = self.storage.__class__.__name__
 400 |             logger.info(f"🔍 EAGER INIT: Final storage type verification: {storage_type}")
 401 | 
 402 |             # Initialize consolidation system after storage is ready
 403 |             await self._initialize_consolidation()
 404 | 
 405 |             return True
 406 |         except Exception as e:
 407 |             logger.error(f"❌ EAGER INIT: Storage initialization failed: {str(e)}")
 408 |             logger.error(f"📋 EAGER INIT: Full traceback:")
 409 |             logger.error(traceback.format_exc())
 410 |             return False
 411 | 
 412 |     async def _ensure_storage_initialized(self):
 413 |         """Lazily initialize storage backend when needed with global caching."""
 414 |         if not self._storage_initialized:
 415 |             global _STORAGE_CACHE, _MEMORY_SERVICE_CACHE, _CACHE_STATS
 416 | 
 417 |             # Track call statistics
 418 |             _CACHE_STATS["total_calls"] += 1
 419 |             start_time = time.time()
 420 | 
 421 |             logger.info(f"🔄 LAZY INIT Call #{_CACHE_STATS['total_calls']}: Checking global cache...")
 422 | 
 423 |             # Acquire lock for thread-safe cache access
 424 |             cache_lock = _get_cache_lock()
 425 |             async with cache_lock:
 426 |                 # Generate cache key for storage backend
 427 |                 cache_key = f"{STORAGE_BACKEND}:{SQLITE_VEC_PATH}"
 428 | 
 429 |                 # Check storage cache
 430 |                 if cache_key in _STORAGE_CACHE:
 431 |                     self.storage = _STORAGE_CACHE[cache_key]
 432 |                     _CACHE_STATS["storage_hits"] += 1
 433 |                     logger.info(f"✅ Storage Cache HIT - Reusing {STORAGE_BACKEND} instance (key: {cache_key})")
 434 |                     self._storage_initialized = True
 435 | 
 436 |                     # Check memory service cache and log performance
 437 |                     self.memory_service = _get_or_create_memory_service(self.storage)
 438 |                     _log_cache_performance(start_time)
 439 | 
 440 |                     return self.storage
 441 | 
 442 |             # Cache miss - proceed with initialization
 443 |             _CACHE_STATS["storage_misses"] += 1
 444 |             logger.info(f"❌ Storage Cache MISS - Initializing {STORAGE_BACKEND} instance...")
 445 | 
 446 |             try:
 447 |                 logger.info(f"🔄 LAZY INIT: Starting {STORAGE_BACKEND} storage initialization...")
 448 |                 logger.info(f"🔧 LAZY INIT: Environment check - STORAGE_BACKEND={STORAGE_BACKEND}")
 449 |                 
 450 |                 # Log all Cloudflare config values for debugging
 451 |                 if STORAGE_BACKEND == 'cloudflare':
 452 |                     logger.info(f"🔧 LAZY INIT: Cloudflare config validation:")
 453 |                     logger.info(f"   API_TOKEN: {'SET' if CLOUDFLARE_API_TOKEN else 'NOT SET'}")
 454 |                     logger.info(f"   ACCOUNT_ID: {CLOUDFLARE_ACCOUNT_ID}")
 455 |                     logger.info(f"   VECTORIZE_INDEX: {CLOUDFLARE_VECTORIZE_INDEX}")
 456 |                     logger.info(f"   D1_DATABASE_ID: {CLOUDFLARE_D1_DATABASE_ID}")
 457 |                     logger.info(f"   R2_BUCKET: {CLOUDFLARE_R2_BUCKET}")
 458 |                     logger.info(f"   EMBEDDING_MODEL: {CLOUDFLARE_EMBEDDING_MODEL}")
 459 |                 
 460 |                 if STORAGE_BACKEND == 'sqlite_vec':
 461 |                     # Check for multi-client coordination mode
 462 |                     from .utils.port_detection import ServerCoordinator
 463 |                     coordinator = ServerCoordinator()
 464 |                     coordination_mode = await coordinator.detect_mode()
 465 |                     
 466 |                     logger.info(f"🔧 LAZY INIT: SQLite-vec - detected coordination mode: {coordination_mode}")
 467 |                     
 468 |                     if coordination_mode == "http_client":
 469 |                         # Use HTTP client to connect to existing server
 470 |                         from .storage.http_client import HTTPClientStorage
 471 |                         self.storage = HTTPClientStorage()
 472 |                         logger.info(f"✅ LAZY INIT: Using HTTP client storage")
 473 |                     elif coordination_mode == "http_server":
 474 |                         # Try to auto-start HTTP server for coordination
 475 |                         from .utils.http_server_manager import auto_start_http_server_if_needed
 476 |                         server_started = await auto_start_http_server_if_needed()
 477 |                         
 478 |                         if server_started:
 479 |                             # Wait a moment for the server to be ready, then use HTTP client
 480 |                             await asyncio.sleep(2)
 481 |                             from .storage.http_client import HTTPClientStorage
 482 |                             self.storage = HTTPClientStorage()
 483 |                             logger.info(f"✅ LAZY INIT: Started HTTP server and using HTTP client storage")
 484 |                         else:
 485 |                             # Fall back to direct SQLite-vec storage
 486 |                             import importlib
 487 |                             storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 488 |                             SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 489 |                             self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH, embedding_model=EMBEDDING_MODEL_NAME)
 490 |                             logger.info(f"✅ LAZY INIT: HTTP server auto-start failed, using direct SQLite-vec storage at: {SQLITE_VEC_PATH}")
 491 |                     else:
 492 |                         # Use direct SQLite-vec storage (with WAL mode for concurrent access)
 493 |                         import importlib
 494 |                         storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 495 |                         SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 496 |                         self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH, embedding_model=EMBEDDING_MODEL_NAME)
 497 |                         logger.info(f"✅ LAZY INIT: Created SQLite-vec storage at: {SQLITE_VEC_PATH}")
 498 |                 elif STORAGE_BACKEND == 'cloudflare':
 499 |                     # Cloudflare backend using Vectorize, D1, and R2
 500 |                     logger.info(f"☁️  LAZY INIT: Importing CloudflareStorage...")
 501 |                     from .storage.cloudflare import CloudflareStorage
 502 |                     logger.info(f"☁️  LAZY INIT: Creating CloudflareStorage instance...")
 503 |                     self.storage = CloudflareStorage(
 504 |                         api_token=CLOUDFLARE_API_TOKEN,
 505 |                         account_id=CLOUDFLARE_ACCOUNT_ID,
 506 |                         vectorize_index=CLOUDFLARE_VECTORIZE_INDEX,
 507 |                         d1_database_id=CLOUDFLARE_D1_DATABASE_ID,
 508 |                         r2_bucket=CLOUDFLARE_R2_BUCKET,
 509 |                         embedding_model=CLOUDFLARE_EMBEDDING_MODEL,
 510 |                         large_content_threshold=CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 511 |                         max_retries=CLOUDFLARE_MAX_RETRIES,
 512 |                         base_delay=CLOUDFLARE_BASE_DELAY
 513 |                     )
 514 |                     logger.info(f"✅ LAZY INIT: Created Cloudflare storage with Vectorize index: {CLOUDFLARE_VECTORIZE_INDEX}")
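     |                     # Minimal config sketch for this branch (env var names assumed
     |                     # to mirror the constants above; verify against config.py):
     |                     #   export CLOUDFLARE_API_TOKEN=...        # required secret
     |                     #   export CLOUDFLARE_ACCOUNT_ID=...       # required
     |                     #   export CLOUDFLARE_VECTORIZE_INDEX=...  # required
     |                     #   export CLOUDFLARE_D1_DATABASE_ID=...   # required
     |                     #   export CLOUDFLARE_R2_BUCKET=...        # optional, large content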
 515 |                 elif STORAGE_BACKEND == 'hybrid':
 516 |                     # Hybrid backend using SQLite-vec as primary and Cloudflare as secondary
 517 |                     logger.info(f"🔄 LAZY INIT: Importing HybridMemoryStorage...")
 518 |                     from .storage.hybrid import HybridMemoryStorage
 519 | 
 520 |                     # Prepare Cloudflare configuration dict
 521 |                     cloudflare_config = None
 522 |                     if all([CLOUDFLARE_API_TOKEN, CLOUDFLARE_ACCOUNT_ID, CLOUDFLARE_VECTORIZE_INDEX, CLOUDFLARE_D1_DATABASE_ID]):
 523 |                         cloudflare_config = {
 524 |                             'api_token': CLOUDFLARE_API_TOKEN,
 525 |                             'account_id': CLOUDFLARE_ACCOUNT_ID,
 526 |                             'vectorize_index': CLOUDFLARE_VECTORIZE_INDEX,
 527 |                             'd1_database_id': CLOUDFLARE_D1_DATABASE_ID,
 528 |                             'r2_bucket': CLOUDFLARE_R2_BUCKET,
 529 |                             'embedding_model': CLOUDFLARE_EMBEDDING_MODEL,
 530 |                             'large_content_threshold': CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 531 |                             'max_retries': CLOUDFLARE_MAX_RETRIES,
 532 |                             'base_delay': CLOUDFLARE_BASE_DELAY
 533 |                         }
 534 |                         logger.info(f"🔄 LAZY INIT: Cloudflare config prepared for hybrid storage")
 535 |                     else:
 536 |                         logger.warning("🔄 LAZY INIT: Incomplete Cloudflare config, hybrid will run in SQLite-only mode")
 537 | 
 538 |                     logger.info(f"🔄 LAZY INIT: Creating HybridMemoryStorage instance...")
 539 |                     self.storage = HybridMemoryStorage(
 540 |                         sqlite_db_path=SQLITE_VEC_PATH,
 541 |                         embedding_model=EMBEDDING_MODEL_NAME,
 542 |                         cloudflare_config=cloudflare_config,
 543 |                         sync_interval=HYBRID_SYNC_INTERVAL or 300,
 544 |                         batch_size=HYBRID_BATCH_SIZE or 50
 545 |                     )
 546 |                     logger.info(f"✅ LAZY INIT: Created Hybrid storage at: {SQLITE_VEC_PATH} with Cloudflare sync")
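     |                     # Tuning sketch: with the fallbacks above, a hybrid instance syncs
     |                     # every 300s in batches of 50 unless the (assumed) config constants
     |                     # are overridden, e.g.:
     |                     #   HYBRID_SYNC_INTERVAL = 60    # sync every minute
     |                     #   HYBRID_BATCH_SIZE = 100      # larger sync batches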
 547 |                 else:
 548 |                     # Unknown/unsupported backend
 549 |                     logger.error("=" * 70)
 550 |                     logger.error(f"❌ LAZY INIT: Unsupported storage backend: {STORAGE_BACKEND}")
 551 |                     logger.error("")
 552 |                     logger.error("Supported backends:")
 553 |                     logger.error("  - sqlite_vec (recommended for single-device use)")
 554 |                     logger.error("  - cloudflare (cloud storage)")
 555 |                     logger.error("  - hybrid (recommended for multi-device use)")
 556 |                     logger.error("=" * 70)
 557 |                     raise ValueError(
 558 |                         f"Unsupported storage backend: {STORAGE_BACKEND}. "
 559 |                         "Use 'sqlite_vec', 'cloudflare', or 'hybrid'."
 560 |                     )
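     |                 # Backend selection sketch (assumed env var name backing the
     |                 # STORAGE_BACKEND constant; check config.py before relying on it):
     |                 #   export MCP_MEMORY_STORAGE_BACKEND=hybrid   # or sqlite_vec / cloudflare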
 561 |                 
 562 |                 # Initialize the storage backend
 563 |                 logger.info(f"🔧 LAZY INIT: Calling storage.initialize()...")
 564 |                 await self.storage.initialize()
 565 |                 logger.info(f"✅ LAZY INIT: storage.initialize() completed successfully")
 566 |                 
 567 |                 # Verify the storage is properly initialized
 568 |                 if hasattr(self.storage, 'is_initialized') and not self.storage.is_initialized():
 569 |                     # Get detailed status for debugging
 570 |                     if hasattr(self.storage, 'get_initialization_status'):
 571 |                         status = self.storage.get_initialization_status()
 572 |                         logger.error(f"❌ LAZY INIT: Storage initialization incomplete: {status}")
 573 |                     raise RuntimeError("Storage initialization incomplete")
 574 |                 
 575 |                 self._storage_initialized = True
 576 |                 storage_type = self.storage.__class__.__name__
 577 |                 logger.info(f"🎉 LAZY INIT: Storage backend ({STORAGE_BACKEND}) initialization successful")
 578 |                 logger.info(f"🔍 LAZY INIT: Final storage type verification: {storage_type}")
 579 | 
 580 |                 # Cache the newly initialized storage instance
 581 |                 async with cache_lock:
 582 |                     _STORAGE_CACHE[cache_key] = self.storage
 583 |                     init_time = (time.time() - start_time) * 1000
 584 |                     _CACHE_STATS["initialization_times"].append(init_time)
 585 |                     logger.info(f"💾 Cached storage instance (key: {cache_key}, init_time: {init_time:.1f}ms)")
 586 | 
 587 |                     # Initialize and cache MemoryService
 588 |                     _CACHE_STATS["service_misses"] += 1
 589 |                     self.memory_service = MemoryService(self.storage)
 590 |                     storage_id = id(self.storage)
 591 |                     _MEMORY_SERVICE_CACHE[storage_id] = self.memory_service
 592 |                     logger.info(f"💾 Cached MemoryService instance (storage_id: {storage_id})")
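     |                 # Design note: two caches cooperate here - storage instances are
     |                 # keyed by backend config, while MemoryService wrappers are keyed
     |                 # by id(storage), so a cache-hit storage reuses its existing service.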
 593 | 
 594 |                 # Initialize consolidation system after storage is ready
 595 |                 await self._initialize_consolidation()
 596 | 
 597 |             except Exception as e:
 598 |                 logger.error(f"❌ LAZY INIT: Failed to initialize {STORAGE_BACKEND} storage: {str(e)}")
 599 |                 logger.error(f"📋 LAZY INIT: Full traceback:")
 600 |                 logger.error(traceback.format_exc())
 601 |                 # Set storage to None to indicate failure
 602 |                 self.storage = None
 603 |                 self._storage_initialized = False
 604 |                 raise
 605 |         return self.storage
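     |     # Usage sketch: handlers gate on this helper before touching storage,
     |     #   storage = await self._ensure_storage_initialized()
     |     # so a broken backend fails once here rather than inside every tool call.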
 606 | 
 607 |     async def initialize(self):
 608 |         """Async initialization method with eager storage initialization and timeout."""
 609 |         try:
 610 |             # Run any async initialization tasks here
 611 |             logger.info("🚀 SERVER INIT: Starting async initialization...")
 612 |             
 613 |             # Print system diagnostics only for LM Studio (avoid JSON parsing errors in Claude Desktop)
 614 |             if MCP_CLIENT == 'lm_studio':
 615 |                 print("\n=== System Diagnostics ===", file=sys.stdout, flush=True)
 616 |                 print(f"OS: {self.system_info.os_name} {self.system_info.os_version}", file=sys.stdout, flush=True)
 617 |                 print(f"Architecture: {self.system_info.architecture}", file=sys.stdout, flush=True)
 618 |                 print(f"Memory: {self.system_info.memory_gb:.2f} GB", file=sys.stdout, flush=True)
 619 |                 print(f"Accelerator: {self.system_info.accelerator}", file=sys.stdout, flush=True)
 620 |                 print(f"Python: {platform.python_version()}", file=sys.stdout, flush=True)
 621 |             
 622 |             # Log environment info
 623 |             logger.info(f"🔧 SERVER INIT: Environment - STORAGE_BACKEND={STORAGE_BACKEND}")
 624 |             
 625 |             # Attempt eager storage initialization with timeout
 626 |             # Get dynamic timeout based on system and dependency status
 627 |             timeout_seconds = get_recommended_timeout()
 628 |             logger.info(f"⏱️  SERVER INIT: Attempting eager storage initialization (timeout: {timeout_seconds}s)...")
 629 |             if MCP_CLIENT == 'lm_studio':
 630 |                 print(f"Attempting eager storage initialization (timeout: {timeout_seconds}s)...", file=sys.stdout, flush=True)
 631 |             try:
 632 |                 init_task = asyncio.create_task(self._initialize_storage_with_timeout())
 633 |                 success = await asyncio.wait_for(init_task, timeout=timeout_seconds)
 634 |                 if success:
 635 |                     if MCP_CLIENT == 'lm_studio':
 636 |                         print("[OK] Eager storage initialization successful", file=sys.stdout, flush=True)
 637 |                     logger.info("✅ SERVER INIT: Eager storage initialization completed successfully")
 638 |                     
 639 |                     # Verify storage type after successful eager init
 640 |                     if hasattr(self, 'storage') and self.storage:
 641 |                         storage_type = self.storage.__class__.__name__
 642 |                         logger.info(f"🔍 SERVER INIT: Eager init resulted in storage type: {storage_type}")
 643 |                 else:
 644 |                     if MCP_CLIENT == 'lm_studio':
 645 |                         print("[WARNING] Eager storage initialization failed, will use lazy loading", file=sys.stdout, flush=True)
 646 |                     logger.warning("⚠️  SERVER INIT: Eager initialization failed, falling back to lazy loading")
 647 |                     # Reset state for lazy loading
 648 |                     self.storage = None
 649 |                     self._storage_initialized = False
 650 |             except asyncio.TimeoutError:
 651 |                 if MCP_CLIENT == 'lm_studio':
 652 |                     print("[TIMEOUT] Eager storage initialization timed out, will use lazy loading", file=sys.stdout, flush=True)
 653 |                 logger.warning(f"⏱️  SERVER INIT: Storage initialization timed out after {timeout_seconds}s, falling back to lazy loading")
 654 |                 # Reset state for lazy loading
 655 |                 self.storage = None
 656 |                 self._storage_initialized = False
 657 |             except Exception as e:
 658 |                 if MCP_CLIENT == 'lm_studio':
 659 |                     print(f"[WARNING] Eager initialization error: {str(e)}, will use lazy loading", file=sys.stdout, flush=True)
 660 |                 logger.warning(f"⚠️  SERVER INIT: Eager initialization error: {str(e)}, falling back to lazy loading")
 661 |                 logger.warning(f"📋 SERVER INIT: Eager init error traceback:")
 662 |                 logger.warning(traceback.format_exc())
 663 |                 # Reset state for lazy loading
 664 |                 self.storage = None
 665 |                 self._storage_initialized = False
 666 |             
 667 |             # Add explicit console output for Smithery to see (only for LM Studio)
 668 |             if MCP_CLIENT == 'lm_studio':
 669 |                 print("MCP Memory Service initialization completed", file=sys.stdout, flush=True)
 670 |             
 671 |             logger.info("🎉 SERVER INIT: Async initialization completed")
 672 |             return True
 673 |         except Exception as e:
 674 |             logger.error(f"❌ SERVER INIT: Async initialization error: {str(e)}")
 675 |             logger.error(f"📋 SERVER INIT: Full traceback:")
 676 |             logger.error(traceback.format_exc())
 677 |             # Add explicit console error output for Smithery to see
 678 |             print(f"Initialization error: {str(e)}", file=sys.stderr, flush=True)
 679 |             # Don't raise the exception, just return False
 680 |             return False
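     |     # Pattern sketch: eager init is bounded by asyncio.wait_for so a slow backend
     |     # cannot stall MCP startup; the same idea in miniature:
     |     #   task = asyncio.create_task(slow_init())
     |     #   try:
     |     #       ok = await asyncio.wait_for(task, timeout=timeout_seconds)
     |     #   except asyncio.TimeoutError:
     |     #       ok = False   # defer to lazy initialization on first use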
 681 | 
 682 |     async def validate_database_health(self):
 683 |         """Validate database health during initialization."""
 684 |         from .utils.db_utils import validate_database, repair_database
 685 |         
 686 |         try:
 687 |             # Check database health
 688 |             is_valid, message = await validate_database(self.storage)
 689 |             if not is_valid:
 690 |                 logger.warning(f"Database validation failed: {message}")
 691 |                 
 692 |                 # Attempt repair
 693 |                 logger.info("Attempting database repair...")
 694 |                 repair_success, repair_message = await repair_database(self.storage)
 695 |                 
 696 |                 if not repair_success:
 697 |                     logger.error(f"Database repair failed: {repair_message}")
 698 |                     return False
 699 |                 else:
 700 |                     logger.info(f"Database repair successful: {repair_message}")
 701 |                     return True
 702 |             else:
 703 |                 logger.info(f"Database validation successful: {message}")
 704 |                 return True
 705 |         except Exception as e:
 706 |             logger.error(f"Database validation error: {str(e)}")
 707 |             return False
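     |     # Usage sketch of the validate-then-repair flow above:
     |     #   healthy = await self.validate_database_health()
     |     #   if not healthy:
     |     #       ...  # e.g. surface a degraded-mode warning to the client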
 708 | 
 709 |     async def _initialize_consolidation(self):
 710 |         """Initialize the consolidation system after storage is ready."""
 711 |         if not CONSOLIDATION_ENABLED or not self._storage_initialized:
 712 |             return
 713 |         
 714 |         try:
 715 |             if self.consolidator is None:
 716 |                 # Create consolidation config
 717 |                 config = ConsolidationConfig(**CONSOLIDATION_CONFIG)
 718 |                 
 719 |                 # Initialize the consolidator with storage
 720 |                 self.consolidator = DreamInspiredConsolidator(self.storage, config)
 721 |                 logger.info("Dream-inspired consolidator initialized")
 722 |                 
 723 |                 # Initialize the scheduler if not disabled
 724 |                 if any(schedule != 'disabled' for schedule in CONSOLIDATION_SCHEDULE.values()):
 725 |                     self.consolidation_scheduler = ConsolidationScheduler(
 726 |                         self.consolidator, 
 727 |                         CONSOLIDATION_SCHEDULE, 
 728 |                         enabled=True
 729 |                     )
 730 |                     
 731 |                     # Start the scheduler
 732 |                     if await self.consolidation_scheduler.start():
 733 |                         logger.info("Consolidation scheduler started successfully")
 734 |                     else:
 735 |                         logger.warning("Failed to start consolidation scheduler")
 736 |                         self.consolidation_scheduler = None
 737 |                 else:
 738 |                     logger.info("Consolidation scheduler disabled (all schedules set to 'disabled')")
 739 |                 
 740 |         except Exception as e:
 741 |             logger.error(f"Failed to initialize consolidation system: {e}")
 742 |             logger.error(traceback.format_exc())
 743 |             self.consolidator = None
 744 |             self.consolidation_scheduler = None
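     |     # Schedule shape sketch (assumed keys and time format; only the 'disabled'
     |     # sentinel is guaranteed by the check above):
     |     #   CONSOLIDATION_SCHEDULE = {"daily": "02:00", "weekly": "disabled"}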
 745 | 
 746 |     def handle_method_not_found(self, method: str) -> None:
 747 |         """Custom handler for unsupported methods.
 748 |         
 749 |         This logs the unsupported method request but doesn't raise an exception,
 750 |         allowing the MCP server to handle it with a standard JSON-RPC error response.
 751 |         """
 752 |         logger.warning(f"Unsupported method requested: {method}")
 753 |         # The MCP server will automatically respond with a Method not found error
 754 |         # We don't need to do anything else here
 755 |     
 756 |     def register_handlers(self):
 757 |         # Enhanced Resources implementation
 758 |         @self.server.list_resources()
 759 |         async def handle_list_resources() -> List[Resource]:
 760 |             """List available memory resources."""
 761 |             await self._ensure_storage_initialized()
 762 |             
 763 |             resources = [
 764 |                 types.Resource(
 765 |                     uri="memory://stats",
 766 |                     name="Memory Statistics",
 767 |                     description="Current memory database statistics",
 768 |                     mimeType="application/json"
 769 |                 ),
 770 |                 types.Resource(
 771 |                     uri="memory://tags",
 772 |                     name="Available Tags",
 773 |                     description="List of all tags used in memories",
 774 |                     mimeType="application/json"
 775 |                 ),
 776 |                 types.Resource(
 777 |                     uri="memory://recent/10",
 778 |                     name="Recent Memories",
 779 |                     description="10 most recent memories",
 780 |                     mimeType="application/json"
 781 |                 )
 782 |             ]
 783 |             
 784 |             # Add tag-specific resources for existing tags
 785 |             try:
 786 |                 all_tags = await self.storage.get_all_tags()
 787 |                 for tag in all_tags[:5]:  # Limit to first 5 tags for resources
 788 |                     resources.append(types.Resource(
 789 |                         uri=f"memory://tag/{tag}",
 790 |                         name=f"Memories tagged '{tag}'",
 791 |                         description=f"All memories with tag '{tag}'",
 792 |                         mimeType="application/json"
 793 |                     ))
 794 |             except AttributeError:
 795 |                 # get_all_tags method not available on this storage backend
 796 |                 pass
 797 |             except Exception as e:
 798 |                 # Non-fatal: tag-specific resources are optional extras
 799 |                 logger.warning(f"Failed to load tag resources: {e}")
 800 |             
 801 |             return resources
 802 |         
 803 |         @self.server.read_resource()
 804 |         async def handle_read_resource(uri: str) -> str:
 805 |             """Read a specific memory resource."""
 806 |             await self._ensure_storage_initialized()
 807 | 
 808 |             import json
 809 |             from urllib.parse import unquote
 810 | 
 811 |             # Normalize to a plain string (fix for issue #254): the MCP SDK may
 812 |             # pass a Pydantic AnyUrl object instead of str; str() is a safe no-op
 813 |             # for strings, so no hasattr guard is needed
 814 |             uri = str(uri)
 815 | 
 816 |             try:
 817 |                 if uri == "memory://stats":
 818 |                     # Get memory statistics
 819 |                     stats = await self.storage.get_stats()
 820 |                     return json.dumps(stats, indent=2)
 821 |                     
 822 |                 elif uri == "memory://tags":
 823 |                     # Get all available tags
 824 |                     tags = await self.storage.get_all_tags()
 825 |                     return json.dumps({"tags": tags, "count": len(tags)}, indent=2)
 826 |                     
 827 |                 elif uri.startswith("memory://recent/"):
 828 |                     # Get recent memories
 829 |                     n = int(uri.split("/")[-1])
 830 |                     memories = await self.storage.get_recent_memories(n)
 831 |                     return json.dumps({
 832 |                         "memories": [m.to_dict() for m in memories],
 833 |                         "count": len(memories)
 834 |                     }, indent=2, default=str)
 835 |                     
 836 |                 elif uri.startswith("memory://tag/"):
 837 |                     # Get memories by tag
 838 |                     tag = unquote(uri.split("/", 3)[-1])
 839 |                     memories = await self.storage.search_by_tag([tag])
 840 |                     return json.dumps({
 841 |                         "tag": tag,
 842 |                         "memories": [m.to_dict() for m in memories],
 843 |                         "count": len(memories)
 844 |                     }, indent=2, default=str)
 845 |                     
 846 |                 elif uri.startswith("memory://search/"):
 847 |                     # Dynamic search
 848 |                     query = unquote(uri.split("/", 3)[-1])
 849 |                     results = await self.storage.search(query, n_results=10)
 850 |                     return json.dumps({
 851 |                         "query": query,
 852 |                         "results": [r.to_dict() for r in results],
 853 |                         "count": len(results)
 854 |                     }, indent=2, default=str)
 855 |                     
 856 |                 else:
 857 |                     return json.dumps({"error": f"Resource not found: {uri}"}, indent=2)
 858 |                     
 859 |             except Exception as e:
 860 |                 logger.error(f"Error reading resource {uri}: {e}")
 861 |                 return json.dumps({"error": str(e)}, indent=2)
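     |         # URI forms handled above, for reference:
     |         #   memory://stats              -> database statistics
     |         #   memory://tags               -> all known tags
     |         #   memory://recent/10          -> ten most recent memories
     |         #   memory://tag/work           -> memories tagged 'work'
     |         #   memory://search/databases   -> semantic search results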
 862 |         
 863 |         @self.server.list_resource_templates()
 864 |         async def handle_list_resource_templates() -> List[types.ResourceTemplate]:
 865 |             """List resource templates for dynamic queries."""
 866 |             return [
 867 |                 types.ResourceTemplate(
 868 |                     uriTemplate="memory://recent/{n}",
 869 |                     name="Recent Memories",
 870 |                     description="Get N most recent memories",
 871 |                     mimeType="application/json"
 872 |                 ),
 873 |                 types.ResourceTemplate(
 874 |                     uriTemplate="memory://tag/{tag}",
 875 |                     name="Memories by Tag",
 876 |                     description="Get all memories with a specific tag",
 877 |                     mimeType="application/json"
 878 |                 ),
 879 |                 types.ResourceTemplate(
 880 |                     uriTemplate="memory://search/{query}",
 881 |                     name="Search Memories",
 882 |                     description="Search memories by query",
 883 |                     mimeType="application/json"
 884 |                 )
 885 |             ]
 886 |         
 887 |         @self.server.list_prompts()
 888 |         async def handle_list_prompts() -> List[types.Prompt]:
 889 |             """List available guided prompts for memory operations."""
 890 |             return [
 891 |                 types.Prompt(
 892 |                     name="memory_review",
 893 |                     description="Review and organize memories from a specific time period",
 894 |                     arguments=[
 895 |                         types.PromptArgument(
 896 |                             name="time_period",
 897 |                             description="Time period to review (e.g., 'last week', 'yesterday', '2 days ago')",
 898 |                             required=True
 899 |                         ),
 900 |                         types.PromptArgument(
 901 |                             name="focus_area",
 902 |                             description="Optional area to focus on (e.g., 'work', 'personal', 'learning')",
 903 |                             required=False
 904 |                         )
 905 |                     ]
 906 |                 ),
 907 |                 types.Prompt(
 908 |                     name="memory_analysis",
 909 |                     description="Analyze patterns and themes in stored memories",
 910 |                     arguments=[
 911 |                         types.PromptArgument(
 912 |                             name="tags",
 913 |                             description="Tags to analyze (comma-separated)",
 914 |                             required=False
 915 |                         ),
 916 |                         types.PromptArgument(
 917 |                             name="time_range",
 918 |                             description="Time range to analyze (e.g., 'last month', 'all time')",
 919 |                             required=False
 920 |                         )
 921 |                     ]
 922 |                 ),
 923 |                 types.Prompt(
 924 |                     name="knowledge_export",
 925 |                     description="Export memories in a specific format",
 926 |                     arguments=[
 927 |                         types.PromptArgument(
 928 |                             name="format",
 929 |                             description="Export format (json, markdown, text)",
 930 |                             required=True
 931 |                         ),
 932 |                         types.PromptArgument(
 933 |                             name="filter",
 934 |                             description="Filter criteria (tags or search query)",
 935 |                             required=False
 936 |                         )
 937 |                     ]
 938 |                 ),
 939 |                 types.Prompt(
 940 |                     name="memory_cleanup",
 941 |                     description="Identify and remove duplicate or outdated memories",
 942 |                     arguments=[
 943 |                         types.PromptArgument(
 944 |                             name="older_than",
 945 |                             description="Remove memories older than (e.g., '6 months', '1 year')",
 946 |                             required=False
 947 |                         ),
 948 |                         types.PromptArgument(
 949 |                             name="similarity_threshold",
 950 |                             description="Similarity threshold for duplicates (0.0-1.0)",
 951 |                             required=False
 952 |                         )
 953 |                     ]
 954 |                 ),
 955 |                 types.Prompt(
 956 |                     name="learning_session",
 957 |                     description="Store structured learning notes from a study session",
 958 |                     arguments=[
 959 |                         types.PromptArgument(
 960 |                             name="topic",
 961 |                             description="Learning topic or subject",
 962 |                             required=True
 963 |                         ),
 964 |                         types.PromptArgument(
 965 |                             name="key_points",
 966 |                             description="Key points learned (comma-separated)",
 967 |                             required=True
 968 |                         ),
 969 |                         types.PromptArgument(
 970 |                             name="questions",
 971 |                             description="Questions or areas for further study",
 972 |                             required=False
 973 |                         )
 974 |                     ]
 975 |                 )
 976 |             ]
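     |         # Invocation sketch (MCP prompts/get request; arguments follow the
     |         # schemas above):
     |         #   {"name": "memory_review",
     |         #    "arguments": {"time_period": "last week", "focus_area": "work"}}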
 977 |         
 978 |         @self.server.get_prompt()
 979 |         async def handle_get_prompt(name: str, arguments: dict) -> types.GetPromptResult:
 980 |             """Handle prompt execution with provided arguments."""
 981 |             await self._ensure_storage_initialized()
 982 |             
 983 |             # Dispatch to the matching prompt helper. The helpers below are
     |             # closure-local functions, not instance methods, so call them
     |             # directly and pass self explicitly.
 984 |             if name == "memory_review":
 985 |                 messages = await _prompt_memory_review(self, arguments)
 986 |             elif name == "memory_analysis":
 987 |                 messages = await _prompt_memory_analysis(self, arguments)
 988 |             elif name == "knowledge_export":
 989 |                 messages = await _prompt_knowledge_export(self, arguments)
 990 |             elif name == "memory_cleanup":
 991 |                 messages = await _prompt_memory_cleanup(self, arguments)
 992 |             elif name == "learning_session":
 993 |                 messages = await _prompt_learning_session(self, arguments)
 994 |             else:
 995 |                 messages = [
 996 |                     types.PromptMessage(
 997 |                         role="user",
 998 |                         content=types.TextContent(
 999 |                             type="text",
1000 |                             text=f"Unknown prompt: {name}"
1001 |                         )
1002 |                     )
1003 |                 ]
1004 |             
1005 |             return types.GetPromptResult(
1006 |                 description=f"Result of {name} prompt",
1007 |                 messages=messages
1008 |             )
1009 |         
1010 |         # Helper functions for the prompts above (closure-local, not instance methods; they take self explicitly)
1011 |         async def _prompt_memory_review(self, arguments: dict) -> list:
1012 |             """Generate memory review prompt."""
1013 |             time_period = arguments.get("time_period", "last week")
1014 |             focus_area = arguments.get("focus_area", "")
1015 |             
1016 |             # Retrieve memories from the specified time period
1017 |             memories = await self.storage.recall_memory(time_period, n_results=20)
1018 |             
1019 |             prompt_text = f"Review of memories from {time_period}"
1020 |             if focus_area:
1021 |                 prompt_text += f" (focusing on {focus_area})"
1022 |             prompt_text += ":\n\n"
1023 |             
1024 |             if memories:
1025 |                 for mem in memories:
1026 |                     prompt_text += f"- {mem.content}\n"
1027 |                     if mem.tags:
1028 |                         prompt_text += f"  Tags: {', '.join(mem.tags)}\n"
1029 |             else:
1030 |                 prompt_text += "No memories found for this time period."
1031 |             
1032 |             return [
1033 |                 types.PromptMessage(
1034 |                     role="user",
1035 |                     content=types.TextContent(type="text", text=prompt_text)
1036 |                 )
1037 |             ]
1038 |         
1039 |         async def _prompt_memory_analysis(self, arguments: dict) -> list:
1040 |             """Generate memory analysis prompt."""
1041 |             tags = [t.strip() for t in arguments.get("tags", "").split(",") if t.strip()]
1042 |             time_range = arguments.get("time_range", "all time")
1043 |             
1044 |             analysis_text = "Memory Analysis"
1045 |             if tags:
1046 |                 analysis_text += f" for tags: {', '.join(tags)}"
1047 |             if time_range != "all time":
1048 |                 analysis_text += f" from {time_range}"
1049 |             analysis_text += "\n\n"
1050 |             
1051 |             # Get relevant memories
1052 |             if tags:
1053 |                 memories = await self.storage.search_by_tag(tags)
1054 |             else:
1055 |                 memories = await self.storage.get_recent_memories(100)
1056 |             
1057 |             # Analyze patterns
1058 |             tag_counts = {}
1059 |             type_counts = {}
1060 |             for mem in memories:
1061 |                 for tag in mem.tags:
1062 |                     tag_counts[tag] = tag_counts.get(tag, 0) + 1
1063 |                 mem_type = mem.memory_type
1064 |                 type_counts[mem_type] = type_counts.get(mem_type, 0) + 1
1065 |             
1066 |             analysis_text += f"Total memories analyzed: {len(memories)}\n\n"
1067 |             analysis_text += "Top tags:\n"
1068 |             for tag, count in sorted(tag_counts.items(), key=lambda x: x[1], reverse=True)[:10]:
1069 |                 analysis_text += f"  - {tag}: {count} occurrences\n"
1070 |             analysis_text += "\nMemory types:\n"
1071 |             for mem_type, count in type_counts.items():
1072 |                 analysis_text += f"  - {mem_type}: {count} memories\n"
1073 |             
1074 |             return [
1075 |                 types.PromptMessage(
1076 |                     role="user",
1077 |                     content=types.TextContent(type="text", text=analysis_text)
1078 |                 )
1079 |             ]
1080 |         
1081 |         async def _prompt_knowledge_export(self, arguments: dict) -> list:
1082 |             """Generate knowledge export prompt."""
1083 |             format_type = arguments.get("format", "json")
1084 |             filter_criteria = arguments.get("filter", "")
1085 |             
1086 |             # Get memories based on filter
1087 |             if filter_criteria:
1088 |                 if "," in filter_criteria:
1089 |                     # Assume tags
1090 |                     memories = await self.storage.search_by_tag([t.strip() for t in filter_criteria.split(",")])
1091 |                 else:
1092 |                     # Assume search query
1093 |                     memories = await self.storage.search(filter_criteria, n_results=100)
1094 |             else:
1095 |                 memories = await self.storage.get_recent_memories(100)
1096 |             
1097 |             export_text = f"Exported {len(memories)} memories in {format_type} format:\n\n"
1098 |             
1099 |             if format_type == "markdown":
1100 |                 for mem in memories:
1101 |                     export_text += f"## {mem.created_at_iso}\n"
1102 |                     export_text += f"{mem.content}\n"
1103 |                     if mem.tags:
1104 |                         export_text += f"*Tags: {', '.join(mem.tags)}*\n"
1105 |                     export_text += "\n"
1106 |             elif format_type == "text":
1107 |                 for mem in memories:
1108 |                     export_text += f"[{mem.created_at_iso}] {mem.content}\n"
1109 |             else:  # json
1110 |                 import json
1111 |                 export_data = [m.to_dict() for m in memories]
1112 |                 export_text += json.dumps(export_data, indent=2, default=str)
1113 |             
1114 |             return [
1115 |                 types.PromptMessage(
1116 |                     role="user",
1117 |                     content=types.TextContent(type="text", text=export_text)
1118 |                 )
1119 |             ]
1120 |         
1121 |         async def _prompt_memory_cleanup(self, arguments: dict) -> list:
1122 |             """Generate memory cleanup prompt."""
1123 |             older_than = arguments.get("older_than", "")
1124 |             similarity_threshold = float(arguments.get("similarity_threshold", "0.95"))
1125 |             
1126 |             cleanup_text = "Memory Cleanup Report:\n\n"
1127 |             
1128 |             # Find duplicates
1129 |             all_memories = await self.storage.get_recent_memories(1000)
1130 |             duplicates = []
1131 |             
1132 |             for i, mem1 in enumerate(all_memories):
1133 |                 for mem2 in all_memories[i+1:]:
1134 |                     # Cheap heuristic (similarity_threshold is not applied here): near-equal length plus an identical 50-char prefix
1135 |                     if abs(len(mem1.content) - len(mem2.content)) < 10:
1136 |                         if mem1.content[:50] == mem2.content[:50]:
1137 |                             duplicates.append((mem1, mem2))
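     |             # A stricter pass could score real text similarity with stdlib difflib
     |             # instead of the prefix heuristic (sketch, not wired in):
     |             #   from difflib import SequenceMatcher
     |             #   ratio = SequenceMatcher(None, mem1.content, mem2.content).ratio()
     |             #   is_duplicate = ratio >= similarity_threshold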
1138 |             
1139 |             cleanup_text += f"Found {len(duplicates)} potential duplicate pairs\n"
1140 |             
1141 |             if older_than:
1142 |                 cleanup_text += f"\nMemories older than {older_than} can be archived\n"
1143 |             
1144 |             return [
1145 |                 types.PromptMessage(
1146 |                     role="user",
1147 |                     content=types.TextContent(type="text", text=cleanup_text)
1148 |                 )
1149 |             ]
1150 |         
1151 |         async def _prompt_learning_session(self, arguments: dict) -> list:
1152 |             """Generate learning session prompt."""
1153 |             topic = arguments.get("topic", "General")
1154 |             key_points = arguments.get("key_points", "").split(",")
1155 |             questions = arguments.get("questions", "").split(",") if arguments.get("questions") else []
1156 |             
1157 |             # Create structured learning note
1158 |             learning_note = f"# Learning Session: {topic}\n\n"
1159 |             learning_note += f"Date: {datetime.now().isoformat()}\n\n"
1160 |             learning_note += "## Key Points:\n"
1161 |             for point in key_points:
1162 |                 learning_note += f"- {point.strip()}\n"
1163 |             
1164 |             if questions:
1165 |                 learning_note += "\n## Questions for Further Study:\n"
1166 |                 for question in questions:
1167 |                     learning_note += f"- {question.strip()}\n"
1168 |             
1169 |             # Store the learning note (Memory requires a content_hash in this codebase)
1170 |             from .utils.hashing import generate_content_hash
1171 |             memory = Memory(
1172 |                 content=learning_note,
1173 |                 content_hash=generate_content_hash(learning_note),
1174 |                 tags=["learning", topic.lower().replace(" ", "_")],
     |                 memory_type="learning_note"
     |             )
1175 |             success, message = await self.storage.store(memory)
1176 |             
1177 |             response_text = f"Learning session stored successfully!\n\n{learning_note}"
1178 |             if not success:
1179 |                 response_text = f"Failed to store learning session: {message}"
1180 |             
1181 |             return [
1182 |                 types.PromptMessage(
1183 |                     role="user",
1184 |                     content=types.TextContent(type="text", text=response_text)
1185 |                 )
1186 |             ]
1187 |         
1188 |         # Add a custom error handler for unsupported methods
1189 |         self.server.on_method_not_found = self.handle_method_not_found
1190 |         
1191 |         @self.server.list_tools()
1192 |         async def handle_list_tools() -> List[types.Tool]:
1193 |             logger.info("=== HANDLING LIST_TOOLS REQUEST ===")
1194 |             try:
1195 |                 tools = [
1196 |                     types.Tool(
1197 |                         name="store_memory",
1198 |                         description="""Store new information with optional tags.
1199 | 
1200 |                         Accepts two tag formats in metadata:
1201 |                         - Array: ["tag1", "tag2"]
1202 |                         - String: "tag1,tag2"
1203 | 
1204 |                        Examples:
1205 |                         # Using array format:
1206 |                         {
1207 |                             "content": "Memory content",
1208 |                             "metadata": {
1209 |                                 "tags": ["important", "reference"],
1210 |                                 "type": "note"
1211 |                             }
1212 |                         }
1213 | 
1214 |                         # Using string format (preferred):
1215 |                         {
1216 |                             "content": "Memory content",
1217 |                             "metadata": {
1218 |                                 "tags": "important,reference",
1219 |                                 "type": "note"
1220 |                             }
1221 |                         }""",
1222 |                         inputSchema={
1223 |                             "type": "object",
1224 |                             "properties": {
1225 |                                 "content": {
1226 |                                     "type": "string",
1227 |                                     "description": "The memory content to store, such as a fact, note, or piece of information."
1228 |                                 },
1229 |                                 "metadata": {
1230 |                                     "type": "object",
1231 |                                     "description": "Optional metadata about the memory, including tags and type.",
1232 |                                     "properties": {
1233 |                                         "tags": {
1234 |                                             "oneOf": [
1235 |                                                 {
1236 |                                                     "type": "array",
1237 |                                                     "items": {"type": "string"},
1238 |                                                     "description": "Tags as an array of strings"
1239 |                                                 },
1240 |                                                 {
1241 |                                                     "type": "string",
1242 |                                                     "description": "Tags as comma-separated string"
1243 |                                                 }
1244 |                                             ],
1245 |                                             "description": "Tags to categorize the memory. Accepts either an array of strings or a comma-separated string.",
1246 |                                             "examples": [
1247 |                                                 "tag1,tag2,tag3",
1248 |                                                 ["tag1", "tag2", "tag3"]
1249 |                                             ]
1250 |                                         },
1251 |                                         "type": {
1252 |                                             "type": "string",
1253 |                                             "description": "Optional type or category label for the memory, e.g., 'note', 'fact', 'reminder'."
1254 |                                         }
1255 |                                     }
1256 |                                 }
1257 |                             },
1258 |                             "required": ["content"]
1259 |                         },
1260 |                         annotations=types.ToolAnnotations(
1261 |                             title="Store Memory",
1262 |                             destructiveHint=False,
1263 |                         ),
1264 |                     ),
1265 |                     types.Tool(
1266 |                         name="recall_memory",
1267 |                         description="""Retrieve memories using natural language time expressions and optional semantic search.
1268 | 
1269 |                         Supports various time-related expressions such as:
1270 |                         - "yesterday", "last week", "2 days ago"
1271 |                         - "last summer", "this month", "last January"
1272 |                         - "spring", "winter", "Christmas", "Thanksgiving"
1273 |                         - "morning", "evening", "yesterday afternoon"
1274 | 
1275 |                         Examples:
1276 |                         {
1277 |                             "query": "recall what I stored last week"
1278 |                         }
1279 | 
1280 |                         {
1281 |                             "query": "find information about databases from two months ago",
1282 |                             "n_results": 5
1283 |                         }
1284 |                         """,
1285 |                         inputSchema={
1286 |                             "type": "object",
1287 |                             "properties": {
1288 |                                 "query": {
1289 |                                     "type": "string",
1290 |                                     "description": "Natural language query specifying the time frame or content to recall, e.g., 'last week', 'yesterday afternoon', or a topic."
1291 |                                 },
1292 |                                 "n_results": {
1293 |                                     "type": "number",
1294 |                                     "default": 5,
1295 |                                     "description": "Maximum number of results to return."
1296 |                                 }
1297 |                             },
1298 |                             "required": ["query"]
1299 |                         },
1300 |                         annotations=types.ToolAnnotations(
1301 |                             title="Recall Memory",
1302 |                             readOnlyHint=True,
1303 |                         ),
1304 |                     ),
1305 |                     types.Tool(
1306 |                         name="retrieve_memory",
1307 |                         description="""Find relevant memories based on query.
1308 | 
1309 |                         Example:
1310 |                         {
1311 |                             "query": "find this memory",
1312 |                             "n_results": 5
1313 |                         }""",
1314 |                         inputSchema={
1315 |                             "type": "object",
1316 |                             "properties": {
1317 |                                 "query": {
1318 |                                     "type": "string",
1319 |                                     "description": "Search query to find relevant memories based on content."
1320 |                                 },
1321 |                                 "n_results": {
1322 |                                     "type": "number",
1323 |                                     "default": 5,
1324 |                                     "description": "Maximum number of results to return."
1325 |                                 }
1326 |                             },
1327 |                             "required": ["query"]
1328 |                         },
1329 |                         annotations=types.ToolAnnotations(
1330 |                             title="Retrieve Memory",
1331 |                             readOnlyHint=True,
1332 |                         ),
1333 |                     ),
1334 |                     types.Tool(
1335 |                         name="retrieve_with_quality_boost",
1336 |                         description="""Search memories with quality-based reranking.
1337 | 
1338 |                         Prioritizes high-quality memories in results using composite scoring:
1339 |                         - Over-fetches 3x candidates
1340 |                         - Reranks by: (1 - quality_weight) * semantic_similarity + quality_weight * quality_score
1341 |                         - Default: 70% semantic + 30% quality
1342 | 
1343 |                         Quality scores (0.0-1.0) reflect memory usefulness based on:
1344 |                         - Specificity and actionability
1345 |                         - Recency and context relevance
1346 |                         - Retrieval frequency
1347 | 
1348 |                         Examples:
1349 |                         {
1350 |                             "query": "python async patterns",
1351 |                             "n_results": 10
1352 |                         }
1353 | 
1354 |                         {
1355 |                             "query": "deployment best practices",
1356 |                             "n_results": 5,
1357 |                             "quality_weight": 0.5
1358 |                         }""",
1359 |                         inputSchema={
1360 |                             "type": "object",
1361 |                             "properties": {
1362 |                                 "query": {
1363 |                                     "type": "string",
1364 |                                     "description": "Search query to find relevant memories"
1365 |                                 },
1366 |                                 "n_results": {
1367 |                                     "type": "number",
1368 |                                     "default": 10,
1369 |                                     "description": "Number of results to return (default 10)"
1370 |                                 },
1371 |                                 "quality_weight": {
1372 |                                     "type": "number",
1373 |                                     "default": 0.3,
1374 |                                     "minimum": 0.0,
1375 |                                     "maximum": 1.0,
1376 |                                     "description": "Quality score weight 0.0-1.0 (default 0.3 = 30% quality, 70% semantic)"
1377 |                                 }
1378 |                             },
1379 |                             "required": ["query"]
1380 |                         },
1381 |                         annotations=types.ToolAnnotations(
1382 |                             title="Retrieve with Quality Boost",
1383 |                             readOnlyHint=True,
1384 |                         ),
1385 |                     ),
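     |                     # Worked example of the composite score above, with
     |                     # quality_weight=0.3, similarity=0.80, quality=0.90:
     |                     #   score = 0.7 * 0.80 + 0.3 * 0.90 = 0.56 + 0.27 = 0.83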
1386 |                     types.Tool(
1387 |                         name="search_by_tag",
1388 |                         description="""Search memories by tags. Accepts an array of strings or a comma-separated string.
1389 |                         Returns memories matching ANY of the specified tags.
1390 | 
1391 |                         Example:
1392 |                         {
1393 |                             "tags": ["important", "reference"]
1394 |                         }""",
1395 |                         inputSchema={
1396 |                             "type": "object",
1397 |                             "properties": {
1398 |                                 "tags": {
1399 |                                     "oneOf": [
1400 |                                         {
1401 |                                             "type": "array",
1402 |                                             "items": {"type": "string"},
1403 |                                             "description": "Tags as an array of strings"
1404 |                                         },
1405 |                                         {
1406 |                                             "type": "string",
1407 |                                             "description": "Tags as comma-separated string"
1408 |                                         }
1409 |                                     ],
1410 |                                     "description": "List of tags to search for. Returns memories matching ANY of these tags. Accepts either an array of strings or a comma-separated string."
1411 |                                 }
1412 |                             },
1413 |                             "required": ["tags"]
1414 |                         },
1415 |                         annotations=types.ToolAnnotations(
1416 |                             title="Search by Tag",
1417 |                             readOnlyHint=True,
1418 |                         ),
1419 |                     ),
1420 |                     types.Tool(
1421 |                         name="delete_memory",
1422 |                         description="""Delete a specific memory by its hash.
1423 | 
1424 |                         Example:
1425 |                         {
1426 |                             "content_hash": "a1b2c3d4..."
1427 |                         }""",
1428 |                         inputSchema={
1429 |                             "type": "object",
1430 |                             "properties": {
1431 |                                 "content_hash": {
1432 |                                     "type": "string",
1433 |                                     "description": "Hash of the memory content to delete. Obtainable from memory metadata."
1434 |                                 }
1435 |                             },
1436 |                             "required": ["content_hash"]
1437 |                         },
1438 |                         annotations=types.ToolAnnotations(
1439 |                             title="Delete Memory",
1440 |                             destructiveHint=True,
1441 |                         ),
1442 |                     ),
1443 |                     types.Tool(
1444 |                         name="delete_by_tag",
1445 |                         description="""Delete all memories with specific tags.
1446 |                         WARNING: Deletes ALL memories containing any of the specified tags.
1447 | 
1448 |                         Example:
1449 |                         {"tags": ["temporary", "outdated"]}""",
1450 |                         inputSchema={
1451 |                             "type": "object",
1452 |                             "properties": {
1453 |                                 "tags": {
1454 |                                     "oneOf": [
1455 |                                         {
1456 |                                             "type": "array",
1457 |                                             "items": {"type": "string"},
1458 |                                             "description": "Tags as an array of strings"
1459 |                                         },
1460 |                                         {
1461 |                                             "type": "string",
1462 |                                             "description": "Tags as comma-separated string"
1463 |                                         }
1464 |                                     ],
1465 |                                     "description": "Array of tag labels. Memories containing any of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1466 |                                 }
1467 |                             },
1468 |                             "required": ["tags"]
1469 |                         },
1470 |                         annotations=types.ToolAnnotations(
1471 |                             title="Delete by Tag",
1472 |                             destructiveHint=True,
1473 |                         ),
1474 |                     ),
1475 |                     types.Tool(
1476 |                         name="delete_by_tags",
1477 |                         description="""Delete all memories containing any of the specified tags.
1478 |                         This is the explicit multi-tag version for API clarity.
1479 |                         WARNING: Deletes ALL memories containing any of the specified tags.
1480 | 
1481 |                         Example:
1482 |                         {
1483 |                             "tags": ["temporary", "outdated", "test"]
1484 |                         }""",
1485 |                         inputSchema={
1486 |                             "type": "object",
1487 |                             "properties": {
1488 |                                 "tags": {
1489 |                                     "oneOf": [
1490 |                                         {
1491 |                                             "type": "array",
1492 |                                             "items": {"type": "string"},
1493 |                                             "description": "Tags as an array of strings"
1494 |                                         },
1495 |                                         {
1496 |                                             "type": "string",
1497 |                                             "description": "Tags as comma-separated string"
1498 |                                         }
1499 |                                     ],
1500 |                                     "description": "List of tag labels. Memories containing any of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1501 |                                 }
1502 |                             },
1503 |                             "required": ["tags"]
1504 |                         },
1505 |                         annotations=types.ToolAnnotations(
1506 |                             title="Delete by Tags",
1507 |                             destructiveHint=True,
1508 |                         ),
1509 |                     ),
1510 |                     types.Tool(
1511 |                         name="delete_by_all_tags",
1512 |                         description="""Delete memories that contain ALL of the specified tags.
1513 |                         WARNING: Deletes ALL memories that contain every one of the specified tags.
1514 | 
1515 |                         Example:
1516 |                         {
1517 |                             "tags": ["important", "urgent"]
1518 |                         }""",
1519 |                         inputSchema={
1520 |                             "type": "object",
1521 |                             "properties": {
1522 |                                 "tags": {
1523 |                                     "oneOf": [
1524 |                                         {
1525 |                                             "type": "array",
1526 |                                             "items": {"type": "string"},
1527 |                                             "description": "Tags as an array of strings"
1528 |                                         },
1529 |                                         {
1530 |                                             "type": "string",
1531 |                                             "description": "Tags as comma-separated string"
1532 |                                         }
1533 |                                     ],
1534 |                                     "description": "List of tag labels. Only memories containing ALL of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1535 |                                 }
1536 |                             },
1537 |                             "required": ["tags"]
1538 |                         },
1539 |                         annotations=types.ToolAnnotations(
1540 |                             title="Delete by All Tags",
1541 |                             destructiveHint=True,
1542 |                         ),
1543 |                     ),
1544 |                     types.Tool(
1545 |                         name="cleanup_duplicates",
1546 |                         description="Find and remove duplicate memory entries",
1547 |                         inputSchema={
1548 |                             "type": "object",
1549 |                             "properties": {}
1550 |                         },
1551 |                         annotations=types.ToolAnnotations(
1552 |                             title="Cleanup Duplicates",
1553 |                             destructiveHint=True,
1554 |                         ),
1555 |                     ),
1556 |                     types.Tool(
1557 |                         name="debug_retrieve",
1558 |                         description="""Retrieve memories with debug information.
1559 | 
1560 |                         Example:
1561 |                         {
1562 |                             "query": "debug this",
1563 |                             "n_results": 5,
1564 |                             "similarity_threshold": 0.0
1565 |                         }""",
1566 |                         inputSchema={
1567 |                             "type": "object",
1568 |                             "properties": {
1569 |                                 "query": {
1570 |                                     "type": "string",
1571 |                                     "description": "Search query for debugging retrieval, e.g., a phrase or keyword."
1572 |                                 },
1573 |                                 "n_results": {
1574 |                                     "type": "number",
1575 |                                     "default": 5,
1576 |                                     "description": "Maximum number of results to return."
1577 |                                 },
1578 |                                 "similarity_threshold": {
1579 |                                     "type": "number",
1580 |                                     "default": 0.0,
1581 |                                     "description": "Minimum similarity score threshold for results (0.0 to 1.0)."
1582 |                                 }
1583 |                             },
1584 |                             "required": ["query"]
1585 |                         },
1586 |                         annotations=types.ToolAnnotations(
1587 |                             title="Debug Retrieve",
1588 |                             readOnlyHint=True,
1589 |                         ),
1590 |                     ),
1591 |                     types.Tool(
1592 |                         name="exact_match_retrieve",
1593 |                         description="""Retrieve memories using exact content match.
1594 | 
1595 |                         Example:
1596 |                         {
1597 |                             "content": "find exactly this"
1598 |                         }""",
1599 |                         inputSchema={
1600 |                             "type": "object",
1601 |                             "properties": {
1602 |                                 "content": {
1603 |                                     "type": "string",
1604 |                                     "description": "Exact content string to match against stored memories."
1605 |                                 }
1606 |                             },
1607 |                             "required": ["content"]
1608 |                         },
1609 |                         annotations=types.ToolAnnotations(
1610 |                             title="Exact Match Retrieve",
1611 |                             readOnlyHint=True,
1612 |                         ),
1613 |                     ),
1614 |                     types.Tool(
1615 |                         name="get_raw_embedding",
1616 |                         description="""Get raw embedding vector for debugging purposes.
1617 | 
1618 |                         Example:
1619 |                         {
1620 |                             "content": "text to embed"
1621 |                         }""",
1622 |                         inputSchema={
1623 |                             "type": "object",
1624 |                             "properties": {
1625 |                                 "content": {
1626 |                                     "type": "string",
1627 |                                     "description": "Content to generate embedding for."
1628 |                                 }
1629 |                             },
1630 |                             "required": ["content"]
1631 |                         },
1632 |                         annotations=types.ToolAnnotations(
1633 |                             title="Get Raw Embedding",
1634 |                             readOnlyHint=True,
1635 |                         ),
1636 |                     ),
1637 |                     types.Tool(
1638 |                         name="check_database_health",
1639 |                         description="Check database health and get statistics",
1640 |                         inputSchema={
1641 |                             "type": "object",
1642 |                             "properties": {}
1643 |                         },
1644 |                         annotations=types.ToolAnnotations(
1645 |                             title="Check Database Health",
1646 |                             readOnlyHint=True,
1647 |                         ),
1648 |                     ),
1649 |                     types.Tool(
1650 |                         name="get_cache_stats",
1651 |                         description="""Get MCP server global cache statistics for performance monitoring.
1652 | 
1653 |                         Returns detailed metrics about storage and memory service caching,
1654 |                         including hit rates, initialization times, and cache sizes.
1655 | 
1656 |                         This tool is useful for:
1657 |                         - Monitoring cache effectiveness
1658 |                         - Debugging performance issues
1659 |                         - Verifying cache persistence across MCP tool calls
1660 | 
1661 |                         Returns cache statistics including total calls, hit rate percentage,
1662 |                         storage/service cache metrics, performance metrics, and backend info.""",
1663 |                         inputSchema={
1664 |                             "type": "object",
1665 |                             "properties": {}
1666 |                         },
1667 |                         annotations=types.ToolAnnotations(
1668 |                             title="Get Cache Stats",
1669 |                             readOnlyHint=True,
1670 |                         ),
1671 |                     ),
1672 |                     types.Tool(
1673 |                         name="recall_by_timeframe",
1674 |                         description="""Retrieve memories within a specific timeframe.
1675 | 
1676 |                         Example:
1677 |                         {
1678 |                             "start_date": "2024-01-01",
1679 |                             "end_date": "2024-01-31",
1680 |                             "n_results": 5
1681 |                         }""",
1682 |                         inputSchema={
1683 |                             "type": "object",
1684 |                             "properties": {
1685 |                                 "start_date": {
1686 |                                     "type": "string",
1687 |                                     "format": "date",
1688 |                                     "description": "Start date (inclusive) in YYYY-MM-DD format."
1689 |                                 },
1690 |                                 "end_date": {
1691 |                                     "type": "string",
1692 |                                     "format": "date",
1693 |                                     "description": "End date (inclusive) in YYYY-MM-DD format."
1694 |                                 },
1695 |                                 "n_results": {
1696 |                                     "type": "number",
1697 |                                     "default": 5,
1698 |                                     "description": "Maximum number of results to return."
1699 |                                 }
1700 |                             },
1701 |                             "required": ["start_date"]
1702 |                         },
1703 |                         annotations=types.ToolAnnotations(
1704 |                             title="Recall by Timeframe",
1705 |                             readOnlyHint=True,
1706 |                         ),
1707 |                     ),
1708 |                     types.Tool(
1709 |                         name="delete_by_timeframe",
1710 |                         description="""Delete memories within a specific timeframe.
1711 |                         Optional tag parameter to filter deletions.
1712 | 
1713 |                         Example:
1714 |                         {
1715 |                             "start_date": "2024-01-01",
1716 |                             "end_date": "2024-01-31",
1717 |                             "tag": "temporary"
1718 |                         }""",
1719 |                         inputSchema={
1720 |                             "type": "object",
1721 |                             "properties": {
1722 |                                 "start_date": {
1723 |                                     "type": "string",
1724 |                                     "format": "date",
1725 |                                     "description": "Start date (inclusive) in YYYY-MM-DD format."
1726 |                                 },
1727 |                                 "end_date": {
1728 |                                     "type": "string",
1729 |                                     "format": "date",
1730 |                                     "description": "End date (inclusive) in YYYY-MM-DD format."
1731 |                                 },
1732 |                                 "tag": {
1733 |                                     "type": "string",
1734 |                                     "description": "Optional tag to filter deletions. Only memories with this tag will be deleted."
1735 |                                 }
1736 |                             },
1737 |                             "required": ["start_date"]
1738 |                         },
1739 |                         annotations=types.ToolAnnotations(
1740 |                             title="Delete by Timeframe",
1741 |                             destructiveHint=True,
1742 |                         ),
1743 |                     ),
1744 |                     types.Tool(
1745 |                         name="delete_before_date",
1746 |                         description="""Delete memories before a specific date.
1747 |                         Optional tag parameter to filter deletions.
1748 | 
1749 |                         Example:
1750 |                         {
1751 |                             "before_date": "2024-01-01",
1752 |                             "tag": "temporary"
1753 |                         }""",
1754 |                         inputSchema={
1755 |                             "type": "object",
1756 |                             "properties": {
1757 |                                 "before_date": {"type": "string", "format": "date", "description": "Cutoff date in YYYY-MM-DD format; memories created before this date will be deleted."},
1758 |                                 "tag": {"type": "string", "description": "Optional tag to filter deletions. Only memories with this tag will be deleted."}
1759 |                             },
1760 |                             "required": ["before_date"]
1761 |                         },
1762 |                         annotations=types.ToolAnnotations(
1763 |                             title="Delete Before Date",
1764 |                             destructiveHint=True,
1765 |                         ),
1766 |                     ),
1767 |                     types.Tool(
1768 |                         name="update_memory_metadata",
1769 |                         description="""Update memory metadata without recreating the entire memory entry.
1770 |                         
1771 |                         This provides efficient metadata updates while preserving the original
1772 |                         memory content, embeddings, and optionally timestamps.
1773 |                         
1774 |                         Examples:
1775 |                         # Add tags to a memory
1776 |                         {
1777 |                             "content_hash": "abc123...",
1778 |                             "updates": {
1779 |                                 "tags": ["important", "reference", "new-tag"]
1780 |                             }
1781 |                         }
1782 |                         
1783 |                         # Update memory type and custom metadata
1784 |                         {
1785 |                             "content_hash": "abc123...",
1786 |                             "updates": {
1787 |                                 "memory_type": "reminder",
1788 |                                 "metadata": {
1789 |                                     "priority": "high",
1790 |                                     "due_date": "2024-01-15"
1791 |                                 }
1792 |                             }
1793 |                         }
1794 |                         
1795 |                         # Update custom fields directly
1796 |                         {
1797 |                             "content_hash": "abc123...",
1798 |                             "updates": {
1799 |                                 "priority": "urgent",
1800 |                                 "status": "active"
1801 |                             }
1802 |                         }""",
1803 |                         inputSchema={
1804 |                             "type": "object",
1805 |                             "properties": {
1806 |                                 "content_hash": {
1807 |                                     "type": "string",
1808 |                                     "description": "The content hash of the memory to update."
1809 |                                 },
1810 |                                 "updates": {
1811 |                                     "type": "object",
1812 |                                     "description": "Dictionary of metadata fields to update.",
1813 |                                     "properties": {
1814 |                                         "tags": {
1815 |                                             "oneOf": [
1816 |                                                 {
1817 |                                                     "type": "array",
1818 |                                                     "items": {"type": "string"},
1819 |                                                     "description": "Tags as an array of strings"
1820 |                                                 },
1821 |                                                 {
1822 |                                                     "type": "string",
1823 |                                                     "description": "Tags as comma-separated string"
1824 |                                                 }
1825 |                                             ],
1826 |                                             "description": "Replace existing tags with this list. Accepts either an array of strings or a comma-separated string."
1827 |                                         },
1828 |                                         "memory_type": {
1829 |                                             "type": "string",
1830 |                                             "description": "Update the memory type (e.g., 'note', 'reminder', 'fact')."
1831 |                                         },
1832 |                                         "metadata": {
1833 |                                             "type": "object",
1834 |                                             "description": "Custom metadata fields to merge with existing metadata."
1835 |                                         }
1836 |                                     }
1837 |                                 },
1838 |                                 "preserve_timestamps": {
1839 |                                     "type": "boolean",
1840 |                                     "default": True,
1841 |                                     "description": "Whether to preserve the original created_at timestamp (default: true)."
1842 |                                 }
1843 |                             },
1844 |                             "required": ["content_hash", "updates"]
1845 |                         },
1846 |                         annotations=types.ToolAnnotations(
1847 |                             title="Update Memory Metadata",
1848 |                             destructiveHint=True,
1849 |                         ),
1850 |                     )
1851 |                 ]
1852 |                 
1853 |                 # Add consolidation tools if enabled
1854 |                 if CONSOLIDATION_ENABLED and self.consolidator:
1855 |                     consolidation_tools = [
1856 |                         types.Tool(
1857 |                             name="consolidate_memories",
1858 |                             description="""Run memory consolidation for a specific time horizon.
1859 |                             
1860 |                             Performs dream-inspired memory consolidation including:
1861 |                             - Exponential decay scoring
1862 |                             - Creative association discovery  
1863 |                             - Semantic clustering and compression
1864 |                             - Controlled forgetting with archival
1865 |                             
1866 |                             Example:
1867 |                             {
1868 |                                 "time_horizon": "weekly"
1869 |                             }""",
1870 |                             inputSchema={
1871 |                                 "type": "object",
1872 |                                 "properties": {
1873 |                                     "time_horizon": {
1874 |                                         "type": "string",
1875 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
1876 |                                         "description": "Time horizon for consolidation operations."
1877 |                                     }
1878 |                                 },
1879 |                                 "required": ["time_horizon"]
1880 |                             },
1881 |                             annotations=types.ToolAnnotations(
1882 |                                 title="Consolidate Memories",
1883 |                                 destructiveHint=True,
1884 |                             ),
1885 |                         ),
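                        # The "exponential decay scoring" listed above follows the usual
                        # half-life form. A minimal sketch (parameter names are
                        # illustrative; the shipped values live in the consolidation
                        # config):
                        #
                        #   import math
                        #   def decay_score(age_days: float, half_life_days: float) -> float:
                        #       # 1.0 for a brand-new memory, 0.5 after one half-life
                        #       return math.exp(-math.log(2) * age_days / half_life_days)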
1886 |                         types.Tool(
1887 |                             name="consolidation_status",
1888 |                             description="Get status and health information about the consolidation system.",
1889 |                             inputSchema={"type": "object", "properties": {}},
1890 |                             annotations=types.ToolAnnotations(
1891 |                                 title="Consolidation Status",
1892 |                                 readOnlyHint=True,
1893 |                             ),
1894 |                         ),
1895 |                         types.Tool(
1896 |                             name="consolidation_recommendations",
1897 |                             description="""Get recommendations for consolidation based on current memory state.
1898 | 
1899 |                             Example:
1900 |                             {
1901 |                                 "time_horizon": "monthly"
1902 |                             }""",
1903 |                             inputSchema={
1904 |                                 "type": "object",
1905 |                                 "properties": {
1906 |                                     "time_horizon": {
1907 |                                         "type": "string",
1908 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
1909 |                                         "description": "Time horizon to analyze for consolidation recommendations."
1910 |                                     }
1911 |                                 },
1912 |                                 "required": ["time_horizon"]
1913 |                             },
1914 |                             annotations=types.ToolAnnotations(
1915 |                                 title="Consolidation Recommendations",
1916 |                                 readOnlyHint=True,
1917 |                             ),
1918 |                         ),
1919 |                         types.Tool(
1920 |                             name="scheduler_status",
1921 |                             description="Get consolidation scheduler status and job information.",
1922 |                             inputSchema={"type": "object", "properties": {}},
1923 |                             annotations=types.ToolAnnotations(
1924 |                                 title="Scheduler Status",
1925 |                                 readOnlyHint=True,
1926 |                             ),
1927 |                         ),
1928 |                         types.Tool(
1929 |                             name="trigger_consolidation",
1930 |                             description="""Manually trigger a consolidation job.
1931 | 
1932 |                             Example:
1933 |                             {
1934 |                                 "time_horizon": "weekly",
1935 |                                 "immediate": true
1936 |                             }""",
1937 |                             inputSchema={
1938 |                                 "type": "object",
1939 |                                 "properties": {
1940 |                                     "time_horizon": {
1941 |                                         "type": "string",
1942 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
1943 |                                         "description": "Time horizon for the consolidation job."
1944 |                                     },
1945 |                                     "immediate": {
1946 |                                         "type": "boolean",
1947 |                                         "default": True,
1948 |                                         "description": "Whether to run immediately or schedule for later."
1949 |                                     }
1950 |                                 },
1951 |                                 "required": ["time_horizon"]
1952 |                             },
1953 |                             annotations=types.ToolAnnotations(
1954 |                                 title="Trigger Consolidation",
1955 |                                 destructiveHint=True,
1956 |                             ),
1957 |                         ),
1958 |                         types.Tool(
1959 |                             name="pause_consolidation",
1960 |                             description="""Pause consolidation jobs for a specific time horizon, or all jobs if omitted.
1961 | 
1962 |                             Example:
1963 |                             {
1964 |                                 "time_horizon": "weekly"
1965 |                             }""",
1966 |                             inputSchema={
1967 |                                 "type": "object",
1968 |                                 "properties": {
1969 |                                     "time_horizon": {
1970 |                                         "type": "string",
1971 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
1972 |                                         "description": "Specific time horizon to pause, or omit to pause all jobs."
1973 |                                     }
1974 |                                 }
1975 |                             },
1976 |                             annotations=types.ToolAnnotations(
1977 |                                 title="Pause Consolidation",
1978 |                                 destructiveHint=True,
1979 |                             ),
1980 |                         ),
1981 |                         types.Tool(
1982 |                             name="resume_consolidation",
1983 |                             description="""Resume paused consolidation jobs for a specific time horizon, or all jobs if omitted.
1984 | 
1985 |                             Example:
1986 |                             {
1987 |                                 "time_horizon": "weekly"
1988 |                             }""",
1989 |                             inputSchema={
1990 |                                 "type": "object",
1991 |                                 "properties": {
1992 |                                     "time_horizon": {
1993 |                                         "type": "string",
1994 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
1995 |                                         "description": "Specific time horizon to resume, or omit to resume all jobs."
1996 |                                     }
1997 |                                 }
1998 |                             },
1999 |                             annotations=types.ToolAnnotations(
2000 |                                 title="Resume Consolidation",
2001 |                                 destructiveHint=True,
2002 |                             ),
2003 |                         )
2004 |                     ]
2005 |                     tools.extend(consolidation_tools)
2006 |                     logger.info(f"Added {len(consolidation_tools)} consolidation tools")
2007 |                 
2008 |                 # Add document ingestion tools
2009 |                 ingestion_tools = [
2010 |                     types.Tool(
2011 |                         name="ingest_document",
2012 |                         description="""Ingest a single document file into the memory database.
2013 |                         
2014 |                         Supports multiple formats:
2015 |                         - PDF files (.pdf)
2016 |                         - Text files (.txt, .md, .markdown, .rst)
2017 |                         - JSON files (.json)
2018 |                         
2019 |                         The document will be parsed, chunked intelligently, and stored
2020 |                         as multiple memories with appropriate metadata.
2021 |                         
2022 |                         Example:
2023 |                         {
2024 |                             "file_path": "/path/to/document.pdf",
2025 |                             "tags": ["documentation", "manual"],
2026 |                             "chunk_size": 1000
2027 |                         }""",
2028 |                         inputSchema={
2029 |                             "type": "object",
2030 |                             "properties": {
2031 |                                 "file_path": {
2032 |                                     "type": "string",
2033 |                                     "description": "Path to the document file to ingest."
2034 |                                 },
2035 |                                 "tags": {
2036 |                                     "oneOf": [
2037 |                                         {
2038 |                                             "type": "array",
2039 |                                             "items": {"type": "string"},
2040 |                                             "description": "Tags as an array of strings"
2041 |                                         },
2042 |                                         {
2043 |                                             "type": "string",
2044 |                                             "description": "Tags as comma-separated string"
2045 |                                         }
2046 |                                     ],
2047 |                                     "description": "Optional tags to apply to all memories created from this document. Accepts either an array of strings or a comma-separated string.",
2048 |                                     "default": []
2049 |                                 },
2050 |                                 "chunk_size": {
2051 |                                     "type": "number",
2052 |                                     "description": "Target size for text chunks in characters (default: 1000).",
2053 |                                     "default": 1000
2054 |                                 },
2055 |                                 "chunk_overlap": {
2056 |                                     "type": "number",
2057 |                                     "description": "Characters to overlap between chunks (default: 200).",
2058 |                                     "default": 200
2059 |                                 },
2060 |                                 "memory_type": {
2061 |                                     "type": "string",
2062 |                                     "description": "Type label for created memories (default: 'document').",
2063 |                                     "default": "document"
2064 |                                 }
2065 |                             },
2066 |                             "required": ["file_path"]
2067 |                         },
2068 |                         annotations=types.ToolAnnotations(
2069 |                             title="Ingest Document",
2070 |                             destructiveHint=False,
2071 |                         ),
2072 |                     ),
2073 |                     types.Tool(
2074 |                         name="ingest_directory",
2075 |                         description="""Batch ingest all supported documents from a directory.
2076 |                         
2077 |                         Recursively processes all supported file types in the directory,
2078 |                         creating memories with consistent tagging and metadata.
2079 |                         
2080 |                         Supported formats: PDF, TXT, MD, JSON
2081 |                         
2082 |                         Example:
2083 |                         {
2084 |                             "directory_path": "/path/to/documents",
2085 |                             "tags": ["knowledge-base"],
2086 |                             "recursive": true,
2087 |                             "file_extensions": ["pdf", "md", "txt"]
2088 |                         }""",
2089 |                         inputSchema={
2090 |                             "type": "object",
2091 |                             "properties": {
2092 |                                 "directory_path": {
2093 |                                     "type": "string",
2094 |                                     "description": "Path to the directory containing documents to ingest."
2095 |                                 },
2096 |                                 "tags": {
2097 |                                     "oneOf": [
2098 |                                         {
2099 |                                             "type": "array",
2100 |                                             "items": {"type": "string"},
2101 |                                             "description": "Tags as an array of strings"
2102 |                                         },
2103 |                                         {
2104 |                                             "type": "string",
2105 |                                             "description": "Tags as comma-separated string"
2106 |                                         }
2107 |                                     ],
2108 |                                     "description": "Optional tags to apply to all memories created. Accepts either an array of strings or a comma-separated string.",
2109 |                                     "default": []
2110 |                                 },
2111 |                                 "recursive": {
2112 |                                     "type": "boolean",
2113 |                                     "description": "Whether to process subdirectories recursively (default: true).",
2114 |                                     "default": True
2115 |                                 },
2116 |                                 "file_extensions": {
2117 |                                     "type": "array",
2118 |                                     "items": {"type": "string"},
2119 |                                     "description": "File extensions to process (default: all supported).",
2120 |                                     "default": ["pdf", "txt", "md", "json"]
2121 |                                 },
2122 |                                 "chunk_size": {
2123 |                                     "type": "number",
2124 |                                     "description": "Target size for text chunks in characters (default: 1000).",
2125 |                                     "default": 1000
2126 |                                 },
2127 |                                 "max_files": {
2128 |                                     "type": "number",
2129 |                                     "description": "Maximum number of files to process (default: 100).",
2130 |                                     "default": 100
2131 |                                 }
2132 |                             },
2133 |                             "required": ["directory_path"]
2134 |                         },
2135 |                         annotations=types.ToolAnnotations(
2136 |                             title="Ingest Directory",
2137 |                             destructiveHint=False,
2138 |                         ),
2139 |                     )
2140 |                 ]
2141 |                 tools.extend(ingestion_tools)
2142 |                 logger.info(f"Added {len(ingestion_tools)} ingestion tools")
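                # The chunk_size/chunk_overlap parameters above describe a standard
                # sliding-window chunker. A minimal sketch (illustrative only; the
                # actual chunker lives elsewhere in this package and may also split
                # on sentence or paragraph boundaries):
                #
                #   def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 200) -> list[str]:
                #       step = max(chunk_size - overlap, 1)
                #       return [text[i:i + chunk_size] for i in range(0, len(text), step)]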
2143 | 
2144 |                 # Quality system tools
2145 |                 quality_tools = [
2146 |                     types.Tool(
2147 |                         name="rate_memory",
2148 |                         description="""Manually rate a memory's quality.
2149 | 
2150 |                         Allows manual quality override with thumbs up/down rating.
2151 |                         User ratings are weighted higher than AI scores in quality calculation.
2152 | 
2153 |                         Example:
2154 |                         {
2155 |                             "content_hash": "abc123def456",
2156 |                             "rating": 1,
2157 |                             "feedback": "Highly relevant information"
2158 |                         }""",
2159 |                         inputSchema={
2160 |                             "type": "object",
2161 |                             "properties": {
2162 |                                 "content_hash": {
2163 |                                     "type": "string",
2164 |                                     "description": "Hash of the memory to rate"
2165 |                                 },
2166 |                                 "rating": {
2167 |                                     "type": "number",
2168 |                                     "description": "Quality rating: -1 (thumbs down), 0 (neutral), 1 (thumbs up)",
2169 |                                     "enum": [-1, 0, 1]
2170 |                                 },
2171 |                                 "feedback": {
2172 |                                     "type": "string",
2173 |                                     "description": "Optional feedback text explaining the rating",
2174 |                                     "default": ""
2175 |                                 }
2176 |                             },
2177 |                             "required": ["content_hash", "rating"]
2178 |                         },
2179 |                         annotations=types.ToolAnnotations(
2180 |                             title="Rate Memory",
2181 |                             destructiveHint=True,
2182 |                         ),
2183 |                     ),
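                    # One way "weighted higher" could combine the two signals (purely
                    # illustrative; the coefficients are assumptions, not the shipped
                    # weighting):
                    #
                    #   def blended_quality(ai_score: float, user_rating: int | None) -> float:
                    #       if user_rating is None:
                    #           return ai_score
                    #       user_score = (user_rating + 1) / 2  # map -1/0/1 to 0.0/0.5/1.0
                    #       return 0.3 * ai_score + 0.7 * user_score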
2184 |                     types.Tool(
2185 |                         name="get_memory_quality",
2186 |                         description="""Get quality metrics for a specific memory.
2187 | 
2188 |                         Returns comprehensive quality information including:
2189 |                         - Current quality score (0.0-1.0)
2190 |                         - Quality provider (which tier scored it)
2191 |                         - Access count and last access time
2192 |                         - Historical AI scores
2193 |                         - User rating if present
2194 | 
2195 |                         Example:
2196 |                         {
2197 |                             "content_hash": "abc123def456"
2198 |                         }""",
2199 |                         inputSchema={
2200 |                             "type": "object",
2201 |                             "properties": {
2202 |                                 "content_hash": {
2203 |                                     "type": "string",
2204 |                                     "description": "Hash of the memory to query"
2205 |                                 }
2206 |                             },
2207 |                             "required": ["content_hash"]
2208 |                         },
2209 |                         annotations=types.ToolAnnotations(
2210 |                             title="Get Memory Quality",
2211 |                             readOnlyHint=True,
2212 |                         ),
2213 |                     ),
2214 |                     types.Tool(
2215 |                         name="analyze_quality_distribution",
2216 |                         description="""Analyze quality score distribution across all memories.
2217 | 
2218 |                         Provides system-wide quality analytics including:
2219 |                         - Total memory count
2220 |                         - High/medium/low quality distribution
2221 |                         - Average quality score
2222 |                         - Provider breakdown (local/groq/gemini/implicit)
2223 |                         - Top 10 highest scoring memories
2224 |                         - Bottom 10 lowest scoring memories
2225 | 
2226 |                         Example:
2227 |                         {
2228 |                             "min_quality": 0.0,
2229 |                             "max_quality": 1.0
2230 |                         }""",
2231 |                         inputSchema={
2232 |                             "type": "object",
2233 |                             "properties": {
2234 |                                 "min_quality": {
2235 |                                     "type": "number",
2236 |                                     "description": "Minimum quality threshold (default: 0.0)",
2237 |                                     "default": 0.0
2238 |                                 },
2239 |                                 "max_quality": {
2240 |                                     "type": "number",
2241 |                                     "description": "Maximum quality threshold (default: 1.0)",
2242 |                                     "default": 1.0
2243 |                                 }
2244 |                             }
2245 |                         },
2246 |                         annotations=types.ToolAnnotations(
2247 |                             title="Analyze Quality Distribution",
2248 |                             readOnlyHint=True,
2249 |                         ),
2250 |                     )
2251 |                 ]
2252 |                 tools.extend(quality_tools)
2253 |                 logger.info(f"Added {len(quality_tools)} quality system tools")
2254 | 
2255 |                 # Graph traversal tools
2256 |                 graph_tools = [
2257 |                     types.Tool(
2258 |                         name="find_connected_memories",
2259 |                         description="""Find memories connected to a given memory via associations.
2260 | 
2261 |                         Performs breadth-first traversal of the association graph up to
2262 |                         max_hops distance, returning all connected memories with their
2263 |                         distance from the source.
2264 | 
2265 |                         Example:
2266 |                         {
2267 |                             "hash": "abc123...",
2268 |                             "max_hops": 2
2269 |                         }""",
2270 |                         inputSchema={
2271 |                             "type": "object",
2272 |                             "properties": {
2273 |                                 "hash": {
2274 |                                     "type": "string",
2275 |                                     "description": "Content hash of the starting memory"
2276 |                                 },
2277 |                                 "max_hops": {
2278 |                                     "type": "number",
2279 |                                     "description": "Maximum number of hops to traverse (default: 2)",
2280 |                                     "default": 2
2281 |                                 }
2282 |                             },
2283 |                             "required": ["hash"]
2284 |                         },
2285 |                         annotations=types.ToolAnnotations(
2286 |                             title="Find Connected Memories",
2287 |                             readOnlyHint=True,
2288 |                         ),
2289 |                     ),
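                    # The breadth-first traversal described above, in outline. A sketch
                    # assuming a neighbor lookup such as get_neighbors(hash) -> list of
                    # hashes (illustrative; the real storage API may differ):
                    #
                    #   from collections import deque
                    #
                    #   def connected(start: str, max_hops: int) -> dict[str, int]:
                    #       distance, queue = {start: 0}, deque([start])
                    #       while queue:
                    #           node = queue.popleft()
                    #           if distance[node] == max_hops:
                    #               continue  # do not expand past the hop limit
                    #           for neighbor in get_neighbors(node):
                    #               if neighbor not in distance:
                    #                   distance[neighbor] = distance[node] + 1
                    #                   queue.append(neighbor)
                    #       return distance  # content hash -> hops from start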
2290 |                     types.Tool(
2291 |                         name="find_shortest_path",
2292 |                         description="""Find shortest path between two memories in the association graph.
2293 | 
2294 |                         Uses breadth-first search to find the shortest sequence of associations
2295 |                         connecting two memories. Returns null if no path exists.
2296 | 
2297 |                         Example:
2298 |                         {
2299 |                             "hash1": "abc123...",
2300 |                             "hash2": "def456...",
2301 |                             "max_depth": 5
2302 |                         }""",
2303 |                         inputSchema={
2304 |                             "type": "object",
2305 |                             "properties": {
2306 |                                 "hash1": {
2307 |                                     "type": "string",
2308 |                                     "description": "Starting memory hash"
2309 |                                 },
2310 |                                 "hash2": {
2311 |                                     "type": "string",
2312 |                                     "description": "Target memory hash"
2313 |                                 },
2314 |                                 "max_depth": {
2315 |                                     "type": "number",
2316 |                                     "description": "Maximum path length (default: 5)",
2317 |                                     "default": 5
2318 |                                 }
2319 |                             },
2320 |                             "required": ["hash1", "hash2"]
2321 |                         },
2322 |                         annotations=types.ToolAnnotations(
2323 |                             title="Find Shortest Path",
2324 |                             readOnlyHint=True,
2325 |                         ),
2326 |                     ),
2327 |                     types.Tool(
2328 |                         name="get_memory_subgraph",
2329 |                         description="""Get subgraph around a memory for visualization.
2330 | 
2331 |                         Extracts all nodes and edges within the specified radius for
2332 |                         graph visualization. Returns nodes (memory hashes) and edges
2333 |                         (associations with metadata).
2334 | 
2335 |                         Example:
2336 |                         {
2337 |                             "hash": "abc123...",
2338 |                             "radius": 2
2339 |                         }""",
2340 |                         inputSchema={
2341 |                             "type": "object",
2342 |                             "properties": {
2343 |                                 "hash": {
2344 |                                     "type": "string",
2345 |                                     "description": "Center memory hash"
2346 |                                 },
2347 |                                 "radius": {
2348 |                                     "type": "number",
2349 |                                     "description": "Number of hops to include (default: 2)",
2350 |                                     "default": 2
2351 |                                 }
2352 |                             },
2353 |                             "required": ["hash"]
2354 |                         },
2355 |                         annotations=types.ToolAnnotations(
2356 |                             title="Get Memory Subgraph",
2357 |                             readOnlyHint=True,
2358 |                         ),
2359 |                     )
2360 |                 ]
2361 |                 tools.extend(graph_tools)
2362 |                 logger.info(f"Added {len(graph_tools)} graph traversal tools")
2363 | 
2364 |                 logger.info(f"Returning {len(tools)} tools")
2365 |                 return tools
2366 |             except Exception as e:
2367 |                 logger.error(f"Error in handle_list_tools: {str(e)}")
2368 |                 logger.error(traceback.format_exc())
2369 |                 raise
2370 |         
2371 |         @self.server.call_tool()
2372 |         async def handle_call_tool(name: str, arguments: dict | None) -> List[types.TextContent]:
2373 |             # Add immediate debugging to catch any protocol issues
2374 |             if MCP_CLIENT == 'lm_studio':
2375 |                 print(f"TOOL CALL INTERCEPTED: {name}", file=sys.stdout, flush=True)
2376 |             logger.info(f"=== HANDLING TOOL CALL: {name} ===")
2377 |             logger.info(f"Arguments: {arguments}")
2378 |             
2379 |             try:
2380 |                 if arguments is None:
2381 |                     arguments = {}
2382 |                 
2383 |                 logger.info(f"Processing tool: {name}")
2384 |                 if MCP_CLIENT == 'lm_studio':
2385 |                     print(f"Processing tool: {name}", file=sys.stdout, flush=True)
2386 |                 
2387 |                 if name == "store_memory":
2388 |                     return await self.handle_store_memory(arguments)
2389 |                 elif name == "retrieve_memory":
2390 |                     return await self.handle_retrieve_memory(arguments)
2391 |                 elif name == "retrieve_with_quality_boost":
2392 |                     return await self.handle_retrieve_with_quality_boost(arguments)
2393 |                 elif name == "recall_memory":
2394 |                     return await self.handle_recall_memory(arguments)
2395 |                 elif name == "search_by_tag":
2396 |                     return await self.handle_search_by_tag(arguments)
2397 |                 elif name == "delete_memory":
2398 |                     return await self.handle_delete_memory(arguments)
2399 |                 elif name == "delete_by_tag":
2400 |                     return await self.handle_delete_by_tag(arguments)
2401 |                 elif name == "delete_by_tags":
2402 |                     return await self.handle_delete_by_tags(arguments)
2403 |                 elif name == "delete_by_all_tags":
2404 |                     return await self.handle_delete_by_all_tags(arguments)
2405 |                 elif name == "cleanup_duplicates":
2406 |                     return await self.handle_cleanup_duplicates(arguments)
2407 |                 elif name == "debug_retrieve":
2408 |                     return await self.handle_debug_retrieve(arguments)
2409 |                 elif name == "exact_match_retrieve":
2410 |                     return await self.handle_exact_match_retrieve(arguments)
2411 |                 elif name == "get_raw_embedding":
2412 |                     return await self.handle_get_raw_embedding(arguments)
2413 |                 elif name == "check_database_health":
2414 |                     logger.info("Calling handle_check_database_health")
2415 |                     return await self.handle_check_database_health(arguments)
2416 |                 elif name == "get_cache_stats":
2417 |                     logger.info("Calling handle_get_cache_stats")
2418 |                     return await self.handle_get_cache_stats(arguments)
2419 |                 elif name == "recall_by_timeframe":
2420 |                     return await self.handle_recall_by_timeframe(arguments)
2421 |                 elif name == "delete_by_timeframe":
2422 |                     return await self.handle_delete_by_timeframe(arguments)
2423 |                 elif name == "delete_before_date":
2424 |                     return await self.handle_delete_before_date(arguments)
2425 |                 elif name == "update_memory_metadata":
2426 |                     logger.info("Calling handle_update_memory_metadata")
2427 |                     return await self.handle_update_memory_metadata(arguments)
2428 |                 # Consolidation tool handlers
2429 |                 elif name == "consolidate_memories":
2430 |                     logger.info("Calling handle_consolidate_memories")
2431 |                     return await self.handle_consolidate_memories(arguments)
2432 |                 elif name == "consolidation_status":
2433 |                     logger.info("Calling handle_consolidation_status")
2434 |                     return await self.handle_consolidation_status(arguments)
2435 |                 elif name == "consolidation_recommendations":
2436 |                     logger.info("Calling handle_consolidation_recommendations")
2437 |                     return await self.handle_consolidation_recommendations(arguments)
2438 |                 elif name == "scheduler_status":
2439 |                     logger.info("Calling handle_scheduler_status")
2440 |                     return await self.handle_scheduler_status(arguments)
2441 |                 elif name == "trigger_consolidation":
2442 |                     logger.info("Calling handle_trigger_consolidation")
2443 |                     return await self.handle_trigger_consolidation(arguments)
2444 |                 elif name == "pause_consolidation":
2445 |                     logger.info("Calling handle_pause_consolidation")
2446 |                     return await self.handle_pause_consolidation(arguments)
2447 |                 elif name == "resume_consolidation":
2448 |                     logger.info("Calling handle_resume_consolidation")
2449 |                     return await self.handle_resume_consolidation(arguments)
2450 |                 elif name == "ingest_document":
2451 |                     logger.info("Calling handle_ingest_document")
2452 |                     return await self.handle_ingest_document(arguments)
2453 |                 elif name == "ingest_directory":
2454 |                     logger.info("Calling handle_ingest_directory")
2455 |                     return await self.handle_ingest_directory(arguments)
2456 |                 # Quality system tool handlers
2457 |                 elif name == "rate_memory":
2458 |                     logger.info("Calling handle_rate_memory")
2459 |                     return await self.handle_rate_memory(arguments)
2460 |                 elif name == "get_memory_quality":
2461 |                     logger.info("Calling handle_get_memory_quality")
2462 |                     return await self.handle_get_memory_quality(arguments)
2463 |                 elif name == "analyze_quality_distribution":
2464 |                     logger.info("Calling handle_analyze_quality_distribution")
2465 |                     return await self.handle_analyze_quality_distribution(arguments)
2466 |                 # Graph traversal tool handlers
2467 |                 elif name == "find_connected_memories":
2468 |                     logger.info("Calling handle_find_connected_memories")
2469 |                     return await self.handle_find_connected_memories(arguments)
2470 |                 elif name == "find_shortest_path":
2471 |                     logger.info("Calling handle_find_shortest_path")
2472 |                     return await self.handle_find_shortest_path(arguments)
2473 |                 elif name == "get_memory_subgraph":
2474 |                     logger.info("Calling handle_get_memory_subgraph")
2475 |                     return await self.handle_get_memory_subgraph(arguments)
2476 |                 else:
2477 |                     logger.warning(f"Unknown tool requested: {name}")
2478 |                     raise ValueError(f"Unknown tool: {name}")
2479 |             except Exception as e:
2480 |                 error_msg = f"Error in {name}: {str(e)}\n{traceback.format_exc()}"
2481 |                 logger.error(error_msg)
2482 |                 print(f"ERROR in tool execution: {error_msg}", file=sys.stderr, flush=True)
2483 |                 return [types.TextContent(type="text", text=f"Error: {str(e)}")]
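            # Every tool name above maps to a handle_<name> method, so the elif
            # chain could collapse into one lookup. A sketch (it would sit inside
            # the try block; an explicit dispatch dict would be safer than getattr
            # if unrelated handle_* methods ever exist on this class):
            #
            #   handler = getattr(self, f"handle_{name}", None)
            #   if handler is None:
            #       raise ValueError(f"Unknown tool: {name}")
            #   return await handler(arguments)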
2484 | 
    async def handle_store_memory(self, arguments: dict) -> List[types.TextContent]:
        """Store new memory (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_store_memory(self, arguments)

    async def handle_retrieve_memory(self, arguments: dict) -> List[types.TextContent]:
        """Retrieve memories (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_retrieve_memory(self, arguments)

    async def handle_retrieve_with_quality_boost(self, arguments: dict) -> List[types.TextContent]:
        """Handle quality-boosted memory retrieval with reranking (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_retrieve_with_quality_boost(self, arguments)

    async def handle_search_by_tag(self, arguments: dict) -> List[types.TextContent]:
        """Search by tag (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_search_by_tag(self, arguments)

    async def handle_delete_memory(self, arguments: dict) -> List[types.TextContent]:
        """Delete memory (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_memory(self, arguments)

    async def handle_delete_by_tag(self, arguments: dict) -> List[types.TextContent]:
        """Handler for deleting memories by tags (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_by_tag(self, arguments)

    async def handle_delete_by_tags(self, arguments: dict) -> List[types.TextContent]:
        """Handler for explicit multiple tag deletion with progress tracking (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_by_tags(self, arguments)

    async def handle_delete_by_all_tags(self, arguments: dict) -> List[types.TextContent]:
        """Handler for deleting memories that contain ALL specified tags (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_by_all_tags(self, arguments)

    async def handle_cleanup_duplicates(self, arguments: dict) -> List[types.TextContent]:
        """Cleanup duplicates (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_cleanup_duplicates(self, arguments)

    async def handle_update_memory_metadata(self, arguments: dict) -> List[types.TextContent]:
        """Handle memory metadata update requests (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_update_memory_metadata(self, arguments)

    # Consolidation tool handlers
    async def handle_consolidate_memories(self, arguments: dict) -> List[types.TextContent]:
        """Handle memory consolidation requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_consolidate_memories(self, arguments)

    async def handle_consolidation_status(self, arguments: dict) -> List[types.TextContent]:
        """Handle consolidation status requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_consolidation_status(self, arguments)

    async def handle_consolidation_recommendations(self, arguments: dict) -> List[types.TextContent]:
        """Handle consolidation recommendation requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_consolidation_recommendations(self, arguments)

    async def handle_scheduler_status(self, arguments: dict) -> List[types.TextContent]:
        """Handle scheduler status requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_scheduler_status(self, arguments)

    async def handle_trigger_consolidation(self, arguments: dict) -> List[types.TextContent]:
        """Handle manual consolidation trigger requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_trigger_consolidation(self, arguments)

    async def handle_pause_consolidation(self, arguments: dict) -> List[types.TextContent]:
        """Handle consolidation pause requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_pause_consolidation(self, arguments)

    async def handle_resume_consolidation(self, arguments: dict) -> List[types.TextContent]:
        """Handle consolidation resume requests (delegates to handler)."""
        from .server.handlers import consolidation as consolidation_handlers
        return await consolidation_handlers.handle_resume_consolidation(self, arguments)

    async def handle_debug_retrieve(self, arguments: dict) -> List[types.TextContent]:
        """Debug retrieve (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_debug_retrieve(self, arguments)

    async def handle_exact_match_retrieve(self, arguments: dict) -> List[types.TextContent]:
        """Exact match retrieve (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_exact_match_retrieve(self, arguments)

    async def handle_get_raw_embedding(self, arguments: dict) -> List[types.TextContent]:
        """Get raw embedding (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_get_raw_embedding(self, arguments)

    async def handle_recall_memory(self, arguments: dict) -> List[types.TextContent]:
        """Handle memory recall requests with natural language time expressions (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_recall_memory(self, arguments)

    async def handle_check_database_health(self, arguments: dict) -> List[types.TextContent]:
        """Handle database health check requests (delegates to handler)."""
        from .server.handlers import utility as utility_handlers
        return await utility_handlers.handle_check_database_health(self, arguments)

    async def handle_get_cache_stats(self, arguments: dict) -> List[types.TextContent]:
        """Get MCP server global cache statistics (delegates to handler)."""
        from .server.handlers import utility as utility_handlers
        return await utility_handlers.handle_get_cache_stats(self, arguments)

    async def handle_recall_by_timeframe(self, arguments: dict) -> List[types.TextContent]:
        """Handle recall by timeframe requests (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_recall_by_timeframe(self, arguments)

    async def handle_delete_by_timeframe(self, arguments: dict) -> List[types.TextContent]:
        """Handle delete by timeframe requests (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_by_timeframe(self, arguments)

    async def handle_delete_before_date(self, arguments: dict) -> List[types.TextContent]:
        """Handle delete before date requests (delegates to handler)."""
        from .server.handlers import memory as memory_handlers
        return await memory_handlers.handle_delete_before_date(self, arguments)

    async def handle_ingest_document(self, arguments: dict) -> List[types.TextContent]:
        """Handle document ingestion requests (delegates to handler)."""
        from .server.handlers import documents as document_handlers
        return await document_handlers.handle_ingest_document(self, arguments)

    async def handle_ingest_directory(self, arguments: dict) -> List[types.TextContent]:
        """Handle directory ingestion requests (delegates to handler)."""
        from .server.handlers import documents as document_handlers
        return await document_handlers.handle_ingest_directory(self, arguments)

    async def handle_rate_memory(self, arguments: dict) -> List[types.TextContent]:
        """Handle memory quality rating (delegates to handler)."""
        from .server.handlers import quality as quality_handlers
        return await quality_handlers.handle_rate_memory(self, arguments)

    async def handle_get_memory_quality(self, arguments: dict) -> List[types.TextContent]:
        """Get memory quality metrics (delegates to handler)."""
        from .server.handlers import quality as quality_handlers
        return await quality_handlers.handle_get_memory_quality(self, arguments)

    async def handle_analyze_quality_distribution(self, arguments: dict) -> List[types.TextContent]:
        """Analyze quality distribution (delegates to handler)."""
        from .server.handlers import quality as quality_handlers
        return await quality_handlers.handle_analyze_quality_distribution(self, arguments)

    async def handle_find_connected_memories(self, arguments: dict) -> List[types.TextContent]:
        """Find connected memories (delegates to handler)."""
        from .server.handlers import graph as graph_handlers
        return await graph_handlers.handle_find_connected_memories(self, arguments)

    async def handle_find_shortest_path(self, arguments: dict) -> List[types.TextContent]:
        """Find shortest path between memories (delegates to handler)."""
        from .server.handlers import graph as graph_handlers
        return await graph_handlers.handle_find_shortest_path(self, arguments)

    async def handle_get_memory_subgraph(self, arguments: dict) -> List[types.TextContent]:
        """Get memory subgraph for visualization (delegates to handler)."""
        from .server.handlers import graph as graph_handlers
        return await graph_handlers.handle_get_memory_subgraph(self, arguments)

    # ============================================================
    # Test Compatibility Wrapper Methods
    # ============================================================
    # These methods provide a simplified API for testing,
    # wrapping the underlying MemoryService and Storage calls.

    async def store_memory(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Store a new memory (test-compatible wrapper).

        Args:
            content: The memory content to store
            metadata: Optional metadata dict with tags, type, etc.

        Returns:
            Dictionary with operation result including success, memory/memories, and hash
        """
        await self._ensure_storage_initialized()

        # Extract metadata fields
        metadata = metadata or {}
        tags = metadata.get("tags", [])
        memory_type = metadata.get("type", "note")

        # Call MemoryService
        result = await self.memory_service.store_memory(
            content=content,
            tags=tags,
            memory_type=memory_type,
            metadata=metadata
        )

        # Add a 'hash' field for test compatibility
        if result.get("success"):
            if "memory" in result:
                # Single memory - add hash shortcut
                result["hash"] = result["memory"]["content_hash"]
            elif "memories" in result and len(result["memories"]) > 0:
                # Chunked - use first chunk's hash
                result["hash"] = result["memories"][0]["content_hash"]

        return result

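    # Example usage of store_memory above (hypothetical values; assumes an
    # initialized server instance):
    #     result = await server.store_memory(
    #         "Fixed cache invalidation bug",
    #         metadata={"tags": ["bugfix"], "type": "note"},
    #     )
    #     result["hash"]  # content hash (present when "success" is True)
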
    async def retrieve_memory(
        self,
        query: str,
        n_results: int = 5
    ) -> List[str]:
        """
        Retrieve memories using semantic search (test-compatible wrapper).

        Args:
            query: Search query
            n_results: Number of results to return

        Returns:
            List of memory content strings
        """
        await self._ensure_storage_initialized()

        result = await self.memory_service.retrieve_memories(
            query=query,
            n_results=n_results
        )

        # Extract just the content from each memory for test compatibility
        memories = result.get("memories", [])
        return [m["content"] for m in memories]

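    # Example usage of retrieve_memory above (hypothetical query and results):
    #     contents = await server.retrieve_memory("cache invalidation", n_results=3)
    #     # -> ["Fixed cache invalidation bug", ...]
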
    async def search_by_tag(
        self,
        tags: List[str]
    ) -> List[str]:
        """
        Search memories by tags (test-compatible wrapper).

        Args:
            tags: List of tags to search for

        Returns:
            List of memory content strings
        """
        await self._ensure_storage_initialized()

        # Call storage directly (search_by_tags is not in MemoryService)
        memories = await self.storage.search_by_tags(
            tags=tags,
            operation="OR"  # Match ANY tag (more permissive for tests)
        )

        return [m.content for m in memories]

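    # Example usage of search_by_tag above (hypothetical tags; OR semantics
    # return memories carrying ANY of the listed tags):
    #     contents = await server.search_by_tag(["bugfix", "refactor"])
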
    async def delete_memory(
        self,
        content_hash: str
    ) -> Dict[str, Any]:
        """
        Delete a memory by its content hash (test-compatible wrapper).

        Args:
            content_hash: The content hash of the memory to delete

        Returns:
            Dictionary with success status
        """
        await self._ensure_storage_initialized()

        result = await self.memory_service.delete_memory(content_hash=content_hash)
        return result

    async def check_database_health(self) -> Dict[str, Any]:
        """
        Check database health and get statistics (test-compatible wrapper).

        Returns:
            Dictionary with health status and statistics
        """
        await self._ensure_storage_initialized()

        # Get stats from storage
        stats = await self.storage.get_stats()

        return {
            "status": "healthy",
            "memory_count": stats.get("total_memories", 0),
            "database_size": stats.get("database_size_bytes", 0),
            "storage_type": stats.get("storage_backend", "unknown"),
            **stats  # Include all other stats
        }

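    # Example of the dict returned by check_database_health above
    # (illustrative values only; extra backend stats are merged in via **stats):
    #     {"status": "healthy", "memory_count": 42, "database_size": 1048576,
    #      "storage_type": "sqlite_vec", ...}
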
    async def create_backup(self, description: Optional[str] = None) -> Dict[str, Any]:
        """
        Create a database backup (test-compatible wrapper).

        Args:
            description: Optional description for the backup

        Returns:
            Dictionary with success status and backup path
        """
        await self._ensure_storage_initialized()

        # Use backup scheduler if available
        if hasattr(self, 'backup_scheduler') and self.backup_scheduler:
            result = await self.backup_scheduler.create_backup(description)
            # Normalize response for test compatibility
            if result.get('success'):
                return {
                    "success": True,
                    "backup_path": result.get('path')
                }
            return result

        # Fallback: create the backup directly when no scheduler is configured.
        # Note: this fallback path does not record the description anywhere.
        from pathlib import Path
        import sqlite3
        import asyncio
        from datetime import datetime, timezone
        import tempfile

        try:
            # Get database path from storage
            db_path = None
            if hasattr(self.storage, 'db_path'):
                db_path = self.storage.db_path
            elif hasattr(self.storage, 'sqlite_storage') and hasattr(self.storage.sqlite_storage, 'db_path'):
                db_path = self.storage.sqlite_storage.db_path

            # Handle in-memory databases (for tests)
            if not db_path or db_path == ':memory:':
                # An in-memory database has no file to copy; create an empty
                # placeholder so test callers still receive a valid path.
                timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
                backup_filename = f"memory_backup_{timestamp}.db"
                temp_dir = Path(tempfile.gettempdir()) / "mcp_test_backups"
                temp_dir.mkdir(exist_ok=True)
                backup_path = temp_dir / backup_filename

                backup_path.touch()

                return {
                    "success": True,
                    "backup_path": str(backup_path)
                }

            if not Path(db_path).exists():
                return {
                    "success": False,
                    "error": f"Database file not found: {db_path}"
                }

            # Create backups directory
            backups_dir = Path(db_path).parent / "backups"
            backups_dir.mkdir(exist_ok=True)

            # Generate backup filename
            timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
            backup_filename = f"memory_backup_{timestamp}.db"
            backup_path = backups_dir / backup_filename

            # Create backup using SQLite's native backup API, which is safe
            # on a live database (unlike a plain file copy)
            def _do_backup():
                source = sqlite3.connect(str(db_path))
                dest = sqlite3.connect(str(backup_path))
                try:
                    source.backup(dest)
                finally:
                    source.close()
                    dest.close()

            await asyncio.to_thread(_do_backup)

            return {
                "success": True,
                "backup_path": str(backup_path)
            }

        except Exception as e:
            return {
                "success": False,
                "error": str(e)
            }

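    # Example usage of create_backup above (hypothetical result path):
    #     result = await server.create_backup("pre-migration snapshot")
    #     # -> {"success": True,
    #     #     "backup_path": ".../backups/memory_backup_20250101_120000.db"}
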
    async def optimize_db(self) -> Dict[str, Any]:
        """
        Optimize database by running VACUUM and rebuilding indexes (test-compatible wrapper).

        Returns:
            Dictionary with success status and optimized size
        """
        await self._ensure_storage_initialized()

        try:
            # Get database path
            db_path = None
            if hasattr(self.storage, 'db_path'):
                db_path = self.storage.db_path
            elif hasattr(self.storage, 'sqlite_storage') and hasattr(self.storage.sqlite_storage, 'db_path'):
                db_path = self.storage.sqlite_storage.db_path

            # Handle in-memory databases (for tests)
            if not db_path or db_path == ':memory:':
                return {
                    "success": True,
                    "optimized_size": 0,
                    "size_before": 0,
                    "size_saved": 0
                }

            from pathlib import Path
            import sqlite3
            import asyncio

            if not Path(db_path).exists():
                return {
                    "success": False,
                    "error": f"Database file not found: {db_path}"
                }

            # Get size before optimization
            size_before = Path(db_path).stat().st_size

            # Run VACUUM to reclaim free pages, then ANALYZE to refresh the
            # query planner statistics
            def _do_optimize():
                conn = sqlite3.connect(str(db_path))
                try:
                    conn.execute("VACUUM")
                    conn.execute("ANALYZE")
                    conn.commit()
                finally:
                    conn.close()

            await asyncio.to_thread(_do_optimize)

            # Get size after optimization
            size_after = Path(db_path).stat().st_size

            return {
                "success": True,
                "optimized_size": size_after,
                "size_before": size_before,
                "size_saved": size_before - size_after
            }

        except Exception as e:
            return {
                "success": False,
                "error": str(e)
            }

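    # Example of the dict returned by optimize_db above (illustrative byte
    # counts; size_saved = size_before - optimized_size):
    #     {"success": True, "size_before": 2097152,
    #      "optimized_size": 1572864, "size_saved": 524288}
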
    async def cleanup_duplicates(self) -> Dict[str, Any]:
        """
        Remove duplicate memories (test-compatible wrapper).

        Returns:
            Dictionary with success status and duplicates removed count
        """
        await self._ensure_storage_initialized()

        try:
            # Call storage's cleanup_duplicates method
            count_removed, message = await self.storage.cleanup_duplicates()

            return {
                "success": True,
                "duplicates_removed": count_removed,
                "message": message
            }

        except Exception as e:
            return {
                "success": False,
                "duplicates_removed": 0,
                "error": str(e)
            }

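    # Example usage of cleanup_duplicates above (illustrative result):
    #     result = await server.cleanup_duplicates()
    #     # -> {"success": True, "duplicates_removed": 3, "message": "..."}
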
    async def exact_match_retrieve(self, content: str) -> List[str]:
        """
        Retrieve memories using exact content match (test-compatible wrapper).

        Args:
            content: Exact content to match

        Returns:
            List of memory content strings that exactly match
        """
        await self._ensure_storage_initialized()

        try:
            # Use semantic search with the exact content as the query; the
            # closest matches should include any exact duplicates.
            results = await self.storage.retrieve(content, n_results=50)

            # Filter for exact matches only
            exact_matches = []
            for result in results:
                if result.memory.content == content:
                    exact_matches.append(result.memory.content)

            return exact_matches
        except Exception as e:
            # Log and return an empty list so callers degrade gracefully
            logger.warning(f"exact_match_retrieve failed: {e}")
            return []

    async def debug_retrieve(
        self,
        query: str,
        n_results: int = 5,
        similarity_threshold: float = 0.0
    ) -> List[str]:
        """
        Retrieve memories with debug information (test-compatible wrapper).

        Args:
            query: Search query
            n_results: Number of results to return
            similarity_threshold: Minimum similarity threshold

        Returns:
            List of memory content strings
        """
        await self._ensure_storage_initialized()

        try:
            from .utils.debug import debug_retrieve_memory
            results = await debug_retrieve_memory(
                self.storage,
                query=query,
                n_results=n_results,
                similarity_threshold=similarity_threshold
            )
            return [result.memory.content for result in results]
        except Exception as e:
            # Log and return an empty list so callers degrade gracefully
            logger.warning(f"debug_retrieve failed: {e}")
            return []

    async def shutdown(self) -> None:
        """
        Shutdown the server and cleanup resources.

        This method properly cleans up all caches and resources to free memory.
        Called during graceful shutdown (SIGTERM/SIGINT) or process exit.

        Cleanup includes:
        - Service and storage caches (cache_manager)
        - Embedding model caches (sqlite_vec)
        - Garbage collection to reclaim memory
        """
        import gc
        from .server.cache_manager import clear_all_caches
        from .storage.sqlite_vec import clear_model_caches

        logger.info("Initiating graceful shutdown...")

        try:
            # Clear service and storage caches
            cache_stats = clear_all_caches()
            logger.info(f"Cleared service caches: {cache_stats}")

            # Clear model caches (embedding models)
            model_stats = clear_model_caches()
            logger.info(f"Cleared model caches: {model_stats}")

            # Force garbage collection to reclaim memory
            gc_collected = gc.collect()
            logger.info(f"Garbage collection: {gc_collected} objects collected")

            logger.info("Graceful shutdown complete")
        except Exception as e:
            logger.warning(f"Error during shutdown cleanup: {e}")


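# NOTE: MemoryServer.shutdown() above covers the async shutdown path, while
# _cleanup_on_shutdown() further below repeats the same cache-clearing steps
# for synchronous contexts (signal handlers and atexit); the two should be
# kept in sync whenever the cleanup logic changes.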
def _print_system_diagnostics(system_info: Any) -> None:
    """Print system diagnostics for LM Studio."""
    print("\n=== MCP Memory Service System Diagnostics ===", file=sys.stdout, flush=True)
    print(f"OS: {system_info.os_name} {system_info.architecture}", file=sys.stdout, flush=True)
    print(f"Python: {platform.python_version()}", file=sys.stdout, flush=True)
    print(f"Hardware Acceleration: {system_info.accelerator}", file=sys.stdout, flush=True)
    print(f"Memory: {system_info.memory_gb:.2f} GB", file=sys.stdout, flush=True)
    print(f"Optimal Model: {system_info.get_optimal_model()}", file=sys.stdout, flush=True)
    print(f"Optimal Batch Size: {system_info.get_optimal_batch_size()}", file=sys.stdout, flush=True)
    print(f"Storage Backend: {STORAGE_BACKEND}", file=sys.stdout, flush=True)
    print("================================================\n", file=sys.stdout, flush=True)


async def async_main():
    """Main async entry point for MCP Memory Service."""
    from .utils.startup_orchestrator import (
        StartupCheckOrchestrator,
        InitializationRetryManager,
        ServerRunManager
    )

    # Run all startup checks
    StartupCheckOrchestrator.run_all_checks()

    # Print system diagnostics only for LM Studio
    system_info = get_system_info()
    if MCP_CLIENT == 'lm_studio':
        _print_system_diagnostics(system_info)

    logger.info(f"Starting MCP Memory Service with storage backend: {STORAGE_BACKEND}")

    try:
        # Create server instance
        memory_server = MemoryServer()

        # Initialize with retry logic
        retry_manager = InitializationRetryManager(max_retries=2, timeout=30.0, retry_delay=2.0)
        await retry_manager.initialize_with_retry(memory_server)

        # Run server based on mode
        run_manager = ServerRunManager(memory_server, system_info)

        if ServerRunManager.is_standalone_mode():
            await run_manager.run_standalone()
        else:
            await run_manager.run_stdio()

    except Exception as e:
        logger.error(f"Server error: {str(e)}")
        logger.error(traceback.format_exc())
        print(f"Fatal server error: {str(e)}", file=sys.stderr, flush=True)
        raise


def _cleanup_on_shutdown():
    """
    Cleanup function called on process shutdown (SIGTERM, SIGINT, KeyboardInterrupt).

    This function clears all caches to free memory and runs garbage collection.
    It's designed to be called from signal handlers (synchronous context).
    """
    import gc
    from .server.cache_manager import clear_all_caches
    from .storage.sqlite_vec import clear_model_caches

    try:
        logger.info("Running shutdown cleanup...")

        # Clear service and storage caches
        cache_stats = clear_all_caches()
        logger.info(f"Cleared service caches: {cache_stats}")

        # Clear model caches (embedding models)
        model_stats = clear_model_caches()
        logger.info(f"Cleared model caches: {model_stats}")

        # Force garbage collection
        gc_collected = gc.collect()
        logger.info(f"Garbage collection: {gc_collected} objects collected")

        logger.info("Shutdown cleanup complete")
    except Exception as e:
        logger.warning(f"Error during shutdown cleanup: {e}")


def main():
    import signal
    import atexit

    # Register cleanup function for normal exit
    atexit.register(_cleanup_on_shutdown)

    # Set up signal handlers for graceful shutdown
    def signal_handler(signum, frame):
        logger.info(f"Received signal {signum}, shutting down gracefully...")
        _cleanup_on_shutdown()
        sys.exit(0)

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
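    # NOTE: On SIGTERM/SIGINT the cleanup runs twice - once here via the
    # handler and once via atexit when sys.exit(0) unwinds. The cache
    # clearing and gc.collect() calls are idempotent, so this is harmless.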

    try:
        # Check if running in Docker
        if os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER', False):
            logger.info("Running in Docker container")
            if MCP_CLIENT == 'lm_studio':
                print("MCP Memory Service starting in Docker mode", file=sys.stdout, flush=True)

        asyncio.run(async_main())
    except KeyboardInterrupt:
        logger.info("Shutting down gracefully (KeyboardInterrupt)...")
        _cleanup_on_shutdown()
    except Exception as e:
        logger.error(f"Fatal error: {str(e)}\n{traceback.format_exc()}")
        sys.exit(1)


if __name__ == "__main__":
    main()

```