This is page 46 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/server.py:
--------------------------------------------------------------------------------

```python
   1 | # Copyright 2024 Heinrich Krupp
   2 | #
   3 | # Licensed under the Apache License, Version 2.0 (the "License");
   4 | # you may not use this file except in compliance with the License.
   5 | # You may obtain a copy of the License at
   6 | #
   7 | #     http://www.apache.org/licenses/LICENSE-2.0
   8 | #
   9 | # Unless required by applicable law or agreed to in writing, software
  10 | # distributed under the License is distributed on an "AS IS" BASIS,
  11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 | # See the License for the specific language governing permissions and
  13 | # limitations under the License.
  14 | 
  15 | """
  16 | MCP Memory Service
  17 | Copyright (c) 2024 Heinrich Krupp
  18 | Licensed under the Apache License, Version 2.0. See LICENSE file in the project root for full license text.
  19 | """
  20 | import sys
  21 | import os
  22 | import socket
  23 | import time
  24 | import logging
  25 | import psutil
  26 | 
  27 | # Client detection for environment-aware behavior
  28 | def detect_mcp_client():
  29 |     """Detect which MCP client is running this server."""
  30 |     try:
  31 |         # Get the parent process (the MCP client)
  32 |         current_process = psutil.Process()
  33 |         parent = current_process.parent()
  34 |         
  35 |         if parent:
  36 |             parent_name = parent.name().lower()
  37 |             parent_exe = parent.exe() if hasattr(parent, 'exe') else ""
  38 |             
  39 |             # Check for Claude Desktop
  40 |             if 'claude' in parent_name or 'claude' in parent_exe.lower():
  41 |                 return 'claude_desktop'
  42 |             
  43 |             # Check for LM Studio
  44 |             if 'lmstudio' in parent_name or 'lm-studio' in parent_name or 'lmstudio' in parent_exe.lower():
  45 |                 return 'lm_studio'
  46 |             
  47 |             # Check command line for additional clues
  48 |             try:
  49 |                 cmdline = parent.cmdline()
  50 |                 cmdline_str = ' '.join(cmdline).lower()
  51 |                 
  52 |                 if 'claude' in cmdline_str:
  53 |                     return 'claude_desktop'
  54 |                 if 'lmstudio' in cmdline_str or 'lm-studio' in cmdline_str:
  55 |                     return 'lm_studio'
  56 |             except (OSError, IndexError, AttributeError):
  57 |                 # Can't log here: the module-level logger isn't configured until later in the file.
  58 |                 pass
  59 |         
  60 |         # Fallback: check environment variables
  61 |         if os.getenv('CLAUDE_DESKTOP'):
  62 |             return 'claude_desktop'
  63 |         if os.getenv('LM_STUDIO'):
  64 |             return 'lm_studio'
  65 |             
  66 |         # Default to Claude Desktop for strict JSON compliance
  67 |         return 'claude_desktop'
  68 |         
  69 |     except Exception:
  70 |         # If detection fails, default to Claude Desktop (strict mode)
  71 |         return 'claude_desktop'
  72 | 
  73 | # Detect the current MCP client
  74 | MCP_CLIENT = detect_mcp_client()
  75 | 
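# Illustrative usage sketch (hypothetical launcher commands, not taken from the
# project docs): because detection falls back to environment variables, a client
# configuration can pin the mode explicitly when parent-process inspection is
# ambiguous:
#
#   CLAUDE_DESKTOP=1 python run_server.py   # forces 'claude_desktop' (strict JSON mode)
#   LM_STUDIO=1      python run_server.py   # forces 'lm_studio' (dual-stream logging)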
  76 | # Custom logging handler that routes INFO/DEBUG to stdout, WARNING/ERROR to stderr
  77 | class DualStreamHandler(logging.Handler):
  78 |     """Client-aware handler that adjusts logging behavior based on MCP client."""
  79 |     
  80 |     def __init__(self, client_type='claude_desktop'):
  81 |         super().__init__()
  82 |         self.client_type = client_type
  83 |         self.stdout_handler = logging.StreamHandler(sys.stdout)
  84 |         self.stderr_handler = logging.StreamHandler(sys.stderr)
  85 |         
  86 |         # Set the same formatter for both handlers
  87 |         formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
  88 |         self.stdout_handler.setFormatter(formatter)
  89 |         self.stderr_handler.setFormatter(formatter)
  90 |     
  91 |     def emit(self, record):
  92 |         """Route log records based on client type and level."""
  93 |         # For Claude Desktop: strict JSON mode - suppress most output, route everything to stderr
  94 |         if self.client_type == 'claude_desktop':
  95 |             # Only emit WARNING and above to stderr to maintain JSON protocol
  96 |             if record.levelno >= logging.WARNING:
  97 |                 self.stderr_handler.emit(record)
  98 |             # Suppress INFO/DEBUG for Claude Desktop to prevent JSON parsing errors
  99 |             return
 100 |         
 101 |         # For LM Studio: enhanced mode with dual-stream
 102 |         if record.levelno >= logging.WARNING:  # WARNING, ERROR, CRITICAL
 103 |             self.stderr_handler.emit(record)
 104 |         else:  # DEBUG, INFO
 105 |             self.stdout_handler.emit(record)
 106 | 
 107 | # Configure logging with client-aware handler BEFORE any imports that use logging
 108 | log_level = os.getenv('LOG_LEVEL', 'WARNING').upper()  # Default to WARNING for performance
 109 | root_logger = logging.getLogger()
 110 | root_logger.setLevel(getattr(logging, log_level, logging.WARNING))
 111 | 
 112 | # Remove any existing handlers to avoid duplicates
 113 | for handler in root_logger.handlers[:]:
 114 |     root_logger.removeHandler(handler)
 115 | 
 116 | # Add our custom client-aware handler
 117 | client_aware_handler = DualStreamHandler(client_type=MCP_CLIENT)
 118 | root_logger.addHandler(client_aware_handler)
 119 | 
 120 | logger = logging.getLogger(__name__)
 121 | 
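# Routing sketch (illustrative, assuming LOG_LEVEL=INFO so records pass the root
# level filter): with the DualStreamHandler installed above,
#
#   logger.info("loaded 42 memories")
#       claude_desktop -> suppressed (keeps stdout free for JSON-RPC traffic)
#       lm_studio      -> stdout
#   logger.error("backend unreachable")
#       either client  -> stderr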
 122 | # Enhanced path detection for Claude Desktop compatibility
 123 | def setup_python_paths():
 124 |     """Setup Python paths for dependency access."""
 125 |     current_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
 126 |     
 127 |     # Check for virtual environment
 128 |     potential_venv_paths = [
 129 |         os.path.join(current_dir, 'venv', 'Lib', 'site-packages'),  # Windows venv
 130 |         os.path.join(current_dir, 'venv', 'lib', 'python3.11', 'site-packages'),  # Linux/Mac venv
 131 |         os.path.join(current_dir, '.venv', 'Lib', 'site-packages'),  # Windows .venv
 132 |         os.path.join(current_dir, '.venv', 'lib', 'python3.11', 'site-packages'),  # Linux/Mac .venv
 133 |     ]
 134 |     
 135 |     for venv_path in potential_venv_paths:
 136 |         if os.path.exists(venv_path):
 137 |             sys.path.insert(0, venv_path)
 138 |             logger.debug(f"Added venv path: {venv_path}")
 139 |             break
 140 |     
 141 |     # For Claude Desktop: also check if we can access global site-packages
 142 |     try:
 143 |         import site
 144 |         global_paths = site.getsitepackages()
 145 |         user_path = site.getusersitepackages()
 146 |         
 147 |         # Add user site-packages if not blocked by PYTHONNOUSERSITE
 148 |         if not os.environ.get('PYTHONNOUSERSITE') and user_path not in sys.path:
 149 |             sys.path.append(user_path)
 150 |             logger.debug(f"Added user site-packages: {user_path}")
 151 |         
 152 |         # Add global site-packages if available
 153 |         for path in global_paths:
 154 |             if path not in sys.path:
 155 |                 sys.path.append(path)
 156 |                 logger.debug(f"Added global site-packages: {path}")
 157 |                 
 158 |     except Exception as e:
 159 |         logger.warning(f"Could not access site-packages: {e}")
 160 | 
 161 | # Setup paths before other imports
 162 | setup_python_paths()
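# Path-resolution sketch (hypothetical checkout location): __file__ lives at
# src/mcp_memory_service/server.py, so three dirname() calls climb to the repo
# root. For a checkout at C:\mcp-memory-service the candidates are tried in order:
#   C:\mcp-memory-service\venv\Lib\site-packages        (Windows venv)
#   C:\mcp-memory-service\venv\lib\python3.11\site-packages
#   C:\mcp-memory-service\.venv\Lib\site-packages
#   C:\mcp-memory-service\.venv\lib\python3.11\site-packages
# and the first one that exists is inserted at the head of sys.path.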
 163 | import asyncio
 164 | import traceback
 165 | import json
 166 | import platform
 167 | from collections import deque
 168 | from typing import List, Dict, Any, Optional, Tuple
 169 | from datetime import datetime, timedelta
 170 | 
 171 | from mcp.server.models import InitializationOptions
 172 | import mcp.types as types
 173 | from mcp.server import NotificationOptions, Server
 174 | import mcp.server.stdio
 175 | from mcp.types import Resource, Prompt
 176 | 
 177 | from . import __version__
 178 | from .lm_studio_compat import patch_mcp_for_lm_studio, add_windows_timeout_handling
 179 | from .dependency_check import run_dependency_check, get_recommended_timeout
 180 | from .config import (
 181 |     BACKUPS_PATH,
 182 |     SERVER_NAME,
 183 |     SERVER_VERSION,
 184 |     STORAGE_BACKEND,
 185 |     SQLITE_VEC_PATH,
 186 |     CONSOLIDATION_ENABLED,
 187 |     CONSOLIDATION_CONFIG,
 188 |     CONSOLIDATION_SCHEDULE,
 189 |     INCLUDE_HOSTNAME,
 190 |     # Cloudflare configuration
 191 |     CLOUDFLARE_API_TOKEN,
 192 |     CLOUDFLARE_ACCOUNT_ID,
 193 |     CLOUDFLARE_VECTORIZE_INDEX,
 194 |     CLOUDFLARE_D1_DATABASE_ID,
 195 |     CLOUDFLARE_R2_BUCKET,
 196 |     CLOUDFLARE_EMBEDDING_MODEL,
 197 |     CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 198 |     CLOUDFLARE_MAX_RETRIES,
 199 |     CLOUDFLARE_BASE_DELAY,
 200 |     # Hybrid backend configuration
 201 |     HYBRID_SYNC_INTERVAL,
 202 |     HYBRID_BATCH_SIZE,
 203 |     HYBRID_SYNC_ON_STARTUP
 204 | )
 205 | # Storage imports will be done conditionally in the server class
 206 | from .models.memory import Memory
 207 | from .utils.hashing import generate_content_hash
 208 | from .utils.document_processing import _process_and_store_chunk
 209 | from .utils.system_detection import (
 210 |     get_system_info,
 211 |     print_system_diagnostics,
 212 |     AcceleratorType
 213 | )
 214 | from .services.memory_service import MemoryService
 215 | from .utils.time_parser import extract_time_expression, parse_time_expression
 216 | 
 217 | # Consolidation system imports (conditional)
 218 | if CONSOLIDATION_ENABLED:
 219 |     from .consolidation.base import ConsolidationConfig
 220 |     from .consolidation.consolidator import DreamInspiredConsolidator
 221 |     from .consolidation.scheduler import ConsolidationScheduler
 222 | 
 223 | # Note: Logging is already configured at the top of the file with dual-stream handler
 224 | 
 225 | # Configure performance-critical module logging
 226 | if not os.getenv('DEBUG_MODE'):
 227 |     # Set higher log levels for performance-critical modules
 228 |     for module_name in ['sentence_transformers', 'transformers', 'torch', 'numpy']:
 229 |         logging.getLogger(module_name).setLevel(logging.WARNING)
 230 | 
 231 | # Check if UV is being used
 232 | def check_uv_environment():
 233 |     """Check if UV is being used and provide recommendations if not."""
 234 |     running_with_uv = 'UV_ACTIVE' in os.environ or any('uv' in arg.lower() for arg in sys.argv)
 235 | 
 236 |     if not running_with_uv:
 237 |         logger.info("Memory server is running without UV. For better performance and dependency management, consider using UV:")
 238 |         logger.info("  pip install uv")
 239 |         logger.info("  uv run memory")
 240 |     else:
 241 |         logger.info("Memory server is running with UV")
 242 | 
 243 | def check_version_consistency():
 244 |     """
 245 |     Check if installed package version matches source code version.
 246 | 
 247 |     Warns if version mismatch detected (common "stale venv" issue).
 248 |     This helps catch the scenario where source code is updated but
 249 |     the package wasn't reinstalled with 'pip install -e .'.
 250 |     """
 251 |     try:
 252 |         # Get source code version (from __init__.py)
 253 |         source_version = __version__
 254 | 
 255 |         # Get installed package version (from package metadata)
 256 |         try:
 257 |             import pkg_resources
 258 |             installed_version = pkg_resources.get_distribution("mcp-memory-service").version
  259 |         except Exception:
 260 |             # If pkg_resources fails, try importlib.metadata (Python 3.8+)
 261 |             try:
 262 |                 from importlib import metadata
 263 |                 installed_version = metadata.version("mcp-memory-service")
  264 |             except Exception:
 265 |                 # Can't determine installed version - skip check
 266 |                 return
 267 | 
 268 |         # Compare versions
 269 |         if installed_version != source_version:
 270 |             logger.warning("=" * 70)
 271 |             logger.warning("⚠️  VERSION MISMATCH DETECTED!")
 272 |             logger.warning(f"   Source code: v{source_version}")
 273 |             logger.warning(f"   Installed:   v{installed_version}")
 274 |             logger.warning("")
 275 |             logger.warning("   This usually means you need to run:")
 276 |             logger.warning("   pip install -e . --force-reinstall")
 277 |             logger.warning("")
 278 |             logger.warning("   Then restart the MCP server:")
 279 |             logger.warning("   - In Claude Code: Run /mcp")
 280 |             logger.warning("   - In Claude Desktop: Restart the application")
 281 |             logger.warning("=" * 70)
 282 |         else:
 283 |             logger.debug(f"Version check OK: v{source_version}")
 284 | 
 285 |     except Exception as e:
 286 |         # Don't fail server startup on version check errors
 287 |         logger.debug(f"Version check failed (non-critical): {e}")
 288 | 
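# Manual check sketch (illustrative shell commands; assumes the package was
# installed editable): the same comparison can be run without starting the
# server, to diagnose a stale venv:
#
#   python -c "from importlib import metadata; print(metadata.version('mcp-memory-service'))"
#   python -c "import mcp_memory_service; print(mcp_memory_service.__version__)"
#
# If the two versions differ, reinstall with: pip install -e . --force-reinstall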
 289 | # Configure environment variables based on detected system
 290 | def configure_environment():
 291 |     """Configure environment variables based on detected system."""
 292 |     system_info = get_system_info()
 293 |     
 294 |     # Log system information
 295 |     logger.info(f"Detected system: {system_info.os_name} {system_info.architecture}")
 296 |     logger.info(f"Memory: {system_info.memory_gb:.2f} GB")
 297 |     logger.info(f"Accelerator: {system_info.accelerator}")
 298 |     
 299 |     # Set environment variables for better cross-platform compatibility
 300 |     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 301 |     
 302 |     # For Apple Silicon, ensure we use MPS when available
 303 |     if system_info.architecture == "arm64" and system_info.os_name == "darwin":
 304 |         logger.info("Configuring for Apple Silicon")
 305 |         os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
 306 |     
 307 |     # For Windows with limited GPU memory, use smaller chunks
 308 |     if system_info.os_name == "windows" and system_info.accelerator == AcceleratorType.CUDA:
 309 |         logger.info("Configuring for Windows with CUDA")
 310 |         os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
 311 |     
 312 |     # For Linux with ROCm, ensure we use the right backend
 313 |     if system_info.os_name == "linux" and system_info.accelerator == AcceleratorType.ROCm:
 314 |         logger.info("Configuring for Linux with ROCm")
 315 |         os.environ["HSA_OVERRIDE_GFX_VERSION"] = "10.3.0"
 316 |     
 317 |     # For systems with limited memory, reduce cache sizes
 318 |     if system_info.memory_gb < 8:
 319 |         logger.info("Configuring for low-memory system")
 320 |         # Use BACKUPS_PATH parent directory for model caches
 321 |         cache_base = os.path.dirname(BACKUPS_PATH)
 322 |         os.environ["TRANSFORMERS_CACHE"] = os.path.join(cache_base, "model_cache")
 323 |         os.environ["HF_HOME"] = os.path.join(cache_base, "hf_cache")
 324 |         os.environ["SENTENCE_TRANSFORMERS_HOME"] = os.path.join(cache_base, "st_cache")
 325 | 
 326 | # Configure environment before any imports that might use it
 327 | configure_environment()
 328 | 
 329 | # Performance optimization environment variables
 330 | def configure_performance_environment():
 331 |     """Configure environment variables for optimal performance."""
 332 |     # PyTorch optimizations
 333 |     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
 334 |     os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,garbage_collection_threshold:0.6"
 335 |     
 336 |     # CPU optimizations
 337 |     os.environ["OMP_NUM_THREADS"] = str(min(8, os.cpu_count() or 1))
 338 |     os.environ["MKL_NUM_THREADS"] = str(min(8, os.cpu_count() or 1))
 339 |     
 340 |     # Disable unnecessary features for performance
 341 |     os.environ["TOKENIZERS_PARALLELISM"] = "false"
 342 |     os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
 343 |     
 344 |     # Async CUDA operations
 345 |     os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
 346 | 
 347 | # Apply performance optimizations
 348 | configure_performance_environment()
 349 | 
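# Worked example (illustrative core counts): on a 16-core machine the thread caps
# resolve to str(min(8, 16)) == "8", while a 4-core machine gets "4"; tokenizer
# parallelism is disabled in both cases, which the tokenizers library otherwise
# warns about after fork().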
 350 | # =============================================================================
 351 | # GLOBAL CACHING FOR MCP SERVER PERFORMANCE OPTIMIZATION
 352 | # =============================================================================
 353 | # Module-level caches to persist storage/service instances across MCP tool calls.
 354 | # This reduces initialization overhead from ~1,810ms to <400ms on cache hits.
 355 | #
 356 | # Cache Keys:
 357 | # - Storage: "{backend_type}:{db_path}" (e.g., "sqlite_vec:/path/to/db")
 358 | # - MemoryService: storage instance ID (id(storage))
 359 | #
 360 | # Thread Safety:
 361 | # - Uses asyncio.Lock to prevent race conditions during concurrent access
 362 | #
 363 | # Lifecycle:
 364 | # - Cached instances persist for the lifetime of the Python process
 365 | # - NOT cleared between MCP tool calls (intentional for performance)
 366 | # - Cleaned up on process shutdown
 367 | 
 368 | _STORAGE_CACHE: Dict[str, Any] = {}  # Storage instances keyed by "{backend}:{path}"
 369 | _MEMORY_SERVICE_CACHE: Dict[int, Any] = {}  # MemoryService instances keyed by storage ID
 370 | _CACHE_LOCK: Optional[asyncio.Lock] = None  # Initialized on first use to avoid event loop issues
 371 | _CACHE_STATS = {
 372 |     "storage_hits": 0,
 373 |     "storage_misses": 0,
 374 |     "service_hits": 0,
 375 |     "service_misses": 0,
 376 |     "total_calls": 0,
 377 |     "initialization_times": []  # Track initialization durations for cache misses
 378 | }
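# Key-shape sketch (hypothetical database path): a sqlite_vec backend stored at
# /home/user/.local/share/mcp/memory.db is cached under the key
#   "sqlite_vec:/home/user/.local/share/mcp/memory.db"
# so servers pointed at different database files never share a storage instance,
# while repeated tool calls against the same file reuse one.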
 379 | 
 380 | def _get_cache_lock() -> asyncio.Lock:
 381 |     """Get or create the global cache lock (lazy initialization to avoid event loop issues)."""
 382 |     global _CACHE_LOCK
 383 |     if _CACHE_LOCK is None:
 384 |         _CACHE_LOCK = asyncio.Lock()
 385 |     return _CACHE_LOCK
 386 | 
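# Design note on _get_cache_lock (assumption about the motivating pitfall): an
# asyncio.Lock created at module import time could, on Python < 3.10, bind to
# whatever event loop get_event_loop() returned at that moment rather than the
# loop the MCP server ultimately runs on; deferring creation to first use keeps
# the lock attached to the live loop.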
 387 | def _get_or_create_memory_service(storage: Any) -> Any:
 388 |     """
 389 |     Get cached MemoryService or create new one.
 390 | 
 391 |     Args:
 392 |         storage: Storage instance to use as cache key
 393 | 
 394 |     Returns:
 395 |         MemoryService instance (cached or newly created)
 396 |     """
  397 |     from .services.memory_service import MemoryService
 398 | 
 399 |     storage_id = id(storage)
 400 |     if storage_id in _MEMORY_SERVICE_CACHE:
 401 |         memory_service = _MEMORY_SERVICE_CACHE[storage_id]
 402 |         _CACHE_STATS["service_hits"] += 1
 403 |         logger.info(f"✅ MemoryService Cache HIT - Reusing service instance (storage_id: {storage_id})")
 404 |     else:
 405 |         _CACHE_STATS["service_misses"] += 1
 406 |         logger.info(f"❌ MemoryService Cache MISS - Creating new service instance...")
 407 | 
 408 |         # Initialize memory service with shared business logic
 409 |         memory_service = MemoryService(storage)
 410 | 
 411 |         # Cache the memory service instance
 412 |         _MEMORY_SERVICE_CACHE[storage_id] = memory_service
 413 |         logger.info(f"💾 Cached MemoryService instance (storage_id: {storage_id})")
 414 | 
 415 |     return memory_service
 416 | 
 417 | def _log_cache_performance(start_time: float) -> None:
 418 |     """
 419 |     Log comprehensive cache performance statistics.
 420 | 
 421 |     Args:
 422 |         start_time: Timer start time to calculate total elapsed time
 423 |     """
 424 |     total_time = (time.time() - start_time) * 1000
 425 |     cache_hit_rate = (
 426 |         (_CACHE_STATS["storage_hits"] + _CACHE_STATS["service_hits"]) /
 427 |         (_CACHE_STATS["total_calls"] * 2)  # 2 caches per call
 428 |     ) * 100
 429 | 
 430 |     logger.info(
 431 |         f"📊 Cache Stats - "
 432 |         f"Hit Rate: {cache_hit_rate:.1f}% | "
 433 |         f"Storage: {_CACHE_STATS['storage_hits']}H/{_CACHE_STATS['storage_misses']}M | "
 434 |         f"Service: {_CACHE_STATS['service_hits']}H/{_CACHE_STATS['service_misses']}M | "
 435 |         f"Total Time: {total_time:.1f}ms"
 436 |     )
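# Worked example (illustrative counts): after 10 calls with 8 storage hits and
# 9 service hits, hit rate = (8 + 9) / (10 * 2) = 85.0%, since every call probes
# both caches exactly once.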
 437 | 
 438 | class MemoryServer:
 439 |     def __init__(self):
 440 |         """Initialize the server with hardware-aware configuration."""
 441 |         self.server = Server(SERVER_NAME)
 442 |         self.system_info = get_system_info()
 443 |         
 444 |         # Initialize query time tracking
 445 |         self.query_times = deque(maxlen=50)  # Keep last 50 query times for averaging
 446 |         
 447 |         # Initialize progress tracking
 448 |         self.current_progress = {}  # Track ongoing operations
 449 |         
 450 |         # Initialize consolidation system (if enabled)
 451 |         self.consolidator = None
 452 |         self.consolidation_scheduler = None
 453 |         if CONSOLIDATION_ENABLED:
 454 |             try:
 455 |                 config = ConsolidationConfig(**CONSOLIDATION_CONFIG)
 456 |                 self.consolidator = None  # Will be initialized after storage
 457 |                 self.consolidation_scheduler = None  # Will be initialized after consolidator
 458 |                 logger.info("Consolidation system will be initialized after storage")
 459 |             except Exception as e:
 460 |                 logger.error(f"Failed to initialize consolidation config: {e}")
 461 |                 self.consolidator = None
 462 |                 self.consolidation_scheduler = None
 463 |         
 464 |         try:
 465 |             # Initialize paths
 466 |             logger.info(f"Creating directories if they don't exist...")
 467 |             os.makedirs(BACKUPS_PATH, exist_ok=True)
 468 | 
 469 |             # Log system diagnostics
 470 |             logger.info(f"Initializing on {platform.system()} {platform.machine()} with Python {platform.python_version()}")
 471 |             logger.info(f"Using accelerator: {self.system_info.accelerator}")
 472 | 
 473 |             # DEFER STORAGE INITIALIZATION - Initialize storage lazily when needed
 474 |             # This prevents hanging during server startup due to embedding model loading
 475 |             logger.info(f"Deferring {STORAGE_BACKEND} storage initialization to prevent hanging")
 476 |             if MCP_CLIENT == 'lm_studio':
 477 |                 print(f"Deferring {STORAGE_BACKEND} storage initialization to prevent startup hanging", file=sys.stdout, flush=True)
 478 |             self.storage = None
 479 |             self.memory_service = None
 480 |             self._storage_initialized = False
 481 | 
 482 |         except Exception as e:
 483 |             logger.error(f"Initialization error: {str(e)}")
 484 |             logger.error(traceback.format_exc())
 485 |             
 486 |             # Set storage to None to prevent any hanging
 487 |             self.storage = None
 488 |             self.memory_service = None
 489 |             self._storage_initialized = False
 490 |         
 491 |         # Register handlers
 492 |         self.register_handlers()
 493 |         logger.info("Server initialization complete")
 494 |         
 495 |         # Test handler registration with proper arguments
 496 |         try:
 497 |             logger.info("Testing handler registration...")
 498 |             capabilities = self.server.get_capabilities(
 499 |                 notification_options=NotificationOptions(),
 500 |                 experimental_capabilities={}
 501 |             )
 502 |             logger.info(f"Server capabilities: {capabilities}")
 503 |             if MCP_CLIENT == 'lm_studio':
 504 |                 print(f"Server capabilities registered successfully!", file=sys.stdout, flush=True)
 505 |         except Exception as e:
 506 |             logger.error(f"Handler registration test failed: {str(e)}")
 507 |             print(f"Handler registration issue: {str(e)}", file=sys.stderr, flush=True)
 508 |     
 509 |     def record_query_time(self, query_time_ms: float):
 510 |         """Record a query time for averaging."""
 511 |         self.query_times.append(query_time_ms)
 512 |         logger.debug(f"Recorded query time: {query_time_ms:.2f}ms")
 513 |     
 514 |     def get_average_query_time(self) -> float:
 515 |         """Get the average query time from recent operations."""
 516 |         if not self.query_times:
 517 |             return 0.0
 518 |         
 519 |         avg = sum(self.query_times) / len(self.query_times)
 520 |         logger.debug(f"Average query time: {avg:.2f}ms (from {len(self.query_times)} samples)")
 521 |         return round(avg, 2)
 522 |     
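# Worked example (illustrative samples): with query_times = deque([10.0, 20.0, 30.0],
# maxlen=50) the method returns round(60.0 / 3, 2) == 20.0; once 50 samples have
# accumulated, each new append evicts the oldest, yielding a rolling average.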
 523 |     async def send_progress_notification(self, operation_id: str, progress: float, message: str = None):
 524 |         """Send a progress notification for a long-running operation."""
 525 |         try:
 526 |             # Store progress for potential querying
 527 |             self.current_progress[operation_id] = {
 528 |                 "progress": progress,
 529 |                 "message": message or f"Operation {operation_id}: {progress:.0f}% complete",
 530 |                 "timestamp": datetime.now().isoformat()
 531 |             }
 532 |             
 533 |             # Send notification if server supports it
 534 |             if hasattr(self.server, 'send_progress_notification'):
 535 |                 await self.server.send_progress_notification(
 536 |                     progress=progress,
 537 |                     progress_token=operation_id,
 538 |                     message=message
 539 |                 )
 540 |             
 541 |             logger.debug(f"Progress {operation_id}: {progress:.0f}% - {message}")
 542 |             
 543 |             # Clean up completed operations
 544 |             if progress >= 100:
 545 |                 self.current_progress.pop(operation_id, None)
 546 |                 
 547 |         except Exception as e:
 548 |             logger.debug(f"Could not send progress notification: {e}")
 549 |     
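# Usage sketch (hypothetical operation id): a long-running handler could report
# progress like this, with the tracking entry auto-removed once it reaches 100%:
#
#   await self.send_progress_notification("ingest_docs", 0.0, "Starting ingestion")
#   await self.send_progress_notification("ingest_docs", 50.0, "Chunked 10/20 files")
#   await self.send_progress_notification("ingest_docs", 100.0, "Done")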
 550 |     def get_operation_progress(self, operation_id: str) -> Optional[Dict[str, Any]]:
 551 |         """Get the current progress of an operation."""
 552 |         return self.current_progress.get(operation_id)
 553 |     
 554 |     async def _initialize_storage_with_timeout(self):
 555 |         """Initialize storage with timeout and caching optimization."""
 556 |         global _STORAGE_CACHE, _MEMORY_SERVICE_CACHE, _CACHE_STATS
 557 | 
 558 |         # Track call statistics
 559 |         _CACHE_STATS["total_calls"] += 1
 560 |         start_time = time.time()
 561 | 
 562 |         logger.info(f"🚀 EAGER INIT Call #{_CACHE_STATS['total_calls']}: Checking global cache...")
 563 | 
 564 |         # Acquire lock for thread-safe cache access
 565 |         cache_lock = _get_cache_lock()
 566 |         async with cache_lock:
 567 |             # Generate cache key for storage backend
 568 |             cache_key = f"{STORAGE_BACKEND}:{SQLITE_VEC_PATH}"
 569 | 
 570 |             # Check storage cache
 571 |             if cache_key in _STORAGE_CACHE:
 572 |                 self.storage = _STORAGE_CACHE[cache_key]
 573 |                 _CACHE_STATS["storage_hits"] += 1
 574 |                 logger.info(f"✅ Storage Cache HIT - Reusing {STORAGE_BACKEND} instance (key: {cache_key})")
 575 |                 self._storage_initialized = True
 576 | 
 577 |                 # Check memory service cache and log performance
 578 |                 self.memory_service = _get_or_create_memory_service(self.storage)
 579 |                 _log_cache_performance(start_time)
 580 | 
 581 |                 return True  # Cached initialization succeeded
 582 | 
 583 |         # Cache miss - proceed with initialization
 584 |         _CACHE_STATS["storage_misses"] += 1
 585 |         logger.info(f"❌ Storage Cache MISS - Initializing {STORAGE_BACKEND} instance...")
 586 | 
 587 |         try:
 588 |             logger.info(f"🚀 EAGER INIT: Starting {STORAGE_BACKEND} storage initialization...")
 589 |             logger.info(f"🔧 EAGER INIT: Environment check - STORAGE_BACKEND={STORAGE_BACKEND}")
 590 |             
 591 |             # Log all Cloudflare config values for debugging
 592 |             if STORAGE_BACKEND == 'cloudflare':
 593 |                 logger.info(f"🔧 EAGER INIT: Cloudflare config validation:")
 594 |                 logger.info(f"   API_TOKEN: {'SET' if CLOUDFLARE_API_TOKEN else 'NOT SET'}")
 595 |                 logger.info(f"   ACCOUNT_ID: {CLOUDFLARE_ACCOUNT_ID}")
 596 |                 logger.info(f"   VECTORIZE_INDEX: {CLOUDFLARE_VECTORIZE_INDEX}")
 597 |                 logger.info(f"   D1_DATABASE_ID: {CLOUDFLARE_D1_DATABASE_ID}")
 598 |                 logger.info(f"   R2_BUCKET: {CLOUDFLARE_R2_BUCKET}")
 599 |                 logger.info(f"   EMBEDDING_MODEL: {CLOUDFLARE_EMBEDDING_MODEL}")
 600 |             
 601 |             if STORAGE_BACKEND == 'sqlite_vec':
 602 |                 # Check for multi-client coordination mode
 603 |                 from .utils.port_detection import ServerCoordinator
 604 |                 coordinator = ServerCoordinator()
 605 |                 coordination_mode = await coordinator.detect_mode()
 606 |                 
 607 |                 logger.info(f"🔧 EAGER INIT: SQLite-vec - detected coordination mode: {coordination_mode}")
 608 |                 
 609 |                 if coordination_mode == "http_client":
 610 |                     # Use HTTP client to connect to existing server
 611 |                     from .storage.http_client import HTTPClientStorage
 612 |                     self.storage = HTTPClientStorage()
 613 |                     logger.info(f"✅ EAGER INIT: Using HTTP client storage")
 614 |                 elif coordination_mode == "http_server":
 615 |                     # Try to auto-start HTTP server for coordination
 616 |                     from .utils.http_server_manager import auto_start_http_server_if_needed
 617 |                     server_started = await auto_start_http_server_if_needed()
 618 |                     
 619 |                     if server_started:
 620 |                         # Wait a moment for the server to be ready, then use HTTP client
 621 |                         await asyncio.sleep(2)
 622 |                         from .storage.http_client import HTTPClientStorage
 623 |                         self.storage = HTTPClientStorage()
 624 |                         logger.info(f"✅ EAGER INIT: Started HTTP server and using HTTP client storage")
 625 |                     else:
 626 |                         # Fall back to direct SQLite-vec storage
 627 |                         from . import storage
 628 |                         import importlib
 629 |                         storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 630 |                         SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 631 |                         self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH)
 632 |                         logger.info(f"✅ EAGER INIT: HTTP server auto-start failed, using direct SQLite-vec storage")
 633 |                 else:
 634 |                     # Import sqlite-vec storage module (supports dynamic class replacement)
 635 |                     from . import storage
 636 |                     import importlib
 637 |                     storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 638 |                     SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 639 |                     self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH)
 640 |                     logger.info(f"✅ EAGER INIT: Using direct SQLite-vec storage at {SQLITE_VEC_PATH}")
 641 |             elif STORAGE_BACKEND == 'cloudflare':
 642 |                 # Initialize Cloudflare storage
 643 |                 logger.info(f"☁️  EAGER INIT: Importing CloudflareStorage...")
 644 |                 from .storage.cloudflare import CloudflareStorage
 645 |                 logger.info(f"☁️  EAGER INIT: Creating CloudflareStorage instance...")
 646 |                 self.storage = CloudflareStorage(
 647 |                     api_token=CLOUDFLARE_API_TOKEN,
 648 |                     account_id=CLOUDFLARE_ACCOUNT_ID,
 649 |                     vectorize_index=CLOUDFLARE_VECTORIZE_INDEX,
 650 |                     d1_database_id=CLOUDFLARE_D1_DATABASE_ID,
 651 |                     r2_bucket=CLOUDFLARE_R2_BUCKET,
 652 |                     embedding_model=CLOUDFLARE_EMBEDDING_MODEL,
 653 |                     large_content_threshold=CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 654 |                     max_retries=CLOUDFLARE_MAX_RETRIES,
 655 |                     base_delay=CLOUDFLARE_BASE_DELAY
 656 |                 )
 657 |                 logger.info(f"✅ EAGER INIT: CloudflareStorage instance created with index: {CLOUDFLARE_VECTORIZE_INDEX}")
 658 |             elif STORAGE_BACKEND == 'hybrid':
 659 |                 # Initialize Hybrid storage (SQLite-vec + Cloudflare)
 660 |                 logger.info(f"🔄 EAGER INIT: Using Hybrid storage...")
 661 |                 from .storage.hybrid import HybridMemoryStorage
 662 | 
 663 |                 # Prepare Cloudflare configuration dict
 664 |                 cloudflare_config = None
 665 |                 if all([CLOUDFLARE_API_TOKEN, CLOUDFLARE_ACCOUNT_ID, CLOUDFLARE_VECTORIZE_INDEX, CLOUDFLARE_D1_DATABASE_ID]):
 666 |                     cloudflare_config = {
 667 |                         'api_token': CLOUDFLARE_API_TOKEN,
 668 |                         'account_id': CLOUDFLARE_ACCOUNT_ID,
 669 |                         'vectorize_index': CLOUDFLARE_VECTORIZE_INDEX,
 670 |                         'd1_database_id': CLOUDFLARE_D1_DATABASE_ID,
 671 |                         'r2_bucket': CLOUDFLARE_R2_BUCKET,
 672 |                         'embedding_model': CLOUDFLARE_EMBEDDING_MODEL,
 673 |                         'large_content_threshold': CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 674 |                         'max_retries': CLOUDFLARE_MAX_RETRIES,
 675 |                         'base_delay': CLOUDFLARE_BASE_DELAY
 676 |                     }
 677 |                     logger.info(f"🔄 EAGER INIT: Cloudflare config prepared for hybrid storage")
 678 |                 else:
 679 |                     logger.warning("🔄 EAGER INIT: Incomplete Cloudflare config, hybrid will run in SQLite-only mode")
 680 | 
 681 |                 self.storage = HybridMemoryStorage(
 682 |                     sqlite_db_path=SQLITE_VEC_PATH,
 683 |                     embedding_model="all-MiniLM-L6-v2",
 684 |                     cloudflare_config=cloudflare_config,
 685 |                     sync_interval=HYBRID_SYNC_INTERVAL or 300,
 686 |                     batch_size=HYBRID_BATCH_SIZE or 50
 687 |                 )
 688 |                 logger.info(f"✅ EAGER INIT: HybridMemoryStorage instance created")
 689 |             else:
 690 |                 # Unknown backend - should not reach here due to factory validation
 691 |                 logger.error(f"❌ EAGER INIT: Unknown storage backend: {STORAGE_BACKEND}")
 692 |                 raise ValueError(f"Unsupported storage backend: {STORAGE_BACKEND}")
 693 | 
 694 |             # Initialize the storage backend
 695 |             logger.info(f"🔧 EAGER INIT: Calling storage.initialize()...")
 696 |             await self.storage.initialize()
 697 |             logger.info(f"✅ EAGER INIT: storage.initialize() completed successfully")
 698 |             
 699 |             self._storage_initialized = True
 700 |             logger.info(f"🎉 EAGER INIT: {STORAGE_BACKEND} storage initialization successful")
 701 | 
 702 |             # Cache the newly initialized storage instance
 703 |             async with cache_lock:
 704 |                 _STORAGE_CACHE[cache_key] = self.storage
 705 |                 init_time = (time.time() - start_time) * 1000
 706 |                 _CACHE_STATS["initialization_times"].append(init_time)
 707 |                 logger.info(f"💾 Cached storage instance (key: {cache_key}, init_time: {init_time:.1f}ms)")
 708 | 
 709 |                 # Initialize and cache MemoryService
 710 |                 _CACHE_STATS["service_misses"] += 1
 711 |                 self.memory_service = MemoryService(self.storage)
 712 |                 storage_id = id(self.storage)
 713 |                 _MEMORY_SERVICE_CACHE[storage_id] = self.memory_service
 714 |                 logger.info(f"💾 Cached MemoryService instance (storage_id: {storage_id})")
 715 | 
 716 |             # Verify storage type
 717 |             storage_type = self.storage.__class__.__name__
 718 |             logger.info(f"🔍 EAGER INIT: Final storage type verification: {storage_type}")
 719 | 
 720 |             # Initialize consolidation system after storage is ready
 721 |             await self._initialize_consolidation()
 722 | 
 723 |             return True
 724 |         except Exception as e:
 725 |             logger.error(f"❌ EAGER INIT: Storage initialization failed: {str(e)}")
 726 |             logger.error(f"📋 EAGER INIT: Full traceback:")
 727 |             logger.error(traceback.format_exc())
 728 |             return False
 729 | 
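# ----------------------------------------------------------------------
# [Editor's note] Both this eager path and the lazy path below funnel
# into the same module-global cache keyed by
# f"{STORAGE_BACKEND}:{SQLITE_VEC_PATH}", guarded by an asyncio lock. A
# minimal standalone sketch of that double-checked pattern (names below
# are illustrative, not the module's actual helpers):
#
#   import asyncio
#
#   _cache: dict = {}
#   _lock = asyncio.Lock()
#
#   async def get_or_create_storage(key: str, factory):
#       async with _lock:                      # fast path: reuse cached instance
#           if key in _cache:
#               return _cache[key]
#       instance = factory()                   # slow init happens outside the lock
#       await instance.initialize()
#       async with _lock:                      # publish once; first writer wins
#           return _cache.setdefault(key, instance)
# ----------------------------------------------------------------------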
 730 |     async def _ensure_storage_initialized(self):
 731 |         """Lazily initialize storage backend when needed with global caching."""
 732 |         if not self._storage_initialized:
 733 |             global _STORAGE_CACHE, _MEMORY_SERVICE_CACHE, _CACHE_STATS
 734 | 
 735 |             # Track call statistics
 736 |             _CACHE_STATS["total_calls"] += 1
 737 |             start_time = time.time()
 738 | 
 739 |             logger.info(f"🔄 LAZY INIT Call #{_CACHE_STATS['total_calls']}: Checking global cache...")
 740 | 
 741 |             # Acquire lock for thread-safe cache access
 742 |             cache_lock = _get_cache_lock()
 743 |             async with cache_lock:
 744 |                 # Generate cache key for storage backend
 745 |                 cache_key = f"{STORAGE_BACKEND}:{SQLITE_VEC_PATH}"
 746 | 
 747 |                 # Check storage cache
 748 |                 if cache_key in _STORAGE_CACHE:
 749 |                     self.storage = _STORAGE_CACHE[cache_key]
 750 |                     _CACHE_STATS["storage_hits"] += 1
 751 |                     logger.info(f"✅ Storage Cache HIT - Reusing {STORAGE_BACKEND} instance (key: {cache_key})")
 752 |                     self._storage_initialized = True
 753 | 
 754 |                     # Check memory service cache and log performance
 755 |                     self.memory_service = _get_or_create_memory_service(self.storage)
 756 |                     _log_cache_performance(start_time)
 757 | 
 758 |                     return self.storage
 759 | 
 760 |             # Cache miss - proceed with initialization
 761 |             _CACHE_STATS["storage_misses"] += 1
 762 |             logger.info(f"❌ Storage Cache MISS - Initializing {STORAGE_BACKEND} instance...")
 763 | 
 764 |             try:
 765 |                 logger.info(f"🔄 LAZY INIT: Starting {STORAGE_BACKEND} storage initialization...")
 766 |                 logger.info(f"🔧 LAZY INIT: Environment check - STORAGE_BACKEND={STORAGE_BACKEND}")
 767 |                 
 768 |                 # Log all Cloudflare config values for debugging
 769 |                 if STORAGE_BACKEND == 'cloudflare':
 770 |                     logger.info(f"🔧 LAZY INIT: Cloudflare config validation:")
 771 |                     logger.info(f"   API_TOKEN: {'SET' if CLOUDFLARE_API_TOKEN else 'NOT SET'}")
 772 |                     logger.info(f"   ACCOUNT_ID: {CLOUDFLARE_ACCOUNT_ID}")
 773 |                     logger.info(f"   VECTORIZE_INDEX: {CLOUDFLARE_VECTORIZE_INDEX}")
 774 |                     logger.info(f"   D1_DATABASE_ID: {CLOUDFLARE_D1_DATABASE_ID}")
 775 |                     logger.info(f"   R2_BUCKET: {CLOUDFLARE_R2_BUCKET}")
 776 |                     logger.info(f"   EMBEDDING_MODEL: {CLOUDFLARE_EMBEDDING_MODEL}")
 777 |                 
 778 |                 if STORAGE_BACKEND == 'sqlite_vec':
 779 |                     # Check for multi-client coordination mode
 780 |                     from .utils.port_detection import ServerCoordinator
 781 |                     coordinator = ServerCoordinator()
 782 |                     coordination_mode = await coordinator.detect_mode()
 783 |                     
 784 |                     logger.info(f"🔧 LAZY INIT: SQLite-vec - detected coordination mode: {coordination_mode}")
 785 |                     
 786 |                     if coordination_mode == "http_client":
 787 |                         # Use HTTP client to connect to existing server
 788 |                         from .storage.http_client import HTTPClientStorage
 789 |                         self.storage = HTTPClientStorage()
 790 |                         logger.info(f"✅ LAZY INIT: Using HTTP client storage")
 791 |                     elif coordination_mode == "http_server":
 792 |                         # Try to auto-start HTTP server for coordination
 793 |                         from .utils.http_server_manager import auto_start_http_server_if_needed
 794 |                         server_started = await auto_start_http_server_if_needed()
 795 |                         
 796 |                         if server_started:
 797 |                             # Wait a moment for the server to be ready, then use HTTP client
 798 |                             await asyncio.sleep(2)
 799 |                             from .storage.http_client import HTTPClientStorage
 800 |                             self.storage = HTTPClientStorage()
 801 |                             logger.info(f"✅ LAZY INIT: Started HTTP server and using HTTP client storage")
 802 |                         else:
 803 |                             # Fall back to direct SQLite-vec storage
 804 |                             import importlib
 805 |                             storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 806 |                             SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 807 |                             self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH)
 808 |                             logger.info(f"✅ LAZY INIT: HTTP server auto-start failed, using direct SQLite-vec storage at: {SQLITE_VEC_PATH}")
 809 |                     else:
 810 |                         # Use direct SQLite-vec storage (with WAL mode for concurrent access)
 811 |                         import importlib
 812 |                         storage_module = importlib.import_module('mcp_memory_service.storage.sqlite_vec')
 813 |                         SqliteVecMemoryStorage = storage_module.SqliteVecMemoryStorage
 814 |                         self.storage = SqliteVecMemoryStorage(SQLITE_VEC_PATH)
 815 |                         logger.info(f"✅ LAZY INIT: Created SQLite-vec storage at: {SQLITE_VEC_PATH}")
 816 |                 elif STORAGE_BACKEND == 'cloudflare':
 817 |                     # Cloudflare backend using Vectorize, D1, and R2
 818 |                     logger.info(f"☁️  LAZY INIT: Importing CloudflareStorage...")
 819 |                     from .storage.cloudflare import CloudflareStorage
 820 |                     logger.info(f"☁️  LAZY INIT: Creating CloudflareStorage instance...")
 821 |                     self.storage = CloudflareStorage(
 822 |                         api_token=CLOUDFLARE_API_TOKEN,
 823 |                         account_id=CLOUDFLARE_ACCOUNT_ID,
 824 |                         vectorize_index=CLOUDFLARE_VECTORIZE_INDEX,
 825 |                         d1_database_id=CLOUDFLARE_D1_DATABASE_ID,
 826 |                         r2_bucket=CLOUDFLARE_R2_BUCKET,
 827 |                         embedding_model=CLOUDFLARE_EMBEDDING_MODEL,
 828 |                         large_content_threshold=CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 829 |                         max_retries=CLOUDFLARE_MAX_RETRIES,
 830 |                         base_delay=CLOUDFLARE_BASE_DELAY
 831 |                     )
 832 |                     logger.info(f"✅ LAZY INIT: Created Cloudflare storage with Vectorize index: {CLOUDFLARE_VECTORIZE_INDEX}")
 833 |                 elif STORAGE_BACKEND == 'hybrid':
 834 |                     # Hybrid backend using SQLite-vec as primary and Cloudflare as secondary
 835 |                     logger.info(f"🔄 LAZY INIT: Importing HybridMemoryStorage...")
 836 |                     from .storage.hybrid import HybridMemoryStorage
 837 | 
 838 |                     # Prepare Cloudflare configuration dict
 839 |                     cloudflare_config = None
 840 |                     if all([CLOUDFLARE_API_TOKEN, CLOUDFLARE_ACCOUNT_ID, CLOUDFLARE_VECTORIZE_INDEX, CLOUDFLARE_D1_DATABASE_ID]):
 841 |                         cloudflare_config = {
 842 |                             'api_token': CLOUDFLARE_API_TOKEN,
 843 |                             'account_id': CLOUDFLARE_ACCOUNT_ID,
 844 |                             'vectorize_index': CLOUDFLARE_VECTORIZE_INDEX,
 845 |                             'd1_database_id': CLOUDFLARE_D1_DATABASE_ID,
 846 |                             'r2_bucket': CLOUDFLARE_R2_BUCKET,
 847 |                             'embedding_model': CLOUDFLARE_EMBEDDING_MODEL,
 848 |                             'large_content_threshold': CLOUDFLARE_LARGE_CONTENT_THRESHOLD,
 849 |                             'max_retries': CLOUDFLARE_MAX_RETRIES,
 850 |                             'base_delay': CLOUDFLARE_BASE_DELAY
 851 |                         }
 852 |                         logger.info(f"🔄 LAZY INIT: Cloudflare config prepared for hybrid storage")
 853 |                     else:
 854 |                         logger.warning("🔄 LAZY INIT: Incomplete Cloudflare config, hybrid will run in SQLite-only mode")
 855 | 
 856 |                     logger.info(f"🔄 LAZY INIT: Creating HybridMemoryStorage instance...")
 857 |                     self.storage = HybridMemoryStorage(
 858 |                         sqlite_db_path=SQLITE_VEC_PATH,
 859 |                         embedding_model="all-MiniLM-L6-v2",
 860 |                         cloudflare_config=cloudflare_config,
 861 |                         sync_interval=HYBRID_SYNC_INTERVAL or 300,
 862 |                         batch_size=HYBRID_BATCH_SIZE or 50
 863 |                     )
 864 |                     logger.info(f"✅ LAZY INIT: Created Hybrid storage at: {SQLITE_VEC_PATH} with Cloudflare sync")
 865 |                 else:
 866 |                     # Unknown/unsupported backend
 867 |                     logger.error("=" * 70)
 868 |                     logger.error(f"❌ LAZY INIT: Unsupported storage backend: {STORAGE_BACKEND}")
 869 |                     logger.error("")
 870 |                     logger.error("Supported backends:")
 871 |                     logger.error("  - sqlite_vec (recommended for single-device use)")
 872 |                     logger.error("  - cloudflare (cloud storage)")
 873 |                     logger.error("  - hybrid (recommended for multi-device use)")
 874 |                     logger.error("=" * 70)
 875 |                     raise ValueError(
 876 |                         f"Unsupported storage backend: {STORAGE_BACKEND}. "
 877 |                         "Use 'sqlite_vec', 'cloudflare', or 'hybrid'."
 878 |                     )
 879 |                 
 880 |                 # Initialize the storage backend
 881 |                 logger.info(f"🔧 LAZY INIT: Calling storage.initialize()...")
 882 |                 await self.storage.initialize()
 883 |                 logger.info(f"✅ LAZY INIT: storage.initialize() completed successfully")
 884 |                 
 885 |                 # Verify the storage is properly initialized
 886 |                 if hasattr(self.storage, 'is_initialized') and not self.storage.is_initialized():
 887 |                     # Get detailed status for debugging
 888 |                     if hasattr(self.storage, 'get_initialization_status'):
 889 |                         status = self.storage.get_initialization_status()
 890 |                         logger.error(f"❌ LAZY INIT: Storage initialization incomplete: {status}")
 891 |                     raise RuntimeError("Storage initialization incomplete")
 892 |                 
 893 |                 self._storage_initialized = True
 894 |                 storage_type = self.storage.__class__.__name__
 895 |                 logger.info(f"🎉 LAZY INIT: Storage backend ({STORAGE_BACKEND}) initialization successful")
 896 |                 logger.info(f"🔍 LAZY INIT: Final storage type verification: {storage_type}")
 897 | 
 898 |                 # Cache the newly initialized storage instance
 899 |                 async with cache_lock:
 900 |                     _STORAGE_CACHE[cache_key] = self.storage
 901 |                     init_time = (time.time() - start_time) * 1000
 902 |                     _CACHE_STATS["initialization_times"].append(init_time)
 903 |                     logger.info(f"💾 Cached storage instance (key: {cache_key}, init_time: {init_time:.1f}ms)")
 904 | 
 905 |                     # Initialize and cache MemoryService
 906 |                     _CACHE_STATS["service_misses"] += 1
 907 |                     self.memory_service = MemoryService(self.storage)
 908 |                     storage_id = id(self.storage)
 909 |                     _MEMORY_SERVICE_CACHE[storage_id] = self.memory_service
 910 |                     logger.info(f"💾 Cached MemoryService instance (storage_id: {storage_id})")
 911 | 
 912 |                 # Initialize consolidation system after storage is ready
 913 |                 await self._initialize_consolidation()
 914 | 
 915 |             except Exception as e:
 916 |                 logger.error(f"❌ LAZY INIT: Failed to initialize {STORAGE_BACKEND} storage: {str(e)}")
 917 |                 logger.error(f"📋 LAZY INIT: Full traceback:")
 918 |                 logger.error(traceback.format_exc())
 919 |                 # Set storage to None to indicate failure
 920 |                 self.storage = None
 921 |                 self._storage_initialized = False
 922 |                 raise
 923 |         return self.storage
 924 | 
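# ----------------------------------------------------------------------
# [Editor's note] _CACHE_STATS above accumulates total_calls,
# storage_hits/storage_misses, and per-miss initialization_times. A
# hypothetical reporting helper built only on those counters:
#
#   def summarize_cache_stats(stats: dict) -> str:
#       calls = stats.get("total_calls", 0)
#       hits = stats.get("storage_hits", 0)
#       times = stats.get("initialization_times", [])
#       hit_rate = hits / calls if calls else 0.0
#       avg_init = sum(times) / len(times) if times else 0.0
#       return f"hit_rate={hit_rate:.1%}, avg_init={avg_init:.1f}ms over {calls} calls"
# ----------------------------------------------------------------------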
 925 |     async def initialize(self):
 926 |         """Async initialization method with eager storage initialization and timeout."""
 927 |         try:
 928 |             # Run any async initialization tasks here
 929 |             logger.info("🚀 SERVER INIT: Starting async initialization...")
 930 |             
 931 |             # Print system diagnostics only for LM Studio (avoid JSON parsing errors in Claude Desktop)
 932 |             if MCP_CLIENT == 'lm_studio':
 933 |                 print("\n=== System Diagnostics ===", file=sys.stdout, flush=True)
 934 |                 print(f"OS: {self.system_info.os_name} {self.system_info.os_version}", file=sys.stdout, flush=True)
 935 |                 print(f"Architecture: {self.system_info.architecture}", file=sys.stdout, flush=True)
 936 |                 print(f"Memory: {self.system_info.memory_gb:.2f} GB", file=sys.stdout, flush=True)
 937 |                 print(f"Accelerator: {self.system_info.accelerator}", file=sys.stdout, flush=True)
 938 |                 print(f"Python: {platform.python_version()}", file=sys.stdout, flush=True)
 939 |             
 940 |             # Log environment info
 941 |             logger.info(f"🔧 SERVER INIT: Environment - STORAGE_BACKEND={STORAGE_BACKEND}")
 942 |             
 943 |             # Attempt eager storage initialization with timeout
 944 |             # Get dynamic timeout based on system and dependency status
 945 |             timeout_seconds = get_recommended_timeout()
 946 |             logger.info(f"⏱️  SERVER INIT: Attempting eager storage initialization (timeout: {timeout_seconds}s)...")
 947 |             if MCP_CLIENT == 'lm_studio':
 948 |                 print(f"Attempting eager storage initialization (timeout: {timeout_seconds}s)...", file=sys.stdout, flush=True)
 949 |             try:
 950 |                 init_task = asyncio.create_task(self._initialize_storage_with_timeout())
 951 |                 success = await asyncio.wait_for(init_task, timeout=timeout_seconds)
 952 |                 if success:
 953 |                     if MCP_CLIENT == 'lm_studio':
 954 |                         print("[OK] Eager storage initialization successful", file=sys.stdout, flush=True)
 955 |                     logger.info("✅ SERVER INIT: Eager storage initialization completed successfully")
 956 |                     
 957 |                     # Verify storage type after successful eager init
 958 |                     if hasattr(self, 'storage') and self.storage:
 959 |                         storage_type = self.storage.__class__.__name__
 960 |                         logger.info(f"🔍 SERVER INIT: Eager init resulted in storage type: {storage_type}")
 961 |                 else:
 962 |                     if MCP_CLIENT == 'lm_studio':
 963 |                         print("[WARNING] Eager storage initialization failed, will use lazy loading", file=sys.stdout, flush=True)
 964 |                     logger.warning("⚠️  SERVER INIT: Eager initialization failed, falling back to lazy loading")
 965 |                     # Reset state for lazy loading
 966 |                     self.storage = None
 967 |                     self._storage_initialized = False
 968 |             except asyncio.TimeoutError:
 969 |                 if MCP_CLIENT == 'lm_studio':
 970 |                     print("[TIMEOUT] Eager storage initialization timed out, will use lazy loading", file=sys.stdout, flush=True)
 971 |                 logger.warning(f"⏱️  SERVER INIT: Storage initialization timed out after {timeout_seconds}s, falling back to lazy loading")
 972 |                 # Reset state for lazy loading
 973 |                 self.storage = None
 974 |                 self._storage_initialized = False
 975 |             except Exception as e:
 976 |                 if MCP_CLIENT == 'lm_studio':
 977 |                     print(f"[WARNING] Eager initialization error: {str(e)}, will use lazy loading", file=sys.stdout, flush=True)
 978 |                 logger.warning(f"⚠️  SERVER INIT: Eager initialization error: {str(e)}, falling back to lazy loading")
 979 |                 logger.warning(f"📋 SERVER INIT: Eager init error traceback:")
 980 |                 logger.warning(traceback.format_exc())
 981 |                 # Reset state for lazy loading
 982 |                 self.storage = None
 983 |                 self._storage_initialized = False
 984 |             
 985 |             # Add explicit console output for Smithery to see (only for LM Studio)
 986 |             if MCP_CLIENT == 'lm_studio':
 987 |                 print("MCP Memory Service initialization completed", file=sys.stdout, flush=True)
 988 |             
 989 |             logger.info("🎉 SERVER INIT: Async initialization completed")
 990 |             return True
 991 |         except Exception as e:
 992 |             logger.error(f"❌ SERVER INIT: Async initialization error: {str(e)}")
 993 |             logger.error(f"📋 SERVER INIT: Full traceback:")
 994 |             logger.error(traceback.format_exc())
 995 |             # Add explicit console error output for Smithery to see
 996 |             print(f"Initialization error: {str(e)}", file=sys.stderr, flush=True)
 997 |             # Don't raise the exception, just return False
 998 |             return False
 999 | 
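# ----------------------------------------------------------------------
# [Editor's note] The eager-init-with-timeout flow above reduces to a
# small reusable pattern: race the initialization against a deadline and
# degrade to lazy loading on timeout or error. A sketch (names are
# illustrative only):
#
#   import asyncio
#
#   async def try_eager_init(init_coro, timeout_s: float) -> bool:
#       try:
#           return bool(await asyncio.wait_for(init_coro, timeout=timeout_s))
#       except asyncio.TimeoutError:
#           return False   # slow start-up: caller falls back to lazy init
#       except Exception:
#           return False   # any other failure also degrades gracefully
# ----------------------------------------------------------------------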
1000 |     async def validate_database_health(self):
1001 |         """Validate database health during initialization."""
1002 |         from .utils.db_utils import validate_database, repair_database
1003 |         
1004 |         try:
1005 |             # Check database health
1006 |             is_valid, message = await validate_database(self.storage)
1007 |             if not is_valid:
1008 |                 logger.warning(f"Database validation failed: {message}")
1009 |                 
1010 |                 # Attempt repair
1011 |                 logger.info("Attempting database repair...")
1012 |                 repair_success, repair_message = await repair_database(self.storage)
1013 |                 
1014 |                 if not repair_success:
1015 |                     logger.error(f"Database repair failed: {repair_message}")
1016 |                     return False
1017 |                 else:
1018 |                     logger.info(f"Database repair successful: {repair_message}")
1019 |                     return True
1020 |             else:
1021 |                 logger.info(f"Database validation successful: {message}")
1022 |                 return True
1023 |         except Exception as e:
1024 |             logger.error(f"Database validation error: {str(e)}")
1025 |             return False
1026 | 
1027 |     async def _initialize_consolidation(self):
1028 |         """Initialize the consolidation system after storage is ready."""
1029 |         if not CONSOLIDATION_ENABLED or not self._storage_initialized:
1030 |             return
1031 |         
1032 |         try:
1033 |             if self.consolidator is None:
1034 |                 # Create consolidation config
1035 |                 config = ConsolidationConfig(**CONSOLIDATION_CONFIG)
1036 |                 
1037 |                 # Initialize the consolidator with storage
1038 |                 self.consolidator = DreamInspiredConsolidator(self.storage, config)
1039 |                 logger.info("Dream-inspired consolidator initialized")
1040 |                 
1041 |                 # Initialize the scheduler if not disabled
1042 |                 if any(schedule != 'disabled' for schedule in CONSOLIDATION_SCHEDULE.values()):
1043 |                     self.consolidation_scheduler = ConsolidationScheduler(
1044 |                         self.consolidator, 
1045 |                         CONSOLIDATION_SCHEDULE, 
1046 |                         enabled=True
1047 |                     )
1048 |                     
1049 |                     # Start the scheduler
1050 |                     if await self.consolidation_scheduler.start():
1051 |                         logger.info("Consolidation scheduler started successfully")
1052 |                     else:
1053 |                         logger.warning("Failed to start consolidation scheduler")
1054 |                         self.consolidation_scheduler = None
1055 |                 else:
1056 |                     logger.info("Consolidation scheduler disabled (all schedules set to 'disabled')")
1057 |                 
1058 |         except Exception as e:
1059 |             logger.error(f"Failed to initialize consolidation system: {e}")
1060 |             logger.error(traceback.format_exc())
1061 |             self.consolidator = None
1062 |             self.consolidation_scheduler = None
1063 | 
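# ----------------------------------------------------------------------
# [Editor's note] The scheduler is skipped only when every value in
# CONSOLIDATION_SCHEDULE equals the string 'disabled'. The actual keys
# are defined in the config module (not shown on this page); a plausible
# shape, purely for illustration:
#
#   CONSOLIDATION_SCHEDULE = {
#       "daily":   "02:00",      # hypothetical: run nightly at 02:00
#       "weekly":  "disabled",
#       "monthly": "disabled",
#   }
#
# One non-'disabled' entry is enough for ConsolidationScheduler to be
# constructed and started.
# ----------------------------------------------------------------------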
1064 |     def handle_method_not_found(self, method: str) -> None:
1065 |         """Custom handler for unsupported methods.
1066 |         
1067 |         This logs the unsupported method request but doesn't raise an exception,
1068 |         allowing the MCP server to handle it with a standard JSON-RPC error response.
1069 |         """
1070 |         logger.warning(f"Unsupported method requested: {method}")
1071 |         # The MCP server will automatically respond with a Method not found error
1072 |         # We don't need to do anything else here
1073 |     
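# ----------------------------------------------------------------------
# [Editor's note] "Method not found" is JSON-RPC 2.0 error code -32601,
# which the MCP server emits automatically; the handler above therefore
# only logs. The wire-level response looks like:
#
#   {"jsonrpc": "2.0", "id": 42,
#    "error": {"code": -32601, "message": "Method not found"}}
# ----------------------------------------------------------------------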
1074 |     def register_handlers(self):
1075 |         # Enhanced Resources implementation
1076 |         @self.server.list_resources()
1077 |         async def handle_list_resources() -> List[Resource]:
1078 |             """List available memory resources."""
1079 |             await self._ensure_storage_initialized()
1080 |             
1081 |             resources = [
1082 |                 types.Resource(
1083 |                     uri="memory://stats",
1084 |                     name="Memory Statistics",
1085 |                     description="Current memory database statistics",
1086 |                     mimeType="application/json"
1087 |                 ),
1088 |                 types.Resource(
1089 |                     uri="memory://tags",
1090 |                     name="Available Tags",
1091 |                     description="List of all tags used in memories",
1092 |                     mimeType="application/json"
1093 |                 ),
1094 |                 types.Resource(
1095 |                     uri="memory://recent/10",
1096 |                     name="Recent Memories",
1097 |                     description="10 most recent memories",
1098 |                     mimeType="application/json"
1099 |                 )
1100 |             ]
1101 |             
1102 |             # Add tag-specific resources for existing tags
1103 |             try:
1104 |                 all_tags = await self.storage.get_all_tags()
1105 |                 for tag in all_tags[:5]:  # Limit to first 5 tags for resources
1106 |                     resources.append(types.Resource(
1107 |                         uri=f"memory://tag/{tag}",
1108 |                         name=f"Memories tagged '{tag}'",
1109 |                         description=f"All memories with tag '{tag}'",
1110 |                         mimeType="application/json"
1111 |                     ))
1112 |             except AttributeError:
1113 |                 # get_all_tags method not available on this storage backend
1114 |                 pass
1115 |             except Exception as e:
1116 |                 logger.warning(f"Failed to load tag resources: {e}")
1118 |             
1119 |             return resources
1120 |         
1121 |         @self.server.read_resource()
1122 |         async def handle_read_resource(uri: str) -> str:
1123 |             """Read a specific memory resource."""
1124 |             await self._ensure_storage_initialized()
1125 |             
1126 |             import json
1127 |             from urllib.parse import unquote
1128 |             
1129 |             try:
1130 |                 if uri == "memory://stats":
1131 |                     # Get memory statistics
1132 |                     stats = await self.storage.get_stats()
1133 |                     return json.dumps(stats, indent=2)
1134 |                     
1135 |                 elif uri == "memory://tags":
1136 |                     # Get all available tags
1137 |                     tags = await self.storage.get_all_tags()
1138 |                     return json.dumps({"tags": tags, "count": len(tags)}, indent=2)
1139 |                     
1140 |                 elif uri.startswith("memory://recent/"):
1141 |                     # Get recent memories
1142 |                     n = int(uri.split("/")[-1])
1143 |                     memories = await self.storage.get_recent_memories(n)
1144 |                     return json.dumps({
1145 |                         "memories": [m.to_dict() for m in memories],
1146 |                         "count": len(memories)
1147 |                     }, indent=2, default=str)
1148 |                     
1149 |                 elif uri.startswith("memory://tag/"):
1150 |                     # Get memories by tag
1151 |                     tag = unquote(uri.split("/", 3)[-1])
1152 |                     memories = await self.storage.search_by_tag([tag])
1153 |                     return json.dumps({
1154 |                         "tag": tag,
1155 |                         "memories": [m.to_dict() for m in memories],
1156 |                         "count": len(memories)
1157 |                     }, indent=2, default=str)
1158 |                     
1159 |                 elif uri.startswith("memory://search/"):
1160 |                     # Dynamic search
1161 |                     query = unquote(uri.split("/", 3)[-1])
1162 |                     results = await self.storage.search(query, n_results=10)
1163 |                     return json.dumps({
1164 |                         "query": query,
1165 |                         "results": [r.to_dict() for r in results],
1166 |                         "count": len(results)
1167 |                     }, indent=2, default=str)
1168 |                     
1169 |                 else:
1170 |                     return json.dumps({"error": f"Resource not found: {uri}"}, indent=2)
1171 |                     
1172 |             except Exception as e:
1173 |                 logger.error(f"Error reading resource {uri}: {e}")
1174 |                 return json.dumps({"error": str(e)}, indent=2)
1175 |         
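# ----------------------------------------------------------------------
# [Editor's note] Worked examples of the split/unquote parsing used in
# handle_read_resource above (easy to verify in a REPL):
#
#   from urllib.parse import unquote
#
#   int("memory://recent/10".split("/")[-1])                   # -> 10
#   unquote("memory://tag/project%20x".split("/", 3)[-1])      # -> "project x"
#   unquote("memory://search/vector%20db".split("/", 3)[-1])   # -> "vector db"
#
# Note that maxsplit=3 keeps any further "/" characters inside the tag
# or query intact.
# ----------------------------------------------------------------------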
1176 |         @self.server.list_resource_templates()
1177 |         async def handle_list_resource_templates() -> List[types.ResourceTemplate]:
1178 |             """List resource templates for dynamic queries."""
1179 |             return [
1180 |                 types.ResourceTemplate(
1181 |                     uriTemplate="memory://recent/{n}",
1182 |                     name="Recent Memories",
1183 |                     description="Get N most recent memories",
1184 |                     mimeType="application/json"
1185 |                 ),
1186 |                 types.ResourceTemplate(
1187 |                     uriTemplate="memory://tag/{tag}",
1188 |                     name="Memories by Tag",
1189 |                     description="Get all memories with a specific tag",
1190 |                     mimeType="application/json"
1191 |                 ),
1192 |                 types.ResourceTemplate(
1193 |                     uriTemplate="memory://search/{query}",
1194 |                     name="Search Memories",
1195 |                     description="Search memories by query",
1196 |                     mimeType="application/json"
1197 |                 )
1198 |             ]
1199 |         
1200 |         @self.server.list_prompts()
1201 |         async def handle_list_prompts() -> List[types.Prompt]:
1202 |             """List available guided prompts for memory operations."""
1203 |             return [
1204 |                 types.Prompt(
1205 |                     name="memory_review",
1206 |                     description="Review and organize memories from a specific time period",
1207 |                     arguments=[
1208 |                         types.PromptArgument(
1209 |                             name="time_period",
1210 |                             description="Time period to review (e.g., 'last week', 'yesterday', '2 days ago')",
1211 |                             required=True
1212 |                         ),
1213 |                         types.PromptArgument(
1214 |                             name="focus_area",
1215 |                             description="Optional area to focus on (e.g., 'work', 'personal', 'learning')",
1216 |                             required=False
1217 |                         )
1218 |                     ]
1219 |                 ),
1220 |                 types.Prompt(
1221 |                     name="memory_analysis",
1222 |                     description="Analyze patterns and themes in stored memories",
1223 |                     arguments=[
1224 |                         types.PromptArgument(
1225 |                             name="tags",
1226 |                             description="Tags to analyze (comma-separated)",
1227 |                             required=False
1228 |                         ),
1229 |                         types.PromptArgument(
1230 |                             name="time_range",
1231 |                             description="Time range to analyze (e.g., 'last month', 'all time')",
1232 |                             required=False
1233 |                         )
1234 |                     ]
1235 |                 ),
1236 |                 types.Prompt(
1237 |                     name="knowledge_export",
1238 |                     description="Export memories in a specific format",
1239 |                     arguments=[
1240 |                         types.PromptArgument(
1241 |                             name="format",
1242 |                             description="Export format (json, markdown, text)",
1243 |                             required=True
1244 |                         ),
1245 |                         types.PromptArgument(
1246 |                             name="filter",
1247 |                             description="Filter criteria (tags or search query)",
1248 |                             required=False
1249 |                         )
1250 |                     ]
1251 |                 ),
1252 |                 types.Prompt(
1253 |                     name="memory_cleanup",
1254 |                     description="Identify and remove duplicate or outdated memories",
1255 |                     arguments=[
1256 |                         types.PromptArgument(
1257 |                             name="older_than",
1258 |                             description="Remove memories older than (e.g., '6 months', '1 year')",
1259 |                             required=False
1260 |                         ),
1261 |                         types.PromptArgument(
1262 |                             name="similarity_threshold",
1263 |                             description="Similarity threshold for duplicates (0.0-1.0)",
1264 |                             required=False
1265 |                         )
1266 |                     ]
1267 |                 ),
1268 |                 types.Prompt(
1269 |                     name="learning_session",
1270 |                     description="Store structured learning notes from a study session",
1271 |                     arguments=[
1272 |                         types.PromptArgument(
1273 |                             name="topic",
1274 |                             description="Learning topic or subject",
1275 |                             required=True
1276 |                         ),
1277 |                         types.PromptArgument(
1278 |                             name="key_points",
1279 |                             description="Key points learned (comma-separated)",
1280 |                             required=True
1281 |                         ),
1282 |                         types.PromptArgument(
1283 |                             name="questions",
1284 |                             description="Questions or areas for further study",
1285 |                             required=False
1286 |                         )
1287 |                     ]
1288 |                 )
1289 |             ]
1290 |         
1291 |         @self.server.get_prompt()
1292 |         async def handle_get_prompt(name: str, arguments: dict) -> types.GetPromptResult:
1293 |             """Handle prompt execution with provided arguments."""
1294 |             await self._ensure_storage_initialized()
1295 |             
 1296 |             # Dispatch to the prompt helpers below (nested functions, so call with explicit self)
 1297 |             if name == "memory_review":
 1298 |                 messages = await _prompt_memory_review(self, arguments)
 1299 |             elif name == "memory_analysis":
 1300 |                 messages = await _prompt_memory_analysis(self, arguments)
 1301 |             elif name == "knowledge_export":
 1302 |                 messages = await _prompt_knowledge_export(self, arguments)
 1303 |             elif name == "memory_cleanup":
 1304 |                 messages = await _prompt_memory_cleanup(self, arguments)
 1305 |             elif name == "learning_session":
 1306 |                 messages = await _prompt_learning_session(self, arguments)
1307 |             else:
1308 |                 messages = [
1309 |                     types.PromptMessage(
1310 |                         role="user",
1311 |                         content=types.TextContent(
1312 |                             type="text",
1313 |                             text=f"Unknown prompt: {name}"
1314 |                         )
1315 |                     )
1316 |                 ]
1317 |             
1318 |             return types.GetPromptResult(
1319 |                 description=f"Result of {name} prompt",
1320 |                 messages=messages
1321 |             )
1322 |         
 1323 |         # Helper functions for specific prompts (nested here, not bound methods; take self explicitly)
1324 |         async def _prompt_memory_review(self, arguments: dict) -> list:
1325 |             """Generate memory review prompt."""
1326 |             time_period = arguments.get("time_period", "last week")
1327 |             focus_area = arguments.get("focus_area", "")
1328 |             
1329 |             # Retrieve memories from the specified time period
1330 |             memories = await self.storage.recall_memory(time_period, n_results=20)
1331 |             
1332 |             prompt_text = f"Review of memories from {time_period}"
1333 |             if focus_area:
1334 |                 prompt_text += f" (focusing on {focus_area})"
1335 |             prompt_text += ":\n\n"
1336 |             
1337 |             if memories:
1338 |                 for mem in memories:
1339 |                     prompt_text += f"- {mem.content}\n"
1340 |                     if mem.metadata.tags:
1341 |                         prompt_text += f"  Tags: {', '.join(mem.metadata.tags)}\n"
1342 |             else:
1343 |                 prompt_text += "No memories found for this time period."
1344 |             
1345 |             return [
1346 |                 types.PromptMessage(
1347 |                     role="user",
1348 |                     content=types.TextContent(type="text", text=prompt_text)
1349 |                 )
1350 |             ]
1351 |         
1352 |         async def _prompt_memory_analysis(self, arguments: dict) -> list:
1353 |             """Generate memory analysis prompt."""
 1354 |             tags = [t.strip() for t in arguments.get("tags", "").split(",") if t.strip()]
1355 |             time_range = arguments.get("time_range", "all time")
1356 |             
1357 |             analysis_text = "Memory Analysis"
1358 |             if tags:
1359 |                 analysis_text += f" for tags: {', '.join(tags)}"
1360 |             if time_range != "all time":
1361 |                 analysis_text += f" from {time_range}"
1362 |             analysis_text += "\n\n"
1363 |             
1364 |             # Get relevant memories
1365 |             if tags:
1366 |                 memories = await self.storage.search_by_tag(tags)
1367 |             else:
1368 |                 memories = await self.storage.get_recent_memories(100)
1369 |             
1370 |             # Analyze patterns
1371 |             tag_counts = {}
1372 |             type_counts = {}
1373 |             for mem in memories:
1374 |                 for tag in mem.metadata.tags:
1375 |                     tag_counts[tag] = tag_counts.get(tag, 0) + 1
1376 |                 mem_type = mem.metadata.memory_type
1377 |                 type_counts[mem_type] = type_counts.get(mem_type, 0) + 1
1378 |             
1379 |             analysis_text += f"Total memories analyzed: {len(memories)}\n\n"
1380 |             analysis_text += "Top tags:\n"
1381 |             for tag, count in sorted(tag_counts.items(), key=lambda x: x[1], reverse=True)[:10]:
1382 |                 analysis_text += f"  - {tag}: {count} occurrences\n"
1383 |             analysis_text += "\nMemory types:\n"
1384 |             for mem_type, count in type_counts.items():
1385 |                 analysis_text += f"  - {mem_type}: {count} memories\n"
1386 |             
1387 |             return [
1388 |                 types.PromptMessage(
1389 |                     role="user",
1390 |                     content=types.TextContent(type="text", text=analysis_text)
1391 |                 )
1392 |             ]
1393 |         
1394 |         async def _prompt_knowledge_export(self, arguments: dict) -> list:
1395 |             """Generate knowledge export prompt."""
1396 |             format_type = arguments.get("format", "json")
1397 |             filter_criteria = arguments.get("filter", "")
1398 |             
1399 |             # Get memories based on filter
1400 |             if filter_criteria:
1401 |                 if "," in filter_criteria:
1402 |                     # Assume tags
 1403 |                     memories = await self.storage.search_by_tag([t.strip() for t in filter_criteria.split(",")])
1404 |                 else:
1405 |                     # Assume search query
1406 |                     memories = await self.storage.search(filter_criteria, n_results=100)
1407 |             else:
1408 |                 memories = await self.storage.get_recent_memories(100)
1409 |             
1410 |             export_text = f"Exported {len(memories)} memories in {format_type} format:\n\n"
1411 |             
1412 |             if format_type == "markdown":
1413 |                 for mem in memories:
1414 |                     export_text += f"## {mem.metadata.created_at_iso}\n"
1415 |                     export_text += f"{mem.content}\n"
1416 |                     if mem.metadata.tags:
1417 |                         export_text += f"*Tags: {', '.join(mem.metadata.tags)}*\n"
1418 |                     export_text += "\n"
1419 |             elif format_type == "text":
1420 |                 for mem in memories:
1421 |                     export_text += f"[{mem.metadata.created_at_iso}] {mem.content}\n"
1422 |             else:  # json
1423 |                 import json
1424 |                 export_data = [m.to_dict() for m in memories]
1425 |                 export_text += json.dumps(export_data, indent=2, default=str)
1426 |             
1427 |             return [
1428 |                 types.PromptMessage(
1429 |                     role="user",
1430 |                     content=types.TextContent(type="text", text=export_text)
1431 |                 )
1432 |             ]
1433 |         
1434 |         async def _prompt_memory_cleanup(self, arguments: dict) -> list:
1435 |             """Generate memory cleanup prompt."""
1436 |             older_than = arguments.get("older_than", "")
1437 |             similarity_threshold = float(arguments.get("similarity_threshold", "0.95"))
1438 |             
1439 |             cleanup_text = "Memory Cleanup Report:\n\n"
1440 |             
1441 |             # Find duplicates
1442 |             all_memories = await self.storage.get_recent_memories(1000)
1443 |             duplicates = []
1444 |             
1445 |             for i, mem1 in enumerate(all_memories):
1446 |                 for mem2 in all_memories[i+1:]:
 1447 |                     # Cheap heuristic: near-equal length + identical 50-char prefix (similarity_threshold is parsed but unused here)
1448 |                     if abs(len(mem1.content) - len(mem2.content)) < 10:
1449 |                         if mem1.content[:50] == mem2.content[:50]:
1450 |                             duplicates.append((mem1, mem2))
1451 |             
1452 |             cleanup_text += f"Found {len(duplicates)} potential duplicate pairs\n"
1453 |             
1454 |             if older_than:
1455 |                 cleanup_text += f"\nMemories older than {older_than} can be archived\n"
1456 |             
1457 |             return [
1458 |                 types.PromptMessage(
1459 |                     role="user",
1460 |                     content=types.TextContent(type="text", text=cleanup_text)
1461 |                 )
1462 |             ]
1463 |         
1464 |         async def _prompt_learning_session(self, arguments: dict) -> list:
1465 |             """Generate learning session prompt."""
1466 |             topic = arguments.get("topic", "General")
1467 |             key_points = arguments.get("key_points", "").split(",")
1468 |             questions = arguments.get("questions", "").split(",") if arguments.get("questions") else []
1469 |             
1470 |             # Create structured learning note
1471 |             learning_note = f"# Learning Session: {topic}\n\n"
1472 |             learning_note += f"Date: {datetime.now().isoformat()}\n\n"
1473 |             learning_note += "## Key Points:\n"
1474 |             for point in key_points:
1475 |                 learning_note += f"- {point.strip()}\n"
1476 |             
1477 |             if questions:
1478 |                 learning_note += "\n## Questions for Further Study:\n"
1479 |                 for question in questions:
1480 |                     learning_note += f"- {question.strip()}\n"
1481 |             
1482 |             # Store the learning note
1483 |             memory = Memory(
1484 |                 content=learning_note,
1485 |                 tags=["learning", topic.lower().replace(" ", "_")],
1486 |                 memory_type="learning_note"
1487 |             )
1488 |             success, message = await self.storage.store(memory)
1489 |             
1490 |             response_text = f"Learning session stored successfully!\n\n{learning_note}"
1491 |             if not success:
1492 |                 response_text = f"Failed to store learning session: {message}"
1493 |             
1494 |             return [
1495 |                 types.PromptMessage(
1496 |                     role="user",
1497 |                     content=types.TextContent(type="text", text=response_text)
1498 |                 )
1499 |             ]
1500 |         
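# ----------------------------------------------------------------------
# [Editor's note] The pairwise duplicate scan in _prompt_memory_cleanup
# above is O(n^2) over up to 1000 memories. Bucketing by the same
# 50-character prefix finds the same candidate pairs in one pass (the
# length filter can then be applied within each bucket); a sketch with a
# hypothetical helper name:
#
#   from collections import defaultdict
#
#   def candidate_duplicate_groups(memories):
#       buckets = defaultdict(list)
#       for mem in memories:
#           buckets[mem.content[:50]].append(mem)   # same prefix key as above
#       return [group for group in buckets.values() if len(group) > 1]
# ----------------------------------------------------------------------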
1501 |         # Add a custom error handler for unsupported methods
1502 |         self.server.on_method_not_found = self.handle_method_not_found
1503 |         
1504 |         @self.server.list_tools()
1505 |         async def handle_list_tools() -> List[types.Tool]:
1506 |             logger.info("=== HANDLING LIST_TOOLS REQUEST ===")
1507 |             try:
1508 |                 tools = [
1509 |                     types.Tool(
1510 |                         name="store_memory",
1511 |                         description="""Store new information with optional tags.
1512 | 
1513 |                         Accepts two tag formats in metadata:
1514 |                         - Array: ["tag1", "tag2"]
1515 |                         - String: "tag1,tag2"
1516 | 
 1517 |                         Examples:
1518 |                         # Using array format:
1519 |                         {
1520 |                             "content": "Memory content",
1521 |                             "metadata": {
1522 |                                 "tags": ["important", "reference"],
1523 |                                 "type": "note"
1524 |                             }
1525 |                         }
1526 | 
 1527 |                         # Using string format (preferred):
1528 |                         {
1529 |                             "content": "Memory content",
1530 |                             "metadata": {
1531 |                                 "tags": "important,reference",
1532 |                                 "type": "note"
1533 |                             }
1534 |                         }""",
1535 |                         inputSchema={
1536 |                             "type": "object",
1537 |                             "properties": {
1538 |                                 "content": {
1539 |                                     "type": "string",
1540 |                                     "description": "The memory content to store, such as a fact, note, or piece of information."
1541 |                                 },
1542 |                                 "metadata": {
1543 |                                     "type": "object",
1544 |                                     "description": "Optional metadata about the memory, including tags and type.",
1545 |                                     "properties": {
1546 |                                         "tags": {
1547 |                                             "oneOf": [
1548 |                                                 {
1549 |                                                     "type": "array",
1550 |                                                     "items": {"type": "string"},
1551 |                                                     "description": "Tags as an array of strings"
1552 |                                                 },
1553 |                                                 {
1554 |                                                     "type": "string",
1555 |                                                     "description": "Tags as comma-separated string"
1556 |                                                 }
1557 |                                             ],
1558 |                                             "description": "Tags to categorize the memory. Accepts either an array of strings or a comma-separated string.",
1559 |                                             "examples": [
1560 |                                                 "tag1,tag2,tag3",
1561 |                                                 ["tag1", "tag2", "tag3"]
1562 |                                             ]
1563 |                                         },
1564 |                                         "type": {
1565 |                                             "type": "string",
1566 |                                             "description": "Optional type or category label for the memory, e.g., 'note', 'fact', 'reminder'."
1567 |                                         }
1568 |                                     }
1569 |                                 }
1570 |                             },
1571 |                             "required": ["content"]
1572 |                         }
1573 |                     ),
1574 |                     types.Tool(
1575 |                         name="recall_memory",
1576 |                         description="""Retrieve memories using natural language time expressions and optional semantic search.
1577 |                         
1578 |                         Supports various time-related expressions such as:
1579 |                         - "yesterday", "last week", "2 days ago"
1580 |                         - "last summer", "this month", "last January"
1581 |                         - "spring", "winter", "Christmas", "Thanksgiving"
1582 |                         - "morning", "evening", "yesterday afternoon"
1583 |                         
1584 |                         Examples:
1585 |                         {
1586 |                             "query": "recall what I stored last week"
1587 |                         }
1588 |                         
1589 |                         {
1590 |                             "query": "find information about databases from two months ago",
1591 |                             "n_results": 5
1592 |                         }
1593 |                         """,
1594 |                         inputSchema={
1595 |                             "type": "object",
1596 |                             "properties": {
1597 |                                 "query": {
1598 |                                     "type": "string",
1599 |                                     "description": "Natural language query specifying the time frame or content to recall, e.g., 'last week', 'yesterday afternoon', or a topic."
1600 |                                 },
1601 |                                 "n_results": {
1602 |                                     "type": "number",
1603 |                                     "default": 5,
1604 |                                     "description": "Maximum number of results to return."
1605 |                                 }
1606 |                             },
1607 |                             "required": ["query"]
1608 |                         }
1609 |                     ),
1610 |                     types.Tool(
1611 |                         name="retrieve_memory",
1612 |                         description="""Find relevant memories based on query.
1613 | 
1614 |                         Example:
1615 |                         {
1616 |                             "query": "find this memory",
1617 |                             "n_results": 5
1618 |                         }""",
1619 |                         inputSchema={
1620 |                             "type": "object",
1621 |                             "properties": {
1622 |                                 "query": {
1623 |                                     "type": "string",
1624 |                                     "description": "Search query to find relevant memories based on content."
1625 |                                 },
1626 |                                 "n_results": {
1627 |                                     "type": "number",
1628 |                                     "default": 5,
1629 |                                     "description": "Maximum number of results to return."
1630 |                                 }
1631 |                             },
1632 |                             "required": ["query"]
1633 |                         }
1634 |                     ),
1635 |                     types.Tool(
1636 |                         name="search_by_tag",
 1637 |                         description="""Search memories by tags. Accepts an array of strings or a comma-separated string.
1638 |                         Returns memories matching ANY of the specified tags.
1639 | 
1640 |                         Example:
1641 |                         {
1642 |                             "tags": ["important", "reference"]
1643 |                         }""",
1644 |                         inputSchema={
1645 |                             "type": "object",
1646 |                             "properties": {
1647 |                                 "tags": {
1648 |                                     "oneOf": [
1649 |                                         {
1650 |                                             "type": "array",
1651 |                                             "items": {"type": "string"},
1652 |                                             "description": "Tags as an array of strings"
1653 |                                         },
1654 |                                         {
1655 |                                             "type": "string",
1656 |                                             "description": "Tags as comma-separated string"
1657 |                                         }
1658 |                                     ],
1659 |                                     "description": "List of tags to search for. Returns memories matching ANY of these tags. Accepts either an array of strings or a comma-separated string."
1660 |                                 }
1661 |                             },
1662 |                             "required": ["tags"]
1663 |                         }
1664 |                     ),
1665 |                     types.Tool(
1666 |                         name="delete_memory",
1667 |                         description="""Delete a specific memory by its hash.
1668 | 
1669 |                         Example:
1670 |                         {
1671 |                             "content_hash": "a1b2c3d4..."
1672 |                         }""",
1673 |                         inputSchema={
1674 |                             "type": "object",
1675 |                             "properties": {
1676 |                                 "content_hash": {
1677 |                                     "type": "string",
1678 |                                     "description": "Hash of the memory content to delete. Obtainable from memory metadata."
1679 |                                 }
1680 |                             },
1681 |                             "required": ["content_hash"]
1682 |                         }
1683 |                     ),
1684 |                     types.Tool(
1685 |                         name="delete_by_tag",
1686 |                         description="""Delete all memories with specific tags.
1687 |                         WARNING: Deletes ALL memories containing any of the specified tags.
1688 | 
1689 |                         Example:
1690 |                         {"tags": ["temporary", "outdated"]}""",
1691 |                         inputSchema={
1692 |                             "type": "object",
1693 |                             "properties": {
1694 |                                 "tags": {
1695 |                                     "oneOf": [
1696 |                                         {
1697 |                                             "type": "array",
1698 |                                             "items": {"type": "string"},
1699 |                                             "description": "Tags as an array of strings"
1700 |                                         },
1701 |                                         {
1702 |                                             "type": "string",
1703 |                                             "description": "Tags as comma-separated string"
1704 |                                         }
1705 |                                     ],
1706 |                                     "description": "Array of tag labels. Memories containing any of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1707 |                                 }
1708 |                             },
1709 |                             "required": ["tags"]
1710 |                         }
1711 |                     ),
1712 |                     types.Tool(
1713 |                         name="delete_by_tags",
1714 |                         description="""Delete all memories containing any of the specified tags.
1715 |                         This is the explicit multi-tag version for API clarity.
1716 |                         WARNING: Deletes ALL memories containing any of the specified tags.
1717 | 
1718 |                         Example:
1719 |                         {
1720 |                             "tags": ["temporary", "outdated", "test"]
1721 |                         }""",
1722 |                         inputSchema={
1723 |                             "type": "object",
1724 |                             "properties": {
1725 |                                 "tags": {
1726 |                                     "oneOf": [
1727 |                                         {
1728 |                                             "type": "array",
1729 |                                             "items": {"type": "string"},
1730 |                                             "description": "Tags as an array of strings"
1731 |                                         },
1732 |                                         {
1733 |                                             "type": "string",
1734 |                                             "description": "Tags as comma-separated string"
1735 |                                         }
1736 |                                     ],
1737 |                                     "description": "List of tag labels. Memories containing any of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1738 |                                 }
1739 |                             },
1740 |                             "required": ["tags"]
1741 |                         }
1742 |                     ),
1743 |                     types.Tool(
1744 |                         name="delete_by_all_tags",
1745 |                         description="""Delete memories that contain ALL of the specified tags.
1746 |                         WARNING: Only deletes memories that have every one of the specified tags.
1747 | 
1748 |                         Example:
1749 |                         {
1750 |                             "tags": ["important", "urgent"]
1751 |                         }""",
1752 |                         inputSchema={
1753 |                             "type": "object",
1754 |                             "properties": {
1755 |                                 "tags": {
1756 |                                     "oneOf": [
1757 |                                         {
1758 |                                             "type": "array",
1759 |                                             "items": {"type": "string"},
1760 |                                             "description": "Tags as an array of strings"
1761 |                                         },
1762 |                                         {
1763 |                                             "type": "string",
1764 |                                             "description": "Tags as comma-separated string"
1765 |                                         }
1766 |                                     ],
1767 |                                     "description": "List of tag labels. Only memories containing ALL of these tags will be deleted. Accepts either an array of strings or a comma-separated string."
1768 |                                 }
1769 |                             },
1770 |                             "required": ["tags"]
1771 |                         }
1772 |                     ),
1773 |                     types.Tool(
1774 |                         name="cleanup_duplicates",
1775 |                         description="Find and remove duplicate entries",
1776 |                         inputSchema={
1777 |                             "type": "object",
1778 |                             "properties": {}
1779 |                         }
1780 |                     ),
1781 |                     types.Tool(
1782 |                         name="debug_retrieve",
1783 |                         description="""Retrieve memories with debug information.
1784 | 
1785 |                         Example:
1786 |                         {
1787 |                             "query": "debug this",
1788 |                             "n_results": 5,
1789 |                             "similarity_threshold": 0.0
1790 |                         }""",
1791 |                         inputSchema={
1792 |                             "type": "object",
1793 |                             "properties": {
1794 |                                 "query": {
1795 |                                     "type": "string",
1796 |                                     "description": "Search query for debugging retrieval, e.g., a phrase or keyword."
1797 |                                 },
1798 |                                 "n_results": {
1799 |                                     "type": "number",
1800 |                                     "default": 5,
1801 |                                     "description": "Maximum number of results to return."
1802 |                                 },
1803 |                                 "similarity_threshold": {
1804 |                                     "type": "number",
1805 |                                     "default": 0.0,
1806 |                                     "description": "Minimum similarity score threshold for results (0.0 to 1.0)."
1807 |                                 }
1808 |                             },
1809 |                             "required": ["query"]
1810 |                         }
1811 |                     ),
1812 |                     types.Tool(
1813 |                         name="exact_match_retrieve",
1814 |                         description="""Retrieve memories using exact content match.
1815 | 
1816 |                         Example:
1817 |                         {
1818 |                             "content": "find exactly this"
1819 |                         }""",
1820 |                         inputSchema={
1821 |                             "type": "object",
1822 |                             "properties": {
1823 |                                 "content": {
1824 |                                     "type": "string",
1825 |                                     "description": "Exact content string to match against stored memories."
1826 |                                 }
1827 |                             },
1828 |                             "required": ["content"]
1829 |                         }
1830 |                     ),
1831 |                     types.Tool(
1832 |                         name="get_raw_embedding",
1833 |                         description="""Get raw embedding vector for debugging purposes.
1834 | 
1835 |                         Example:
1836 |                         {
1837 |                             "content": "text to embed"
1838 |                         }""",
1839 |                         inputSchema={
1840 |                             "type": "object",
1841 |                             "properties": {
1842 |                                 "content": {
1843 |                                     "type": "string",
1844 |                                     "description": "Content to generate embedding for."
1845 |                                 }
1846 |                             },
1847 |                             "required": ["content"]
1848 |                         }
1849 |                     ),
1850 |                     types.Tool(
1851 |                         name="check_database_health",
1852 |                         description="Check database health and get statistics",
1853 |                         inputSchema={
1854 |                             "type": "object",
1855 |                             "properties": {}
1856 |                         }
1857 |                     ),
1858 |                     types.Tool(
1859 |                         name="get_cache_stats",
1860 |                         description="""Get MCP server global cache statistics for performance monitoring.
1861 | 
1862 |                         Returns detailed metrics about storage and memory service caching,
1863 |                         including hit rates, initialization times, and cache sizes.
1864 | 
1865 |                         This tool is useful for:
1866 |                         - Monitoring cache effectiveness
1867 |                         - Debugging performance issues
1868 |                         - Verifying cache persistence across MCP tool calls
1869 | 
1870 |                         Returns cache statistics including total calls, hit rate percentage,
1871 |                         storage/service cache metrics, performance metrics, and backend info.""",
1872 |                         inputSchema={
1873 |                             "type": "object",
1874 |                             "properties": {}
1875 |                         }
1876 |                     ),
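     |                     # Hypothetical shape of the get_cache_stats payload (field names
     |                     # are illustrative, inferred from the description above; not a
     |                     # guaranteed contract):
     |                     #   {"total_calls": 42, "hit_rate": 85.7,
     |                     #    "storage_cache": {"hits": 30, "misses": 5},
     |                     #    "performance": {"avg_init_ms": 12.3}, "backend": "..."}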
1877 |                     types.Tool(
1878 |                         name="recall_by_timeframe",
1879 |                         description="""Retrieve memories within a specific timeframe.
1880 | 
1881 |                         Example:
1882 |                         {
1883 |                             "start_date": "2024-01-01",
1884 |                             "end_date": "2024-01-31",
1885 |                             "n_results": 5
1886 |                         }""",
1887 |                         inputSchema={
1888 |                             "type": "object",
1889 |                             "properties": {
1890 |                                 "start_date": {
1891 |                                     "type": "string",
1892 |                                     "format": "date",
1893 |                                     "description": "Start date (inclusive) in YYYY-MM-DD format."
1894 |                                 },
1895 |                                 "end_date": {
1896 |                                     "type": "string",
1897 |                                     "format": "date",
1898 |                                     "description": "End date (inclusive) in YYYY-MM-DD format."
1899 |                                 },
1900 |                                 "n_results": {
1901 |                                     "type": "number",
1902 |                                     "default": 5,
1903 |                                     "description": "Maximum number of results to return."
1904 |                                 }
1905 |                             },
1906 |                             "required": ["start_date"]
1907 |                         }
1908 |                     ),
1909 |                     types.Tool(
1910 |                         name="delete_by_timeframe",
1911 |                         description="""Delete memories within a specific timeframe.
1912 |                         Optional tag parameter to filter deletions.
1913 | 
1914 |                         Example:
1915 |                         {
1916 |                             "start_date": "2024-01-01",
1917 |                             "end_date": "2024-01-31",
1918 |                             "tag": "temporary"
1919 |                         }""",
1920 |                         inputSchema={
1921 |                             "type": "object",
1922 |                             "properties": {
1923 |                                 "start_date": {
1924 |                                     "type": "string",
1925 |                                     "format": "date",
1926 |                                     "description": "Start date (inclusive) in YYYY-MM-DD format."
1927 |                                 },
1928 |                                 "end_date": {
1929 |                                     "type": "string",
1930 |                                     "format": "date",
1931 |                                     "description": "End date (inclusive) in YYYY-MM-DD format."
1932 |                                 },
1933 |                                 "tag": {
1934 |                                     "type": "string",
1935 |                                     "description": "Optional tag to filter deletions. Only memories with this tag will be deleted."
1936 |                                 }
1937 |                             },
1938 |                             "required": ["start_date"]
1939 |                         }
1940 |                     ),
1941 |                     types.Tool(
1942 |                         name="delete_before_date",
1943 |                         description="""Delete memories before a specific date.
1944 |                         Optional tag parameter to filter deletions.
1945 | 
1946 |                         Example:
1947 |                         {
1948 |                             "before_date": "2024-01-01",
1949 |                             "tag": "temporary"
1950 |                         }""",
1951 |                         inputSchema={
1952 |                             "type": "object",
1953 |                             "properties": {
1954 |                                 "before_date": {"type": "string", "format": "date", "description": "Cutoff date in YYYY-MM-DD format; memories created before this date are deleted."},
1955 |                                 "tag": {"type": "string", "description": "Optional tag to filter deletions. Only memories with this tag will be deleted."}
1956 |                             },
1957 |                             "required": ["before_date"]
1958 |                         }
1959 |                     ),
1960 |                     types.Tool(
1961 |                         name="update_memory_metadata",
1962 |                         description="""Update memory metadata without recreating the entire memory entry.
1963 |                         
1964 |                         This provides efficient metadata updates while preserving the original
1965 |                         memory content, embeddings, and optionally timestamps.
1966 |                         
1967 |                         Examples:
1968 |                         # Add tags to a memory
1969 |                         {
1970 |                             "content_hash": "abc123...",
1971 |                             "updates": {
1972 |                                 "tags": ["important", "reference", "new-tag"]
1973 |                             }
1974 |                         }
1975 |                         
1976 |                         # Update memory type and custom metadata
1977 |                         {
1978 |                             "content_hash": "abc123...",
1979 |                             "updates": {
1980 |                                 "memory_type": "reminder",
1981 |                                 "metadata": {
1982 |                                     "priority": "high",
1983 |                                     "due_date": "2024-01-15"
1984 |                                 }
1985 |                             }
1986 |                         }
1987 |                         
1988 |                         # Update custom fields directly
1989 |                         {
1990 |                             "content_hash": "abc123...",
1991 |                             "updates": {
1992 |                                 "priority": "urgent",
1993 |                                 "status": "active"
1994 |                             }
1995 |                         }""",
1996 |                         inputSchema={
1997 |                             "type": "object",
1998 |                             "properties": {
1999 |                                 "content_hash": {
2000 |                                     "type": "string",
2001 |                                     "description": "The content hash of the memory to update."
2002 |                                 },
2003 |                                 "updates": {
2004 |                                     "type": "object",
2005 |                                     "description": "Dictionary of metadata fields to update.",
2006 |                                     "properties": {
2007 |                                         "tags": {
2008 |                                             "oneOf": [
2009 |                                                 {
2010 |                                                     "type": "array",
2011 |                                                     "items": {"type": "string"},
2012 |                                                     "description": "Tags as an array of strings"
2013 |                                                 },
2014 |                                                 {
2015 |                                                     "type": "string",
2016 |                                                     "description": "Tags as comma-separated string"
2017 |                                                 }
2018 |                                             ],
2019 |                                             "description": "Replace existing tags with this list. Accepts either an array of strings or a comma-separated string."
2020 |                                         },
2021 |                                         "memory_type": {
2022 |                                             "type": "string",
2023 |                                             "description": "Update the memory type (e.g., 'note', 'reminder', 'fact')."
2024 |                                         },
2025 |                                         "metadata": {
2026 |                                             "type": "object",
2027 |                                             "description": "Custom metadata fields to merge with existing metadata."
2028 |                                         }
2029 |                                     }
2030 |                                 },
2031 |                                 "preserve_timestamps": {
2032 |                                     "type": "boolean",
2033 |                                     "default": True,
2034 |                                     "description": "Whether to preserve the original created_at timestamp (default: true)."
2035 |                                 }
2036 |                             },
2037 |                             "required": ["content_hash", "updates"]
2038 |                         }
2039 |                     )
2040 |                 ]
2041 |                 
2042 |                 # Add consolidation tools if enabled
2043 |                 if CONSOLIDATION_ENABLED and self.consolidator:
2044 |                     consolidation_tools = [
2045 |                         types.Tool(
2046 |                             name="consolidate_memories",
2047 |                             description="""Run memory consolidation for a specific time horizon.
2048 |                             
2049 |                             Performs dream-inspired memory consolidation including:
2050 |                             - Exponential decay scoring
2051 |                             - Creative association discovery  
2052 |                             - Semantic clustering and compression
2053 |                             - Controlled forgetting with archival
2054 |                             
2055 |                             Example:
2056 |                             {
2057 |                                 "time_horizon": "weekly"
2058 |                             }""",
2059 |                             inputSchema={
2060 |                                 "type": "object",
2061 |                                 "properties": {
2062 |                                     "time_horizon": {
2063 |                                         "type": "string",
2064 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
2065 |                                         "description": "Time horizon for consolidation operations."
2066 |                                     }
2067 |                                 },
2068 |                                 "required": ["time_horizon"]
2069 |                             }
2070 |                         ),
2071 |                         types.Tool(
2072 |                             name="consolidation_status",
2073 |                             description="Get status and health information about the consolidation system.",
2074 |                             inputSchema={"type": "object", "properties": {}}
2075 |                         ),
2076 |                         types.Tool(
2077 |                             name="consolidation_recommendations",
2078 |                             description="""Get recommendations for consolidation based on current memory state.
2079 |                             
2080 |                             Example:
2081 |                             {
2082 |                                 "time_horizon": "monthly"
2083 |                             }""",
2084 |                             inputSchema={
2085 |                                 "type": "object",
2086 |                                 "properties": {
2087 |                                     "time_horizon": {
2088 |                                         "type": "string",
2089 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
2090 |                                         "description": "Time horizon to analyze for consolidation recommendations."
2091 |                                     }
2092 |                                 },
2093 |                                 "required": ["time_horizon"]
2094 |                             }
2095 |                         ),
2096 |                         types.Tool(
2097 |                             name="scheduler_status",
2098 |                             description="Get consolidation scheduler status and job information.",
2099 |                             inputSchema={"type": "object", "properties": {}}
2100 |                         ),
2101 |                         types.Tool(
2102 |                             name="trigger_consolidation",
2103 |                             description="""Manually trigger a consolidation job.
2104 |                             
2105 |                             Example:
2106 |                             {
2107 |                                 "time_horizon": "weekly",
2108 |                                 "immediate": true
2109 |                             }""",
2110 |                             inputSchema={
2111 |                                 "type": "object",
2112 |                                 "properties": {
2113 |                                     "time_horizon": {
2114 |                                         "type": "string",
2115 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
2116 |                                         "description": "Time horizon for the consolidation job."
2117 |                                     },
2118 |                                     "immediate": {
2119 |                                         "type": "boolean",
2120 |                                         "default": True,
2121 |                                         "description": "Whether to run immediately or schedule for later."
2122 |                                     }
2123 |                                 },
2124 |                                 "required": ["time_horizon"]
2125 |                             }
2126 |                         ),
2127 |                         types.Tool(
2128 |                             name="pause_consolidation",
2129 |                             description="""Pause consolidation jobs.
2130 |                             
2131 |                             Example:
2132 |                             {
2133 |                                 "time_horizon": "weekly"
2134 |                             }""",
2135 |                             inputSchema={
2136 |                                 "type": "object",
2137 |                                 "properties": {
2138 |                                     "time_horizon": {
2139 |                                         "type": "string",
2140 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
2141 |                                         "description": "Specific time horizon to pause, or omit to pause all jobs."
2142 |                                     }
2143 |                                 }
2144 |                             }
2145 |                         ),
2146 |                         types.Tool(
2147 |                             name="resume_consolidation",
2148 |                             description="""Resume consolidation jobs.
2149 |                             
2150 |                             Example:
2151 |                             {
2152 |                                 "time_horizon": "weekly"
2153 |                             }""",
2154 |                             inputSchema={
2155 |                                 "type": "object",
2156 |                                 "properties": {
2157 |                                     "time_horizon": {
2158 |                                         "type": "string",
2159 |                                         "enum": ["daily", "weekly", "monthly", "quarterly", "yearly"],
2160 |                                         "description": "Specific time horizon to resume, or omit to resume all jobs."
2161 |                                     }
2162 |                                 }
2163 |                             }
2164 |                         )
2165 |                     ]
2166 |                     tools.extend(consolidation_tools)
2167 |                     logger.info(f"Added {len(consolidation_tools)} consolidation tools")
2168 |                 
2169 |                 # Add document ingestion tools
2170 |                 ingestion_tools = [
2171 |                     types.Tool(
2172 |                         name="ingest_document",
2173 |                         description="""Ingest a single document file into the memory database.
2174 |                         
2175 |                         Supports multiple formats:
2176 |                         - PDF files (.pdf)
2177 |                         - Text files (.txt, .md, .markdown, .rst)
2178 |                         - JSON files (.json)
2179 |                         
2180 |                         The document will be parsed, chunked intelligently, and stored
2181 |                         as multiple memories with appropriate metadata.
2182 |                         
2183 |                         Example:
2184 |                         {
2185 |                             "file_path": "/path/to/document.pdf",
2186 |                             "tags": ["documentation", "manual"],
2187 |                             "chunk_size": 1000
2188 |                         }""",
2189 |                         inputSchema={
2190 |                             "type": "object",
2191 |                             "properties": {
2192 |                                 "file_path": {
2193 |                                     "type": "string",
2194 |                                     "description": "Path to the document file to ingest."
2195 |                                 },
2196 |                                 "tags": {
2197 |                                     "oneOf": [
2198 |                                         {
2199 |                                             "type": "array",
2200 |                                             "items": {"type": "string"},
2201 |                                             "description": "Tags as an array of strings"
2202 |                                         },
2203 |                                         {
2204 |                                             "type": "string",
2205 |                                             "description": "Tags as comma-separated string"
2206 |                                         }
2207 |                                     ],
2208 |                                     "description": "Optional tags to apply to all memories created from this document. Accepts either an array of strings or a comma-separated string.",
2209 |                                     "default": []
2210 |                                 },
2211 |                                 "chunk_size": {
2212 |                                     "type": "number",
2213 |                                     "description": "Target size for text chunks in characters (default: 1000).",
2214 |                                     "default": 1000
2215 |                                 },
2216 |                                 "chunk_overlap": {
2217 |                                     "type": "number",
2218 |                                     "description": "Characters to overlap between chunks (default: 200).",
2219 |                                     "default": 200
2220 |                                 },
2221 |                                 "memory_type": {
2222 |                                     "type": "string",
2223 |                                     "description": "Type label for created memories (default: 'document').",
2224 |                                     "default": "document"
2225 |                                 }
2226 |                             },
2227 |                             "required": ["file_path"]
2228 |                         }
2229 |                     ),
2230 |                     types.Tool(
2231 |                         name="ingest_directory",
2232 |                         description="""Batch ingest all supported documents from a directory.
2233 |                         
2234 |                         Recursively processes all supported file types in the directory,
2235 |                         creating memories with consistent tagging and metadata.
2236 |                         
2237 |                         Supported formats: PDF, TXT, MD, JSON
2238 |                         
2239 |                         Example:
2240 |                         {
2241 |                             "directory_path": "/path/to/documents",
2242 |                             "tags": ["knowledge-base"],
2243 |                             "recursive": true,
2244 |                             "file_extensions": ["pdf", "md", "txt"]
2245 |                         }""",
2246 |                         inputSchema={
2247 |                             "type": "object",
2248 |                             "properties": {
2249 |                                 "directory_path": {
2250 |                                     "type": "string",
2251 |                                     "description": "Path to the directory containing documents to ingest."
2252 |                                 },
2253 |                                 "tags": {
2254 |                                     "oneOf": [
2255 |                                         {
2256 |                                             "type": "array",
2257 |                                             "items": {"type": "string"},
2258 |                                             "description": "Tags as an array of strings"
2259 |                                         },
2260 |                                         {
2261 |                                             "type": "string",
2262 |                                             "description": "Tags as comma-separated string"
2263 |                                         }
2264 |                                     ],
2265 |                                     "description": "Optional tags to apply to all memories created. Accepts either an array of strings or a comma-separated string.",
2266 |                                     "default": []
2267 |                                 },
2268 |                                 "recursive": {
2269 |                                     "type": "boolean",
2270 |                                     "description": "Whether to process subdirectories recursively (default: true).",
2271 |                                     "default": True
2272 |                                 },
2273 |                                 "file_extensions": {
2274 |                                     "type": "array",
2275 |                                     "items": {"type": "string"},
2276 |                                     "description": "File extensions to process (default: all supported).",
2277 |                                     "default": ["pdf", "txt", "md", "json"]
2278 |                                 },
2279 |                                 "chunk_size": {
2280 |                                     "type": "number",
2281 |                                     "description": "Target size for text chunks in characters (default: 1000).",
2282 |                                     "default": 1000
2283 |                                 },
2284 |                                 "max_files": {
2285 |                                     "type": "number",
2286 |                                     "description": "Maximum number of files to process (default: 100).",
2287 |                                     "default": 100
2288 |                                 }
2289 |                             },
2290 |                             "required": ["directory_path"]
2291 |                         }
2292 |                     )
2293 |                 ]
2294 |                 tools.extend(ingestion_tools)
2295 |                 logger.info(f"Added {len(ingestion_tools)} ingestion tools")
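     |                 # Chunking arithmetic for the ingestion tools above (a sketch, assuming
     |                 # a sliding-window chunker): with the defaults chunk_size=1000 and
     |                 # chunk_overlap=200, each chunk starts 1000 - 200 = 800 characters
     |                 # after the previous one, so a 2600-character document yields chunks
     |                 # covering roughly [0, 1000), [800, 1800), [1600, 2600).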
2296 |                 
2297 |                 logger.info(f"Returning {len(tools)} tools")
2298 |                 return tools
2299 |             except Exception as e:
2300 |                 logger.error(f"Error in handle_list_tools: {str(e)}")
2301 |                 logger.error(traceback.format_exc())
2302 |                 raise
2303 |         
2304 |         @self.server.call_tool()
2305 |         async def handle_call_tool(name: str, arguments: dict | None) -> List[types.TextContent]:
2306 |             # Add immediate debugging to catch any protocol issues
2307 |             if MCP_CLIENT == 'lm_studio':
2308 |                 print(f"TOOL CALL INTERCEPTED: {name}", file=sys.stdout, flush=True)
2309 |             logger.info(f"=== HANDLING TOOL CALL: {name} ===")
2310 |             logger.info(f"Arguments: {arguments}")
2311 |             
2312 |             try:
2313 |                 if arguments is None:
2314 |                     arguments = {}
2315 |                 
2316 |                 logger.info(f"Processing tool: {name}")
2317 |                 if MCP_CLIENT == 'lm_studio':
2318 |                     print(f"Processing tool: {name}", file=sys.stdout, flush=True)
2319 |                 
2320 |                 if name == "store_memory":
2321 |                     return await self.handle_store_memory(arguments)
2322 |                 elif name == "retrieve_memory":
2323 |                     return await self.handle_retrieve_memory(arguments)
2324 |                 elif name == "recall_memory":
2325 |                     return await self.handle_recall_memory(arguments)
2326 |                 elif name == "search_by_tag":
2327 |                     return await self.handle_search_by_tag(arguments)
2328 |                 elif name == "delete_memory":
2329 |                     return await self.handle_delete_memory(arguments)
2330 |                 elif name == "delete_by_tag":
2331 |                     return await self.handle_delete_by_tag(arguments)
2332 |                 elif name == "delete_by_tags":
2333 |                     return await self.handle_delete_by_tags(arguments)
2334 |                 elif name == "delete_by_all_tags":
2335 |                     return await self.handle_delete_by_all_tags(arguments)
2336 |                 elif name == "cleanup_duplicates":
2337 |                     return await self.handle_cleanup_duplicates(arguments)
2338 |                 elif name == "debug_retrieve":
2339 |                     return await self.handle_debug_retrieve(arguments)
2340 |                 elif name == "exact_match_retrieve":
2341 |                     return await self.handle_exact_match_retrieve(arguments)
2342 |                 elif name == "get_raw_embedding":
2343 |                     return await self.handle_get_raw_embedding(arguments)
2344 |                 elif name == "check_database_health":
2345 |                     logger.info("Calling handle_check_database_health")
2346 |                     return await self.handle_check_database_health(arguments)
2347 |                 elif name == "get_cache_stats":
2348 |                     logger.info("Calling handle_get_cache_stats")
2349 |                     return await self.handle_get_cache_stats(arguments)
2350 |                 elif name == "recall_by_timeframe":
2351 |                     return await self.handle_recall_by_timeframe(arguments)
2352 |                 elif name == "delete_by_timeframe":
2353 |                     return await self.handle_delete_by_timeframe(arguments)
2354 |                 elif name == "delete_before_date":
2355 |                     return await self.handle_delete_before_date(arguments)
2356 |                 elif name == "update_memory_metadata":
2357 |                     logger.info("Calling handle_update_memory_metadata")
2358 |                     return await self.handle_update_memory_metadata(arguments)
2359 |                 # Consolidation tool handlers
2360 |                 elif name == "consolidate_memories":
2361 |                     logger.info("Calling handle_consolidate_memories")
2362 |                     return await self.handle_consolidate_memories(arguments)
2363 |                 elif name == "consolidation_status":
2364 |                     logger.info("Calling handle_consolidation_status")
2365 |                     return await self.handle_consolidation_status(arguments)
2366 |                 elif name == "consolidation_recommendations":
2367 |                     logger.info("Calling handle_consolidation_recommendations")
2368 |                     return await self.handle_consolidation_recommendations(arguments)
2369 |                 elif name == "scheduler_status":
2370 |                     logger.info("Calling handle_scheduler_status")
2371 |                     return await self.handle_scheduler_status(arguments)
2372 |                 elif name == "trigger_consolidation":
2373 |                     logger.info("Calling handle_trigger_consolidation")
2374 |                     return await self.handle_trigger_consolidation(arguments)
2375 |                 elif name == "pause_consolidation":
2376 |                     logger.info("Calling handle_pause_consolidation")
2377 |                     return await self.handle_pause_consolidation(arguments)
2378 |                 elif name == "resume_consolidation":
2379 |                     logger.info("Calling handle_resume_consolidation")
2380 |                     return await self.handle_resume_consolidation(arguments)
2381 |                 elif name == "ingest_document":
2382 |                     logger.info("Calling handle_ingest_document")
2383 |                     return await self.handle_ingest_document(arguments)
2384 |                 elif name == "ingest_directory":
2385 |                     logger.info("Calling handle_ingest_directory")
2386 |                     return await self.handle_ingest_directory(arguments)
2387 |                 else:
2388 |                     logger.warning(f"Unknown tool requested: {name}")
2389 |                     raise ValueError(f"Unknown tool: {name}")
2390 |             except Exception as e:
2391 |                 error_msg = f"Error in {name}: {str(e)}\n{traceback.format_exc()}"
2392 |                 logger.error(error_msg)
2393 |                 print(f"ERROR in tool execution: {error_msg}", file=sys.stderr, flush=True)
2394 |                 return [types.TextContent(type="text", text=f"Error: {str(e)}")]
2395 | 
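     |     # A minimal sketch of the request that reaches handle_call_tool, assuming the
     |     # standard MCP "tools/call" message (the transport layer unpacks it before
     |     # this handler runs):
     |     #
     |     #   {"method": "tools/call",
     |     #    "params": {"name": "store_memory",
     |     #               "arguments": {"content": "Note to self",
     |     #                             "metadata": {"tags": "todo"}}}}
     |     #
     |     # handle_call_tool then receives name="store_memory" and the arguments dict.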
2396 |     async def handle_store_memory(self, arguments: dict) -> List[types.TextContent]:
2397 |         content = arguments.get("content")
2398 |         metadata = arguments.get("metadata", {})
2399 | 
2400 |         if not content:
2401 |             return [types.TextContent(type="text", text="Error: Content is required")]
2402 | 
2403 |         try:
2404 |             # Initialize storage lazily when needed (also initializes memory_service)
2405 |             await self._ensure_storage_initialized()
2406 | 
2407 |             # Extract parameters for MemoryService call
2408 |             tags = metadata.get("tags", "")
2409 |             memory_type = metadata.get("type", "note")  # HTTP server uses metadata.type
2410 |             client_hostname = arguments.get("client_hostname")
2411 | 
2412 |             # Call shared MemoryService business logic
2413 |             result = await self.memory_service.store_memory(
2414 |                 content=content,
2415 |                 tags=tags,
2416 |                 memory_type=memory_type,
2417 |                 metadata=metadata,
2418 |                 client_hostname=client_hostname
2419 |             )
2420 | 
2421 |             # Convert MemoryService result to MCP response format
2422 |             if not result.get("success"):
2423 |                 error_msg = result.get("error", "Unknown error")
2424 |                 return [types.TextContent(type="text", text=f"Error storing memory: {error_msg}")]
2425 | 
2426 |             if "memories" in result:
2427 |                 # Chunked response - multiple memories created
2428 |                 num_chunks = len(result["memories"])
2429 |                 original_hash = result.get("original_hash", "unknown")
2430 |                 message = f"Successfully stored {num_chunks} memory chunks (original hash: {original_hash[:8]}...)"
2431 |             else:
2432 |                 # Single memory response
2433 |                 memory_hash = result["memory"]["content_hash"]
2434 |                 message = f"Memory stored successfully (hash: {memory_hash[:8]}...)"
2435 | 
2436 |             return [types.TextContent(type="text", text=message)]
2437 | 
2438 |         except Exception as e:
2439 |             logger.error(f"Error storing memory: {str(e)}\n{traceback.format_exc()}")
2440 |             return [types.TextContent(type="text", text=f"Error storing memory: {str(e)}")]
2441 |     
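     |     # Illustrative arguments for handle_store_memory, mirroring how the handler
     |     # above reads them (metadata.tags and metadata.type; "type" defaults to "note"):
     |     #
     |     #   {"content": "Project kickoff is on Friday",
     |     #    "metadata": {"tags": "project,dates", "type": "reminder"}}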
2442 |     async def handle_retrieve_memory(self, arguments: dict) -> List[types.TextContent]:
2443 |         query = arguments.get("query")
2444 |         n_results = arguments.get("n_results", 5)
2445 |         
2446 |         if not query:
2447 |             return [types.TextContent(type="text", text="Error: Query is required")]
2448 |         
2449 |         try:
2450 |             # Initialize storage lazily when needed (also initializes memory_service)
2451 |             await self._ensure_storage_initialized()
2452 | 
2453 |             # Track performance
2454 |             start_time = time.time()
2455 | 
2456 |             # Call shared MemoryService business logic
2457 |             result = await self.memory_service.retrieve_memories(
2458 |                 query=query,
2459 |                 n_results=n_results
2460 |             )
2461 | 
2462 |             query_time_ms = (time.time() - start_time) * 1000
2463 |             
2464 |             # Record query time for performance monitoring
2465 |             self.record_query_time(query_time_ms)
2466 | 
2467 |             if result.get("error"):
2468 |                 return [types.TextContent(type="text", text=f"Error retrieving memories: {result['error']}")]
2469 | 
2470 |             memories = result.get("memories", [])
2471 |             if not memories:
2472 |                 return [types.TextContent(type="text", text="No matching memories found")]
2473 | 
2474 |             # Format results in the HTTP server's style (shared MemoryService fields)
2475 |             formatted_results = []
2476 |             for i, memory in enumerate(memories):
2477 |                 memory_info = [f"Memory {i+1}:"]
2478 |                 # HTTP server uses created_at instead of timestamp
2479 |                 created_at = memory.get("created_at")
2480 |                 if created_at:
2481 |                     # Parse ISO string and format
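     |                     # Illustrative conversions for the two branches below (epoch
     |                     # seconds are interpreted in the local timezone by fromtimestamp):
     |                     #   1704067200.0           -> 2024-01-01 00:00:00 UTC
     |                     #   "2024-01-01T00:00:00Z" -> parsed via fromisoformat after Z -> +00:00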
2482 |                     try:
2483 |                         # Handle both float (timestamp) and string (ISO format) types
2484 |                         if isinstance(created_at, (int, float)):
2485 |                             dt = datetime.fromtimestamp(created_at)
2486 |                         else:
2487 |                             dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
2488 |                         memory_info.append(f"Timestamp: {dt.strftime('%Y-%m-%d %H:%M:%S')}")
2489 |                     except (ValueError, TypeError):
2490 |                         memory_info.append(f"Timestamp: {created_at}")
2491 | 
2492 |                 memory_info.extend([
2493 |                     f"Content: {memory['content']}",
2494 |                     f"Hash: {memory['content_hash']}",
2495 |                     f"Relevance Score: {memory['similarity_score']:.2f}"
2496 |                 ])
2497 |                 tags = memory.get("tags", [])
2498 |                 if tags:
2499 |                     memory_info.append(f"Tags: {', '.join(tags)}")
2500 |                 memory_info.append("---")
2501 |                 formatted_results.append("\n".join(memory_info))
2502 |             
2503 |             return [types.TextContent(
2504 |                 type="text",
2505 |                 text="Found the following memories:\n\n" + "\n".join(formatted_results)
2506 |             )]
2507 |         except Exception as e:
2508 |             logger.error(f"Error retrieving memories: {str(e)}\n{traceback.format_exc()}")
2509 |             return [types.TextContent(type="text", text=f"Error retrieving memories: {str(e)}")]
2510 | 
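     |     # Example of the formatted output produced by handle_retrieve_memory above
     |     # (illustrative values):
     |     #
     |     #   Found the following memories:
     |     #
     |     #   Memory 1:
     |     #   Timestamp: 2024-01-01 09:30:00
     |     #   Content: Project kickoff is on Friday
     |     #   Hash: a1b2c3d4...
     |     #   Relevance Score: 0.87
     |     #   Tags: project, dates
     |     #   ---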
2511 |     async def handle_search_by_tag(self, arguments: dict) -> List[types.TextContent]:
2512 |         from .services.memory_service import normalize_tags
2513 | 
2514 |         tags = normalize_tags(arguments.get("tags", []))
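     |         # Expected normalization (assumed from the tool schemas, which accept an
     |         # array of strings or a comma-separated string):
     |         #   normalize_tags(["a", "b"]) -> ["a", "b"]
     |         #   normalize_tags("a, b")     -> ["a", "b"]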
2515 | 
2516 |         if not tags:
2517 |             return [types.TextContent(type="text", text="Error: Tags are required")]
2518 |         
2519 |         try:
2520 |             # Initialize storage lazily when needed (also initializes memory_service)
2521 |             await self._ensure_storage_initialized()
2522 | 
2523 |             # Call shared MemoryService business logic
2524 |             result = await self.memory_service.search_by_tag(tags=tags)
2525 | 
2526 |             if result.get("error"):
2527 |                 return [types.TextContent(type="text", text=f"Error searching by tags: {result['error']}")]
2528 | 
2529 |             memories = result.get("memories", [])
2530 |             if not memories:
2531 |                 return [types.TextContent(
2532 |                     type="text",
2533 |                     text=f"No memories found with tags: {', '.join(tags)}"
2534 |                 )]
2535 |             
2536 |             formatted_results = []
2537 |             for i, memory in enumerate(memories):
2538 |                 memory_info = [f"Memory {i+1}:"]
2539 |                 created_at = memory.get("created_at")
2540 |                 if created_at:
2541 |                     try:
2542 |                         # Handle both float (timestamp) and string (ISO format) types
2543 |                         if isinstance(created_at, (int, float)):
2544 |                             dt = datetime.fromtimestamp(created_at)
2545 |                         else:
2546 |                             dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
2547 |                         memory_info.append(f"Timestamp: {dt.strftime('%Y-%m-%d %H:%M:%S')}")
2548 |                     except (ValueError, TypeError) as e:
2549 |                         memory_info.append(f"Timestamp: {created_at}")
2550 | 
2551 |                 memory_info.extend([
2552 |                     f"Content: {memory['content']}",
2553 |                     f"Hash: {memory['content_hash']}",
2554 |                     f"Tags: {', '.join(memory.get('tags', []))}"
2555 |                 ])
2556 |                 memory_type = memory.get("memory_type")
2557 |                 if memory_type:
2558 |                     memory_info.append(f"Type: {memory_type}")
2559 |                 memory_info.append("---")
2560 |                 formatted_results.append("\n".join(memory_info))
2561 |             
2562 |             return [types.TextContent(
2563 |                 type="text",
2564 |                 text="Found the following memories:\n\n" + "\n".join(formatted_results)
2565 |             )]
2566 |         except Exception as e:
2567 |             logger.error(f"Error searching by tags: {str(e)}\n{traceback.format_exc()}")
2568 |             return [types.TextContent(type="text", text=f"Error searching by tags: {str(e)}")]
2569 | 
2570 |     async def handle_delete_memory(self, arguments: dict) -> List[types.TextContent]:
2571 |         content_hash = arguments.get("content_hash")
     |         if not content_hash:
     |             return [types.TextContent(type="text", text="Error: content_hash is required")]
2572 | 
2573 |         try:
2574 |             # Initialize storage lazily when needed (also initializes memory_service)
2575 |             await self._ensure_storage_initialized()
2576 | 
2577 |             # Call shared MemoryService business logic
2578 |             result = await self.memory_service.delete_memory(content_hash)
2579 | 
2580 |             return [types.TextContent(type="text", text=result["message"])]
2581 |         except Exception as e:
2582 |             logger.error(f"Error deleting memory: {str(e)}\n{traceback.format_exc()}")
2583 |             return [types.TextContent(type="text", text=f"Error deleting memory: {str(e)}")]
2584 | 
2585 |     async def handle_delete_by_tag(self, arguments: dict) -> List[types.TextContent]:
2586 |         """Handler for deleting memories by tags."""
2587 |         from .services.memory_service import normalize_tags
2588 | 
2589 |         tags = arguments.get("tags", [])
2590 | 
2591 |         if not tags:
2592 |             return [types.TextContent(type="text", text="Error: Tags array is required")]
2593 | 
2594 |         # Normalize tags (handles comma-separated strings and arrays)
2595 |         tags = normalize_tags(tags)
2596 |         
2597 |         try:
2598 |             # Initialize storage lazily when needed
2599 |             storage = await self._ensure_storage_initialized()
2600 |             count, message = await storage.delete_by_tag(tags)
2601 |             return [types.TextContent(type="text", text=message)]
2602 |         except Exception as e:
2603 |             logger.error(f"Error deleting by tag: {str(e)}\n{traceback.format_exc()}")
2604 |             return [types.TextContent(type="text", text=f"Error deleting by tag: {str(e)}")]
2605 | 
2606 |     async def handle_delete_by_tags(self, arguments: dict) -> List[types.TextContent]:
2607 |         """Handler for explicit multiple tag deletion with progress tracking."""
2608 |         from .services.memory_service import normalize_tags
2609 | 
2610 |         tags = normalize_tags(arguments.get("tags", []))
2611 | 
2612 |         if not tags:
2613 |             return [types.TextContent(type="text", text="Error: Tags array is required")]
2614 |         
2615 |         try:
2616 |             # Initialize storage lazily when needed
2617 |             storage = await self._ensure_storage_initialized()
2618 |             
2619 |             # Generate operation ID for progress tracking
2620 |             import uuid
2621 |             operation_id = f"delete_by_tags_{uuid.uuid4().hex[:8]}"
2622 |             
2623 |             # Send initial progress notification
2624 |             await self.send_progress_notification(operation_id, 0, f"Starting deletion of memories with tags: {', '.join(tags)}")
2625 |             
2626 |             # Execute deletion with progress updates
2627 |             await self.send_progress_notification(operation_id, 25, "Searching for memories to delete...")
2628 |             
2629 |             # If storage supports progress callbacks, use them
2630 |             if hasattr(storage, 'delete_by_tags_with_progress'):
2631 |                 count, message = await storage.delete_by_tags_with_progress(
2632 |                     tags, 
2633 |                     progress_callback=lambda p, msg: asyncio.create_task(
2634 |                         self.send_progress_notification(operation_id, 25 + (p * 0.7), msg)
2635 |                     )
2636 |                 )
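                # Note: the lambda schedules each notification as a fire-and-forget
                # task, so progress updates are not awaited and their delivery order
                # is not guaranteed.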
2637 |             else:
2638 |                 await self.send_progress_notification(operation_id, 50, "Deleting memories...")
2639 |                 count, message = await storage.delete_by_tags(tags)
2640 |                 await self.send_progress_notification(operation_id, 90, f"Deleted {count} memories")
2641 |             
2642 |             # Complete the operation
2643 |             await self.send_progress_notification(operation_id, 100, f"Deletion completed: {message}")
2644 |             
2645 |             return [types.TextContent(type="text", text=f"{message} (Operation ID: {operation_id})")]
2646 |         except Exception as e:
2647 |             logger.error(f"Error deleting by tags: {str(e)}\n{traceback.format_exc()}")
2648 |             return [types.TextContent(type="text", text=f"Error deleting by tags: {str(e)}")]
2649 | 
2650 |     async def handle_delete_by_all_tags(self, arguments: dict) -> List[types.TextContent]:
2651 |         """Handler for deleting memories that contain ALL specified tags."""
2652 |         from .services.memory_service import normalize_tags
2653 | 
2654 |         tags = normalize_tags(arguments.get("tags", []))
2655 | 
2656 |         if not tags:
2657 |             return [types.TextContent(type="text", text="Error: Tags array is required")]
2658 |         
2659 |         try:
2660 |             # Initialize storage lazily when needed
2661 |             storage = await self._ensure_storage_initialized()
2662 |             count, message = await storage.delete_by_all_tags(tags)
2663 |             return [types.TextContent(type="text", text=message)]
2664 |         except Exception as e:
2665 |             logger.error(f"Error deleting by all tags: {str(e)}\n{traceback.format_exc()}")
2666 |             return [types.TextContent(type="text", text=f"Error deleting by all tags: {str(e)}")]
2667 | 
2668 |     async def handle_cleanup_duplicates(self, arguments: dict) -> List[types.TextContent]:
2669 |         try:
2670 |             # Initialize storage lazily when needed
2671 |             storage = await self._ensure_storage_initialized()
2672 |             count, message = await storage.cleanup_duplicates()
2673 |             return [types.TextContent(type="text", text=message)]
2674 |         except Exception as e:
2675 |             logger.error(f"Error cleaning up duplicates: {str(e)}\n{traceback.format_exc()}")
2676 |             return [types.TextContent(type="text", text=f"Error cleaning up duplicates: {str(e)}")]
2677 | 
2678 |     async def handle_update_memory_metadata(self, arguments: dict) -> List[types.TextContent]:
2679 |         """Handle memory metadata update requests."""
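        # Illustrative request payload (hypothetical values):
        #   {"content_hash": "abc123", "updates": {"tags": ["reviewed"], "memory_type": "note"},
        #    "preserve_timestamps": True}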
2680 |         try:
2681 |             from .services.memory_service import normalize_tags
2682 | 
2683 |             content_hash = arguments.get("content_hash")
2684 |             updates = arguments.get("updates")
2685 |             preserve_timestamps = arguments.get("preserve_timestamps", True)
2686 | 
2687 |             if not content_hash:
2688 |                 return [types.TextContent(type="text", text="Error: content_hash is required")]
2689 | 
2690 |             if not updates:
2691 |                 return [types.TextContent(type="text", text="Error: updates dictionary is required")]
2692 | 
2693 |             if not isinstance(updates, dict):
2694 |                 return [types.TextContent(type="text", text="Error: updates must be a dictionary")]
2695 | 
2696 |             # Normalize tags if present in updates
2697 |             if "tags" in updates:
2698 |                 updates["tags"] = normalize_tags(updates["tags"])
2699 | 
2700 |             # Initialize storage lazily when needed
2701 |             storage = await self._ensure_storage_initialized()
2702 |             
2703 |             # Call the storage method
2704 |             success, message = await storage.update_memory_metadata(
2705 |                 content_hash=content_hash,
2706 |                 updates=updates,
2707 |                 preserve_timestamps=preserve_timestamps
2708 |             )
2709 |             
2710 |             if success:
2711 |                 logger.info(f"Successfully updated metadata for memory {content_hash}")
2712 |                 return [types.TextContent(
2713 |                     type="text", 
2714 |                     text=f"Successfully updated memory metadata. {message}"
2715 |                 )]
2716 |             else:
2717 |                 logger.warning(f"Failed to update metadata for memory {content_hash}: {message}")
2718 |                 return [types.TextContent(type="text", text=f"Failed to update memory metadata: {message}")]
2719 |                 
2720 |         except Exception as e:
2721 |             error_msg = f"Error updating memory metadata: {str(e)}"
2722 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2723 |             return [types.TextContent(type="text", text=error_msg)]
2724 | 
2725 |     # Consolidation tool handlers
2726 |     async def handle_consolidate_memories(self, arguments: dict) -> List[types.TextContent]:
2727 |         """Handle memory consolidation requests."""
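        # Illustrative request payload: {"time_horizon": "weekly"}; values outside
        # daily/weekly/monthly/quarterly/yearly are rejected below.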
2728 |         if not CONSOLIDATION_ENABLED or not self.consolidator:
2729 |             return [types.TextContent(type="text", text="Error: Consolidation system not available")]
2730 |         
2731 |         try:
2732 |             time_horizon = arguments.get("time_horizon")
2733 |             if not time_horizon:
2734 |                 return [types.TextContent(type="text", text="Error: time_horizon is required")]
2735 |             
2736 |             if time_horizon not in ["daily", "weekly", "monthly", "quarterly", "yearly"]:
2737 |                 return [types.TextContent(type="text", text="Error: Invalid time_horizon. Must be one of: daily, weekly, monthly, quarterly, yearly")]
2738 |             
2739 |             logger.info(f"Starting {time_horizon} consolidation")
2740 |             
2741 |             # Run consolidation
2742 |             report = await self.consolidator.consolidate(time_horizon)
2743 |             
2744 |             # Format response
2745 |             result = f"""Consolidation completed successfully!
2746 | 
2747 | Time Horizon: {report.time_horizon}
2748 | Duration: {(report.end_time - report.start_time).total_seconds():.2f} seconds
2749 | Memories Processed: {report.memories_processed}
2750 | Associations Discovered: {report.associations_discovered}
2751 | Clusters Created: {report.clusters_created}
2752 | Memories Compressed: {report.memories_compressed}
2753 | Memories Archived: {report.memories_archived}"""
2754 | 
2755 |             if report.errors:
2756 |                 result += "\n\nWarnings/Errors:\n" + "\n".join(f"- {error}" for error in report.errors)
2757 |             
2758 |             return [types.TextContent(type="text", text=result)]
2759 |             
2760 |         except Exception as e:
2761 |             error_msg = f"Error during consolidation: {str(e)}"
2762 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2763 |             return [types.TextContent(type="text", text=error_msg)]
2764 | 
2765 |     async def handle_consolidation_status(self, arguments: dict) -> List[types.TextContent]:
2766 |         """Handle consolidation status requests."""
2767 |         if not CONSOLIDATION_ENABLED or not self.consolidator:
2768 |             return [types.TextContent(type="text", text="Consolidation system: DISABLED")]
2769 |         
2770 |         try:
2771 |             # Get health check from consolidator
2772 |             health = await self.consolidator.health_check()
2773 |             
2774 |             # Format status report
2775 |             status_lines = [
2776 |                 f"Consolidation System Status: {health['status'].upper()}",
2777 |                 f"Last Updated: {health['timestamp']}",
2778 |                 "",
2779 |                 "Component Health:"
2780 |             ]
2781 |             
2782 |             for component, component_health in health['components'].items():
2783 |                 status = component_health['status']
2784 |                 status_lines.append(f"  {component}: {status.upper()}")
2785 |                 if status == 'unhealthy' and 'error' in component_health:
2786 |                     status_lines.append(f"    Error: {component_health['error']}")
2787 |             
2788 |             status_lines.extend([
2789 |                 "",
2790 |                 "Statistics:",
2791 |                 f"  Total consolidation runs: {health['statistics']['total_runs']}",
2792 |                 f"  Successful runs: {health['statistics']['successful_runs']}",
2793 |                 f"  Total memories processed: {health['statistics']['total_memories_processed']}",
2794 |                 f"  Total associations created: {health['statistics']['total_associations_created']}",
2795 |                 f"  Total clusters created: {health['statistics']['total_clusters_created']}",
2796 |                 f"  Total memories compressed: {health['statistics']['total_memories_compressed']}",
2797 |                 f"  Total memories archived: {health['statistics']['total_memories_archived']}"
2798 |             ])
2799 |             
2800 |             if health['last_consolidation_times']:
2801 |                 status_lines.extend([
2802 |                     "",
2803 |                     "Last Consolidation Times:"
2804 |                 ])
2805 |                 for horizon, timestamp in health['last_consolidation_times'].items():
2806 |                     status_lines.append(f"  {horizon}: {timestamp}")
2807 |             
2808 |             return [types.TextContent(type="text", text="\n".join(status_lines))]
2809 |             
2810 |         except Exception as e:
2811 |             error_msg = f"Error getting consolidation status: {str(e)}"
2812 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2813 |             return [types.TextContent(type="text", text=error_msg)]
2814 | 
2815 |     async def handle_consolidation_recommendations(self, arguments: dict) -> List[types.TextContent]:
2816 |         """Handle consolidation recommendation requests."""
2817 |         if not CONSOLIDATION_ENABLED or not self.consolidator:
2818 |             return [types.TextContent(type="text", text="Error: Consolidation system not available")]
2819 |         
2820 |         try:
2821 |             time_horizon = arguments.get("time_horizon")
2822 |             if not time_horizon:
2823 |                 return [types.TextContent(type="text", text="Error: time_horizon is required")]
2824 |             
2825 |             if time_horizon not in ["daily", "weekly", "monthly", "quarterly", "yearly"]:
2826 |                 return [types.TextContent(type="text", text="Error: Invalid time_horizon")]
2827 |             
2828 |             # Get recommendations
2829 |             recommendations = await self.consolidator.get_consolidation_recommendations(time_horizon)
2830 |             
2831 |             # Format response
2832 |             lines = [
2833 |                 f"Consolidation Recommendations for {time_horizon} horizon:",
2834 |                 "",
2835 |                 f"Recommendation: {recommendations['recommendation'].upper()}",
2836 |                 f"Memory Count: {recommendations['memory_count']}",
2837 |             ]
2838 |             
2839 |             if 'reasons' in recommendations:
2840 |                 lines.extend([
2841 |                     "",
2842 |                     "Reasons:"
2843 |                 ])
2844 |                 for reason in recommendations['reasons']:
2845 |                     lines.append(f"  • {reason}")
2846 |             
2847 |             if 'memory_types' in recommendations:
2848 |                 lines.extend([
2849 |                     "",
2850 |                     "Memory Types:"
2851 |                 ])
2852 |                 for mem_type, count in recommendations['memory_types'].items():
2853 |                     lines.append(f"  {mem_type}: {count}")
2854 |             
2855 |             if 'total_size_bytes' in recommendations:
2856 |                 size_mb = recommendations['total_size_bytes'] / (1024 * 1024)
2857 |                 lines.append(f"\nTotal Size: {size_mb:.2f} MB")
2858 |             
2859 |             if 'old_memory_percentage' in recommendations:
2860 |                 lines.append(f"Old Memory Percentage: {recommendations['old_memory_percentage']:.1f}%")
2861 |             
2862 |             if 'estimated_duration_seconds' in recommendations:
2863 |                 lines.append(f"Estimated Duration: {recommendations['estimated_duration_seconds']:.1f} seconds")
2864 |             
2865 |             return [types.TextContent(type="text", text="\n".join(lines))]
2866 |             
2867 |         except Exception as e:
2868 |             error_msg = f"Error getting consolidation recommendations: {str(e)}"
2869 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2870 |             return [types.TextContent(type="text", text=error_msg)]
2871 | 
2872 |     async def handle_scheduler_status(self, arguments: dict) -> List[types.TextContent]:
2873 |         """Handle scheduler status requests."""
2874 |         if not CONSOLIDATION_ENABLED or not self.consolidation_scheduler:
2875 |             return [types.TextContent(type="text", text="Consolidation scheduler: DISABLED")]
2876 |         
2877 |         try:
2878 |             # Get scheduler status
2879 |             status = await self.consolidation_scheduler.get_scheduler_status()
2880 |             
2881 |             if not status['enabled']:
2882 |                 return [types.TextContent(type="text", text=f"Scheduler: DISABLED\nReason: {status.get('reason', 'Unknown')}")]
2883 |             
2884 |             # Format status report
2885 |             lines = [
2886 |                 f"Consolidation Scheduler Status: {'RUNNING' if status['running'] else 'STOPPED'}",
2887 |                 "",
2888 |                 "Scheduled Jobs:"
2889 |             ]
2890 |             
2891 |             for job in status['jobs']:
2892 |                 next_run = job['next_run_time'] or 'Not scheduled'
2893 |                 lines.append(f"  {job['name']}: {next_run}")
2894 |             
2895 |             lines.extend([
2896 |                 "",
2897 |                 "Execution Statistics:",
2898 |                 f"  Total jobs executed: {status['execution_stats']['total_jobs']}",
2899 |                 f"  Successful jobs: {status['execution_stats']['successful_jobs']}",
2900 |                 f"  Failed jobs: {status['execution_stats']['failed_jobs']}"
2901 |             ])
2902 |             
2903 |             if status['last_execution_times']:
2904 |                 lines.extend([
2905 |                     "",
2906 |                     "Last Execution Times:"
2907 |                 ])
2908 |                 for horizon, timestamp in status['last_execution_times'].items():
2909 |                     lines.append(f"  {horizon}: {timestamp}")
2910 |             
2911 |             if status['recent_jobs']:
2912 |                 lines.extend([
2913 |                     "",
2914 |                     "Recent Jobs:"
2915 |                 ])
2916 |                 for job in status['recent_jobs'][-5:]:  # Show last 5 jobs
2917 |                     duration = (job['end_time'] - job['start_time']).total_seconds()
2918 |                     lines.append(f"  {job['time_horizon']} ({job['status']}): {duration:.2f}s")
2919 |             
2920 |             return [types.TextContent(type="text", text="\n".join(lines))]
2921 |             
2922 |         except Exception as e:
2923 |             error_msg = f"Error getting scheduler status: {str(e)}"
2924 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2925 |             return [types.TextContent(type="text", text=error_msg)]
2926 | 
2927 |     async def handle_trigger_consolidation(self, arguments: dict) -> List[types.TextContent]:
2928 |         """Handle manual consolidation trigger requests."""
2929 |         if not CONSOLIDATION_ENABLED or not self.consolidation_scheduler:
2930 |             return [types.TextContent(type="text", text="Error: Consolidation scheduler not available")]
2931 |         
2932 |         try:
2933 |             time_horizon = arguments.get("time_horizon")
2934 |             immediate = arguments.get("immediate", True)
2935 |             
2936 |             if not time_horizon:
2937 |                 return [types.TextContent(type="text", text="Error: time_horizon is required")]
2938 |             
2939 |             if time_horizon not in ["daily", "weekly", "monthly", "quarterly", "yearly"]:
2940 |                 return [types.TextContent(type="text", text="Error: Invalid time_horizon")]
2941 |             
2942 |             # Trigger consolidation
2943 |             success = await self.consolidation_scheduler.trigger_consolidation(time_horizon, immediate)
2944 |             
2945 |             if success:
2946 |                 action = "triggered immediately" if immediate else "scheduled for later"
2947 |                 return [types.TextContent(type="text", text=f"Successfully {action} {time_horizon} consolidation")]
2948 |             else:
2949 |                 return [types.TextContent(type="text", text=f"Failed to trigger {time_horizon} consolidation")]
2950 |             
2951 |         except Exception as e:
2952 |             error_msg = f"Error triggering consolidation: {str(e)}"
2953 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2954 |             return [types.TextContent(type="text", text=error_msg)]
2955 | 
2956 |     async def handle_pause_consolidation(self, arguments: dict) -> List[types.TextContent]:
2957 |         """Handle consolidation pause requests."""
2958 |         if not CONSOLIDATION_ENABLED or not self.consolidation_scheduler:
2959 |             return [types.TextContent(type="text", text="Error: Consolidation scheduler not available")]
2960 |         
2961 |         try:
2962 |             time_horizon = arguments.get("time_horizon")
2963 |             
2964 |             # Pause consolidation
2965 |             success = await self.consolidation_scheduler.pause_consolidation(time_horizon)
2966 |             
2967 |             if success:
2968 |                 target = time_horizon or "all"
2969 |                 return [types.TextContent(type="text", text=f"Successfully paused {target} consolidation jobs")]
2970 |             else:
2971 |                 return [types.TextContent(type="text", text="Failed to pause consolidation jobs")]
2972 |             
2973 |         except Exception as e:
2974 |             error_msg = f"Error pausing consolidation: {str(e)}"
2975 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2976 |             return [types.TextContent(type="text", text=error_msg)]
2977 | 
2978 |     async def handle_resume_consolidation(self, arguments: dict) -> List[types.TextContent]:
2979 |         """Handle consolidation resume requests."""
2980 |         if not CONSOLIDATION_ENABLED or not self.consolidation_scheduler:
2981 |             return [types.TextContent(type="text", text="Error: Consolidation scheduler not available")]
2982 |         
2983 |         try:
2984 |             time_horizon = arguments.get("time_horizon")
2985 |             
2986 |             # Resume consolidation
2987 |             success = await self.consolidation_scheduler.resume_consolidation(time_horizon)
2988 |             
2989 |             if success:
2990 |                 target = time_horizon or "all"
2991 |                 return [types.TextContent(type="text", text=f"Successfully resumed {target} consolidation jobs")]
2992 |             else:
2993 |                 return [types.TextContent(type="text", text="Failed to resume consolidation jobs")]
2994 |             
2995 |         except Exception as e:
2996 |             error_msg = f"Error resuming consolidation: {str(e)}"
2997 |             logger.error(f"{error_msg}\n{traceback.format_exc()}")
2998 |             return [types.TextContent(type="text", text=error_msg)]
2999 | 
3000 |     async def handle_debug_retrieve(self, arguments: dict) -> List[types.TextContent]:
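        """Debug handler: retrieve memories with raw similarity scores and backend diagnostics."""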
3001 |         query = arguments.get("query")
3002 |         n_results = arguments.get("n_results", 5)
3003 |         similarity_threshold = arguments.get("similarity_threshold", 0.0)
3004 |         
3005 |         if not query:
3006 |             return [types.TextContent(type="text", text="Error: Query is required")]
3007 |         
3008 |         try:
3009 |             # Initialize storage lazily when needed
3010 |             storage = await self._ensure_storage_initialized()
3011 |             
3012 |             from .utils.debug import debug_retrieve_memory
3013 |             results = await debug_retrieve_memory(
3014 |                 storage,
3015 |                 query,
3016 |                 n_results,
3017 |                 similarity_threshold
3018 |             )
3019 |             
3020 |             if not results:
3021 |                 return [types.TextContent(type="text", text="No matching memories found")]
3022 |             
3023 |             formatted_results = []
3024 |             for i, result in enumerate(results):
3025 |                 memory_info = [
3026 |                     f"Memory {i+1}:",
3027 |                     f"Content: {result.memory.content}",
3028 |                     f"Hash: {result.memory.content_hash}",
3029 |                     f"Similarity Score: {result.relevance_score:.4f}"
3030 |                 ]
3031 | 
3032 |                 # Add debug info if available
3033 |                 if result.debug_info:
3034 |                     if 'raw_distance' in result.debug_info:
3035 |                         memory_info.append(f"Raw Distance: {result.debug_info['raw_distance']:.4f}")
3036 |                     if 'backend' in result.debug_info:
3037 |                         memory_info.append(f"Backend: {result.debug_info['backend']}")
3038 |                     if 'query' in result.debug_info:
3039 |                         memory_info.append(f"Query: {result.debug_info['query']}")
3040 |                     if 'similarity_threshold' in result.debug_info:
3041 |                         memory_info.append(f"Threshold: {result.debug_info['similarity_threshold']:.2f}")
3042 | 
3043 |                 if result.memory.tags:
3044 |                     memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
3045 |                 memory_info.append("---")
3046 |                 formatted_results.append("\n".join(memory_info))
3047 |             
3048 |             return [types.TextContent(
3049 |                 type="text",
3050 |                 text="Found the following memories:\n\n" + "\n".join(formatted_results)
3051 |             )]
3052 |         except Exception as e:
3053 |             return [types.TextContent(type="text", text=f"Error in debug retrieve: {str(e)}")]
3054 | 
3055 |     async def handle_exact_match_retrieve(self, arguments: dict) -> List[types.TextContent]:
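        """Debug handler: find memories whose content exactly matches the given string."""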
3056 |         content = arguments.get("content")
3057 |         if not content:
3058 |             return [types.TextContent(type="text", text="Error: Content is required")]
3059 |         
3060 |         try:
3061 |             # Initialize storage lazily when needed
3062 |             storage = await self._ensure_storage_initialized()
3063 |             
3064 |             from .utils.debug import exact_match_retrieve
3065 |             memories = await exact_match_retrieve(storage, content)
3066 |             
3067 |             if not memories:
3068 |                 return [types.TextContent(type="text", text="No exact matches found")]
3069 |             
3070 |             formatted_results = []
3071 |             for i, memory in enumerate(memories):
3072 |                 memory_info = [
3073 |                     f"Memory {i+1}:",
3074 |                     f"Content: {memory.content}",
3075 |                     f"Hash: {memory.content_hash}"
3076 |                 ]
3077 |                 
3078 |                 if memory.tags:
3079 |                     memory_info.append(f"Tags: {', '.join(memory.tags)}")
3080 |                 memory_info.append("---")
3081 |                 formatted_results.append("\n".join(memory_info))
3082 |             
3083 |             return [types.TextContent(
3084 |                 type="text",
3085 |                 text="Found the following exact matches:\n\n" + "\n".join(formatted_results)
3086 |             )]
3087 |         except Exception as e:
3088 |             return [types.TextContent(type="text", text=f"Error in exact match retrieve: {str(e)}")]
3089 | 
3090 |     async def handle_get_raw_embedding(self, arguments: dict) -> List[types.TextContent]:
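        """Debug handler: generate and return the raw embedding vector for the given content."""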
3091 |         content = arguments.get("content")
3092 |         if not content:
3093 |             return [types.TextContent(type="text", text="Error: Content is required")]
3094 | 
3095 |         try:
3096 |             # Initialize storage lazily when needed
3097 |             storage = await self._ensure_storage_initialized()
3098 | 
3099 |             from .utils.debug import get_raw_embedding
3100 |             result = await asyncio.to_thread(get_raw_embedding, storage, content)
3101 | 
3102 |             if result["status"] == "success":
3103 |                 embedding = result["embedding"]
3104 |                 dimension = result["dimension"]
3105 |                 # Show first 10 and last 10 values for readability
3106 |                 if len(embedding) > 20:
3107 |                     embedding_str = f"[{', '.join(f'{x:.6f}' for x in embedding[:10])}, ..., {', '.join(f'{x:.6f}' for x in embedding[-10:])}]"
3108 |                 else:
3109 |                     embedding_str = f"[{', '.join(f'{x:.6f}' for x in embedding)}]"
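                # Illustrative: a 384-dimension vector renders as
                # "[0.012345, ..., -0.004321]" (hypothetical values), keeping only
                # the first and last 10 components.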
3110 | 
3111 |                 return [types.TextContent(
3112 |                     type="text",
3113 |                     text=f"Embedding generated successfully:\n"
3114 |                          f"Dimension: {dimension}\n"
3115 |                          f"Vector: {embedding_str}"
3116 |                 )]
3117 |             else:
3118 |                 return [types.TextContent(
3119 |                     type="text",
3120 |                     text=f"Failed to generate embedding: {result['error']}"
3121 |                 )]
3122 | 
3123 |         except Exception as e:
3124 |             return [types.TextContent(type="text", text=f"Error getting raw embedding: {str(e)}")]
3125 | 
3126 |     async def handle_recall_memory(self, arguments: dict) -> List[types.TextContent]:
3127 |         """
3128 |         Handle memory recall requests with natural language time expressions.
3129 |         
3130 |         This handler parses natural language time expressions from the query,
3131 |         extracts time ranges, and combines them with optional semantic search.
3132 |         """
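        # Illustrative (assumed parser behavior): a query like "meetings last week"
        # splits into cleaned_query="meetings" plus a timestamp range covering the
        # previous week; exact semantics depend on extract_time_expression.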
3133 |         query = arguments.get("query", "")
3134 |         n_results = arguments.get("n_results", 5)
3135 |         
3136 |         if not query:
3137 |             return [types.TextContent(type="text", text="Error: Query is required")]
3138 |         
3139 |         try:
3140 |             # Initialize storage lazily when needed
3141 |             storage = await self._ensure_storage_initialized()
3142 |             
3143 |             # Parse natural language time expressions
3144 |             cleaned_query, (start_timestamp, end_timestamp) = extract_time_expression(query)
3145 |             
3146 |             # Log the parsed timestamps and clean query
3147 |             logger.info(f"Original query: {query}")
3148 |             logger.info(f"Cleaned query for semantic search: {cleaned_query}")
3149 |             logger.info(f"Parsed time range: {start_timestamp} to {end_timestamp}")
3150 |             
3151 |             # Log more detailed timestamp information for debugging
3152 |             if start_timestamp is not None:
3153 |                 start_dt = datetime.fromtimestamp(start_timestamp)
3154 |                 logger.info(f"Start timestamp: {start_timestamp} ({start_dt.strftime('%Y-%m-%d %H:%M:%S')})")
3155 |             if end_timestamp is not None:
3156 |                 end_dt = datetime.fromtimestamp(end_timestamp)
3157 |                 logger.info(f"End timestamp: {end_timestamp} ({end_dt.strftime('%Y-%m-%d %H:%M:%S')})")
3158 |             
3159 |             if start_timestamp is None and end_timestamp is None:
3160 |                 # No time expression found, try direct parsing
3161 |                 logger.info("No time expression found in query, trying direct parsing")
3162 |                 start_timestamp, end_timestamp = parse_time_expression(query)
3163 |                 logger.info(f"Direct parse result: {start_timestamp} to {end_timestamp}")
3164 |             
3165 |             # Format human-readable time range for response
3166 |             time_range_str = ""
3167 |             if start_timestamp is not None and end_timestamp is not None:
3168 |                 start_dt = datetime.fromtimestamp(start_timestamp)
3169 |                 end_dt = datetime.fromtimestamp(end_timestamp)
3170 |                 time_range_str = f" from {start_dt.strftime('%Y-%m-%d %H:%M')} to {end_dt.strftime('%Y-%m-%d %H:%M')}"
3171 |             
3172 |             # Retrieve memories with timestamp filter and optional semantic search
3173 |             # If cleaned_query is empty or just whitespace after removing time expressions,
3174 |             # we should perform time-based retrieval only
3175 |             semantic_query = cleaned_query.strip() or None
3176 | 
3177 |             # Use the enhanced recall method that combines semantic search with time filtering,
3178 |             # or just time filtering if no semantic query
3179 |             results = await storage.recall(
3180 |                 query=semantic_query,
3181 |                 n_results=n_results,
3182 |                 start_timestamp=start_timestamp,
3183 |                 end_timestamp=end_timestamp
3184 |             )
3185 |             
3186 |             if not results:
3187 |                 no_results_msg = f"No memories found{time_range_str}"
3188 |                 return [types.TextContent(type="text", text=no_results_msg)]
3189 |             
3190 |             # Format results
3191 |             formatted_results = []
3192 |             for i, result in enumerate(results):
3193 |                 memory_dt = result.memory.timestamp
3194 |                 
3195 |                 memory_info = [
3196 |                     f"Memory {i+1}:",
3197 |                 ]
3198 |                 
3199 |                 # Add timestamp if available
3200 |                 if memory_dt:
3201 |                     memory_info.append(f"Timestamp: {memory_dt.strftime('%Y-%m-%d %H:%M:%S')}")
3202 |                 
3203 |                 # Add other memory information
3204 |                 memory_info.extend([
3205 |                     f"Content: {result.memory.content}",
3206 |                     f"Hash: {result.memory.content_hash}"
3207 |                 ])
3208 |                 
3209 |                 # Add relevance score if available (may not be for time-only queries)
3210 |                 if hasattr(result, 'relevance_score') and result.relevance_score is not None:
3211 |                     memory_info.append(f"Relevance Score: {result.relevance_score:.2f}")
3212 |                 
3213 |                 # Add tags if available
3214 |                 if result.memory.tags:
3215 |                     memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
3216 |                 
3217 |                 memory_info.append("---")
3218 |                 formatted_results.append("\n".join(memory_info))
3219 |             
3220 |             # Include time range in response if available
3221 |             found_msg = f"Found {len(results)} memories{time_range_str}:"
3222 |             return [types.TextContent(
3223 |                 type="text",
3224 |                 text=f"{found_msg}\n\n" + "\n".join(formatted_results)
3225 |             )]
3226 |             
3227 |         except Exception as e:
3228 |             logger.error(f"Error in recall_memory: {str(e)}\n{traceback.format_exc()}")
3229 |             return [types.TextContent(type="text", text=f"Error recalling memories: {str(e)}")]
3230 | 
3231 |     async def handle_check_database_health(self, arguments: dict) -> List[types.TextContent]:
3232 |         """Handle database health check requests with performance metrics."""
3233 |         logger.info("=== EXECUTING CHECK_DATABASE_HEALTH ===")
3234 |         try:
3235 |             # Initialize storage lazily when needed
3236 |             try:
3237 |                 storage = await self._ensure_storage_initialized()
3238 |             except Exception as init_error:
3239 |                 # Storage initialization failed
3240 |                 result = {
3241 |                     "validation": {
3242 |                         "status": "unhealthy",
3243 |                         "message": f"Storage initialization failed: {str(init_error)}"
3244 |                     },
3245 |                     "statistics": {
3246 |                         "status": "error",
3247 |                         "error": "Cannot get statistics - storage not initialized"
3248 |                     },
3249 |                     "performance": {
3250 |                         "storage": {},
3251 |                         "server": {
3252 |                             "average_query_time_ms": self.get_average_query_time(),
3253 |                             "total_queries": len(self.query_times)
3254 |                         }
3255 |                     }
3256 |                 }
3257 |                 
3258 |                 logger.error(f"Storage initialization failed during health check: {str(init_error)}")
3259 |                 return [types.TextContent(
3260 |                     type="text",
3261 |                     text=f"Database Health Check Results:\n{json.dumps(result, indent=2)}"
3262 |                 )]
3263 |             
3264 |             # Bypass db_utils for the health check and validate the backend directly here
3265 |             # Get storage type for backend-specific handling
3266 |             storage_type = storage.__class__.__name__
3267 |             
3268 |             # Direct health check implementation based on storage type
3269 |             is_valid = False
3270 |             message = ""
3271 |             stats = {}
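            # Dispatch on the concrete storage class name; only the backends handled
            # below are recognized, so new backends must be added here explicitly.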
3272 |             
3273 |             if storage_type == "SqliteVecMemoryStorage":
3274 |                 # Direct SQLite-vec validation
3275 |                 if not hasattr(storage, 'conn') or storage.conn is None:
3276 |                     is_valid = False
3277 |                     message = "SQLite database connection is not initialized"
3278 |                 else:
3279 |                     try:
3280 |                         # Check for required tables
3281 |                         cursor = storage.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memories'")
3282 |                         if not cursor.fetchone():
3283 |                             is_valid = False
3284 |                             message = "SQLite database is missing required tables"
3285 |                         else:
3286 |                             # Count memories
3287 |                             cursor = storage.conn.execute('SELECT COUNT(*) FROM memories')
3288 |                             memory_count = cursor.fetchone()[0]
3289 |                             
3290 |                             # Check if embedding tables exist
3291 |                             cursor = storage.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memory_embeddings'")
3292 |                             has_embeddings = cursor.fetchone() is not None
3293 |                             
3294 |                             # Check embedding model
3295 |                             has_model = hasattr(storage, 'embedding_model') and storage.embedding_model is not None
3296 |                             
3297 |                             # Collect stats
3298 |                             stats = {
3299 |                                 "status": "healthy",
3300 |                                 "backend": "sqlite-vec",
3301 |                                 "total_memories": memory_count,
3302 |                                 "has_embedding_tables": has_embeddings,
3303 |                                 "has_embedding_model": has_model,
3304 |                                 "embedding_model": storage.embedding_model_name if hasattr(storage, 'embedding_model_name') else "none"
3305 |                             }
3306 |                             
3307 |                             # Get database file size
3308 |                             db_path = storage.db_path if hasattr(storage, 'db_path') else None
3309 |                             if db_path and os.path.exists(db_path):
3310 |                                 file_size = os.path.getsize(db_path)
3311 |                                 stats["database_size_bytes"] = file_size
3312 |                                 stats["database_size_mb"] = round(file_size / (1024 * 1024), 2)
3313 |                             
3314 |                             is_valid = True
3315 |                             message = "SQLite-vec database validation successful"
3316 |                     except Exception as e:
3317 |                         is_valid = False
3318 |                         message = f"SQLite database validation error: {str(e)}"
3319 |                         stats = {
3320 |                             "status": "error",
3321 |                             "error": str(e),
3322 |                             "backend": "sqlite-vec" 
3323 |                         }
3324 | 
3325 |             elif storage_type == "CloudflareStorage":
3326 |                 # Cloudflare storage validation
3327 |                 try:
3328 |                     # Check if storage is properly initialized
3329 |                     if not hasattr(storage, 'client') or storage.client is None:
3330 |                         is_valid = False
3331 |                         message = "Cloudflare storage client is not initialized"
3332 |                         stats = {
3333 |                             "status": "error",
3334 |                             "error": "Cloudflare storage client is not initialized",
3335 |                             "backend": "cloudflare"
3336 |                         }
3337 |                     else:
3338 |                         # Get storage stats
3339 |                         storage_stats = await storage.get_stats()
3340 | 
3341 |                         # Collect basic health info
3342 |                         stats = {
3343 |                             "status": "healthy",
3344 |                             "backend": "cloudflare",
3345 |                             "total_memories": storage_stats.get("total_memories", 0),
3346 |                             "vectorize_index": storage.vectorize_index,
3347 |                             "d1_database_id": storage.d1_database_id,
3348 |                             "r2_bucket": storage.r2_bucket,
3349 |                             "embedding_model": storage.embedding_model
3350 |                         }
3351 | 
3352 |                         # Add additional stats if available
3353 |                         stats.update(storage_stats)
3354 | 
3355 |                         is_valid = True
3356 |                         message = "Cloudflare storage validation successful"
3357 | 
3358 |                 except Exception as e:
3359 |                     is_valid = False
3360 |                     message = f"Cloudflare storage validation error: {str(e)}"
3361 |                     stats = {
3362 |                         "status": "error",
3363 |                         "error": str(e),
3364 |                         "backend": "cloudflare"
3365 |                     }
3366 | 
3367 |             elif storage_type == "HybridMemoryStorage":
3368 |                 # Hybrid storage validation (SQLite-vec primary + Cloudflare secondary)
3369 |                 try:
3370 |                     if not hasattr(storage, 'primary') or storage.primary is None:
3371 |                         is_valid = False
3372 |                         message = "Hybrid storage primary backend is not initialized"
3373 |                         stats = {
3374 |                             "status": "error",
3375 |                             "error": "Hybrid storage primary backend is not initialized",
3376 |                             "backend": "hybrid"
3377 |                         }
3378 |                     else:
3379 |                         primary_storage = storage.primary
3380 |                         # Validate primary storage (SQLite-vec)
3381 |                         if not hasattr(primary_storage, 'conn') or primary_storage.conn is None:
3382 |                             is_valid = False
3383 |                             message = "Hybrid storage: SQLite connection is not initialized"
3384 |                             stats = {
3385 |                                 "status": "error",
3386 |                                 "error": "SQLite connection is not initialized",
3387 |                                 "backend": "hybrid"
3388 |                             }
3389 |                         else:
3390 |                             # Check for required tables
3391 |                             cursor = primary_storage.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memories'")
3392 |                             if not cursor.fetchone():
3393 |                                 is_valid = False
3394 |                                 message = "Hybrid storage: SQLite database is missing required tables"
3395 |                                 stats = {
3396 |                                     "status": "error",
3397 |                                     "error": "SQLite database is missing required tables",
3398 |                                     "backend": "hybrid"
3399 |                                 }
3400 |                             else:
3401 |                                 # Count memories
3402 |                                 cursor = primary_storage.conn.execute('SELECT COUNT(*) FROM memories')
3403 |                                 memory_count = cursor.fetchone()[0]
3404 | 
3405 |                                 # Check if embedding tables exist
3406 |                                 cursor = primary_storage.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memory_embeddings'")
3407 |                                 has_embeddings = cursor.fetchone() is not None
3408 | 
3409 |                                 # Check secondary (Cloudflare) status
3410 |                                 cloudflare_status = "not_configured"
3411 |                                 if hasattr(storage, 'secondary') and storage.secondary:
3412 |                                     sync_service = getattr(storage, 'sync_service', None)
3413 |                                     if sync_service and getattr(sync_service, 'is_running', False):
3414 |                                         cloudflare_status = "syncing"
3415 |                                     else:
3416 |                                         cloudflare_status = "configured"
3417 | 
3418 |                                 # Collect stats
3419 |                                 stats = {
3420 |                                     "status": "healthy",
3421 |                                     "backend": "hybrid",
3422 |                                     "total_memories": memory_count,
3423 |                                     "has_embeddings": has_embeddings,
3424 |                                     "database_path": getattr(primary_storage, 'db_path', SQLITE_VEC_PATH),
3425 |                                     "cloudflare_sync": cloudflare_status
3426 |                                 }
3427 | 
3428 |                                 is_valid = True
3429 |                                 message = f"Hybrid storage validation successful ({memory_count} memories, Cloudflare: {cloudflare_status})"
3430 | 
3431 |                 except Exception as e:
3432 |                     is_valid = False
3433 |                     message = f"Hybrid storage validation error: {str(e)}"
3434 |                     stats = {
3435 |                         "status": "error",
3436 |                         "error": str(e),
3437 |                         "backend": "hybrid"
3438 |                     }
3439 | 
3440 |             else:
3441 |                 is_valid = False
3442 |                 message = f"Unknown storage type: {storage_type}"
3443 |                 stats = {
3444 |                     "status": "error",
3445 |                     "error": f"Unknown storage type: {storage_type}",
3446 |                     "backend": "unknown"
3447 |                 }
3448 |             
3449 |             # Get performance stats from optimized storage
3450 |             performance_stats = {}
3451 |             if hasattr(storage, 'get_performance_stats') and callable(storage.get_performance_stats):
3452 |                 try:
3453 |                     performance_stats = storage.get_performance_stats()
3454 |                 except Exception as perf_error:
3455 |                     logger.warning(f"Could not get performance stats: {str(perf_error)}")
3456 |                     performance_stats = {"error": str(perf_error)}
3457 |             
3458 |             # Get server-level performance stats
3459 |             server_stats = {
3460 |                 "average_query_time_ms": self.get_average_query_time(),
3461 |                 "total_queries": len(self.query_times)
3462 |             }
3463 |             
3464 |             # Add storage type for debugging
3465 |             server_stats["storage_type"] = storage_type
3466 |             
3467 |             # Add storage initialization status for debugging
3468 |             if hasattr(storage, 'get_initialization_status') and callable(storage.get_initialization_status):
3469 |                 try:
3470 |                     server_stats["storage_initialization"] = storage.get_initialization_status()
3471 |                 except Exception:
3472 |                     pass
3473 |             
3474 |             # Combine results with performance data
3475 |             result = {
3476 |                 "version": __version__,
3477 |                 "validation": {
3478 |                     "status": "healthy" if is_valid else "unhealthy",
3479 |                     "message": message
3480 |                 },
3481 |                 "statistics": stats,
3482 |                 "performance": {
3483 |                     "storage": performance_stats,
3484 |                     "server": server_stats
3485 |                 }
3486 |             }
3487 |             
3488 |             logger.info(f"Database health result with performance data: {result}")
3489 |             return [types.TextContent(
3490 |                 type="text",
3491 |                 text=f"Database Health Check Results:\n{json.dumps(result, indent=2)}"
3492 |             )]
3493 |         except Exception as e:
3494 |             logger.error(f"Error in check_database_health: {str(e)}")
3495 |             logger.error(traceback.format_exc())
3496 |             return [types.TextContent(
3497 |                 type="text",
3498 |                 text=f"Error checking database health: {str(e)}"
3499 |             )]
3500 | 
3501 |     async def handle_get_cache_stats(self, arguments: dict) -> List[types.TextContent]:
3502 |         """
3503 |         Get MCP server global cache statistics for performance monitoring.
3504 | 
3505 |         Returns detailed metrics about storage and memory service caching,
3506 |         including hit rates, initialization times, and cache sizes.
3507 |         """
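        # The formatting below assumes calculate_cache_stats_dict returns a dict
        # containing at least "message" and "storage_cache" keys (assumption based
        # on the accesses that follow).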
3508 |         global _CACHE_STATS, _STORAGE_CACHE, _MEMORY_SERVICE_CACHE
3509 | 
3510 |         try:
3511 |             # Import shared stats calculation utility
3512 |             from .utils.cache_manager import CacheStats, calculate_cache_stats_dict
3513 | 
3514 |             # Convert global dict to CacheStats dataclass
3515 |             stats = CacheStats(
3516 |                 total_calls=_CACHE_STATS["total_calls"],
3517 |                 storage_hits=_CACHE_STATS["storage_hits"],
3518 |                 storage_misses=_CACHE_STATS["storage_misses"],
3519 |                 service_hits=_CACHE_STATS["service_hits"],
3520 |                 service_misses=_CACHE_STATS["service_misses"],
3521 |                 initialization_times=_CACHE_STATS["initialization_times"]
3522 |             )
3523 | 
3524 |             # Calculate statistics using shared utility
3525 |             cache_sizes = (len(_STORAGE_CACHE), len(_MEMORY_SERVICE_CACHE))
3526 |             result = calculate_cache_stats_dict(stats, cache_sizes)
3527 | 
3528 |             # Add server-specific details
3529 |             result["storage_cache"]["keys"] = list(_STORAGE_CACHE.keys())
3530 |             result["backend_info"] = {
3531 |                 "storage_backend": STORAGE_BACKEND,
3532 |                 "sqlite_path": SQLITE_VEC_PATH
3533 |             }
3534 | 
3535 |             logger.info(f"Cache stats retrieved: {result['message']}")
3536 | 
3537 |             # Return JSON string for easy parsing by clients
3538 |             return [types.TextContent(
3539 |                 type="text",
3540 |                 text=json.dumps(result, indent=2)
3541 |             )]
3542 | 
3543 |         except Exception as e:
3544 |             logger.error(f"Error in get_cache_stats: {str(e)}")
3545 |             logger.error(traceback.format_exc())
3546 |             return [types.TextContent(
3547 |                 type="text",
3548 |                 text=f"Error getting cache stats: {str(e)}"
3549 |             )]
3550 | 
3551 |     async def handle_recall_by_timeframe(self, arguments: dict) -> List[types.TextContent]:
3552 |         """Handle recall by timeframe requests."""
3553 |         from datetime import datetime
3554 |         
3555 |         try:
3556 |             # Initialize storage lazily when needed
3557 |             storage = await self._ensure_storage_initialized()
3558 |             
3559 |             start_date = datetime.fromisoformat(arguments["start_date"]).date()
3560 |             end_date = datetime.fromisoformat(arguments.get("end_date", arguments["start_date"])).date()
3561 |             n_results = arguments.get("n_results", 5)
3562 |             
3563 |             # Get timestamp range
3564 |             start_timestamp = datetime(start_date.year, start_date.month, start_date.day).timestamp()
3565 |             end_timestamp = datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59).timestamp()
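            # The range covers the whole end_date day, from 00:00:00 through 23:59:59 inclusive.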
3566 |             
3567 |             # Log the timestamp values for debugging
3568 |             logger.info(f"Recall by timeframe: {start_date} to {end_date}")
3569 |             logger.info(f"Start timestamp: {start_timestamp} ({datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d %H:%M:%S')})")
3570 |             logger.info(f"End timestamp: {end_timestamp} ({datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d %H:%M:%S')})")
3571 |             
3572 |             # Retrieve memories with proper parameters - query is None because this is pure time-based filtering
3573 |             results = await storage.recall(
3574 |                 query=None,
3575 |                 n_results=n_results,
3576 |                 start_timestamp=start_timestamp,
3577 |                 end_timestamp=end_timestamp
3578 |             )
3579 |             
3580 |             if not results:
3581 |                 return [types.TextContent(type="text", text=f"No memories found from {start_date} to {end_date}")]
3582 |             
3583 |             formatted_results = []
3584 |             for i, result in enumerate(results):
3585 |                 memory_timestamp = result.memory.timestamp
3586 |                 memory_info = [
3587 |                     f"Memory {i+1}:",
3588 |                 ]
3589 |                 
3590 |                 # Add timestamp if available
3591 |                 if memory_timestamp:
3592 |                     memory_info.append(f"Timestamp: {memory_timestamp.strftime('%Y-%m-%d %H:%M:%S')}")
3593 |                 
3594 |                 memory_info.extend([
3595 |                     f"Content: {result.memory.content}",
3596 |                     f"Hash: {result.memory.content_hash}"
3597 |                 ])
3598 |                 
3599 |                 if result.memory.tags:
3600 |                     memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
3601 |                 memory_info.append("---")
3602 |                 formatted_results.append("\n".join(memory_info))
3603 |             
3604 |             return [types.TextContent(
3605 |                 type="text",
3606 |                 text=f"Found {len(results)} memories from {start_date} to {end_date}:\n\n" + "\n".join(formatted_results)
3607 |             )]
3608 |             
3609 |         except Exception as e:
3610 |             logger.error(f"Error in recall_by_timeframe: {str(e)}\n{traceback.format_exc()}")
3611 |             return [types.TextContent(
3612 |                 type="text",
3613 |                 text=f"Error recalling memories: {str(e)}"
3614 |             )]
3615 | 
3616 |     async def handle_delete_by_timeframe(self, arguments: dict) -> List[types.TextContent]:
3617 |         """Handle delete by timeframe requests."""
3618 |         from datetime import datetime
3619 |         
3620 |         try:
3621 |             # Initialize storage lazily when needed
3622 |             storage = await self._ensure_storage_initialized()
3623 |             
3624 |             start_date = datetime.fromisoformat(arguments["start_date"]).date()
3625 |             end_date = datetime.fromisoformat(arguments.get("end_date", arguments["start_date"])).date()
3626 |             tag = arguments.get("tag")
3627 |             
3628 |             count, message = await storage.delete_by_timeframe(start_date, end_date, tag)
3629 |             return [types.TextContent(
3630 |                 type="text",
3631 |                 text=f"Deleted {count} memories: {message}"
3632 |             )]
3633 |             
3634 |         except Exception as e:
3635 |             return [types.TextContent(
3636 |                 type="text",
3637 |                 text=f"Error deleting memories: {str(e)}"
3638 |             )]
3639 | 
3640 |     async def handle_delete_before_date(self, arguments: dict) -> List[types.TextContent]:
3641 |         """Handle delete before date requests."""
3642 |         from datetime import datetime
3643 |         
3644 |         try:
3645 |             # Initialize storage lazily when needed
3646 |             storage = await self._ensure_storage_initialized()
3647 |             
3648 |             before_date = datetime.fromisoformat(arguments["before_date"]).date()
3649 |             tag = arguments.get("tag")
3650 |             
3651 |             count, message = await storage.delete_before_date(before_date, tag)
3652 |             return [types.TextContent(
3653 |                 type="text",
3654 |                 text=f"Deleted {count} memories: {message}"
3655 |             )]
3656 |             
3657 |         except Exception as e:
3658 |             return [types.TextContent(
3659 |                 type="text",
3660 |                 text=f"Error deleting memories: {str(e)}"
3661 |             )]
3662 | 
3663 |     async def handle_ingest_document(self, arguments: dict) -> List[types.TextContent]:
3664 |         """Handle document ingestion requests."""
3665 |         try:
3666 |             from pathlib import Path
3667 |             from .ingestion import get_loader_for_file
3668 |             from .models.memory import Memory
3669 |             from .utils import generate_content_hash
3670 |             import time
3671 |             
3672 |             # Initialize storage lazily when needed
3673 |             storage = await self._ensure_storage_initialized()
3674 |             
3675 |             from .services.memory_service import normalize_tags
3676 | 
3677 |             file_path = Path(arguments["file_path"])
3678 |             tags = normalize_tags(arguments.get("tags", []))
3679 |             chunk_size = arguments.get("chunk_size", 1000)
3680 |             chunk_overlap = arguments.get("chunk_overlap", 200)
3681 |             memory_type = arguments.get("memory_type", "document")
3682 |             
3683 |             logger.info(f"Starting document ingestion: {file_path}")
3684 |             start_time = time.time()
3685 |             
3686 |             # Validate file exists and get appropriate document loader
3687 |             if not file_path.exists():
3688 |                 return [types.TextContent(
3689 |                     type="text",
3690 |                     text=f"Error: File not found: {file_path.resolve()}"
3691 |                 )]
3692 | 
3693 |             loader = get_loader_for_file(file_path)
3694 |             if loader is None:
3695 |                 from .ingestion import SUPPORTED_FORMATS
3696 |                 supported_exts = ", ".join(f".{ext}" for ext in SUPPORTED_FORMATS.keys())
3697 |                 return [types.TextContent(
3698 |                     type="text",
3699 |                     text=f"Error: Unsupported file format: {file_path.suffix}. Supported formats: {supported_exts}"
3700 |                 )]
3701 |             
3702 |             # Configure loader
3703 |             loader.chunk_size = chunk_size
3704 |             loader.chunk_overlap = chunk_overlap
3705 |             
3706 |             chunks_processed = 0
3707 |             chunks_stored = 0
3708 |             errors = []
3709 |             
3710 |             # Extract and store chunks
3711 |             async for chunk in loader.extract_chunks(file_path):
3712 |                 chunks_processed += 1
3713 |                 
3714 |                 try:
3715 |                     # Combine document tags with chunk metadata tags
3716 |                     all_tags = tags.copy()
3717 |                     if chunk.metadata.get('tags'):
3718 |                         # Handle tags from chunk metadata (can be string or list)
3719 |                         chunk_tags = chunk.metadata['tags']
3720 |                         if isinstance(chunk_tags, str):
3721 |                             # Split comma-separated string into list
3722 |                             chunk_tags = [tag.strip() for tag in chunk_tags.split(',') if tag.strip()]
3723 |                         all_tags.extend(chunk_tags)
3724 |                     
3725 |                     # Create memory object
3726 |                     memory = Memory(
3727 |                         content=chunk.content,
3728 |                         content_hash=generate_content_hash(chunk.content, chunk.metadata),
3729 |                         tags=list(set(all_tags)),  # Remove duplicates
3730 |                         memory_type=memory_type,
3731 |                         metadata=chunk.metadata
3732 |                     )
3733 |                     
3734 |                     # Store the memory
3735 |                     success, error = await storage.store(memory)
3736 |                     if success:
3737 |                         chunks_stored += 1
3738 |                     else:
3739 |                         errors.append(f"Chunk {chunk.chunk_index}: {error}")
3740 |                         
3741 |                 except Exception as e:
3742 |                     errors.append(f"Chunk {chunk.chunk_index}: {str(e)}")
3743 |             
3744 |             processing_time = time.time() - start_time
3745 |             success_rate = (chunks_stored / chunks_processed * 100) if chunks_processed > 0 else 0
3746 |             
3747 |             # Prepare result message
3748 |             result_lines = [
3749 |                 f"✅ Document ingestion completed: {file_path.name}",
3750 |                 f"📄 Chunks processed: {chunks_processed}",
3751 |                 f"💾 Chunks stored: {chunks_stored}",
3752 |                 f"⚡ Success rate: {success_rate:.1f}%",
3753 |                 f"⏱️  Processing time: {processing_time:.2f} seconds"
3754 |             ]
3755 |             
3756 |             if errors:
3757 |                 result_lines.append(f"⚠️  Errors encountered: {len(errors)}")
3758 |                 if len(errors) <= 5:  # Show first few errors
3759 |                     result_lines.extend([f"   - {error}" for error in errors[:5]])
3760 |                 else:
3761 |                     result_lines.extend([f"   - {error}" for error in errors[:3]])
3762 |                     result_lines.append(f"   ... and {len(errors) - 3} more errors")
3763 |             
3764 |             logger.info(f"Document ingestion completed: {chunks_stored}/{chunks_processed} chunks stored")
3765 |             return [types.TextContent(type="text", text="\n".join(result_lines))]
3766 |             
3767 |         except Exception as e:
3768 |             logger.error(f"Error in document ingestion: {str(e)}")
3769 |             return [types.TextContent(
3770 |                 type="text",
3771 |                 text=f"Error ingesting document: {str(e)}"
3772 |             )]
3773 | 
3774 |     async def handle_ingest_directory(self, arguments: dict) -> List[types.TextContent]:
3775 |         """Handle directory ingestion requests."""
3776 |         try:
3777 |             from pathlib import Path
3778 |             from .ingestion import get_loader_for_file, is_supported_file
3779 |             from .models.memory import Memory
3780 |             from .utils import generate_content_hash
3781 |             import time
3782 |             
3783 |             # Initialize storage lazily when needed
3784 |             storage = await self._ensure_storage_initialized()
3785 |             
3786 |             from .services.memory_service import normalize_tags
3787 | 
3788 |             directory_path = Path(arguments["directory_path"])
3789 |             tags = normalize_tags(arguments.get("tags", []))
3790 |             recursive = arguments.get("recursive", True)
3791 |             file_extensions = arguments.get("file_extensions", ["pdf", "txt", "md", "json"])
3792 |             chunk_size = arguments.get("chunk_size", 1000)
3793 |             max_files = arguments.get("max_files", 100)
3794 |             
3795 |             if not directory_path.exists() or not directory_path.is_dir():
3796 |                 return [types.TextContent(
3797 |                     type="text",
3798 |                     text=f"Error: Directory not found: {directory_path}"
3799 |                 )]
3800 |             
3801 |             logger.info(f"Starting directory ingestion: {directory_path}")
3802 |             start_time = time.time()
3803 |             
3804 |             # Find all supported files
3805 |             # Glob once per extension below; rglob covers the recursive case
3806 |             all_files = []
3807 |             
3808 |             for ext in file_extensions:
3809 |                 ext_pattern = f"*.{ext.lstrip('.')}"
3810 |                 if recursive:
3811 |                     files = list(directory_path.rglob(ext_pattern))
3812 |                 else:
3813 |                     files = list(directory_path.glob(ext_pattern))
3814 |                 all_files.extend(files)
3815 |             
3816 |             # Remove duplicates and filter supported files
3817 |             unique_files = []
3818 |             seen = set()
3819 |             for file_path in all_files:
3820 |                 if file_path not in seen and is_supported_file(file_path):
3821 |                     unique_files.append(file_path)
3822 |                     seen.add(file_path)
3823 |             
3824 |             # Limit number of files
3825 |             files_to_process = unique_files[:max_files]
3826 |             
3827 |             if not files_to_process:
3828 |                 return [types.TextContent(
3829 |                     type="text",
3830 |                     text=f"No supported files found in directory: {directory_path}"
3831 |                 )]
3832 |             
3833 |             total_chunks_processed = 0
3834 |             total_chunks_stored = 0
3835 |             files_processed = 0
3836 |             files_failed = 0
3837 |             all_errors = []
3838 |             
3839 |             # Process each file
3840 |             for file_path in files_to_process:
3841 |                 try:
3842 |                     logger.info(f"Processing file {files_processed + 1}/{len(files_to_process)}: {file_path.name}")
3843 |                     
3844 |                     # Get appropriate document loader
3845 |                     loader = get_loader_for_file(file_path)
3846 |                     if loader is None:
3847 |                         all_errors.append(f"{file_path.name}: Unsupported format")
3848 |                         files_failed += 1
3849 |                         continue
3850 |                     
3851 |                     # Configure loader
3852 |                     loader.chunk_size = chunk_size
3853 |                     
3854 |                     file_chunks_processed = 0
3855 |                     file_chunks_stored = 0
3856 |                     
3857 |                     # Extract and store chunks from this file
3858 |                     async for chunk in loader.extract_chunks(file_path):
3859 |                         file_chunks_processed += 1
3860 |                         total_chunks_processed += 1
3861 |                         
3862 |                         # Process and store the chunk
3863 |                         success, error = await _process_and_store_chunk(
3864 |                             chunk,
3865 |                             storage,
3866 |                             file_path.name,
3867 |                             base_tags=tags.copy(),
3868 |                             context_tags={
3869 |                                 "source_dir": directory_path.name,
3870 |                                 "file_type": file_path.suffix.lstrip('.')
3871 |                             }
3872 |                         )
3873 |                         
3874 |                         if success:
3875 |                             file_chunks_stored += 1
3876 |                             total_chunks_stored += 1
3877 |                         else:
3878 |                             all_errors.append(error)
3879 |                     
3880 |                     if file_chunks_stored > 0:
3881 |                         files_processed += 1
3882 |                     else:
3883 |                         files_failed += 1
3884 |                         
3885 |                 except Exception as e:
3886 |                     files_failed += 1
3887 |                     all_errors.append(f"{file_path.name}: {str(e)}")
3888 |             
3889 |             processing_time = time.time() - start_time
3890 |             success_rate = (total_chunks_stored / total_chunks_processed * 100) if total_chunks_processed > 0 else 0
3891 |             
3892 |             # Prepare result message
3893 |             result_lines = [
3894 |                 f"✅ Directory ingestion completed: {directory_path.name}",
3895 |                 f"📁 Files processed: {files_processed}/{len(files_to_process)}",
3896 |                 f"📄 Total chunks processed: {total_chunks_processed}",
3897 |                 f"💾 Total chunks stored: {total_chunks_stored}",
3898 |                 f"⚡ Success rate: {success_rate:.1f}%",
3899 |                 f"⏱️  Processing time: {processing_time:.2f} seconds"
3900 |             ]
3901 |             
3902 |             if files_failed > 0:
3903 |                 result_lines.append(f"❌ Files failed: {files_failed}")
3904 |             
3905 |             if all_errors:
3906 |                 result_lines.append(f"⚠️  Total errors: {len(all_errors)}")
3907 |                 # Show first few errors
3908 |                 error_limit = 5
3909 |                 for error in all_errors[:error_limit]:
3910 |                     result_lines.append(f"   - {error}")
3911 |                 if len(all_errors) > error_limit:
3912 |                     result_lines.append(f"   ... and {len(all_errors) - error_limit} more errors")
3913 |             
3914 |             logger.info(f"Directory ingestion completed: {total_chunks_stored}/{total_chunks_processed} chunks from {files_processed} files")
3915 |             return [types.TextContent(type="text", text="\n".join(result_lines))]
3916 |             
3917 |         except Exception as e:
3918 |             logger.error(f"Error in directory ingestion: {str(e)}")
3919 |             return [types.TextContent(
3920 |                 type="text",
3921 |                 text=f"Error ingesting directory: {str(e)}"
3922 |             )]
3923 | 
3924 | 
3925 | async def async_main():
3926 |     # Apply LM Studio compatibility patch before anything else
3927 |     patch_mcp_for_lm_studio()
3928 |     
3929 |     # Add Windows-specific timeout handling
3930 |     add_windows_timeout_handling()
3931 |     
3932 |     # Run dependency check before starting
3933 |     run_dependency_check()
3934 | 
3935 |     # Check if running with UV
3936 |     check_uv_environment()
3937 | 
3938 |     # Check for version mismatch (stale venv issue)
3939 |     check_version_consistency()
3940 | 
3941 |     # Debug logging is now handled by the CLI layer
3942 | 
3943 |     # Print system diagnostics only for LM Studio (avoid JSON parsing errors in Claude Desktop)
3944 |     system_info = get_system_info()
3945 |     if MCP_CLIENT == 'lm_studio':
3946 |         print("\n=== MCP Memory Service System Diagnostics ===", file=sys.stdout, flush=True)
3947 |         print(f"OS: {system_info.os_name} {system_info.architecture}", file=sys.stdout, flush=True)
3948 |         print(f"Python: {platform.python_version()}", file=sys.stdout, flush=True)
3949 |         print(f"Hardware Acceleration: {system_info.accelerator}", file=sys.stdout, flush=True)
3950 |         print(f"Memory: {system_info.memory_gb:.2f} GB", file=sys.stdout, flush=True)
3951 |         print(f"Optimal Model: {system_info.get_optimal_model()}", file=sys.stdout, flush=True)
3952 |         print(f"Optimal Batch Size: {system_info.get_optimal_batch_size()}", file=sys.stdout, flush=True)
3953 |         print(f"Storage Backend: {STORAGE_BACKEND}", file=sys.stdout, flush=True)
3954 |         print("================================================\n", file=sys.stdout, flush=True)
3955 | 
3956 |     logger.info(f"Starting MCP Memory Service with storage backend: {STORAGE_BACKEND}")
3957 |     
3958 |     try:
3959 |         # Create server instance with hardware-aware configuration
3960 |         memory_server = MemoryServer()
3961 |         
3962 |         # Set up async initialization with timeout and retry logic
3963 |         max_retries = 2
3964 |         retry_count = 0
3965 |         init_success = False
3966 |         
3967 |         while retry_count <= max_retries and not init_success:
3968 |             if retry_count > 0:
3969 |                 logger.warning(f"Retrying initialization (attempt {retry_count}/{max_retries})...")
3970 |                 
3971 |             init_task = asyncio.create_task(memory_server.initialize())
3972 |             try:
3973 |                 # 30 second timeout for initialization
3974 |                 init_success = await asyncio.wait_for(init_task, timeout=30.0)
3975 |                 if init_success:
3976 |                     logger.info("Async initialization completed successfully")
3977 |                 else:
3978 |                     logger.warning("Initialization returned failure status")
3979 |                     retry_count += 1
3980 |             except asyncio.TimeoutError:
3981 |                 logger.warning("Async initialization timed out. Continuing with server startup.")
3982 |                 # Don't cancel the task, let it complete in the background
3983 |                 break
3984 |             except Exception as init_error:
3985 |                 logger.error(f"Initialization error: {str(init_error)}")
3986 |                 logger.error(traceback.format_exc())
3987 |                 retry_count += 1
3988 |                 
3989 |                 if retry_count <= max_retries:
3990 |                     logger.info("Waiting 2 seconds before retry...")
3991 |                     await asyncio.sleep(2)
3992 |         
3993 |         # Check if running in standalone mode (Docker without active client)
3994 |         standalone_mode = os.environ.get('MCP_STANDALONE_MODE', '').lower() == '1'
3995 |         running_in_docker = os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER', '').lower() in ('1', 'true')
3996 |         
3997 |         if standalone_mode:
3998 |             logger.info("Running in standalone mode - keeping server alive without active client")
3999 |             if MCP_CLIENT == 'lm_studio':
4000 |                 print("MCP Memory Service running in standalone mode", file=sys.stdout, flush=True)
4001 |             
4002 |             # Keep the server running indefinitely
4003 |             try:
4004 |                 while True:
4005 |                     await asyncio.sleep(60)  # Sleep for 60 seconds at a time
4006 |                     logger.debug("Standalone server heartbeat")
4007 |             except asyncio.CancelledError:
4008 |                 logger.info("Standalone server cancelled")
4009 |                 raise
4010 |         else:
4011 |             # Start the server with stdio
4012 |             async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
4013 |                 logger.info("Server started and ready to handle requests")
4014 |                 
4015 |                 if running_in_docker:
4016 |                     logger.info("Detected Docker environment - ensuring proper stdio handling")
4017 |                     if MCP_CLIENT == 'lm_studio':
4018 |                         print("MCP Memory Service running in Docker container", file=sys.stdout, flush=True)
4019 |                 
4020 |                 try:
4021 |                     await memory_server.server.run(
4022 |                         read_stream,
4023 |                         write_stream,
4024 |                         InitializationOptions(
4025 |                             server_name=SERVER_NAME,
4026 |                             server_version=SERVER_VERSION,
4027 |                             # Explicitly specify the protocol version that matches Claude's request
4028 |                             # Use the latest protocol version to ensure compatibility with all clients
4029 |                             protocol_version="2024-11-05",
4030 |                             capabilities=memory_server.server.get_capabilities(
4031 |                                 notification_options=NotificationOptions(),
4032 |                                 experimental_capabilities={
4033 |                                     "hardware_info": {
4034 |                                         "architecture": system_info.architecture,
4035 |                                         "accelerator": system_info.accelerator,
4036 |                                         "memory_gb": system_info.memory_gb,
4037 |                                         "cpu_count": system_info.cpu_count
4038 |                                     }
4039 |                                 },
4040 |                             ),
4041 |                         ),
4042 |                     )
4043 |                 except asyncio.CancelledError:
4044 |                     logger.info("Server run cancelled")
4045 |                     raise
4046 |                 except BaseException as e:
4047 |                     # Handle ExceptionGroup specially (Python 3.11+)
4048 |                     if type(e).__name__ == 'ExceptionGroup' or 'ExceptionGroup' in str(type(e)):
4049 |                         error_str = str(e)
4050 |                         # Check if this contains the LM Studio cancelled notification error
4051 |                         if 'notifications/cancelled' in error_str or 'ValidationError' in error_str:
4052 |                             logger.info("LM Studio sent a cancelled notification - this is expected behavior")
4053 |                             logger.debug(f"Full error for debugging: {error_str}")
4054 |                             # Don't re-raise - just continue gracefully
4055 |                         else:
4056 |                             logger.error(f"ExceptionGroup in server.run: {str(e)}")
4057 |                             logger.error(traceback.format_exc())
4058 |                             raise
4059 |                     else:
4060 |                         logger.error(f"Error in server.run: {str(e)}")
4061 |                         logger.error(traceback.format_exc())
4062 |                         raise
4063 |                 finally:
4064 |                     logger.info("Server run completed")
4065 |     except Exception as e:
4066 |         logger.error(f"Server error: {str(e)}")
4067 |         logger.error(traceback.format_exc())
4068 |         print(f"Fatal server error: {str(e)}", file=sys.stderr, flush=True)
4069 |         raise
4070 | 
4071 | def main():
4072 |     import signal
4073 |     
4074 |     # Set up signal handlers for graceful shutdown
4075 |     def signal_handler(signum, frame):
4076 |         logger.info(f"Received signal {signum}, shutting down gracefully...")
4077 |         sys.exit(0)
4078 |     
4079 |     signal.signal(signal.SIGTERM, signal_handler)
4080 |     signal.signal(signal.SIGINT, signal_handler)
4081 |     
4082 |     try:
4083 |         # Check if running in Docker
4084 |         if os.path.exists('/.dockerenv') or os.environ.get('DOCKER_CONTAINER', '').lower() in ('1', 'true'):
4085 |             logger.info("Running in Docker container")
4086 |             if MCP_CLIENT == 'lm_studio':
4087 |                 print("MCP Memory Service starting in Docker mode", file=sys.stdout, flush=True)
4088 |         
4089 |         asyncio.run(async_main())
4090 |     except KeyboardInterrupt:
4091 |         logger.info("Shutting down gracefully...")
4092 |     except Exception as e:
4093 |         logger.error(f"Fatal error: {str(e)}\n{traceback.format_exc()}")
4094 |         sys.exit(1)
4095 | 
4096 | if __name__ == "__main__":
4097 |     main()
4098 | 
```
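A few details of the handlers above, restated as small self-contained sketches. First, the timeframe handlers parse ISO-8601 dates with `datetime.fromisoformat` and let `end_date` default to `start_date`, so a single-day request needs only one argument (`parse_timeframe` is an illustrative name, not a function in the service):

```python
from datetime import datetime

def parse_timeframe(arguments: dict):
    # end_date falls back to start_date, mirroring handle_delete_by_timeframe
    start = datetime.fromisoformat(arguments["start_date"]).date()
    end = datetime.fromisoformat(arguments.get("end_date", arguments["start_date"])).date()
    return start, end

print(parse_timeframe({"start_date": "2025-01-01"}))                            # one-day window
print(parse_timeframe({"start_date": "2025-01-01", "end_date": "2025-01-31"}))  # explicit range
```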
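`handle_ingest_document` merges document-level tags with per-chunk metadata tags, which may arrive as a list or as a comma-separated string. The same normalization in isolation (`merge_tags` is a hypothetical helper; the handler deduplicates with `list(set(...))`, which does not preserve order, so this sketch sorts instead for deterministic output):

```python
def merge_tags(base_tags: list[str], chunk_meta_tags) -> list[str]:
    merged = list(base_tags)
    if isinstance(chunk_meta_tags, str):
        # Split a comma-separated string into clean, non-empty tags
        chunk_meta_tags = [t.strip() for t in chunk_meta_tags.split(',') if t.strip()]
    merged.extend(chunk_meta_tags or [])
    return sorted(set(merged))

print(merge_tags(["docs"], "ingest, pdf , docs"))  # ['docs', 'ingest', 'pdf']
```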
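`handle_ingest_directory` discovers files by globbing once per extension, de-duplicating, and capping the batch at `max_files`. The walk can be reproduced with `pathlib` alone (the `is_supported_file` check is dropped here for brevity):

```python
from pathlib import Path

def find_files(directory: Path, extensions, recursive: bool = True, max_files: int = 100):
    glob = directory.rglob if recursive else directory.glob  # rglob descends into subdirectories
    seen, unique = set(), []
    for ext in extensions:
        for path in glob(f"*.{ext.lstrip('.')}"):
            if path not in seen:
                seen.add(path)
                unique.append(path)
    return unique[:max_files]  # honor the max_files cap

print(find_files(Path("."), ["md", "py"], recursive=False, max_files=5))
```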
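`async_main` gives initialization a 30-second budget with up to two retries, and on timeout it moves on while the task keeps running. A bare `asyncio.wait_for` would cancel the awaited task on timeout, which is why the call above wraps it in `asyncio.shield`; the pattern reduced to a runnable sketch (`slow_init` stands in for `memory_server.initialize()`):

```python
import asyncio

async def slow_init() -> bool:  # stand-in for memory_server.initialize()
    await asyncio.sleep(5)
    return True

async def main() -> bool:
    task = asyncio.create_task(slow_init())
    try:
        # shield() absorbs the cancellation, so only the wait is abandoned
        return await asyncio.wait_for(asyncio.shield(task), timeout=0.1)
    except asyncio.TimeoutError:
        return False  # the task itself is still running in the background

print(asyncio.run(main()))  # False: the wait timed out, init continues
```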
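Finally, the server detects `ExceptionGroup` by class name rather than with `except*` or an `isinstance` check, which keeps the module importable on Python 3.10, where the builtin does not exist. A sketch of that guard (the demo `raise` itself needs Python 3.11+):

```python
def is_exception_group(exc: BaseException) -> bool:
    # Name-based check: works even where ExceptionGroup is not defined
    return type(exc).__name__ == 'ExceptionGroup' or 'ExceptionGroup' in str(type(exc))

try:
    raise ExceptionGroup("demo", [ValueError("boom")])  # Python 3.11+ only
except BaseException as e:
    print(is_exception_group(e))  # True
```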