#
tokens: 46658/50000 10/625 files (page 21/35)
lines: off (toggle) GitHub
raw markdown copy
This is page 21 of 35. Use http://codebase.md/doobidoo/mcp-memory-service?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/consolidation/compression.py:
--------------------------------------------------------------------------------

```python
# Copyright 2024 Heinrich Krupp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Semantic compression engine for memory cluster summarization."""

import numpy as np
from typing import List, Dict, Any, Optional, Tuple, Set
from datetime import datetime
from dataclasses import dataclass
from collections import Counter
import re
import hashlib

from .base import ConsolidationBase, ConsolidationConfig, MemoryCluster
from ..models.memory import Memory

@dataclass
class CompressionResult:
    """Result of compressing a memory cluster into a single condensed memory.

    Produced by SemanticCompressionEngine.process(), one instance per
    successfully compressed MemoryCluster.
    """
    # Identifier of the source cluster this result was derived from.
    cluster_id: str
    # The condensed Memory object summarizing the cluster's contents.
    compressed_memory: Memory
    # Size-reduction ratio for the compression — exact definition is set by
    # the producing engine (not visible in this chunk); TODO confirm whether
    # this is compressed/original or original/compressed.
    compression_ratio: float
    # Key concepts extracted from the clustered memories.
    key_concepts: List[str]
    # Temporal range covered by the source memories — presumably start/end
    # timestamps and duration; verify against the producer.
    temporal_span: Dict[str, Any]
    # Number of original memories that were folded into this summary.
    source_memory_count: int
    # Additional engine-specific details about how compression was performed.
    compression_metadata: Dict[str, Any]

class SemanticCompressionEngine(ConsolidationBase):
    """
    Creates condensed representations of memory clusters for efficient storage.

    This creates higher-level abstractions while preserving key information,
    using statistical methods and concept extraction to summarize clusters.
    """

    def __init__(self, config: ConsolidationConfig):
        super().__init__(config)
        self.max_summary_length = config.max_summary_length
        self.preserve_originals = config.preserve_originals

        # Regex patterns for text spans that usually carry high information
        # density and should be preserved as key concepts.
        self._important_patterns = {
            'technical_terms': re.compile(r'\b[A-Z][a-z]*[A-Z][a-zA-Z]*\b'),  # CamelCase
            'acronyms': re.compile(r'\b[A-Z]{2,}\b'),
            'numbers': re.compile(r'\b\d+(?:\.\d+)?\b'),
            'urls': re.compile(r'https?://[^\s]+'),
            'file_paths': re.compile(r'[/\\][^\s]+'),
            'quoted_text': re.compile(r'"([^"]*)"'),
            'code_blocks': re.compile(r'```[\s\S]*?```|`[^`]+`')
        }

    async def process(self, clusters: List[MemoryCluster], memories: List[Memory], **kwargs) -> List[CompressionResult]:
        """Compress memory clusters into condensed representations.

        Args:
            clusters: Clusters to compress; clusters with fewer than two
                resolvable memories are skipped.
            memories: Pool of memories referenced by the clusters' hashes.

        Returns:
            One CompressionResult per successfully compressed cluster.
        """
        if not clusters:
            return []

        # Index memories by content hash for O(1) lookup per cluster member.
        memory_lookup = {m.content_hash: m for m in memories}

        compression_results = []
        for cluster in clusters:
            # Resolve this cluster's member hashes; silently skip hashes with
            # no matching memory (they may have been deleted since clustering).
            cluster_memories = [
                memory_lookup[hash_val]
                for hash_val in cluster.memory_hashes
                if hash_val in memory_lookup
            ]

            if not cluster_memories:
                continue

            result = await self._compress_cluster(cluster, cluster_memories)
            if result:
                compression_results.append(result)

        self.logger.info(f"Compressed {len(compression_results)} clusters")
        return compression_results

    async def _compress_cluster(self, cluster: MemoryCluster, memories: List[Memory]) -> Optional[CompressionResult]:
        """Compress a single memory cluster.

        Returns None for clusters with fewer than two memories, since there
        is nothing to condense.
        """
        if len(memories) < 2:
            return None

        # Extract key concepts and themes.
        key_concepts = await self._extract_key_concepts(memories, cluster.theme_keywords)

        # Generate thematic summary.
        summary = await self._generate_thematic_summary(memories, key_concepts)

        # Calculate temporal information.
        temporal_span = self._calculate_temporal_span(memories)

        # Aggregate tags and metadata.
        aggregated_tags = self._aggregate_tags(memories)
        aggregated_metadata = self._aggregate_metadata(memories)

        # The cluster centroid serves as the compressed memory's embedding.
        compressed_embedding = cluster.centroid_embedding

        # Compression ratio = summary size / total original content size.
        original_size = sum(len(m.content) for m in memories)
        compressed_size = len(summary)
        compression_ratio = compressed_size / original_size if original_size > 0 else 0

        # Content hash identifies the compressed memory by its summary text.
        content_hash = hashlib.sha256(summary.encode()).hexdigest()

        now = datetime.now()
        compressed_memory = Memory(
            content=summary,
            content_hash=content_hash,
            tags=aggregated_tags,
            memory_type='compressed_cluster',
            metadata={
                **aggregated_metadata,
                'cluster_id': cluster.cluster_id,
                'compression_date': now.isoformat(),
                'source_memory_count': len(memories),
                'compression_ratio': compression_ratio,
                'key_concepts': key_concepts,
                'temporal_span': temporal_span,
                'theme_keywords': cluster.theme_keywords,
                'coherence_score': cluster.coherence_score,
                'compression_version': '1.0'
            },
            embedding=compressed_embedding,
            created_at=now.timestamp(),
            # Use an actual UTC timestamp; the previous code appended 'Z' to
            # local time, mislabelling it as UTC.
            created_at_iso=datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')
        )

        return CompressionResult(
            cluster_id=cluster.cluster_id,
            compressed_memory=compressed_memory,
            compression_ratio=compression_ratio,
            key_concepts=key_concepts,
            temporal_span=temporal_span,
            source_memory_count=len(memories),
            compression_metadata={
                'algorithm': 'semantic_compression_v1',
                'original_total_length': original_size,
                'compressed_length': compressed_size,
                'concept_count': len(key_concepts),
                'theme_keywords': cluster.theme_keywords
            }
        )

    async def _extract_key_concepts(self, memories: List[Memory], theme_keywords: List[str]) -> List[str]:
        """Extract up to 15 key concepts from cluster memories.

        Combines theme keywords, regex-matched spans (CamelCase, acronyms,
        numbers, URLs, paths, quotes, code), capitalized terms, and frequent
        non-stop words, ranked by word frequency.
        """
        all_text = ' '.join(m.content for m in memories)
        concepts = set()

        # Theme keywords are primary concepts.
        concepts.update(theme_keywords)

        # Extract important patterns. Note: for 'quoted_text' findall returns
        # the capture group (text inside the quotes); for the other patterns
        # it returns the whole match. Either way the matches are added as-is.
        for pattern in self._important_patterns.values():
            concepts.update(pattern.findall(all_text))

        # Capitalized terms are potential proper nouns.
        capitalized = re.findall(r'\b[A-Z][a-z]{2,}\b', all_text)
        concepts.update(capitalized)

        # Count meaningful lowercase words (4+ letters) for frequency ranking.
        words = re.findall(r'\b[a-zA-Z]{4,}\b', all_text.lower())
        word_counts = Counter(words)

        # Common English words excluded from frequency-based concepts.
        stop_words = {
            'this', 'that', 'with', 'have', 'will', 'from', 'they', 'know',
            'want', 'been', 'good', 'much', 'some', 'time', 'very', 'when',
            'come', 'here', 'just', 'like', 'long', 'make', 'many', 'over',
            'such', 'take', 'than', 'them', 'well', 'were', 'what', 'work',
            'your', 'could', 'should', 'would', 'there', 'their', 'these',
            'about', 'after', 'again', 'before', 'being', 'between', 'during',
            'under', 'where', 'while', 'other', 'through', 'against', 'without'
        }

        # Add frequent non-stop words that appear at least twice.
        for word, count in word_counts.most_common(20):
            if word not in stop_words and count >= 2:
                concepts.add(word)

        # Rank all concepts by lowercase word frequency and keep the top 15.
        concept_list = sorted(
            concepts,
            key=lambda x: word_counts.get(x.lower(), 0),
            reverse=True
        )
        return concept_list[:15]

    async def _generate_thematic_summary(self, memories: List[Memory], key_concepts: List[str]) -> str:
        """Generate a thematic summary of the memory cluster.

        Selects sentences that introduce new key concepts, prefixes a cluster
        overview, and appends a concept list, all within max_summary_length.
        """
        # Collect sentences that cover not-yet-seen concepts.
        representative_sentences = []
        concept_coverage = set()

        for memory in memories:
            for sentence in self._split_into_sentences(memory.content):
                sentence_lower = sentence.lower()
                sentence_concepts = {
                    concept for concept in key_concepts
                    if concept.lower() in sentence_lower
                }

                # Include the sentence only if it covers new concepts and is
                # long enough to be informative.
                new_concepts = sentence_concepts - concept_coverage
                if new_concepts and len(sentence) > 20:
                    representative_sentences.append({
                        'sentence': sentence.strip(),
                        'concepts': sentence_concepts,
                        'new_concepts': new_concepts,
                        # New concepts dominate the score; total coverage is a
                        # small tie-breaker.
                        'score': len(new_concepts) + len(sentence_concepts) * 0.1
                    })
                    concept_coverage.update(new_concepts)

        representative_sentences.sort(key=lambda x: x['score'], reverse=True)

        # Cluster overview line.
        memory_count = len(memories)
        time_span = self._calculate_temporal_span(memories)
        concept_str = ', '.join(key_concepts[:5])

        overview = f"Cluster of {memory_count} related memories about {concept_str}"
        if time_span['span_days'] > 0:
            overview += f" spanning {time_span['span_days']} days"
        overview += "."

        summary_parts = [overview]
        used_length = len(overview)

        # Budget for sentences: total limit minus ~100 chars reserved for the
        # concluding concept list. (The previous version subtracted
        # used_length twice, which starved summaries of sentences.)
        sentence_budget = self.max_summary_length - 100

        for sent_info in representative_sentences:
            sentence = sent_info['sentence']
            if used_length + len(sentence) < sentence_budget:
                summary_parts.append(sentence)
                used_length += len(sentence)
            else:
                break

        # Append the concept summary if space allows.
        if used_length < self.max_summary_length - 50:
            concept_summary = f"Key concepts: {', '.join(key_concepts[:8])}."
            if used_length + len(concept_summary) < self.max_summary_length:
                summary_parts.append(concept_summary)

        summary = ' '.join(summary_parts)

        # Hard truncation as a final safety net.
        if len(summary) > self.max_summary_length:
            summary = summary[:self.max_summary_length - 3] + '...'

        return summary

    def _split_into_sentences(self, text: str) -> List[str]:
        """Split text into sentences using simple punctuation heuristics.

        Sentences of 10 characters or fewer are discarded. (A proper sentence
        tokenizer such as NLTK's would be more accurate.)
        """
        sentences = re.split(r'[.!?]+\s+', text)
        return [s.strip() for s in sentences if len(s.strip()) > 10]

    def _calculate_temporal_span(self, memories: List[Memory]) -> Dict[str, Any]:
        """Calculate temporal information for the memory cluster.

        Returns start/end epoch timestamps, the span in whole days, a
        human-readable description, and UTC ISO-8601 strings. All fields are
        None/0/'unknown' when no memory carries a timestamp.
        """
        timestamps = []
        for memory in memories:
            if memory.created_at:
                timestamps.append(memory.created_at)
            elif memory.timestamp:
                # Fallback attribute; assumed to be a datetime — TODO confirm
                # against the Memory model.
                timestamps.append(memory.timestamp.timestamp())

        if not timestamps:
            return {
                'start_time': None,
                'end_time': None,
                'span_days': 0,
                'span_description': 'unknown'
            }

        start_time = min(timestamps)
        end_time = max(timestamps)
        span_days = int((end_time - start_time) / (24 * 3600))

        # Human-readable span description.
        if span_days == 0:
            span_description = 'same day'
        elif span_days < 7:
            span_description = f'{span_days} days'
        elif span_days < 30:
            weeks = span_days // 7
            span_description = f'{weeks} week{"s" if weeks > 1 else ""}'
        elif span_days < 365:
            months = span_days // 30
            span_description = f'{months} month{"s" if months > 1 else ""}'
        else:
            years = span_days // 365
            span_description = f'{years} year{"s" if years > 1 else ""}'

        # Timezone-aware conversion; utcfromtimestamp() is deprecated since
        # Python 3.12. Output format ('...Z') is unchanged.
        def _iso_utc(ts: float) -> str:
            return datetime.fromtimestamp(ts, timezone.utc).isoformat().replace('+00:00', 'Z')

        return {
            'start_time': start_time,
            'end_time': end_time,
            'span_days': span_days,
            'span_description': span_description,
            'start_iso': _iso_utc(start_time),
            'end_iso': _iso_utc(end_time)
        }

    def _aggregate_tags(self, memories: List[Memory]) -> List[str]:
        """Aggregate tags from cluster memories (at most 10).

        Always includes 'cluster' and 'compressed'; keeps tags that appear in
        multiple memories or belong to a small always-keep set.
        """
        all_tags = []
        for memory in memories:
            all_tags.extend(memory.tags)

        tag_counts = Counter(all_tags)

        aggregated_tags = ['cluster', 'compressed']  # Always include these.
        for tag, count in tag_counts.most_common():
            if count > 1 or tag in {'important', 'critical', 'reference', 'project'}:
                if tag not in aggregated_tags:
                    aggregated_tags.append(tag)

        return aggregated_tags[:10]

    def _aggregate_metadata(self, memories: List[Memory]) -> Dict[str, Any]:
        """Aggregate metadata from cluster memories.

        Keys shared by all memories become 'common_<key>'; keys with up to 5
        distinct values become 'varied_<key>' lists; otherwise only a
        '<key>_variety_count' is recorded. Source hashes are always included.
        """
        aggregated = {
            'source_memory_hashes': [m.content_hash for m in memories]
        }

        # Collect every value observed for each metadata key.
        all_metadata: Dict[str, List[Any]] = {}
        for memory in memories:
            for key, value in memory.metadata.items():
                all_metadata.setdefault(key, []).append(value)

        for key, values in all_metadata.items():
            # Values are stringified for comparison, so distinct objects with
            # equal string forms collapse together.
            unique_values = list(set(str(v) for v in values))

            if len(unique_values) == 1:
                aggregated[f'common_{key}'] = unique_values[0]
            elif len(unique_values) <= 5:
                aggregated[f'varied_{key}'] = unique_values
            else:
                aggregated[f'{key}_variety_count'] = len(unique_values)

        return aggregated

    async def estimate_compression_benefit(
        self,
        clusters: List[MemoryCluster],
        memories: List[Memory]
    ) -> Dict[str, Any]:
        """Estimate the benefit of compressing given clusters.

        Uses a rough heuristic: each compressible cluster shrinks to
        min(max_summary_length, original_size // 3).
        """
        memory_lookup = {m.content_hash: m for m in memories}

        total_original_size = 0
        total_compressed_size = 0
        compressible_clusters = 0

        for cluster in clusters:
            cluster_memories = [memory_lookup[h] for h in cluster.memory_hashes if h in memory_lookup]

            # Single-memory clusters are skipped by _compress_cluster too.
            if len(cluster_memories) < 2:
                continue

            compressible_clusters += 1
            original_size = sum(len(m.content) for m in cluster_memories)

            # Rough approximation of the post-compression size.
            estimated_compressed_size = min(self.max_summary_length, original_size // 3)

            total_original_size += original_size
            total_compressed_size += estimated_compressed_size

        overall_ratio = total_compressed_size / total_original_size if total_original_size > 0 else 1.0
        savings = total_original_size - total_compressed_size

        return {
            'compressible_clusters': compressible_clusters,
            'total_original_size': total_original_size,
            'estimated_compressed_size': total_compressed_size,
            'compression_ratio': overall_ratio,
            'estimated_savings_bytes': savings,
            'estimated_savings_percent': (1 - overall_ratio) * 100
        }
```

--------------------------------------------------------------------------------
/claude-hooks/utilities/session-tracker.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Session Tracker Utility
 * Provides cross-session intelligence and conversation continuity
 * Phase 2: Intelligent Context Updates
 */

const fs = require('fs').promises;
const path = require('path');
const crypto = require('crypto');

/**
 * Session tracking data structure
 */
class SessionTracker {
    constructor(options = {}) {
        this.options = {
            maxSessionHistory: 50,      // Maximum sessions to track
            maxConversationDepth: 10,   // Maximum conversation thread depth
            sessionExpiryDays: 30,      // Days after which sessions are considered expired
            trackingDataPath: options.trackingDataPath || path.join(__dirname, '../session-tracking.json'),
            ...options
        };

        this.sessions = new Map();
        this.conversationThreads = new Map();
        this.projectSessions = new Map();
        this.loaded = false;
    }

    /**
     * Initialize session tracking system
     */
    async initialize() {
        console.log('[Session Tracker] Initializing session tracking system...');
        
        try {
            await this.loadTrackingData();
            this.cleanupExpiredSessions();
            this.loaded = true;
            
            console.log(`[Session Tracker] Loaded ${this.sessions.size} sessions, ${this.conversationThreads.size} threads`);
        } catch (error) {
            console.error('[Session Tracker] Failed to initialize:', error.message);
            this.loaded = false;
        }
    }

    /**
     * Start tracking a new session
     */
    async startSession(sessionId, context = {}) {
        if (!this.loaded) {
            await this.initialize();
        }

        const session = {
            id: sessionId,
            startTime: new Date().toISOString(),
            endTime: null,
            projectContext: context.projectContext || {},
            workingDirectory: context.workingDirectory,
            initialTopics: [],
            finalTopics: [],
            memoriesLoaded: [],
            memoriesCreated: [],
            conversationSummary: null,
            outcome: null,
            threadId: null,
            parentSessionId: null,
            childSessionIds: [],
            status: 'active'
        };

        // Try to link to existing conversation thread
        await this.linkToConversationThread(session, context);

        this.sessions.set(sessionId, session);
        
        // Track by project
        const projectName = context.projectContext?.name;
        if (projectName) {
            if (!this.projectSessions.has(projectName)) {
                this.projectSessions.set(projectName, []);
            }
            this.projectSessions.get(projectName).push(sessionId);
        }

        console.log(`[Session Tracker] Started session ${sessionId} for project: ${projectName || 'unknown'}`);
        
        await this.saveTrackingData();
        return session;
    }

    /**
     * End session tracking and record outcomes
     */
    async endSession(sessionId, outcome = {}) {
        const session = this.sessions.get(sessionId);
        if (!session) {
            console.warn(`[Session Tracker] Session ${sessionId} not found`);
            return null;
        }

        session.endTime = new Date().toISOString();
        session.status = 'completed';
        session.outcome = outcome;
        session.conversationSummary = outcome.summary;
        session.finalTopics = outcome.topics || [];

        // Update conversation thread with session outcome
        if (session.threadId) {
            await this.updateConversationThread(session.threadId, session);
        }

        console.log(`[Session Tracker] Ended session ${sessionId} with outcome: ${outcome.type || 'unknown'}`);
        
        await this.saveTrackingData();
        return session;
    }

    /**
     * Link session to existing conversation thread or create new one
     */
    async linkToConversationThread(session, context) {
        // Try to find related sessions based on project and recent activity
        const relatedSessions = await this.findRelatedSessions(session, context);
        
        if (relatedSessions.length > 0) {
            // Link to existing thread
            const parentSession = relatedSessions[0];
            session.threadId = parentSession.threadId;
            session.parentSessionId = parentSession.id;
            
            // Update parent session
            if (this.sessions.has(parentSession.id)) {
                this.sessions.get(parentSession.id).childSessionIds.push(session.id);
            }

            console.log(`[Session Tracker] Linked session ${session.id} to thread ${session.threadId}`);
        } else {
            // Create new conversation thread
            const threadId = this.generateThreadId();
            session.threadId = threadId;

            const thread = {
                id: threadId,
                createdAt: new Date().toISOString(),
                projectContext: session.projectContext,
                sessionIds: [session.id],
                topics: new Set(),
                outcomes: [],
                status: 'active'
            };

            this.conversationThreads.set(threadId, thread);
            console.log(`[Session Tracker] Created new conversation thread ${threadId}`);
        }
    }

    /**
     * Find related sessions for conversation threading
     */
    async findRelatedSessions(session, context) {
        const projectName = context.projectContext?.name;
        if (!projectName) {
            return [];
        }

        const projectSessionIds = this.projectSessions.get(projectName) || [];
        const relatedSessions = [];

        // Look for recent sessions in same project
        const cutoffTime = new Date();
        cutoffTime.setHours(cutoffTime.getHours() - 24); // 24 hour window

        for (const sessionId of projectSessionIds.slice(-10)) { // Check last 10 sessions
            const session = this.sessions.get(sessionId);
            if (!session || session.status === 'active') continue;

            const sessionTime = new Date(session.endTime || session.startTime);
            if (sessionTime > cutoffTime) {
                // Calculate relatedness score
                const relatednessScore = this.calculateSessionRelatedness(session, context);
                if (relatednessScore > 0.3) {
                    relatedSessions.push({
                        ...session,
                        relatednessScore
                    });
                }
            }
        }

        // Sort by relatedness score
        return relatedSessions.sort((a, b) => b.relatednessScore - a.relatednessScore);
    }

    /**
     * Calculate how related two sessions are
     */
    calculateSessionRelatedness(existingSession, newContext) {
        let score = 0;

        // Same project bonus
        if (existingSession.projectContext?.name === newContext.projectContext?.name) {
            score += 0.4;
        }

        // Same working directory bonus
        if (existingSession.workingDirectory === newContext.workingDirectory) {
            score += 0.3;
        }

        // Technology stack similarity
        const existingTech = [
            ...(existingSession.projectContext?.languages || []),
            ...(existingSession.projectContext?.frameworks || [])
        ];
        const newTech = [
            ...(newContext.projectContext?.languages || []),
            ...(newContext.projectContext?.frameworks || [])
        ];

        const techOverlap = existingTech.filter(tech => newTech.includes(tech)).length;
        if (existingTech.length > 0) {
            score += (techOverlap / existingTech.length) * 0.3;
        }

        return Math.min(score, 1.0);
    }

    /**
     * Update conversation thread with session information
     */
    async updateConversationThread(threadId, session) {
        const thread = this.conversationThreads.get(threadId);
        if (!thread) {
            console.warn(`[Session Tracker] Thread ${threadId} not found`);
            return;
        }

        // Add session to thread if not already present
        if (!thread.sessionIds.includes(session.id)) {
            thread.sessionIds.push(session.id);
        }

        // Update thread topics
        if (session.finalTopics && session.finalTopics.length > 0) {
            session.finalTopics.forEach(topic => thread.topics.add(topic));
        }

        // Add outcome to thread history
        if (session.outcome) {
            thread.outcomes.push({
                sessionId: session.id,
                outcome: session.outcome,
                timestamp: session.endTime
            });
        }

        thread.lastUpdated = new Date().toISOString();
    }

    /**
     * Get conversation context for a new session
     */
    async getConversationContext(projectContext, options = {}) {
        const {
            maxPreviousSessions = 3,
            maxDaysBack = 7
        } = options;

        const projectName = projectContext?.name;
        if (!projectName) {
            return null;
        }

        const projectSessionIds = this.projectSessions.get(projectName) || [];
        if (projectSessionIds.length === 0) {
            return null;
        }

        // Get recent sessions
        const cutoffTime = new Date();
        cutoffTime.setDate(cutoffTime.getDate() - maxDaysBack);

        const recentSessions = [];
        for (const sessionId of projectSessionIds.slice(-10)) {
            const session = this.sessions.get(sessionId);
            if (!session || session.status === 'active') continue;

            const sessionTime = new Date(session.endTime || session.startTime);
            if (sessionTime > cutoffTime) {
                recentSessions.push(session);
            }
        }

        // Sort by end time and take most recent
        const sortedSessions = recentSessions
            .sort((a, b) => new Date(b.endTime || b.startTime) - new Date(a.endTime || a.startTime))
            .slice(0, maxPreviousSessions);

        if (sortedSessions.length === 0) {
            return null;
        }

        // Build conversation context
        const context = {
            projectName: projectName,
            recentSessions: sortedSessions.map(session => ({
                id: session.id,
                endTime: session.endTime,
                outcome: session.outcome,
                topics: session.finalTopics,
                memoriesCreated: session.memoriesCreated?.length || 0
            })),
            continuityInsights: this.extractContinuityInsights(sortedSessions),
            activeThreads: this.getActiveThreadsForProject(projectName)
        };

        return context;
    }

    /**
     * Extract insights about conversation continuity
     */
    extractContinuityInsights(sessions) {
        const insights = {
            recurringTopics: this.findRecurringTopics(sessions),
            progressionPatterns: this.analyzeProgressionPatterns(sessions),
            uncompletedTasks: this.findUncompletedTasks(sessions)
        };

        return insights;
    }

    /**
     * Find topics that appear across multiple sessions
     */
    findRecurringTopics(sessions) {
        const topicCounts = new Map();
        
        sessions.forEach(session => {
            (session.finalTopics || []).forEach(topic => {
                topicCounts.set(topic, (topicCounts.get(topic) || 0) + 1);
            });
        });

        return Array.from(topicCounts.entries())
            .filter(([topic, count]) => count > 1)
            .sort((a, b) => b[1] - a[1])
            .slice(0, 5)
            .map(([topic, count]) => ({ topic, frequency: count }));
    }

    /**
     * Analyze how work progresses across sessions
     */
    analyzeProgressionPatterns(sessions) {
        const patterns = [];
        
        // Look for planning -> implementation -> testing patterns
        const outcomePairs = [];
        for (let i = 0; i < sessions.length - 1; i++) {
            outcomePairs.push([
                sessions[i].outcome?.type,
                sessions[i + 1].outcome?.type
            ]);
        }

        return patterns;
    }

    /**
     * Find tasks or decisions that weren't completed
     */
    findUncompletedTasks(sessions) {
        const tasks = [];
        
        sessions.forEach(session => {
            if (session.outcome?.type === 'planning' || session.outcome?.type === 'partial') {
                tasks.push({
                    sessionId: session.id,
                    description: session.outcome?.summary,
                    timestamp: session.endTime
                });
            }
        });

        return tasks;
    }

    /**
     * Get active conversation threads for a project
     */
    getActiveThreadsForProject(projectName) {
        const threads = [];
        
        this.conversationThreads.forEach((thread, threadId) => {
            if (thread.projectContext?.name === projectName && thread.status === 'active') {
                threads.push({
                    id: threadId,
                    sessionCount: thread.sessionIds.length,
                    topics: Array.from(thread.topics),
                    lastUpdated: thread.lastUpdated
                });
            }
        });

        return threads;
    }

    /**
     * Cleanup expired sessions and threads
     */
    cleanupExpiredSessions() {
        const cutoffTime = new Date();
        cutoffTime.setDate(cutoffTime.getDate() - this.options.sessionExpiryDays);

        let cleanedCount = 0;

        // Cleanup sessions
        for (const [sessionId, session] of this.sessions.entries()) {
            const sessionTime = new Date(session.endTime || session.startTime);
            if (sessionTime < cutoffTime) {
                this.sessions.delete(sessionId);
                cleanedCount++;
            }
        }

        // Cleanup project session references
        this.projectSessions.forEach((sessionIds, projectName) => {
            const validSessions = sessionIds.filter(id => this.sessions.has(id));
            if (validSessions.length !== sessionIds.length) {
                this.projectSessions.set(projectName, validSessions);
            }
        });

        if (cleanedCount > 0) {
            console.log(`[Session Tracker] Cleaned up ${cleanedCount} expired sessions`);
        }
    }

    /**
     * Generate unique thread ID
     */
    generateThreadId() {
        return 'thread-' + crypto.randomBytes(8).toString('hex');
    }

    /**
     * Load tracking data from disk
     */
    async loadTrackingData() {
        try {
            const data = await fs.readFile(this.options.trackingDataPath, 'utf8');
            const parsed = JSON.parse(data);

            // Restore sessions
            if (parsed.sessions) {
                parsed.sessions.forEach(session => {
                    this.sessions.set(session.id, session);
                });
            }

            // Restore conversation threads (convert topics Set back from array)
            if (parsed.conversationThreads) {
                parsed.conversationThreads.forEach(thread => {
                    thread.topics = new Set(thread.topics || []);
                    this.conversationThreads.set(thread.id, thread);
                });
            }

            // Restore project sessions
            if (parsed.projectSessions) {
                Object.entries(parsed.projectSessions).forEach(([project, sessionIds]) => {
                    this.projectSessions.set(project, sessionIds);
                });
            }

        } catch (error) {
            if (error.code !== 'ENOENT') {
                console.warn('[Session Tracker] Failed to load tracking data:', error.message);
            }
            // Initialize empty structures if file doesn't exist
        }
    }

    /**
     * Save tracking data to disk
     */
    async saveTrackingData() {
        try {
            const data = {
                sessions: Array.from(this.sessions.values()),
                conversationThreads: Array.from(this.conversationThreads.values()).map(thread => ({
                    ...thread,
                    topics: Array.from(thread.topics) // Convert Set to Array for JSON
                })),
                projectSessions: Object.fromEntries(this.projectSessions.entries()),
                lastSaved: new Date().toISOString()
            };

            await fs.writeFile(this.options.trackingDataPath, JSON.stringify(data, null, 2));
        } catch (error) {
            console.error('[Session Tracker] Failed to save tracking data:', error.message);
        }
    }

    /**
     * Get statistics about session tracking
     */
    getStats() {
        return {
            totalSessions: this.sessions.size,
            activeSessions: Array.from(this.sessions.values()).filter(s => s.status === 'active').length,
            totalThreads: this.conversationThreads.size,
            trackedProjects: this.projectSessions.size,
            loaded: this.loaded
        };
    }
}

// Create global session tracker instance
let globalSessionTracker = null;

/**
 * Get or create global session tracker instance
 */
function getSessionTracker(options = {}) {
    if (!globalSessionTracker) {
        globalSessionTracker = new SessionTracker(options);
    }
    return globalSessionTracker;
}

module.exports = {
    SessionTracker,
    getSessionTracker
};
```

--------------------------------------------------------------------------------
/docs/testing/regression-tests.md:
--------------------------------------------------------------------------------

```markdown
# Regression Tests

This document provides structured test scenarios for validating critical functionality and preventing regressions. Each test includes setup instructions, expected results, evidence collection, and pass/fail criteria.

## Purpose

Regression tests ensure that:
- Critical bugs don't reappear after being fixed
- Performance optimizations don't degrade over time
- Platform-specific issues are caught before release
- Integration points (MCP, HTTP API, storage backends) work correctly

## Test Categories

1. [Database Locking & Concurrency](#database-locking--concurrency)
2. [Storage Backend Integrity](#storage-backend-integrity)
3. [Dashboard Performance](#dashboard-performance)
4. [Tag Filtering Correctness](#tag-filtering-correctness)
5. [MCP Protocol Compliance](#mcp-protocol-compliance)

---

## Database Locking & Concurrency

### Test 1: Concurrent MCP Server Startup

**Context:** v8.9.0+ fixed "database is locked" errors by setting SQLite pragmas before connection

**Setup:**
1. Close all Claude Desktop instances
2. Ensure SQLite database exists at `~/Library/Application Support/mcp-memory/sqlite_vec.db` (macOS)
3. Verify `.env` contains `MCP_MEMORY_SQLITE_PRAGMAS=busy_timeout=15000,journal_mode=WAL`

**Execution:**
1. Open 3 Claude Desktop instances simultaneously (within 5 seconds)
2. In each instance, trigger memory service initialization:
   ```
   /mcp
   # Wait for MCP servers to connect
   # Try storing a memory in each instance
   ```
3. Monitor logs in `~/Library/Logs/Claude/mcp-server-memory.log`

**Expected Results:**
- ✅ All 3 instances connect successfully
- ✅ Zero "database is locked" errors in logs
- ✅ All instances show healthy status via `/api/health`
- ✅ Memory operations work in all instances

**Evidence Collection:**
```bash
# Check for lock errors
grep -i "database is locked" ~/Library/Logs/Claude/mcp-server-memory.log

# Verify pragma settings
sqlite3 ~/Library/Application\ Support/mcp-memory/sqlite_vec.db "PRAGMA busy_timeout;"
# Expected output: 15000

# Check journal mode
sqlite3 ~/Library/Application\ Support/mcp-memory/sqlite_vec.db "PRAGMA journal_mode;"
# Expected output: wal
```

**Pass Criteria:**
- ✅ Zero lock errors
- ✅ All servers initialize within 10 seconds
- ✅ Concurrent memory operations succeed
- ❌ FAIL if any server shows "database is locked"

---

### Test 2: Concurrent Memory Operations

**Context:** Test simultaneous read/write operations from multiple clients

**Setup:**
1. Start HTTP server: `uv run memory server --http`
2. Verify server is healthy: `curl http://127.0.0.1:8000/api/health`

**Execution:**
1. Run concurrent memory stores from multiple terminals:
   ```bash
   # Terminal 1
   for i in {1..50}; do
     curl -X POST http://127.0.0.1:8000/api/memories \
       -H "Content-Type: application/json" \
       -d "{\"content\":\"Test memory $i from terminal 1\",\"tags\":[\"test\",\"concurrent\"]}"
   done

   # Terminal 2 (run simultaneously)
   for i in {1..50}; do
     curl -X POST http://127.0.0.1:8000/api/memories \
       -H "Content-Type: application/json" \
       -d "{\"content\":\"Test memory $i from terminal 2\",\"tags\":[\"test\",\"concurrent\"]}"
   done
   ```

2. While stores are running, perform searches:
   ```bash
   # Terminal 3
   for i in {1..20}; do
     curl -s "http://127.0.0.1:8000/api/search" \
       -H "Content-Type: application/json" \
       -d '{"query":"test memory","limit":10}'
   done
   ```

**Expected Results:**
- ✅ All 100 memory stores complete successfully
- ✅ Zero HTTP 500 errors
- ✅ Search operations return results during writes
- ✅ No database lock errors in server logs

**Evidence Collection:**
```bash
# Count successful stores
curl -s "http://127.0.0.1:8000/api/search/by-tag" \
  -H "Content-Type: application/json" \
  -d '{"tags":["concurrent"],"limit":1000}' | jq '.memories | length'
# Expected: 100

# Check server logs for errors
tail -100 ~/Library/Logs/Claude/mcp-server-memory.log | grep -i error
```

**Pass Criteria:**
- ✅ 100 memories stored successfully
- ✅ Zero database lock errors
- ✅ Zero HTTP 500 responses
- ❌ FAIL if any operation times out or errors

---

## Storage Backend Integrity

### Test 3: Hybrid Backend Synchronization

**Context:** Verify hybrid backend syncs SQLite → Cloudflare without data loss

**Setup:**
1. Configure hybrid backend in `.env`:
   ```bash
   MCP_MEMORY_STORAGE_BACKEND=hybrid
   MCP_HYBRID_SYNC_INTERVAL=10  # Frequent sync for testing
   CLOUDFLARE_API_TOKEN=your-token
   CLOUDFLARE_ACCOUNT_ID=your-account
   CLOUDFLARE_D1_DATABASE_ID=your-db-id
   CLOUDFLARE_VECTORIZE_INDEX=mcp-memory-index
   ```
2. Clear Cloudflare backend: `python scripts/database/clear_cloudflare.py --confirm`
3. Start server: `uv run memory server --http`

**Execution:**
1. Store 10 test memories via API:
   ```bash
   for i in {1..10}; do
     curl -X POST http://127.0.0.1:8000/api/memories \
       -H "Content-Type: application/json" \
       -d "{\"content\":\"Hybrid test memory $i\",\"tags\":[\"hybrid-test\"]}"
   done
   ```

2. Wait 30 seconds (3x sync interval) for background sync

3. Query Cloudflare backend directly:
   ```bash
   python scripts/sync/check_cloudflare_sync.py --tag hybrid-test
   ```

**Expected Results:**
- ✅ All 10 memories present in SQLite (immediate)
- ✅ All 10 memories synced to Cloudflare (within 30s)
- ✅ Content hashes match between backends
- ✅ No sync errors in server logs

**Evidence Collection:**
```bash
# Check SQLite count
curl -s "http://127.0.0.1:8000/api/search/by-tag" \
  -H "Content-Type: application/json" \
  -d '{"tags":["hybrid-test"]}' | jq '.memories | length'

# Check Cloudflare count
python scripts/sync/check_cloudflare_sync.py --tag hybrid-test --count

# Compare content hashes
python scripts/sync/check_cloudflare_sync.py --tag hybrid-test --verify-hashes
```

**Pass Criteria:**
- ✅ SQLite count == Cloudflare count
- ✅ All content hashes match
- ✅ Sync completes within 30 seconds
- ❌ FAIL if any memory missing or hash mismatch

---

### Test 4: Storage Backend Switching

**Context:** Verify switching backends doesn't corrupt existing data

**Setup:**
1. Start with sqlite-vec backend, store 20 memories
2. Stop server
3. Configure hybrid backend, restart server
4. Verify all memories still accessible

**Execution:**
1. **SQLite-vec phase:**
   ```bash
   export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
   uv run memory server --http &
   SERVER_PID=$!

   # Store 20 memories
   for i in {1..20}; do
     curl -X POST http://127.0.0.1:8000/api/memories \
       -H "Content-Type: application/json" \
       -d "{\"content\":\"Backend switch test $i\",\"tags\":[\"switch-test\"]}"
   done

   kill $SERVER_PID
   ```

2. **Switch to hybrid:**
   ```bash
   export MCP_MEMORY_STORAGE_BACKEND=hybrid
   export MCP_HYBRID_SYNC_INTERVAL=10
   # Set Cloudflare credentials...
   uv run memory server --http &
   SERVER_PID=$!

   # Wait for startup
   sleep 5
   ```

3. **Verify data integrity:**
   ```bash
   curl -s "http://127.0.0.1:8000/api/search/by-tag" \
     -H "Content-Type: application/json" \
     -d '{"tags":["switch-test"]}' | jq '.memories | length'
   ```

**Expected Results:**
- ✅ All 20 memories still accessible after switch
- ✅ Memories begin syncing to Cloudflare
- ✅ No data corruption or loss
- ✅ Health check shows "hybrid" backend

**Pass Criteria:**
- ✅ 20 memories retrieved successfully
- ✅ Backend reported as "hybrid" in health check
- ✅ No errors during backend initialization
- ❌ FAIL if any memory inaccessible or corrupted

---

## Dashboard Performance

### Test 5: Page Load Performance

**Context:** Dashboard should load in <2 seconds (v7.2.2 benchmark: 25ms)

**Setup:**
1. Database with 1000+ memories
2. HTTP server running: `uv run memory server --http`
3. Dashboard at `http://127.0.0.1:8000/`

**Execution:**
```bash
# Measure page load time (10 iterations)
for i in {1..10}; do
  time curl -s "http://127.0.0.1:8000/" > /dev/null
done
```

**Expected Results:**
- ✅ Average load time <500ms
- ✅ All static assets (HTML/CSS/JS) load successfully
- ✅ No JavaScript errors in browser console
- ✅ Dashboard functional on first load

**Evidence Collection:**
```bash
# Browser DevTools → Network tab
# - Check "Load" time in waterfall
# - Verify no 404/500 errors
# - Measure DOMContentLoaded and Load events

# Server-side timing
time curl -s "http://127.0.0.1:8000/" -o /dev/null -w "%{time_total}\n"
```

**Pass Criteria:**
- ✅ Page load <2 seconds (target: <500ms)
- ✅ Zero resource loading errors
- ✅ Dashboard interactive immediately
- ❌ FAIL if >2 seconds or JavaScript errors

---

### Test 6: Memory Operation Performance

**Context:** CRUD operations should complete in <1 second (v7.2.2 benchmark: 26ms)

**Setup:**
1. Clean database: `python scripts/database/reset_database.py --confirm`
2. HTTP server running

**Execution:**
1. **Store operation:**
   ```bash
   time curl -s -X POST http://127.0.0.1:8000/api/memories \
     -H "Content-Type: application/json" \
     -d '{"content":"Performance test memory","tags":["perf-test"]}' \
     -w "\n%{time_total}\n"
   ```

2. **Search operation:**
   ```bash
   time curl -s "http://127.0.0.1:8000/api/search" \
     -H "Content-Type: application/json" \
     -d '{"query":"performance test","limit":10}' \
     -w "\n%{time_total}\n"
   ```

3. **Tag search operation:**
   ```bash
   time curl -s "http://127.0.0.1:8000/api/search/by-tag" \
     -H "Content-Type: application/json" \
     -d '{"tags":["perf-test"]}' \
     -w "\n%{time_total}\n"
   ```

4. **Delete operation:**
   ```bash
   HASH=$(curl -s "http://127.0.0.1:8000/api/search/by-tag" \
     -H "Content-Type: application/json" \
     -d '{"tags":["perf-test"]}' | jq -r '.memories[0].hash')

   time curl -s -X DELETE "http://127.0.0.1:8000/api/memories/$HASH" \
     -w "\n%{time_total}\n"
   ```

**Expected Results:**
- ✅ Store: <100ms
- ✅ Search: <200ms
- ✅ Tag search: <100ms
- ✅ Delete: <100ms

**Pass Criteria:**
- ✅ All operations <1 second
- ✅ HTTP 200 responses
- ✅ Correct response format
- ❌ FAIL if any operation >1 second

---

## Tag Filtering Correctness

### Test 7: Exact Tag Matching (No False Positives)

**Context:** v8.13.0 fixed tag filtering to prevent false positives (e.g., "python" shouldn't match "python3")

**Setup:**
1. Clear database
2. Store memories with similar tags

**Execution:**
```bash
# Store test memories
curl -X POST http://127.0.0.1:8000/api/memories \
  -H "Content-Type: application/json" \
  -d '{"content":"Python programming","tags":["python"]}'

curl -X POST http://127.0.0.1:8000/api/memories \
  -H "Content-Type: application/json" \
  -d '{"content":"Python 3 features","tags":["python3"]}'

curl -X POST http://127.0.0.1:8000/api/memories \
  -H "Content-Type: application/json" \
  -d '{"content":"CPython internals","tags":["cpython"]}'

curl -X POST http://127.0.0.1:8000/api/memories \
  -H "Content-Type: application/json" \
  -d '{"content":"Jython compatibility","tags":["jython"]}'

# Search for exact tag "python"
curl -s "http://127.0.0.1:8000/api/search/by-tag" \
  -H "Content-Type: application/json" \
  -d '{"tags":["python"]}' | jq '.memories | length'
```

**Expected Results:**
- ✅ Searching "python" returns exactly 1 memory
- ✅ Does NOT return python3, cpython, jython
- ✅ Tag matching respects exact token boundaries (no partial/substring matches)

**Evidence Collection:**
```bash
# Test each tag variation
for tag in python python3 cpython jython; do
  echo "Testing tag: $tag"
  curl -s "http://127.0.0.1:8000/api/search/by-tag" \
    -H "Content-Type: application/json" \
    -d "{\"tags\":[\"$tag\"]}" | jq -r '.memories[].tags[]'
done
```

**Pass Criteria:**
- ✅ Each search returns only exact tag matches
- ✅ Zero false positives (substring matches)
- ✅ All 4 memories retrievable individually
- ❌ FAIL if any false positive occurs

---

### Test 8: Tag Index Usage (Performance)

**Context:** v8.13.0 added tag normalization with relational tables for O(log n) performance

**Setup:**
1. Database with 10,000+ memories
2. Verify migration completed: `python scripts/database/validate_migration.py`

**Execution:**
```bash
# Check query plan uses index
sqlite3 ~/Library/Application\ Support/mcp-memory/sqlite_vec.db <<EOF
EXPLAIN QUERY PLAN
SELECT DISTINCT m.*
FROM memories m
JOIN memory_tags mt ON m.id = mt.memory_id
JOIN tags t ON mt.tag_id = t.id
WHERE t.name = 'test-tag';
EOF
```

**Expected Results:**
- ✅ Query plan shows `SEARCH` (using index)
- ✅ Query plan does NOT show `SCAN` (table scan)
- ✅ Tag search completes in <200ms even with 10K memories

**Evidence Collection:**
```bash
# Verify index exists
sqlite3 ~/Library/Application\ Support/mcp-memory/sqlite_vec.db \
  "SELECT name FROM sqlite_master WHERE type='index' AND name='idx_memory_tags_tag_id';"

# Benchmark tag search
time curl -s "http://127.0.0.1:8000/api/search/by-tag" \
  -H "Content-Type: application/json" \
  -d '{"tags":["test-tag"]}' -o /dev/null -w "%{time_total}\n"
```

**Pass Criteria:**
- ✅ Index exists and is used (SEARCH in query plan)
- ✅ Tag search <200ms with 10K+ memories
- ✅ Sub-linear scaling (doubling the data should less than double the query time)
- ❌ FAIL if SCAN appears or >500ms with 10K memories

---

## MCP Protocol Compliance

### Test 9: MCP Tool Schema Validation

**Context:** Ensure all MCP tools conform to protocol schema

**Setup:**
1. Start MCP server: `uv run memory server`
2. Use MCP Inspector: `npx @modelcontextprotocol/inspector uv run memory server`

**Execution:**
1. Connect with MCP Inspector
2. List all tools: `tools/list`
3. Validate each tool schema:
   - Required fields present (name, description, inputSchema)
   - Input schema is valid JSON Schema
   - All parameters documented

**Expected Results:**
- ✅ All 13 core tools listed
- ✅ Each tool has valid JSON Schema
- ✅ No schema validation errors
- ✅ Tool descriptions are concise (<300 tokens each)

**Evidence Collection:**
```bash
# Capture tools/list output
npx @modelcontextprotocol/inspector uv run memory server \
  --command "tools/list" > tools_schema.json

# Validate schema format
cat tools_schema.json | jq '.tools[] | {name, inputSchema}'
```

**Pass Criteria:**
- ✅ 13 tools exposed (consolidated down from 26 in v8.13.0)
- ✅ All schemas valid JSON Schema Draft 07
- ✅ No missing required fields
- ❌ FAIL if any tool lacks proper schema

---

### Test 10: MCP Tool Execution

**Context:** Verify all tools execute correctly via MCP protocol

**Setup:**
1. MCP server running
2. MCP Inspector connected

**Execution:**
1. **Test store_memory:**
   ```json
   {
     "name": "store_memory",
     "arguments": {
       "content": "MCP protocol test memory",
       "tags": ["mcp-test", "protocol-validation"],
       "metadata": {"type": "test"}
     }
   }
   ```

2. **Test recall_memory:**
   ```json
   {
     "name": "recall_memory",
     "arguments": {
       "query": "last week",
       "n_results": 5
     }
   }
   ```

3. **Test search_by_tag:**
   ```json
   {
     "name": "search_by_tag",
     "arguments": {
       "tags": ["mcp-test"],
       "match_mode": "any"
     }
   }
   ```

4. **Test delete_by_tag:**
   ```json
   {
     "name": "delete_by_tag",
     "arguments": {
       "tags": ["mcp-test"],
       "match_mode": "all"
     }
   }
   ```

**Expected Results:**
- ✅ All tool calls return valid MCP responses
- ✅ No protocol errors or timeouts
- ✅ Response format matches tool schema
- ✅ Operations reflect in database

**Pass Criteria:**
- ✅ 4/4 tools execute successfully
- ✅ Responses valid JSON
- ✅ Database state matches operations
- ❌ FAIL if any tool returns error or invalid format

---

## Test Execution Guide

### Running All Regression Tests

```bash
# 1. Set up test environment
export MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
export MCP_MEMORY_SQLITE_PRAGMAS=busy_timeout=15000,journal_mode=WAL

# 2. Clear test data
python scripts/database/reset_database.py --confirm

# 3. Run automated tests
pytest tests/unit/test_exact_tag_matching.py
pytest tests/unit/test_query_plan_validation.py
pytest tests/unit/test_performance_benchmark.py

# 4. Run manual tests (follow each test's Execution section)
# - Document results in checklist format
# - Capture evidence (logs, screenshots, timing data)
# - Mark pass/fail for each test

# 5. Generate test report
python scripts/testing/generate_regression_report.py \
  --output docs/testing/regression-report-$(date +%Y%m%d).md
```

### Test Frequency

- **Pre-Release:** All regression tests MUST pass
- **Post-PR Merge:** Run affected test categories
- **Weekly:** Automated subset (performance, tag filtering)
- **Monthly:** Full regression suite

### Reporting Issues

If any test fails:
1. Create GitHub issue with label `regression`
2. Include test name, evidence, and reproduction steps
3. Link to relevant commit/PR that may have caused regression
4. Add to release blockers if critical functionality affected

---

## Appendix: Test Data Generation

### Create Large Test Dataset

```bash
# Generate 10,000 test memories for performance testing
python scripts/testing/generate_test_data.py \
  --count 10000 \
  --tags-per-memory 3 \
  --output test-data-10k.json

# Import into database
curl -X POST http://127.0.0.1:8000/api/memories/batch \
  -H "Content-Type: application/json" \
  -d @test-data-10k.json
```

### Cleanup Test Data

```bash
# Remove all test data by tag
curl -X POST http://127.0.0.1:8000/api/memories/delete-by-tag \
  -H "Content-Type: application/json" \
  -d '{"tags": ["test", "perf-test", "mcp-test", "hybrid-test", "switch-test"], "match_mode": "any"}'
```

---

**Last Updated:** 2025-11-05
**Version:** 1.0
**Related:** [Release Checklist](release-checklist.md), [PR Review Guide](pr-review-guide.md)

```

--------------------------------------------------------------------------------
/docs/integration/multi-client.md:
--------------------------------------------------------------------------------

```markdown
# Multi-Client Setup Guide

This comprehensive guide covers setting up MCP Memory Service for multiple clients, enabling shared memory access across different applications and devices.

## Overview

MCP Memory Service supports multi-client access through several deployment patterns:

1. **🌟 Integrated Setup** (Easiest - during installation)
2. **📁 Shared File Access** (Local networks with shared storage)
3. **🌐 Centralized HTTP/SSE Server** (Distributed teams and cloud deployment)

## 🌟 Integrated Setup (Recommended)

### During Installation

The easiest way to configure multi-client access is during the initial installation:

```bash
# Run the installer - you'll be prompted for multi-client setup
python install.py

# When prompted, choose 'y':
# 🌐 Multi-Client Access Available!
# Would you like to configure multi-client access? (y/N): y
```

**Benefits of integrated setup:**
- ✅ Automatic detection of Claude Desktop, VS Code, Continue, Cursor, and other MCP clients
- ✅ Universal compatibility beyond just Claude applications
- ✅ Zero manual configuration required
- ✅ Future-proof setup for any MCP application

### Command Line Options

```bash
# Automatic multi-client setup (no prompts)
python install.py --setup-multi-client

# Skip the multi-client prompt entirely
python install.py --skip-multi-client-prompt

# Combined with other options
python install.py --storage-backend sqlite_vec --setup-multi-client
```

### Supported Applications

The integrated setup automatically detects and configures:

#### Automatically Configured
- **Claude Desktop**: Updates `claude_desktop_config.json` with multi-client settings
- **Continue IDE**: Modifies Continue configuration files
- **VS Code MCP Extension**: Updates VS Code MCP settings
- **Cursor**: Configures Cursor MCP integration
- **Generic MCP Clients**: Updates `.mcp.json` and similar configuration files

#### Manual Configuration Required
- **Custom MCP implementations**: May require manual configuration file updates
- **Enterprise MCP clients**: Check with your IT department for configuration requirements

## 📁 Shared File Access (Local Networks)

### Overview

For local networks with shared storage, multiple clients can access the same SQLite database using Write-Ahead Logging (WAL) mode.

### Quick Setup

1. **Run the setup script:**
   ```bash
   python setup_multi_client_complete.py
   ```

2. **Configure shared database location:**
   ```bash
   # Path to the SQLite-vec database file (folder will be created if needed)
   export MCP_MEMORY_SQLITE_PATH="/shared/network/mcp_memory/memory.db"

   # WAL is enabled by default by the service; no extra env needed
   ```

3. **Update each client configuration** to point to the shared location.

### Technical Implementation

The shared file access uses SQLite's WAL (Write-Ahead Logging) mode for concurrent access:

- **WAL Mode**: Enables multiple readers and one writer simultaneously
- **File Locking**: Handles concurrent access safely
- **Automatic Recovery**: SQLite handles crash recovery automatically

### Configuration Example

For Claude Desktop on each client machine:

```json
{
  "mcpServers": {
    "memory": {
      "command": "python",
      "args": ["/path/to/mcp-memory-service/src/mcp_memory_service/server.py"],
      "env": {
        "MCP_MEMORY_SQLITE_PATH": "/shared/network/mcp_memory/memory.db",
        "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec"
      }
    }
  }
}
```

### Network Storage Requirements

- **NFS/SMB Share**: Properly configured network file system
- **File Permissions**: Read/write access for all client users
- **Network Reliability**: Stable network connection to prevent corruption

## 🌐 Centralized HTTP/SSE Server (Cloud Deployment)

### Why This Approach?

- ✅ **True Concurrency**: Proper handling of multiple simultaneous clients
- ✅ **Real-time Updates**: Server-Sent Events (SSE) push changes to all clients instantly
- ✅ **Cross-platform**: Works from any device with HTTP access
- ✅ **Secure**: Optional API key authentication
- ✅ **Scalable**: Can handle many concurrent clients
- ✅ **Cloud-friendly**: No file locking issues

### Architecture

```
┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
│   Client PC 1   │    │   Client PC 2   │    │   Client PC 3   │
│   (Claude App)  │    │   (VS Code)     │    │   (Web Client)  │
└─────────┬───────┘    └─────────┬───────┘    └─────────┬───────┘
          │                      │                      │
          │         HTTP/SSE API │                      │
          └──────────────────────┼──────────────────────┘
                                 │
                    ┌─────────────▼──────────────┐
                    │     Central Server         │
                    │  ┌─────────────────────┐   │
                    │  │ MCP Memory Service  │   │
                    │  │   HTTP/SSE Server   │   │
                    │  └─────────────────────┘   │
                    │  ┌─────────────────────┐   │
                    │  │   SQLite-vec DB     │   │
                    │  │   (Single Source)   │   │
                    │  └─────────────────────┘   │
                    └────────────────────────────┘
```

### Server Installation

1. **Install on your server machine:**
   ```bash
   git clone https://github.com/doobidoo/mcp-memory-service.git
   cd mcp-memory-service
   python install.py --server-mode --storage-backend sqlite_vec
   ```

2. **Configure HTTP server:**
   ```bash
   export MCP_HTTP_HOST=0.0.0.0
   export MCP_HTTP_PORT=8000
   export MCP_API_KEY=your-secure-api-key
   ```

3. **Start the HTTP server:**
   ```bash
   python scripts/run_http_server.py
   ```

### Client Configuration (HTTP Mode)

There are two reliable ways for clients to connect to the centralized server:

- Direct Streamable HTTP (for clients that natively support MCP Streamable HTTP)
- Via mcp-proxy (for stdio-only clients like Codex)

Option A — Direct Streamable HTTP (preferred when supported):

```json
{
  "mcpServers": {
    "memory": {
      "transport": "streamablehttp",
      "url": "http://your-server:8000/mcp",
      "headers": {
        "Authorization": "Bearer your-secure-api-key"
      }
    }
  }
}
```

Option B — mcp-proxy bridge (works with any stdio-only client):

```json
{
  "mcpServers": {
    "memory": {
      "command": "mcp-proxy",
      "args": [
        "http://your-server:8000/mcp",
        "--transport=streamablehttp"
      ],
      "env": {
        "API_ACCESS_TOKEN": "your-secure-api-key"
      }
    }
  }
}
```

### Security Configuration

#### API Key Authentication

```bash
# Generate a secure API key
export MCP_API_KEY=$(openssl rand -hex 32)

# Configure HTTPS (recommended for production)
export MCP_HTTPS_ENABLED=true
export MCP_SSL_CERT_FILE=/path/to/cert.pem
export MCP_SSL_KEY_FILE=/path/to/key.pem
```

#### Firewall Configuration

```bash
# Allow HTTP/HTTPS access (adjust port as needed)
sudo ufw allow 8000/tcp
sudo ufw allow 8443/tcp  # For HTTPS
```

### Docker Deployment

For containerized deployment:

```yaml
# docker-compose.yml
version: '3.8'
services:
  mcp-memory-service:
    build: .
    ports:
      - "8000:8000"
    environment:
      - MCP_HTTP_HOST=0.0.0.0
      - MCP_HTTP_PORT=8000
      - MCP_API_KEY=${MCP_API_KEY}
      - MCP_MEMORY_STORAGE_BACKEND=sqlite_vec
    volumes:
      - ./data:/app/data
    restart: unless-stopped
```

```bash
# Deploy with Docker Compose
docker-compose up -d
```

## Advanced Configuration

Note: For the HTTP server interface, use `MCP_HTTP_HOST`, `MCP_HTTP_PORT`, and `MCP_API_KEY`. These supersede older `MCP_MEMORY_HTTP_*` names in legacy docs. Client-side tools may use different env vars (see below).

### Client Environment Variables

- mcp-proxy: set `API_ACCESS_TOKEN` to pass the bearer token automatically.
- Memory MCP Bridge (`docker-compose/mcp-gateway/scripts/memory-mcp-bridge.js`): set `MCP_MEMORY_API_KEY` and optionally `MCP_MEMORY_HTTP_ENDPOINT`, `MCP_MEMORY_AUTO_DISCOVER`, `MCP_MEMORY_PREFER_HTTPS`.
- Direct Streamable HTTP clients: provide `Authorization: Bearer <MCP_API_KEY>` via headers (no special env var required).

### Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `MCP_HTTP_ENABLED` | `false` | Enable HTTP mode (FastAPI + Streamable HTTP) |
| `MCP_HTTP_HOST` | `0.0.0.0` | HTTP server bind address |
| `MCP_HTTP_PORT` | `8000` | HTTP server port |
| `MCP_API_KEY` | `none` | API key for auth (sent as `Authorization: Bearer ...`) |
| `MCP_HTTPS_ENABLED` | `false` | Enable HTTPS termination |
| `MCP_SSL_CERT_FILE` | `none` | Path to TLS certificate |
| `MCP_SSL_KEY_FILE` | `none` | Path to TLS private key |
| `MCP_CORS_ORIGINS` | `*` | CSV list of allowed origins |
| `MCP_SSE_HEARTBEAT` | `30` | SSE heartbeat interval (seconds) |
| `MCP_MEMORY_STORAGE_BACKEND` | `sqlite_vec` | `sqlite_vec`, `chroma`, or `cloudflare` |
| `MCP_MEMORY_SQLITE_PATH` | `<base>/sqlite_vec.db` | SQLite-vec database file path |
| `MCP_MEMORY_SQLITEVEC_PATH` | `none` | Alternate var for SQLite path (if set, used) |
| `MCP_MEMORY_SQLITE_PRAGMAS` | `none` | Override SQLite pragmas (e.g. `busy_timeout=15000,cache_size=20000`) |
| `MCP_MDNS_ENABLED` | `true` | Enable mDNS advertising/discovery |
| `MCP_MDNS_SERVICE_NAME` | `MCP Memory Service` | mDNS service name |
| `MCP_MDNS_SERVICE_TYPE` | `_mcp-memory._tcp.local.` | mDNS service type |
| `MCP_MDNS_DISCOVERY_TIMEOUT` | `5` | mDNS discovery timeout (seconds) |

Deprecated (replaced):
- `MCP_MEMORY_HTTP_HOST` → `MCP_HTTP_HOST`
- `MCP_MEMORY_HTTP_PORT` → `MCP_HTTP_PORT`
- `MCP_MEMORY_API_KEY` → `MCP_API_KEY` (server HTTP mode). Note: the standalone Memory MCP Bridge continues to use `MCP_MEMORY_API_KEY`.
- `MCP_MEMORY_ENABLE_WAL`: not needed; WAL is enabled by default. Use `MCP_MEMORY_SQLITE_PRAGMAS` to change.
- `MCP_MEMORY_ENABLE_SSE`: not required; SSE events are enabled with the HTTP server.
- `MCP_MEMORY_MULTI_CLIENT`, `MCP_MEMORY_MAX_CLIENTS`: not used.

### Performance Tuning

#### SQLite Configuration

```bash
# Optimize for concurrent access (v8.9.0+)
# Recommended values for HTTP + MCP server concurrent access
export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=15000,cache_size=20000"

# Note: WAL mode (journal_mode=WAL) is enabled by default
# These values are automatically configured by the installer for hybrid/sqlite_vec backends
```

#### HTTP Server Tuning

```bash
# Adjust for high concurrency
export MCP_HTTP_WORKERS=4
export MCP_HTTP_TIMEOUT=30
export MCP_HTTP_KEEPALIVE=true
```

## Troubleshooting

### Common Issues

#### 1. Database Lock Errors

**Symptom**: `database is locked` errors during concurrent HTTP + MCP server access

**Solution (v8.9.0+)**: Configure proper SQLite pragmas for concurrent access:

```bash
# Set recommended pragma values (15 second timeout, larger cache)
export MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=15000,cache_size=20000"

# Restart both HTTP server and MCP server to apply changes
# Note: The installer automatically configures these values for hybrid/sqlite_vec backends
```

**Root Cause**: Default `busy_timeout=5000ms` (5 seconds) is too short when both HTTP server and MCP server access the same SQLite database. The fix increases timeout to 15 seconds and cache to 20,000 pages.

**Additional checks** (if issue persists):
```bash
# Verify WAL mode is enabled (should be default)
sqlite3 /path/to/memory.db "PRAGMA journal_mode;"
# Should show: wal

# Check file permissions
chmod 666 /path/to/memory.db
chmod 666 /path/to/memory.db-wal 2>/dev/null || true
chmod 666 /path/to/memory.db-shm 2>/dev/null || true
```

#### 2. Network Access Issues

**Symptom**: Clients can't connect to HTTP server
**Solution**: Check firewall and network connectivity:

```bash
# Test server connectivity
curl http://your-server:8000/health

# Check firewall rules
sudo ufw status
```

#### 3. Configuration Conflicts

**Symptom**: Clients use different configurations
**Solution**: Verify all clients use the same settings:

```bash
# Check environment variables on each client
env | grep MCP_MEMORY

# Verify database file path matches
ls -la "$MCP_MEMORY_SQLITE_PATH"
```

### Diagnostic Commands

#### Check Multi-Client Status

```bash
# Test multi-client setup
python scripts/test_multi_client.py

# Verify database access
python -c "
import os, sqlite3
db = os.environ.get('MCP_MEMORY_SQLITE_PATH', '')
conn = sqlite3.connect(db) if db else None
print(f'Database accessible: {bool(conn)} (path={db})')
conn and conn.close()
"
```

#### Monitor Client Connections

```bash
# For HTTP server deployment
curl http://your-server:8000/stats

# Check active connections
netstat -an | grep :8000
```

## Migration from Single-Client

### Upgrading Existing Installation

1. **Backup existing data:**
   ```bash
   python scripts/backup_memories.py
   ```

2. **Run multi-client setup:**
   ```bash
   python install.py --setup-multi-client --migrate-existing
   ```

3. **Update client configurations** as needed.

### Data Migration

The installer automatically handles data migration, but you can also run it manually:

```bash
# Migrate to shared database location
python scripts/migrate_to_multi_client.py \
  --source ~/.mcp_memory_chroma \
  --target /shared/mcp_memory
```

## Related Documentation

- [Installation Guide](../installation/master-guide.md) - General installation instructions
- [Deployment Guide](../deployment/docker.md) - Docker and cloud deployment
- [Troubleshooting](../troubleshooting/general.md) - Multi-client specific issues
- [API Reference](../IMPLEMENTATION_PLAN_HTTP_SSE.md) - HTTP/SSE API documentation

## Client Setup Recipes (Codex, Cursor, Qwen, Gemini)

This section provides practical, copy-pasteable setups for popular MCP clients. Use Streamable HTTP at `http://<host>:8000/mcp` when supported, or bridge via `mcp-proxy` for stdio-only clients.

Important:
- Server API key: set `MCP_API_KEY` on the server. Clients must send `Authorization: Bearer <MCP_API_KEY>`.
- Our MCP endpoint is Streamable HTTP at `/mcp` (not the SSE events feed at `/api/events`).

### Codex (via mcp-proxy)

Codex does not natively support HTTP transport. Use `mcp-proxy` to bridge stdio ⇄ Streamable HTTP.

1) Install mcp-proxy
```bash
pipx install mcp-proxy  # or: uv tool install mcp-proxy
```

2) Update Codex MCP config (see Codex docs for exact file location):
```json
{
  "mcpServers": {
    "memory": {
      "command": "mcp-proxy",
      "args": [
        "http://your-server:8000/mcp",
        "--transport=streamablehttp"
      ],
      "env": {
        "API_ACCESS_TOKEN": "your-secure-api-key"
      }
    }
  }
}
```

Reference template: `examples/codex-mcp-config.json` in this repository.

Notes:
- Replace `your-server` and `your-secure-api-key` accordingly. For local testing use `http://127.0.0.1:8000/mcp`.
- Alternatively pass headers explicitly: `"args": ["http://.../mcp", "--transport=streamablehttp", "--headers", "Authorization", "Bearer your-secure-api-key"]`.

### Cursor

Pick one of these depending on your deployment:

- Option A — Local stdio (single machine):
```json
{
  "mcpServers": {
    "memory": {
      "command": "uv",
      "args": ["--directory", "/path/to/mcp-memory-service", "run", "memory"],
      "env": {
        "MCP_MEMORY_STORAGE_BACKEND": "sqlite_vec"
      }
    }
  }
}
```

- Option B — Remote central server via mcp-proxy (recommended for multi-client):
```json
{
  "mcpServers": {
    "memory": {
      "command": "mcp-proxy",
      "args": [
        "http://your-server:8000/mcp",
        "--transport=streamablehttp"
      ],
      "env": {
        "API_ACCESS_TOKEN": "your-secure-api-key"
      }
    }
  }
}
```

- Option C — Direct Streamable HTTP (if your Cursor version supports it):
```json
{
  "mcpServers": {
    "memory": {
      "transport": "streamablehttp",
      "url": "http://your-server:8000/mcp",
      "headers": { "Authorization": "Bearer your-secure-api-key" }
    }
  }
}
```

### Qwen

Qwen clients that support MCP can connect either directly via Streamable HTTP or through `mcp-proxy` when only stdio is available. If your Qwen client UI accepts an MCP server list, use one of the Cursor-style examples above. If it only lets you specify a command, use the `mcp-proxy` form:

```json
{
  "mcpServers": {
    "memory": {
      "command": "mcp-proxy",
      "args": [
        "http://your-server:8000/mcp",
        "--transport=streamablehttp"
      ],
      "env": { "API_ACCESS_TOKEN": "your-secure-api-key" }
    }
  }
}
```

Tips:
- Some Qwen distributions expose MCP configuration in a UI. Map fields as: transport = Streamable HTTP, URL = `http://<host>:8000/mcp`, header `Authorization: Bearer <key>`.

### Gemini

Gemini-based IDE integrations (e.g., Gemini Code Assist in VS Code/JetBrains) typically support MCP via a config file or UI. Use either direct Streamable HTTP or `mcp-proxy`:

- Direct Streamable HTTP (when supported):
```json
{
  "mcpServers": {
    "memory": {
      "transport": "streamablehttp",
      "url": "https://your-server:8443/mcp",
      "headers": { "Authorization": "Bearer your-secure-api-key" }
    }
  }
}
```

- Via mcp-proxy (works everywhere):
```json
{
  "mcpServers": {
    "memory": {
      "command": "mcp-proxy",
      "args": [
        "https://your-server:8443/mcp",
        "--transport=streamablehttp"
      ],
      "env": { "API_ACCESS_TOKEN": "your-secure-api-key" }
    }
  }
}
```

If your Gemini client expects a command-only entry, prefer the `mcp-proxy` form.

---

Troubleshooting client connections:
- Ensure you’re using `/mcp` (Streamable HTTP), not `/api/events` (SSE).
- Verify server exports `MCP_API_KEY` and clients send `Authorization: Bearer ...`.
- For remote setups, test reachability: `curl -i http://your-server:8000/api/health`.
- If a client doesn’t support Streamable HTTP, use `mcp-proxy`.

```

--------------------------------------------------------------------------------
/scripts/quality/phase1_dead_code_analysis.md:
--------------------------------------------------------------------------------

```markdown
# Phase 1: Dead Code Removal Analysis for Issue #240

**Generated:** 2025-11-24
**Based on:** pyscn report `analyze_20251123_214224.html`
**Current Health Score:** 63/100 (Grade C)
**Dead Code Score:** 70/100 (27 issues, 2 critical)

---

## Executive Summary

Based on the pyscn analysis, this codebase has **27 dead code issues** that need to be addressed. After detailed analysis of the report, I've identified the following breakdown:

- **Total Issues:** 27
- **Critical:** 2 (1 critical unreachable code block in `scripts/installation/install.py`)
- **Warnings:** 25 (unreachable branches in the same function)
- **Safe to Fix Immediately:** 27 (all issues share one function and one clear root cause; the fix is a code restructuring rather than a simple deletion)
- **Needs Investigation:** 0
- **False Positives:** 0

**Estimated Health Score Improvement:**
- **Before:** Dead Code 70/100, Overall 63/100
- **After:** Dead Code 85-90/100, Overall 68-72/100
- **Confidence:** High (95%)

### Root Cause Analysis

All 27 dead code issues stem from a **single premature return statement** in the `configure_paths()` function at line 1358 of `scripts/installation/install.py`. This return statement makes 77 lines of critical Claude Desktop configuration code unreachable.

**Impact:**
- Claude Desktop configuration is never applied during installation
- Users must manually configure Claude Desktop after installation
- Installation verification may fail silently

---

## Critical Dead Code (Priority 1)

### Issue 1: Unreachable Claude Desktop Configuration Block

**File:** `scripts/installation/install.py`
**Function:** `configure_paths`
**Lines:** 1360-1436 (77 lines)
**Type:** Unreachable code after return statement
**Severity:** **CRITICAL**
**References:** 0 (verified - this is genuine dead code)

**Root Cause:**
Line 1358 contains `return False` inside a try-except block, causing the entire Claude Desktop configuration logic to be unreachable.

**Code Context (Lines 1350-1365):**
```python
    try:
        test_file = os.path.join(backups_path, '.write_test')
        with open(test_file, 'w') as f:
            f.write('test')
        os.remove(test_file)
        print_success("Storage directories created and are writable")
    except Exception as e:
        print_error(f"Failed to test backups directory: {e}")
        return False  # ← PROBLEM: This return makes all code below unreachable

        # Configure Claude Desktop if available  # ← UNREACHABLE
        claude_config_paths = [
            home_dir / 'Library' / 'Application Support' / 'Claude' / 'claude_desktop_config.json',
            home_dir / '.config' / 'Claude' / 'claude_desktop_config.json',
            Path('claude_config') / 'claude_desktop_config.json'
        ]
```

**Unreachable Code Block (Lines 1360-1436):**
```python
# Configure Claude Desktop if available
claude_config_paths = [
    home_dir / 'Library' / 'Application Support' / 'Claude' / 'claude_desktop_config.json',
    home_dir / '.config' / 'Claude' / 'claude_desktop_config.json',
    Path('claude_config') / 'claude_desktop_config.json'
]

for config_path in claude_config_paths:
    if config_path.exists():
        print_info(f"Found Claude Desktop config at {config_path}")
        try:
            import json
            with open(config_path, 'r') as f:
                config = json.load(f)

            # Update or add MCP Memory configuration
            if 'mcpServers' not in config:
                config['mcpServers'] = {}

            # Create environment configuration based on storage backend
            env_config = {
                "MCP_MEMORY_BACKUPS_PATH": str(backups_path),
                "MCP_MEMORY_STORAGE_BACKEND": storage_backend
            }

            if storage_backend in ['sqlite_vec', 'hybrid']:
                env_config["MCP_MEMORY_SQLITE_PATH"] = str(storage_path)
                env_config["MCP_MEMORY_SQLITE_PRAGMAS"] = "busy_timeout=15000,cache_size=20000"

            if storage_backend in ['hybrid', 'cloudflare']:
                cloudflare_env_vars = [
                    'CLOUDFLARE_API_TOKEN',
                    'CLOUDFLARE_ACCOUNT_ID',
                    'CLOUDFLARE_D1_DATABASE_ID',
                    'CLOUDFLARE_VECTORIZE_INDEX'
                ]
                for var in cloudflare_env_vars:
                    value = os.environ.get(var)
                    if value:
                        env_config[var] = value

            if storage_backend == 'chromadb':
                env_config["MCP_MEMORY_CHROMA_PATH"] = str(storage_path)

            # Create or update the memory server configuration
            if system_info["is_windows"]:
                script_path = os.path.abspath("memory_wrapper.py")
                config['mcpServers']['memory'] = {
                    "command": "python",
                    "args": [script_path],
                    "env": env_config
                }
                print_info("Configured Claude Desktop to use memory_wrapper.py for Windows")
            else:
                config['mcpServers']['memory'] = {
                    "command": "uv",
                    "args": [
                        "--directory",
                        os.path.abspath("."),
                        "run",
                        "memory"
                    ],
                    "env": env_config
                }

            with open(config_path, 'w') as f:
                json.dump(config, f, indent=2)

            print_success("Updated Claude Desktop configuration")
        except Exception as e:
            print_warning(f"Failed to update Claude Desktop configuration: {e}")
        break

return True
```

**Classification:** ✅ **Safe to Remove and Fix**

**Recommended Fix:**
The `return False` at line 1358 must be replaced with a warning (as shown in the AFTER code below and in step 3 of the manual fix script), and the Claude Desktop configuration code (lines 1360-1436) needs to be **moved outside the try-except block** so it executes regardless of the write test result.

**Fix Strategy:**
1. Remove lines 1360-1436 from current location (inside except block)
2. Dedent and move this code block to after line 1358 (after the except block closes)
3. Ensure proper indentation and flow control

**Detailed Fix:**

```python
# BEFORE (Current broken code):
    try:
        test_file = os.path.join(backups_path, '.write_test')
        with open(test_file, 'w') as f:
            f.write('test')
        os.remove(test_file)
        print_success("Storage directories created and are writable")
    except Exception as e:
        print_error(f"Failed to test backups directory: {e}")
        return False

        # Configure Claude Desktop if available  # ← UNREACHABLE
        claude_config_paths = [...]

# AFTER (Fixed code):
    try:
        test_file = os.path.join(backups_path, '.write_test')
        with open(test_file, 'w') as f:
            f.write('test')
        os.remove(test_file)
        print_success("Storage directories created and are writable")
    except Exception as e:
        print_error(f"Failed to test backups directory: {e}")
        # Don't return False here - we can still configure Claude Desktop
        print_warning("Continuing with Claude Desktop configuration despite write test failure")

    # Configure Claude Desktop if available  # ← NOW REACHABLE
    claude_config_paths = [
        home_dir / 'Library' / 'Application Support' / 'Claude' / 'claude_desktop_config.json',
        home_dir / '.config' / 'Claude' / 'claude_desktop_config.json',
        Path('claude_config') / 'claude_desktop_config.json'
    ]

    for config_path in claude_config_paths:
        # ... rest of configuration logic ...
        break

    return True
```

**Verification Command:**
```bash
# After fix, verify with:
python -m py_compile scripts/installation/install.py
pyscn analyze scripts/installation/install.py --dead-code
```

---

## Detailed Issue Breakdown (All 27 Issues)

All 27 issues are variations of the same root cause. Here's the complete list from pyscn:

| # | Lines | Severity | Reason | Description |
|---|-------|----------|--------|-------------|
| 1 | 1361-1365 | Critical | unreachable_after_return | Comment and variable declarations |
| 2 | 1367-1436 | Warning | unreachable_branch | Entire for loop and configuration logic |
| 3 | 1368-1436 | Warning | unreachable_branch | For loop body |
| 4 | 1369-1369 | Warning | unreachable_branch | If condition check |
| 5 | 1371-1371 | Warning | unreachable_branch | Import statement |
| 6 | 1372-1373 | Warning | unreachable_branch | File read |
| 7 | 1373-1373 | Warning | unreachable_branch | JSON load |
| 8 | 1376-1377 | Warning | unreachable_branch | Config check |
| 9 | 1377-1377 | Warning | unreachable_branch | Dictionary assignment |
| 10 | 1380-1388 | Warning | unreachable_branch | env_config creation |
| ... | ... | ... | ... | (17 more warnings for nested code blocks) |

**Note:** These are all sub-issues of the main critical issue. Fixing the root cause (moving the code block) will resolve all 27 issues simultaneously.

---

## Removal Script

**Important:** This is not a simple removal - it's a **code restructuring** to make the unreachable code reachable.

### Manual Fix Script

```bash
#!/bin/bash
# scripts/quality/fix_dead_code_install.sh
# Fix unreachable Claude Desktop configuration in install.py

set -e

PROJECT_ROOT="/Users/hkr/Documents/GitHub/mcp-memory-service"
cd "$PROJECT_ROOT"

INSTALL_FILE="scripts/installation/install.py"

echo "=== Phase 1: Fix Dead Code in install.py ==="
echo ""

# Backup
BRANCH_NAME="quality/fix-dead-code-install-$(date +%Y%m%d-%H%M%S)"
git checkout -b "$BRANCH_NAME"
echo "✓ Created branch: $BRANCH_NAME"
echo ""

# Create backup of original file
cp "$INSTALL_FILE" "$INSTALL_FILE.backup"
echo "✓ Backed up $INSTALL_FILE to $INSTALL_FILE.backup"
echo ""

echo "Manual fix required for this issue:"
echo "1. Open $INSTALL_FILE"
echo "2. Locate line 1358: 'return False'"
echo "3. Change it to: print_warning('Continuing with Claude Desktop configuration despite write test failure')"
echo "4. Cut lines 1360-1436 (Claude Desktop configuration)"
echo "5. Paste them AFTER the except block (after current line 1358)"
echo "6. Adjust indentation to match outer scope"
echo "7. Save file"
echo ""

read -p "Press Enter after making the manual fix..."

# Verify syntax
echo "Verifying Python syntax..."
if python -m py_compile "$INSTALL_FILE"; then
    echo "✓ Python syntax valid"
else
    echo "✗ Python syntax error - reverting"
    mv "$INSTALL_FILE.backup" "$INSTALL_FILE"
    exit 1
fi

# Run pyscn to verify fix
echo ""
echo "Running pyscn to verify fix..."
pyscn analyze "$INSTALL_FILE" --dead-code --output .pyscn/reports/

# Run tests
echo ""
echo "Running installation tests..."
if pytest tests/unit/test_installation.py -v; then
    echo "✓ Installation tests passed"
else
    echo "⚠ Some tests failed - review manually"
fi

# Summary
echo ""
echo "=== Summary ==="
git diff --stat "$INSTALL_FILE"
echo ""
echo "✓ Dead code fix applied"
echo ""
echo "Next steps:"
echo "1. Review changes: git diff $INSTALL_FILE"
echo "2. Test installation: python scripts/installation/install.py --storage-backend sqlite_vec"
echo "3. Verify Claude Desktop config is created"
echo "4. Commit: git commit -m 'fix: move Claude Desktop configuration out of unreachable code block (issue #240 Phase 1)'"
echo "5. Re-run pyscn: pyscn analyze . --output .pyscn/reports/"
```

### Automated Fix Script (Using sed)

**Warning:** This is complex due to the need to move and re-indent code. Manual fix is recommended.

---

## Risk Assessment Matrix

| Item | Risk | Impact | Testing | Rollback | Priority |
|------|------|--------|---------|----------|----------|
| Move Claude Desktop config code | **Low** | **High** - Fixes installation for all users | `pytest tests/unit/test_installation.py` | `git revert` or restore from backup | **P1** |
| Change `return False` to warning | **Low** | Medium - Changes error handling behavior | Manual installation test | `git revert` | **P1** |
| Indentation adjustment | **Very Low** | High - Code won't run if wrong | `python -m py_compile` | `git revert` | **P1** |

**Overall Risk Level:** Low
**Reason:** This is a straightforward code movement with clear intent. The original code was never executing, so we're not changing existing behavior - we're enabling intended behavior.

---

## Expected Impact

### Before Fix
```
Health Score: 63/100
├─ Complexity: 40/100
├─ Dead Code: 70/100 (27 issues, 2 critical)
├─ Duplication: 30/100
├─ Coupling: 100/100
├─ Dependencies: 85/100
└─ Architecture: 75/100
```

### After Fix (Estimated)
```
Health Score: 68-72/100 (+5 to +9)
├─ Complexity: 40/100 (unchanged)
├─ Dead Code: 85-90/100 (0 issues, 0 critical) [+15 to +20]
├─ Duplication: 30/100 (unchanged)
├─ Coupling: 100/100 (unchanged)
├─ Dependencies: 85/100 (unchanged)
└─ Architecture: 75/100 (unchanged)
```

**Confidence:** High (95%)

**Rationale:**
- Fixing all 27 dead code issues simultaneously by addressing the root cause
- Dead code score expected to improve by 15-20 points (from 70 to 85-90)
- Overall health score improvement of 5-9 points (from 63 to 68-72)
- This is a conservative estimate - could be higher if pyscn weighs critical issues heavily

**Additional Benefits:**
- Installation process will work correctly for Claude Desktop configuration
- Users won't need manual post-installation configuration
- Improved user experience and reduced support requests

---

## Testing Strategy

### Pre-Fix Verification
1. **Confirm current behavior:**
   ```bash
   # Run installer and verify Claude Desktop config is NOT created
   python scripts/installation/install.py --storage-backend sqlite_vec
   # Check: Is ~/.claude/claude_desktop_config.json updated? (Should be NO)
   ```

### Post-Fix Verification
1. **Syntax Check:**
   ```bash
   python -m py_compile scripts/installation/install.py
   ```

2. **Unit Tests:**
   ```bash
   pytest tests/unit/test_installation.py -v
   ```

3. **Integration Test:**
   ```bash
   # Test full installation flow
   python scripts/installation/install.py --storage-backend sqlite_vec
   # Verify Claude Desktop config IS created
   cat ~/.claude/claude_desktop_config.json | grep "mcp-memory-service"
   ```

4. **pyscn Re-analysis:**
   ```bash
   pyscn analyze . --output .pyscn/reports/
   # Verify dead code issues reduced from 27 to 0
   ```

5. **Edge Case Testing:**
   ```bash
   # Test with different storage backends
   python scripts/installation/install.py --storage-backend hybrid
   python scripts/installation/install.py --storage-backend cloudflare
   ```

---

## Next Steps

### Immediate Actions (Phase 1)
1. ✅ **Review this analysis** - Confirm the root cause and fix strategy
2. ⏳ **Apply the fix manually** - Edit `scripts/installation/install.py`
3. ⏳ **Run tests** - Verify no regressions: `pytest tests/unit/test_installation.py`
4. ⏳ **Test installation** - Run full installer and verify Claude config created
5. ⏳ **Commit changes** - Use semantic commit message
6. ⏳ **Re-run pyscn** - Verify health score improvement

### Commit Message Template
```
fix: move Claude Desktop configuration out of unreachable code block

Fixes issue #240 Phase 1 - Dead Code Removal

The configure_paths() function had a 'return False' statement inside
an exception handler that made 77 lines of Claude Desktop configuration
code unreachable. This caused installations to skip Claude Desktop setup.

Changes:
- Move Claude Desktop config code (lines 1360-1436) outside except block
- Replace premature 'return False' with warning message
- Ensure config runs regardless of write test result

Impact:
- Resolves all 27 dead code issues identified by pyscn
- Claude Desktop now configured automatically during installation
- Dead code score: 70 → 85-90 (+15 to +20 points)
- Overall health score: 63 → 68-72 (+5 to +9 points)

Testing:
- Syntax validated with py_compile
- Unit tests pass: pytest tests/unit/test_installation.py
- Manual installation tested with sqlite_vec backend
- pyscn re-analysis confirms 0 dead code issues

Co-authored-by: pyscn analysis tool
```

### Follow-up Actions (Phase 2)
After Phase 1 is complete and merged:
1. **Run pyscn again** - Get updated health score
2. **Analyze complexity issues** - Address complexity score of 40/100
3. **Review duplication** - Address duplication score of 30/100
4. **Create Phase 2 plan** - Target low-hanging complexity reductions

---

## Appendix: pyscn Report Metadata

**Report File:** `.pyscn/reports/analyze_20251123_214224.html`
**Generated:** 2025-11-23 21:42:24
**Total Files Analyzed:** 252
**Total Functions:** 567
**Average Complexity:** 9.52

**Health Score Breakdown:**
- Overall: 63/100 (Grade C)
- Complexity: 40/100 (28 high-risk functions)
- Dead Code: 70/100 (27 issues, 2 critical)
- Duplication: 30/100 (6.0% duplication, 18 groups)
- Coupling (CBO): 100/100 (excellent)
- Dependencies: 85/100 (no cycles)
- Architecture: 75/100 (75.5% compliant)

---

## Conclusion

This Phase 1 analysis identifies a single root cause affecting all 27 dead code issues: a premature `return False` statement in the `configure_paths()` function. By moving 77 lines of Claude Desktop configuration code outside the exception handler, we can:

1. **Eliminate all 27 dead code issues** identified by pyscn
2. **Fix a critical installation bug** where Claude Desktop is never configured
3. **Improve overall health score by 5-9 points** (from 63 to 68-72)
4. **Improve dead code score by 15-20 points** (from 70 to 85-90)

The fix is straightforward, low-risk, and has high impact. This sets the stage for Phase 2, where we can tackle complexity and duplication issues with a cleaner codebase.

**Recommendation:** Proceed with manual fix using the strategy outlined above. Automated sed script is possible but manual fix is safer given the code movement and indentation requirements.

```

--------------------------------------------------------------------------------
/claude-hooks/utilities/dynamic-context-updater.js:
--------------------------------------------------------------------------------

```javascript
/**
 * Dynamic Context Updater
 * Orchestrates intelligent context updates during active conversations
 * Phase 2: Intelligent Context Updates
 */

const { analyzeConversation, detectTopicChanges } = require('./conversation-analyzer');
const { scoreMemoryRelevance } = require('./memory-scorer');
const { formatMemoriesForContext } = require('./context-formatter');
const { getSessionTracker } = require('./session-tracker');

/**
 * Dynamic Context Update Manager
 * Coordinates between conversation analysis, memory retrieval, and context injection
 */
class DynamicContextUpdater {
    constructor(options = {}) {
        this.options = {
            updateThreshold: 0.3,           // Minimum significance score to trigger update
            maxMemoriesPerUpdate: 3,        // Maximum memories to inject per update
            updateCooldownMs: 30000,        // Minimum time between updates (30 seconds)
            maxUpdatesPerSession: 10,       // Maximum updates per session
            debounceMs: 5000,               // Debounce rapid conversation changes
            enableCrossSessionContext: true, // Include cross-session intelligence
            ...options
        };

        this.lastUpdateTime = 0;
        this.updateCount = 0;
        this.conversationBuffer = '';
        this.lastAnalysis = null;
        this.loadedMemoryHashes = new Set();
        this.sessionTracker = null;
        this.debounceTimer = null;
    }

    /**
     * Initialize the dynamic context updater
     */
    async initialize(sessionContext = {}) {
        console.log('[Dynamic Context] Initializing dynamic context updater...');
        
        this.sessionContext = sessionContext;
        this.updateCount = 0;
        this.loadedMemoryHashes.clear();
        
        if (this.options.enableCrossSessionContext) {
            this.sessionTracker = getSessionTracker();
            await this.sessionTracker.initialize();
        }

        console.log('[Dynamic Context] Dynamic context updater initialized');
    }

    /**
     * Process conversation update and potentially inject new context
     * @param {string} conversationText - Current conversation content
     * @param {object} memoryServiceConfig - Memory service configuration
     * @param {function} contextInjector - Function to inject context into conversation
     */
    async processConversationUpdate(conversationText, memoryServiceConfig, contextInjector) {
        try {
            // Check rate limiting
            if (!this.shouldProcessUpdate()) {
                return { processed: false, reason: 'rate_limited' };
            }

            // Debounce rapid updates
            if (this.debounceTimer) {
                clearTimeout(this.debounceTimer);
            }

            return new Promise((resolve) => {
                this.debounceTimer = setTimeout(async () => {
                    const result = await this.performContextUpdate(
                        conversationText,
                        memoryServiceConfig,
                        contextInjector
                    );
                    resolve(result);
                }, this.options.debounceMs);
            });

        } catch (error) {
            console.error('[Dynamic Context] Error processing conversation update:', error.message);
            return { processed: false, error: error.message };
        }
    }

    /**
     * Perform the actual context update.
     *
     * Pipeline: analyze conversation → detect topic shift → build memory
     * queries → retrieve memories → score and select → inject formatted
     * context. Every early-return path stores the current analysis in
     * `this.lastAnalysis` so the next call compares against fresh state;
     * rate-limit counters are only advanced when an injection happens.
     *
     * @param {string} conversationText - Current conversation content
     * @param {object} memoryServiceConfig - Memory service configuration passed to retrieval
     * @param {function} contextInjector - Callback invoked with the formatted context
     * @returns {Promise<object>} `{ processed: true, ... }` on injection, otherwise
     *   `{ processed: false, reason }` explaining why the update was skipped
     */
    async performContextUpdate(conversationText, memoryServiceConfig, contextInjector) {
        console.log('[Dynamic Context] Processing conversation update...');

        // Analyze current conversation
        const currentAnalysis = analyzeConversation(conversationText, {
            extractTopics: true,
            extractEntities: true,
            detectIntent: true,
            detectCodeContext: true,
            minTopicConfidence: 0.3
        });

        // Detect significant changes
        // NOTE(review): assumes detectTopicChanges tolerates a null previous
        // analysis on the first call — confirm in conversation-analyzer.
        const changes = detectTopicChanges(this.lastAnalysis, currentAnalysis);

        if (!changes.hasTopicShift || changes.significanceScore < this.options.updateThreshold) {
            console.log(`[Dynamic Context] No significant changes detected (score: ${changes.significanceScore.toFixed(2)})`);
            this.lastAnalysis = currentAnalysis;
            return { processed: false, reason: 'insufficient_change', significanceScore: changes.significanceScore };
        }

        console.log(`[Dynamic Context] Significant conversation change detected (score: ${changes.significanceScore.toFixed(2)})`);
        console.log(`[Dynamic Context] New topics: ${changes.newTopics.map(t => t.name).join(', ')}`);

        // Generate memory queries based on conversation changes
        const queries = this.generateMemoryQueries(currentAnalysis, changes);
        
        if (queries.length === 0) {
            this.lastAnalysis = currentAnalysis;
            return { processed: false, reason: 'no_actionable_queries' };
        }

        // Retrieve memories from memory service
        const memories = await this.retrieveRelevantMemories(queries, memoryServiceConfig);
        
        if (memories.length === 0) {
            this.lastAnalysis = currentAnalysis;
            return { processed: false, reason: 'no_relevant_memories' };
        }

        // Score memories with conversation context
        // (scoreMemoriesWithContext is defined elsewhere in this class; it is
        // expected to attach a numeric relevanceScore to each memory.)
        const scoredMemories = this.scoreMemoriesWithContext(memories, currentAnalysis);
        
        // Select top memories for injection (relevance floor 0.3, capped by config)
        const selectedMemories = scoredMemories
            .filter(memory => memory.relevanceScore > 0.3)
            .slice(0, this.options.maxMemoriesPerUpdate);

        if (selectedMemories.length === 0) {
            this.lastAnalysis = currentAnalysis;
            return { processed: false, reason: 'no_high_relevance_memories' };
        }

        // Track loaded memories to avoid duplicates in later updates
        selectedMemories.forEach(memory => {
            this.loadedMemoryHashes.add(memory.content_hash);
        });

        // Include cross-session context if enabled (tracker is set in initialize())
        let crossSessionContext = null;
        if (this.options.enableCrossSessionContext && this.sessionTracker) {
            crossSessionContext = await this.sessionTracker.getConversationContext(
                this.sessionContext.projectContext,
                { maxPreviousSessions: 2, maxDaysBack: 3 }
            );
        }

        // Format context update
        const contextUpdate = this.formatContextUpdate(
            selectedMemories,
            currentAnalysis,
            changes,
            crossSessionContext
        );

        // Inject context into conversation (injection is optional — skipped
        // when no callable injector is provided)
        if (contextInjector && typeof contextInjector === 'function') {
            await contextInjector(contextUpdate);
        }

        // Update state: advance rate-limit counters only on successful injection
        this.lastAnalysis = currentAnalysis;
        this.lastUpdateTime = Date.now();
        this.updateCount++;

        console.log(`[Dynamic Context] Context update completed (update #${this.updateCount})`);
        console.log(`[Dynamic Context] Injected ${selectedMemories.length} memories`);

        return {
            processed: true,
            updateCount: this.updateCount,
            memoriesInjected: selectedMemories.length,
            significanceScore: changes.significanceScore,
            topics: changes.newTopics.map(t => t.name),
            hasConversationContext: true,
            hasCrossSessionContext: !!crossSessionContext
        };
    }

    /**
     * Check if we should process an update based on rate limiting
     */
    shouldProcessUpdate() {
        const now = Date.now();
        
        // Check cooldown period
        if (now - this.lastUpdateTime < this.options.updateCooldownMs) {
            return false;
        }

        // Check maximum updates per session
        if (this.updateCount >= this.options.maxUpdatesPerSession) {
            return false;
        }

        return true;
    }

    /**
     * Generate memory queries from conversation analysis
     */
    generateMemoryQueries(analysis, changes) {
        const queries = [];

        // Query for new topics
        changes.newTopics.forEach(topic => {
            if (topic.confidence > 0.4) {
                queries.push({
                    query: topic.name,
                    type: 'topic',
                    weight: topic.confidence,
                    limit: 2
                });
            }
        });

        // Query for changed intent
        if (changes.changedIntents && analysis.intent && analysis.intent.confidence > 0.5) {
            queries.push({
                query: `${analysis.intent.name} ${this.sessionContext.projectContext?.name || ''}`,
                type: 'intent',
                weight: analysis.intent.confidence,
                limit: 1
            });
        }

        // Query for high-confidence entities
        analysis.entities
            .filter(entity => entity.confidence > 0.7)
            .slice(0, 2)
            .forEach(entity => {
                queries.push({
                    query: `${entity.name} ${entity.type}`,
                    type: 'entity',
                    weight: entity.confidence,
                    limit: 1
                });
            });

        // Sort by weight and limit total queries
        return queries
            .sort((a, b) => b.weight - a.weight)
            .slice(0, 4); // Maximum 4 queries per update
    }

    /**
     * Retrieve memories from memory service for multiple queries
     */
    async retrieveRelevantMemories(queries, memoryServiceConfig) {
        const allMemories = [];
        
        // Import the query function from topic-change hook
        const { queryMemoryService } = require('../core/topic-change');

        for (const queryObj of queries) {
            try {
                const memories = await this.queryMemoryService(
                    memoryServiceConfig.endpoint,
                    memoryServiceConfig.apiKey,
                    queryObj.query,
                    {
                        limit: queryObj.limit,
                        excludeHashes: Array.from(this.loadedMemoryHashes)
                    }
                );

                // Add query context to memories
                memories.forEach(memory => {
                    memory.queryContext = queryObj;
                });

                allMemories.push(...memories);

            } catch (error) {
                console.error(`[Dynamic Context] Failed to query memories for "${queryObj.query}":`, error.message);
            }
        }

        return allMemories;
    }

    /**
     * Simplified memory service query (extracted from topic-change.js pattern)
     */
    async queryMemoryService(endpoint, apiKey, query, options = {}) {
        const https = require('https');
        
        return new Promise((resolve, reject) => {
            const { limit = 3, excludeHashes = [] } = options;

            const postData = JSON.stringify({
                jsonrpc: '2.0',
                id: Date.now(),
                method: 'tools/call',
                params: {
                    name: 'retrieve_memory',
                    arguments: { query: query, limit: limit }
                }
            });

            const url = new URL('/mcp', endpoint);
            const requestOptions = {
                hostname: url.hostname,
                port: url.port,
                path: url.pathname,
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${apiKey}`,
                    'Content-Length': Buffer.byteLength(postData)
                },
                rejectUnauthorized: false,
                timeout: 5000
            };

            const req = https.request(requestOptions, (res) => {
                let data = '';
                res.on('data', (chunk) => { data += chunk; });
                res.on('end', () => {
                    try {
                        const response = JSON.parse(data);
                        if (response.error) {
                            console.error('[Dynamic Context] Memory service error:', response.error);
                            resolve([]);
                            return;
                        }

                        const memories = this.parseMemoryResults(response.result);
                        const filteredMemories = memories.filter(memory => 
                            !excludeHashes.includes(memory.content_hash)
                        );
                        
                        resolve(filteredMemories);
                    } catch (parseError) {
                        console.error('[Dynamic Context] Failed to parse memory response:', parseError.message);
                        resolve([]);
                    }
                });
            });

            req.on('error', (error) => {
                console.error('[Dynamic Context] Memory service request failed:', error.message);
                resolve([]);
            });

            req.on('timeout', () => {
                req.destroy();
                resolve([]);
            });

            req.write(postData);
            req.end();
        });
    }

    /**
     * Parse memory results from MCP response
     */
    parseMemoryResults(result) {
        try {
            if (result && result.content && result.content[0] && result.content[0].text) {
                const text = result.content[0].text;
                const resultsMatch = text.match(/'results':\s*(\[[\s\S]*?\])/);
                if (resultsMatch) {
                    return eval(resultsMatch[1]) || [];
                }
            }
            return [];
        } catch (error) {
            console.error('[Dynamic Context] Error parsing memory results:', error.message);
            return [];
        }
    }

    /**
     * Score memories with enhanced conversation context
     */
    scoreMemoriesWithContext(memories, conversationAnalysis) {
        return scoreMemoryRelevance(memories, this.sessionContext.projectContext || {}, {
            includeConversationContext: true,
            conversationAnalysis: conversationAnalysis,
            weights: {
                timeDecay: 0.2,
                tagRelevance: 0.3,
                contentRelevance: 0.15,
                conversationRelevance: 0.35  // High weight for conversation context
            }
        });
    }

    /**
     * Format the context update message
     */
    formatContextUpdate(memories, analysis, changes, crossSessionContext) {
        let updateMessage = '\n🧠 **Dynamic Context Update**\n\n';

        // Explain the trigger
        if (changes.newTopics.length > 0) {
            updateMessage += `**New topics detected**: ${changes.newTopics.map(t => t.name).join(', ')}\n`;
        }
        if (changes.changedIntents && analysis.intent) {
            updateMessage += `**Focus shifted to**: ${analysis.intent.name}\n`;
        }
        updateMessage += '\n';

        // Add cross-session context if available
        if (crossSessionContext && crossSessionContext.recentSessions.length > 0) {
            updateMessage += '**Recent session context**:\n';
            crossSessionContext.recentSessions.slice(0, 2).forEach(session => {
                const timeAgo = this.formatTimeAgo(session.endTime);
                updateMessage += `• ${session.outcome?.type || 'Session'} completed ${timeAgo}\n`;
            });
            updateMessage += '\n';
        }

        // Add relevant memories
        updateMessage += '**Relevant context**:\n';
        memories.slice(0, 3).forEach((memory, index) => {
            const content = memory.content.length > 100 ? 
                memory.content.substring(0, 100) + '...' : 
                memory.content;
            
            const relevanceIndicator = memory.relevanceScore > 0.7 ? '🔥' : 
                                     memory.relevanceScore > 0.5 ? '⭐' : '💡';
            
            updateMessage += `${relevanceIndicator} ${content}\n`;
            
            if (memory.tags && memory.tags.length > 0) {
                updateMessage += `   *${memory.tags.slice(0, 3).join(', ')}*\n`;
            }
            updateMessage += '\n';
        });

        updateMessage += '---\n';
        return updateMessage;
    }

    /**
     * Format time ago for human readability
     */
    formatTimeAgo(timestamp) {
        const now = new Date();
        const time = new Date(timestamp);
        const diffMs = now - time;
        const diffMins = Math.floor(diffMs / 60000);
        const diffHours = Math.floor(diffMs / 3600000);
        const diffDays = Math.floor(diffMs / 86400000);

        if (diffMins < 60) return `${diffMins} minutes ago`;
        if (diffHours < 24) return `${diffHours} hours ago`;
        if (diffDays < 7) return `${diffDays} days ago`;
        return time.toLocaleDateString();
    }

    /**
     * Get statistics about dynamic context updates
     */
    getStats() {
        return {
            updateCount: this.updateCount,
            loadedMemoriesCount: this.loadedMemoryHashes.size,
            lastUpdateTime: this.lastUpdateTime,
            hasSessionTracker: !!this.sessionTracker,
            isInitialized: !!this.sessionContext
        };
    }

    /**
     * Reset the updater state for a new conversation
     */
    reset() {
        console.log('[Dynamic Context] Resetting dynamic context updater');
        
        this.lastUpdateTime = 0;
        this.updateCount = 0;
        this.conversationBuffer = '';
        this.lastAnalysis = null;
        this.loadedMemoryHashes.clear();
        
        if (this.debounceTimer) {
            clearTimeout(this.debounceTimer);
            this.debounceTimer = null;
        }
    }
}

// Public API: expose the updater class for use by hook entry points.
module.exports = {
    DynamicContextUpdater
};
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/storage/http_client.py:
--------------------------------------------------------------------------------

```python
# Copyright 2024 Heinrich Krupp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
HTTP client storage adapter for MCP Memory Service.
Implements the MemoryStorage interface by forwarding requests to a remote HTTP server.
"""

import aiohttp
import asyncio
import json
import logging
from typing import List, Dict, Any, Tuple, Optional
from datetime import datetime, timezone

from .base import MemoryStorage
from ..models.memory import Memory, MemoryQueryResult
from ..config import HTTP_HOST, HTTP_PORT

logger = logging.getLogger(__name__)


class HTTPClientStorage(MemoryStorage):
    """
    HTTP client storage implementation.

    This adapter forwards all storage operations to a remote MCP Memory Service
    HTTP server, enabling multiple clients to coordinate through a shared server.
    """

    def __init__(self, base_url: Optional[str] = None, timeout: float = 30.0):
        """
        Initialize HTTP client storage.

        Args:
            base_url: Base URL of the MCP Memory Service HTTP server.
                Defaults to the configured host/port, mapping the 0.0.0.0
                bind address to localhost for connecting.
            timeout: Request timeout in seconds.
        """
        if base_url:
            self.base_url = base_url.rstrip('/')
        else:
            # 0.0.0.0 is a bind address, not a connect address
            host = HTTP_HOST if HTTP_HOST != '0.0.0.0' else 'localhost'
            self.base_url = f"http://{host}:{HTTP_PORT}"

        self.timeout = aiohttp.ClientTimeout(total=timeout)
        self.session: Optional[aiohttp.ClientSession] = None
        self._initialized = False

        logger.info(f"Initialized HTTP client storage for: {self.base_url}")

    @staticmethod
    def _memory_from_dict(memory_data: Dict[str, Any]) -> Memory:
        """Build a Memory model from the JSON object returned by the server.

        Shared by retrieve(), tag search, and recall() so the field mapping
        lives in exactly one place.
        """
        return Memory(
            content=memory_data.get("content", ""),
            content_hash=memory_data.get("content_hash", ""),
            tags=memory_data.get("tags", []),
            memory_type=memory_data.get("memory_type"),
            metadata=memory_data.get("metadata", {}),
            created_at=memory_data.get("created_at"),
            updated_at=memory_data.get("updated_at"),
            created_at_iso=memory_data.get("created_at_iso"),
            updated_at_iso=memory_data.get("updated_at_iso")
        )

    def _handle_http_error(self, e: Exception, operation: str, return_empty_list: bool = False):
        """Centralized HTTP error handling with context-specific logging.

        Note: most-specific exception types are tested first.
        aiohttp.ServerTimeoutError subclasses aiohttp.ClientError, so a
        ClientError-first ordering would make the timeout branch unreachable.

        Returns:
            [] when return_empty_list is True, otherwise (False, error_msg).
        """
        if isinstance(e, aiohttp.ServerTimeoutError):
            error_msg = f"HTTP server timeout during {operation}: {str(e)}"
        elif isinstance(e, asyncio.TimeoutError):
            error_msg = f"{operation.capitalize()} operation timeout: {str(e)}"
        elif isinstance(e, aiohttp.ClientError):
            error_msg = f"HTTP client connection error during {operation}: {str(e)}"
        elif isinstance(e, json.JSONDecodeError):
            error_msg = f"Invalid JSON response during {operation}: {str(e)}"
        else:
            error_msg = f"Unexpected {operation} error: {type(e).__name__}: {str(e)}"

        logger.error(error_msg)

        if return_empty_list:
            return []
        return False, error_msg

    async def initialize(self):
        """Create the HTTP session and verify connectivity via /health.

        Raises:
            RuntimeError: if the health check fails or the server is unreachable.
        """
        try:
            self.session = aiohttp.ClientSession(timeout=self.timeout)

            # Test connection to the server
            health_url = f"{self.base_url}/health"
            async with self.session.get(health_url) as response:
                if response.status == 200:
                    health_data = await response.json()
                    logger.info(
                        f"Connected to MCP Memory Service: "
                        f"{health_data.get('service', 'unknown')} v{health_data.get('version', 'unknown')}"
                    )
                    self._initialized = True
                else:
                    raise RuntimeError(f"Health check failed: HTTP {response.status}")
        except Exception as e:
            # Most-specific types first (ServerTimeoutError subclasses ClientError).
            if isinstance(e, aiohttp.ServerTimeoutError):
                error_msg = f"HTTP server timeout during initialization: {str(e)}"
            elif isinstance(e, asyncio.TimeoutError):
                error_msg = f"Initialization timeout: {str(e)}"
            elif isinstance(e, aiohttp.ClientError):
                error_msg = f"HTTP client connection error during initialization: {str(e)}"
            else:
                error_msg = f"Unexpected error during HTTP client initialization: {type(e).__name__}: {str(e)}"

            logger.error(error_msg)
            if self.session:
                await self.session.close()
                self.session = None
            # Chain the original exception so the root cause is preserved
            raise RuntimeError(error_msg) from e

    async def store(self, memory: Memory) -> Tuple[bool, str]:
        """Store a memory via HTTP API.

        Returns:
            (success, message) — message carries the content hash on success
            or the server-provided error detail on failure.
        """
        if not self._initialized or not self.session:
            return False, "HTTP client not initialized"

        try:
            store_url = f"{self.base_url}/api/memories"
            payload = {
                "content": memory.content,
                "tags": memory.tags or [],
                "memory_type": memory.memory_type,
                "metadata": memory.metadata or {}
            }

            async with self.session.post(store_url, json=payload) as response:
                if response.status == 201:
                    result = await response.json()
                    logger.info(f"Successfully stored memory via HTTP: {result.get('content_hash')}")
                    return True, f"Memory stored successfully: {result.get('content_hash')}"
                else:
                    error_data = await response.json()
                    error_msg = error_data.get('detail', f'HTTP {response.status}')
                    logger.error(f"Failed to store memory via HTTP: {error_msg}")
                    return False, error_msg

        except Exception as e:
            return self._handle_http_error(e, "store")

    async def retrieve(self, query: str, n_results: int = 5) -> List[MemoryQueryResult]:
        """Retrieve memories using semantic search via HTTP API.

        Returns an empty list on any error (logged), never raises.
        """
        if not self._initialized or not self.session:
            logger.error("HTTP client not initialized")
            return []

        try:
            search_url = f"{self.base_url}/api/search/semantic"
            payload = {
                "query": query,
                "n_results": n_results
            }

            async with self.session.post(search_url, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    results = []

                    for item in data.get("results", []):
                        results.append(MemoryQueryResult(
                            memory=self._memory_from_dict(item.get("memory", {})),
                            relevance_score=item.get("similarity_score"),
                            debug_info={"backend": "http_client", "server": self.base_url}
                        ))

                    logger.info(f"Retrieved {len(results)} memories via HTTP for query: {query}")
                    return results
                else:
                    logger.error(f"HTTP retrieve error: {response.status}")
                    return []

        except Exception as e:
            return self._handle_http_error(e, "retrieve", return_empty_list=True)

    async def search_by_tag(self, tags: List[str], time_start: Optional[float] = None) -> List[Memory]:
        """Search memories matching ANY of the given tags, optionally after time_start."""
        return await self._execute_tag_search(
            tags=tags,
            match_all=False,
            time_start=time_start,
            time_end=None
        )

    async def search_by_tags(
        self,
        tags: List[str],
        operation: str = "AND",
        time_start: Optional[float] = None,
        time_end: Optional[float] = None
    ) -> List[Memory]:
        """Search memories by tags with AND/OR semantics via HTTP API.

        Unknown operations are logged and treated as AND.
        """
        normalized_operation = operation.strip().upper() if isinstance(operation, str) else "AND"
        if normalized_operation not in {"AND", "OR"}:
            logger.warning("Unsupported tag operation %s; defaulting to AND", operation)
            normalized_operation = "AND"

        match_all = normalized_operation == "AND"
        return await self._execute_tag_search(
            tags=tags,
            match_all=match_all,
            time_start=time_start,
            time_end=time_end
        )

    async def _execute_tag_search(
        self,
        tags: List[str],
        match_all: bool,
        time_start: Optional[float],
        time_end: Optional[float]
    ) -> List[Memory]:
        """Internal helper to execute HTTP tag-based searches.

        Returns an empty list on any error (logged), never raises.
        """
        if not self._initialized or not self.session:
            logger.error("HTTP client not initialized")
            return []

        try:
            search_url = f"{self.base_url}/api/search/by-tag"
            payload: Dict[str, Any] = {
                "tags": tags,
                "match_all": match_all
            }

            time_filter = self._build_time_filter(time_start, time_end)
            if time_filter:
                payload["time_filter"] = time_filter

            async with self.session.post(search_url, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    results: List[Memory] = [
                        self._memory_from_dict(result_item.get("memory", {}))
                        for result_item in data.get("results", [])
                    ]

                    logger.info(
                        "Found %d memories via HTTP with tags %s (match_all=%s)",
                        len(results),
                        tags,
                        match_all
                    )
                    return results

                logger.error(f"HTTP tag search error: {response.status}")
                return []

        except Exception as e:
            return self._handle_http_error(e, "tag search", return_empty_list=True)

    @staticmethod
    def _build_time_filter(time_start: Optional[float], time_end: Optional[float]) -> Optional[str]:
        """Convert timestamps to the natural language format expected by the HTTP API.

        Returns None when neither bound is given; dates are rendered in UTC.
        """
        if time_start is None and time_end is None:
            return None

        def _to_date(ts: float) -> str:
            return datetime.fromtimestamp(ts, tz=timezone.utc).date().isoformat()

        if time_start is not None and time_end is not None:
            return f"between {_to_date(time_start)} and {_to_date(time_end)}"
        if time_start is not None:
            return _to_date(time_start)
        return _to_date(time_end)

    async def delete(self, content_hash: str) -> Tuple[bool, str]:
        """Delete a memory by content hash via HTTP API.

        Returns:
            (success, message); a 404 maps to (False, "... not found").
        """
        if not self._initialized or not self.session:
            return False, "HTTP client not initialized"

        try:
            delete_url = f"{self.base_url}/api/memories/{content_hash}"

            async with self.session.delete(delete_url) as response:
                if response.status == 200:
                    result = await response.json()
                    logger.info(f"Successfully deleted memory via HTTP: {content_hash}")
                    return True, result.get("message", "Memory deleted successfully")
                elif response.status == 404:
                    return False, f"Memory with hash {content_hash} not found"
                else:
                    error_data = await response.json()
                    error_msg = error_data.get('detail', f'HTTP {response.status}')
                    logger.error(f"Failed to delete memory via HTTP: {error_msg}")
                    return False, error_msg

        except Exception as e:
            return self._handle_http_error(e, "delete")

    async def delete_by_tag(self, tag: str) -> Tuple[int, str]:
        """Bulk delete by tag is deliberately unsupported over HTTP for safety."""
        logger.warning("Bulk delete by tag not supported via HTTP client for safety")
        return 0, "Bulk delete by tag not supported via HTTP client for safety reasons"

    async def cleanup_duplicates(self) -> Tuple[int, str]:
        """Duplicate cleanup is a server-side operation; not forwarded over HTTP."""
        logger.warning("Cleanup duplicates not supported via HTTP client")
        return 0, "Cleanup duplicates should be performed on the server side"

    async def update_memory_metadata(self, content_hash: str, updates: Dict[str, Any], preserve_timestamps: bool = True) -> Tuple[bool, str]:
        """Metadata updates are not implemented yet (would need a PUT endpoint)."""
        logger.warning("Update memory metadata not supported via HTTP client yet")
        return False, "Update memory metadata not supported via HTTP client yet"

    async def recall(self, query: Optional[str] = None, n_results: int = 5, start_timestamp: Optional[float] = None, end_timestamp: Optional[float] = None) -> List[MemoryQueryResult]:
        """
        Retrieve memories with time filtering and optional semantic search via HTTP API.

        When no query is given, a natural-language time-range query is built
        from the timestamps (rendered in UTC, consistent with _build_time_filter).
        Returns an empty list on any error (logged), never raises.
        """
        if not self._initialized or not self.session:
            logger.error("HTTP client not initialized")
            return []

        try:
            def _iso(ts: Optional[float], fallback: str) -> str:
                # UTC for consistency with _build_time_filter
                if ts is None:
                    return fallback
                return datetime.fromtimestamp(ts, tz=timezone.utc).isoformat()

            recall_url = f"{self.base_url}/api/search/time"
            payload = {
                "query": query or f"memories from {_iso(start_timestamp, 'beginning')} to {_iso(end_timestamp, 'now')}",
                "n_results": n_results
            }

            async with self.session.post(recall_url, json=payload) as response:
                if response.status == 200:
                    data = await response.json()
                    results = []

                    for item in data.get("results", []):
                        results.append(MemoryQueryResult(
                            memory=self._memory_from_dict(item.get("memory", {})),
                            relevance_score=item.get("similarity_score"),
                            debug_info={"backend": "http_client", "server": self.base_url, "time_filtered": True}
                        ))

                    logger.info(f"Retrieved {len(results)} memories via HTTP recall")
                    return results
                else:
                    logger.error(f"HTTP recall error: {response.status}")
                    return []

        except Exception as e:
            return self._handle_http_error(e, "recall", return_empty_list=True)

    def get_stats(self) -> Dict[str, Any]:
        """Get storage statistics (placeholder - could call a stats endpoint)."""
        return {
            "backend": "http_client",
            "server": self.base_url,
            "initialized": self._initialized,
            "note": "Statistics from remote server not implemented yet"
        }

    async def close(self):
        """Close the HTTP client session and mark the adapter uninitialized."""
        if self.session:
            await self.session.close()
            self.session = None
            self._initialized = False
            logger.info("HTTP client storage connection closed")

    async def update_memory(self, memory: Memory) -> bool:
        """Update an existing memory (not implemented via HTTP client yet)."""
        logger.warning("Update memory not supported via HTTP client yet")
        return False

```

--------------------------------------------------------------------------------
/docs/development/refactoring-notes.md:
--------------------------------------------------------------------------------

```markdown
# Memory Service Refactoring Summary

## 2025-02-XX Duplication Review

- **Memory response serialization** – `src/mcp_memory_service/web/api/memories.py:86` re-implements the same field mapping already provided by `src/mcp_memory_service/services/memory_service.py:83`. We can convert HTTP responses by delegating to `MemoryService.format_memory_response` and avoid keeping two copies of the field list in sync.
- **Search helpers drift** – `src/mcp_memory_service/web/api/search.py:75` and `src/mcp_memory_service/web/api/search.py:84` duplicate logic that now lives inside `MemoryService.retrieve_memory` and `MemoryService.search_by_tag`. The module still defines legacy helpers (`parse_time_query`, `is_within_time_range`) at `src/mcp_memory_service/web/api/search.py:365` that mirror `src/mcp_memory_service/services/memory_service.py:502` and `src/mcp_memory_service/services/memory_service.py:535`; they appear unused and should either call through to the service or be removed.
- **MCP tool vs HTTP MCP API** – Each tool is implemented twice (FastMCP server in `src/mcp_memory_service/mcp_server.py` and HTTP bridge in `src/mcp_memory_service/web/api/mcp.py`), with near-identical request handling and result shaping. Examples: `store_memory` (`mcp_server.py:154` vs `web/api/mcp.py:247`), `retrieve_memory` (`mcp_server.py:204` vs `web/api/mcp.py:282`), `search_by_tag` (`mcp_server.py:262` vs `web/api/mcp.py:313`), `delete_memory` (`mcp_server.py:330` vs `web/api/mcp.py:384`), `check_database_health` (`mcp_server.py:367` vs `web/api/mcp.py:398`), `list_memories` (`mcp_server.py:394` vs `web/api/mcp.py:407`), `search_by_time` (`mcp_server.py:440` vs `web/api/mcp.py:427`), and `search_similar` (`mcp_server.py:502` vs `web/api/mcp.py:463`). Consolidating these into shared helpers would keep the tool surface synchronized and reduce error-prone duplication.

## Problem Identified

The original implementation had **duplicated and inconsistent logic** between the API and MCP tool implementations for `list_memories`:

### Critical Issues Found:

1. **Different Pagination Logic:**
   - **API**: Correctly filters first, then paginates
   - **MCP Tool**: Paginates first, then filters (loses data!)

2. **Inconsistent Tag Filtering:**
   - **API**: Uses `storage.search_by_tag([tag])` for proper tag-based queries
   - **MCP Tool**: Uses in-memory filtering after pagination

3. **Wrong Total Counts:**
   - **API**: Provides accurate `total` and `has_more` for pagination
   - **MCP Tool**: Returns incorrect `total_found` count

4. **Code Duplication:**
   - Same business logic implemented in 3 different places
   - Maintenance nightmare and inconsistency risk

## Solution Implemented

### 1. Created Shared Service Layer

**File**: `src/mcp_memory_service/services/memory_service.py`

- **Single source of truth** for memory listing logic
- Consistent pagination and filtering across all interfaces
- Proper error handling and logging
- Separate formatting methods for different response types

### 2. Refactored All Implementations

**Updated Files:**
- `src/mcp_memory_service/web/api/memories.py` - API endpoint
- `src/mcp_memory_service/mcp_server.py` - MCP tool
- `src/mcp_memory_service/web/api/mcp.py` - MCP API endpoint

**All now use**: `MemoryService.list_memories()` for consistent behavior

## Benefits Achieved

### ✅ **Consistency**
- All interfaces now use identical business logic
- No more data loss or incorrect pagination
- Consistent error handling

### ✅ **Maintainability**
- Single place to update memory listing logic
- Reduced code duplication by ~80%
- Easier to add new features or fix bugs

### ✅ **Reliability**
- Proper pagination with accurate counts
- Correct tag and memory_type filtering
- Better error handling and logging

### ✅ **Testability**
- Service layer can be unit tested independently
- Easier to mock and test different scenarios

## Architecture Pattern

```
┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
│   API Endpoint  │    │   MCP Tool      │    │   MCP API       │
│                 │    │                 │    │                 │
└─────────┬───────┘    └─────────┬───────┘    └─────────┬───────┘
          │                      │                      │
          └──────────────────────┼──────────────────────┘
                                 │
                    ┌─────────────▼─────────────┐
                    │    MemoryService          │
                    │  (Shared Business Logic)  │
                    └─────────────┬─────────────┘
                                  │
                    ┌─────────────▼─────────────┐
                    │    MemoryStorage          │
                    │   (Data Access Layer)     │
                    └───────────────────────────┘
```

## Best Practices Applied

1. **Single Responsibility Principle**: Service layer handles only business logic
2. **DRY (Don't Repeat Yourself)**: Eliminated code duplication
3. **Separation of Concerns**: Business logic separated from presentation logic
4. **Consistent Interface**: All consumers use the same service methods
5. **Error Handling**: Centralized error handling and logging

## Future Recommendations

1. **Apply Same Pattern**: Consider refactoring other operations (store, delete, search) to use shared services
2. **Add Validation**: Move input validation to the service layer
3. **Add Caching**: Implement caching at the service layer if needed
4. **Add Metrics**: Add performance metrics and monitoring to the service layer

## Testing Recommendations

1. **Unit Tests**: Test `MemoryService` independently
2. **Integration Tests**: Test each interface (API, MCP) with the service
3. **End-to-End Tests**: Verify consistent behavior across all interfaces

This refactoring ensures that all memory listing operations behave identically regardless of the interface used, eliminating the data loss and inconsistency issues that were present in the original implementation.

## Phase 2: Complete Service Layer Refactoring

### Tools Refactoring Analysis

Based on comprehensive analysis of the codebase, **8 total tools** need to be refactored to use the shared `MemoryService` pattern:

#### ✅ **COMPLETED (1 tool):**
1. **`list_memories`** - ✅ **DONE** - Already uses `MemoryService.list_memories()`

#### 🔄 **PENDING REFACTORING (7 tools):**

##### **Core Memory Operations (4 tools):**
2. **`store_memory`** - **HIGH PRIORITY**
   - **Current Issues**: Duplicated logic in 3 files
   - **Files**: `mcp_server.py` (lines 154-217), `web/api/memories.py` (lines 100-182), `web/api/mcp.py` (lines 247-286)
   - **Service Method Needed**: `MemoryService.store_memory()`

3. **`retrieve_memory`** - **HIGH PRIORITY**
   - **Current Issues**: Duplicated logic in 2 files
   - **Files**: `mcp_server.py` (lines 219-271), `web/api/mcp.py` (lines 288-315)
   - **Service Method Needed**: `MemoryService.retrieve_memory()`

4. **`search_by_tag`** - **HIGH PRIORITY**
   - **Current Issues**: Duplicated logic in 2 files
   - **Files**: `mcp_server.py` (lines 273-326), `web/api/mcp.py` (lines 317-370)
   - **Service Method Needed**: `MemoryService.search_by_tag()`

5. **`delete_memory`** - **HIGH PRIORITY**
   - **Current Issues**: Duplicated logic in 3 files
   - **Files**: `mcp_server.py` (lines 328-360), `web/api/memories.py` (lines 248-276), `web/api/mcp.py` (lines 372-380)
   - **Service Method Needed**: `MemoryService.delete_memory()`

##### **Advanced Search Operations (2 tools):**
6. **`search_by_time`** - **MEDIUM PRIORITY**
   - **Current Issues**: Duplicated logic in 2 files
   - **Files**: `mcp_server.py` (lines 442-516), `web/api/mcp.py` (lines 417-468)
   - **Service Method Needed**: `MemoryService.search_by_time()`

7. **`search_similar`** - **MEDIUM PRIORITY**
   - **Current Issues**: Duplicated logic in 2 files
   - **Files**: `mcp_server.py` (lines 518-584), `web/api/mcp.py` (lines 470-512)
   - **Service Method Needed**: `MemoryService.search_similar()`

##### **Health Check (1 tool):**
8. **`check_database_health`** - **LOW PRIORITY**
   - **Current Issues**: Duplicated logic in 2 files
   - **Files**: `mcp_server.py` (lines 362-394), `web/api/mcp.py` (lines 382-395)
   - **Service Method Needed**: `MemoryService.check_database_health()`

### Refactoring Progress Tracking

| Tool | Priority | Status | Service Method | MCP Server | API Endpoint | MCP API |
|------|----------|--------|----------------|------------|--------------|---------|
| `list_memories` | HIGH | ✅ DONE | ✅ `list_memories()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `store_memory` | HIGH | ✅ DONE | ✅ `store_memory()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `retrieve_memory` | HIGH | ✅ DONE | ✅ `retrieve_memory()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `search_by_tag` | HIGH | ✅ DONE | ✅ `search_by_tag()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `delete_memory` | HIGH | ✅ DONE | ✅ `delete_memory()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `search_by_time` | MEDIUM | ✅ DONE | ✅ `search_by_time()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `search_similar` | MEDIUM | ✅ DONE | ✅ `search_similar()` | ✅ Refactored | ✅ Refactored | ✅ Refactored |
| `check_database_health` | LOW | ✅ DONE | ✅ `check_database_health()` | ✅ Refactored | N/A | ✅ Refactored |

### Implementation Plan

#### **Phase 2A: Core Operations (High Priority)**
1. ✅ **COMPLETED** - Create `MemoryService.store_memory()` method
2. ✅ **COMPLETED** - Create `MemoryService.retrieve_memory()` method
3. ✅ **COMPLETED** - Create `MemoryService.search_by_tag()` method
4. ✅ **COMPLETED** - Create `MemoryService.delete_memory()` method
5. ✅ **COMPLETED** - Refactor all 3 interfaces to use new service methods

#### **Phase 2A.1: `store_memory` Refactoring - COMPLETED ✅**

**Service Method Created:**
- ✅ `MemoryService.store_memory()` - API-based implementation
- ✅ Hostname priority: Client → HTTP Header → Server
- ✅ Content hash generation with metadata
- ✅ Complete error handling and logging
- ✅ Memory object creation and storage

**Interfaces Refactored:**
- ✅ **MCP Server** - Uses `MemoryService.store_memory()`
- ✅ **API Endpoint** - Uses `MemoryService.store_memory()` with SSE events
- ✅ **MCP API** - Uses `MemoryService.store_memory()`

**Testing Completed:**
- ✅ **Manual Testing** - Both user and AI tested successfully
- ✅ **Sample Data Storage** - Verified with real data
- ✅ **Tag and Metadata Handling** - Confirmed working
- ✅ **Client Hostname Processing** - Verified automatic addition
- ✅ **Content Hash Generation** - Confirmed consistency
- ✅ **Memory Retrieval** - Verified stored memories can be found

**Code Reduction:**
- ✅ **~70% reduction** in duplicated business logic
- ✅ **Single source of truth** for memory storage
- ✅ **Consistent behavior** across all interfaces

#### **Phase 2A.2: `retrieve_memory` Refactoring - COMPLETED ✅**

**Service Method Created:**
- ✅ `MemoryService.retrieve_memory()` - API-based implementation (`/api/search`)
- ✅ Uses exact API logic as source of truth
- ✅ Handles semantic search, similarity filtering, processing time
- ✅ Returns consistent response format with `SearchResult` structure

**Interfaces Refactored:**
- ✅ **API Endpoint** - Refactored to use service method (eliminated duplication)
- ✅ **MCP Server** - Refactored to use service method
- ✅ **MCP API** - Refactored to use service method

**Testing Completed:**
- ✅ **Exact Matches** - Perfect similarity scores (1.0) for identical content
- ✅ **Partial Matches** - Reasonable similarity scores (0.121, 0.118, 0.135)
- ✅ **Similarity Filtering** - Threshold filtering working correctly
- ✅ **Processing Time** - Timing metrics included (~13ms)
- ✅ **Response Format** - Consistent across all interfaces
- ✅ **Manual Testing** - User tested with real queries and thresholds
- ✅ **Production Ready** - All interfaces working correctly in live environment

**Key Features:**
- ✅ **Semantic Search**: Uses vector embeddings for similarity
- ✅ **Similarity Filtering**: Post-processing threshold filtering
- ✅ **Processing Time**: Includes timing metrics
- ✅ **Relevance Reasoning**: Explains why results were included
- ✅ **SSE Events**: Maintains real-time event broadcasting

**Code Reduction:**
- ✅ **~60% reduction** in duplicated search logic
- ✅ **Single source of truth** for memory retrieval
- ✅ **Consistent behavior** across all interfaces

#### **Phase 2A.3: `search_by_tag` Refactoring - COMPLETED ✅**

**Service Method Created:**
- ✅ `MemoryService.search_by_tag()` - API-based implementation (`/api/search/by-tag`)
- ✅ Uses exact API logic as source of truth
- ✅ Handles tag filtering with AND/OR operations (match_all parameter)
- ✅ Returns consistent response format with `SearchResult` structure
- ✅ Processing time metrics and proper error handling

**Interfaces Refactored:**
- ✅ **API Endpoint** - Refactored to use service method (eliminated duplication)
- ✅ **MCP Server** - Refactored to use service method with parameter conversion
- ✅ **MCP API** - Refactored to use service method while preserving string parsing

**Testing Completed:**
- ✅ **Tag Matching** - Both ANY and ALL tag matching modes working correctly
- ✅ **Parameter Conversion** - Proper handling of operation string vs match_all boolean
- ✅ **Response Format** - Consistent SearchResult format across all interfaces
- ✅ **Error Handling** - Validation errors properly handled and converted
- ✅ **Manual Testing** - User tested with real tag queries and confirmed working
- ✅ **Production Ready** - All interfaces working correctly in live environment

**Key Features:**
- ✅ **Tag Search**: Finds memories containing specified tags
- ✅ **AND/OR Operations**: Supports both any tag match and all tags match
- ✅ **Processing Time**: Includes timing metrics for performance monitoring
- ✅ **Relevance Reasoning**: Explains which tags matched for transparency
- ✅ **SSE Events**: Maintains real-time event broadcasting

**Code Reduction:**
- ✅ **~65% reduction** in duplicated tag search logic
- ✅ **Single source of truth** for tag-based memory search
- ✅ **Consistent behavior** across all interfaces

#### **Phase 2A.4: `delete_memory` Refactoring - COMPLETED ✅**

**Service Method Created:**
- ✅ `MemoryService.delete_memory()` - API-based implementation (`/api/memories/{content_hash}`)
- ✅ Uses exact API logic as source of truth
- ✅ Handles content hash validation and storage layer deletion
- ✅ Returns consistent response format with success/message/content_hash
- ✅ Comprehensive error handling and logging

**Interfaces Refactored:**
- ✅ **API Endpoint** - Refactored to use service method (eliminated duplication)
- ✅ **MCP Server** - Refactored to use service method
- ✅ **MCP API** - Refactored to use service method

**Testing Completed:**
- ✅ **Service Method Testing** - Direct testing of MemoryService.delete_memory()
- ✅ **Storage Integration** - Verified memory creation and deletion workflow
- ✅ **Manual Testing** - User tested with real memory hashes and confirmed working
- ✅ **Production Ready** - All interfaces working correctly in live environment

**Key Features:**
- ✅ **Content Hash Validation**: Validates input parameters before processing
- ✅ **Storage Integration**: Uses storage layer delete() method for consistency
- ✅ **Error Handling**: Comprehensive error handling with detailed messages
- ✅ **Response Consistency**: Uniform response format across all interfaces
- ✅ **SSE Events**: Maintains real-time event broadcasting for web dashboard

**Code Reduction:**
- ✅ **~70% reduction** in duplicated deletion logic
- ✅ **Single source of truth** for memory deletion
- ✅ **Consistent behavior** across all interfaces

#### **Phase 2B: Advanced Search (Medium Priority)**
6. ✅ **COMPLETED** - Create `MemoryService.search_by_time()` method
7. ✅ **COMPLETED** - Create `MemoryService.search_similar()` method
8. ✅ **COMPLETED** - Refactor MCP server and MCP API to use new service methods

#### **Phase 2C: Health Check (Low Priority) - COMPLETED ✅**

**Service Method Created:**
- ✅ `MemoryService.check_database_health()` - MCP Server-based implementation
- ✅ Handles both async and sync storage `get_stats()` methods
- ✅ Maps storage backend fields to consistent health check format
- ✅ Includes comprehensive statistics: memories, tags, storage size, embedding info
- ✅ Complete error handling with detailed error responses

**Interfaces Refactored:**
- ✅ **MCP Server** - Uses `MemoryService.check_database_health()`
- ✅ **MCP API** - Uses `MemoryService.check_database_health()`

**Key Features:**
- ✅ **Field Mapping**: Handles variations between storage backends (`unique_tags` → `total_tags`, `database_size_mb` → formatted size)
- ✅ **Async/Sync Compatibility**: Detects and handles both async and sync `get_stats()` methods
- ✅ **Comprehensive Statistics**: Includes embedding model info, storage size, and backend details
- ✅ **Error Handling**: Proper error responses for storage backend failures
- ✅ **Consistent Format**: Unified health check response across all interfaces

**Testing Completed:**
- ✅ **Field Mapping Fix** - Resolved user-reported issues with `total_tags`, `storage_size`, and `timestamp` fields
- ✅ **Storage Backend Integration** - Verified compatibility with SQLite-Vec storage
- ✅ **Manual Testing** - User confirmed health check now returns proper field values
- ✅ **Production Ready** - All interfaces working correctly with enhanced statistics

**Code Reduction:**
- ✅ **~60% reduction** in duplicated health check logic
- ✅ **Single source of truth** for database health monitoring
- ✅ **Consistent behavior** across all interfaces

### Expected Benefits

- **Consistency**: All 8 tools will have identical behavior across all interfaces
- **Maintainability**: Single source of truth for all memory operations
- **Code Reduction**: ~70% reduction in duplicated business logic
- **Reliability**: Centralized error handling and validation
- **Testability**: Service layer can be unit tested independently

### Success Metrics

- ✅ **Zero Code Duplication**: No business logic duplicated across interfaces
- ✅ **100% Consistency**: All tools behave identically regardless of interface
- ✅ **Single Source of Truth**: All operations go through `MemoryService`
- ✅ **Comprehensive Testing**: Service layer fully tested independently

```

--------------------------------------------------------------------------------
/src/mcp_memory_service/consolidation/scheduler.py:
--------------------------------------------------------------------------------

```python
# Copyright 2024 Heinrich Krupp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""APScheduler integration for autonomous consolidation operations."""

import asyncio
import logging
from typing import Dict, Any, Optional, Callable, Awaitable
from datetime import datetime, timedelta

try:
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    from apscheduler.triggers.cron import CronTrigger
    from apscheduler.triggers.interval import IntervalTrigger
    from apscheduler.jobstores.memory import MemoryJobStore
    from apscheduler.executors.asyncio import AsyncIOExecutor
    from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
    APSCHEDULER_AVAILABLE = True
except ImportError:
    APSCHEDULER_AVAILABLE = False

from .consolidator import DreamInspiredConsolidator
from .base import ConsolidationConfig

class ConsolidationScheduler:
    """
    Scheduler for autonomous consolidation operations.

    Integrates with APScheduler to run consolidation operations at specified intervals
    based on time horizons (daily, weekly, monthly, quarterly, yearly).

    If APScheduler is not installed, or scheduling is disabled by configuration,
    ``self.scheduler`` is ``None`` and every public method becomes a no-op that
    returns ``False`` (or a "disabled" status dict).
    """

    def __init__(
        self,
        consolidator: DreamInspiredConsolidator,
        schedule_config: Dict[str, str],
        enabled: bool = True
    ):
        """
        Args:
            consolidator: Engine that performs the actual consolidation work.
            schedule_config: Maps each time horizon name ('daily', 'weekly',
                'monthly', 'quarterly', 'yearly') to a schedule spec string
                (see _create_trigger for the per-horizon formats) or 'disabled'.
            enabled: When False, no scheduler is created and scheduling is disabled.
        """
        self.consolidator = consolidator
        self.schedule_config = schedule_config
        self.enabled = enabled
        self.logger = logging.getLogger(__name__)

        # Job execution tracking
        self.job_history = []  # bounded list of per-job result records (last 100)
        self.last_execution_times = {}  # horizon -> start time of last successful run
        self.execution_stats = {
            'total_jobs': 0,
            'successful_jobs': 0,
            'failed_jobs': 0
        }

        # Initialize scheduler if APScheduler is available
        if APSCHEDULER_AVAILABLE and enabled:
            self.scheduler = AsyncIOScheduler(
                jobstores={'default': MemoryJobStore()},
                executors={'default': AsyncIOExecutor()},
                job_defaults={
                    'coalesce': True,  # Combine multiple pending executions
                    'max_instances': 1,  # Only one instance of each job at a time
                    'misfire_grace_time': 3600  # 1 hour grace period for missed jobs
                }
            )

            # Add event listeners
            self.scheduler.add_listener(self._job_executed_listener, EVENT_JOB_EXECUTED)
            self.scheduler.add_listener(self._job_error_listener, EVENT_JOB_ERROR)
        else:
            self.scheduler = None
            if not APSCHEDULER_AVAILABLE:
                self.logger.warning("APScheduler not available - consolidation scheduling disabled")
            elif not enabled:
                self.logger.info("Consolidation scheduling disabled by configuration")

    async def start(self) -> bool:
        """Start the consolidation scheduler.

        Returns:
            True if the scheduler was started (jobs scheduled), False if the
            scheduler is unavailable or startup failed.
        """
        if not self.scheduler:
            return False

        try:
            # Add consolidation jobs based on configuration
            self._schedule_consolidation_jobs()

            # Start the scheduler
            self.scheduler.start()
            self.logger.info("Consolidation scheduler started successfully")

            # Log scheduled jobs
            jobs = self.scheduler.get_jobs()
            for job in jobs:
                self.logger.info(f"Scheduled job: {job.id} - next run: {job.next_run_time}")

            return True

        except Exception as e:
            self.logger.error(f"Failed to start consolidation scheduler: {e}")
            return False

    async def stop(self) -> bool:
        """Stop the consolidation scheduler.

        Returns:
            True on clean shutdown (or when no scheduler exists), False on error.
        """
        if not self.scheduler:
            return True

        try:
            self.scheduler.shutdown(wait=True)
            self.logger.info("Consolidation scheduler stopped")
            return True
        except Exception as e:
            self.logger.error(f"Error stopping consolidation scheduler: {e}")
            return False

    def _schedule_consolidation_jobs(self):
        """Schedule one recurring consolidation job per enabled time horizon.

        Horizons whose schedule spec is 'disabled' (or missing) are skipped;
        per-horizon scheduling errors are logged and do not affect other horizons.
        """
        time_horizons = ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']

        for horizon in time_horizons:
            schedule_spec = self.schedule_config.get(horizon, 'disabled')

            if schedule_spec == 'disabled':
                self.logger.debug(f"Consolidation for {horizon} horizon is disabled")
                continue

            try:
                trigger = self._create_trigger(horizon, schedule_spec)
                if trigger:
                    job_id = f"consolidation_{horizon}"
                    self.scheduler.add_job(
                        func=self._run_consolidation_job,
                        trigger=trigger,
                        args=[horizon],
                        id=job_id,
                        name=f"Consolidation - {horizon.title()}",
                        replace_existing=True
                    )
                    self.logger.info(f"Scheduled {horizon} consolidation: {schedule_spec}")

            except Exception as e:
                self.logger.error(f"Error scheduling {horizon} consolidation: {e}")

    def _create_trigger(self, horizon: str, schedule_spec: str):
        """Create an APScheduler CronTrigger from a schedule specification.

        Supported formats by horizon:
            daily:     "HH:MM"        (e.g. "02:00")
            weekly:    "DAY HH:MM"    (e.g. "SUN 03:00")
            monthly:   "DD HH:MM"     (e.g. "01 04:00")
            quarterly: "MM-DD HH:MM"  (month part ignored; runs Jan/Apr/Jul/Oct)
            yearly:    "MM-DD HH:MM"  (e.g. "01-01 06:00")

        Returns:
            A CronTrigger, or None if the horizon or spec is invalid
            (errors are logged, not raised).
        """
        try:
            if horizon == 'daily':
                # Daily format: "HH:MM" (e.g., "02:00")
                hour, minute = map(int, schedule_spec.split(':'))
                return CronTrigger(hour=hour, minute=minute)

            elif horizon == 'weekly':
                # Weekly format: "DAY HH:MM" (e.g., "SUN 03:00")
                day_time = schedule_spec.split(' ')
                if len(day_time) != 2:
                    raise ValueError(f"Invalid weekly schedule format: {schedule_spec}")

                # APScheduler day_of_week: 0=Monday .. 6=Sunday
                day_map = {
                    'MON': 0, 'TUE': 1, 'WED': 2, 'THU': 3,
                    'FRI': 4, 'SAT': 5, 'SUN': 6
                }

                day = day_map.get(day_time[0].upper())
                if day is None:
                    raise ValueError(f"Invalid day: {day_time[0]}")

                hour, minute = map(int, day_time[1].split(':'))
                return CronTrigger(day_of_week=day, hour=hour, minute=minute)

            elif horizon == 'monthly':
                # Monthly format: "DD HH:MM" (e.g., "01 04:00")
                day_time = schedule_spec.split(' ')
                if len(day_time) != 2:
                    raise ValueError(f"Invalid monthly schedule format: {schedule_spec}")

                day = int(day_time[0])
                hour, minute = map(int, day_time[1].split(':'))
                return CronTrigger(day=day, hour=hour, minute=minute)

            elif horizon == 'quarterly':
                # Quarterly format: "MM-DD HH:MM" (e.g., "01-01 05:00")
                # Run on the first day of quarters (Jan, Apr, Jul, Oct)
                parts = schedule_spec.split(' ')
                if len(parts) != 2:
                    raise ValueError(f"Invalid quarterly schedule format: {schedule_spec}")

                month_day = parts[0].split('-')
                if len(month_day) != 2:
                    raise ValueError(f"Invalid quarterly date format: {parts[0]}")

                # NOTE: the month component of the spec is intentionally ignored;
                # only the day-of-month is used, on each quarter's first month.
                day = int(month_day[1])
                hour, minute = map(int, parts[1].split(':'))

                # Quarters: Jan(1), Apr(4), Jul(7), Oct(10)
                return CronTrigger(month='1,4,7,10', day=day, hour=hour, minute=minute)

            elif horizon == 'yearly':
                # Yearly format: "MM-DD HH:MM" (e.g., "01-01 06:00")
                parts = schedule_spec.split(' ')
                if len(parts) != 2:
                    raise ValueError(f"Invalid yearly schedule format: {schedule_spec}")

                month_day = parts[0].split('-')
                if len(month_day) != 2:
                    raise ValueError(f"Invalid yearly date format: {parts[0]}")

                month = int(month_day[0])
                day = int(month_day[1])
                hour, minute = map(int, parts[1].split(':'))

                return CronTrigger(month=month, day=day, hour=hour, minute=minute)

            else:
                self.logger.error(f"Unknown time horizon: {horizon}")
                return None

        except Exception as e:
            self.logger.error(f"Error creating trigger for {horizon} with spec '{schedule_spec}': {e}")
            return None

    async def _run_consolidation_job(self, time_horizon: str):
        """Execute a consolidation job for the specified time horizon.

        Records the outcome (success or failure) in the execution stats and
        job history. Re-raises failures so APScheduler emits EVENT_JOB_ERROR.
        """
        job_start_time = datetime.now()
        self.logger.info(f"Starting scheduled {time_horizon} consolidation")

        try:
            # Run the consolidation
            report = await self.consolidator.consolidate(time_horizon)

            # Record successful execution
            self.execution_stats['successful_jobs'] += 1
            self.last_execution_times[time_horizon] = job_start_time

            # Add to job history
            job_record = {
                'time_horizon': time_horizon,
                'start_time': job_start_time,
                'end_time': datetime.now(),
                'status': 'success',
                'memories_processed': report.memories_processed,
                'associations_discovered': report.associations_discovered,
                'clusters_created': report.clusters_created,
                'memories_compressed': report.memories_compressed,
                'memories_archived': report.memories_archived,
                'errors': report.errors
            }

            self._add_job_to_history(job_record)

            # Log success
            duration = (job_record['end_time'] - job_record['start_time']).total_seconds()
            self.logger.info(
                f"Completed {time_horizon} consolidation successfully in {duration:.2f}s: "
                f"{report.memories_processed} memories processed, "
                f"{report.associations_discovered} associations, "
                f"{report.clusters_created} clusters, "
                f"{report.memories_compressed} compressed, "
                f"{report.memories_archived} archived"
            )

        except Exception as e:
            # Record failed execution
            self.execution_stats['failed_jobs'] += 1

            job_record = {
                'time_horizon': time_horizon,
                'start_time': job_start_time,
                'end_time': datetime.now(),
                'status': 'failed',
                'error': str(e),
                'memories_processed': 0,
                'associations_discovered': 0,
                'clusters_created': 0,
                'memories_compressed': 0,
                'memories_archived': 0,
                'errors': [str(e)]
            }

            self._add_job_to_history(job_record)

            self.logger.error(f"Failed {time_horizon} consolidation: {e}")
            raise

    def _add_job_to_history(self, job_record: Dict[str, Any]):
        """Add job record to history, keeping only the most recent 100 records."""
        self.job_history.append(job_record)

        # Keep only last 100 job records
        if len(self.job_history) > 100:
            self.job_history = self.job_history[-100:]

    def _job_executed_listener(self, event):
        """Handle successful job execution events (EVENT_JOB_EXECUTED)."""
        self.execution_stats['total_jobs'] += 1
        self.logger.debug(f"Job executed: {event.job_id}")

    def _job_error_listener(self, event):
        """Handle job error events (EVENT_JOB_ERROR).

        Also counts toward total_jobs so that total_jobs reflects every job
        execution (previously only successful executions were counted and
        total_jobs != successful_jobs + failed_jobs).
        """
        self.execution_stats['total_jobs'] += 1
        self.logger.error(f"Job error: {event.job_id} - {event.exception}")

    async def trigger_consolidation(self, time_horizon: str, immediate: bool = True) -> bool:
        """Manually trigger a consolidation job.

        Args:
            time_horizon: Horizon to consolidate ('daily', 'weekly', ...).
            immediate: If True, run the job right now in-line; otherwise
                schedule a one-shot run one minute from now.

        Returns:
            True if the job ran or was scheduled, False otherwise.
        """
        if not self.scheduler:
            self.logger.error("Scheduler not available")
            return False

        try:
            if immediate:
                # Run immediately
                await self._run_consolidation_job(time_horizon)
                return True
            else:
                # Schedule a single run one minute from now.
                # BUGFIX: this previously used IntervalTrigger(seconds=60),
                # which fires every 60 seconds indefinitely rather than once.
                # DateTrigger fires exactly one time at the given instant.
                # Local import is safe here: self.scheduler is non-None, so
                # APScheduler is installed.
                from apscheduler.triggers.date import DateTrigger

                job_id = f"manual_consolidation_{time_horizon}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
                trigger = DateTrigger(run_date=datetime.now() + timedelta(minutes=1))

                self.scheduler.add_job(
                    func=self._run_consolidation_job,
                    trigger=trigger,
                    args=[time_horizon],
                    id=job_id,
                    name=f"Manual Consolidation - {time_horizon.title()}",
                    max_instances=1
                )

                self.logger.info(f"Scheduled manual {time_horizon} consolidation")
                return True

        except Exception as e:
            self.logger.error(f"Error triggering {time_horizon} consolidation: {e}")
            return False

    async def get_scheduler_status(self) -> Dict[str, Any]:
        """Get scheduler status and job information.

        Returns:
            A dict with 'enabled' plus, when the scheduler exists: 'running',
            'jobs', 'execution_stats', 'last_execution_times' (ISO strings),
            and the 10 most recent job records.
        """
        if not self.scheduler:
            return {
                'enabled': False,
                'reason': 'APScheduler not available or disabled'
            }

        jobs = self.scheduler.get_jobs()
        job_info = []

        for job in jobs:
            job_info.append({
                'id': job.id,
                'name': job.name,
                'next_run_time': job.next_run_time.isoformat() if job.next_run_time else None,
                'trigger': str(job.trigger)
            })

        return {
            'enabled': True,
            'running': self.scheduler.running,
            'jobs': job_info,
            'execution_stats': self.execution_stats.copy(),
            'last_execution_times': {
                horizon: time.isoformat() for horizon, time in self.last_execution_times.items()
            },
            'recent_jobs': self.job_history[-10:]  # Last 10 jobs
        }

    async def update_schedule(self, new_schedule_config: Dict[str, str]) -> bool:
        """Replace the consolidation schedule with a new configuration.

        Removes all existing consolidation jobs, stores the new configuration,
        and re-schedules jobs from it.
        """
        if not self.scheduler:
            return False

        try:
            # Remove existing consolidation jobs
            job_ids = [f"consolidation_{horizon}" for horizon in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']]

            for job_id in job_ids:
                if self.scheduler.get_job(job_id):
                    self.scheduler.remove_job(job_id)

            # Update configuration
            self.schedule_config = new_schedule_config

            # Re-schedule jobs
            self._schedule_consolidation_jobs()

            self.logger.info("Consolidation schedule updated successfully")
            return True

        except Exception as e:
            self.logger.error(f"Error updating consolidation schedule: {e}")
            return False

    async def pause_consolidation(self, time_horizon: Optional[str] = None) -> bool:
        """Pause consolidation jobs (all or specific horizon).

        Args:
            time_horizon: If given, pause only that horizon's job; otherwise
                pause every job whose id starts with 'consolidation_'.
        """
        if not self.scheduler:
            return False

        try:
            if time_horizon:
                job_id = f"consolidation_{time_horizon}"
                job = self.scheduler.get_job(job_id)
                if job:
                    self.scheduler.pause_job(job_id)
                    self.logger.info(f"Paused {time_horizon} consolidation")
                else:
                    self.logger.warning(f"No job found for {time_horizon} consolidation")
            else:
                # Pause all consolidation jobs
                jobs = self.scheduler.get_jobs()
                for job in jobs:
                    if job.id.startswith('consolidation_'):
                        self.scheduler.pause_job(job.id)

                self.logger.info("Paused all consolidation jobs")

            return True

        except Exception as e:
            self.logger.error(f"Error pausing consolidation: {e}")
            return False

    async def resume_consolidation(self, time_horizon: Optional[str] = None) -> bool:
        """Resume consolidation jobs (all or specific horizon).

        Args:
            time_horizon: If given, resume only that horizon's job; otherwise
                resume every job whose id starts with 'consolidation_'.
        """
        if not self.scheduler:
            return False

        try:
            if time_horizon:
                job_id = f"consolidation_{time_horizon}"
                job = self.scheduler.get_job(job_id)
                if job:
                    self.scheduler.resume_job(job_id)
                    self.logger.info(f"Resumed {time_horizon} consolidation")
                else:
                    self.logger.warning(f"No job found for {time_horizon} consolidation")
            else:
                # Resume all consolidation jobs
                jobs = self.scheduler.get_jobs()
                for job in jobs:
                    if job.id.startswith('consolidation_'):
                        self.scheduler.resume_job(job.id)

                self.logger.info("Resumed all consolidation jobs")

            return True

        except Exception as e:
            self.logger.error(f"Error resuming consolidation: {e}")
            return False
```

--------------------------------------------------------------------------------
/tests/consolidation/test_consolidator.py:
--------------------------------------------------------------------------------

```python
"""Integration tests for the main dream-inspired consolidator."""

import pytest
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, MagicMock

from mcp_memory_service.consolidation.consolidator import DreamInspiredConsolidator
from mcp_memory_service.consolidation.base import ConsolidationReport
from mcp_memory_service.models.memory import Memory


@pytest.mark.integration
class TestDreamInspiredConsolidator:
    """Test the main consolidation orchestrator.

    Exercises the full dream-inspired consolidation pipeline across all
    time horizons, plus health checks, recommendations, statistics,
    error handling, and configuration toggles.
    """

    @staticmethod
    def _make_toggle_config(features_enabled: bool):
        """Build a lightweight config object with every feature flag set to *features_enabled*.

        Shared tuning parameters (similarity bounds, cluster size, etc.)
        are identical for both variants so tests only differ in which
        pipeline stages are enabled.
        """
        return type('Config', (), {
            'decay_enabled': features_enabled,
            'associations_enabled': features_enabled,
            'clustering_enabled': features_enabled,
            'compression_enabled': features_enabled,
            'forgetting_enabled': features_enabled,
            'retention_periods': {'standard': 30},
            'min_similarity': 0.3,
            'max_similarity': 0.7,
            'max_pairs_per_run': 50,
            'min_cluster_size': 3,
            'clustering_algorithm': 'simple',
            'max_summary_length': 200,
            'preserve_originals': True,
            'relevance_threshold': 0.1,
            'access_threshold_days': 30,
            'archive_location': None
        })()

    @pytest.fixture
    def consolidator(self, mock_storage, consolidation_config):
        return DreamInspiredConsolidator(mock_storage, consolidation_config)

    @pytest.mark.asyncio
    async def test_basic_consolidation_pipeline(self, consolidator, mock_storage):
        """Test the complete consolidation pipeline."""
        report = await consolidator.consolidate("weekly")

        assert isinstance(report, ConsolidationReport)
        assert report.time_horizon == "weekly"
        assert isinstance(report.start_time, datetime)
        assert isinstance(report.end_time, datetime)
        assert report.end_time >= report.start_time
        assert report.memories_processed >= 0
        assert report.associations_discovered >= 0
        assert report.clusters_created >= 0
        assert report.memories_compressed >= 0
        assert report.memories_archived >= 0
        assert isinstance(report.errors, list)
        assert isinstance(report.performance_metrics, dict)

    @pytest.mark.asyncio
    async def test_daily_consolidation(self, consolidator):
        """Test daily consolidation (light processing)."""
        report = await consolidator.consolidate("daily")

        assert report.time_horizon == "daily"
        # Daily consolidation should be lighter - less intensive operations
        assert isinstance(report, ConsolidationReport)

    @pytest.mark.asyncio
    async def test_weekly_consolidation(self, consolidator):
        """Test weekly consolidation (includes associations)."""
        report = await consolidator.consolidate("weekly")

        assert report.time_horizon == "weekly"
        # Weekly should include association discovery
        assert isinstance(report, ConsolidationReport)

    @pytest.mark.asyncio
    async def test_monthly_consolidation(self, consolidator):
        """Test monthly consolidation (includes forgetting)."""
        report = await consolidator.consolidate("monthly")

        assert report.time_horizon == "monthly"
        # Monthly should include more comprehensive processing
        assert isinstance(report, ConsolidationReport)

    @pytest.mark.asyncio
    async def test_quarterly_consolidation(self, consolidator):
        """Test quarterly consolidation (deep processing)."""
        report = await consolidator.consolidate("quarterly")

        assert report.time_horizon == "quarterly"
        # Quarterly should include all processing steps
        assert isinstance(report, ConsolidationReport)

    @pytest.mark.asyncio
    async def test_yearly_consolidation(self, consolidator):
        """Test yearly consolidation (full processing)."""
        report = await consolidator.consolidate("yearly")

        assert report.time_horizon == "yearly"
        # Yearly should include comprehensive forgetting
        assert isinstance(report, ConsolidationReport)

    @pytest.mark.asyncio
    async def test_invalid_time_horizon(self, consolidator):
        """Test handling of invalid time horizon."""
        from mcp_memory_service.consolidation.base import ConsolidationError
        with pytest.raises(ConsolidationError):
            await consolidator.consolidate("invalid_horizon")

    @pytest.mark.asyncio
    async def test_empty_memory_set(self, consolidation_config):
        """Test consolidation with empty memory set."""
        # Create storage with no memories
        empty_storage = AsyncMock()
        empty_storage.get_all_memories.return_value = []
        empty_storage.get_memories_by_time_range.return_value = []
        empty_storage.get_memory_connections.return_value = {}
        empty_storage.get_access_patterns.return_value = {}

        consolidator = DreamInspiredConsolidator(empty_storage, consolidation_config)

        report = await consolidator.consolidate("weekly")

        # With nothing to process, every counter must remain at zero.
        assert report.memories_processed == 0
        assert report.associations_discovered == 0
        assert report.clusters_created == 0
        assert report.memories_compressed == 0
        assert report.memories_archived == 0

    @pytest.mark.asyncio
    async def test_memories_by_time_range_retrieval(self, consolidator, mock_storage):
        """Test retrieval of memories by time range for daily processing."""
        # Mock the time range method to return specific memories
        recent_memories = mock_storage.memories.copy()
        mock_storage.get_memories_by_time_range = AsyncMock(return_value=list(recent_memories.values())[:3])

        report = await consolidator.consolidate("daily")

        # Should have called the time range method for daily processing
        mock_storage.get_memories_by_time_range.assert_called_once()
        assert report.memories_processed >= 0

    @pytest.mark.asyncio
    async def test_association_storage(self, consolidator, mock_storage):
        """Test that discovered associations are stored as memories."""
        original_memory_count = len(mock_storage.memories)

        # Run consolidation that should discover associations
        report = await consolidator.consolidate("weekly")

        # Check if new association memories were added
        current_memory_count = len(mock_storage.memories)

        # May or may not find associations depending on similarity
        # Just ensure no errors occurred
        assert isinstance(report, ConsolidationReport)
        assert current_memory_count >= original_memory_count

    @pytest.mark.asyncio
    async def test_health_check(self, consolidator):
        """Test consolidation system health check."""
        health = await consolidator.health_check()

        assert isinstance(health, dict)
        assert "status" in health
        assert "timestamp" in health
        assert "components" in health
        assert "statistics" in health

        # Check component health
        expected_components = [
            "decay_calculator",
            "association_engine",
            "clustering_engine",
            "compression_engine",
            "forgetting_engine"
        ]

        for component in expected_components:
            assert component in health["components"]
            assert "status" in health["components"][component]

    @pytest.mark.asyncio
    async def test_consolidation_recommendations(self, consolidator):
        """Test consolidation recommendations."""
        recommendations = await consolidator.get_consolidation_recommendations("weekly")

        assert isinstance(recommendations, dict)
        assert "recommendation" in recommendations
        assert "memory_count" in recommendations

        # Check recommendation types
        valid_recommendations = ["no_action", "consolidation_beneficial", "optional", "error"]
        assert recommendations["recommendation"] in valid_recommendations

        if recommendations["recommendation"] != "error":
            assert "reasons" in recommendations
            assert isinstance(recommendations["reasons"], list)

    @pytest.mark.asyncio
    async def test_performance_metrics(self, consolidator):
        """Test performance metrics collection."""
        report = await consolidator.consolidate("daily")

        # hasattr is the idiomatic attribute check (also works for slots/properties)
        assert hasattr(report, "performance_metrics")
        metrics = report.performance_metrics

        assert "duration_seconds" in metrics
        assert "memories_per_second" in metrics
        assert "success" in metrics

        assert isinstance(metrics["duration_seconds"], float)
        assert metrics["duration_seconds"] >= 0
        assert isinstance(metrics["memories_per_second"], (int, float))
        assert isinstance(metrics["success"], bool)

    @pytest.mark.asyncio
    async def test_consolidation_statistics_tracking(self, consolidator):
        """Test that consolidation statistics are tracked."""
        initial_stats = consolidator.consolidation_stats.copy()

        # Run consolidation
        await consolidator.consolidate("weekly")

        # Check that stats were updated
        assert consolidator.consolidation_stats["total_runs"] == initial_stats["total_runs"] + 1

        # Check other stats (may or may not be incremented depending on processing)
        for key in ["successful_runs", "total_memories_processed", "total_associations_created"]:
            assert consolidator.consolidation_stats[key] >= initial_stats[key]

    @pytest.mark.asyncio
    async def test_error_handling_in_pipeline(self, consolidation_config):
        """Test error handling in the consolidation pipeline."""
        # Create storage that raises errors
        error_storage = AsyncMock()
        error_storage.get_all_memories.side_effect = Exception("Storage error")
        error_storage.get_memories_by_time_range.side_effect = Exception("Storage error")

        consolidator = DreamInspiredConsolidator(error_storage, consolidation_config)

        report = await consolidator.consolidate("weekly")

        # Should handle errors gracefully
        assert len(report.errors) > 0
        assert report.performance_metrics["success"] is False

    @pytest.mark.asyncio
    async def test_component_integration(self, consolidator, mock_storage):
        """Test integration between different consolidation components."""
        # Ensure we have enough memories for meaningful processing
        if len(mock_storage.memories) < 5:
            # Add more memories for testing
            base_time = datetime.now().timestamp()
            for i in range(10):
                memory = Memory(
                    content=f"Integration test memory {i} with content",
                    content_hash=f"integration_{i}",
                    tags=["integration", "test"],
                    embedding=[0.1 + i*0.01] * 320,
                    created_at=base_time - (i * 3600)
                )
                mock_storage.memories[memory.content_hash] = memory

        # Run full consolidation
        report = await consolidator.consolidate("monthly")

        # Verify that components worked together
        assert report.memories_processed > 0

        # Check that the pipeline completed successfully
        assert report.performance_metrics["success"] is True

    @pytest.mark.asyncio
    async def test_time_horizon_specific_processing(self, consolidator):
        """Test that different time horizons trigger appropriate processing."""
        # Test that weekly includes associations but not intensive forgetting
        weekly_report = await consolidator.consolidate("weekly")

        # Test that monthly includes forgetting
        monthly_report = await consolidator.consolidate("monthly")

        # Both should complete successfully
        assert weekly_report.performance_metrics["success"] is True
        assert monthly_report.performance_metrics["success"] is True

        # Monthly might have more archived memories (if forgetting triggered)
        # But this depends on the actual memory state, so just verify structure
        assert isinstance(weekly_report.memories_archived, int)
        assert isinstance(monthly_report.memories_archived, int)

    @pytest.mark.asyncio
    async def test_concurrent_consolidation_prevention(self, consolidator):
        """Test that the system handles concurrent consolidation requests appropriately."""
        # Start two consolidations concurrently
        import asyncio

        task1 = asyncio.create_task(consolidator.consolidate("daily"))
        task2 = asyncio.create_task(consolidator.consolidate("weekly"))

        # Both should complete (the system should handle concurrency)
        report1, report2 = await asyncio.gather(task1, task2)

        assert isinstance(report1, ConsolidationReport)
        assert isinstance(report2, ConsolidationReport)
        assert report1.time_horizon == "daily"
        assert report2.time_horizon == "weekly"

    @pytest.mark.asyncio
    async def test_memory_metadata_updates(self, consolidator, mock_storage):
        """Test that memory metadata is updated during consolidation."""
        original_memories = list(mock_storage.memories.values())

        # Run consolidation
        await consolidator.consolidate("weekly")

        # Check that memories exist (update_memory would have been called internally)
        # Since the mock doesn't track calls, we just verify the process completed
        current_memories = list(mock_storage.memories.values())
        assert len(current_memories) >= len(original_memories)

    @pytest.mark.asyncio
    async def test_large_memory_set_performance(self, consolidation_config, mock_large_storage):
        """Test performance with larger memory sets."""
        consolidator = DreamInspiredConsolidator(mock_large_storage, consolidation_config)

        start_time = datetime.now()
        report = await consolidator.consolidate("weekly")
        end_time = datetime.now()

        duration = (end_time - start_time).total_seconds()

        # Should complete within reasonable time (adjust threshold as needed)
        assert duration < 30  # 30 seconds for 100 memories
        assert report.memories_processed > 0
        assert report.performance_metrics["success"] is True

        # Performance should be reasonable
        if report.memories_processed > 0:
            memories_per_second = report.memories_processed / duration
            assert memories_per_second > 1  # At least 1 memory per second

    @pytest.mark.asyncio
    async def test_consolidation_report_completeness(self, consolidator):
        """Test that consolidation reports contain all expected information."""
        report = await consolidator.consolidate("weekly")

        # Check all required fields
        required_fields = [
            "time_horizon", "start_time", "end_time", "memories_processed",
            "associations_discovered", "clusters_created", "memories_compressed",
            "memories_archived", "errors", "performance_metrics"
        ]

        for field in required_fields:
            assert hasattr(report, field), f"Missing field: {field}"
            assert getattr(report, field) is not None, f"Field {field} is None"

        # Check performance metrics
        perf_metrics = report.performance_metrics
        assert "duration_seconds" in perf_metrics
        assert "memories_per_second" in perf_metrics
        assert "success" in perf_metrics

    @pytest.mark.asyncio
    async def test_storage_backend_integration(self, consolidator, mock_storage):
        """Test integration with storage backend methods."""
        # Run consolidation
        report = await consolidator.consolidate("monthly")

        # Verify storage integration worked (memories were processed)
        assert report.memories_processed >= 0
        assert isinstance(report.performance_metrics, dict)

        # Verify storage backend has the expected methods
        assert hasattr(mock_storage, 'get_all_memories')
        assert hasattr(mock_storage, 'get_memories_by_time_range')
        assert hasattr(mock_storage, 'get_memory_connections')
        assert hasattr(mock_storage, 'get_access_patterns')
        assert hasattr(mock_storage, 'update_memory')

    @pytest.mark.asyncio
    async def test_configuration_impact(self, mock_storage):
        """Test that configuration changes affect consolidation behavior."""
        # Two configurations that differ only in feature toggles
        # (all-enabled vs all-disabled), built by the shared helper.
        config1 = self._make_toggle_config(True)
        config2 = self._make_toggle_config(False)

        consolidator1 = DreamInspiredConsolidator(mock_storage, config1)
        consolidator2 = DreamInspiredConsolidator(mock_storage, config2)

        # Both should work, but may produce different results
        report1 = await consolidator1.consolidate("weekly")
        report2 = await consolidator2.consolidate("weekly")

        assert isinstance(report1, ConsolidationReport)
        assert isinstance(report2, ConsolidationReport)

        # With disabled features, the second consolidator might process differently
        # but both should complete successfully
        assert report1.performance_metrics["success"] is True
        assert report2.performance_metrics["success"] is True
```
Page 21/35FirstPrevNextLast