This is page 38 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── amp-bridge.md
│ │ ├── amp-pr-automator.md
│ │ ├── code-quality-guard.md
│ │ ├── gemini-pr-automator.md
│ │ └── github-release-manager.md
│ ├── settings.local.json.backup
│ └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ ├── feature_request.yml
│ │ └── performance_issue.yml
│ ├── pull_request_template.md
│ └── workflows
│ ├── bridge-tests.yml
│ ├── CACHE_FIX.md
│ ├── claude-code-review.yml
│ ├── claude.yml
│ ├── cleanup-images.yml.disabled
│ ├── dev-setup-validation.yml
│ ├── docker-publish.yml
│ ├── LATEST_FIXES.md
│ ├── main-optimized.yml.disabled
│ ├── main.yml
│ ├── publish-and-test.yml
│ ├── README_OPTIMIZATION.md
│ ├── release-tag.yml.disabled
│ ├── release.yml
│ ├── roadmap-review-reminder.yml
│ ├── SECRET_CONDITIONAL_FIX.md
│ └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│ ├── .gitignore
│ └── reports
│ └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│ ├── deployment
│ │ ├── deploy_fastmcp_fixed.sh
│ │ ├── deploy_http_with_mcp.sh
│ │ └── deploy_mcp_v4.sh
│ ├── deployment-configs
│ │ ├── empty_config.yml
│ │ └── smithery.yaml
│ ├── development
│ │ └── test_fastmcp.py
│ ├── docs-removed-2025-08-23
│ │ ├── authentication.md
│ │ ├── claude_integration.md
│ │ ├── claude-code-compatibility.md
│ │ ├── claude-code-integration.md
│ │ ├── claude-code-quickstart.md
│ │ ├── claude-desktop-setup.md
│ │ ├── complete-setup-guide.md
│ │ ├── database-synchronization.md
│ │ ├── development
│ │ │ ├── autonomous-memory-consolidation.md
│ │ │ ├── CLEANUP_PLAN.md
│ │ │ ├── CLEANUP_README.md
│ │ │ ├── CLEANUP_SUMMARY.md
│ │ │ ├── dream-inspired-memory-consolidation.md
│ │ │ ├── hybrid-slm-memory-consolidation.md
│ │ │ ├── mcp-milestone.md
│ │ │ ├── multi-client-architecture.md
│ │ │ ├── test-results.md
│ │ │ └── TIMESTAMP_FIX_SUMMARY.md
│ │ ├── distributed-sync.md
│ │ ├── invocation_guide.md
│ │ ├── macos-intel.md
│ │ ├── master-guide.md
│ │ ├── mcp-client-configuration.md
│ │ ├── multi-client-server.md
│ │ ├── service-installation.md
│ │ ├── sessions
│ │ │ └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│ │ ├── UBUNTU_SETUP.md
│ │ ├── ubuntu.md
│ │ ├── windows-setup.md
│ │ └── windows.md
│ ├── docs-root-cleanup-2025-08-23
│ │ ├── AWESOME_LIST_SUBMISSION.md
│ │ ├── CLOUDFLARE_IMPLEMENTATION.md
│ │ ├── DOCUMENTATION_ANALYSIS.md
│ │ ├── DOCUMENTATION_CLEANUP_PLAN.md
│ │ ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│ │ ├── LITESTREAM_SETUP_GUIDE.md
│ │ ├── lm_studio_system_prompt.md
│ │ ├── PYTORCH_DOWNLOAD_FIX.md
│ │ └── README-ORIGINAL-BACKUP.md
│ ├── investigations
│ │ └── MACOS_HOOKS_INVESTIGATION.md
│ ├── litestream-configs-v6.3.0
│ │ ├── install_service.sh
│ │ ├── litestream_master_config_fixed.yml
│ │ ├── litestream_master_config.yml
│ │ ├── litestream_replica_config_fixed.yml
│ │ ├── litestream_replica_config.yml
│ │ ├── litestream_replica_simple.yml
│ │ ├── litestream-http.service
│ │ ├── litestream.service
│ │ └── requirements-cloudflare.txt
│ ├── release-notes
│ │ └── release-notes-v7.1.4.md
│ └── setup-development
│ ├── README.md
│ ├── setup_consolidation_mdns.sh
│ ├── STARTUP_SETUP_GUIDE.md
│ └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│ ├── memory-context.md
│ ├── memory-health.md
│ ├── memory-ingest-dir.md
│ ├── memory-ingest.md
│ ├── memory-recall.md
│ ├── memory-search.md
│ ├── memory-store.md
│ ├── README.md
│ └── session-start.md
├── claude-hooks
│ ├── config.json
│ ├── config.template.json
│ ├── CONFIGURATION.md
│ ├── core
│ │ ├── memory-retrieval.js
│ │ ├── mid-conversation.js
│ │ ├── session-end.js
│ │ ├── session-start.js
│ │ └── topic-change.js
│ ├── debug-pattern-test.js
│ ├── install_claude_hooks_windows.ps1
│ ├── install_hooks.py
│ ├── memory-mode-controller.js
│ ├── MIGRATION.md
│ ├── README-NATURAL-TRIGGERS.md
│ ├── README-phase2.md
│ ├── README.md
│ ├── simple-test.js
│ ├── statusline.sh
│ ├── test-adaptive-weights.js
│ ├── test-dual-protocol-hook.js
│ ├── test-mcp-hook.js
│ ├── test-natural-triggers.js
│ ├── test-recency-scoring.js
│ ├── tests
│ │ ├── integration-test.js
│ │ ├── phase2-integration-test.js
│ │ ├── test-code-execution.js
│ │ ├── test-cross-session.json
│ │ ├── test-session-tracking.json
│ │ └── test-threading.json
│ ├── utilities
│ │ ├── adaptive-pattern-detector.js
│ │ ├── context-formatter.js
│ │ ├── context-shift-detector.js
│ │ ├── conversation-analyzer.js
│ │ ├── dynamic-context-updater.js
│ │ ├── git-analyzer.js
│ │ ├── mcp-client.js
│ │ ├── memory-client.js
│ │ ├── memory-scorer.js
│ │ ├── performance-manager.js
│ │ ├── project-detector.js
│ │ ├── session-tracker.js
│ │ ├── tiered-conversation-monitor.js
│ │ └── version-checker.js
│ └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│ ├── amp-cli-bridge.md
│ ├── api
│ │ ├── code-execution-interface.md
│ │ ├── memory-metadata-api.md
│ │ ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_REPORT.md
│ │ └── tag-standardization.md
│ ├── architecture
│ │ ├── search-enhancement-spec.md
│ │ └── search-examples.md
│ ├── architecture.md
│ ├── archive
│ │ └── obsolete-workflows
│ │ ├── load_memory_context.md
│ │ └── README.md
│ ├── assets
│ │ └── images
│ │ ├── dashboard-v3.3.0-preview.png
│ │ ├── memory-awareness-hooks-example.png
│ │ ├── project-infographic.svg
│ │ └── README.md
│ ├── CLAUDE_CODE_QUICK_REFERENCE.md
│ ├── cloudflare-setup.md
│ ├── deployment
│ │ ├── docker.md
│ │ ├── dual-service.md
│ │ ├── production-guide.md
│ │ └── systemd-service.md
│ ├── development
│ │ ├── ai-agent-instructions.md
│ │ ├── code-quality
│ │ │ ├── phase-2a-completion.md
│ │ │ ├── phase-2a-handle-get-prompt.md
│ │ │ ├── phase-2a-index.md
│ │ │ ├── phase-2a-install-package.md
│ │ │ └── phase-2b-session-summary.md
│ │ ├── code-quality-workflow.md
│ │ ├── dashboard-workflow.md
│ │ ├── issue-management.md
│ │ ├── pr-review-guide.md
│ │ ├── refactoring-notes.md
│ │ ├── release-checklist.md
│ │ └── todo-tracker.md
│ ├── docker-optimized-build.md
│ ├── document-ingestion.md
│ ├── DOCUMENTATION_AUDIT.md
│ ├── enhancement-roadmap-issue-14.md
│ ├── examples
│ │ ├── analysis-scripts.js
│ │ ├── maintenance-session-example.md
│ │ ├── memory-distribution-chart.jsx
│ │ └── tag-schema.json
│ ├── first-time-setup.md
│ ├── glama-deployment.md
│ ├── guides
│ │ ├── advanced-command-examples.md
│ │ ├── chromadb-migration.md
│ │ ├── commands-vs-mcp-server.md
│ │ ├── mcp-enhancements.md
│ │ ├── mdns-service-discovery.md
│ │ ├── memory-consolidation-guide.md
│ │ ├── migration.md
│ │ ├── scripts.md
│ │ └── STORAGE_BACKENDS.md
│ ├── HOOK_IMPROVEMENTS.md
│ ├── hooks
│ │ └── phase2-code-execution-migration.md
│ ├── http-server-management.md
│ ├── ide-compatability.md
│ ├── IMAGE_RETENTION_POLICY.md
│ ├── images
│ │ └── dashboard-placeholder.md
│ ├── implementation
│ │ ├── health_checks.md
│ │ └── performance.md
│ ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│ ├── integration
│ │ ├── homebrew.md
│ │ └── multi-client.md
│ ├── integrations
│ │ ├── gemini.md
│ │ ├── groq-bridge.md
│ │ ├── groq-integration-summary.md
│ │ └── groq-model-comparison.md
│ ├── integrations.md
│ ├── legacy
│ │ └── dual-protocol-hooks.md
│ ├── LM_STUDIO_COMPATIBILITY.md
│ ├── maintenance
│ │ └── memory-maintenance.md
│ ├── mastery
│ │ ├── api-reference.md
│ │ ├── architecture-overview.md
│ │ ├── configuration-guide.md
│ │ ├── local-setup-and-run.md
│ │ ├── testing-guide.md
│ │ └── troubleshooting.md
│ ├── migration
│ │ └── code-execution-api-quick-start.md
│ ├── natural-memory-triggers
│ │ ├── cli-reference.md
│ │ ├── installation-guide.md
│ │ └── performance-optimization.md
│ ├── oauth-setup.md
│ ├── pr-graphql-integration.md
│ ├── quick-setup-cloudflare-dual-environment.md
│ ├── README.md
│ ├── remote-configuration-wiki-section.md
│ ├── research
│ │ ├── code-execution-interface-implementation.md
│ │ └── code-execution-interface-summary.md
│ ├── ROADMAP.md
│ ├── sqlite-vec-backend.md
│ ├── statistics
│ │ ├── charts
│ │ │ ├── activity_patterns.png
│ │ │ ├── contributors.png
│ │ │ ├── growth_trajectory.png
│ │ │ ├── monthly_activity.png
│ │ │ └── october_sprint.png
│ │ ├── data
│ │ │ ├── activity_by_day.csv
│ │ │ ├── activity_by_hour.csv
│ │ │ ├── contributors.csv
│ │ │ └── monthly_activity.csv
│ │ ├── generate_charts.py
│ │ └── REPOSITORY_STATISTICS.md
│ ├── technical
│ │ ├── development.md
│ │ ├── memory-migration.md
│ │ ├── migration-log.md
│ │ ├── sqlite-vec-embedding-fixes.md
│ │ └── tag-storage.md
│ ├── testing
│ │ └── regression-tests.md
│ ├── testing-cloudflare-backend.md
│ ├── troubleshooting
│ │ ├── cloudflare-api-token-setup.md
│ │ ├── cloudflare-authentication.md
│ │ ├── general.md
│ │ ├── hooks-quick-reference.md
│ │ ├── pr162-schema-caching-issue.md
│ │ ├── session-end-hooks.md
│ │ └── sync-issues.md
│ └── tutorials
│ ├── advanced-techniques.md
│ ├── data-analysis.md
│ └── demo-session-walkthrough.md
├── examples
│ ├── claude_desktop_config_template.json
│ ├── claude_desktop_config_windows.json
│ ├── claude-desktop-http-config.json
│ ├── config
│ │ └── claude_desktop_config.json
│ ├── http-mcp-bridge.js
│ ├── memory_export_template.json
│ ├── README.md
│ ├── setup
│ │ └── setup_multi_client_complete.py
│ └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│ ├── .claude
│ │ └── settings.local.json
│ ├── archive
│ │ └── check_missing_timestamps.py
│ ├── backup
│ │ ├── backup_memories.py
│ │ ├── backup_sqlite_vec.sh
│ │ ├── export_distributable_memories.sh
│ │ └── restore_memories.py
│ ├── benchmarks
│ │ ├── benchmark_code_execution_api.py
│ │ ├── benchmark_hybrid_sync.py
│ │ └── benchmark_server_caching.py
│ ├── database
│ │ ├── analyze_sqlite_vec_db.py
│ │ ├── check_sqlite_vec_status.py
│ │ ├── db_health_check.py
│ │ └── simple_timestamp_check.py
│ ├── development
│ │ ├── debug_server_initialization.py
│ │ ├── find_orphaned_files.py
│ │ ├── fix_mdns.sh
│ │ ├── fix_sitecustomize.py
│ │ ├── remote_ingest.sh
│ │ ├── setup-git-merge-drivers.sh
│ │ ├── uv-lock-merge.sh
│ │ └── verify_hybrid_sync.py
│ ├── hooks
│ │ └── pre-commit
│ ├── installation
│ │ ├── install_linux_service.py
│ │ ├── install_macos_service.py
│ │ ├── install_uv.py
│ │ ├── install_windows_service.py
│ │ ├── install.py
│ │ ├── setup_backup_cron.sh
│ │ ├── setup_claude_mcp.sh
│ │ └── setup_cloudflare_resources.py
│ ├── linux
│ │ ├── service_status.sh
│ │ ├── start_service.sh
│ │ ├── stop_service.sh
│ │ ├── uninstall_service.sh
│ │ └── view_logs.sh
│ ├── maintenance
│ │ ├── assign_memory_types.py
│ │ ├── check_memory_types.py
│ │ ├── cleanup_corrupted_encoding.py
│ │ ├── cleanup_memories.py
│ │ ├── cleanup_organize.py
│ │ ├── consolidate_memory_types.py
│ │ ├── consolidation_mappings.json
│ │ ├── delete_orphaned_vectors_fixed.py
│ │ ├── fast_cleanup_duplicates_with_tracking.sh
│ │ ├── find_all_duplicates.py
│ │ ├── find_cloudflare_duplicates.py
│ │ ├── find_duplicates.py
│ │ ├── memory-types.md
│ │ ├── README.md
│ │ ├── recover_timestamps_from_cloudflare.py
│ │ ├── regenerate_embeddings.py
│ │ ├── repair_malformed_tags.py
│ │ ├── repair_memories.py
│ │ ├── repair_sqlite_vec_embeddings.py
│ │ ├── repair_zero_embeddings.py
│ │ ├── restore_from_json_export.py
│ │ └── scan_todos.sh
│ ├── migration
│ │ ├── cleanup_mcp_timestamps.py
│ │ ├── legacy
│ │ │ └── migrate_chroma_to_sqlite.py
│ │ ├── mcp-migration.py
│ │ ├── migrate_sqlite_vec_embeddings.py
│ │ ├── migrate_storage.py
│ │ ├── migrate_tags.py
│ │ ├── migrate_timestamps.py
│ │ ├── migrate_to_cloudflare.py
│ │ ├── migrate_to_sqlite_vec.py
│ │ ├── migrate_v5_enhanced.py
│ │ ├── TIMESTAMP_CLEANUP_README.md
│ │ └── verify_mcp_timestamps.py
│ ├── pr
│ │ ├── amp_collect_results.sh
│ │ ├── amp_detect_breaking_changes.sh
│ │ ├── amp_generate_tests.sh
│ │ ├── amp_pr_review.sh
│ │ ├── amp_quality_gate.sh
│ │ ├── amp_suggest_fixes.sh
│ │ ├── auto_review.sh
│ │ ├── detect_breaking_changes.sh
│ │ ├── generate_tests.sh
│ │ ├── lib
│ │ │ └── graphql_helpers.sh
│ │ ├── quality_gate.sh
│ │ ├── resolve_threads.sh
│ │ ├── run_pyscn_analysis.sh
│ │ ├── run_quality_checks.sh
│ │ ├── thread_status.sh
│ │ └── watch_reviews.sh
│ ├── quality
│ │ ├── fix_dead_code_install.sh
│ │ ├── phase1_dead_code_analysis.md
│ │ ├── phase2_complexity_analysis.md
│ │ ├── README_PHASE1.md
│ │ ├── README_PHASE2.md
│ │ ├── track_pyscn_metrics.sh
│ │ └── weekly_quality_review.sh
│ ├── README.md
│ ├── run
│ │ ├── run_mcp_memory.sh
│ │ ├── run-with-uv.sh
│ │ └── start_sqlite_vec.sh
│ ├── run_memory_server.py
│ ├── server
│ │ ├── check_http_server.py
│ │ ├── check_server_health.py
│ │ ├── memory_offline.py
│ │ ├── preload_models.py
│ │ ├── run_http_server.py
│ │ ├── run_memory_server.py
│ │ ├── start_http_server.bat
│ │ └── start_http_server.sh
│ ├── service
│ │ ├── deploy_dual_services.sh
│ │ ├── install_http_service.sh
│ │ ├── mcp-memory-http.service
│ │ ├── mcp-memory.service
│ │ ├── memory_service_manager.sh
│ │ ├── service_control.sh
│ │ ├── service_utils.py
│ │ └── update_service.sh
│ ├── sync
│ │ ├── check_drift.py
│ │ ├── claude_sync_commands.py
│ │ ├── export_memories.py
│ │ ├── import_memories.py
│ │ ├── litestream
│ │ │ ├── apply_local_changes.sh
│ │ │ ├── enhanced_memory_store.sh
│ │ │ ├── init_staging_db.sh
│ │ │ ├── io.litestream.replication.plist
│ │ │ ├── manual_sync.sh
│ │ │ ├── memory_sync.sh
│ │ │ ├── pull_remote_changes.sh
│ │ │ ├── push_to_remote.sh
│ │ │ ├── README.md
│ │ │ ├── resolve_conflicts.sh
│ │ │ ├── setup_local_litestream.sh
│ │ │ ├── setup_remote_litestream.sh
│ │ │ ├── staging_db_init.sql
│ │ │ ├── stash_local_changes.sh
│ │ │ ├── sync_from_remote_noconfig.sh
│ │ │ └── sync_from_remote.sh
│ │ ├── README.md
│ │ ├── safe_cloudflare_update.sh
│ │ ├── sync_memory_backends.py
│ │ └── sync_now.py
│ ├── testing
│ │ ├── run_complete_test.py
│ │ ├── run_memory_test.sh
│ │ ├── simple_test.py
│ │ ├── test_cleanup_logic.py
│ │ ├── test_cloudflare_backend.py
│ │ ├── test_docker_functionality.py
│ │ ├── test_installation.py
│ │ ├── test_mdns.py
│ │ ├── test_memory_api.py
│ │ ├── test_memory_simple.py
│ │ ├── test_migration.py
│ │ ├── test_search_api.py
│ │ ├── test_sqlite_vec_embeddings.py
│ │ ├── test_sse_events.py
│ │ ├── test-connection.py
│ │ └── test-hook.js
│ ├── utils
│ │ ├── claude_commands_utils.py
│ │ ├── generate_personalized_claude_md.sh
│ │ ├── groq
│ │ ├── groq_agent_bridge.py
│ │ ├── list-collections.py
│ │ ├── memory_wrapper_uv.py
│ │ ├── query_memories.py
│ │ ├── smithery_wrapper.py
│ │ ├── test_groq_bridge.sh
│ │ └── uv_wrapper.py
│ └── validation
│ ├── check_dev_setup.py
│ ├── check_documentation_links.py
│ ├── diagnose_backend_config.py
│ ├── validate_configuration_complete.py
│ ├── validate_memories.py
│ ├── validate_migration.py
│ ├── validate_timestamp_integrity.py
│ ├── verify_environment.py
│ ├── verify_pytorch_windows.py
│ └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│ └── mcp_memory_service
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── operations.py
│ │ ├── sync_wrapper.py
│ │ └── types.py
│ ├── backup
│ │ ├── __init__.py
│ │ └── scheduler.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── ingestion.py
│ │ ├── main.py
│ │ └── utils.py
│ ├── config.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── associations.py
│ │ ├── base.py
│ │ ├── clustering.py
│ │ ├── compression.py
│ │ ├── consolidator.py
│ │ ├── decay.py
│ │ ├── forgetting.py
│ │ ├── health.py
│ │ └── scheduler.py
│ ├── dependency_check.py
│ ├── discovery
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── mdns_service.py
│ ├── embeddings
│ │ ├── __init__.py
│ │ └── onnx_embeddings.py
│ ├── ingestion
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chunker.py
│ │ ├── csv_loader.py
│ │ ├── json_loader.py
│ │ ├── pdf_loader.py
│ │ ├── registry.py
│ │ ├── semtools_loader.py
│ │ └── text_loader.py
│ ├── lm_studio_compat.py
│ ├── mcp_server.py
│ ├── models
│ │ ├── __init__.py
│ │ └── memory.py
│ ├── server.py
│ ├── services
│ │ ├── __init__.py
│ │ └── memory_service.py
│ ├── storage
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloudflare.py
│ │ ├── factory.py
│ │ ├── http_client.py
│ │ ├── hybrid.py
│ │ └── sqlite_vec.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── exporter.py
│ │ ├── importer.py
│ │ └── litestream_config.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── content_splitter.py
│ │ ├── db_utils.py
│ │ ├── debug.py
│ │ ├── document_processing.py
│ │ ├── gpu_detection.py
│ │ ├── hashing.py
│ │ ├── http_server_manager.py
│ │ ├── port_detection.py
│ │ ├── system_detection.py
│ │ └── time_parser.py
│ └── web
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── analytics.py
│ │ ├── backup.py
│ │ ├── consolidation.py
│ │ ├── documents.py
│ │ ├── events.py
│ │ ├── health.py
│ │ ├── manage.py
│ │ ├── mcp.py
│ │ ├── memories.py
│ │ ├── search.py
│ │ └── sync.py
│ ├── app.py
│ ├── dependencies.py
│ ├── oauth
│ │ ├── __init__.py
│ │ ├── authorization.py
│ │ ├── discovery.py
│ │ ├── middleware.py
│ │ ├── models.py
│ │ ├── registration.py
│ │ └── storage.py
│ ├── sse.py
│ └── static
│ ├── app.js
│ ├── index.html
│ ├── README.md
│ ├── sse_test.html
│ └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── test_compact_types.py
│ │ └── test_operations.py
│ ├── bridge
│ │ ├── mock_responses.js
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ └── test_http_mcp_bridge.js
│ ├── conftest.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_associations.py
│ │ ├── test_clustering.py
│ │ ├── test_compression.py
│ │ ├── test_consolidator.py
│ │ ├── test_decay.py
│ │ └── test_forgetting.py
│ ├── contracts
│ │ └── api-specification.yml
│ ├── integration
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ ├── test_api_key_fallback.py
│ │ ├── test_api_memories_chronological.py
│ │ ├── test_api_tag_time_search.py
│ │ ├── test_api_with_memory_service.py
│ │ ├── test_bridge_integration.js
│ │ ├── test_cli_interfaces.py
│ │ ├── test_cloudflare_connection.py
│ │ ├── test_concurrent_clients.py
│ │ ├── test_data_serialization_consistency.py
│ │ ├── test_http_server_startup.py
│ │ ├── test_mcp_memory.py
│ │ ├── test_mdns_integration.py
│ │ ├── test_oauth_basic_auth.py
│ │ ├── test_oauth_flow.py
│ │ ├── test_server_handlers.py
│ │ └── test_store_memory.py
│ ├── performance
│ │ ├── test_background_sync.py
│ │ └── test_hybrid_live.py
│ ├── README.md
│ ├── smithery
│ │ └── test_smithery.py
│ ├── sqlite
│ │ └── simple_sqlite_vec_test.py
│ ├── test_client.py
│ ├── test_content_splitting.py
│ ├── test_database.py
│ ├── test_hybrid_cloudflare_limits.py
│ ├── test_hybrid_storage.py
│ ├── test_memory_ops.py
│ ├── test_semantic_search.py
│ ├── test_sqlite_vec_storage.py
│ ├── test_time_parser.py
│ ├── test_timestamp_preservation.py
│ ├── timestamp
│ │ ├── test_hook_vs_manual_storage.py
│ │ ├── test_issue99_final_validation.py
│ │ ├── test_search_retrieval_inconsistency.py
│ │ ├── test_timestamp_issue.py
│ │ └── test_timestamp_simple.py
│ └── unit
│ ├── conftest.py
│ ├── test_cloudflare_storage.py
│ ├── test_csv_loader.py
│ ├── test_fastapi_dependencies.py
│ ├── test_import.py
│ ├── test_json_loader.py
│ ├── test_mdns_simple.py
│ ├── test_mdns.py
│ ├── test_memory_service.py
│ ├── test_memory.py
│ ├── test_semtools_loader.py
│ ├── test_storage_interface_compatibility.py
│ └── test_tag_time_filtering.py
├── tools
│ ├── docker
│ │ ├── DEPRECATED.md
│ │ ├── docker-compose.http.yml
│ │ ├── docker-compose.pythonpath.yml
│ │ ├── docker-compose.standalone.yml
│ │ ├── docker-compose.uv.yml
│ │ ├── docker-compose.yml
│ │ ├── docker-entrypoint-persistent.sh
│ │ ├── docker-entrypoint-unified.sh
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile
│ │ ├── Dockerfile.glama
│ │ ├── Dockerfile.slim
│ │ ├── README.md
│ │ └── test-docker-modes.sh
│ └── README.md
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
```markdown
1 | # Changelog
2 |
3 | **Recent releases for MCP Memory Service (v8.0.0 and later)**
4 |
5 | All notable changes to the MCP Memory Service project will be documented in this file.
6 |
7 | For older releases, see [CHANGELOG-HISTORIC.md](./CHANGELOG-HISTORIC.md).
8 |
9 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
10 |
11 | ## [Unreleased]
12 |
13 | ## [8.42.0] - 2025-11-27
14 |
15 | ### Added
16 | - **Visible Memory Injection Display** - Users now see injected memories at session start (commit TBD)
17 | - Added `showInjectedMemories` config option to display top 3 memories with relevance scores
18 | - Shows memory age (e.g., "2 days ago"), tags, and relevance scores
19 | - Formatted with colored output box for clear visibility
20 | - Helps users understand what context the AI assistant is using
21 | - Configurable via `~/.claude/hooks/config.json`
22 |
23 | ### Changed
24 | - **Session-End Hook Quality Improvements** - Raised quality thresholds to prevent generic boilerplate (commit TBD)
25 | - Increased `minSessionLength` from 100 → 200 characters (requires more substantial content)
26 | - Increased `minConfidence` from 0.1 → 0.5 (requires 5+ meaningful items vs 1+)
27 | - Added optional LLM-powered session summarizer using Gemini CLI
28 | - New files: `llm-session-summarizer.js` utility and `session-end-llm.js` core hook
29 | - Prevents low-quality memories like "User asked Claude to review code" from polluting database
30 | - Database cleaned from 3352 → 3185 memories (167 generic entries removed)
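
A minimal sketch of how the options above could be applied to `~/.claude/hooks/config.json`; only the option names and values come from this entry, while the `sessionEnd` grouping and flat `showInjectedMemories` key are assumptions, not the documented schema:

```python
"""Illustrative only: apply the 8.42.0 hook settings to ~/.claude/hooks/config.json."""
import json
from pathlib import Path

config_path = Path.home() / ".claude" / "hooks" / "config.json"
config = json.loads(config_path.read_text()) if config_path.exists() else {}

# New display option: show the top injected memories at session start.
config["showInjectedMemories"] = True

# Raised session-end quality thresholds (100 -> 200 chars, 0.1 -> 0.5 confidence).
# The "sessionEnd" grouping is an assumption, not the documented layout.
config.setdefault("sessionEnd", {}).update({
    "minSessionLength": 200,
    "minConfidence": 0.5,
})

config_path.write_text(json.dumps(config, indent=2))
```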
31 |
32 | ### Fixed
33 | - **Duplicate MCP Fallback Messages** - Fixed duplicate "MCP Fallback → Using standard MCP tools" log messages (commit TBD)
34 | - Added module-level flag to track if fallback message was already logged
35 | - Message now appears once per session instead of once per query
36 | - Improved session start hook output clarity
37 |
38 | ### Performance
39 | - **Configuration Improvements** - Better defaults for session analysis
40 | - Enabled relevance scores in context formatting
41 | - Improved memory scoring to prioritize quality over recency for generic content
42 | - Session-end hook re-enabled with improved quality gates
43 |
44 | ## [8.41.2] - 2025-11-27
45 |
46 | ### Fixed
47 | - **Hook Installer Utility File Deployment** - Installer now copies ALL utility files instead of hardcoded lists (commit 557be0e)
48 | - **BREAKING**: Previous installer only copied 8/14 basic utilities and 5/14 enhanced utilities
49 | - Updated files like `memory-scorer.js` and `context-formatter.js` were not deployed with the `--natural-triggers` flag
50 | - Replaced hardcoded file lists with glob pattern (`*.js`) to automatically include all utility files
51 | - Ensures v8.41.0/v8.41.1 project affinity filtering fixes get properly deployed
52 | - Future utility file additions automatically included without manual list maintenance
53 | - **Impact**: Users running `python install_hooks.py --natural-triggers` now get all 14 utility files, preventing stale hooks
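
A short Python sketch of the glob-based copy described above; the directory paths and function name are illustrative, not the installer's actual code:

```python
import glob
import shutil
from pathlib import Path

def copy_all_utilities(source_dir: str, target_dir: str) -> int:
    """Copy every *.js utility file instead of maintaining a hardcoded list."""
    Path(target_dir).mkdir(parents=True, exist_ok=True)
    copied = 0
    for js_file in glob.glob(str(Path(source_dir) / "*.js")):
        shutil.copy2(js_file, target_dir)  # preserves file metadata
        copied += 1
    return copied

# Example: copy_all_utilities("claude-hooks/utilities",
#                             str(Path.home() / ".claude/hooks/utilities"))
```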
54 |
55 | ## [8.41.1] - 2025-11-27
56 |
57 | ### Fixed
58 | - **Context Formatter Memory Sorting** - Memories now sorted by recency within each category (commit 2ede2a8)
59 | - Added sorting by `created_at_iso` (descending) after grouping memories into categories
60 | - Ensures most recent memories appear first in each section for better context relevance
61 | - Applied in `context-formatter.js` after category grouping logic
62 | - Improves user experience by prioritizing newest information in memory context
63 |
64 | ## [8.41.0] - 2025-11-27
65 |
66 | ### Fixed
67 | - **Session Start Hook Reliability** - Improved session start hook reliability and memory filtering (commit 924962a)
68 | - **Error Suppression**: Suppressed Code Execution ModuleNotFoundError spam
69 | - Added `suppressErrors: true` to Code Execution call configuration
70 | - Eliminates console noise from module import errors during session start
71 | - **Clean Output**: Removed duplicate "Injected Memory Context" output
72 | - Removed duplicate stdout console.log that caused double messages
73 | - Session start output now cleaner and easier to read
74 | - **Memory Filtering**: Added project affinity scoring to prevent cross-project memory pollution
75 | - New `calculateProjectAffinity()` function in `memory-scorer.js`
76 | - Hard filters out memories without project tag when in a project context
77 | - Soft scoring penalty (0.3x) for memories from different projects
78 | - Prevents Azure/Terraform memories from appearing in mcp-memory-service context
79 | - **Classification Fix**: Session summaries no longer misclassified as "Current Problems"
80 | - Excludes `session`, `session-summary`, and `session-end` memory types from problem classification
81 | - Prevents confusion between historical session notes and actual current issues
82 | - **Path Display**: "Unknown location" now shows actual path via `process.cwd()` fallback
83 | - When git repository detection fails, uses `process.cwd()` instead of "Unknown location"
84 | - Provides better context awareness even in non-git directories
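
The actual implementation is `calculateProjectAffinity()` in `memory-scorer.js`; a rough Python rendering of the rule described above (hard filter for untagged memories, 0.3x penalty for other projects) might look like this:

```python
def project_affinity_multiplier(memory_tags: list[str], current_project: str,
                                known_projects: set[str]) -> float:
    """Return a score multiplier based on project affinity (illustrative logic)."""
    if current_project in memory_tags:
        return 1.0   # memory belongs to the active project
    if not any(tag in known_projects for tag in memory_tags):
        return 0.0   # no project tag at all: hard-filtered in project context
    return 0.3       # tagged for a different project: soft penalty

# An Azure/Terraform memory scored while working in mcp-memory-service:
base_relevance = 0.8
score = base_relevance * project_affinity_multiplier(
    ["azure", "terraform"], "mcp-memory-service",
    {"mcp-memory-service", "terraform"},
)  # -> 0.24
```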
85 |
86 | ## [8.40.0] - 2025-11-27
87 |
88 | ### Added
89 | - **Session Start Version Display** - Automatic version information display during session startup (commit f2f7d2b, fixes #250)
90 | - **Version Checker Utility**: New `version-checker.js` utility in `claude-hooks/utilities/`
91 | - Reads local version from `src/mcp_memory_service/__init__.py`
92 | - Fetches latest published version from PyPI API
93 | - Compares versions and displays status labels (published/development/outdated)
94 | - Configurable timeout for PyPI API requests
95 | - **Session Start Integration**: Version information now appears automatically during session initialization
96 | - Displays format: `📦 Version → X.Y.Z (local) • PyPI: X.Y.Z`
97 | - Shows after storage backend information
98 | - Provides immediate visibility into version status
99 | - **Testing**: Includes `test_version_checker.js` for utility validation
100 | - **Benefits**:
101 | - Quick version verification without manual checks
102 | - Early detection of outdated installations
103 | - Improved development workflow transparency
104 | - Helps users stay current with latest features and fixes
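
The shipped utility is JavaScript (`claude-hooks/utilities/version-checker.js`); a rough Python equivalent of the same check, reading `__version__` locally and querying the public PyPI JSON API, could look like this (the regex and package name are assumptions):

```python
import json
import re
import urllib.request
from pathlib import Path

def local_version(init_file: str = "src/mcp_memory_service/__init__.py") -> str:
    match = re.search(r'__version__\s*=\s*["\']([^"\']+)["\']', Path(init_file).read_text())
    return match.group(1) if match else "unknown"

def pypi_version(package: str = "mcp-memory-service", timeout: float = 2.0) -> str:
    with urllib.request.urlopen(f"https://pypi.org/pypi/{package}/json", timeout=timeout) as resp:
        return json.load(resp)["info"]["version"]

local, latest = local_version(), pypi_version()
status = "published" if local == latest else "development/outdated"
print(f"📦 Version → {local} (local) • PyPI: {latest} [{status}]")
```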
105 |
106 | ## [8.39.1] - 2025-11-27
107 |
108 | ### Fixed
109 | - **Dashboard Analytics Bugs** - Fixed three critical bugs in the analytics section (commit c898a72, fixes #253)
110 | - **Top Tags filtering**: Now correctly filters tags by selected timeframe (7d/30d/90d)
111 | - Implemented time-based filtering using `get_memories_by_time_range()`
112 | - Counts tags only from memories within the selected period
113 | - Maintains backward compatibility with all storage backends
114 | - **Recent Activity display**: Bars now show percentage distribution
115 | - Enhanced display to show both count and percentage of total
116 | - Tooltip includes both absolute count and percentage
117 | - Activity count label shows percentage (e.g., '42 (23.5%)')
118 | - **Storage Report field mismatches**: Fixed "undefined chars" display
119 | - Fixed field name: `size_kb` instead of `size`
120 | - Fixed field name: `preview` instead of `content_preview`
121 | - Fixed date parsing: `created_at` is ISO string, not timestamp
122 | - Added null safety and proper size display (KB with bytes fallback)
123 |
124 | ## [8.39.0] - 2025-11-26
125 |
126 | ### Performance
127 | - **Analytics date-range filtering**: Moved from application layer to storage layer for 10x performance improvement (#238)
128 | - Added `get_memories_by_time_range()` to Cloudflare backend with D1 database filtering
129 | - Updated memory growth endpoint to use database-layer queries instead of fetching all memories
130 | - **Performance gains**:
131 | - Reduced data transfer: 50MB → 1.5MB (97% reduction for 10,000 memories)
132 | - Response time (SQLite-vec): ~500ms → ~50ms (10x improvement)
133 | - Response time (Cloudflare): ~2-3s → ~200ms (10-15x improvement)
134 | - **Scalability**: Now handles databases with >10,000 memories efficiently
135 | - **Benefits**: Pushes filtering to database WHERE clauses, leverages indexes on `created_at`
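
A hedged sketch of the storage-layer pattern behind this change: the endpoint asks the backend only for memories inside the requested window, so filtering happens in a database WHERE clause rather than in Python (the exact `get_memories_by_time_range()` signature and `created_at` field type are assumed here):

```python
from datetime import datetime, timedelta, timezone

async def memory_growth_counts(storage, days: int = 30) -> dict[str, int]:
    """Count memories per day using database-layer time filtering."""
    end = datetime.now(timezone.utc)
    start = end - timedelta(days=days)
    # Only rows matching the time range cross the wire.
    memories = await storage.get_memories_by_time_range(start.timestamp(), end.timestamp())
    counts: dict[str, int] = {}
    for memory in memories:
        day = datetime.fromtimestamp(memory.created_at, tz=timezone.utc).strftime("%Y-%m-%d")
        counts[day] = counts.get(day, 0) + 1
    return counts
```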
136 |
137 | ## [8.38.1] - 2025-11-26
138 |
139 | ### Fixed
140 | - **HTTP MCP Transport: JSON-RPC 2.0 Compliance** - Fixed critical bug where HTTP MCP responses violated JSON-RPC 2.0 specification (PR #249, fixes #236)
141 | - **Problem**: FastAPI ignored Pydantic's `ConfigDict(exclude_none=True)` when directly returning models, causing responses to include null fields (`"error": null` in success, `"result": null` in errors)
142 | - **Impact**: Claude Code/Desktop rejected all HTTP MCP communications due to spec violation
143 | - **Solution**: Wrapped all `MCPResponse` returns in `JSONResponse` with explicit `.model_dump(exclude_none=True)` serialization
144 | - **Verification**:
145 | - Success responses now contain ONLY: `jsonrpc`, `id`, `result`
146 | - Error responses now contain ONLY: `jsonrpc`, `id`, `error`
147 | - **Testing**: Validated with curl commands against all 5 MCP endpoint response paths
148 | - **Credits**: @timkjr (Tim Knauff) for identifying root cause and implementing proper fix
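
A minimal FastAPI sketch of the fix, with the response model reduced to its JSON-RPC fields for brevity (the real model lives in `src/mcp_memory_service/web/api/mcp.py`):

```python
from typing import Any, Optional

from fastapi.responses import JSONResponse
from pydantic import BaseModel

class MCPResponse(BaseModel):
    jsonrpc: str = "2.0"
    id: Optional[int] = None
    result: Optional[Any] = None
    error: Optional[dict] = None

def to_jsonrpc_response(response: MCPResponse) -> JSONResponse:
    # Serialize explicitly: returning the model directly lets FastAPI
    # re-serialize it and re-introduce "error": null / "result": null.
    return JSONResponse(content=response.model_dump(exclude_none=True))

# Success responses carry only jsonrpc/id/result; errors only jsonrpc/id/error.
print(to_jsonrpc_response(MCPResponse(id=1, result={"ok": True})).body)
```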
149 |
150 | ## [8.38.0] - 2025-11-25
151 |
152 | ### Improved
153 | - **Code Quality: Phase 2b Duplicate Consolidation COMPLETE** - Eliminated ~176-186 lines of duplicate code (issue #246)
154 | - **Document chunk processing consolidation (Group 3)**:
155 | - Extracted `process_document_chunk()` helper function from duplicate implementations
156 | - Consolidated chunk_text/chunk_size/chunk_overlap pattern across document ingestion tools
157 | - 2 occurrences reduced to 1 canonical implementation with consistent metadata handling
158 | - **MCP response parsing consolidation (Group 3)**:
159 | - Extracted `parse_mcp_response()` helper for isError/error/content pattern
160 | - Standardized error handling across MCP tool invocations
161 | - 2 occurrences reduced to 1 canonical implementation
162 | - **Cache statistics logging consolidation (Group 5)**:
163 | - Extracted `log_cache_statistics()` helper for storage/service cache metrics
164 | - Standardized cache performance logging format (hits, misses, hit rates)
165 | - 2 occurrences reduced to 1 canonical implementation with consistent percentage formatting
166 | - **Winter season boundary logic consolidation (Group 7)**:
167 | - Extracted `is_winter_boundary_case()` helper for cross-year winter season handling
168 | - Centralized December-January transition logic (Dec 21 - Mar 20 spans years)
169 | - 2 occurrences reduced to 1 canonical implementation
170 | - **Test tempfile setup consolidation (Groups 10, 11)**:
171 | - Extracted `create_test_document()` helper for pytest tmp_path fixture patterns
172 | - Standardized temporary file creation across document ingestion tests
173 | - 6 occurrences reduced to 2 canonical implementations (PDF, DOCX variants)
174 | - **MCP server configuration consolidation (Phase 2b-3)**:
175 | - Consolidated duplicate server config sections in install.py and scripts/installation/install.py
176 | - Unified JSON serialization logic for mcpServers configuration blocks
177 | - Improved maintainability through shared configuration structure
178 | - **User input prompt consolidation (Phase 2b-2)**:
179 | - Extracted shared prompt logic for backend selection and configuration
180 | - Standardized input validation patterns across installation scripts
181 | - Reduced code duplication in interactive installation workflows
182 | - **Additional GPU detection consolidation (Phase 2b-1)**:
183 | - Completed GPU platform detection consolidation from Phase 2a
184 | - Refined helper function extraction for test_gpu_platform() and related utilities
185 | - Enhanced configuration-driven GPU detection architecture
186 | - **Consolidation Summary**:
187 | - Total duplicate code eliminated: ~176-186 lines across 10 consolidation commits
188 | - Functions/patterns consolidated: 10+ duplicate implementations → canonical versions
189 | - Strategic deferral: 5 groups intentionally skipped (high-risk/low-benefit per session analysis)
190 | - Code maintainability: Enhanced through focused helper methods and consistent patterns
191 | - 100% backward compatibility maintained (no breaking changes)
192 | - Test coverage: 100% maintained across all consolidations
193 |
194 | ### Code Quality
195 | - **Phase 2b Duplicate Consolidation**: 10 consolidation commits addressing multiple duplication groups
196 | - **Duplication Score**: Reduced from 5.5% (Phase 2a baseline) to estimated 4.5-4.7%
197 | - **Complexity Reduction**: Helper extraction pattern applied consistently across codebase
198 | - **Expected Impact**:
199 | - Duplication Score: Approaching <3% target with strategic consolidation
200 | - Complexity Score: Improved through helper function extraction
201 | - Overall Health Score: Strong progress toward 75+ target
202 | - **Remaining Work**: 5 duplication groups intentionally deferred (high-risk backend logic, low-benefit shared imports)
203 | - **Related**: Issue #246 Phase 2b (Duplicate Consolidation Strategy COMPLETE)
204 |
205 | ## [8.37.0] - 2025-11-24
206 |
207 | ### Improved
208 | - **Code Quality: Phase 2a Duplicate Consolidation COMPLETE** - Eliminated 5 duplicate high-complexity functions (issue #246)
209 | - **detect_gpu() consolidation (3 duplicates → 1 canonical)**:
210 | - Consolidated ROOT install.py::detect_gpu() (119 lines, complexity 30) with refactored scripts/installation/install.py version (187 lines, configuration-driven)
211 | - Refactored scripts/validation/verify_environment.py::EnvironmentVerifier.detect_gpu() (123 lines, complexity 27) to use helper-based architecture
212 | - Final canonical implementation in install.py: GPU_PLATFORM_CHECKS config dict + test_gpu_platform() helper + CUDA_VERSION_PARSER
213 | - Impact: -4% high-complexity functions (27 → 26), improved maintainability
214 | - **verify_installation() consolidation (2 duplicates → 1 canonical)**:
215 | - Replaced scripts/installation/install.py simplified version with canonical ROOT install.py implementation
216 | - Added tokenizers check for ONNX dependencies, safer DirectML version handling
217 | - Improved error messaging and user guidance
218 | - **Consolidation Summary**:
219 | - Total duplicate functions eliminated: 5 (3x detect_gpu + 2x verify_installation)
220 | - High-complexity functions reduced: 27 → 24 (-11%)
221 | - Code maintainability improved through focused helper methods and configuration-driven design
222 | - 100% backward compatibility maintained (no breaking changes)
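
A schematic sketch of the configuration-driven pattern named above (a `GPU_PLATFORM_CHECKS` dict plus a `test_gpu_platform()` helper); the probe commands here are illustrative placeholders, not the canonical `install.py` checks:

```python
import shutil
import subprocess

# Each platform maps to a probe command; the real checks are richer.
GPU_PLATFORM_CHECKS = {
    "cuda": ["nvidia-smi"],
    "rocm": ["rocm-smi"],
}

def test_gpu_platform(platform: str) -> bool:
    """Return True if the platform's probe command exists and exits cleanly."""
    command = GPU_PLATFORM_CHECKS[platform]
    if shutil.which(command[0]) is None:
        return False
    try:
        subprocess.run(command, capture_output=True, text=True, timeout=5, check=True)
        return True
    except (subprocess.SubprocessError, OSError):
        return False

def detect_gpu() -> str:
    """Data-driven detection instead of a monolithic if/elif chain."""
    return next((p for p in GPU_PLATFORM_CHECKS if test_gpu_platform(p)), "cpu")
```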
223 |
224 | ### Code Quality
225 | - **Phase 2a Duplicate Consolidation**: 5 of 5 target functions consolidated (100% complete)
226 | - **High-Complexity Functions**: Reduced from 27 to 24 (-11%)
227 | - **Complexity Reduction**: Configuration-driven patterns replace monolithic if/elif chains
228 | - **Expected Impact**:
229 | - Duplication Score: Reduced toward <3% target
230 | - Complexity Score: Improved through helper extraction
231 | - Overall Health Score: On track for 75+ target
232 | - **Related**: Issue #246 Phase 2a (Duplicate Consolidation Strategy COMPLETE)
233 |
234 | ## [8.36.1] - 2025-11-24
235 |
236 | ### Fixed
237 | - **CRITICAL**: HTTP server crash on v8.36.0 startup - forward reference error in analytics.py (issue #247)
238 | - Added `from __future__ import annotations` to enable forward references in type hints
239 | - Added `Tuple` to typing imports for Python 3.9 compatibility
240 | - Impact: Unblocks all v8.36.0 users experiencing startup failures
241 | - Root cause: PR #244 refactoring introduced forward references without future annotations import
242 | - Fix verified: HTTP server starts successfully, all 10 analytics routes registered
243 |
244 | ## [8.36.0] - 2025-11-24
245 |
246 | ### Improved
247 | - **Code Quality: Phase 2 COMPLETE - 100% of Target Achieved** - Refactored final 7 functions, -19 complexity points (issue #240 PR #244)
248 | - **consolidator.py (-8 points)**:
249 | - `consolidate()`: 12 → 8 - Introduced SyncPauseContext for cleaner sync state management + extracted `check_horizon_requirements()` helper
250 | - `_get_memories_for_horizon()`: 10 → 8 - Replaced conditional logic with data-driven HORIZON_CONFIGS dict lookup
251 | - **analytics.py (-8 points)**:
252 | - `get_tag_usage_analytics()`: 10 → 6 - Extracted `fetch_storage_stats()` and `calculate_tag_statistics()` helpers (40+ lines)
253 | - `get_activity_breakdown()`: 9 → 7 - Extracted `calculate_activity_time_ranges()` helper (70+ lines)
254 | - `get_memory_type_distribution()`: 9 → 7 - Extracted `aggregate_type_statistics()` helper
255 | - **install.py (-2 points)**:
256 | - `detect_gpu()`: 10 → 8 - Data-driven GPU_PLATFORM_CHECKS dict + extracted `test_gpu_platform()` helper
257 | - **cloudflare.py (-1 point)**:
258 | - `get_memory_timestamps()`: 9 → 8 - Extracted `_fetch_d1_timestamps()` method for D1 query logic
259 | - **Gemini Review Improvements (5 iterations)**:
260 | - **Critical Fixes**:
261 | - Fixed timezone bug: `datetime.now()` → `datetime.now(timezone.utc)` in consolidator
262 | - Fixed analytics double-counting: proper use of `count_all_memories()`
263 | - CUDA/ROCm robustness: try all detection paths before failing
264 | - **Quality Improvements**:
265 | - Modernized deprecated APIs: `pkg_resources` → `importlib.metadata`, `universal_newlines` → `text=True`
266 | - Enhanced error logging with `exc_info=True` for better debugging
267 | - Improved code consistency and structure across all refactored functions
268 |
269 | ### Code Quality
270 | - **Phase 2 Complete**: 10 of 10 functions refactored (100%)
271 | - **Complexity Reduction**: -39 of -39 points achieved (100% of target)
272 | - **Total Batches**:
273 | - v8.34.0 (PR #242): `analytics.py::get_memory_growth()` (-5 points)
274 | - v8.35.0 (PR #243): `install.py::configure_paths()`, `cloudflare.py::_search_by_tags_internal()` (-15 points)
275 | - v8.36.0 (PR #244): Remaining 7 functions (-19 points)
276 | - **Expected Impact**:
277 | - Complexity Score: 40 → 51+ (+11 points, exceeded +10 target)
278 | - Overall Health Score: 63 → 68-72 (Grade B achieved!)
279 | - **Related**: Issue #240 Phase 2 (100% COMPLETE), Phase 1: v8.33.0 (dead code removal, +5-9 health points)
280 |
281 | ## [8.35.0] - 2025-11-24
282 |
283 | ### Improved
284 | - **Code Quality: Phase 2 Batch 1 Complete** - Refactored 2 high-priority functions (issue #240 PR #243)
285 | - **install.py::configure_paths()**: Complexity reduced from 15 → 5 (-10 points)
286 | - Extracted 4 helper functions for better separation of concerns
287 | - Main function reduced from 80 → ~30 lines
288 | - Improved testability and maintainability
289 | - **cloudflare.py::_search_by_tags_internal()**: Complexity reduced from 13 → 8 (-5 points)
290 | - Extracted 3 helper functions for tag normalization and query building
291 | - Method reduced from 75 → ~45 lines
292 | - Better code organization
293 | - **Gemini Review Improvements**:
294 | - Dynamic PROJECT_ROOT detection in scripts
295 | - Specific exception handling (OSError, IOError, PermissionError)
296 | - Portable documentation paths
297 |
298 | ### Code Quality
299 | - **Phase 2 Progress**: 3 of 10 functions refactored (30% complete)
300 | - **Complexity Reduction**: -20 points achieved of -39 point target (51% of target)
301 | - **Remaining Work**: 7 functions with implementation plans ready
302 | - **Overall Health**: On track for 75+ target score
303 |
304 | ## [8.34.0] - 2025-11-24
305 |
306 | ### Improved
307 | - **Code Quality: Phase 2 Complexity Reduction** - Refactored `analytics.py::get_memory_growth()` function (issue #240 Phase 2)
308 | - Complexity reduced from 11 → 6-7 (-4 to -5 points, exceeding -3 point target)
309 | - Introduced PeriodType Enum for type-safe period validation
310 | - Data-driven period configuration with PERIOD_CONFIGS dict
311 | - Data-driven label formatting with PERIOD_LABEL_FORMATTERS dict
312 | - Improved maintainability and extensibility for analytics endpoints
313 |
314 | ### Code Quality
315 | - Phase 2 Progress: 1 of 10 functions refactored
316 | - Complexity Score: Estimated +1 point improvement (partial Phase 2)
317 | - Overall Health: On track for 70+ target
318 |
319 | ## [8.33.0] - 2025-11-24
320 |
321 | ### Fixed
322 | - **Critical Installation Bug**: Fixed early return in `install.py` that prevented Claude Desktop MCP configuration from executing (issue #240 Phase 1)
323 | - 77 lines of Claude Desktop setup code now properly runs during installation
324 | - Users will now get automatic MCP server configuration when running `install.py`
325 | - Bug was at line 1358 - early `return False` in exception handler made lines 1360-1436 unreachable
326 | - Resolves all 27 pyscn dead code violations identified in issue #240 Phase 1
327 |
328 | ### Improved
329 | - Modernized `install.py` with pathlib throughout (via Gemini Code Assist automated review)
330 | - Specific exception handling (OSError, PermissionError, JSONDecodeError) instead of bare `except`
331 | - Fixed Windows `memory_wrapper.py` path resolution bug (now uses `resolve()` for absolute paths)
332 | - Added config structure validation to prevent TypeError on malformed JSON
333 | - Import optimization and better error messages
334 | - Code structure improvements from 10+ Gemini Code Assist review iterations
335 |
336 | ### Code Quality
337 | - **Dead Code Score**: 70 → 85-90 (projected +15-20 points from removing 27 violations)
338 | - **Overall Health Score**: 63 → 68-72 (projected +5-9 points)
339 | - All improvements applied via automated Gemini PR review workflow
340 |
341 | ## [8.32.0] - 2025-11-24
342 |
343 | ### Added
344 | - **pyscn Static Analysis Integration**: Multi-layer quality workflow with comprehensive static analysis
345 | - New `scripts/pr/run_pyscn_analysis.sh` for PR-time analysis with health score thresholds (blocks <50)
346 | - New `scripts/quality/track_pyscn_metrics.sh` for historical metrics tracking (CSV storage)
347 | - New `scripts/quality/weekly_quality_review.sh` for automated weekly reviews with regression detection
348 | - Enhanced `scripts/pr/quality_gate.sh` with `--with-pyscn` flag for comprehensive checks
349 | - Three-layer quality strategy: Pre-commit (Groq/Gemini LLM) → PR Gate (standard + pyscn) → Periodic (weekly)
350 | - 6 comprehensive metrics: cyclomatic complexity, dead code, duplication, coupling, dependencies, architecture
351 | - Health score thresholds: <50 (blocker), 50-69 (action required), 70-84 (good), 85+ (excellent)
352 | - Complete documentation in `docs/development/code-quality-workflow.md` (651 lines)
353 | - Integration guide in `.claude/agents/code-quality-guard.md`
354 | - Updated `CLAUDE.md` with "Code Quality Monitoring" section
355 |
356 | ## [8.31.0] - 2025-11-23
357 |
358 | ### Added
359 | - **Revolutionary Batch Update Performance** - Memory consolidation now 21,428x faster with new batch update API (#241)
360 | - **Performance Improvement**: 300 seconds → 0.014 seconds for 500 memory batch updates (21,428x speedup)
361 | - **Consolidation Workflow**: Complete consolidation time reduced from 5+ minutes to <1 second for 500 memories
362 | - **New API Method**: `update_memories_batch()` in storage backends for atomic batch operations
363 | - **Implementation**:
364 | - **SQLite Backend**: Single transaction with executemany for 21,428x speedup
365 | - **Cloudflare Backend**: Parallel batch updates with proper vectorize sync
366 | - **Hybrid Backend**: Optimized dual-backend batch sync with queue processing
367 | - **Backward Compatible**: Existing single-update code paths continue working
368 | - **Real-world Impact**: Memory consolidation that previously took 5+ minutes now completes in <1 second
369 | - **Files Modified**:
370 | - `src/mcp_memory_service/storage/sqlite_vec.py` (lines 542-571): Batch update implementation
371 | - `src/mcp_memory_service/storage/cloudflare.py` (lines 673-728): Cloudflare batch updates
372 | - `src/mcp_memory_service/storage/hybrid.py` (lines 772-822): Hybrid backend batch sync
373 | - `src/mcp_memory_service/consolidation/service.py` (line 472): Using batch update in consolidation
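
A simplified sketch of the SQLite side of `update_memories_batch()`: one transaction plus `executemany` replaces hundreds of individual UPDATE/commit round trips (the table and column names are assumptions for illustration):

```python
import json
import sqlite3

def update_memories_batch(conn: sqlite3.Connection, updates: list[dict]) -> None:
    """Apply many metadata updates atomically in a single transaction."""
    rows = [
        (json.dumps(u["metadata"]), u["updated_at"], u["content_hash"])
        for u in updates
    ]
    with conn:  # one BEGIN/COMMIT around the whole batch
        conn.executemany(
            "UPDATE memories SET metadata = ?, updated_at = ? WHERE content_hash = ?",
            rows,
        )
```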
374 |
375 | ### Performance
376 | - **Memory Consolidation**: 21,428x faster batch metadata updates (300s → 0.014s for 500 memories)
377 | - **Consolidation Workflow**: Complete workflow time reduced from 5+ minutes to <1 second for 500 memories
378 | - **Database Efficiency**: Single transaction instead of 500 individual updates with commit overhead
379 |
380 | ## [8.30.0] - 2025-11-23
381 |
382 | ### Added
383 | - **Adaptive Chart Granularity** - Analytics charts now use semantically appropriate time intervals for better trend visualization
384 | - **Last Month view**: Changed from 3-day intervals to weekly aggregation for clearer monthly trends
385 | - **Last Year view**: Uses monthly aggregation for annual overview
386 | - **Human-readable labels**: Charts display clear interval formatting:
387 | - Daily view: "Nov 15" format
388 | - Weekly aggregation: "Week of Nov 15" format
389 | - Monthly aggregation: "November 2024" format
390 | - **Improved UX**: Better semantic alignment between time period and chart granularity
391 | - **Files Modified**: `src/mcp_memory_service/web/api/analytics.py` (lines 307-345), `src/mcp_memory_service/web/static/app.js` (line 3661)
392 |
393 | ### Fixed
394 | - **CRITICAL: Interval Aggregation Bug** - Multi-day intervals (weekly, monthly) now correctly aggregate across entire period
395 | - **Problem**: Intervals were only counting memories from the first day of the interval, not the entire period
396 | - **Impact**: Analytics showed wildly inaccurate data (e.g., 0 memories instead of 427 for Oct 24-30 week)
397 | - **Root Cause**: `strftime` format in date grouping only used the first timestamp, not the interval's date range
398 | - **Solution**: Updated aggregation logic to properly filter and count all memories within each interval
399 | - **Files Modified**: `src/mcp_memory_service/web/api/analytics.py` (lines 242-267)
400 |
401 | - **CRITICAL: Data Sampling Bug** - Analytics now fetch complete historical data with proper date range filtering
402 | - **Problem**: API only fetched 1,000 most recent memories, missing historical data for longer time periods
403 | - **Impact**: Charts showed incomplete or missing data for older time ranges
404 | - **Solution**: Increased fetch limit to 10,000 memories with proper `created_at >= start_date` filtering
405 | - **Files Modified**: `src/mcp_memory_service/web/api/analytics.py` (lines 56-62)
406 | - **Performance**: Maintains fast response times (<200ms) even with larger dataset
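
A small sketch of the corrected aggregation: each memory is bucketed by the start of its interval (a week here), so the count covers the whole period instead of just its first day (timestamps assumed to be Unix epoch floats):

```python
from collections import Counter
from datetime import datetime, timedelta, timezone

def weekly_counts(created_timestamps: list[float]) -> dict[str, int]:
    """Group memory creation times into 'Week of <Mon DD>' buckets."""
    buckets = Counter()
    for ts in created_timestamps:
        day = datetime.fromtimestamp(ts, tz=timezone.utc)
        week_start = day - timedelta(days=day.weekday())  # Monday of that week
        buckets[f"Week of {week_start.strftime('%b %d')}"] += 1
    return dict(buckets)
```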
407 |
408 | ### Changed
409 | - **Analytics API**: Improved data fetching with larger limits and proper date filtering for accurate historical analysis
410 |
411 | ## [8.29.0] - 2025-11-23
412 |
413 | ### Added
414 | - **Dashboard Quick Actions: Sync Controls Widget** - Compact, real-time sync management for hybrid backend users (#234, fixes #233)
415 | - **Real-time sync status indicator**: Visual states for synced/syncing/pending/error/paused with color-coded icons
416 | - **Pause/Resume controls**: Safely pause background sync for database maintenance or offline work
417 | - **Force sync button**: Manual trigger for immediate synchronization
418 | - **Sync metrics**: Display last sync time and pending operations count
419 | - **Clean layout**: Removed redundant sync status bar between header and body, moved to sidebar widget
420 | - **Backend-aware**: Widget automatically hides for sqlite-vec only users (hybrid-specific feature)
421 | - **API endpoints**:
422 | - `POST /api/sync/pause` - Pause background sync
423 | - `POST /api/sync/resume` - Resume background sync
424 | - **Hybrid backend methods**: Added `pause_sync()` and `resume_sync()` for sync control
425 |
426 | - **Automatic Scheduled Backup System** - Enterprise-grade backup with retention policies and scheduling (#234, fixes #233)
427 | - **New backup module**: `src/mcp_memory_service/backup/` with `BackupService` and `BackupScheduler`
428 | - **SQLite native backup API**: Uses safe `sqlite3.backup()` to prevent corruption (no file copying)
429 | - **Async I/O**: Non-blocking backup operations with `asyncio.to_thread`
430 | - **Flexible scheduling**: Hourly, daily, or weekly automatic backups
431 | - **Retention policies**: Configurable by days and max backup count
432 | - **Dashboard widget**: Backup status, last backup time, manual trigger, backup count, next scheduled time
433 | - **Configuration via environment variables**:
434 | - `MCP_BACKUP_ENABLED=true` (default: true)
435 | - `MCP_BACKUP_INTERVAL=daily` (hourly/daily/weekly, default: daily)
436 | - `MCP_BACKUP_RETENTION=7` (days, default: 7)
437 | - `MCP_BACKUP_MAX_COUNT=10` (max backups, default: 10)
438 | - **API endpoints**:
439 | - `GET /api/backup/status` - Get backup status and scheduler info
440 | - `POST /api/backup/now` - Trigger manual backup
441 | - `GET /api/backup/list` - List available backups with metadata
442 | - **Security**: OAuth protection on backup endpoints, no file path exposure in responses
443 | - **Safari compatibility**: Improved event listener handling with lazy initialization
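
A condensed sketch of the backup approach described above: SQLite's native `backup()` API run off the event loop via `asyncio.to_thread` (paths and the file-naming scheme are illustrative, not the `BackupService` internals):

```python
import asyncio
import sqlite3
from datetime import datetime, timezone
from pathlib import Path

def _backup_sqlite(db_path: str, backup_dir: str) -> Path:
    """Use SQLite's native backup API instead of copying the database file."""
    Path(backup_dir).mkdir(parents=True, exist_ok=True)
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
    target = Path(backup_dir) / f"memories_{stamp}.db"
    source, dest = sqlite3.connect(db_path), sqlite3.connect(target)
    try:
        source.backup(dest)  # consistent snapshot, safe while the DB is in use
    finally:
        source.close()
        dest.close()
    return target

async def backup_now(db_path: str, backup_dir: str) -> Path:
    # Keep the event loop responsive while the blocking backup runs.
    return await asyncio.to_thread(_backup_sqlite, db_path, backup_dir)
```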
444 |
445 | ### Changed
446 | - **Quick Actions Layout**: Moved sync controls from top status bar to sidebar widget for cleaner, more accessible UI
447 | - **Sync State Persistence**: Pause state is now preserved during force sync operations
448 | - **Dashboard Feedback**: Added toast notifications for sync and backup operations
449 |
450 | ### Fixed
451 | - **Sync Button Click Events**: Resolved DOM timing issues with lazy event listeners for reliable button interactions
452 | - **Spinner Animation**: Fixed syncing state visual feedback with proper CSS animations
453 | - **Security**: Removed file path exposure from backup API responses (used backup IDs instead)
454 |
455 | ## [8.28.1] - 2025-11-22
456 |
457 | ### Fixed
458 | - **CRITICAL: HTTP MCP Transport JSON-RPC 2.0 Compliance** - Fixed protocol violation causing Claude Code rejection (#236)
459 | - **Problem**: HTTP MCP server returned `"error": null` in successful responses, violating JSON-RPC 2.0 spec which requires successful responses to OMIT the error field entirely (not include it as null)
460 | - **Impact**: Claude Code's strict schema validation rejected all HTTP MCP responses with "Unrecognized key(s) in object: 'error'" errors, making HTTP transport completely unusable
461 | - **Root Cause**: MCPResponse Pydantic model included both `result` and `error` fields in all responses, serializing null values
462 | - **Solution**:
463 | - Added `ConfigDict(exclude_none=True)` to MCPResponse model to exclude null fields from serialization
464 | - Updated docstring to document JSON-RPC 2.0 compliance requirements
465 | - Replaced deprecated `.dict()` with `.model_dump()` for Pydantic V2 compatibility
466 | - Moved json import to top of file per PEP 8 style guidelines
467 | - **Files Modified**:
468 | - `src/mcp_memory_service/web/api/mcp.py` - Added ConfigDict, updated serialization
469 | - **Affected Users**: All users attempting to use HTTP MCP transport with Claude Code or other strict JSON-RPC 2.0 clients
470 | - **Testing**: Verified successful responses exclude `error` field and error responses exclude `result` field
471 | - **Credits**: Thanks to @timkjr for identifying the issue and providing the fix
472 |
473 | ## [8.28.0] - 2025-11-21
474 |
475 | ### Added
476 | - **Cloudflare Tag Filtering** - AND/OR operations for tag searches with unified API contracts (#228)
477 | - Added `search_by_tags(tags, operation, time_start, time_end)` to the storage base class and implemented it across SQLite, Cloudflare, Hybrid, and HTTP client backends
478 | - Normalized Cloudflare SQL to use `GROUP BY` + `HAVING COUNT(DISTINCT ...)` for AND semantics while supporting optional time ranges
479 | - Introduced `get_all_tags_with_counts()` for Cloudflare to power analytics dashboards without extra queries
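
A usage sketch of the unified signature quoted above; the parameter names match the entry, while the timestamp types passed for the time range are an assumption:

```python
from datetime import datetime, timedelta, timezone

async def find_recent_architecture_notes(storage):
    """AND semantics: only memories carrying BOTH tags, created in the last 30 days."""
    now = datetime.now(timezone.utc)
    return await storage.search_by_tags(
        tags=["architecture", "decision"],
        operation="AND",                     # "OR" would match either tag
        time_start=(now - timedelta(days=30)).timestamp(),
        time_end=now.timestamp(),
    )
```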
480 |
481 | ### Changed
482 | - **Tag Filtering Behavior** - `get_all_memories(tags=...)` now performs exact tag comparisons with AND logic instead of substring OR matching, and hybrid storage exposes the same `operation` parameter for parity across backends.
483 |
484 | ## [8.27.2] - 2025-11-18
485 |
486 | ### Fixed
487 | - **Memory Type Loss During Cloudflare-to-SQLite Sync** - Fixed `memory_type` not being preserved in sync script
488 | - **Problem**: `scripts/sync/sync_memory_backends.py` did not extract or pass `memory_type` when syncing from Cloudflare to SQLite-vec
489 | - **Impact**: All memories synced via `--direction cf-to-sqlite` showed as "untyped" (100%) in dashboard analytics
490 | - **Root Cause**: Missing `memory_type` field in both memory dict extraction and Memory object creation
491 | - **Solution**:
492 | - Added `memory_type` to memory dictionary extraction from source
493 | - Added `memory_type` and `updated_at` parameters when creating Memory objects for target storage
494 | - **Files Modified**:
495 | - `scripts/sync/sync_memory_backends.py` - Added memory_type and updated_at handling
496 | - **Affected Users**: Users who ran `python scripts/sync/sync_memory_backends.py --direction cf-to-sqlite`
497 | - **Recovery**: Re-run sync from Cloudflare to restore memory types (Cloudflare preserves original types)
498 |
499 | ## [8.27.1] - 2025-11-18
500 |
501 | ### Fixed
502 | - **CRITICAL: Timestamp Regression Bug** - Fixed `created_at` timestamps being reset during metadata sync
503 | - **Problem**: Bidirectional sync and drift detection (v8.25.0-v8.27.0) incorrectly reset `created_at` timestamps to current time during metadata updates
504 | - **Impact**: All memories synced from Cloudflare → SQLite-vec appeared "just created", destroying historical timestamp data
505 | - **Root Cause**: `preserve_timestamps=False` parameter reset **both** `created_at` and `updated_at`, when it should only update `updated_at`
506 | - **Solution**:
507 | - Modified `update_memory_metadata()` to preserve `created_at` from source memory during sync
508 | - Hybrid storage now passes all 4 timestamp fields (`created_at`, `created_at_iso`, `updated_at`, `updated_at_iso`) during drift detection
509 | - Cloudflare storage updated to handle timestamps consistently with SQLite-vec
510 | - **Files Modified**:
511 | - `src/mcp_memory_service/storage/sqlite_vec.py:1389-1406` - Fixed timestamp handling logic
512 | - `src/mcp_memory_service/storage/hybrid.py:625-637, 935-947` - Pass source timestamps during sync
513 | - `src/mcp_memory_service/storage/cloudflare.py:833-864` - Consistent timestamp handling
514 | - **Tests Added**: `tests/test_timestamp_preservation.py` - Comprehensive test suite with 7 tests covering:
515 | - Timestamp preservation with `preserve_timestamps=True`
516 | - Regression test for `created_at` preservation without source timestamps
517 | - Drift detection scenario
518 | - Multiple sync operations
519 | - Initial memory storage
520 | - **Recovery Tools**:
521 | - `scripts/validation/validate_timestamp_integrity.py` - Detect timestamp anomalies
522 | - `scripts/maintenance/recover_timestamps_from_cloudflare.py` - Restore corrupted timestamps from Cloudflare
523 | - **Affected Versions**: v8.25.0 (drift detection), v8.27.0 (bidirectional sync)
524 | - **Affected Users**: Hybrid backend users who experienced automatic drift detection or initial sync
525 | - **Data Recovery**: If using hybrid backend and Cloudflare has correct timestamps, run recovery script:
526 | ```bash
527 | # Preview recovery
528 | python scripts/maintenance/recover_timestamps_from_cloudflare.py --dry-run
529 |
530 | # Apply recovery
531 | python scripts/maintenance/recover_timestamps_from_cloudflare.py --apply
532 | ```
533 |
534 | ### Changed
535 | - **Timestamp Handling Semantics** - Clarified `preserve_timestamps` parameter behavior:
536 | - `preserve_timestamps=True` (default): Only updates `updated_at` to current time, preserves `created_at`
537 | - `preserve_timestamps=False`: Uses timestamps from `updates` dict if provided, otherwise preserves existing `created_at`
538 | - **Never** resets `created_at` to current time (this was the bug)
539 |
540 | ### Added
541 | - **Timestamp Integrity Validation** - New script to detect timestamp anomalies:
542 | ```bash
543 | python scripts/validation/validate_timestamp_integrity.py
544 | ```
545 | - Checks for impossible timestamps (`created_at > updated_at`)
546 | - Detects suspicious timestamp clusters (bulk reset indicators)
547 | - Analyzes timestamp distribution for anomalies
548 | - Provides detailed statistics and warnings
549 |
550 | ## [8.27.0] - 2025-11-17
551 |
552 | ### Added
553 | - **Hybrid Storage Sync Performance Optimization** - Dramatic initial sync speed improvement (3-5x faster)
554 | - **Performance Metrics**:
555 | - **Before**: ~5.5 memories/second (8 minutes for 2,619 memories)
556 | - **After**: ~15-30 memories/second (1.5-3 minutes for 2,619 memories)
557 | - **3-5x faster** initial sync from Cloudflare to local SQLite
558 | - **Optimizations**:
559 | - **Bulk Existence Check**: `get_all_content_hashes()` method eliminates 2,619 individual DB queries
560 |     - **Parallel Processing**: `asyncio.gather()` with `Semaphore(15)` for concurrent memory processing (see the sketch after this list)
561 | - **Larger Batch Sizes**: Increased from 100 to 500 memories per Cloudflare API call (5x fewer requests)
562 | - **Files Modified**:
563 | - `src/mcp_memory_service/storage/sqlite_vec.py` - Added `get_all_content_hashes()` method (lines 1208-1227)
564 | - `src/mcp_memory_service/storage/hybrid.py` - Parallel sync implementation (lines 859-921)
565 | - `scripts/benchmarks/benchmark_hybrid_sync.py` - Performance validation script
566 | - **Backward Compatibility**: Zero breaking changes, transparent optimization for all sync operations
567 | - **Use Case**: Users with large memory databases (1000+ memories) will see significantly faster initial sync times
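A sketch of the optimization pattern above (bulk hash preload plus bounded concurrency). `get_all_content_hashes()` is the documented new method; the other names (`local_store.store`, `memory.content_hash`) are assumptions.

```python
# Illustrative sketch, not the actual hybrid.py implementation.
import asyncio

async def sync_missing(cloud_memories, local_store, max_concurrency: int = 15):
    existing = await local_store.get_all_content_hashes()  # one bulk query, O(1) membership checks
    semaphore = asyncio.Semaphore(max_concurrency)

    async def sync_one(memory):
        if memory.content_hash in existing:
            return False                                    # already stored locally
        async with semaphore:                               # cap concurrent writes at 15
            await local_store.store(memory)
            return True

    results = await asyncio.gather(*(sync_one(m) for m in cloud_memories))
    return sum(results)                                     # number of newly synced memories
```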
568 |
569 | ### Changed
570 | - **Hybrid Initial Sync Architecture** - Refactored sync loop for better performance
571 | - O(1) hash lookups instead of O(n) individual queries
572 | - Concurrent processing with controlled parallelism (15 simultaneous operations)
573 | - Reduced Cloudflare API overhead with larger batches (6 API calls vs 27)
574 | - Maintains full drift detection and metadata synchronization capabilities
575 |
576 | ### Fixed
577 | - **Duplicate Sync Queue Architecture** - Resolved inefficient dual-sync issue
578 | - **Problem**: MCP server and HTTP server each created separate HybridStorage instances with independent sync queues
579 | - **Impact**: Duplicate sync work, potential race conditions, memory not immediately visible across servers
580 |   - **Solution**: New `MCP_HYBRID_SYNC_OWNER` configuration to control which process handles Cloudflare sync (a minimal gating sketch follows this entry)
581 | - **Configuration Options**:
582 | - `"http"` - HTTP server only handles sync (recommended - avoids duplicate work)
583 | - `"mcp"` - MCP server only handles sync
584 | - `"both"` - Both servers sync independently (default for backward compatibility)
585 | - **Files Modified**:
586 | - `src/mcp_memory_service/config.py` - Added `HYBRID_SYNC_OWNER` configuration (lines 424-427)
587 | - `src/mcp_memory_service/storage/factory.py` - Server-type aware storage creation (lines 76-110)
588 | - `src/mcp_memory_service/mcp_server.py` - Pass server_type="mcp" (line 143)
589 | - `src/mcp_memory_service/web/dependencies.py` - Pass server_type="http" (line 65)
590 | - **Migration Guide**:
591 | ```bash
592 | # Recommended: Set HTTP server as sync owner to eliminate duplicate sync
593 | export MCP_HYBRID_SYNC_OWNER=http
594 | ```
595 | - **Backward Compatibility**: Defaults to "both" (existing behavior), no breaking changes
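A minimal gating sketch, assuming a hypothetical helper name; the real logic in `storage/factory.py` may differ in detail.

```python
# Hedged sketch of gating sync ownership on MCP_HYBRID_SYNC_OWNER.
import os

def sync_enabled_for(server_type: str) -> bool:
    """Return True if this process should run the Cloudflare sync queue."""
    owner = os.environ.get("MCP_HYBRID_SYNC_OWNER", "both").lower()
    return owner in ("both", server_type)

# With MCP_HYBRID_SYNC_OWNER=http, only the HTTP server owns the sync queue:
os.environ["MCP_HYBRID_SYNC_OWNER"] = "http"
print(sync_enabled_for("http"))  # True
print(sync_enabled_for("mcp"))   # False
```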
596 |
597 | ### Performance
598 | - **Benchmark Results** (`python scripts/benchmarks/benchmark_hybrid_sync.py`):
599 | - Bulk hash loading: 2,619 hashes loaded in ~100ms (vs ~13,000ms for individual queries)
600 | - Parallel processing: 15x concurrency reduces CPU idle time
601 | - Batch size optimization: 78% reduction in API calls (27 → 6 for 2,619 memories)
602 | - Combined speedup: 3-5x faster initial sync
603 |
604 | ## [8.26.0] - 2025-11-16
605 |
606 | ### Added
607 | - **Global MCP Server Caching** - Revolutionary performance improvement for MCP tools (PR #227)
608 | - **Performance Metrics**:
609 | - **534,628x faster** on cache hits (1,810ms → 0.01ms per MCP tool call)
610 | - **99.9996% latency reduction** for cached operations
611 | - **90%+ cache hit rate** in normal usage patterns
612 | - **MCP tools now 41x faster** than HTTP API after warm-up
613 | - **New MCP Tool**: `get_cache_stats` - Real-time cache performance monitoring
614 | - Track hits/misses, hit rate percentage
615 | - Monitor storage and service cache sizes
616 | - View initialization time statistics (avg/min/max)
617 | - **Infrastructure**:
618 | - Global cache structures: `_STORAGE_CACHE`, `_MEMORY_SERVICE_CACHE`, `_CACHE_STATS`
619 | - Thread-safe concurrent access via `asyncio.Lock`
620 | - Automatic cleanup on server shutdown (no memory leaks)
621 | - **Files Modified**:
622 | - `src/mcp_memory_service/server.py` - Production MCP server caching
623 | - `src/mcp_memory_service/mcp_server.py` - FastMCP server caching
624 | - `src/mcp_memory_service/utils/cache_manager.py` - New cache management utilities
625 | - `scripts/benchmarks/benchmark_server_caching.py` - Cache effectiveness validation
626 | - **Backward Compatibility**: Zero breaking changes, transparent caching for all MCP clients
627 | - **Use Case**: MCP tools in Claude Desktop and Claude Code are now the fastest method for memory operations
628 |
629 | ### Changed
630 | - **Code Quality Improvements** - Gemini Code Assist review implementation (PR #227)
631 | - Eliminated code duplication across `server.py` and `mcp_server.py`
632 | - Created shared `CacheManager.calculate_stats()` utility for statistics
633 | - Enhanced PEP 8 compliance with proper naming conventions
634 | - Added comprehensive inline documentation for cache implementation
635 |
636 | ### Fixed
637 | - **Security Vulnerability** - Removed unsafe `eval()` usage in benchmark script (PR #227)
638 | - Replaced `eval(stats_str)` with safe `json.loads()` for parsing cache statistics
639 | - Eliminated arbitrary code execution risk in development tools
640 | - Improved benchmark script robustness
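The safe-parsing change in miniature (variable names are illustrative):

```python
# json.loads parses data without executing it, unlike eval().
import json

stats_str = '{"hits": 9, "misses": 1, "hit_rate": 0.9}'
stats = json.loads(stats_str)   # previously eval(stats_str) - arbitrary code execution risk
print(stats["hit_rate"])        # 0.9
```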
641 |
642 | ### Performance
643 | - **Benchmark Results** (10 consecutive MCP tool calls):
644 | - First Call (Cache Miss): ~2,485ms
645 | - Cached Calls Average: ~0.01ms
646 | - Speedup Factor: 534,628x
647 | - Cache Hit Rate: 90%
648 | - **Impact**: MCP tools are now the recommended method for Claude Desktop and Claude Code users
649 | - **Technical Details**:
650 | - Caches persist across stateless HTTP calls
651 | - Storage instances keyed by "{backend}:{path}"
652 | - MemoryService instances keyed by storage ID
653 | - Lazy initialization preserved to prevent startup hangs
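A hedged sketch of the caching pattern described above. The cache names mirror the changelog (`_STORAGE_CACHE`, `_CACHE_STATS`) and the documented `"{backend}:{path}"` key, but the initializer plumbing is an assumption, not the real server code.

```python
import asyncio
from typing import Any, Awaitable, Callable, Dict

_STORAGE_CACHE: Dict[str, Any] = {}
_CACHE_STATS = {"hits": 0, "misses": 0}
_CACHE_LOCK = asyncio.Lock()

async def get_cached_storage(backend: str, path: str,
                             initializer: Callable[[], Awaitable[Any]]) -> Any:
    key = f"{backend}:{path}"
    async with _CACHE_LOCK:                   # serialize concurrent lookups
        if key in _STORAGE_CACHE:
            _CACHE_STATS["hits"] += 1
            return _STORAGE_CACHE[key]
        _CACHE_STATS["misses"] += 1
        storage = await initializer()         # expensive init runs once per key
        _STORAGE_CACHE[key] = storage
        return storage
```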
654 |
655 | ### Documentation
656 | - Updated Wiki: 05-Performance-Optimization.md with cache architecture
657 | - Added cache monitoring guide using `get_cache_stats` tool
658 | - Performance comparison tables now show MCP as fastest method
659 |
660 | ## [8.25.2] - 2025-11-16
661 |
662 | ### Changed
663 | - **Drift Detection Script Refactoring** - Improved code maintainability in `check_drift.py` (PR #226)
664 | - **Refactored**: Cloudflare config dictionary construction to use dictionary comprehension
665 | - **Improvement**: Separated configuration keys list from transformation logic
666 | - **Benefit**: Easier to maintain and modify configuration keys
667 | - **Code Quality**: More Pythonic, cleaner, and more readable
668 | - **Impact**: No functional changes, pure code quality improvement
669 | - **File Modified**: `scripts/sync/check_drift.py`
670 | - **Credit**: Implements Gemini code review suggestions from PR #224
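An illustrative version of the comprehension refactor; the key list and environment-variable naming are examples, not the script's exact fields.

```python
import os

CLOUDFLARE_KEYS = ["api_token", "account_id", "d1_database_id", "vectorize_index"]

# Keys are listed once; the transformation is expressed once.
cloudflare_config = {key: os.environ.get(f"CLOUDFLARE_{key.upper()}") for key in CLOUDFLARE_KEYS}
```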
671 |
672 | ## [8.25.1] - 2025-11-16
673 |
674 | ### Fixed
675 | - **Drift Detection Script Initialization** - Corrected critical bugs in `check_drift.py` (PR #224)
676 | - **Bug 1**: Fixed incorrect config attribute `SQLITE_DB_PATH` → `SQLITE_VEC_PATH` in AppConfig
677 | - **Bug 2**: Added missing `cloudflare_config` parameter to HybridMemoryStorage initialization
678 | - **Impact**: Script was completely broken for Cloudflare/Hybrid backends - now initializes successfully
679 | - **Error prevented**: `AttributeError: 'AppConfig' object has no attribute 'SQLITE_DB_PATH'`
680 | - **File Modified**: `scripts/sync/check_drift.py`
681 | - **Severity**: High - Script was non-functional for users with hybrid or cloudflare backends
682 | - **CI Test Infrastructure** - Added HuggingFace model caching to prevent network-related test failures (PR #225)
683 | - **Root Cause**: GitHub Actions runners cannot access huggingface.co during test runs
684 | - **Solution**: Implemented `actions/cache@v3` for `~/.cache/huggingface` directory
685 | - **Pre-download step**: Downloads `all-MiniLM-L6-v2` model after dependency installation
686 | - **Impact**: Fixes all future PR test failures caused by model download restrictions
687 | - **Cache Strategy**: Key includes `pyproject.toml` hash for dependency tracking
688 | - **Performance**: First run downloads model, subsequent runs use cache
689 | - **File Modified**: `.github/workflows/main.yml`
690 |
691 | ### Technical Details
692 | - **PR #224**: Drift detection script now properly initializes Cloudflare backend with all required parameters (api_token, account_id, d1_database_id, vectorize_index)
693 | - **PR #225**: CI environment now caches embedding models, eliminating network dependency during test execution
694 | - **Testing**: Both fixes validated in PR test runs - drift detection now works, tests pass consistently
695 |
696 | ## [8.25.0] - 2025-11-15
697 |
698 | ### Added
699 | - **Hybrid Backend Drift Detection** - Automatic metadata synchronization using `updated_at` timestamps (issue #202)
700 | - **Bidirectional awareness**: Detects metadata changes on either backend (SQLite-vec ↔ Cloudflare)
701 | - **Periodic drift checks**: Configurable interval via `MCP_HYBRID_DRIFT_CHECK_INTERVAL` (default: 1 hour)
702 | - **"Newer timestamp wins" conflict resolution**: Prevents data loss during metadata updates
703 | - **Dry-run support**: Preview changes via `python scripts/sync/check_drift.py`
704 | - **New configuration variables**:
705 | - `MCP_HYBRID_SYNC_UPDATES` - Enable metadata sync (default: true)
706 | - `MCP_HYBRID_DRIFT_CHECK_INTERVAL` - Seconds between drift checks (default: 3600)
707 | - `MCP_HYBRID_DRIFT_BATCH_SIZE` - Memories to check per scan (default: 100)
708 | - **New methods**:
709 | - `BackgroundSyncService._detect_and_sync_drift()` - Core drift detection logic with dry-run mode
710 | - `CloudflareStorage.get_memories_updated_since()` - Query memories by update timestamp
711 | - **Enhanced initial sync**: Now detects and syncs metadata drift for existing memories
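A minimal sketch of "newer timestamp wins" with the 1-second clock-skew tolerance noted in the technical details below; the data shapes and return labels are assumptions.

```python
CLOCK_SKEW_TOLERANCE = 1.0  # seconds

def resolve_drift(local_updated_at: float, remote_updated_at: float) -> str:
    if abs(local_updated_at - remote_updated_at) <= CLOCK_SKEW_TOLERANCE:
        return "in-sync"                  # within tolerance: no action
    if remote_updated_at > local_updated_at:
        return "pull-remote-metadata"     # Cloudflare copy is newer
    return "push-local-metadata"          # local SQLite copy is newer

print(resolve_drift(1_700_000_000.0, 1_700_000_005.0))  # pull-remote-metadata
```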
712 |
713 | ### Fixed
714 | - **Issue #202** - Hybrid backend now syncs metadata updates (tags, types, custom fields)
715 | - Previous behavior only detected missing memories, ignoring metadata changes
716 | - Prevented silent data loss when memories updated on one backend but not synced
717 | - Tag fixes in Cloudflare now properly propagate to local SQLite
718 | - Metadata updates no longer diverge between backends
719 |
720 | ### Changed
721 | - Initial sync (`_perform_initial_sync`) now compares timestamps for existing memories
722 | - Periodic sync includes drift detection checks at configurable intervals
723 | - Sync statistics tracking expanded with drift detection metrics
724 |
725 | ### Technical Details
726 | - **Files Modified**:
727 | - `src/mcp_memory_service/config.py` - Added 3 configuration variables
728 | - `src/mcp_memory_service/storage/hybrid.py` - Drift detection implementation (~150 lines)
729 | - `src/mcp_memory_service/storage/cloudflare.py` - Added `get_memories_updated_since()` method
730 | - `scripts/sync/check_drift.py` - New dry-run validation script
731 | - **Architecture**: Timestamp-based drift detection with 1-second clock skew tolerance
732 | - **Performance**: Non-blocking async operations, configurable batch sizes
733 | - **Safety**: Opt-in feature, dry-run mode, comprehensive audit logging
734 |
735 | ## [8.24.4] - 2025-11-15
736 |
737 | ### Changed
738 | - **Code Quality Improvements** - Applied Gemini Code Assist review suggestions (issue #180)
739 | - **documents.py:87** - Replaced chained `.replace()` calls with `re.sub()` for path separator sanitization
740 | - **app.js:751-762** - Cached DOM elements in setProcessingMode to reduce query overhead
741 | - **app.js:551-553, 778-780** - Cached upload option elements to optimize handleDocumentUpload
742 | - **index.html:357, 570** - Fixed indentation consistency for closing `</div>` tags
743 | - Performance impact: Minor - reduced DOM query overhead
744 | - Breaking changes: None
745 |
746 | ### Technical Details
747 | - **Files Modified**: `src/mcp_memory_service/web/api/documents.py`, `src/mcp_memory_service/web/static/app.js`, `src/mcp_memory_service/web/static/index.html`
748 | - **Code Quality**: Regex-based sanitization more scalable, DOM element caching reduces redundant queries
749 | - **Commit**: ffc6246 - refactor: code quality improvements from Gemini review (issue #180)
750 |
751 | ## [8.24.3] - 2025-11-15
752 |
753 | ### Fixed
754 | - **GitHub Release Manager Agent** - Resolved systematic version history omission in README.md (commit ccf959a)
755 | - Fixed agent behavior that was omitting previous versions from "Previous Releases" section
756 | - Added v8.24.1 to Previous Releases list (was missing despite being valid release)
757 | - Enhanced agent instructions with CRITICAL section for maintaining version history integrity
758 | - Added quality assurance checklist item to prevent future omissions
759 | - Root cause: Agent was replacing entire Previous Releases section instead of prepending new version
760 |
761 | ### Added
762 | - **Test Coverage for Tag+Time Filtering** - Comprehensive test suite for issue #216 (commit ebff282)
763 | - 10 unit tests passing across SQLite-vec, Cloudflare, and Hybrid backends
764 | - Validates PR #215 functionality (tag+time filtering to fix semantic over-filtering bug #214)
765 | - Tests verify memories can be retrieved using both tag criteria AND time range filters
766 | - API integration tests created (with known threading issues documented for future fix)
767 | - Ensures regression prevention for semantic search over-filtering bug
768 |
769 | ### Changed
770 | - GitHub release workflow now more reliable with enhanced agent guardrails
771 | - Test suite provides better coverage for multi-filter memory retrieval scenarios
772 |
773 | ### Technical Details
774 | - **Files Modified**:
775 | - `.claude/agents/github-release-manager.md` - Added CRITICAL section for Previous Releases maintenance
776 | - `tests/test_time_filtering.py` - 10 new unit tests for tag+time filtering
777 | - `tests/integration/test_api_time_search.py` - API integration tests (threading issues documented)
778 | - **Test Execution**: All 10 unit tests passing, API tests have known threading limitations
779 | - **Impact**: Prevents version history loss in future releases, ensures tag+time filtering remains functional
780 |
781 | ## [8.24.2] - 2025-11-15
782 |
783 | ### Fixed
784 | - **CI/CD Workflow Infrastructure** - Development Setup Validation workflow fixes (issue #217 related)
785 | - Fixed bash errexit handling in workflow tests - prevents premature exit on intentional test failures
786 | - Corrected exit code capture using EXIT_CODE=0 and || EXIT_CODE=$? pattern
787 | - All 5 workflow tests now passing: version consistency, pre-commit hooks, server warnings, developer prompts, docs accuracy
788 | - Root cause: bash runs with -e flag (errexit), which exits immediately when commands return non-zero exit codes
789 | - Tests intentionally run check_dev_setup.py expecting exit code 1, but bash was exiting before capture
790 | - Commits: b4f9a5a, d1bcd67
791 |
792 | ### Changed
793 | - Workflow tests can now properly validate that the development setup validator correctly detects problems
794 | - Exit code capture no longer uses "|| true" pattern (was making all commands return 0)
795 |
796 | ### Technical Details
797 | - **Files Modified**: .github/workflows/dev-setup-validation.yml
798 | - **Pattern Change**:
799 | - Before: `python script.py || true` (always returns 0, breaks exit code testing)
800 | - After: `EXIT_CODE=0; python script.py || EXIT_CODE=$?` (captures actual exit code, prevents bash exit)
801 | - **Test Jobs**: All 5 jobs in dev-setup-validation workflow now pass consistently
802 | - **Context**: Part of test infrastructure improvement efforts (issue #217)
803 |
804 | ## [8.24.1] - 2025-11-15
805 |
806 | ### Fixed
807 | - **Test Infrastructure Failures** - Resolved 27 pre-existing test failures (issue #217)
808 | - Fixed async fixture incompatibility in 6 test files (19+ failures)
809 |   - Corrected broken imports (MCPMemoryServer → MemoryServer; removed MemoryMetadata)
810 | - Added missing content_hash parameter to Memory() instantiations
811 | - Updated hardcoded version strings (6.3.0 → 8.24.0)
812 | - Improved test pass rate from 63% to 71% (412/584 tests passing)
813 | - Execution: Automated via amp-bridge agent
814 |
815 | ### Changed
816 | - Test suite now has cleaner baseline for detecting new regressions
817 | - All async test fixtures now use @pytest_asyncio.fixture decorator
818 |
819 | ### Technical Details
820 | - **Automated Fix**: Used amp-bridge agent for pattern-based refactoring
821 | - **Execution Time**: ~15 minutes (vs 1-2 hours manual)
822 | - **Files Modified**: 11 test files across tests/ and tests/integration/
823 | - **Root Causes**: Test infrastructure issues, not code bugs
824 | - **Remaining Failures**: 172 failures remain (backend config, performance, actual bugs)
825 |
826 | ## [8.24.0] - 2025-11-12
827 |
828 | ### Added
829 | - **PyPI Publishing Automation** - Package now available via `pip install mcp-memory-service`
830 | - **Workflow Automation**: Configured GitHub Actions workflow to automatically publish to PyPI on tag pushes
831 | - **Installation Simplification**: Users can now install directly via `pip install mcp-memory-service` or `uv pip install mcp-memory-service`
832 |     - **Accessibility**: Lowers the installation barrier for users without git access or git familiarity
833 | - **Token Configuration**: Secured with `PYPI_TOKEN` GitHub secret for automated publishing
834 | - **Quality Gates**: Publishes only after successful test suite execution
835 |
836 | ### Changed
837 | - **Distribution Method**: Added PyPI as primary distribution channel alongside GitHub releases
838 | - **Installation Documentation**: Updated guides to include pip-based installation as recommended method
839 |
840 | ### Technical Details
841 | - **Files Modified**:
842 | - `.github/workflows/publish.yml` - NEW workflow for automated PyPI publishing
843 | - GitHub repository secrets - Added `PYPI_TOKEN` for authentication
844 | - **Trigger**: Workflow runs automatically on git tag creation (pattern: `v*.*.*`)
845 | - **Build System**: Uses Hatchling build backend with `python-semantic-release`
846 |
847 | ### Migration Notes
848 | - **For New Users**: Preferred installation is now `pip install mcp-memory-service`
849 | - **For Existing Users**: No action required - git-based installation continues to work
850 | - **For Contributors**: Tag creation now triggers PyPI publishing automatically
851 |
852 | ## [8.23.1] - 2025-11-10
853 |
854 | ### Fixed
855 | - **Stale Virtual Environment Prevention System** - Comprehensive 6-layer strategy to prevent "stale venv vs source code" version mismatches
856 |   - **Root Cause**: MCP servers load from site-packages, not source files, so a system restart doesn't help - it relaunches with the same stale package
857 |   - **Impact**: Prevents the issue that caused the v8.23.0 tag validation bug to persist despite the v8.22.2 fix (source showed v8.23.0 while the venv had v8.5.3)
858 |
859 | ### Added
860 | - **Phase 1: Automated Detection**
861 |   - New `scripts/validation/check_dev_setup.py` - Validates source/venv version consistency, detects editable installs (a minimal sketch follows this section)
862 | - Enhanced `scripts/hooks/pre-commit` - Blocks commits when venv is stale, provides actionable error messages
863 | - Added CLAUDE.md development setup section with explicit `pip install -e .` guidance
864 |
865 | - **Phase 2: Runtime Warnings**
866 | - Added `check_version_consistency()` function in `src/mcp_memory_service/server.py`
867 | - Server startup warnings when version mismatch detected (source vs package)
868 | - Updated README.md developer section with editable install instructions
869 | - Enhanced `docs/development/ai-agent-instructions.md` with proper setup commands
870 |
871 | - **Phase 3: Interactive Onboarding**
872 | - Enhanced `scripts/installation/install.py` with developer detection (checks for git repo)
873 | - Interactive prompt guides developers to use `pip install -e .` for editable installs
874 | - New CI/CD workflow `.github/workflows/dev-setup-validation.yml` with 5 comprehensive test jobs:
875 | 1. Version consistency validation
876 | 2. Pre-commit hook functionality
877 | 3. Server startup warnings
878 | 4. Interactive developer prompts
879 | 5. Documentation accuracy checks
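A hedged sketch of the Phase 1 source-vs-venv check; where the version string lives in the source tree is an assumption, not the actual `check_dev_setup.py` logic.

```python
import re
from importlib import metadata
from pathlib import Path

def source_version(repo_root: Path) -> str:
    init_py = repo_root / "src" / "mcp_memory_service" / "__init__.py"
    match = re.search(r'__version__\s*=\s*["\']([^"\']+)["\']', init_py.read_text())
    return match.group(1) if match else "unknown"

def installed_version() -> str:
    try:
        return metadata.version("mcp-memory-service")
    except metadata.PackageNotFoundError:
        return "not installed"

if __name__ == "__main__":
    src, pkg = source_version(Path(".")), installed_version()
    if src != pkg:
        print(f"Stale venv: source is {src} but installed package is {pkg}. Run: pip install -e .")
```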
880 |
881 | ### Changed
882 | - **Developer Workflow**: Developers now automatically guided to use `pip install -e .` for proper setup
883 | - **Pre-commit Hook**: Now validates venv consistency before allowing commits
884 | - **Installation Process**: Detects developer mode and provides targeted guidance
885 |
886 | ### Technical Details
887 | - **6-Layer Prevention System**:
888 | 1. **Development**: Pre-commit hook blocks bad commits, detection script validates setup
889 | 2. **Runtime**: Server startup warnings catch edge cases
890 | 3. **Documentation**: CLAUDE.md, README.md, ai-agent-instructions.md all updated
891 | 4. **Automation**: check_dev_setup.py, pre-commit hook, CI/CD workflow
892 | 5. **Interactive**: install.py prompts developers for editable install
893 | 6. **Testing**: CI/CD workflow with 5 comprehensive test jobs
894 |
895 | - **Files Modified**:
896 | - `scripts/validation/check_dev_setup.py` - NEW automated detection script
897 | - `scripts/hooks/pre-commit` - Enhanced with venv validation
898 | - `CLAUDE.md` - Added development setup guidance
899 | - `src/mcp_memory_service/server.py` - Added runtime version check
900 | - `README.md` - Updated developer section
901 | - `docs/development/ai-agent-instructions.md` - Updated setup commands
902 | - `scripts/installation/install.py` - Added developer detection
903 | - `.github/workflows/dev-setup-validation.yml` - NEW CI/CD validation
904 |
905 | ### Migration Notes
906 | - **For Developers**: Run `pip install -e .` to install in editable mode (will be prompted by install.py)
907 | - **For Users**: No action required - prevention system is transparent for production use
908 | - **Pre-commit Hook**: Automatically installed during `install.py`, validates on every commit
909 |
910 | ### Commits Included
911 | - `670fb74` - Phase 1: Automated detection (check_dev_setup.py, pre-commit hook, CLAUDE.md)
912 | - `9537259` - Phase 2: Runtime warnings (server.py) + developer documentation
913 | - `a17bcc7` - Phase 3: Interactive onboarding (install.py) + CI/CD validation
914 |
915 |
```
--------------------------------------------------------------------------------
/claude-hooks/core/session-start.js:
--------------------------------------------------------------------------------
```javascript
1 | /**
2 | * Claude Code Session Start Hook
3 | * Automatically injects relevant memories at the beginning of each session
4 | */
5 |
6 | const fs = require('fs').promises;
7 | const path = require('path');
8 |
9 | // Import utilities
10 | const { detectProjectContext } = require('../utilities/project-detector');
11 | const { scoreMemoryRelevance, analyzeMemoryAgeDistribution, calculateAdaptiveGitWeight } = require('../utilities/memory-scorer');
12 | const { formatMemoriesForContext } = require('../utilities/context-formatter');
13 | const { detectContextShift, extractCurrentContext, determineRefreshStrategy } = require('../utilities/context-shift-detector');
14 | const { analyzeGitContext, buildGitContextQuery } = require('../utilities/git-analyzer');
15 | const { MemoryClient } = require('../utilities/memory-client');
16 | const { getVersionInfo, formatVersionDisplay } = require('../utilities/version-checker');
17 |
18 | /**
19 | * Load hook configuration
20 | */
21 | async function loadConfig() {
22 | try {
23 | const configPath = path.join(__dirname, '../config.json');
24 | const configData = await fs.readFile(configPath, 'utf8');
25 | return JSON.parse(configData);
26 | } catch (error) {
27 | console.warn('[Memory Hook] Using default configuration:', error.message);
28 | return {
29 | memoryService: {
30 | protocol: 'auto',
31 | preferredProtocol: 'http',
32 | fallbackEnabled: true,
33 | http: {
34 | endpoint: 'http://127.0.0.1:8889',
35 | apiKey: 'test-key-123',
36 | healthCheckTimeout: 3000,
37 | useDetailedHealthCheck: false
38 | },
39 | mcp: {
40 | serverCommand: ['uv', 'run', 'memory', 'server'],
41 | serverWorkingDir: null,
42 | connectionTimeout: 5000,
43 | toolCallTimeout: 10000
44 | },
45 | defaultTags: ['claude-code', 'auto-generated'],
46 | maxMemoriesPerSession: 8,
47 | injectAfterCompacting: false
48 | },
49 | projectDetection: {
50 | gitRepository: true,
51 | packageFiles: ['package.json', 'pyproject.toml', 'Cargo.toml'],
52 | frameworkDetection: true,
53 | languageDetection: true
54 | },
55 | output: {
56 | verbose: true, // Default to verbose for backward compatibility
57 | showMemoryDetails: false, // Hide detailed memory scoring by default
58 | showProjectDetails: true, // Show project detection by default
59 | showScoringDetails: false, // Hide detailed scoring breakdown
60 | cleanMode: false // Default to normal output
61 | }
62 | };
63 | }
64 | }
65 |
66 | /**
67 | * Query memory service for health information (supports both HTTP and MCP)
68 | */
69 | async function queryMemoryHealth(memoryClient) {
70 | try {
71 | const healthResult = await memoryClient.getHealthStatus();
72 | return healthResult;
73 | } catch (error) {
74 | return {
75 | success: false,
76 | error: error.message,
77 | fallback: true
78 | };
79 | }
80 | }
81 |
82 | /**
83 | * Parse health data into storage info structure (supports both HTTP and MCP responses)
84 | */
85 | function parseHealthDataToStorageInfo(healthData) {
86 | try {
87 | // Handle MCP tool response format
88 | if (healthData.content && Array.isArray(healthData.content)) {
89 | const textContent = healthData.content.find(c => c.type === 'text')?.text;
90 | if (textContent) {
91 | try {
92 | // Parse JSON from MCP response
93 | const parsedData = JSON.parse(textContent.replace(/'/g, '"').replace(/True/g, 'true').replace(/False/g, 'false').replace(/None/g, 'null'));
94 | return parseHealthDataToStorageInfo(parsedData);
95 | } catch (parseError) {
96 | console.warn('[Memory Hook] Could not parse MCP health response:', parseError.message);
97 | return getUnknownStorageInfo();
98 | }
99 | }
100 | }
101 |
102 | // Handle direct health data object
103 | const storage = healthData.storage || healthData || {};
104 | const system = healthData.system || {};
105 | const statistics = healthData.statistics || healthData.stats || {};
106 |
107 | // Determine icon based on backend
108 | let icon = '💾';
109 | switch (storage.backend?.toLowerCase()) {
110 | case 'sqlite-vec':
111 | case 'sqlite_vec':
112 | icon = '🪶';
113 | break;
114 | case 'chromadb':
115 | case 'chroma':
116 | icon = '📦';
117 | break;
118 | case 'cloudflare':
119 | icon = '☁️';
120 | break;
121 | }
122 |
123 | // Build description with status
124 | const backendName = storage.backend ? storage.backend.replace('_', '-') : 'Unknown';
125 | const statusText = storage.status === 'connected' ? 'Connected' :
126 | storage.status === 'disconnected' ? 'Disconnected' :
127 | storage.status || 'Unknown';
128 |
129 | const description = `${backendName} (${statusText})`;
130 |
131 | // Build location info (use cwd as better fallback than "Unknown")
132 | let location = storage.database_path || storage.location || process.cwd();
133 | if (location.length > 50) {
134 | location = '...' + location.substring(location.length - 47);
135 | }
136 |
137 | // Determine type (local/remote/cloud)
138 | let type = 'unknown';
139 | if (storage.backend === 'cloudflare') {
140 | type = 'cloud';
141 | } else if (storage.database_path && storage.database_path.startsWith('/')) {
142 | type = 'local';
143 | } else if (location.includes('://')) {
144 | type = 'remote';
145 | } else {
146 | type = 'local';
147 | }
148 |
149 | return {
150 | backend: storage.backend || 'unknown',
151 | type: type,
152 | location: location,
153 | description: description,
154 | icon: icon,
155 | // Rich health data
156 | health: {
157 | status: storage.status,
158 | totalMemories: statistics.total_memories || storage.total_memories || 0,
159 | databaseSizeMB: statistics.database_size_mb || storage.database_size_mb || 0,
160 | uniqueTags: statistics.unique_tags || storage.unique_tags || 0,
161 | embeddingModel: storage.embedding_model || 'Unknown',
162 | platform: system.platform,
163 | uptime: healthData.uptime_seconds,
164 | accessible: storage.accessible
165 | }
166 | };
167 |
168 | } catch (error) {
169 | return getUnknownStorageInfo();
170 | }
171 | }
172 |
173 | /**
174 | * Get unknown storage info structure
175 | */
176 | function getUnknownStorageInfo() {
177 | return {
178 | backend: 'unknown',
179 | type: 'unknown',
180 | location: 'Health parse error',
181 | description: 'Unknown Storage',
182 | icon: '❓',
183 | health: { status: 'error', totalMemories: 0 }
184 | };
185 | }
186 |
187 | /**
188 | * Detect storage backend configuration (fallback method)
189 | */
190 | function detectStorageBackendFallback(config) {
191 | try {
192 | // Check environment variable first
193 | const envBackend = process.env.MCP_MEMORY_STORAGE_BACKEND?.toLowerCase();
194 | const endpoint = config.memoryService?.http?.endpoint || 'http://127.0.0.1:8889';
195 |
196 | // Parse endpoint to determine if local or remote
197 | const url = new URL(endpoint);
198 | const isLocal = url.hostname === 'localhost' || url.hostname === '127.0.0.1' || url.hostname.endsWith('.local');
199 |
200 | let storageInfo = {
201 | backend: 'unknown',
202 | type: 'unknown',
203 | location: endpoint,
204 | description: 'Unknown Storage',
205 | icon: '💾',
206 | health: { status: 'unknown', totalMemories: 0 }
207 | };
208 |
209 | if (envBackend) {
210 | switch (envBackend) {
211 | case 'sqlite_vec':
212 | storageInfo = {
213 | backend: 'sqlite_vec',
214 | type: 'local',
215 | location: process.env.MCP_MEMORY_SQLITE_PATH || '~/.mcp-memory/memories.db',
216 | description: 'SQLite-vec (Config)',
217 | icon: '🪶',
218 | health: { status: 'unknown', totalMemories: 0 }
219 | };
220 | break;
221 |
222 | case 'chromadb':
223 | case 'chroma':
224 | const chromaHost = process.env.MCP_MEMORY_CHROMADB_HOST;
225 | const chromaPath = process.env.MCP_MEMORY_CHROMA_PATH;
226 |
227 | if (chromaHost) {
228 | // Remote ChromaDB
229 | const chromaPort = process.env.MCP_MEMORY_CHROMADB_PORT || '8000';
230 | const ssl = process.env.MCP_MEMORY_CHROMADB_SSL === 'true';
231 | const protocol = ssl ? 'https' : 'http';
232 | storageInfo = {
233 | backend: 'chromadb',
234 | type: 'remote',
235 | location: `${protocol}://${chromaHost}:${chromaPort}`,
236 | description: 'ChromaDB (Remote Config)',
237 | icon: '🌐',
238 | health: { status: 'unknown', totalMemories: 0 }
239 | };
240 | } else {
241 | // Local ChromaDB
242 | storageInfo = {
243 | backend: 'chromadb',
244 | type: 'local',
245 | location: chromaPath || '~/.mcp-memory/chroma',
246 | description: 'ChromaDB (Config)',
247 | icon: '📦',
248 | health: { status: 'unknown', totalMemories: 0 }
249 | };
250 | }
251 | break;
252 |
253 | case 'cloudflare':
254 | const accountId = process.env.CLOUDFLARE_ACCOUNT_ID;
255 | storageInfo = {
256 | backend: 'cloudflare',
257 | type: 'cloud',
258 | location: accountId ? `Account: ${accountId.substring(0, 8)}...` : 'Cloudflare Workers',
259 | description: 'Cloudflare Vector (Config)',
260 | icon: '☁️',
261 | health: { status: 'unknown', totalMemories: 0 }
262 | };
263 | break;
264 | }
265 | } else {
266 | // Fallback: infer from endpoint
267 | if (isLocal) {
268 | storageInfo = {
269 | backend: 'local_service',
270 | type: 'local',
271 | location: endpoint,
272 | description: 'Local MCP Service',
273 | icon: '💾',
274 | health: { status: 'unknown', totalMemories: 0 }
275 | };
276 | } else {
277 | storageInfo = {
278 | backend: 'remote_service',
279 | type: 'remote',
280 | location: endpoint,
281 | description: 'Remote MCP Service',
282 | icon: '🌐',
283 | health: { status: 'unknown', totalMemories: 0 }
284 | };
285 | }
286 | }
287 |
288 | return storageInfo;
289 |
290 | } catch (error) {
291 | return {
292 | backend: 'unknown',
293 | type: 'unknown',
294 | location: 'Configuration Error',
295 | description: 'Unknown Storage',
296 | icon: '❓',
297 | health: { status: 'error', totalMemories: 0 }
298 | };
299 | }
300 | }
301 |
302 | /**
303 | * Query memory service using code execution (token-efficient)
304 | */
305 | async function queryMemoryServiceViaCode(query, config) {
306 | const startTime = Date.now();
307 | const enableMetrics = config?.codeExecution?.enableMetrics !== false;
308 |
309 | try {
310 | const { execSync } = require('child_process');
311 |
312 | // Escape query strings for safe shell execution
313 | const escapeForPython = (str) => str.replace(/"/g, '\\"').replace(/\n/g, '\\n');
314 |
315 | // Build Python code for memory search with time filter support
316 | // Use v8.19.0+ Code Execution Interface API for optimal performance
317 | const pythonCode = query.timeFilter ? `
318 | import sys
319 | import json
320 | from datetime import datetime
321 | from mcp_memory_service.api import search
322 |
323 | try:
324 | # Execute search with time filter (v8.21.0+ API enhancement)
325 | results = search("${escapeForPython(query.semanticQuery || '')}", limit=${query.limit || 8}, time_filter="${escapeForPython(query.timeFilter)}")
326 |
327 | # Format compact output
328 | output = {
329 | 'success': True,
330 | 'memories': [
331 | {
332 | 'hash': m.hash,
333 | 'preview': m.preview,
334 | 'tags': list(m.tags),
335 | 'created': m.created,
336 | 'created_at': m.created,
337 | 'created_at_iso': datetime.fromtimestamp(m.created).isoformat(),
338 | 'score': m.score,
339 | 'content': m.preview # Use preview as content for compatibility
340 | }
341 | for m in results.memories
342 | ],
343 | 'total': results.total,
344 | 'method': 'code_execution'
345 | }
346 | print(json.dumps(output))
347 | sys.exit(0)
348 | except Exception as e:
349 | print(json.dumps({'success': False, 'error': str(e), 'method': 'code_execution'}))
350 | sys.exit(1)
351 | ` : `
352 | import sys
353 | import json
354 | from datetime import datetime
355 | from mcp_memory_service.api import search
356 |
357 | try:
358 | # Execute search with semantic query and limit
359 | results = search("${escapeForPython(query.semanticQuery || '')}", limit=${query.limit || 8})
360 |
361 | # Format compact output
362 | output = {
363 | 'success': True,
364 | 'memories': [
365 | {
366 | 'hash': m.hash,
367 | 'preview': m.preview,
368 | 'tags': list(m.tags),
369 | 'created': m.created,
370 | 'created_at': m.created,
371 | 'created_at_iso': datetime.fromtimestamp(m.created).isoformat(),
372 | 'score': m.score,
373 | 'content': m.preview # Use preview as content for compatibility
374 | }
375 | for m in results.memories
376 | ],
377 | 'total': results.total,
378 | 'method': 'code_execution'
379 | }
380 | print(json.dumps(output))
381 | sys.exit(0)
382 | except Exception as e:
383 | print(json.dumps({'success': False, 'error': str(e), 'method': 'code_execution'}))
384 | sys.exit(1)
385 | `;
386 |
387 | // Get Python path from config
388 | const pythonPath = config?.codeExecution?.pythonPath || 'python3';
389 | const timeout = config?.codeExecution?.timeout || 5000;
390 |
391 | // Execute Python code with timeout
392 | const result = execSync(`${pythonPath} -c "${pythonCode.replace(/"/g, '\\"')}"`, {
393 | encoding: 'utf-8',
394 | timeout: timeout,
395 | stdio: ['pipe', 'pipe', 'pipe']
396 | });
397 |
398 | const parsed = JSON.parse(result);
399 |
400 | if (parsed.success) {
401 | const executionTime = Date.now() - startTime;
402 |
403 | // Calculate token savings estimate
404 | const memoriesRetrieved = (parsed.memories || []).length;
405 | const mcpTokens = 1200 + (memoriesRetrieved * 300); // Conservative MCP estimate
406 | const codeTokens = 20 + (memoriesRetrieved * 25); // Code execution tokens
407 | const tokensSaved = mcpTokens - codeTokens;
408 | const reductionPercent = ((tokensSaved / mcpTokens) * 100).toFixed(1);
409 |
410 | // Store metrics for reporting
411 | if (enableMetrics) {
412 | parsed._metrics = {
413 | executionTime,
414 | memoriesRetrieved,
415 | mcpTokensEstimate: mcpTokens,
416 | codeTokensEstimate: codeTokens,
417 | tokensSaved,
418 | reductionPercent
419 | };
420 | }
421 |
422 |       return Object.assign(parsed.memories || [], { _metrics: parsed._metrics }); // attach metrics so the caller can read them
423 | } else {
424 | throw new Error(parsed.error || 'Code execution failed');
425 | }
426 | } catch (error) {
427 | // Silently return null to trigger MCP fallback
428 | // Error logging suppressed - fallback is expected when module not installed
429 | return null;
430 | }
431 | }
432 |
433 | /**
434 | * Query memory service for relevant memories (supports code execution with MCP fallback)
435 | */
436 | async function queryMemoryService(memoryClient, query, config) {
437 | const startTime = Date.now();
438 |
439 | try {
440 | // Check if code execution is enabled
441 | const codeExecutionEnabled = config?.codeExecution?.enabled !== false; // Default true
442 | const fallbackToMCP = config?.codeExecution?.fallbackToMCP !== false; // Default true
443 | const enableMetrics = config?.codeExecution?.enableMetrics !== false;
444 |
445 | // Phase 1: Try code execution first (75% token reduction)
446 | if (codeExecutionEnabled) {
447 | const codeResult = await queryMemoryServiceViaCode(query, config);
448 |
449 | if (codeResult !== null) {
450 | const executionTime = Date.now() - startTime;
451 |
452 | // Extract metrics if available
453 | const metrics = codeResult._metrics || {};
454 |
455 | // Success! Log token savings
456 | if (config?.output?.verbose && config?.output?.showMemoryDetails && enableMetrics) {
457 | const tokenInfo = metrics.reductionPercent ?
458 | ` ${CONSOLE_COLORS.GRAY}(${metrics.reductionPercent}% reduction, ${metrics.tokensSaved} tokens saved)${CONSOLE_COLORS.RESET}` :
459 | ` ${CONSOLE_COLORS.GRAY}(75% reduction)${CONSOLE_COLORS.RESET}`;
460 | console.log(`${CONSOLE_COLORS.GREEN}⚡ Code Execution${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Token-efficient path${tokenInfo}`);
461 | }
462 |
463 | return codeResult;
464 | }
465 | }
466 |
467 | // Phase 2: Fallback to MCP tools if code execution failed
468 | if (fallbackToMCP && memoryClient) {
469 | if (config?.output?.verbose && config?.output?.showMemoryDetails) {
470 | console.log(`${CONSOLE_COLORS.YELLOW}↩️ MCP Fallback${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}Using standard MCP tools${CONSOLE_COLORS.RESET}`);
471 | }
472 |
473 | // Add timeout for each individual query (2 seconds max)
474 | const queryTimeout = new Promise((resolve) =>
475 | setTimeout(() => resolve([]), 2000)
476 | );
477 |
478 | let memories = [];
479 |
480 | // Use time-based queries with semantic filtering for relevant recent memories
481 | const queryPromise = query.timeFilter ?
482 | memoryClient.queryMemoriesByTime(query.timeFilter, query.limit, query.semanticQuery) :
483 | memoryClient.queryMemories(query.semanticQuery, query.limit);
484 |
485 | memories = await Promise.race([queryPromise, queryTimeout]);
486 |
487 | return memories || [];
488 | }
489 |
490 | return [];
491 | } catch (error) {
492 | console.warn('[Memory Hook] Memory query error:', error.message);
493 | return [];
494 | }
495 | }
496 |
497 | // ANSI Colors for console output
498 | const CONSOLE_COLORS = {
499 | RESET: '\x1b[0m',
500 | BRIGHT: '\x1b[1m',
501 | DIM: '\x1b[2m',
502 | CYAN: '\x1b[36m',
503 | GREEN: '\x1b[32m',
504 | BLUE: '\x1b[34m',
505 | YELLOW: '\x1b[33m',
506 | GRAY: '\x1b[90m',
507 | RED: '\x1b[31m'
508 | };
509 |
510 | /**
511 | * Main session start hook function with enhanced visual output
512 | */
513 | async function onSessionStart(context) {
514 | // Global timeout wrapper to prevent hook from hanging
515 | // Config specifies 10s, we use 9.5s to leave 0.5s buffer for cleanup
516 | // With 1 git query + 1 recent query, expect ~9.5s total (4.5s each due to Python cold-start)
517 | const HOOK_TIMEOUT = 9500; // 9.5 seconds (reduced Phase 0 from 2 to 1 query)
518 | const timeoutPromise = new Promise((_, reject) => {
519 | setTimeout(() => reject(new Error('Hook timeout - completing early')), HOOK_TIMEOUT);
520 | });
521 |
522 | try {
523 | return await Promise.race([
524 | executeSessionStart(context),
525 | timeoutPromise
526 | ]);
527 | } catch (error) {
528 | if (error.message.includes('Hook timeout')) {
529 | console.log(`${CONSOLE_COLORS.YELLOW}⏱️ Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}Completed with timeout (normal for slow connections)${CONSOLE_COLORS.RESET}`);
530 | return;
531 | }
532 | throw error;
533 | }
534 | }
535 |
536 | /**
537 | * Main execution logic (wrapped by timeout)
538 | */
539 | async function executeSessionStart(context) {
540 | try {
541 | // Load configuration first to check verbosity settings
542 | const config = await loadConfig();
543 | const verbose = config.output?.verbose !== false; // Default to true
544 | const cleanMode = config.output?.cleanMode === true; // Default to false
545 | const showMemoryDetails = config.output?.showMemoryDetails === true;
546 | const showProjectDetails = config.output?.showProjectDetails !== false; // Default to true
547 |
548 | if (verbose && !cleanMode) {
549 | console.log(`${CONSOLE_COLORS.CYAN}🧠 Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Initializing session awareness...`);
550 | }
551 |
552 | // Check if this is triggered by a compacting event and skip if configured to do so
553 | if (context.trigger === 'compacting' || context.event === 'memory-compacted') {
554 | if (!config.memoryService.injectAfterCompacting) {
555 | console.log(`${CONSOLE_COLORS.YELLOW}⏸️ Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Skipping injection after compacting`);
556 | return;
557 | }
558 | console.log(`${CONSOLE_COLORS.GREEN}▶️ Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Proceeding with injection after compacting`);
559 | }
560 |
561 | // For non-session-start events, use smart timing to decide if refresh is needed
562 | if (context.trigger !== 'session-start' && context.trigger !== 'start') {
563 | const currentContext = extractCurrentContext(context.conversationState || {}, context.workingDirectory);
564 | const previousContext = context.previousContext || context.conversationState?.previousContext;
565 |
566 | if (previousContext) {
567 | const shiftDetection = detectContextShift(currentContext, previousContext);
568 |
569 | if (!shiftDetection.shouldRefresh) {
570 | console.log(`${CONSOLE_COLORS.GRAY}⏸️ Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}No context shift detected, skipping${CONSOLE_COLORS.RESET}`);
571 | return;
572 | }
573 |
574 | console.log(`${CONSOLE_COLORS.BLUE}🔄 Memory Hook${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Context shift: ${shiftDetection.description}`);
575 | }
576 | }
577 |
578 | // Detect project context
579 | const projectContext = await detectProjectContext(context.workingDirectory || process.cwd());
580 | if (verbose && showProjectDetails && !cleanMode) {
581 | const projectDisplay = `${CONSOLE_COLORS.BRIGHT}${projectContext.name}${CONSOLE_COLORS.RESET}`;
582 | const typeDisplay = projectContext.language !== 'Unknown' ? ` ${CONSOLE_COLORS.GRAY}(${projectContext.language})${CONSOLE_COLORS.RESET}` : '';
583 | console.log(`${CONSOLE_COLORS.BLUE}📂 Project Detector${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Analyzing ${projectDisplay}${typeDisplay}`);
584 | }
585 |
586 | // Initialize memory client and detect storage backend
587 | const showStorageSource = config.memoryService?.showStorageSource !== false; // Default to true
588 | const sourceDisplayMode = config.memoryService?.sourceDisplayMode || 'brief';
589 | let memoryClient = null;
590 | let storageInfo = null;
591 | let connectionInfo = null;
592 |
593 | if (showStorageSource && verbose && !cleanMode) {
594 | // Initialize unified memory client for health check and memory queries
595 | try {
596 | memoryClient = new MemoryClient(config.memoryService);
597 | const connection = await memoryClient.connect();
598 | connectionInfo = memoryClient.getConnectionInfo();
599 |
600 | if (verbose && showMemoryDetails && !cleanMode && connectionInfo?.activeProtocol) {
601 | console.log(`${CONSOLE_COLORS.CYAN}🔗 Connection${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Using ${CONSOLE_COLORS.BRIGHT}${connectionInfo.activeProtocol.toUpperCase()}${CONSOLE_COLORS.RESET} protocol`);
602 | }
603 |
604 | const healthResult = await queryMemoryHealth(memoryClient);
605 |
606 | if (healthResult.success) {
607 | storageInfo = parseHealthDataToStorageInfo(healthResult.data);
608 |
609 | // Display based on mode with rich health information
610 | if (sourceDisplayMode === 'detailed') {
611 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET}`);
612 | console.log(`${CONSOLE_COLORS.CYAN}📍 Location${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${storageInfo.location}${CONSOLE_COLORS.RESET}`);
613 | if (storageInfo.health.totalMemories > 0) {
614 | console.log(`${CONSOLE_COLORS.CYAN}📊 Database${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GREEN}${storageInfo.health.totalMemories} memories${CONSOLE_COLORS.RESET}, ${CONSOLE_COLORS.YELLOW}${storageInfo.health.databaseSizeMB}MB${CONSOLE_COLORS.RESET}, ${CONSOLE_COLORS.BLUE}${storageInfo.health.uniqueTags} tags${CONSOLE_COLORS.RESET}`);
615 | }
616 | } else if (sourceDisplayMode === 'brief') {
617 | const memoryCount = storageInfo.health.totalMemories > 0 ? ` • ${storageInfo.health.totalMemories} memories` : '';
618 | const sizeInfo = storageInfo.health.databaseSizeMB > 0 ? ` • ${storageInfo.health.databaseSizeMB}MB` : '';
619 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET}${memoryCount}${sizeInfo}`);
620 | if (storageInfo.location && sourceDisplayMode === 'brief') {
621 | console.log(`${CONSOLE_COLORS.CYAN}📍 Path${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${storageInfo.location}${CONSOLE_COLORS.RESET}`);
622 | }
623 | } else if (sourceDisplayMode === 'icon-only') {
624 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${storageInfo.backend} • ${storageInfo.health.totalMemories} memories`);
625 | }
626 | } else {
627 | // Fallback to environment/config detection when MCP health check fails
628 | if (verbose && showMemoryDetails && !cleanMode) {
629 | console.log(`${CONSOLE_COLORS.YELLOW}⚠️ MCP Health Check${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${healthResult.error}, using config fallback${CONSOLE_COLORS.RESET}`);
630 | }
631 |
632 | storageInfo = detectStorageBackendFallback(config);
633 |
634 | if (sourceDisplayMode === 'detailed') {
635 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET}`);
636 | console.log(`${CONSOLE_COLORS.CYAN}📍 Location${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${storageInfo.location}${CONSOLE_COLORS.RESET}`);
637 | } else if (sourceDisplayMode === 'brief') {
638 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}(${storageInfo.location})${CONSOLE_COLORS.RESET}`);
639 | } else if (sourceDisplayMode === 'icon-only') {
640 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${storageInfo.backend}`);
641 | }
642 | }
643 | } catch (error) {
644 | // Memory client connection failed, fall back to environment detection
645 | if (verbose && showMemoryDetails && !cleanMode) {
646 | console.log(`${CONSOLE_COLORS.YELLOW}⚠️ Memory Connection${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${error.message}, using environment fallback${CONSOLE_COLORS.RESET}`);
647 | }
648 |
649 | storageInfo = detectStorageBackendFallback(config);
650 |
651 | if (sourceDisplayMode === 'brief') {
652 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}(${storageInfo.location})${CONSOLE_COLORS.RESET}`);
653 | }
654 | }
655 | } else {
656 | // Health check disabled, use config fallback
657 | storageInfo = detectStorageBackendFallback(config);
658 |
659 | if (sourceDisplayMode === 'detailed') {
660 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET}`);
661 | console.log(`${CONSOLE_COLORS.CYAN}📍 Location${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${storageInfo.location}${CONSOLE_COLORS.RESET}`);
662 | } else if (sourceDisplayMode === 'brief') {
663 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${CONSOLE_COLORS.BRIGHT}${storageInfo.description}${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}(${storageInfo.location})${CONSOLE_COLORS.RESET}`);
664 | } else if (sourceDisplayMode === 'icon-only') {
665 | console.log(`${CONSOLE_COLORS.CYAN}💾 Storage${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${storageInfo.icon} ${storageInfo.backend}`);
666 | }
667 | }
668 |
669 | // Display version information
670 | const showVersionInfo = config.versionCheck?.enabled !== false; // Default to true
671 | if (showVersionInfo && verbose && !cleanMode) {
672 | try {
673 | const versionInfo = await getVersionInfo(context.workingDirectory || process.cwd(), {
674 | checkPyPI: config.versionCheck?.checkPyPI !== false,
675 | timeout: config.versionCheck?.timeout || 2000
676 | });
677 |
678 | const versionDisplay = formatVersionDisplay(versionInfo, CONSOLE_COLORS);
679 | console.log(versionDisplay);
680 | } catch (error) {
681 | // Silently fail - version check is informational, not critical
682 | if (verbose && showMemoryDetails) {
683 | console.warn(`[Memory Hook] Version check failed: ${error.message}`);
684 | }
685 | }
686 | }
687 |
688 | // Analyze git context if enabled
689 | const gitAnalysisEnabled = config.gitAnalysis?.enabled !== false; // Default to true
690 | const showGitAnalysis = config.output?.showGitAnalysis !== false; // Default to true
691 | let gitContext = null;
692 |
693 | if (gitAnalysisEnabled) {
694 | if (verbose && showGitAnalysis && !cleanMode) {
695 | console.log(`${CONSOLE_COLORS.CYAN}📊 Git Analysis${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Analyzing repository context...`);
696 | }
697 |
698 | gitContext = await analyzeGitContext(context.workingDirectory || process.cwd(), {
699 | commitLookback: config.gitAnalysis?.commitLookback || 14,
700 | maxCommits: config.gitAnalysis?.maxCommits || 20,
701 | includeChangelog: config.gitAnalysis?.includeChangelog !== false,
702 | verbose: showGitAnalysis && showMemoryDetails && !cleanMode
703 | });
704 |
705 | if (gitContext && verbose && showGitAnalysis && !cleanMode) {
706 | const { commits, changelogEntries, repositoryActivity, developmentKeywords } = gitContext;
707 | console.log(`${CONSOLE_COLORS.CYAN}📊 Git Context${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${commits.length} commits, ${changelogEntries?.length || 0} changelog entries`);
708 |
709 | if (showMemoryDetails) {
710 | const topKeywords = developmentKeywords.keywords.slice(0, 5).join(', ');
711 | if (topKeywords) {
712 | console.log(`${CONSOLE_COLORS.CYAN}🔑 Keywords${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.YELLOW}${topKeywords}${CONSOLE_COLORS.RESET}`);
713 | }
714 | }
715 | }
716 | }
717 |
718 | // Initialize memory client for memory queries if not already connected
719 | if (!memoryClient) {
720 | try {
721 | // Add quick timeout for initial connection
722 | const connectionTimeout = new Promise((_, reject) =>
723 | setTimeout(() => reject(new Error('Quick connection timeout')), 2000)
724 | );
725 |
726 | memoryClient = new MemoryClient(config.memoryService);
727 | await Promise.race([
728 | memoryClient.connect(),
729 | connectionTimeout
730 | ]);
731 | connectionInfo = memoryClient.getConnectionInfo();
732 | } catch (error) {
733 | if (verbose && !cleanMode) {
734 | console.log(`${CONSOLE_COLORS.YELLOW}⚠️ Memory Connection${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}Failed to connect for memory queries: ${error.message}${CONSOLE_COLORS.RESET}`);
735 | }
736 | memoryClient = null;
737 | }
738 | }
739 |
740 | // Multi-phase memory retrieval for better recency prioritization
741 | const allMemories = [];
742 | const maxMemories = config.memoryService.maxMemoriesPerSession;
743 | const recentFirstMode = config.memoryService.recentFirstMode !== false; // Default to true
744 | const recentRatio = config.memoryService.recentMemoryRatio || 0.6;
745 | const recentTimeWindow = config.memoryService.recentTimeWindow || 'last-week';
746 | const fallbackTimeWindow = config.memoryService.fallbackTimeWindow || 'last-month';
747 |
748 | // Extract memory scoring configuration
749 | const scoringWeights = config.memoryScoring?.weights || {};
750 | const timeDecayRate = config.memoryScoring?.timeDecayRate || 0.1;
751 | const enableConversationContext = config.memoryScoring?.enableConversationContext || false;
752 | const minRelevanceScore = config.memoryScoring?.minRelevanceScore || 0.3;
753 | const showPhaseDetails = config.output?.showPhaseDetails !== false && config.output?.style !== 'balanced'; // Hide in balanced mode
754 |
755 | if (recentFirstMode) {
756 | // Phase 0: Git Context Phase (NEW - highest priority for repository-aware memories)
757 | if (gitContext && gitContext.developmentKeywords.keywords.length > 0) {
758 | const maxGitMemories = config.gitAnalysis?.maxGitMemories || 3;
759 | const gitQueries = buildGitContextQuery(projectContext, gitContext.developmentKeywords, context.userMessage);
760 |
761 | if (verbose && showPhaseDetails && !cleanMode && gitQueries.length > 0) {
762 | console.log(`${CONSOLE_COLORS.GREEN}⚡ Phase 0${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Git-aware memory search (${maxGitMemories} slots, 1 of ${gitQueries.length} queries for 8s timeout)`);
763 | }
764 |
765 | // Execute git-context queries
766 | for (const gitQuery of gitQueries.slice(0, 1)) { // Limit to top 1 query to stay within 8s timeout
767 | if (allMemories.length >= maxGitMemories) break;
768 |
769 | const gitMemories = await queryMemoryService(memoryClient, {
770 | semanticQuery: gitQuery.semanticQuery,
771 | limit: Math.min(maxGitMemories - allMemories.length, 3),
772 | timeFilter: 'last-2-weeks' // Focus on recent memories for git context
773 | }, config);
774 |
775 | if (gitMemories && gitMemories.length > 0) {
776 | // Mark these memories as git-context derived for scoring
777 | const markedMemories = gitMemories.map(mem => ({
778 | ...mem,
779 | _gitContextType: gitQuery.type,
780 | _gitContextSource: gitQuery.source,
781 | _gitContextWeight: config.gitAnalysis?.gitContextWeight || 1.2
782 | }));
783 |
784 | // Avoid duplicates from previous git queries
785 | const newGitMemories = markedMemories.filter(newMem =>
786 | !allMemories.some(existing =>
787 | existing.content && newMem.content &&
788 | existing.content.substring(0, 100) === newMem.content.substring(0, 100)
789 | )
790 | );
791 |
792 | allMemories.push(...newGitMemories);
793 |
794 | if (verbose && showMemoryDetails && !cleanMode && newGitMemories.length > 0) {
795 | console.log(`${CONSOLE_COLORS.GREEN} 📋 Git Query${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} [${gitQuery.type}] found ${newGitMemories.length} memories`);
796 | }
797 | }
798 | }
799 | }
800 |
801 | // Phase 1: Recent memories - high priority
802 | const remainingSlotsAfterGit = Math.max(0, maxMemories - allMemories.length);
803 | if (remainingSlotsAfterGit > 0) {
804 | // Build enhanced semantic query with git context
805 | let recentSemanticQuery = context.userMessage ?
806 | `recent ${projectContext.name} ${context.userMessage}` :
807 | `recent ${projectContext.name} development decisions insights`;
808 |
809 | // Add git context if available
810 | if (projectContext.git?.branch) {
811 | recentSemanticQuery += ` ${projectContext.git.branch}`;
812 | }
813 | if (projectContext.git?.lastCommit) {
814 | recentSemanticQuery += ` latest changes commit`;
815 | }
816 |
817 | // Add development keywords from git analysis
818 | if (gitContext && gitContext.developmentKeywords.keywords.length > 0) {
819 | const topKeywords = gitContext.developmentKeywords.keywords.slice(0, 3).join(' ');
820 | recentSemanticQuery += ` ${topKeywords}`;
821 | }
822 | const recentQuery = {
823 | semanticQuery: recentSemanticQuery,
824 | limit: Math.max(Math.floor(remainingSlotsAfterGit * recentRatio), 2), // Adjusted for remaining slots
825 | timeFilter: recentTimeWindow
826 | };
827 |
828 | if (verbose && showMemoryDetails && showPhaseDetails && !cleanMode) {
829 | console.log(`${CONSOLE_COLORS.BLUE}🕒 Phase 1${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Searching recent memories (${recentTimeWindow}, ${recentQuery.limit} slots)`);
830 | }
831 |
832 | const recentMemories = await queryMemoryService(memoryClient, recentQuery, config);
833 |
834 | // Filter out duplicates from git context phase
835 | if (recentMemories && recentMemories.length > 0) {
836 | const newRecentMemories = recentMemories.filter(newMem =>
837 | !allMemories.some(existing =>
838 | existing.content && newMem.content &&
839 | existing.content.substring(0, 100) === newMem.content.substring(0, 100)
840 | )
841 | );
842 |
843 | allMemories.push(...newRecentMemories);
844 | }
845 | }
846 |
847 | // Phase 2: Important tagged memories - fill remaining slots
848 | const remainingSlots = maxMemories - allMemories.length;
849 | if (remainingSlots > 0) {
850 | // Build tag list for important memories
851 | const importantTags = [
852 | projectContext.name,
853 | 'key-decisions',
854 | 'architecture',
855 | 'claude-code-reference'
856 | ].filter(Boolean);
857 |
858 | const timeFilter = 'last-2-weeks';
859 |
860 | if (verbose && showMemoryDetails && showPhaseDetails && !cleanMode) {
861 | console.log(`${CONSOLE_COLORS.BLUE}🎯 Phase 2${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Searching important tagged memories (${remainingSlots} slots)`);
862 | }
863 |
864 | // Use new tag-time filtering method for efficient recency prioritization
865 | const importantMemories = memoryClient ?
866 | await memoryClient.queryMemoriesByTagsAndTime(importantTags, timeFilter, remainingSlots, false) :
867 | [];
868 |
869 | // Avoid duplicates by checking content similarity
870 | const newMemories = (importantMemories || []).filter(newMem =>
871 | !allMemories.some(existing =>
872 | existing.content && newMem.content &&
873 | existing.content.substring(0, 100) === newMem.content.substring(0, 100)
874 | )
875 | );
876 |
877 | allMemories.push(...newMemories);
878 | }
879 |
880 | // Phase 3: Fallback to general project context if still need more
881 | const stillRemaining = maxMemories - allMemories.length;
882 | if (stillRemaining > 0 && allMemories.length < 3) {
883 | const fallbackQuery = {
884 | semanticQuery: `${projectContext.name} project context`,
885 | limit: stillRemaining,
886 | timeFilter: fallbackTimeWindow
887 | };
888 |
889 | if (verbose && showMemoryDetails && showPhaseDetails && !cleanMode) {
890 | console.log(`${CONSOLE_COLORS.BLUE}🔄 Phase 3${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Fallback general context (${stillRemaining} slots, ${fallbackTimeWindow})`);
891 | }
892 |
893 | const fallbackMemories = await queryMemoryService(memoryClient, fallbackQuery, config);
894 |
895 | const newFallbackMemories = (fallbackMemories || []).filter(newMem =>
896 | !allMemories.some(existing =>
897 | existing.content && newMem.content &&
898 | existing.content.substring(0, 100) === newMem.content.substring(0, 100)
899 | )
900 | );
901 |
902 | allMemories.push(...newFallbackMemories);
903 | }
904 | } else {
905 | // Legacy single-phase approach
906 | const memoryQuery = {
907 | tags: [
908 | projectContext.name,
909 | `language:${projectContext.language}`,
910 | 'key-decisions',
911 | 'architecture',
912 | 'recent-insights',
913 | 'claude-code-reference'
914 | ].filter(Boolean),
915 | semanticQuery: context.userMessage ?
916 | `${projectContext.name} ${context.userMessage}` :
917 | `${projectContext.name} project context decisions architecture`,
918 | limit: maxMemories,
919 | timeFilter: 'last-2-weeks'
920 | };
921 |
922 | const legacyMemories = await queryMemoryService(memoryClient, memoryQuery, config);
923 |
924 | allMemories.push(...(legacyMemories || []));
925 | }
926 |
927 |     // No memory client connection: the retrieval phases above produced nothing, so just report and continue
928 | if (!memoryClient) {
929 | if (verbose && !cleanMode) {
930 | console.log(`${CONSOLE_COLORS.YELLOW}⚠️ Memory Retrieval${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}Skipped due to connection failure${CONSOLE_COLORS.RESET}`);
931 | }
932 |       // Don't return early - the hook should still finish (and clean up) without memory context
933 | if (verbose && showMemoryDetails && !cleanMode) {
934 | console.log(`${CONSOLE_COLORS.YELLOW}📭 Memory Search${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}No memory service available${CONSOLE_COLORS.RESET}`);
935 | }
936 | }
937 |
938 | // Use the collected memories from all phases
939 | const memories = allMemories.slice(0, maxMemories);
940 |
941 | if (memories.length > 0) {
942 | // Analyze memory recency for better reporting
943 | const now = new Date();
944 | const recentCount = memories.filter(m => {
945 | if (!m.created_at_iso) return false;
946 | const memDate = new Date(m.created_at_iso);
947 | const daysDiff = (now - memDate) / (1000 * 60 * 60 * 24);
948 | return daysDiff <= 7; // Within last week
949 | }).length;
950 |
951 | if (verbose && !cleanMode) {
952 | const recentText = recentCount > 0 ? ` ${CONSOLE_COLORS.GREEN}(${recentCount} recent)${CONSOLE_COLORS.RESET}` : '';
953 | console.log(`${CONSOLE_COLORS.GREEN}📚 Memory Search${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Found ${CONSOLE_COLORS.BRIGHT}${memories.length}${CONSOLE_COLORS.RESET} relevant memories${recentText}`);
954 | }
955 |
956 | // Analyze memory age distribution for adaptive weight adjustment
957 | const ageAnalysis = analyzeMemoryAgeDistribution(memories, { verbose: showMemoryDetails && !cleanMode });
958 |
959 | // Apply auto-calibration if enabled
960 | const autoCalibrate = config.memoryScoring?.autoCalibrate !== false; // Default true
961 | let adjustedWeights = { ...scoringWeights };
962 |
963 | if (autoCalibrate && ageAnalysis.isStale && ageAnalysis.recommendedAdjustments.timeDecay) {
964 | adjustedWeights = {
965 | ...adjustedWeights,
966 | timeDecay: ageAnalysis.recommendedAdjustments.timeDecay,
967 | tagRelevance: ageAnalysis.recommendedAdjustments.tagRelevance
968 | };
969 |
970 | if (verbose && showMemoryDetails && !cleanMode) {
971 | console.log(`${CONSOLE_COLORS.CYAN}🎯 Auto-Calibration${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${ageAnalysis.recommendedAdjustments.reason}${CONSOLE_COLORS.RESET}`);
972 | console.log(`${CONSOLE_COLORS.CYAN} Adjusted Weights${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} timeDecay: ${adjustedWeights.timeDecay.toFixed(2)}, tagRelevance: ${adjustedWeights.tagRelevance.toFixed(2)}`);
973 | }
974 | }
975 |
976 | // Score memories for relevance (with enhanced recency weighting and auto-calibrated weights)
977 | let scoredMemories = scoreMemoryRelevance(memories, projectContext, {
978 | verbose: showMemoryDetails,
979 | enhanceRecency: recentFirstMode,
980 | weights: adjustedWeights,
981 | timeDecayRate: timeDecayRate,
982 | includeConversationContext: enableConversationContext
983 | });
984 |
985 | // Calculate adaptive git context weight
986 | // v8.5.1+ Dynamic git weight based on memory age and commit activity
987 | const configuredGitWeight = config.gitAnalysis?.gitContextWeight || 1.2;
988 | const adaptiveGitEnabled = config.gitAnalysis?.adaptiveGitWeight !== false; // Default true
989 |
990 | let gitWeightResult;
991 | if (adaptiveGitEnabled && gitContext) {
992 | gitWeightResult = calculateAdaptiveGitWeight(
993 | gitContext,
994 | ageAnalysis,
995 | configuredGitWeight,
996 | { verbose: showMemoryDetails && !cleanMode }
997 | );
998 | } else {
999 | gitWeightResult = { weight: configuredGitWeight, reason: 'Adaptive git weight disabled', adjusted: false };
1000 | }
1001 |
1002 | const gitWeight = gitWeightResult.weight;
1003 |
1004 | // Show git weight info
1005 | if (verbose && showMemoryDetails && !cleanMode) {
1006 | if (gitWeightResult.adjusted) {
1007 | console.log(`${CONSOLE_COLORS.CYAN}⚙️ Adaptive Git Weight${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}${gitWeightResult.reason}${CONSOLE_COLORS.RESET}`);
1008 | }
1009 | if (configuredGitWeight > 1.5 && !gitWeightResult.adjusted) {
1010 | console.log(`${CONSOLE_COLORS.YELLOW}⚠️ Git Weight${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}High git context weight (${gitWeight.toFixed(1)}x) may prioritize git-related memories excessively${CONSOLE_COLORS.RESET}`);
1011 | console.log(`${CONSOLE_COLORS.YELLOW} Recommended${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}1.1-1.3x for balanced recency${CONSOLE_COLORS.RESET}`);
1012 | }
1013 | }
1014 |
1015 | // Apply git context weight boost to git-derived memories
1016 |
1017 | scoredMemories = scoredMemories.map(memory => {
1018 | if (memory._gitContextWeight && memory._gitContextWeight !== 1.0) {
1019 | const originalScore = memory.relevanceScore;
1020 | const boostedScore = Math.min(1.0, originalScore * memory._gitContextWeight);
1021 |
1022 | // Store original score for transparency
1023 | return {
1024 | ...memory,
1025 | _originalScore: originalScore,
1026 | relevanceScore: boostedScore,
1027 | _wasBoosted: true
1028 | };
1029 | }
1030 | return memory;
1031 | }).sort((a, b) => b.relevanceScore - a.relevanceScore); // Re-sort after boost
1032 |
1033 | // Filter out zero-scored memories (project affinity filtered)
1034 | const preFilterCount = scoredMemories.length;
1035 | scoredMemories = scoredMemories.filter(m => m.relevanceScore > 0);
1036 | if (verbose && showMemoryDetails && !cleanMode && preFilterCount !== scoredMemories.length) {
1037 | console.log(`[Memory Filter] Removed ${preFilterCount - scoredMemories.length} unrelated memories (no project affinity)`);
1038 | }
1039 |
1040 | // Show top scoring memories with recency info and detailed breakdown
1041 | if (verbose && showMemoryDetails && scoredMemories.length > 0 && !cleanMode) {
1042 | const topMemories = scoredMemories.slice(0, 3);
1043 | const memoryInfo = topMemories.map((m, idx) => {
1044 | const score = `${(m.relevanceScore * 100).toFixed(0)}%`;
1045 | let recencyFlag = '';
1046 | let ageText = '';
1047 | if (m.created_at_iso) {
1048 | const daysDiff = (now - new Date(m.created_at_iso)) / (1000 * 60 * 60 * 24);
1049 | if (daysDiff <= 1) {
1050 | recencyFlag = '🕒';
1051 | ageText = 'today';
1052 | } else if (daysDiff <= 7) {
1053 | recencyFlag = '📅';
1054 | ageText = `${Math.floor(daysDiff)}d ago`;
1055 | } else if (daysDiff <= 30) {
1056 | ageText = `${Math.floor(daysDiff)}d ago`;
1057 | } else {
1058 | ageText = `${Math.floor(daysDiff)}d ago`;
1059 | }
1060 | }
1061 |
1062 | // Show detailed breakdown for top memory (only if explicitly enabled)
1063 | if (idx === 0 && m.scoreBreakdown) {
1064 | const bd = m.scoreBreakdown;
1065 | const showBreakdown = config.output?.showScoringBreakdown === true;
1066 |
1067 | if (showBreakdown) {
1068 | console.log(`${CONSOLE_COLORS.CYAN} 📊 Top Memory Breakdown${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET}`);
1069 | console.log(`${CONSOLE_COLORS.CYAN} • Time Decay${CONSOLE_COLORS.RESET}: ${(bd.timeDecay * 100).toFixed(0)}% ${CONSOLE_COLORS.GRAY}(${ageText})${CONSOLE_COLORS.RESET}`);
1070 | console.log(`${CONSOLE_COLORS.CYAN} • Tag Match${CONSOLE_COLORS.RESET}: ${(bd.tagRelevance * 100).toFixed(0)}%`);
1071 | console.log(`${CONSOLE_COLORS.CYAN} • Content${CONSOLE_COLORS.RESET}: ${(bd.contentRelevance * 100).toFixed(0)}%`);
1072 | console.log(`${CONSOLE_COLORS.CYAN} • Quality${CONSOLE_COLORS.RESET}: ${(bd.contentQuality * 100).toFixed(0)}%`);
1073 | if (bd.recencyBonus > 0) {
1074 | console.log(`${CONSOLE_COLORS.CYAN} • Recency Bonus${CONSOLE_COLORS.RESET}: ${CONSOLE_COLORS.GREEN}+${(bd.recencyBonus * 100).toFixed(0)}%${CONSOLE_COLORS.RESET}`);
1075 | }
1076 | // Show git context boost if applied
1077 | if (m._wasBoosted && m._originalScore) {
1078 | const boostAmount = ((m.relevanceScore - m._originalScore) * 100).toFixed(0);
1079 | console.log(`${CONSOLE_COLORS.CYAN} • Git Boost${CONSOLE_COLORS.RESET}: ${CONSOLE_COLORS.YELLOW}+${boostAmount}%${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}(${(m._originalScore * 100).toFixed(0)}% → ${(m.relevanceScore * 100).toFixed(0)}%)${CONSOLE_COLORS.RESET}`);
1080 | }
1081 | } else if (config.logging?.enableDebug) {
1082 | // Log to debug file instead of console
1083 | const debugMsg = `[Memory Scorer] Top memory breakdown: TimeDecay=${(bd.timeDecay * 100).toFixed(0)}%, TagMatch=${(bd.tagRelevance * 100).toFixed(0)}%, Content=${(bd.contentRelevance * 100).toFixed(0)}%, Quality=${(bd.contentQuality * 100).toFixed(0)}%, RecencyBonus=${(bd.recencyBonus * 100).toFixed(0)}%`;
1084 | console.log(debugMsg);
1085 | }
1086 | }
1087 |
1088 | return ageText ? `${score}${recencyFlag} (${ageText})` : `${score}${recencyFlag}`;
1089 | }).join(', ');
1090 | console.log(`${CONSOLE_COLORS.CYAN}🎯 Scoring${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} Top relevance: ${CONSOLE_COLORS.YELLOW}${memoryInfo}${CONSOLE_COLORS.RESET}`);
1091 | }
1092 |
1093 | // Determine refresh strategy based on context
1094 | const strategy = context.trigger && context.previousContext ?
1095 | determineRefreshStrategy(detectContextShift(
1096 | extractCurrentContext(context.conversationState || {}, context.workingDirectory),
1097 | context.previousContext
1098 | )) : {
1099 | maxMemories: config.memoryService.maxMemoriesPerSession,
1100 | includeScore: false,
1101 | message: '🧠 Loading relevant memory context...'
1102 | };
1103 |
1104 |       // Take top scored memories based on strategy (this block-scoped maxMemories shadows the session-wide cap from line 742)
1105 | const maxMemories = Math.min(strategy.maxMemories || config.memoryService.maxMemoriesPerSession, scoredMemories.length);
1106 | const topMemories = scoredMemories.slice(0, maxMemories);
1107 |
1108 |       // Report how many memories were collected versus how many were actually selected
1109 | if (verbose && showMemoryDetails && !cleanMode) {
1110 | const totalCollected = allMemories.length;
1111 | const actualUsed = Math.min(maxMemories, scoredMemories.length);
1112 | if (totalCollected > actualUsed) {
1113 | console.log(`[Context Formatter] Selected ${actualUsed} from ${totalCollected} collected memories`);
1114 | }
1115 | console.log(`${CONSOLE_COLORS.CYAN}🔄 Processing${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${actualUsed} memories selected`);
1116 | }
1117 |
1118 | // Format memories for context injection with strategy-based options
1119 | const contextMessage = formatMemoriesForContext(topMemories, projectContext, {
1120 | includeScore: strategy.includeScore || false,
1121 | groupByCategory: maxMemories > 3,
1122 | maxMemories: maxMemories,
1123 | includeTimestamp: true,
1124 | maxContentLength: config.contextFormatting?.maxContentLength || 500,
1125 | maxContentLengthCLI: config.contextFormatting?.maxContentLengthCLI || 400,
1126 | maxContentLengthCategorized: config.contextFormatting?.maxContentLengthCategorized || 350,
1127 | storageInfo: showStorageSource ? (storageInfo || detectStorageBackend(config)) : null,
1128 | adaptiveTruncation: config.output?.adaptiveTruncation !== false,
1129 | contentLengthConfig: config.contentLength
1130 | });
1131 |
1132 | // Inject context into session
1133 | if (context.injectSystemMessage) {
1134 | await context.injectSystemMessage(contextMessage);
1135 | // Note: Don't console.log here - injectSystemMessage handles display
1136 | // console.log would cause duplicate output in Claude Code
1137 |
1138 |
1139 | // Write detailed session context log file (Option 3)
1140 | try {
1141 | const os = require('os');
1142 | const logPath = path.join(os.homedir(), '.claude', 'last-session-context.txt');
1143 |         const recencyPercent = memories.length > 0 ? ((recentCount / memories.length) * 100).toFixed(0) : 0; // recentCount was tallied over 'memories', so use the same denominator
1144 |
1145 | let logContent = `Session Started: ${new Date().toISOString()}\n`;
1146 | logContent += `Session ID: ${context.sessionId || 'unknown'}\n\n`;
1147 | logContent += `=== Project Context ===\n`;
1148 | logContent += `Project: ${projectContext.name}\n`;
1149 | logContent += `Language: ${projectContext.language}\n`;
1150 | if (projectContext.frameworks && projectContext.frameworks.length > 0) {
1151 | logContent += `Frameworks: ${projectContext.frameworks.join(', ')}\n`;
1152 | }
1153 | if (projectContext.git) {
1154 | logContent += `Git Branch: ${projectContext.git.branch || 'unknown'}\n`;
1155 | }
1156 | logContent += `\n=== Storage Backend ===\n`;
1157 | if (storageInfo) {
1158 | logContent += `Backend: ${storageInfo.backend}\n`;
1159 | logContent += `Type: ${storageInfo.type}\n`;
1160 | logContent += `Location: ${storageInfo.location}\n`;
1161 | if (storageInfo.health.totalMemories > 0) {
1162 | logContent += `Total Memories in DB: ${storageInfo.health.totalMemories}\n`;
1163 | }
1164 | }
1165 | logContent += `\n=== Memory Statistics ===\n`;
1166 | logContent += `Memories Loaded: ${maxMemories}\n`;
1167 | logContent += `Recent (last week): ${recentCount} (${recencyPercent}%)\n`;
1168 |
1169 | if (gitContext) {
1170 | logContent += `\n=== Git Context ===\n`;
1171 | logContent += `Commits Analyzed: ${gitContext.commits.length}\n`;
1172 | logContent += `Changelog Entries: ${gitContext.changelogEntries?.length || 0}\n`;
1173 | logContent += `Top Keywords: ${gitContext.developmentKeywords.keywords.slice(0, 5).join(', ')}\n`;
1174 | }
1175 |
1176 | if (topMemories.length > 0) {
1177 | logContent += `\n=== Top Loaded Memories ===\n`;
1178 | topMemories.slice(0, 3).forEach((m, idx) => {
1179 | const preview = m.content ? m.content.substring(0, 150).replace(/\n/g, ' ') : 'No content';
1180 | const ageInfo = m.created_at_iso ? ` (${Math.floor((now - new Date(m.created_at_iso)) / (1000 * 60 * 60 * 24))}d ago)` : '';
1181 | logContent += `\n${idx + 1}. Score: ${(m.relevanceScore * 100).toFixed(0)}%${ageInfo}\n`;
1182 | logContent += ` ${preview}...\n`;
1183 | });
1184 | }
1185 |
1186 | await fs.writeFile(logPath, logContent, 'utf8');
1187 | } catch (error) {
1188 | // Silently fail - log file is nice-to-have, not critical
1189 | if (verbose && showMemoryDetails) {
1190 | console.warn(`[Memory Hook] Failed to write log file: ${error.message}`);
1191 | }
1192 | }
1193 |
1194 | // Write status line cache file (Option 4)
1195 | try {
1196 | const cachePath = path.join(__dirname, '../utilities/session-cache.json');
1197 | const cacheData = {
1198 | timestamp: new Date().toISOString(),
1199 | sessionId: context.sessionId || 'unknown',
1200 | project: projectContext.name,
1201 | memoriesLoaded: maxMemories,
1202 | recentCount: recentCount,
1203 | gitCommits: gitContext ? gitContext.commits.length : 0,
1204 | gitKeywords: gitContext ? gitContext.developmentKeywords.keywords.slice(0, 3) : [],
1205 | storageBackend: storageInfo ? storageInfo.backend : 'unknown'
1206 | };
1207 |
1208 | await fs.writeFile(cachePath, JSON.stringify(cacheData, null, 2), 'utf8');
1209 | } catch (error) {
1210 | // Silently fail - status line cache is optional
1211 | if (verbose && showMemoryDetails) {
1212 | console.warn(`[Memory Hook] Failed to write status line cache: ${error.message}`);
1213 | }
1214 | }
1215 | } else if (verbose && !cleanMode) {
1216 | // Fallback: log context for manual copying with styling
1217 | console.log(`\n${CONSOLE_COLORS.CYAN}╭──────────────────────────────────────────╮${CONSOLE_COLORS.RESET}`);
1218 | console.log(`${CONSOLE_COLORS.CYAN}│${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.BRIGHT}Memory Context for Manual Copy${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.CYAN}│${CONSOLE_COLORS.RESET}`);
1219 | console.log(`${CONSOLE_COLORS.CYAN}╰──────────────────────────────────────────╯${CONSOLE_COLORS.RESET}`);
1220 | // Clean output to remove session-start-hook wrapper tags
1221 | const cleanedMessage = contextMessage.replace(/<\/?session-start-hook>/g, '');
1222 | console.log(cleanedMessage);
1223 | console.log(`${CONSOLE_COLORS.CYAN}╰──────────────────────────────────────────╯${CONSOLE_COLORS.RESET}\n`);
1224 | }
1225 | } else if (verbose && showMemoryDetails && !cleanMode) {
1226 | console.log(`${CONSOLE_COLORS.YELLOW}📭 Memory Search${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.GRAY}No relevant memories found${CONSOLE_COLORS.RESET}`);
1227 | }
1228 |
1229 | } catch (error) {
1230 | console.error(`${CONSOLE_COLORS.RED}❌ Memory Hook Error${CONSOLE_COLORS.RESET} ${CONSOLE_COLORS.DIM}→${CONSOLE_COLORS.RESET} ${error.message}`);
1231 | // Fail gracefully - don't prevent session from starting
1232 | } finally {
1233 | // Ensure MCP client cleanup even on error
1234 | try {
1235 | if (memoryClient && typeof memoryClient.disconnect === 'function') {
1236 | await memoryClient.disconnect();
1237 | }
1238 | } catch (error) {
1239 | // Ignore cleanup errors silently
1240 | }
1241 | }
1242 | }
1243 |
1244 | /**
1245 | * Hook metadata for Claude Code
1246 | */
1247 | module.exports = {
1248 | name: 'memory-awareness-session-start',
1249 | version: '2.3.0',
1250 | description: 'Automatically inject relevant memories at session start with git-aware repository context',
1251 | trigger: 'session-start',
1252 | handler: onSessionStart,
1253 | config: {
1254 | async: true,
1255 | timeout: 15000, // Increased timeout for git analysis
1256 | priority: 'high'
1257 | }
1258 | };
1259 |
1260 | // Direct execution support for testing
1261 | if (require.main === module) {
1262 | // Test the hook with mock context
1263 | const mockContext = {
1264 | workingDirectory: process.cwd(),
1265 | sessionId: 'test-session',
1266 | injectSystemMessage: async (message) => {
1267 | // Just print the message - it already has its own formatting from context-formatter.js
1268 | console.log(message);
1269 | }
1270 | };
1271 |
1272 | onSessionStart(mockContext)
1273 | .then(() => {
1274 | // Test completed quietly
1275 | process.exit(0);
1276 | })
1277 | .catch(error => {
1278 | console.error(`${CONSOLE_COLORS.RED}❌ Hook test failed:${CONSOLE_COLORS.RESET} ${error.message}`);
1279 | process.exit(1);
1280 | });
1281 | }
```
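
The connection guard at lines 719–737 above races `MemoryClient.connect()` against a two-second timer so that an unreachable memory service cannot stall session start. A minimal sketch of the same pattern in isolation; `withTimeout` and `slowConnect` are illustrative names, not part of the hook:

```javascript
// Sketch of the Promise.race timeout guard used for the memory client connection.
// withTimeout() and slowConnect() are hypothetical names used only for this example.
function withTimeout(promise, ms, label = 'operation') {
  const timer = new Promise((_, reject) =>
    setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms)
  );
  return Promise.race([promise, timer]);
}

async function demo() {
  const slowConnect = new Promise(resolve => setTimeout(() => resolve('connected'), 5000));
  try {
    await withTimeout(slowConnect, 2000, 'memory service connection');
  } catch (error) {
    console.log(error.message); // "memory service connection timed out after 2000ms"
  }
}

demo();
```

As in the hook, the losing promise is not cancelled; on timeout the hook simply discards the client reference and proceeds without memory context.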
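
Lines 741–753 read retrieval and scoring settings through optional chaining with hard-coded fallbacks. The object below illustrates a subset of the keys this code reads; the values are the fallbacks or plausible examples, not the canonical configuration schema:

```javascript
// Illustrative configuration shape inferred from the reads in this hook; not the canonical schema.
const exampleConfig = {
  memoryService: {
    maxMemoriesPerSession: 8,       // example value
    recentFirstMode: true,          // defaults to true when omitted
    recentMemoryRatio: 0.6,         // fallback used above
    recentTimeWindow: 'last-week',
    fallbackTimeWindow: 'last-month'
  },
  memoryScoring: {
    weights: { timeDecay: 0.3, tagRelevance: 0.3 },  // example weights
    timeDecayRate: 0.1,
    enableConversationContext: false,
    minRelevanceScore: 0.3,
    autoCalibrate: true             // defaults to true when omitted
  },
  gitAnalysis: {
    maxGitMemories: 3,
    gitContextWeight: 1.2,
    adaptiveGitWeight: true
  },
  output: {
    showPhaseDetails: true,
    showScoringBreakdown: false,
    style: 'verbose'                // 'balanced' suppresses phase details above
  }
};

console.log(exampleConfig.memoryService.recentTimeWindow); // 'last-week'
```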
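
Each retrieval phase (lines 785–790, 836–841, 870–875 and 895–900) filters out memories already collected by an earlier phase, using the first 100 characters of content as a cheap identity key. The same check factored into a helper; `dedupeByContentPrefix` is a hypothetical name, not a function defined in the hook:

```javascript
// Sketch of the cross-phase deduplication: a 100-character content prefix acts as the identity key.
function dedupeByContentPrefix(existing, candidates, prefixLength = 100) {
  return candidates.filter(candidate =>
    !existing.some(memory =>
      memory.content && candidate.content &&
      memory.content.substring(0, prefixLength) === candidate.content.substring(0, prefixLength)
    )
  );
}

const collected = [{ content: 'Decision: adopt the hybrid storage backend for local-first sync' }];
const phaseResults = [
  { content: 'Decision: adopt the hybrid storage backend for local-first sync' }, // duplicate, dropped
  { content: 'Session-start hook now derives queries from recent git commits' }    // new, kept
];
collected.push(...dedupeByContentPrefix(collected, phaseResults));
console.log(collected.length); // 2
```

Because only the leading characters are compared, memories that diverge later in their text still count as duplicates; that trade-off keeps the check cheap across four phases.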
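
Memories found in the Phase 0 git search carry a `_gitContextWeight`, and lines 1017–1031 multiply their relevance score by that weight, cap the result at 1.0, and re-sort. With the default weight of 1.2 the arithmetic works out as follows (the input scores are made up):

```javascript
// Worked example of the git-context score boost; input scores are illustrative.
const gitContextWeight = 1.2; // default for config.gitAnalysis?.gitContextWeight

const scored = [
  { content: 'git-derived memory', relevanceScore: 0.70, _gitContextWeight: gitContextWeight },
  { content: 'regular memory', relevanceScore: 0.80 }
];

const boosted = scored
  .map(memory => memory._gitContextWeight && memory._gitContextWeight !== 1.0
    ? {
        ...memory,
        _originalScore: memory.relevanceScore,
        relevanceScore: Math.min(1.0, memory.relevanceScore * memory._gitContextWeight),
        _wasBoosted: true
      }
    : memory)
  .sort((a, b) => b.relevanceScore - a.relevanceScore);

console.log(boosted.map(m => `${m.content}: ${m.relevanceScore.toFixed(2)}`));
// [ 'git-derived memory: 0.84', 'regular memory: 0.80' ]
```

The cap at 1.0 means an already near-perfect score gains little from the boost, which is why the hook warns when `gitContextWeight` is configured above 1.5.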