This is page 18 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── amp-bridge.md
│ │ ├── amp-pr-automator.md
│ │ ├── code-quality-guard.md
│ │ ├── gemini-pr-automator.md
│ │ └── github-release-manager.md
│ ├── settings.local.json.backup
│ └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│ ├── FUNDING.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.yml
│ │ ├── config.yml
│ │ ├── feature_request.yml
│ │ └── performance_issue.yml
│ ├── pull_request_template.md
│ └── workflows
│ ├── bridge-tests.yml
│ ├── CACHE_FIX.md
│ ├── claude-code-review.yml
│ ├── claude.yml
│ ├── cleanup-images.yml.disabled
│ ├── dev-setup-validation.yml
│ ├── docker-publish.yml
│ ├── LATEST_FIXES.md
│ ├── main-optimized.yml.disabled
│ ├── main.yml
│ ├── publish-and-test.yml
│ ├── README_OPTIMIZATION.md
│ ├── release-tag.yml.disabled
│ ├── release.yml
│ ├── roadmap-review-reminder.yml
│ ├── SECRET_CONDITIONAL_FIX.md
│ └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│ ├── .gitignore
│ └── reports
│ └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│ ├── deployment
│ │ ├── deploy_fastmcp_fixed.sh
│ │ ├── deploy_http_with_mcp.sh
│ │ └── deploy_mcp_v4.sh
│ ├── deployment-configs
│ │ ├── empty_config.yml
│ │ └── smithery.yaml
│ ├── development
│ │ └── test_fastmcp.py
│ ├── docs-removed-2025-08-23
│ │ ├── authentication.md
│ │ ├── claude_integration.md
│ │ ├── claude-code-compatibility.md
│ │ ├── claude-code-integration.md
│ │ ├── claude-code-quickstart.md
│ │ ├── claude-desktop-setup.md
│ │ ├── complete-setup-guide.md
│ │ ├── database-synchronization.md
│ │ ├── development
│ │ │ ├── autonomous-memory-consolidation.md
│ │ │ ├── CLEANUP_PLAN.md
│ │ │ ├── CLEANUP_README.md
│ │ │ ├── CLEANUP_SUMMARY.md
│ │ │ ├── dream-inspired-memory-consolidation.md
│ │ │ ├── hybrid-slm-memory-consolidation.md
│ │ │ ├── mcp-milestone.md
│ │ │ ├── multi-client-architecture.md
│ │ │ ├── test-results.md
│ │ │ └── TIMESTAMP_FIX_SUMMARY.md
│ │ ├── distributed-sync.md
│ │ ├── invocation_guide.md
│ │ ├── macos-intel.md
│ │ ├── master-guide.md
│ │ ├── mcp-client-configuration.md
│ │ ├── multi-client-server.md
│ │ ├── service-installation.md
│ │ ├── sessions
│ │ │ └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│ │ ├── UBUNTU_SETUP.md
│ │ ├── ubuntu.md
│ │ ├── windows-setup.md
│ │ └── windows.md
│ ├── docs-root-cleanup-2025-08-23
│ │ ├── AWESOME_LIST_SUBMISSION.md
│ │ ├── CLOUDFLARE_IMPLEMENTATION.md
│ │ ├── DOCUMENTATION_ANALYSIS.md
│ │ ├── DOCUMENTATION_CLEANUP_PLAN.md
│ │ ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│ │ ├── LITESTREAM_SETUP_GUIDE.md
│ │ ├── lm_studio_system_prompt.md
│ │ ├── PYTORCH_DOWNLOAD_FIX.md
│ │ └── README-ORIGINAL-BACKUP.md
│ ├── investigations
│ │ └── MACOS_HOOKS_INVESTIGATION.md
│ ├── litestream-configs-v6.3.0
│ │ ├── install_service.sh
│ │ ├── litestream_master_config_fixed.yml
│ │ ├── litestream_master_config.yml
│ │ ├── litestream_replica_config_fixed.yml
│ │ ├── litestream_replica_config.yml
│ │ ├── litestream_replica_simple.yml
│ │ ├── litestream-http.service
│ │ ├── litestream.service
│ │ └── requirements-cloudflare.txt
│ ├── release-notes
│ │ └── release-notes-v7.1.4.md
│ └── setup-development
│ ├── README.md
│ ├── setup_consolidation_mdns.sh
│ ├── STARTUP_SETUP_GUIDE.md
│ └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│ ├── memory-context.md
│ ├── memory-health.md
│ ├── memory-ingest-dir.md
│ ├── memory-ingest.md
│ ├── memory-recall.md
│ ├── memory-search.md
│ ├── memory-store.md
│ ├── README.md
│ └── session-start.md
├── claude-hooks
│ ├── config.json
│ ├── config.template.json
│ ├── CONFIGURATION.md
│ ├── core
│ │ ├── memory-retrieval.js
│ │ ├── mid-conversation.js
│ │ ├── session-end.js
│ │ ├── session-start.js
│ │ └── topic-change.js
│ ├── debug-pattern-test.js
│ ├── install_claude_hooks_windows.ps1
│ ├── install_hooks.py
│ ├── memory-mode-controller.js
│ ├── MIGRATION.md
│ ├── README-NATURAL-TRIGGERS.md
│ ├── README-phase2.md
│ ├── README.md
│ ├── simple-test.js
│ ├── statusline.sh
│ ├── test-adaptive-weights.js
│ ├── test-dual-protocol-hook.js
│ ├── test-mcp-hook.js
│ ├── test-natural-triggers.js
│ ├── test-recency-scoring.js
│ ├── tests
│ │ ├── integration-test.js
│ │ ├── phase2-integration-test.js
│ │ ├── test-code-execution.js
│ │ ├── test-cross-session.json
│ │ ├── test-session-tracking.json
│ │ └── test-threading.json
│ ├── utilities
│ │ ├── adaptive-pattern-detector.js
│ │ ├── context-formatter.js
│ │ ├── context-shift-detector.js
│ │ ├── conversation-analyzer.js
│ │ ├── dynamic-context-updater.js
│ │ ├── git-analyzer.js
│ │ ├── mcp-client.js
│ │ ├── memory-client.js
│ │ ├── memory-scorer.js
│ │ ├── performance-manager.js
│ │ ├── project-detector.js
│ │ ├── session-tracker.js
│ │ ├── tiered-conversation-monitor.js
│ │ └── version-checker.js
│ └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│ ├── amp-cli-bridge.md
│ ├── api
│ │ ├── code-execution-interface.md
│ │ ├── memory-metadata-api.md
│ │ ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│ │ ├── PHASE2_REPORT.md
│ │ └── tag-standardization.md
│ ├── architecture
│ │ ├── search-enhancement-spec.md
│ │ └── search-examples.md
│ ├── architecture.md
│ ├── archive
│ │ └── obsolete-workflows
│ │ ├── load_memory_context.md
│ │ └── README.md
│ ├── assets
│ │ └── images
│ │ ├── dashboard-v3.3.0-preview.png
│ │ ├── memory-awareness-hooks-example.png
│ │ ├── project-infographic.svg
│ │ └── README.md
│ ├── CLAUDE_CODE_QUICK_REFERENCE.md
│ ├── cloudflare-setup.md
│ ├── deployment
│ │ ├── docker.md
│ │ ├── dual-service.md
│ │ ├── production-guide.md
│ │ └── systemd-service.md
│ ├── development
│ │ ├── ai-agent-instructions.md
│ │ ├── code-quality
│ │ │ ├── phase-2a-completion.md
│ │ │ ├── phase-2a-handle-get-prompt.md
│ │ │ ├── phase-2a-index.md
│ │ │ ├── phase-2a-install-package.md
│ │ │ └── phase-2b-session-summary.md
│ │ ├── code-quality-workflow.md
│ │ ├── dashboard-workflow.md
│ │ ├── issue-management.md
│ │ ├── pr-review-guide.md
│ │ ├── refactoring-notes.md
│ │ ├── release-checklist.md
│ │ └── todo-tracker.md
│ ├── docker-optimized-build.md
│ ├── document-ingestion.md
│ ├── DOCUMENTATION_AUDIT.md
│ ├── enhancement-roadmap-issue-14.md
│ ├── examples
│ │ ├── analysis-scripts.js
│ │ ├── maintenance-session-example.md
│ │ ├── memory-distribution-chart.jsx
│ │ └── tag-schema.json
│ ├── first-time-setup.md
│ ├── glama-deployment.md
│ ├── guides
│ │ ├── advanced-command-examples.md
│ │ ├── chromadb-migration.md
│ │ ├── commands-vs-mcp-server.md
│ │ ├── mcp-enhancements.md
│ │ ├── mdns-service-discovery.md
│ │ ├── memory-consolidation-guide.md
│ │ ├── migration.md
│ │ ├── scripts.md
│ │ └── STORAGE_BACKENDS.md
│ ├── HOOK_IMPROVEMENTS.md
│ ├── hooks
│ │ └── phase2-code-execution-migration.md
│ ├── http-server-management.md
│ ├── ide-compatability.md
│ ├── IMAGE_RETENTION_POLICY.md
│ ├── images
│ │ └── dashboard-placeholder.md
│ ├── implementation
│ │ ├── health_checks.md
│ │ └── performance.md
│ ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│ ├── integration
│ │ ├── homebrew.md
│ │ └── multi-client.md
│ ├── integrations
│ │ ├── gemini.md
│ │ ├── groq-bridge.md
│ │ ├── groq-integration-summary.md
│ │ └── groq-model-comparison.md
│ ├── integrations.md
│ ├── legacy
│ │ └── dual-protocol-hooks.md
│ ├── LM_STUDIO_COMPATIBILITY.md
│ ├── maintenance
│ │ └── memory-maintenance.md
│ ├── mastery
│ │ ├── api-reference.md
│ │ ├── architecture-overview.md
│ │ ├── configuration-guide.md
│ │ ├── local-setup-and-run.md
│ │ ├── testing-guide.md
│ │ └── troubleshooting.md
│ ├── migration
│ │ └── code-execution-api-quick-start.md
│ ├── natural-memory-triggers
│ │ ├── cli-reference.md
│ │ ├── installation-guide.md
│ │ └── performance-optimization.md
│ ├── oauth-setup.md
│ ├── pr-graphql-integration.md
│ ├── quick-setup-cloudflare-dual-environment.md
│ ├── README.md
│ ├── remote-configuration-wiki-section.md
│ ├── research
│ │ ├── code-execution-interface-implementation.md
│ │ └── code-execution-interface-summary.md
│ ├── ROADMAP.md
│ ├── sqlite-vec-backend.md
│ ├── statistics
│ │ ├── charts
│ │ │ ├── activity_patterns.png
│ │ │ ├── contributors.png
│ │ │ ├── growth_trajectory.png
│ │ │ ├── monthly_activity.png
│ │ │ └── october_sprint.png
│ │ ├── data
│ │ │ ├── activity_by_day.csv
│ │ │ ├── activity_by_hour.csv
│ │ │ ├── contributors.csv
│ │ │ └── monthly_activity.csv
│ │ ├── generate_charts.py
│ │ └── REPOSITORY_STATISTICS.md
│ ├── technical
│ │ ├── development.md
│ │ ├── memory-migration.md
│ │ ├── migration-log.md
│ │ ├── sqlite-vec-embedding-fixes.md
│ │ └── tag-storage.md
│ ├── testing
│ │ └── regression-tests.md
│ ├── testing-cloudflare-backend.md
│ ├── troubleshooting
│ │ ├── cloudflare-api-token-setup.md
│ │ ├── cloudflare-authentication.md
│ │ ├── general.md
│ │ ├── hooks-quick-reference.md
│ │ ├── pr162-schema-caching-issue.md
│ │ ├── session-end-hooks.md
│ │ └── sync-issues.md
│ └── tutorials
│ ├── advanced-techniques.md
│ ├── data-analysis.md
│ └── demo-session-walkthrough.md
├── examples
│ ├── claude_desktop_config_template.json
│ ├── claude_desktop_config_windows.json
│ ├── claude-desktop-http-config.json
│ ├── config
│ │ └── claude_desktop_config.json
│ ├── http-mcp-bridge.js
│ ├── memory_export_template.json
│ ├── README.md
│ ├── setup
│ │ └── setup_multi_client_complete.py
│ └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│ ├── .claude
│ │ └── settings.local.json
│ ├── archive
│ │ └── check_missing_timestamps.py
│ ├── backup
│ │ ├── backup_memories.py
│ │ ├── backup_sqlite_vec.sh
│ │ ├── export_distributable_memories.sh
│ │ └── restore_memories.py
│ ├── benchmarks
│ │ ├── benchmark_code_execution_api.py
│ │ ├── benchmark_hybrid_sync.py
│ │ └── benchmark_server_caching.py
│ ├── database
│ │ ├── analyze_sqlite_vec_db.py
│ │ ├── check_sqlite_vec_status.py
│ │ ├── db_health_check.py
│ │ └── simple_timestamp_check.py
│ ├── development
│ │ ├── debug_server_initialization.py
│ │ ├── find_orphaned_files.py
│ │ ├── fix_mdns.sh
│ │ ├── fix_sitecustomize.py
│ │ ├── remote_ingest.sh
│ │ ├── setup-git-merge-drivers.sh
│ │ ├── uv-lock-merge.sh
│ │ └── verify_hybrid_sync.py
│ ├── hooks
│ │ └── pre-commit
│ ├── installation
│ │ ├── install_linux_service.py
│ │ ├── install_macos_service.py
│ │ ├── install_uv.py
│ │ ├── install_windows_service.py
│ │ ├── install.py
│ │ ├── setup_backup_cron.sh
│ │ ├── setup_claude_mcp.sh
│ │ └── setup_cloudflare_resources.py
│ ├── linux
│ │ ├── service_status.sh
│ │ ├── start_service.sh
│ │ ├── stop_service.sh
│ │ ├── uninstall_service.sh
│ │ └── view_logs.sh
│ ├── maintenance
│ │ ├── assign_memory_types.py
│ │ ├── check_memory_types.py
│ │ ├── cleanup_corrupted_encoding.py
│ │ ├── cleanup_memories.py
│ │ ├── cleanup_organize.py
│ │ ├── consolidate_memory_types.py
│ │ ├── consolidation_mappings.json
│ │ ├── delete_orphaned_vectors_fixed.py
│ │ ├── fast_cleanup_duplicates_with_tracking.sh
│ │ ├── find_all_duplicates.py
│ │ ├── find_cloudflare_duplicates.py
│ │ ├── find_duplicates.py
│ │ ├── memory-types.md
│ │ ├── README.md
│ │ ├── recover_timestamps_from_cloudflare.py
│ │ ├── regenerate_embeddings.py
│ │ ├── repair_malformed_tags.py
│ │ ├── repair_memories.py
│ │ ├── repair_sqlite_vec_embeddings.py
│ │ ├── repair_zero_embeddings.py
│ │ ├── restore_from_json_export.py
│ │ └── scan_todos.sh
│ ├── migration
│ │ ├── cleanup_mcp_timestamps.py
│ │ ├── legacy
│ │ │ └── migrate_chroma_to_sqlite.py
│ │ ├── mcp-migration.py
│ │ ├── migrate_sqlite_vec_embeddings.py
│ │ ├── migrate_storage.py
│ │ ├── migrate_tags.py
│ │ ├── migrate_timestamps.py
│ │ ├── migrate_to_cloudflare.py
│ │ ├── migrate_to_sqlite_vec.py
│ │ ├── migrate_v5_enhanced.py
│ │ ├── TIMESTAMP_CLEANUP_README.md
│ │ └── verify_mcp_timestamps.py
│ ├── pr
│ │ ├── amp_collect_results.sh
│ │ ├── amp_detect_breaking_changes.sh
│ │ ├── amp_generate_tests.sh
│ │ ├── amp_pr_review.sh
│ │ ├── amp_quality_gate.sh
│ │ ├── amp_suggest_fixes.sh
│ │ ├── auto_review.sh
│ │ ├── detect_breaking_changes.sh
│ │ ├── generate_tests.sh
│ │ ├── lib
│ │ │ └── graphql_helpers.sh
│ │ ├── quality_gate.sh
│ │ ├── resolve_threads.sh
│ │ ├── run_pyscn_analysis.sh
│ │ ├── run_quality_checks.sh
│ │ ├── thread_status.sh
│ │ └── watch_reviews.sh
│ ├── quality
│ │ ├── fix_dead_code_install.sh
│ │ ├── phase1_dead_code_analysis.md
│ │ ├── phase2_complexity_analysis.md
│ │ ├── README_PHASE1.md
│ │ ├── README_PHASE2.md
│ │ ├── track_pyscn_metrics.sh
│ │ └── weekly_quality_review.sh
│ ├── README.md
│ ├── run
│ │ ├── run_mcp_memory.sh
│ │ ├── run-with-uv.sh
│ │ └── start_sqlite_vec.sh
│ ├── run_memory_server.py
│ ├── server
│ │ ├── check_http_server.py
│ │ ├── check_server_health.py
│ │ ├── memory_offline.py
│ │ ├── preload_models.py
│ │ ├── run_http_server.py
│ │ ├── run_memory_server.py
│ │ ├── start_http_server.bat
│ │ └── start_http_server.sh
│ ├── service
│ │ ├── deploy_dual_services.sh
│ │ ├── install_http_service.sh
│ │ ├── mcp-memory-http.service
│ │ ├── mcp-memory.service
│ │ ├── memory_service_manager.sh
│ │ ├── service_control.sh
│ │ ├── service_utils.py
│ │ └── update_service.sh
│ ├── sync
│ │ ├── check_drift.py
│ │ ├── claude_sync_commands.py
│ │ ├── export_memories.py
│ │ ├── import_memories.py
│ │ ├── litestream
│ │ │ ├── apply_local_changes.sh
│ │ │ ├── enhanced_memory_store.sh
│ │ │ ├── init_staging_db.sh
│ │ │ ├── io.litestream.replication.plist
│ │ │ ├── manual_sync.sh
│ │ │ ├── memory_sync.sh
│ │ │ ├── pull_remote_changes.sh
│ │ │ ├── push_to_remote.sh
│ │ │ ├── README.md
│ │ │ ├── resolve_conflicts.sh
│ │ │ ├── setup_local_litestream.sh
│ │ │ ├── setup_remote_litestream.sh
│ │ │ ├── staging_db_init.sql
│ │ │ ├── stash_local_changes.sh
│ │ │ ├── sync_from_remote_noconfig.sh
│ │ │ └── sync_from_remote.sh
│ │ ├── README.md
│ │ ├── safe_cloudflare_update.sh
│ │ ├── sync_memory_backends.py
│ │ └── sync_now.py
│ ├── testing
│ │ ├── run_complete_test.py
│ │ ├── run_memory_test.sh
│ │ ├── simple_test.py
│ │ ├── test_cleanup_logic.py
│ │ ├── test_cloudflare_backend.py
│ │ ├── test_docker_functionality.py
│ │ ├── test_installation.py
│ │ ├── test_mdns.py
│ │ ├── test_memory_api.py
│ │ ├── test_memory_simple.py
│ │ ├── test_migration.py
│ │ ├── test_search_api.py
│ │ ├── test_sqlite_vec_embeddings.py
│ │ ├── test_sse_events.py
│ │ ├── test-connection.py
│ │ └── test-hook.js
│ ├── utils
│ │ ├── claude_commands_utils.py
│ │ ├── generate_personalized_claude_md.sh
│ │ ├── groq
│ │ ├── groq_agent_bridge.py
│ │ ├── list-collections.py
│ │ ├── memory_wrapper_uv.py
│ │ ├── query_memories.py
│ │ ├── smithery_wrapper.py
│ │ ├── test_groq_bridge.sh
│ │ └── uv_wrapper.py
│ └── validation
│ ├── check_dev_setup.py
│ ├── check_documentation_links.py
│ ├── diagnose_backend_config.py
│ ├── validate_configuration_complete.py
│ ├── validate_memories.py
│ ├── validate_migration.py
│ ├── validate_timestamp_integrity.py
│ ├── verify_environment.py
│ ├── verify_pytorch_windows.py
│ └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│ └── mcp_memory_service
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── client.py
│ │ ├── operations.py
│ │ ├── sync_wrapper.py
│ │ └── types.py
│ ├── backup
│ │ ├── __init__.py
│ │ └── scheduler.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── ingestion.py
│ │ ├── main.py
│ │ └── utils.py
│ ├── config.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── associations.py
│ │ ├── base.py
│ │ ├── clustering.py
│ │ ├── compression.py
│ │ ├── consolidator.py
│ │ ├── decay.py
│ │ ├── forgetting.py
│ │ ├── health.py
│ │ └── scheduler.py
│ ├── dependency_check.py
│ ├── discovery
│ │ ├── __init__.py
│ │ ├── client.py
│ │ └── mdns_service.py
│ ├── embeddings
│ │ ├── __init__.py
│ │ └── onnx_embeddings.py
│ ├── ingestion
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chunker.py
│ │ ├── csv_loader.py
│ │ ├── json_loader.py
│ │ ├── pdf_loader.py
│ │ ├── registry.py
│ │ ├── semtools_loader.py
│ │ └── text_loader.py
│ ├── lm_studio_compat.py
│ ├── mcp_server.py
│ ├── models
│ │ ├── __init__.py
│ │ └── memory.py
│ ├── server.py
│ ├── services
│ │ ├── __init__.py
│ │ └── memory_service.py
│ ├── storage
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloudflare.py
│ │ ├── factory.py
│ │ ├── http_client.py
│ │ ├── hybrid.py
│ │ └── sqlite_vec.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── exporter.py
│ │ ├── importer.py
│ │ └── litestream_config.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── cache_manager.py
│ │ ├── content_splitter.py
│ │ ├── db_utils.py
│ │ ├── debug.py
│ │ ├── document_processing.py
│ │ ├── gpu_detection.py
│ │ ├── hashing.py
│ │ ├── http_server_manager.py
│ │ ├── port_detection.py
│ │ ├── system_detection.py
│ │ └── time_parser.py
│ └── web
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── analytics.py
│ │ ├── backup.py
│ │ ├── consolidation.py
│ │ ├── documents.py
│ │ ├── events.py
│ │ ├── health.py
│ │ ├── manage.py
│ │ ├── mcp.py
│ │ ├── memories.py
│ │ ├── search.py
│ │ └── sync.py
│ ├── app.py
│ ├── dependencies.py
│ ├── oauth
│ │ ├── __init__.py
│ │ ├── authorization.py
│ │ ├── discovery.py
│ │ ├── middleware.py
│ │ ├── models.py
│ │ ├── registration.py
│ │ └── storage.py
│ ├── sse.py
│ └── static
│ ├── app.js
│ ├── index.html
│ ├── README.md
│ ├── sse_test.html
│ └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── test_compact_types.py
│ │ └── test_operations.py
│ ├── bridge
│ │ ├── mock_responses.js
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ └── test_http_mcp_bridge.js
│ ├── conftest.py
│ ├── consolidation
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_associations.py
│ │ ├── test_clustering.py
│ │ ├── test_compression.py
│ │ ├── test_consolidator.py
│ │ ├── test_decay.py
│ │ └── test_forgetting.py
│ ├── contracts
│ │ └── api-specification.yml
│ ├── integration
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ ├── test_api_key_fallback.py
│ │ ├── test_api_memories_chronological.py
│ │ ├── test_api_tag_time_search.py
│ │ ├── test_api_with_memory_service.py
│ │ ├── test_bridge_integration.js
│ │ ├── test_cli_interfaces.py
│ │ ├── test_cloudflare_connection.py
│ │ ├── test_concurrent_clients.py
│ │ ├── test_data_serialization_consistency.py
│ │ ├── test_http_server_startup.py
│ │ ├── test_mcp_memory.py
│ │ ├── test_mdns_integration.py
│ │ ├── test_oauth_basic_auth.py
│ │ ├── test_oauth_flow.py
│ │ ├── test_server_handlers.py
│ │ └── test_store_memory.py
│ ├── performance
│ │ ├── test_background_sync.py
│ │ └── test_hybrid_live.py
│ ├── README.md
│ ├── smithery
│ │ └── test_smithery.py
│ ├── sqlite
│ │ └── simple_sqlite_vec_test.py
│ ├── test_client.py
│ ├── test_content_splitting.py
│ ├── test_database.py
│ ├── test_hybrid_cloudflare_limits.py
│ ├── test_hybrid_storage.py
│ ├── test_memory_ops.py
│ ├── test_semantic_search.py
│ ├── test_sqlite_vec_storage.py
│ ├── test_time_parser.py
│ ├── test_timestamp_preservation.py
│ ├── timestamp
│ │ ├── test_hook_vs_manual_storage.py
│ │ ├── test_issue99_final_validation.py
│ │ ├── test_search_retrieval_inconsistency.py
│ │ ├── test_timestamp_issue.py
│ │ └── test_timestamp_simple.py
│ └── unit
│ ├── conftest.py
│ ├── test_cloudflare_storage.py
│ ├── test_csv_loader.py
│ ├── test_fastapi_dependencies.py
│ ├── test_import.py
│ ├── test_json_loader.py
│ ├── test_mdns_simple.py
│ ├── test_mdns.py
│ ├── test_memory_service.py
│ ├── test_memory.py
│ ├── test_semtools_loader.py
│ ├── test_storage_interface_compatibility.py
│ └── test_tag_time_filtering.py
├── tools
│ ├── docker
│ │ ├── DEPRECATED.md
│ │ ├── docker-compose.http.yml
│ │ ├── docker-compose.pythonpath.yml
│ │ ├── docker-compose.standalone.yml
│ │ ├── docker-compose.uv.yml
│ │ ├── docker-compose.yml
│ │ ├── docker-entrypoint-persistent.sh
│ │ ├── docker-entrypoint-unified.sh
│ │ ├── docker-entrypoint.sh
│ │ ├── Dockerfile
│ │ ├── Dockerfile.glama
│ │ ├── Dockerfile.slim
│ │ ├── README.md
│ │ └── test-docker-modes.sh
│ └── README.md
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/claude-hooks/utilities/project-detector.js:
--------------------------------------------------------------------------------
```javascript
1 | /**
2 | * Project Context Detection Utility
3 | * Analyzes the current directory to determine project type, language, and context
4 | */
5 |
6 | const fs = require('fs').promises;
7 | const path = require('path');
8 | const { execSync } = require('child_process');
9 |
10 | /**
11 | * Detect programming language from file extensions
12 | */
13 | async function detectLanguage(directory) {
14 | try {
15 | const files = await fs.readdir(directory, { withFileTypes: true });
16 | const extensions = new Map();
17 |
18 | // Count file extensions
19 | for (const file of files) {
20 | if (file.isFile()) {
21 | const ext = path.extname(file.name).toLowerCase();
22 | if (ext) {
23 | extensions.set(ext, (extensions.get(ext) || 0) + 1);
24 | }
25 | }
26 | }
27 |
28 | // Language detection rules
29 | const languageMap = {
30 | '.js': 'JavaScript',
31 | '.ts': 'TypeScript',
32 | '.jsx': 'React/JavaScript',
33 | '.tsx': 'React/TypeScript',
34 | '.py': 'Python',
35 | '.rs': 'Rust',
36 | '.go': 'Go',
37 | '.java': 'Java',
38 | '.cpp': 'C++',
39 | '.c': 'C',
40 | '.cs': 'C#',
41 | '.php': 'PHP',
42 | '.rb': 'Ruby',
43 | '.swift': 'Swift',
44 | '.kt': 'Kotlin',
45 | '.scala': 'Scala',
46 | '.sh': 'Shell',
47 | '.md': 'Documentation'
48 | };
49 |
50 | // Find most common language extension
51 | let primaryLanguage = 'Unknown';
52 | let maxCount = 0;
53 |
54 | for (const [ext, count] of extensions.entries()) {
55 | if (languageMap[ext] && count > maxCount) {
56 | maxCount = count;
57 | primaryLanguage = languageMap[ext];
58 | }
59 | }
60 |
61 | return {
62 | primary: primaryLanguage,
63 | extensions: Object.fromEntries(extensions),
64 | confidence: maxCount > 0 ? Math.min(maxCount / 10, 1) : 0
65 | };
66 |
67 | } catch (error) {
68 | return { primary: 'Unknown', extensions: {}, confidence: 0 };
69 | }
70 | }
71 |
72 | /**
73 | * Detect framework and tools from configuration files
74 | */
75 | async function detectFramework(directory) {
76 | const frameworks = [];
77 | const tools = [];
78 |
79 | try {
80 | const files = await fs.readdir(directory);
81 |
82 | // Check for common configuration files
83 | const configFiles = {
84 | 'package.json': async () => {
85 | const pkg = JSON.parse(await fs.readFile(path.join(directory, 'package.json'), 'utf8'));
86 |
87 | // Check dependencies for frameworks
88 | const deps = { ...pkg.dependencies, ...pkg.devDependencies };
89 |
90 | if (deps.react || deps['@types/react']) frameworks.push('React');
91 | if (deps.vue || deps['@vue/cli']) frameworks.push('Vue.js');
92 | if (deps.angular || deps['@angular/core']) frameworks.push('Angular');
93 | if (deps.next || deps['next']) frameworks.push('Next.js');
94 | if (deps.express || deps['express']) frameworks.push('Express.js');
95 | if (deps.fastify) frameworks.push('Fastify');
96 | if (deps.svelte || deps['svelte']) frameworks.push('Svelte');
97 |
98 | tools.push('npm');
99 | return pkg.name || 'node-project';
100 | },
101 | 'pyproject.toml': async () => {
102 | tools.push('Python');
103 | const content = await fs.readFile(path.join(directory, 'pyproject.toml'), 'utf8');
104 |
105 | // Extract project name from pyproject.toml
106 | const nameMatch = content.match(/^name\s*=\s*["']([^"']+)["']/m);
107 |
108 | if (content.includes('django')) frameworks.push('Django');
109 | if (content.includes('flask')) frameworks.push('Flask');
110 | if (content.includes('fastapi')) frameworks.push('FastAPI');
111 | if (content.includes('pytest')) tools.push('pytest');
112 | if (content.includes('poetry')) tools.push('Poetry');
113 |
114 | return nameMatch ? nameMatch[1] : 'python-project';
115 | },
116 | 'Cargo.toml': async () => {
117 | tools.push('Cargo');
118 | const content = await fs.readFile(path.join(directory, 'Cargo.toml'), 'utf8');
119 |
120 | const nameMatch = content.match(/^name\s*=\s*["']([^"']+)["']/m);
121 |
122 | if (content.includes('actix-web')) frameworks.push('Actix Web');
123 | if (content.includes('rocket')) frameworks.push('Rocket');
124 | if (content.includes('warp')) frameworks.push('Warp');
125 | if (content.includes('tokio')) frameworks.push('Tokio');
126 |
127 | return nameMatch ? nameMatch[1] : 'rust-project';
128 | },
129 | 'go.mod': async () => {
130 | tools.push('Go Modules');
131 | const content = await fs.readFile(path.join(directory, 'go.mod'), 'utf8');
132 |
133 | const moduleMatch = content.match(/^module\s+(.+)$/m);
134 |
135 | if (content.includes('gin-gonic/gin')) frameworks.push('Gin');
136 | if (content.includes('gorilla/mux')) frameworks.push('Gorilla Mux');
137 | if (content.includes('fiber')) frameworks.push('Fiber');
138 |
139 | return moduleMatch ? path.basename(moduleMatch[1]) : 'go-project';
140 | },
141 | 'pom.xml': () => {
142 | tools.push('Maven');
143 | frameworks.push('Java/Maven');
144 | return 'java-maven-project';
145 | },
146 | 'build.gradle': () => {
147 | tools.push('Gradle');
148 | frameworks.push('Java/Gradle');
149 | return 'java-gradle-project';
150 | },
151 | 'docker-compose.yml': () => {
152 | tools.push('Docker Compose');
153 | return null;
154 | },
155 | 'Dockerfile': () => {
156 | tools.push('Docker');
157 | return null;
158 | },
159 | '.env': () => {
160 | tools.push('Environment Config');
161 | return null;
162 | }
163 | };
164 |
165 | let projectName = null;
166 |
167 | for (const file of files) {
168 | if (configFiles[file]) {
169 | const result = await configFiles[file]();
170 | if (result && !projectName) {
171 | projectName = result;
172 | }
173 | }
174 | }
175 |
176 | return {
177 | frameworks,
178 | tools,
179 | projectName
180 | };
181 |
182 | } catch (error) {
183 | return { frameworks: [], tools: [], projectName: null };
184 | }
185 | }
186 |
187 | /**
188 | * Get Git repository information
189 | */
190 | function getGitInfo(directory) {
191 | try {
192 | const gitDir = path.join(directory, '.git');
193 |
194 | // Check if this is a git repository
195 | const isGitRepo = require('fs').existsSync(gitDir);
196 | if (!isGitRepo) {
197 | return { isRepo: false };
198 | }
199 |
200 | // Get repository information
201 | const remoteBranch = execSync('git branch --show-current', { cwd: path.resolve(directory), encoding: 'utf8' }).trim();
202 | const remoteUrl = execSync('git config --get remote.origin.url', { cwd: path.resolve(directory), encoding: 'utf8' }).trim();
203 | const lastCommit = execSync('git log -1 --pretty=format:"%h %s"', { cwd: path.resolve(directory), encoding: 'utf8' }).trim();
204 |
205 | // Extract repository name from URL
206 | let repoName = 'unknown-repo';
207 | if (remoteUrl) {
208 | const match = remoteUrl.match(/([^\/]+)(?:\.git)?$/);
209 | if (match) {
210 | repoName = match[1].replace('.git', '');
211 | }
212 | }
213 |
214 | return {
215 | isRepo: true,
216 | branch: remoteBranch,
217 | remoteUrl,
218 | repoName,
219 | lastCommit
220 | };
221 |
222 | } catch (error) {
223 | return { isRepo: false, error: error.message };
224 | }
225 | }
226 |
227 | // ANSI Colors for console output
228 | const COLORS = {
229 | RESET: '\x1b[0m',
230 | BRIGHT: '\x1b[1m',
231 | DIM: '\x1b[2m',
232 | CYAN: '\x1b[36m',
233 | GREEN: '\x1b[32m',
234 | BLUE: '\x1b[34m',
235 | YELLOW: '\x1b[33m',
236 | GRAY: '\x1b[90m',
237 | RED: '\x1b[31m'
238 | };
239 |
240 | /**
241 | * Main project context detection function with enhanced visual output
242 | */
243 | async function detectProjectContext(directory = process.cwd()) {
244 | try {
245 | const directoryName = path.basename(directory);
246 | console.log(`${COLORS.BLUE}📂 Project Detector${COLORS.RESET} ${COLORS.DIM}→${COLORS.RESET} Analyzing ${COLORS.BRIGHT}${directoryName}${COLORS.RESET}`);
247 |
248 | // Get basic directory information
249 |
250 | // Detect language
251 | const language = await detectLanguage(directory);
252 |
253 | // Detect framework and tools
254 | const framework = await detectFramework(directory);
255 |
256 | // Get Git information
257 | const git = getGitInfo(directory);
258 |
259 | // Determine project name (priority: git repo > config file > directory name)
260 | const projectName = framework.projectName || git.repoName || directoryName;
261 |
262 | // Calculate confidence score
263 | let confidence = 0.5; // Base confidence
264 | if (git.isRepo) confidence += 0.3;
265 | if (framework.frameworks.length > 0) confidence += 0.2;
266 | if (language.confidence > 0.5) confidence += language.confidence * 0.3;
267 |
268 | const context = {
269 | name: projectName,
270 | directory,
271 | language: language.primary,
272 | languageDetails: language,
273 | frameworks: framework.frameworks,
274 | tools: framework.tools,
275 | git: git,
276 | confidence: Math.min(confidence, 1.0),
277 | metadata: {
278 | detectedAt: new Date().toISOString(),
279 | analyzer: 'claude-hooks-project-detector',
280 | version: '1.1.0'
281 | }
282 | };
283 |
284 | // Enhanced output with confidence indication
285 | const confidencePercent = (context.confidence * 100).toFixed(0);
286 | const confidenceColor = context.confidence > 0.8 ? COLORS.GREEN :
287 | context.confidence > 0.6 ? COLORS.YELLOW : COLORS.GRAY;
288 |
289 | console.log(`${COLORS.BLUE}📊 Detection Result${COLORS.RESET} ${COLORS.DIM}→${COLORS.RESET} ${COLORS.BRIGHT}${context.name}${COLORS.RESET} ${COLORS.GRAY}(${context.language})${COLORS.RESET} ${COLORS.DIM}•${COLORS.RESET} ${confidenceColor}${confidencePercent}%${COLORS.RESET}`);
290 |
291 | return context;
292 |
293 | } catch (error) {
294 | console.error(`${COLORS.RED}❌ Project Detector Error${COLORS.RESET} ${COLORS.DIM}→${COLORS.RESET} ${error.message}`);
295 |
296 | // Return minimal context on error
297 | return {
298 | name: path.basename(directory),
299 | directory,
300 | language: 'Unknown',
301 | frameworks: [],
302 | tools: [],
303 | confidence: 0.1,
304 | error: error.message
305 | };
306 | }
307 | }
308 |
309 | module.exports = {
310 | detectProjectContext,
311 | detectLanguage,
312 | detectFramework,
313 | getGitInfo
314 | };
315 |
316 | // Direct execution support for testing
317 | if (require.main === module) {
318 | detectProjectContext(process.cwd())
319 | .then(context => {
320 | console.log('\n=== PROJECT CONTEXT ===');
321 | console.log(JSON.stringify(context, null, 2));
322 | console.log('=== END CONTEXT ===\n');
323 | })
324 | .catch(error => console.error('Detection failed:', error));
325 | }
```
--------------------------------------------------------------------------------
/src/mcp_memory_service/sync/litestream_config.py:
--------------------------------------------------------------------------------
```python
1 | # Copyright 2024 Heinrich Krupp
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | Litestream configuration management for database synchronization.
17 | """
18 |
19 | import yaml
20 | import logging
21 | import platform
22 | from pathlib import Path
23 | from typing import Dict, Any, Optional, List
24 |
25 | logger = logging.getLogger(__name__)
26 |
27 |
class LitestreamManager:
    """
    Manages Litestream configuration for SQLite database replication.

    Provides utilities to generate configuration dictionaries, write them
    out as YAML, and produce platform-specific service files and deployment
    scripts for different deployment scenarios and machine types.
    """

    def __init__(self):
        """Initialize the Litestream manager."""
        # Normalized OS name: "windows", "darwin" (macOS), or "linux".
        self.platform = platform.system().lower()

    def generate_master_config(
        self,
        db_path: Path,
        replica_endpoint: str,
        backup_path: Optional[Path] = None,
        checkpoint_interval: str = "30s",
        wal_retention: str = "10m"
    ) -> Dict[str, Any]:
        """
        Generate Litestream configuration for a master database.

        Args:
            db_path: Path to the SQLite database
            replica_endpoint: Filesystem path where replicas can read the stream
            backup_path: Optional local backup path
            checkpoint_interval: How often to checkpoint the WAL
            wal_retention: How long to retain WAL entries

        Returns:
            Litestream configuration dictionary
        """
        config = {
            "dbs": [{
                "path": str(db_path),
                "replicas": [],
                "checkpoint-interval": checkpoint_interval,
                "wal-retention": wal_retention
            }]
        }

        db_config = config["dbs"][0]

        # Primary file-based replica endpoint that downstream replicas sync
        # from. (This is a "file" replica, not an HTTP endpoint, despite the
        # name of the parameter in some callers.)
        if replica_endpoint:
            db_config["replicas"].append({
                "type": "file",
                "path": replica_endpoint,
                "sync-interval": "10s"
            })

        # Optional, slower local backup replica.
        if backup_path:
            db_config["replicas"].append({
                "type": "file",
                "path": str(backup_path),
                "sync-interval": "1m"
            })

        return config

    def generate_replica_config(
        self,
        db_path: Path,
        upstream_url: str,
        sync_interval: str = "10s"
    ) -> Dict[str, Any]:
        """
        Generate Litestream configuration for a replica database.

        Args:
            db_path: Local path for the replicated database
            upstream_url: URL of the master database stream
                (e.g. "file:///mnt/stream" or "s3://bucket/path")
            sync_interval: How often to sync from upstream

        Returns:
            Litestream configuration dictionary
        """
        # Fix: emit a bare "url" replica entry so Litestream infers the
        # replica type from the URL scheme. The previous version combined
        # "type": "file" with "url", which is contradictory — file-type
        # replicas are addressed with "path" (as generate_master_config
        # does), while URL-style entries must not carry an explicit type.
        config = {
            "dbs": [{
                "path": str(db_path),
                "replicas": [{
                    "url": upstream_url,
                    "sync-interval": sync_interval
                }]
            }]
        }

        return config

    def generate_s3_config(
        self,
        db_path: Path,
        s3_endpoint: str,
        bucket: str,
        path: str,
        access_key: Optional[str] = None,
        secret_key: Optional[str] = None,
        is_master: bool = True
    ) -> Dict[str, Any]:
        """
        Generate Litestream configuration for S3-compatible storage.

        Args:
            db_path: Path to the SQLite database
            s3_endpoint: S3-compatible endpoint URL
            bucket: S3 bucket name
            path: Path within the bucket
            access_key: S3 access key (optional, can use env vars)
            secret_key: S3 secret key (optional, can use env vars)
            is_master: Whether this is the master or replica

        Returns:
            Litestream configuration dictionary
        """
        replica_config = {
            "type": "s3",
            "endpoint": s3_endpoint,
            "bucket": bucket,
            "path": path
        }

        # Credentials are optional; Litestream also reads the standard AWS
        # environment variables when they are omitted here.
        if access_key and secret_key:
            replica_config.update({
                "access-key-id": access_key,
                "secret-access-key": secret_key
            })

        # Only the master carries checkpoint/retention tuning; a replica
        # simply restores from the replica stream.
        db_config: Dict[str, Any] = {
            "path": str(db_path),
            "replicas": [replica_config]
        }
        if is_master:
            db_config["checkpoint-interval"] = "30s"
            db_config["wal-retention"] = "10m"

        return {"dbs": [db_config]}

    def get_default_config_path(self) -> Path:
        """Get the default Litestream configuration file path for this platform."""
        if self.platform == "windows":
            return Path("C:/ProgramData/litestream/litestream.yml")
        elif self.platform == "darwin":  # macOS
            return Path("/usr/local/etc/litestream.yml")
        else:  # Linux and other POSIX systems
            return Path("/etc/litestream.yml")

    def write_config(self, config: Dict[str, Any], config_path: Optional[Path] = None) -> Path:
        """
        Write Litestream configuration to file.

        Args:
            config: Configuration dictionary
            config_path: Path to write config file (uses default if not provided)

        Returns:
            Path where configuration was written
        """
        if config_path is None:
            config_path = self.get_default_config_path()

        # Create parent directory if needed
        config_path.parent.mkdir(parents=True, exist_ok=True)

        # sort_keys=False preserves insertion order so the emitted YAML
        # matches the structure built by the generate_* methods.
        with open(config_path, 'w') as f:
            yaml.dump(config, f, default_flow_style=False, sort_keys=False)

        logger.info(f"Litestream configuration written to {config_path}")
        return config_path

    def generate_systemd_service(self, config_path: Path) -> str:
        """
        Generate systemd service file content for Litestream.

        Args:
            config_path: Path to the Litestream configuration file

        Returns:
            Systemd service file content
        """
        service_content = f"""[Unit]
Description=Litestream replication service
After=network.target
StartLimitIntervalSec=0

[Service]
Type=simple
Restart=always
RestartSec=1
User=root
ExecStart=/usr/local/bin/litestream replicate -config {config_path}

[Install]
WantedBy=multi-user.target
"""
        return service_content

    def generate_launchd_plist(self, config_path: Path) -> str:
        """
        Generate macOS LaunchDaemon plist for Litestream.

        Args:
            config_path: Path to the Litestream configuration file

        Returns:
            LaunchDaemon plist content
        """
        plist_content = f"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>io.litestream.replication</string>
    <key>ProgramArguments</key>
    <array>
        <string>/usr/local/bin/litestream</string>
        <string>replicate</string>
        <string>-config</string>
        <string>{config_path}</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>KeepAlive</key>
    <true/>
    <key>StandardOutPath</key>
    <string>/var/log/litestream.log</string>
    <key>StandardErrorPath</key>
    <string>/var/log/litestream.log</string>
</dict>
</plist>
"""
        return plist_content

    def get_installation_commands(self) -> List[str]:
        """
        Get platform-specific Litestream installation commands.

        Returns:
            List of commands to install Litestream
        """
        if self.platform == "windows":
            # No unattended installer; return manual instructions as comments.
            return [
                "# Download and install Litestream for Windows",
                "# Visit: https://github.com/benbjohnson/litestream/releases",
                "# Extract litestream.exe to C:\\Program Files\\Litestream\\",
                "# Add to PATH environment variable"
            ]
        elif self.platform == "darwin":  # macOS
            return [
                "brew install benbjohnson/litestream/litestream"
            ]
        else:  # Linux
            return [
                "curl -LsS https://github.com/benbjohnson/litestream/releases/latest/download/litestream-linux-amd64.tar.gz | tar -xzf -",
                "sudo mv litestream /usr/local/bin/",
                "sudo chmod +x /usr/local/bin/litestream"
            ]

    def generate_deployment_script(
        self,
        role: str,  # "master" or "replica"
        db_path: Path,
        replica_endpoint: Optional[str] = None,
        upstream_url: Optional[str] = None
    ) -> str:
        """
        Generate a deployment script for setting up Litestream.

        Args:
            role: Whether this is a "master" or "replica"
            db_path: Path to the SQLite database
            replica_endpoint: Endpoint for serving replicas (master only)
            upstream_url: URL of master stream (replica only)

        Returns:
            Shell script content for deployment
        """
        install_commands = self.get_installation_commands()

        script_lines = [
            "#!/bin/bash",
            "# Litestream deployment script",
            f"# Role: {role}",
            "",
            "set -e",
            "",
            "echo 'Installing Litestream...'",
        ]

        script_lines.extend(install_commands)
        script_lines.extend([
            "",
            "echo 'Generating configuration...'",
        ])

        if role == "master":
            script_lines.extend([
                f"# Master configuration for {db_path}",
                f"# Serving replicas at: {replica_endpoint}",
            ])
        else:
            script_lines.extend([
                f"# Replica configuration for {db_path}",
                f"# Syncing from: {upstream_url}",
            ])

        script_lines.extend([
            "",
            "echo 'Starting Litestream service...'",
        ])

        # NOTE: no service-start commands are emitted on Windows; install
        # must be completed manually (see get_installation_commands).
        if self.platform == "linux":
            script_lines.extend([
                "sudo systemctl enable litestream",
                "sudo systemctl start litestream",
                "sudo systemctl status litestream",
            ])
        elif self.platform == "darwin":
            script_lines.extend([
                "sudo launchctl load /Library/LaunchDaemons/io.litestream.replication.plist",
                "sudo launchctl start io.litestream.replication",
            ])

        script_lines.extend([
            "",
            "echo 'Litestream deployment completed!'",
            ""
        ])

        return "\n".join(script_lines)
```
--------------------------------------------------------------------------------
/src/mcp_memory_service/ingestion/json_loader.py:
--------------------------------------------------------------------------------
```python
1 | # Copyright 2024 Heinrich Krupp
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | JSON document loader for structured data files.
17 | """
18 |
import asyncio
import json
import logging
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union

from .base import DocumentLoader, DocumentChunk
from .chunker import TextChunker, ChunkingStrategy
27 |
28 | logger = logging.getLogger(__name__)
29 |
30 |
class JSONLoader(DocumentLoader):
    """
    Document loader for JSON data files.

    Features:
    - Flattens nested JSON structures to searchable text
    - Preserves key-value context (e.g., "config.database.host: localhost")
    - Handles arrays and nested objects recursively
    - Supports configurable flattening strategies
    """

    def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200):
        """
        Initialize JSON loader.

        Args:
            chunk_size: Target size for text chunks in characters
            chunk_overlap: Number of characters to overlap between chunks
        """
        super().__init__(chunk_size, chunk_overlap)
        self.supported_extensions = ['json']

        self.chunker = TextChunker(ChunkingStrategy(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            respect_paragraph_boundaries=False,  # JSON doesn't have paragraphs
            respect_sentence_boundaries=False,  # JSON doesn't have sentences
            min_chunk_size=10  # Allow smaller chunks for structured data
        ))

    def can_handle(self, file_path: Path) -> bool:
        """
        Check if this loader can handle the given JSON file.

        Args:
            file_path: Path to the file to check

        Returns:
            True if this loader can process the JSON file
        """
        if not file_path.exists() or not file_path.is_file():
            return False

        extension = file_path.suffix.lower().lstrip('.')
        return extension in self.supported_extensions

    async def extract_chunks(self, file_path: Path, **kwargs) -> AsyncGenerator[DocumentChunk, None]:
        """
        Extract text chunks from a JSON file.

        Args:
            file_path: Path to the JSON file
            **kwargs: Additional options:
                - flatten_strategy: How to flatten nested structures ('dot_notation', 'bracket_notation')
                - max_depth: Maximum nesting depth to flatten (default: unlimited)
                - include_types: Whether to include value types in flattened text (default: False)
                - array_handling: How to handle arrays ('expand', 'summarize', 'flatten')

        Yields:
            DocumentChunk objects containing extracted text and metadata

        Raises:
            FileNotFoundError: If the JSON file doesn't exist
            ValueError: If the JSON file can't be parsed or processed
        """
        await self.validate_file(file_path)

        flatten_strategy = kwargs.get('flatten_strategy', 'dot_notation')
        max_depth: Optional[int] = kwargs.get('max_depth', None)
        include_types = kwargs.get('include_types', False)
        array_handling = kwargs.get('array_handling', 'expand')

        logger.info(f"Extracting chunks from JSON file: {file_path}")

        try:
            # Read and parse JSON (off the event loop; see _read_json_file)
            data, encoding = await self._read_json_file(file_path)

            # Flatten the JSON structure into "key.path: value" lines
            flattened_text = self._flatten_json(
                data,
                flatten_strategy=flatten_strategy,
                max_depth=max_depth,
                include_types=include_types,
                array_handling=array_handling
            )

            # Create base metadata describing the extraction parameters
            base_metadata = self.get_base_metadata(file_path)
            base_metadata.update({
                'encoding': encoding,
                'content_type': 'json',
                'flatten_strategy': flatten_strategy,
                'array_handling': array_handling,
                'include_types': include_types,
                'max_depth': max_depth,
                'original_keys_count': self._count_keys(data),
                'flattened_text_length': len(flattened_text)
            })

            # Chunk the flattened text
            chunks = self.chunker.chunk_text(flattened_text, base_metadata)

            for i, (chunk_text, chunk_metadata) in enumerate(chunks):
                yield DocumentChunk(
                    content=chunk_text,
                    metadata=chunk_metadata,
                    chunk_index=i,
                    source_file=file_path
                )

        except json.JSONDecodeError as e:
            logger.error(f"Invalid JSON in file {file_path}: {str(e)}")
            raise ValueError(f"Invalid JSON format: {str(e)}") from e
        except Exception as e:
            logger.error(f"Error extracting from JSON file {file_path}: {type(e).__name__} - {str(e)}")
            raise ValueError(f"Failed to extract JSON content: {str(e)}") from e

    async def _read_json_file(self, file_path: Path) -> Tuple[Any, str]:
        """
        Read and parse a JSON file without blocking the event loop.

        Args:
            file_path: Path to the JSON file

        Returns:
            Tuple of (parsed_data, encoding_used)
        """
        def _read_sync():
            # Try UTF-8 first (most common for JSON)
            try:
                with open(file_path, 'r', encoding='utf-8') as file:
                    content = file.read()
                data = json.loads(content)
                return data, 'utf-8'
            except UnicodeDecodeError:
                # Fallback to other encodings
                encodings_to_try = ['utf-16', 'utf-32', 'latin-1']
                for encoding in encodings_to_try:
                    try:
                        with open(file_path, 'r', encoding=encoding) as file:
                            content = file.read()
                        data = json.loads(content)
                        return data, encoding
                    except UnicodeDecodeError:
                        continue
                    except json.JSONDecodeError:
                        # Decoded but parsed garbage — try the next encoding
                        continue

                # Last resort with error replacement
                with open(file_path, 'r', encoding='utf-8', errors='replace') as file:
                    content = file.read()
                data = json.loads(content)
                return data, 'utf-8'

        # Run the blocking read/parse in the default thread pool.
        # get_running_loop() replaces the deprecated get_event_loop() —
        # it is the documented way to obtain the loop inside a coroutine.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, _read_sync)

    def _flatten_json(
        self,
        data: Any,
        prefix: str = "",
        flatten_strategy: str = 'dot_notation',
        max_depth: Optional[int] = None,
        current_depth: int = 0,
        include_types: bool = False,
        array_handling: str = 'expand'
    ) -> str:
        """
        Flatten JSON structure to searchable text.

        Args:
            data: JSON data to flatten
            prefix: Current key prefix
            flatten_strategy: Flattening strategy
            max_depth: Maximum depth to flatten (None = unlimited)
            current_depth: Current nesting depth
            include_types: Whether to include value types
            array_handling: How to handle arrays

        Returns:
            Flattened text representation, one "key: value" line per leaf
        """
        if max_depth is not None and current_depth >= max_depth:
            return f"{prefix}: [nested structure truncated at depth {max_depth}]\n"

        lines = []

        if isinstance(data, dict):
            for key, value in data.items():
                new_prefix = self._build_prefix(prefix, key, flatten_strategy)
                flattened_value = self._flatten_json(
                    value, new_prefix, flatten_strategy, max_depth,
                    current_depth + 1, include_types, array_handling
                )
                lines.append(flattened_value)
        elif isinstance(data, list):
            if array_handling == 'summarize':
                lines.append(f"{prefix}: [array with {len(data)} items]\n")
            elif array_handling == 'flatten':
                # Flatten all array items with indexed keys
                for i, item in enumerate(data):
                    new_prefix = self._build_prefix(prefix, str(i), flatten_strategy)
                    flattened_item = self._flatten_json(
                        item, new_prefix, flatten_strategy, max_depth,
                        current_depth + 1, include_types, array_handling
                    )
                    lines.append(flattened_item)
            else:  # 'expand' - default
                # Expand arrays as separate entries
                for i, item in enumerate(data):
                    if isinstance(item, (dict, list)):
                        new_prefix = f"{prefix}[{i}]"
                        flattened_item = self._flatten_json(
                            item, new_prefix, flatten_strategy, max_depth,
                            current_depth + 1, include_types, array_handling
                        )
                        lines.append(flattened_item)
                    else:
                        # Simple values in arrays
                        type_info = f" ({type(item).__name__})" if include_types else ""
                        lines.append(f"{prefix}[{i}]: {item}{type_info}\n")
        else:
            # Primitive values
            type_info = f" ({type(data).__name__})" if include_types else ""
            lines.append(f"{prefix}: {data}{type_info}\n")

        return "".join(lines)

    def _build_prefix(self, current_prefix: str, key: str, strategy: str) -> str:
        """
        Build the prefix for nested keys.

        Args:
            current_prefix: Current prefix
            key: Key to add
            strategy: Flattening strategy

        Returns:
            New prefix string
        """
        if not current_prefix:
            return key

        if strategy == 'bracket_notation':
            return f"{current_prefix}[{key}]"
        else:  # 'dot_notation' - default
            return f"{current_prefix}.{key}"

    def _count_keys(self, data: Any) -> int:
        """
        Count total number of keys in JSON structure.

        Args:
            data: JSON data to count keys in

        Returns:
            Total number of keys (recursively, including nested objects)
        """
        if isinstance(data, dict):
            count = len(data)
            for value in data.values():
                count += self._count_keys(value)
            return count
        elif isinstance(data, list):
            count = 0
            for item in data:
                count += self._count_keys(item)
            return count
        else:
            return 0
303 |
304 |
# Register the JSON loader
def _register_json_loader():
    """Register JSON loader with the registry.

    The registry import is performed inside the function so the module can
    still load when the registry is not importable (e.g. during partial
    imports); in that case registration is a no-op logged at debug level.
    """
    try:
        from .registry import register_loader
        register_loader(JSONLoader, ['json'])
        logger.debug("JSON loader registered successfully")
    except ImportError:
        logger.debug("Registry not available during import")


# Auto-register when module is imported
_register_json_loader()
318 |
```
--------------------------------------------------------------------------------
/claude-hooks/README-NATURAL-TRIGGERS.md:
--------------------------------------------------------------------------------
```markdown
1 | # Natural Memory Triggers for Claude Code
2 |
3 | 🧠 **Intelligent mid-conversation memory awareness with performance optimization**
4 |
5 | ## Overview
6 |
7 | The Natural Memory Triggers system provides seamless, intelligent memory awareness during conversations that feels like Claude naturally "remembers" rather than executing explicit system hooks. It uses multi-tiered performance architecture to balance memory intelligence with responsiveness.
8 |
9 | > **🎯 v8.5.1 NEW**: **Dynamic Memory Weight Adjustment** - Intelligent auto-calibration automatically detects when memories are stale and adjusts scoring weights to prioritize recent work. No more outdated context!
10 |
11 | ## Key Features
12 |
13 | ### 🎯 **Natural Language Pattern Detection**
14 | - **Explicit Memory Requests**: "What did we decide about...?", "Remind me how we..."
15 | - **Past Work References**: "Similar to what we did...", "Like we discussed before..."
16 | - **Technical Discussions**: Architecture, security, database topics that benefit from context
17 | - **Project Continuity**: "Continue with...", "Next step...", problem-solving patterns
18 |
19 | ### ⚡ **Performance-Aware Architecture**
20 | - **Tiered Processing**: Instant (< 50ms), Fast (< 150ms), Intensive (< 500ms)
21 | - **Smart Performance Profiles**: Speed-focused, Balanced, Memory-aware, Adaptive
22 | - **Automatic Degradation**: Gracefully reduces complexity when performance budgets are exceeded
23 | - **User-Configurable Trade-offs**: Full control over speed vs intelligence balance
24 |
25 | ### 🔄 **Adaptive Learning**
26 | - **User Preference Learning**: Adapts to user tolerance for latency vs memory awareness
27 | - **Pattern Confidence Adjustment**: Learns which patterns are most valuable to the user
28 | - **Context-Aware Triggering**: Considers project context, conversation history, and topic shifts
29 |
30 | ## Quick Start
31 |
32 | ### Installation
33 |
34 | The system is integrated into the existing Claude Code hooks. No additional installation required.
35 |
36 | ### Basic Usage
37 |
38 | ```bash
39 | # Check current status
40 | node claude-hooks/memory-mode-controller.js status
41 |
42 | # Switch to balanced mode (recommended)
43 | node claude-hooks/memory-mode-controller.js profile balanced
44 |
45 | # Enable natural triggers
46 | node claude-hooks/memory-mode-controller.js enable
47 | ```
48 |
49 | ### Performance Profiles
50 |
51 | Choose the profile that best matches your preferences:
52 |
53 | ```bash
54 | # Fastest response, minimal memory awareness (< 100ms)
55 | node claude-hooks/memory-mode-controller.js profile speed_focused
56 |
57 | # Moderate latency, smart triggers (< 200ms) - RECOMMENDED
58 | node claude-hooks/memory-mode-controller.js profile balanced
59 |
60 | # Full memory awareness, accept higher latency (< 500ms)
61 | node claude-hooks/memory-mode-controller.js profile memory_aware
62 |
63 | # Auto-adjust based on usage patterns
64 | node claude-hooks/memory-mode-controller.js profile adaptive
65 | ```
66 |
67 | ## How It Works
68 |
69 | ### Trigger Detection
70 |
71 | The system uses a three-tiered approach to detect when memory context would be helpful:
72 |
73 | #### **Tier 1: Instant Detection (< 50ms)**
74 | - Regex-based pattern matching for explicit memory requests
75 | - Cache lookups for previously analyzed messages
76 | - Simple keyword extraction for technical terms
77 |
78 | #### **Tier 2: Fast Analysis (< 150ms)**
79 | - Contextual analysis with project information
80 | - Topic shift detection from conversation history
81 | - Enhanced pattern matching with semantic context
82 |
83 | #### **Tier 3: Intensive Analysis (< 500ms)**
84 | - Deep semantic understanding (when available)
85 | - Full conversation context analysis
86 | - Complex pattern relationships
87 |
88 | ### Example Triggers
89 |
90 | **Explicit Memory Requests** (High Confidence):
91 | ```
92 | "What did we decide about the authentication approach?"
93 | "Remind me how we handled user sessions"
94 | "Remember when we discussed the database schema?"
95 | ```
96 |
97 | **Past Work References** (Medium Confidence):
98 | ```
99 | "Similar to what we implemented last time"
100 | "Like we discussed in the previous meeting"
101 | "The same approach we used for the API"
102 | ```
103 |
104 | **Technical Discussions** (Context-Dependent):
105 | ```
106 | "Let's design the authentication architecture"
107 | "How should we handle database migrations?"
108 | "What's our security strategy?"
109 | ```
110 |
111 | ## Configuration
112 |
113 | ### Basic Configuration
114 |
115 | Edit `claude-hooks/config.json`:
116 |
```json
{
  "naturalTriggers": {
    "enabled": true,
    "sensitivity": 0.7,
    "triggerThreshold": 0.6,
    "cooldownPeriod": 30000,
    "maxMemoriesPerTrigger": 5,
    "adaptiveLearning": true
  }
}
```

Field reference (JSON does not allow inline comments, so the options are described here):

- `sensitivity` (0–1): higher values make trigger detection more sensitive.
- `triggerThreshold` (0–1): confidence required before a trigger fires.
- `cooldownPeriod`: milliseconds to wait between triggers.
- `maxMemoriesPerTrigger`: maximum memories injected per trigger.
- `adaptiveLearning`: learn from user feedback.
129 |
130 | ### Performance Profiles
131 |
132 | Customize performance profiles in the configuration:
133 |
134 | ```json
135 | {
136 | "performance": {
137 | "defaultProfile": "balanced",
138 | "profiles": {
139 | "speed_focused": {
140 | "maxLatency": 100,
141 | "enabledTiers": ["instant"],
142 | "backgroundProcessing": false
143 | },
144 | "balanced": {
145 | "maxLatency": 200,
146 | "enabledTiers": ["instant", "fast"],
147 | "backgroundProcessing": true
148 | },
149 | "memory_aware": {
150 | "maxLatency": 500,
151 | "enabledTiers": ["instant", "fast", "intensive"],
152 | "backgroundProcessing": true
153 | }
154 | }
155 | }
156 | }
157 | ```
158 |
159 | ## Command Line Interface
160 |
161 | ### Memory Mode Controller
162 |
163 | ```bash
164 | # Get current status and configuration
165 | node claude-hooks/memory-mode-controller.js status
166 |
167 | # Switch performance profiles
168 | node claude-hooks/memory-mode-controller.js profile <speed_focused|balanced|memory_aware|adaptive>
169 |
170 | # Adjust sensitivity (0-1, higher = more triggers)
171 | node claude-hooks/memory-mode-controller.js sensitivity 0.8
172 |
173 | # Adjust trigger threshold (0-1, higher = need more confidence)
174 | node claude-hooks/memory-mode-controller.js threshold 0.7
175 |
176 | # Enable/disable natural triggers
177 | node claude-hooks/memory-mode-controller.js enable
178 | node claude-hooks/memory-mode-controller.js disable
179 | node claude-hooks/memory-mode-controller.js toggle
180 |
181 | # List all available profiles
182 | node claude-hooks/memory-mode-controller.js list
183 |
184 | # Reset to defaults
185 | node claude-hooks/memory-mode-controller.js reset
186 | ```
187 |
188 | ## Testing
189 |
190 | Run the comprehensive test suite:
191 |
192 | ```bash
193 | # Full test suite
194 | node claude-hooks/test-natural-triggers.js
195 |
196 | # Test dual protocol functionality
197 | node claude-hooks/test-dual-protocol-hook.js
198 | ```
199 |
200 | The test suite covers:
201 | - Performance management and timing
202 | - Pattern detection accuracy
203 | - Conversation monitoring
204 | - Integration testing
205 | - Performance profile behavior
206 |
207 | ## Performance Optimization
208 |
209 | ### Latency Targets
210 |
211 | | Profile | Target Latency | Use Case |
212 | |---------|---------------|----------|
213 | | Speed Focused | < 100ms | Priority on responsiveness |
214 | | Balanced | < 200ms | Good balance (recommended) |
215 | | Memory Aware | < 500ms | Maximum memory intelligence |
216 | | Adaptive | Variable | Learns user preferences |
217 |
218 | ### Performance Monitoring
219 |
220 | The system automatically tracks:
221 | - Hook execution latency
222 | - Pattern detection accuracy
223 | - User acceptance rates
224 | - Memory query performance
225 |
226 | ### Optimization Tips
227 |
228 | 1. **Start with Balanced Mode**: Good default for most users
229 | 2. **Monitor Performance**: Check status regularly to see average latencies
230 | 3. **Adjust Sensitivity**: Lower sensitivity = fewer false positives
231 | 4. **Use Cooldown Period**: Prevents excessive triggering
232 | 5. **Enable Learning**: Let the system adapt to your preferences
233 |
234 | ## Architecture
235 |
236 | ### Component Overview
237 |
238 | ```
239 | ┌─────────────────────────────────────────────────────────────┐
240 | │ Mid-Conversation Hook │
241 | ├─────────────────────────────────────────────────────────────┤
242 | │ ┌─────────────────┐ ┌──────────────────┐ ┌─────────────┐ │
243 | │ │ Performance │ │ Conversation │ │ Pattern │ │
244 | │ │ Manager │ │ Monitor │ │ Detector │ │
245 | │ │ │ │ │ │ │ │
246 | │ │ • Timing │ │ • Topic Extract │ │ • Regex │ │
247 | │ │ • Profiles │ │ • Semantic Shift │ │ • Context │ │
248 | │ │ • Learning │ │ • Caching │ │ • Learning │ │
249 | │ └─────────────────┘ └──────────────────┘ └─────────────┘ │
250 | └─────────────────────────────────────────────────────────────┘
251 | │
252 | ▼
253 | ┌─────────────────────────────────────────────────────────────┐
254 | │ Memory Client │
255 | │ (Dual Protocol: HTTP + MCP) │
256 | └─────────────────────────────────────────────────────────────┘
257 | ```
258 |
259 | ### Key Classes
260 |
261 | - **`MidConversationHook`**: Main orchestrator for trigger detection and execution
262 | - **`TieredConversationMonitor`**: Multi-tier conversation analysis with performance awareness
263 | - **`AdaptivePatternDetector`**: Natural language pattern detection with learning
264 | - **`PerformanceManager`**: Performance monitoring, budgeting, and profile management
265 | - **`MemoryClient`**: Unified interface for HTTP and MCP memory operations
266 |
267 | ## Troubleshooting
268 |
269 | ### Common Issues
270 |
271 | **Q: Triggers aren't firing when expected**
272 | ```bash
273 | # Check if natural triggers are enabled
274 | node claude-hooks/memory-mode-controller.js status
275 |
276 | # Lower the trigger threshold
277 | node claude-hooks/memory-mode-controller.js threshold 0.5
278 |
279 | # Increase sensitivity
280 | node claude-hooks/memory-mode-controller.js sensitivity 0.8
281 | ```
282 |
283 | **Q: Performance is slower than expected**
284 | ```bash
285 | # Switch to speed-focused mode
286 | node claude-hooks/memory-mode-controller.js profile speed_focused
287 |
288 | # Check current latency
289 | node claude-hooks/memory-mode-controller.js status
290 | ```
291 |
292 | **Q: Too many false positive triggers**
293 | ```bash
294 | # Lower sensitivity
295 | node claude-hooks/memory-mode-controller.js sensitivity 0.5
296 |
297 | # Increase threshold
298 | node claude-hooks/memory-mode-controller.js threshold 0.8
299 |
300 | # Increase cooldown period (edit config.json)
301 | ```
302 |
303 | ### Debug Mode
304 |
305 | Enable detailed logging:
306 |
307 | ```json
308 | {
309 | "logging": {
310 | "level": "debug",
311 | "enableDebug": true,
312 | "logToFile": true
313 | }
314 | }
315 | ```
316 |
317 | ### Performance Analysis
318 |
319 | Monitor hook performance:
320 |
321 | ```bash
322 | # Check status for performance metrics
323 | node claude-hooks/memory-mode-controller.js status
324 |
325 | # Run performance tests
326 | node claude-hooks/test-natural-triggers.js
327 | ```
328 |
329 | ## Integration with Claude Code
330 |
331 | ### Session Start Integration
332 |
333 | The natural triggers work alongside the existing session start hooks:
334 |
335 | 1. **Session Start**: Loads initial memory context (existing functionality)
336 | 2. **Mid-Conversation**: Intelligently refreshes context when patterns suggest it's needed
337 | 3. **Adaptive Learning**: Learns from user interactions to improve trigger accuracy
338 |
339 | ### Memory Storage Integration
340 |
341 | Uses the existing dual-protocol memory service:
342 | - **HTTP Protocol**: Web-based memory service (https://localhost:8443)
343 | - **MCP Protocol**: Direct server process communication
344 | - **Smart Fallback**: Automatically switches protocols if one fails
345 |
346 | ## Roadmap
347 |
348 | ### Planned Enhancements
349 |
350 | 1. **Advanced Semantic Analysis**: Integration with more sophisticated NLP models
351 | 2. **Cross-Session Learning**: Remember user preferences across Claude Code sessions
352 | 3. **Project-Specific Patterns**: Learn patterns specific to different projects
353 | 4. **Real-time Performance Tuning**: Dynamic adjustment based on system resources
354 | 5. **Visual Performance Dashboard**: Web-based interface for monitoring and configuration
355 |
356 | ### Contributing
357 |
358 | The natural triggers system is designed to be extensible:
359 |
360 | 1. **Custom Pattern Categories**: Add new pattern types in `AdaptivePatternDetector`
361 | 2. **Performance Profiles**: Define custom profiles in the configuration
362 | 3. **Integration Points**: Hook into additional Claude Code events
363 | 4. **Learning Algorithms**: Enhance the adaptive learning mechanisms
364 |
365 | ## License
366 |
367 | This system is part of the MCP Memory Service project and follows the same licensing terms.
368 |
369 | ---
370 |
371 | 🧠 **The goal is to make memory awareness feel natural and seamless, like Claude simply "remembers" your conversations and project context.**
```
--------------------------------------------------------------------------------
/claude-hooks/utilities/performance-manager.js:
--------------------------------------------------------------------------------
```javascript
1 | /**
2 | * Performance Manager for Memory Hooks
3 | * Provides intelligent performance monitoring and adaptive hook management
4 | */
5 |
class PerformanceManager {
    /**
     * @param {object} [config] - Hook configuration.
     * @param {string} [config.defaultProfile] - Profile activated at startup (default: 'balanced').
     * @param {object} [config.profiles] - Named profiles that override the built-in fallbacks.
     */
    constructor(config = {}) {
        this.config = config;
        this.metrics = {
            totalLatency: [],          // rolling window of end-to-end latencies (ms), capped at 100
            hookLatencies: new Map(),  // hookName -> [{ latency, tier, timestamp }], capped at 50 each
            userSatisfaction: [],
            degradationEvents: 0
        };

        // Performance tiers: expected latency ceiling and priority per tier.
        this.tiers = {
            instant: { maxLatency: 50, priority: 'critical' },
            fast: { maxLatency: 150, priority: 'high' },
            intensive: { maxLatency: 500, priority: 'medium' }
        };

        // Current performance profile.
        // NOTE: getProfileBudget() runs before userPreferences is assigned below;
        // the adaptive calculations guard against that with optional chaining.
        this.activeProfile = config.defaultProfile || 'balanced';
        this.performanceBudget = this.getProfileBudget(this.activeProfile);

        // Adaptive learning state.
        this.userPreferences = {
            toleranceLevel: 0.5, // 0 = speed focused, 1 = memory focused
            learningEnabled: true,
            feedbackHistory: []
        };
    }

    /**
     * Get performance budget for a profile.
     *
     * Config-defined profiles take precedence; built-in fallbacks are used
     * otherwise, and an unknown name falls back to 'balanced'.
     */
    getProfileBudget(profileName) {
        // Use config profiles first, with hardcoded fallbacks
        const configProfiles = this.config.profiles || {};

        // If profile exists in config, use it (filling in adaptive calculations if missing)
        if (configProfiles[profileName]) {
            const profile = { ...configProfiles[profileName] };

            // Handle adaptive profile calculations if needed
            if (profileName === 'adaptive') {
                profile.maxLatency = profile.maxLatency || this.calculateAdaptiveLatency();
                profile.enabledTiers = profile.enabledTiers || this.calculateAdaptiveTiers();
            }

            return profile;
        }

        // Fallback to hardcoded profiles if not found in config
        const fallbackProfiles = {
            speed_focused: {
                maxLatency: 100,
                enabledTiers: ['instant'],
                backgroundProcessing: false,
                degradeThreshold: 200
            },
            balanced: {
                maxLatency: 200,
                enabledTiers: ['instant', 'fast'],
                backgroundProcessing: true,
                degradeThreshold: 400
            },
            memory_aware: {
                maxLatency: 500,
                enabledTiers: ['instant', 'fast', 'intensive'],
                backgroundProcessing: true,
                degradeThreshold: 1000
            },
            adaptive: {
                maxLatency: this.calculateAdaptiveLatency(),
                enabledTiers: this.calculateAdaptiveTiers(),
                backgroundProcessing: true,
                degradeThreshold: 800,
                autoAdjust: true
            }
        };

        return fallbackProfiles[profileName] || fallbackProfiles.balanced;
    }

    /**
     * Calculate adaptive latency budget (ms) based on observed latencies and
     * the user's learned tolerance. Returns a default of 200 until at least
     * 10 measurements exist; result is clamped to [100, 500].
     */
    calculateAdaptiveLatency() {
        if (this.metrics.totalLatency.length < 10) {
            return 200; // Default for new users
        }

        const avgLatency = this.metrics.totalLatency.reduce((a, b) => a + b, 0) / this.metrics.totalLatency.length;
        const userTolerance = this.userPreferences?.toleranceLevel || 0.5;

        // Adaptive calculation: balance observed tolerance with user preference
        return Math.min(500, Math.max(100, avgLatency * (1 + userTolerance)));
    }

    /**
     * Calculate which tiers should be enabled adaptively from the user's
     * tolerance level (low tolerance -> fewer, faster tiers).
     */
    calculateAdaptiveTiers() {
        const tolerance = this.userPreferences?.toleranceLevel || 0.5;

        if (tolerance < 0.3) return ['instant'];
        if (tolerance < 0.7) return ['instant', 'fast'];
        return ['instant', 'fast', 'intensive'];
    }

    /**
     * Start timing a hook operation. Returns a timing context to be passed
     * to endTiming().
     */
    startTiming(hookName, tier = 'fast') {
        return {
            hookName,
            tier,
            startTime: Date.now(),
            expectedLatency: this.tiers[tier]?.maxLatency || 150
        };
    }

    /**
     * End timing, record metrics, and trigger degradation handling when the
     * measured latency exceeds the active profile's degrade threshold.
     */
    endTiming(timingContext) {
        const endTime = Date.now();
        const latency = endTime - timingContext.startTime;

        // Record metrics
        this.recordHookLatency(timingContext.hookName, latency, timingContext.tier);
        this.recordTotalLatency(latency);

        // Check if we exceeded performance budget
        const exceedsThreshold = latency > this.performanceBudget.degradeThreshold;
        if (exceedsThreshold) {
            this.handlePerformanceDegradation(timingContext.hookName, latency);
        }

        return {
            latency,
            tier: timingContext.tier,
            withinBudget: latency <= this.performanceBudget.maxLatency,
            exceedsThreshold
        };
    }

    /**
     * Record hook-specific latency (rolling window of the last 50 samples).
     */
    recordHookLatency(hookName, latency, tier) {
        if (!this.metrics.hookLatencies.has(hookName)) {
            this.metrics.hookLatencies.set(hookName, []);
        }

        const hookMetrics = this.metrics.hookLatencies.get(hookName);
        hookMetrics.push({ latency, tier, timestamp: Date.now() });

        // Keep only recent measurements (last 50)
        if (hookMetrics.length > 50) {
            hookMetrics.splice(0, hookMetrics.length - 50);
        }
    }

    /**
     * Record total request latency (rolling window of the last 100 samples).
     */
    recordTotalLatency(latency) {
        this.metrics.totalLatency.push(latency);

        // Keep rolling window of recent measurements
        if (this.metrics.totalLatency.length > 100) {
            this.metrics.totalLatency.splice(0, this.metrics.totalLatency.length - 100);
        }
    }

    /**
     * Handle performance degradation: count the event and, for auto-adjusting
     * profiles, trigger adaptive tuning.
     */
    handlePerformanceDegradation(hookName, latency) {
        this.metrics.degradationEvents++;

        console.warn(`[Performance] Hook "${hookName}" exceeded threshold: ${latency}ms`);

        // Adaptive response based on profile
        if (this.performanceBudget.autoAdjust) {
            this.adaptToPerformance(hookName, latency);
        }
    }

    /**
     * Adapt hooks based on performance. When the recent average latency of a
     * hook exceeds 1.5x the budget, emit an optimization suggestion.
     */
    adaptToPerformance(hookName, latency) {
        // If a hook consistently performs poorly, suggest tier reduction
        const hookHistory = this.metrics.hookLatencies.get(hookName) || [];
        const recentHistory = hookHistory.slice(-10);

        if (recentHistory.length >= 5) {
            const avgLatency = recentHistory.reduce((a, b) => a + b.latency, 0) / recentHistory.length;

            if (avgLatency > this.performanceBudget.maxLatency * 1.5) {
                // Suggest moving hook to lower tier or disabling
                this.suggestHookOptimization(hookName, avgLatency);
            }
        }
    }

    /**
     * Suggest hook optimization ('disable' above 300ms average, otherwise
     * 'reduce_tier'). Logged and returned; not applied automatically.
     */
    suggestHookOptimization(hookName, avgLatency) {
        const suggestion = {
            hookName,
            avgLatency,
            suggestion: avgLatency > 300 ? 'disable' : 'reduce_tier',
            timestamp: Date.now()
        };

        console.log(`[Performance] Suggestion for ${hookName}: ${suggestion.suggestion} (avg: ${avgLatency}ms)`);
        return suggestion;
    }

    /**
     * Check if a hook should run based on the active profile: its tier must
     * be enabled, and its recent average latency must not exceed 1.2x the
     * profile budget.
     */
    shouldRunHook(hookName, tier = 'fast') {
        const profile = this.performanceBudget;

        // Check if tier is enabled
        if (!profile.enabledTiers.includes(tier)) {
            return false;
        }

        // Check recent performance
        const hookHistory = this.metrics.hookLatencies.get(hookName);
        if (hookHistory && hookHistory.length > 5) {
            const recentLatencies = hookHistory.slice(-5);
            const avgLatency = recentLatencies.reduce((a, b) => a + b.latency, 0) / recentLatencies.length;

            // Don't run if consistently exceeds budget
            if (avgLatency > profile.maxLatency * 1.2) {
                return false;
            }
        }

        return true;
    }

    /**
     * Switch performance profile.
     *
     * FIX: previously only the four built-in names were accepted, so profiles
     * defined in config.profiles — which getProfileBudget() explicitly
     * supports — could never be activated. Config-defined names are now valid.
     */
    switchProfile(profileName) {
        const builtinProfiles = ['speed_focused', 'balanced', 'memory_aware', 'adaptive'];
        const configProfiles = Object.keys(this.config.profiles || {});
        if (!builtinProfiles.includes(profileName) && !configProfiles.includes(profileName)) {
            throw new Error(`Invalid profile: ${profileName}`);
        }

        this.activeProfile = profileName;
        this.performanceBudget = this.getProfileBudget(profileName);

        console.log(`[Performance] Switched to profile: ${profileName}`);
        return this.performanceBudget;
    }

    /**
     * Learn from user feedback. Records the feedback, updates the tolerance
     * level, and trims history to stay under ~50 entries.
     */
    recordUserFeedback(isPositive, context = {}) {
        if (!this.userPreferences.learningEnabled) return;

        const feedback = {
            positive: isPositive,
            context,
            latency: context.latency || 0,
            timestamp: Date.now()
        };

        this.userPreferences.feedbackHistory.push(feedback);

        // Update tolerance based on feedback
        this.updateUserTolerance(feedback);

        // Keep feedback history manageable
        if (this.userPreferences.feedbackHistory.length > 50) {
            this.userPreferences.feedbackHistory.splice(0, 10);
        }
    }

    /**
     * Update user tolerance based on feedback patterns. Positive feedback at
     * high latency raises tolerance by 0.1; negative feedback above 100ms
     * lowers it by 0.1. Clamped to [0, 1].
     */
    updateUserTolerance(feedback) {
        const recent = this.userPreferences?.feedbackHistory?.slice(-10) || [];
        const positiveCount = recent.filter(f => f.positive).length;
        const negativeCount = recent.length - positiveCount;

        // Ensure userPreferences is initialized
        if (!this.userPreferences) {
            this.userPreferences = {
                toleranceLevel: 0.5,
                learningEnabled: true,
                feedbackHistory: []
            };
        }

        // Adjust tolerance based on feedback patterns
        if (feedback.positive && feedback.latency > 200) {
            // User satisfied with higher latency, increase tolerance
            this.userPreferences.toleranceLevel = Math.min(1.0, this.userPreferences.toleranceLevel + 0.1);
        } else if (!feedback.positive && feedback.latency > 100) {
            // User dissatisfied with latency, decrease tolerance
            this.userPreferences.toleranceLevel = Math.max(0.0, this.userPreferences.toleranceLevel - 0.1);
        }
    }

    /**
     * Get a summary report: active profile, request counts, average
     * latencies (total and per hook), degradation events, and budget.
     */
    getPerformanceReport() {
        const totalRequests = this.metrics.totalLatency.length;
        const avgLatency = totalRequests > 0 ?
            this.metrics.totalLatency.reduce((a, b) => a + b, 0) / totalRequests : 0;

        const hookSummary = {};
        this.metrics.hookLatencies.forEach((latencies, hookName) => {
            const avgHookLatency = latencies.reduce((a, b) => a + b.latency, 0) / latencies.length;
            hookSummary[hookName] = {
                avgLatency: Math.round(avgHookLatency),
                calls: latencies.length,
                tier: latencies[latencies.length - 1]?.tier || 'unknown'
            };
        });

        return {
            profile: this.activeProfile,
            totalRequests,
            avgLatency: Math.round(avgLatency),
            degradationEvents: this.metrics.degradationEvents,
            userTolerance: this.userPreferences.toleranceLevel,
            hookPerformance: hookSummary,
            budget: this.performanceBudget
        };
    }

    /**
     * Reset metrics (useful for testing).
     */
    resetMetrics() {
        this.metrics = {
            totalLatency: [],
            hookLatencies: new Map(),
            userSatisfaction: [],
            degradationEvents: 0
        };
    }
}

module.exports = { PerformanceManager };
```
--------------------------------------------------------------------------------
/scripts/database/simple_timestamp_check.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Production-ready script to analyze timestamp health in MCP Memory Service databases.
4 |
5 | This tool provides comprehensive timestamp analysis for SQLite-based memory storage,
6 | helping identify and diagnose timestamp-related issues that could affect search functionality.
7 | """
8 |
9 | import sys
10 | import sqlite3
11 | import json
12 | import argparse
13 | import logging
14 | from datetime import datetime
15 | from pathlib import Path
16 | from typing import Optional, Dict, Any
17 |
18 | # Configure logging
19 | logging.basicConfig(
20 | level=logging.INFO,
21 | format='%(levelname)s: %(message)s'
22 | )
23 | logger = logging.getLogger(__name__)
24 |
25 |
def analyze_timestamps(db_path: str, output_format: str = 'text', verbose: bool = False) -> Dict[str, Any]:
    """Analyze timestamp fields directly in the database.

    Inspects the ``memories`` table for the dual timestamp columns
    (``created_at`` float epoch and ``created_at_iso`` ISO-8601 string),
    counts rows missing one or both, and grades overall health as
    EXCELLENT / GOOD / WARNING / CRITICAL. Human-readable progress is
    printed only when ``output_format == 'text'``.

    Args:
        db_path: Path to the SQLite database file
        output_format: Output format ('text', 'json', or 'summary')
        verbose: Enable verbose output (extra per-row details in text mode)

    Returns:
        Dictionary containing analysis results. On failure the dict has
        'success': False plus an 'error' message; on success it includes
        'success': True, 'total_memories', 'timestamp_stats', an optional
        'timestamp_range', 'partial_timestamps', and a 'health' assessment.
    """
    results: Dict[str, Any] = {}
    # Sentinel so the finally block has a single, unambiguous cleanup path
    # (previously the connection was closed mid-try AND in finally, guarded
    # by a fragile "'conn' in locals()" check).
    conn: Optional[sqlite3.Connection] = None

    if output_format == 'text':
        print(f"=== Analyzing timestamps in {db_path} ===")

    # Validate database path before attempting a connection.
    db_file = Path(db_path)
    if not db_file.exists():
        error_msg = f"Database file not found: {db_path}"
        logger.error(error_msg)
        return {'error': error_msg, 'success': False}

    try:
        conn = sqlite3.connect(db_path)
        conn.row_factory = sqlite3.Row  # enables column access by name

        # Get basic stats
        cursor = conn.execute("SELECT COUNT(*) as total FROM memories")
        total_count = cursor.fetchone()['total']
        results['total_memories'] = total_count

        if output_format == 'text':
            print(f"📊 Total memories in database: {total_count}")

        # Analyze timestamp coverage in a single aggregate pass.
        cursor = conn.execute("""
            SELECT
                COUNT(*) as total,
                COUNT(created_at) as has_created_at,
                COUNT(created_at_iso) as has_created_at_iso,
                COUNT(CASE WHEN created_at IS NULL AND created_at_iso IS NULL THEN 1 END) as missing_both,
                MIN(created_at) as earliest_ts,
                MAX(created_at) as latest_ts
            FROM memories
        """)

        stats = cursor.fetchone()

        # Store results
        results['timestamp_stats'] = {
            'total': stats['total'],
            'has_created_at': stats['has_created_at'],
            'has_created_at_iso': stats['has_created_at_iso'],
            'missing_both': stats['missing_both'],
            'missing_created_at': stats['total'] - stats['has_created_at'],
            'missing_created_at_iso': stats['total'] - stats['has_created_at_iso']
        }

        if output_format == 'text':
            print(f"\n🕐 TIMESTAMP ANALYSIS:")
            print(f"   Total entries: {stats['total']}")
            print(f"   Has created_at (float): {stats['has_created_at']}")
            print(f"   Has created_at_iso (ISO): {stats['has_created_at_iso']}")
            print(f"   Missing both timestamps: {stats['missing_both']}")

            if stats['has_created_at'] > 0:
                missing_created_at = stats['total'] - stats['has_created_at']
                print(f"   Missing created_at: {missing_created_at}")

            if stats['has_created_at_iso'] > 0:
                missing_created_at_iso = stats['total'] - stats['has_created_at_iso']
                print(f"   Missing created_at_iso: {missing_created_at_iso}")

        # Show timestamp range (only when at least one float timestamp exists).
        if stats['earliest_ts'] and stats['latest_ts']:
            earliest = datetime.fromtimestamp(stats['earliest_ts'])
            latest = datetime.fromtimestamp(stats['latest_ts'])
            results['timestamp_range'] = {
                'earliest': earliest.isoformat(),
                'latest': latest.isoformat(),
                'earliest_float': stats['earliest_ts'],
                'latest_float': stats['latest_ts']
            }

            if output_format == 'text':
                print(f"\n📅 TIMESTAMP RANGE:")
                print(f"   Earliest: {earliest} ({stats['earliest_ts']})")
                print(f"   Latest: {latest} ({stats['latest_ts']})")

        # Find entries missing BOTH timestamp formats (worst case; capped at 10).
        cursor = conn.execute("""
            SELECT id, content_hash, created_at, created_at_iso,
                   SUBSTR(content, 1, 100) as content_preview
            FROM memories
            WHERE created_at IS NULL AND created_at_iso IS NULL
            LIMIT 10
        """)

        problematic = cursor.fetchall()
        results['missing_both_examples'] = len(problematic)

        if output_format == 'text' and problematic:
            print(f"\n⚠️  ENTRIES MISSING BOTH TIMESTAMPS ({len(problematic)} shown):")
            for row in problematic:
                print(f"   ID {row['id']}: {row['content_preview']}...")
                if verbose:
                    print(f"      Hash: {row['content_hash']}")
                    print(f"      created_at: {row['created_at']}")
                    print(f"      created_at_iso: {row['created_at_iso']}")
                    print()

        # Find entries with only one timestamp type (float XOR ISO).
        cursor = conn.execute("""
            SELECT COUNT(*) as count
            FROM memories
            WHERE (created_at IS NULL AND created_at_iso IS NOT NULL)
               OR (created_at IS NOT NULL AND created_at_iso IS NULL)
        """)

        partial_timestamps = cursor.fetchone()['count']
        results['partial_timestamps'] = partial_timestamps

        if output_format == 'text' and partial_timestamps > 0:
            print(f"\n⚠️  ENTRIES WITH PARTIAL TIMESTAMPS: {partial_timestamps}")

            # Show some examples
            cursor = conn.execute("""
                SELECT id, content_hash, created_at, created_at_iso,
                       SUBSTR(content, 1, 60) as content_preview
                FROM memories
                WHERE (created_at IS NULL AND created_at_iso IS NOT NULL)
                   OR (created_at IS NOT NULL AND created_at_iso IS NULL)
                LIMIT 5
            """)

            examples = cursor.fetchall()
            if verbose:
                for row in examples:
                    print(f"   ID {row['id']}: {row['content_preview']}...")
                    print(f"      created_at: {row['created_at']}")
                    print(f"      created_at_iso: {row['created_at_iso']}")
                    print()

        # Health assessment: thresholds are fractions of total rows
        # (<1% missing -> GOOD, <10% -> WARNING, otherwise CRITICAL).
        health_status = 'EXCELLENT'
        health_message = 'All memories have complete timestamps'

        if stats['missing_both'] > 0:
            if stats['missing_both'] < stats['total'] * 0.01:
                health_status = 'GOOD'
                health_message = f"Only {stats['missing_both']}/{stats['total']} missing all timestamps"
            elif stats['missing_both'] < stats['total'] * 0.1:
                health_status = 'WARNING'
                health_message = f"{stats['missing_both']}/{stats['total']} missing all timestamps"
            else:
                health_status = 'CRITICAL'
                health_message = f"{stats['missing_both']}/{stats['total']} missing all timestamps"

        results['health'] = {
            'status': health_status,
            'message': health_message,
            'partial_timestamps': partial_timestamps
        }

        if output_format == 'text':
            print(f"\n🏥 DATABASE HEALTH:")
            emoji = {'EXCELLENT': '✅', 'GOOD': '✅', 'WARNING': '⚠️', 'CRITICAL': '❌'}
            print(f"   {emoji.get(health_status, '?')} {health_status}: {health_message}")

            if partial_timestamps > 0:
                print(f"   ⚠️  {partial_timestamps} entries have only partial timestamp data")
            else:
                print("   ✅ All entries with timestamps have both float and ISO formats")

        results['success'] = True
        return results

    except sqlite3.OperationalError as e:
        if 'no such table: memories' in str(e):
            error_msg = "Database does not contain 'memories' table. Is this a valid MCP Memory Service database?"
        else:
            error_msg = f"Database error: {e}"
        logger.error(error_msg)
        results['error'] = error_msg
        results['success'] = False
    except Exception as e:
        error_msg = f"Unexpected error: {e}"
        logger.error(error_msg)
        results['error'] = error_msg
        results['success'] = False
    finally:
        # Single cleanup path; replaces the redundant mid-try close plus
        # the "'conn' in locals()" guard of the original.
        if conn is not None:
            conn.close()

    return results
224 |
def main():
    """Main entry point with CLI argument parsing.

    Exit codes:
        0 - analysis succeeded and health is EXCELLENT or GOOD
        1 - analysis succeeded but health is WARNING
        2 - analysis succeeded but health is CRITICAL (or unknown status)
        3 - analysis itself failed (missing file, bad database, etc.)
    """
    # Set up argument parser
    parser = argparse.ArgumentParser(
        description='Analyze timestamp health in MCP Memory Service SQLite databases',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s                                    # Use default database path
  %(prog)s /path/to/database.db               # Analyze specific database
  %(prog)s -f json -o results.json            # Output JSON to file
  %(prog)s --verbose                          # Show detailed analysis
  %(prog)s --format summary                   # Quick health check only
        """
    )

    # Default database path for macOS
    default_db_path = Path.home() / "Library" / "Application Support" / "mcp-memory" / "sqlite_vec.db"

    parser.add_argument(
        'database',
        nargs='?',
        default=str(default_db_path),
        help=f'Path to SQLite database (default: {default_db_path})'
    )

    parser.add_argument(
        '-f', '--format',
        choices=['text', 'json', 'summary'],
        default='text',
        help='Output format (default: text)'
    )

    # NOTE(review): --output is only honored in the 'json' branch below;
    # 'text' and 'summary' always write to stdout.
    parser.add_argument(
        '-o', '--output',
        help='Output file path (default: stdout)'
    )

    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Show verbose output with additional details'
    )

    parser.add_argument(
        '--quiet',
        action='store_true',
        help='Suppress all output except errors'
    )

    args = parser.parse_args()

    # Configure logging based on verbosity.
    # NOTE(review): --quiet only raises the logger level; it does not
    # suppress the print() calls made by analyze_timestamps in text mode.
    if args.quiet:
        logging.getLogger().setLevel(logging.ERROR)
    elif args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Analyze the database (text-mode printing happens inside this call)
    results = analyze_timestamps(args.database, args.format, args.verbose)

    # Handle output
    if args.format == 'json':
        # default=str makes non-JSON-native values (e.g. Path) serializable
        output = json.dumps(results, indent=2, default=str)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(output)
            if not args.quiet:
                print(f"Results written to {args.output}")
        else:
            print(output)
    elif args.format == 'summary':
        # Compact health report; falls back to safe defaults for missing keys
        if results.get('success'):
            health = results.get('health', {})
            print(f"Status: {health.get('status', 'UNKNOWN')}")
            print(f"Message: {health.get('message', 'No health data')}")
            print(f"Total Memories: {results.get('total_memories', 0)}")
            missing = results.get('timestamp_stats', {}).get('missing_both', 0)
            if missing > 0:
                print(f"Missing Timestamps: {missing}")
        else:
            print(f"Error: {results.get('error', 'Unknown error')}")

    # Return appropriate exit code (see docstring for the mapping)
    if results.get('success'):
        health_status = results.get('health', {}).get('status', 'UNKNOWN')
        if health_status in ['EXCELLENT', 'GOOD']:
            sys.exit(0)
        elif health_status == 'WARNING':
            sys.exit(1)
        else:
            sys.exit(2)
    else:
        sys.exit(3)

if __name__ == "__main__":
    main()
```
--------------------------------------------------------------------------------
/src/mcp_memory_service/web/oauth/registration.py:
--------------------------------------------------------------------------------
```python
1 | # Copyright 2024 Heinrich Krupp
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """
16 | OAuth 2.1 Dynamic Client Registration implementation for MCP Memory Service.
17 |
18 | Implements RFC 7591 - OAuth 2.0 Dynamic Client Registration Protocol.
19 | """
20 |
21 | import time
22 | import logging
23 | from typing import List, Optional
24 | from urllib.parse import urlparse, ParseResult
25 | from fastapi import APIRouter, HTTPException, status
26 | from pydantic import ValidationError
27 |
28 | from .models import (
29 | ClientRegistrationRequest,
30 | ClientRegistrationResponse,
31 | RegisteredClient
32 | )
33 | from .storage import oauth_storage
34 |
35 | logger = logging.getLogger(__name__)
36 |
37 | router = APIRouter()
38 |
39 |
def validate_redirect_uris(redirect_uris: Optional[List[str]]) -> None:
    """
    Validate redirect URIs according to OAuth 2.1 security requirements.

    Each URI is parsed with urllib.parse (not string matching) so the checks
    cannot be bypassed by crafted prefixes. Schemes are validated against a
    whitelist; known-dangerous schemes are rejected outright; plain HTTP is
    restricted to loopback hosts. Raises HTTPException (400) on the first
    offending URI; returns None when every URI passes or the list is empty.
    """
    if not redirect_uris:
        return

    # Whitelisted schemes. Anything outside this set (and not https/http,
    # which get dedicated handling below) is rejected.
    ALLOWED_SCHEMES = {
        'https', # HTTPS (preferred)
        'http', # HTTP (localhost only)
        # Native app custom schemes (common patterns)
        'com.example.app', # Reverse domain notation
        'myapp', # Simple custom scheme
        # Add more custom schemes as needed, but NEVER allow:
        # javascript:, data:, file:, vbscript:, about:, chrome:, etc.
    }

    # Schemes that enable script injection or local resource access.
    DANGEROUS_SCHEMES = {
        'javascript', 'data', 'file', 'vbscript', 'about', 'chrome',
        'chrome-extension', 'moz-extension', 'ms-appx', 'blob'
    }

    def _reject(description: str) -> None:
        """Raise the standard RFC 7591 invalid_redirect_uri error response."""
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_redirect_uri",
                "error_description": description
            }
        )

    # Case-insensitive view of the whitelist, computed once for the loop.
    lowered_allowed = {s.lower() for s in ALLOWED_SCHEMES}

    for candidate in redirect_uris:
        uri_str = str(candidate).strip()

        if not uri_str:
            _reject("Empty redirect URI not allowed")

        try:
            # Proper URL parsing prevents prefix/substring bypass attacks.
            parsed: ParseResult = urlparse(uri_str)

            if not parsed.scheme:
                _reject(f"Missing scheme in redirect URI: {uri_str}")

            scheme = parsed.scheme.lower()

            # Security check first: block injection-capable schemes.
            if scheme in DANGEROUS_SCHEMES:
                _reject(f"Dangerous scheme '{parsed.scheme}' not allowed in redirect URI")

            if scheme == 'http':
                # Plain HTTP is only acceptable for loopback (native app flows).
                if not parsed.netloc:
                    _reject(f"HTTP URI missing host: {uri_str}")

                # .hostname strips any port from the netloc for us.
                hostname = parsed.hostname
                if not hostname:
                    _reject(f"Cannot extract hostname from HTTP URI: {uri_str}")

                if hostname.lower() not in ('localhost', '127.0.0.1', '::1'):
                    _reject(f"HTTP redirect URIs must use localhost, 127.0.0.1, or ::1. Got: {hostname}")

            elif scheme == 'https':
                # HTTPS may target any host, but must actually have one.
                if not parsed.netloc:
                    _reject(f"HTTPS URI missing host: {uri_str}")

            elif scheme not in lowered_allowed:
                # Custom (native app) schemes must be explicitly whitelisted.
                _reject(f"Unsupported scheme '{parsed.scheme}'. Allowed: {', '.join(sorted(ALLOWED_SCHEMES))}")

        except ValueError as e:
            # urlparse / .hostname raised on a malformed URL.
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail={
                    "error": "invalid_redirect_uri",
                    "error_description": f"Invalid URL format: {uri_str}. Error: {str(e)}"
                }
            )
164 |
165 |
def validate_grant_types(grant_types: List[str]) -> None:
    """Reject any requested grant type outside the server's supported set.

    Raises HTTPException (400, invalid_client_metadata) on the first
    unsupported grant type; returns None when all are supported.
    """
    supported_grant_types = {"authorization_code", "client_credentials"}

    for requested in grant_types:
        if requested in supported_grant_types:
            continue
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_client_metadata",
                "error_description": f"Unsupported grant type: {requested}. Supported: {list(supported_grant_types)}"
            }
        )
179 |
180 |
def validate_response_types(response_types: List[str]) -> None:
    """Reject any requested response type outside the server's supported set.

    Only the authorization-code flow ("code") is supported. Raises
    HTTPException (400, invalid_client_metadata) on the first unsupported
    response type; returns None when all are supported.
    """
    supported_response_types = {"code"}

    # Collect offenders in request order; raise for the first one found.
    unsupported = [rt for rt in response_types if rt not in supported_response_types]
    if unsupported:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_client_metadata",
                "error_description": f"Unsupported response type: {unsupported[0]}. Supported: {list(supported_response_types)}"
            }
        )
194 |
195 |
@router.post("/register", response_model=ClientRegistrationResponse, status_code=status.HTTP_201_CREATED)
async def register_client(request: ClientRegistrationRequest) -> ClientRegistrationResponse:
    """
    OAuth 2.1 Dynamic Client Registration endpoint.

    Implements RFC 7591 - OAuth 2.0 Dynamic Client Registration Protocol.
    Allows clients to register dynamically with the authorization server.
    """
    logger.info("OAuth client registration request received")

    try:
        # Validate supplied metadata first; each validator raises HTTP 400 on failure.
        redirect_uris = [str(uri) for uri in request.redirect_uris] if request.redirect_uris else []
        if redirect_uris:
            validate_redirect_uris(redirect_uris)
        if request.grant_types:
            validate_grant_types(request.grant_types)
        if request.response_types:
            validate_response_types(request.response_types)

        # Fill RFC 7591 defaults for anything the client omitted.
        grant_types = request.grant_types or ["authorization_code"]
        response_types = request.response_types or ["code"]
        token_endpoint_auth_method = request.token_endpoint_auth_method or "client_secret_basic"

        # Mint fresh credentials and persist the registration record.
        client_id = oauth_storage.generate_client_id()
        client_secret = oauth_storage.generate_client_secret()

        registered_client = RegisteredClient(
            client_id=client_id,
            client_secret=client_secret,
            redirect_uris=redirect_uris,
            grant_types=grant_types,
            response_types=response_types,
            token_endpoint_auth_method=token_endpoint_auth_method,
            client_name=request.client_name,
            created_at=time.time()
        )
        await oauth_storage.store_client(registered_client)

        # Echo the registered metadata (including the secret) back to the client.
        response = ClientRegistrationResponse(
            client_id=client_id,
            client_secret=client_secret,
            redirect_uris=registered_client.redirect_uris,
            grant_types=grant_types,
            response_types=response_types,
            token_endpoint_auth_method=token_endpoint_auth_method,
            client_name=request.client_name
        )

        logger.info(f"OAuth client registered successfully: client_id={client_id}, name={request.client_name}")
        return response

    except ValidationError as e:
        logger.warning(f"OAuth client registration validation error: {e}")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "invalid_client_metadata",
                "error_description": f"Invalid client metadata: {str(e)}"
            }
        )
    except HTTPException:
        # Re-raise HTTP exceptions (validation errors)
        raise
    except Exception as e:
        logger.error(f"OAuth client registration error: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail={
                "error": "server_error",
                "error_description": "Internal server error during client registration"
            }
        )
276 |
277 |
@router.get("/clients/{client_id}")
async def get_client_info(client_id: str) -> ClientRegistrationResponse:
    """
    Get information about a registered client.

    Note: This is an extension endpoint, not part of RFC 7591.
    Useful for debugging and client management.
    """
    logger.info(f"Client info request for client_id={client_id}")

    registered = await oauth_storage.get_client(client_id)
    if not registered:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail={
                "error": "invalid_client",
                "error_description": "Client not found"
            }
        )

    # Never echo the real secret back; mask it in the response.
    return ClientRegistrationResponse(
        client_id=registered.client_id,
        client_secret="[REDACTED]",  # Don't expose the secret
        redirect_uris=registered.redirect_uris,
        grant_types=registered.grant_types,
        response_types=registered.response_types,
        token_endpoint_auth_method=registered.token_endpoint_auth_method,
        client_name=registered.client_name
    )
```
--------------------------------------------------------------------------------
/scripts/migration/migrate_timestamps.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | # Copyright 2024 Heinrich Krupp
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | """
17 | Enhanced SQLite migration script to fix timestamp formats in ChromaDB.
18 | This improved version populates all timestamp columns with appropriate values.
19 | """
20 | import sqlite3
21 | import logging
22 | import os
23 | import sys
24 | import platform
25 | from pathlib import Path
26 | import json
27 | import datetime
28 |
29 | # Configure logging
30 | logging.basicConfig(
31 | level=logging.INFO,
32 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
33 | )
34 | logger = logging.getLogger("sqlite_migration")
35 |
def find_claude_chroma_db():
    """
    Finds the Claude desktop ChromaDB storage location based on the operating system.

    Returns the path to chroma.sqlite3 as a string, or None if no known
    location contains the database.
    """
    system = platform.system()
    home = Path.home()

    # Candidate ChromaDB directories per OS, most likely location first.
    candidates_by_os = {
        "Darwin": [
            home / "Library" / "Mobile Documents" / "com~apple~CloudDocs" / "AI" / "claude-memory" / "chroma_db",
            home / "Library" / "Application Support" / "Claude" / "claude-memory" / "chroma_db",
        ],
        "Windows": [
            Path(os.environ.get("LOCALAPPDATA", "")) / "Claude" / "claude-memory" / "chroma_db",
            home / "OneDrive" / "Documents" / "Claude" / "claude-memory" / "chroma_db",
        ],
        "Linux": [
            home / ".config" / "Claude" / "claude-memory" / "chroma_db",
        ],
    }
    possible_locations = list(candidates_by_os.get(system, []))

    # Claude's config file may declare a custom memory path; it wins if present.
    config_by_os = {
        "Darwin": home / "Library" / "Application Support" / "Claude" / "config.json",
        "Windows": Path(os.environ.get("APPDATA", "")) / "Claude" / "config.json",
        "Linux": home / ".config" / "Claude" / "config.json",
    }
    config_path = config_by_os.get(system)
    if config_path is not None and config_path.exists():
        try:
            with open(config_path, 'r') as f:
                config = json.load(f)
            if 'memoryStoragePath' in config:
                mem_path = Path(config['memoryStoragePath']) / "chroma_db"
                possible_locations.insert(0, mem_path)  # Prioritize this path
                logger.info(f"Found memory path in config: {mem_path}")
        except Exception as e:
            logger.warning(f"Error reading config file {config_path}: {e}")

    # First candidate that actually contains a chroma.sqlite3 file wins.
    for location in possible_locations:
        db_path = location / "chroma.sqlite3"
        if db_path.exists():
            logger.info(f"Found ChromaDB at: {db_path}")
            return str(db_path)

    logger.error("Could not find Claude's ChromaDB storage location")
    return None
101 |
def get_table_schema(cursor, table_name):
    """
    Retrieves the schema of a table to understand available columns.

    Returns a dict mapping each column name to its full PRAGMA
    table_info row (cid, name, type, notnull, dflt_value, pk).
    """
    cursor.execute(f"PRAGMA table_info({table_name})")
    return {row[1]: row for row in cursor.fetchall()}
109 |
def timestamp_to_all_types(timestamp_value):
    """
    Convert a timestamp to all possible formats:
    - integer (unix timestamp)
    - float (unix timestamp with milliseconds)
    - string (ISO format, UTC, 'Z' suffix)

    Raises ValueError when the input cannot be interpreted as a timestamp.
    """
    def _parse_string(text):
        # Numeric string first, then ISO-8601, then a few legacy formats.
        try:
            return int(float(text))
        except ValueError:
            pass
        try:
            dt = datetime.datetime.fromisoformat(text.replace('Z', '+00:00'))
            return int(dt.timestamp())
        except ValueError:
            pass
        for fmt in ("%Y-%m-%d %H:%M:%S", "%Y-%m-%dT%H:%M:%S", "%Y/%m/%d %H:%M:%S"):
            try:
                return int(datetime.datetime.strptime(text, fmt).timestamp())
            except ValueError:
                continue
        return None

    if isinstance(timestamp_value, (int, float)):
        timestamp_int = int(timestamp_value)
    elif isinstance(timestamp_value, str):
        timestamp_int = _parse_string(timestamp_value)
    else:
        timestamp_int = None

    if timestamp_int is None:
        raise ValueError(f"Could not convert timestamp value: {timestamp_value}")

    # Render the canonical UTC ISO string with a 'Z' suffix.
    dt = datetime.datetime.fromtimestamp(timestamp_int, tz=datetime.timezone.utc)
    return {
        'int': timestamp_int,
        'float': float(timestamp_int),
        'string': dt.isoformat().replace('+00:00', 'Z'),
    }
158 |
def migrate_timestamps_in_sqlite(db_path):
    """
    Enhanced migration that identifies timestamp data across all columns
    and populates all columns with consistent type values.

    Args:
        db_path: Path to the chroma.sqlite3 file to migrate in place.

    Returns:
        True when every timestamp row ends up with int/float/string values
        populated (or there was nothing to migrate); False on any error or
        when some rows remain incomplete.
    """
    logger.info(f"Connecting to SQLite database at {db_path}")

    if not os.path.exists(db_path):
        logger.error(f"Database file not found: {db_path}")
        return False

    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()

        # Bail out early if this is not a ChromaDB metadata database.
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='embedding_metadata'")
        if not cursor.fetchone():
            logger.error("Table embedding_metadata not found in database.")
            return False

        # Log the schema so migration issues can be diagnosed from the logs.
        schema = get_table_schema(cursor, "embedding_metadata")
        logger.info(f"Table schema: {schema}")

        # Collect every timestamp row regardless of which column holds the value.
        logger.info("Identifying all timestamp entries...")
        cursor.execute("""
            SELECT id, key, string_value, int_value, float_value
            FROM embedding_metadata
            WHERE key = 'timestamp'
        """)
        all_rows = cursor.fetchall()

        if not all_rows:
            logger.warning("No timestamp entries found in the database.")
            return True

        logger.info(f"Found {len(all_rows)} timestamp entries to process")

        processed_count = 0
        failed_count = 0

        for id_val, _key, string_val, int_val, float_val in all_rows:
            # Pick the first non-NULL column as the source of truth,
            # preferring int over float over string.
            if int_val is not None:
                source_value, source_type = int_val, 'int'
            elif float_val is not None:
                source_value, source_type = float_val, 'float'
            elif string_val is not None:
                source_value, source_type = string_val, 'string'
            else:
                logger.warning(f"Row ID {id_val} has no timestamp value in any column")
                failed_count += 1
                continue

            try:
                logger.info(f"Processing ID {id_val}: {source_type} value {source_value}")
                all_formats = timestamp_to_all_types(source_value)

                # Populate every representation so downstream readers can
                # use whichever column they expect.
                cursor.execute("""
                    UPDATE embedding_metadata
                    SET int_value = ?, float_value = ?, string_value = ?
                    WHERE id = ? AND key = 'timestamp'
                """, (all_formats['int'], all_formats['float'], all_formats['string'], id_val))

                processed_count += 1

            except Exception as e:
                logger.error(f"Error processing timestamp for ID {id_val}: {e}")
                failed_count += 1

        # Commit all changes
        conn.commit()

        # Verify: count rows still missing a representation and rows complete.
        cursor.execute("""
            SELECT COUNT(*)
            FROM embedding_metadata
            WHERE key = 'timestamp' AND (int_value IS NULL OR float_value IS NULL OR string_value IS NULL)
        """)
        incomplete = cursor.fetchone()[0]

        cursor.execute("""
            SELECT COUNT(*)
            FROM embedding_metadata
            WHERE key = 'timestamp' AND int_value IS NOT NULL AND float_value IS NOT NULL AND string_value IS NOT NULL
        """)
        complete = cursor.fetchone()[0]

        logger.info("Migration summary:")
        logger.info(f"  - {processed_count} timestamp entries processed successfully")
        logger.info(f"  - {failed_count} timestamp entries failed to process")
        logger.info(f"  - {complete} timestamp entries now have values in all columns")
        logger.info(f"  - {incomplete} timestamp entries still have NULL values in some columns")

        # Surface a few problem rows to aid manual cleanup.
        if incomplete > 0:
            cursor.execute("""
                SELECT id, key, string_value, int_value, float_value
                FROM embedding_metadata
                WHERE key = 'timestamp' AND (int_value IS NULL OR float_value IS NULL OR string_value IS NULL)
                LIMIT 5
            """)
            problem_rows = cursor.fetchall()
            logger.info(f"Examples of incomplete entries: {problem_rows}")

        return incomplete == 0

    except Exception as e:
        logger.error(f"Error during SQLite migration: {e}")
        return False
    finally:
        # Always release the connection, even when an exception aborts the
        # migration — the original version leaked it on the error path.
        if conn is not None:
            conn.close()
286 |
def main():
    """CLI entry point: resolve the database path, run the migration, exit."""
    # An explicit path on the command line overrides auto-discovery.
    if len(sys.argv) >= 2:
        db_path = sys.argv[1]
    else:
        db_path = find_claude_chroma_db()
        if not db_path:
            print("Could not automatically find Claude's ChromaDB location.")
            print("Please provide the path as a command-line argument:")
            print("python migrate_timestamps.py /path/to/chroma.sqlite3")
            sys.exit(1)

    print(f"Using database: {db_path}")

    if migrate_timestamps_in_sqlite(db_path):
        print("\n✅ Migration completed successfully!")
        print("All timestamps now have consistent values in all columns (int_value, float_value, and string_value).")
        sys.exit(0)

    print("\n⚠️ Migration completed with issues. Check the logs for details.")
    sys.exit(1)
310 |
311 | if __name__ == "__main__":
312 | main()
```
--------------------------------------------------------------------------------
/docs/statistics/generate_charts.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Generate statistical visualizations for MCP Memory Service repository.
4 |
5 | This script creates charts from CSV data exports to visualize:
6 | - Monthly commit and release trends
7 | - Activity patterns by hour and day of week
8 | - Contributor breakdown
9 | - October 2025 sprint visualization
10 |
11 | Usage:
12 | python generate_charts.py
13 |
14 | Output:
15 | PNG files in docs/statistics/charts/
16 | """
17 |
18 | import pandas as pd
19 | import matplotlib.pyplot as plt
20 | import seaborn as sns
21 | from pathlib import Path
22 | import numpy as np
23 |
# Global plotting style shared by every chart
sns.set_style("whitegrid")
plt.rcParams['font.size'] = 10
plt.rcParams['figure.figsize'] = (12, 6)

# Input data and chart output locations, resolved relative to this script
DATA_DIR = Path(__file__).parent / "data"
CHARTS_DIR = Path(__file__).parent / "charts"
CHARTS_DIR.mkdir(exist_ok=True)
33 |
def create_monthly_activity_chart():
    """Create dual-axis chart showing commits and releases over time.

    Reads monthly_activity.csv (columns: month, commits, releases) and
    writes charts/monthly_activity.png.
    """
    df = pd.read_csv(DATA_DIR / "monthly_activity.csv")

    fig, ax1 = plt.subplots(figsize=(14, 7))

    # Commits line (left axis)
    color = 'tab:blue'
    ax1.set_xlabel('Month', fontsize=12, fontweight='bold')
    ax1.set_ylabel('Commits', color=color, fontsize=12, fontweight='bold')
    ax1.plot(df['month'], df['commits'], color=color, marker='o', linewidth=2.5,
             markersize=8, label='Commits')
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.grid(True, alpha=0.3)

    # Releases bars (right axis)
    ax2 = ax1.twinx()
    color = 'tab:orange'
    ax2.set_ylabel('Releases', color=color, fontsize=12, fontweight='bold')
    ax2.bar(df['month'], df['releases'], color=color, alpha=0.6, label='Releases')
    ax2.tick_params(axis='y', labelcolor=color)

    plt.title('MCP Memory Service - Monthly Activity (Dec 2024 - Oct 2025)',
              fontsize=14, fontweight='bold', pad=20)

    # Pin tick positions before relabeling; a bare set_xticklabels() warns on
    # modern matplotlib (FixedFormatter used without a FixedLocator).
    ax1.set_xticks(range(len(df)))
    ax1.set_xticklabels(df['month'], rotation=45, ha='right')

    # Highlight October 2025 BEFORE building the legend so its label shows up
    # (the original drew the span after the legend, leaving the label dead).
    # Guard against the month missing from the CSV instead of crashing.
    oct_rows = df.index[df['month'] == '2025-10']
    if len(oct_rows):
        oct_idx = oct_rows[0]
        ax1.axvspan(oct_idx - 0.4, oct_idx + 0.4, alpha=0.2, color='red',
                    label='October Sprint')

    # Combine both axes' handles into a single legend
    lines1, labels1 = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax1.legend(lines1 + lines2, labels1 + labels2, loc='upper left', fontsize=10)

    plt.tight_layout()
    plt.savefig(CHARTS_DIR / "monthly_activity.png", dpi=300, bbox_inches='tight')
    print("✅ Created: monthly_activity.png")
    plt.close()
77 |
def create_activity_heatmap():
    """Create bar charts showing commit activity by hour and by day of week.

    Reads activity_by_hour.csv (hour, commits) and activity_by_day.csv
    (day_of_week, commits, percentage); writes charts/activity_patterns.png.
    (Despite the function name, this renders two bar charts, not a heatmap —
    a true day x hour heatmap would need combined day+hour data.)
    """
    hourly_df = pd.read_csv(DATA_DIR / "activity_by_hour.csv")
    daily_df = pd.read_csv(DATA_DIR / "activity_by_day.csv")

    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(14, 10))

    # Hourly activity bar chart
    ax1.bar(hourly_df['hour'], hourly_df['commits'], color='steelblue', alpha=0.8)
    ax1.set_xlabel('Hour of Day', fontsize=12, fontweight='bold')
    ax1.set_ylabel('Number of Commits', fontsize=12, fontweight='bold')
    ax1.set_title('Activity by Hour of Day', fontsize=14, fontweight='bold', pad=15)
    ax1.grid(axis='y', alpha=0.3)

    # Redraw the peak hours (20-22) in red over the blue bars
    peak_hours = [20, 21, 22]
    for hour in peak_hours:
        idx = hourly_df[hourly_df['hour'] == hour].index[0]
        ax1.bar(hour, hourly_df.loc[idx, 'commits'], color='red', alpha=0.7)

    ax1.text(21, 170, 'Peak Hours\n(19:00-23:00)\n46% of commits',
             ha='center', va='bottom', fontsize=11,
             bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))

    # Day-of-week activity, ordered Monday..Sunday, weekends in orange
    day_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    daily_sorted = daily_df.set_index('day_of_week').loc[day_order].reset_index()

    colors = ['steelblue' if day not in ['Saturday', 'Sunday'] else 'orange'
              for day in daily_sorted['day_of_week']]

    ax2.barh(daily_sorted['day_of_week'], daily_sorted['commits'], color=colors, alpha=0.8)
    ax2.set_xlabel('Number of Commits', fontsize=12, fontweight='bold')
    ax2.set_ylabel('Day of Week', fontsize=12, fontweight='bold')
    ax2.set_title('Activity by Day of Week', fontsize=14, fontweight='bold', pad=15)
    ax2.grid(axis='x', alpha=0.3)

    # Percentage labels just past the end of each bar
    for idx, row in daily_sorted.iterrows():
        ax2.text(row['commits'] + 5, idx, row['percentage'],
                 va='center', fontsize=10)

    ax2.text(250, 5.5, 'Weekend\nWarrior\n39% total',
             ha='center', va='center', fontsize=11,
             bbox=dict(boxstyle='round', facecolor='orange', alpha=0.3))

    plt.tight_layout()
    plt.savefig(CHARTS_DIR / "activity_patterns.png", dpi=300, bbox_inches='tight')
    print("✅ Created: activity_patterns.png")
    plt.close()
135 |
def create_contributor_pie_chart():
    """Create pie chart showing contributor distribution."""
    df = pd.read_csv(DATA_DIR / "contributors.csv")

    # Henry, doobidoo and Heinrich Krupp are aliases for the same maintainer.
    maintainer_aliases = ['Henry', 'doobidoo', 'Heinrich Krupp']
    alias_mask = df['contributor'].isin(maintainer_aliases)
    primary_commits = df.loc[alias_mask, 'commits'].sum()
    other_commits = df.loc[~alias_mask, 'commits'].sum()

    labels = ['Primary Maintainer\n(Henry + aliases)', 'External Contributors']
    sizes = [primary_commits, other_commits]
    colors = ['#FF9999', '#66B2FF']
    explode = (0.1, 0)

    fig, ax = plt.subplots(figsize=(10, 8))
    wedges, texts, autotexts = ax.pie(sizes, explode=explode, labels=labels, colors=colors,
                                      autopct='%1.1f%%', shadow=True, startangle=90,
                                      textprops={'fontsize': 12, 'fontweight': 'bold'})

    # Emphasize the percentage labels drawn inside the wedges.
    for autotext in autotexts:
        autotext.set_color('white')
        autotext.set_fontsize(14)
        autotext.set_fontweight('bold')

    ax.set_title('Contributor Distribution (1,536 total commits)',
                 fontsize=14, fontweight='bold', pad=20)

    # Side legend listing the top ten individual contributors.
    top_contributors = df.head(10)
    legend_labels = [f"{row['contributor']}: {row['commits']} ({row['percentage']})"
                     for _, row in top_contributors.iterrows()]

    plt.legend(legend_labels, title="Top 10 Contributors",
               loc='center left', bbox_to_anchor=(1, 0, 0.5, 1), fontsize=9)

    plt.tight_layout()
    plt.savefig(CHARTS_DIR / "contributors.png", dpi=300, bbox_inches='tight')
    print("✅ Created: contributors.png")
    plt.close()
176 |
def create_october_sprint_chart():
    """Create detailed visualization of October 2025 sprint."""
    # Daily commit counts for October 2025 (days with zero commits omitted).
    oct_days = [2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
    oct_commits = [16, 46, 26, 9, 2, 14, 9, 1, 7, 13, 3, 12, 4, 5, 5, 9, 15, 16, 5, 38, 5, 24, 1, 12, 12]

    fig, ax = plt.subplots(figsize=(16, 7))
    bars = ax.bar(oct_days, oct_commits, color='steelblue', alpha=0.8)

    # Recolor the release-sprint days (Oct 28-31) in red.
    sprint_days = {28, 29, 30, 31}
    for bar, day in zip(bars, oct_days):
        if day in sprint_days:
            bar.set_color('red')
            bar.set_alpha(0.9)

    ax.set_xlabel('Day of October 2025', fontsize=12, fontweight='bold')
    ax.set_ylabel('Number of Commits', fontsize=12, fontweight='bold')
    ax.set_title('October 2025: The Sprint Month (310 commits, 65 releases)',
                 fontsize=14, fontweight='bold', pad=20)
    ax.grid(axis='y', alpha=0.3)

    # Call out the single busiest day and the end-of-month release burst.
    ax.annotate('Peak Day\n46 commits', xy=(3, 46), xytext=(3, 52),
                ha='center', fontsize=10, fontweight='bold',
                bbox=dict(boxstyle='round', facecolor='yellow', alpha=0.5),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))

    ax.annotate('13 Releases\nin 4 Days', xy=(29.5, 35), xytext=(29.5, 42),
                ha='center', fontsize=11, fontweight='bold',
                bbox=dict(boxstyle='round', facecolor='red', alpha=0.3),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))

    # Summary box describing the sprint itself.
    sprint_text = 'Oct 28-31 Sprint:\n• v8.12.0 → v8.15.1\n• 13 releases\n• 49 commits\n• Production bugs fixed'
    ax.text(8, 40, sprint_text, fontsize=11,
            bbox=dict(boxstyle='round', facecolor='lightblue', alpha=0.5))

    plt.tight_layout()
    plt.savefig(CHARTS_DIR / "october_sprint.png", dpi=300, bbox_inches='tight')
    print("✅ Created: october_sprint.png")
    plt.close()
221 |
def create_growth_trajectory():
    """Create cumulative commits chart showing growth over time.

    Reads monthly_activity.csv and writes charts/growth_trajectory.png with
    cumulative commits (left axis, filled) and cumulative releases (right
    axis, dashed).
    """
    df = pd.read_csv(DATA_DIR / "monthly_activity.csv")

    # Running totals over the monthly series
    df['cumulative_commits'] = df['commits'].cumsum()
    df['cumulative_releases'] = df['releases'].cumsum()

    fig, ax1 = plt.subplots(figsize=(14, 7))

    # Cumulative commits (left axis)
    color = 'tab:blue'
    ax1.set_xlabel('Month', fontsize=12, fontweight='bold')
    ax1.set_ylabel('Cumulative Commits', color=color, fontsize=12, fontweight='bold')
    ax1.plot(df['month'], df['cumulative_commits'], color=color, marker='o',
             linewidth=3, markersize=8, label='Cumulative Commits')
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.grid(True, alpha=0.3)
    ax1.fill_between(range(len(df)), df['cumulative_commits'], alpha=0.3, color=color)

    # Cumulative releases (right axis)
    ax2 = ax1.twinx()
    color = 'tab:green'
    ax2.set_ylabel('Cumulative Releases', color=color, fontsize=12, fontweight='bold')
    ax2.plot(df['month'], df['cumulative_releases'], color=color, marker='s',
             linewidth=3, markersize=8, label='Cumulative Releases', linestyle='--')
    ax2.tick_params(axis='y', labelcolor=color)

    plt.title('MCP Memory Service - Growth Trajectory (10 Months)',
              fontsize=14, fontweight='bold', pad=20)

    # Pin tick positions before relabeling; a bare set_xticklabels() warns on
    # modern matplotlib (FixedFormatter used without a FixedLocator).
    ax1.set_xticks(range(len(df)))
    ax1.set_xticklabels(df['month'], rotation=45, ha='right')

    # Milestone annotations (coordinates assume the Dec 2024 - Oct 2025 data)
    ax1.annotate('First Release\nv1.0', xy=(0, 55), xytext=(1, 200),
                 ha='center', fontsize=10,
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.3'))

    ax1.annotate('1,000th\nCommit', xy=(8, 1000), xytext=(7, 1200),
                 ha='center', fontsize=10,
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=-0.3'))

    # Combine both axes' handles into a single legend
    lines1, labels1 = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax1.legend(lines1 + lines2, labels1 + labels2, loc='upper left', fontsize=10)

    plt.tight_layout()
    plt.savefig(CHARTS_DIR / "growth_trajectory.png", dpi=300, bbox_inches='tight')
    print("✅ Created: growth_trajectory.png")
    plt.close()
275 |
def main():
    """Generate all charts."""
    print("🎨 Generating statistical visualizations...")
    print()

    # Run every chart builder in order; each one saves its own PNG.
    for build_chart in (
        create_monthly_activity_chart,
        create_activity_heatmap,
        create_contributor_pie_chart,
        create_october_sprint_chart,
        create_growth_trajectory,
    ):
        build_chart()

    print()
    print("✅ All charts generated successfully!")
    print(f"📁 Output directory: {CHARTS_DIR}")
    print()
    print("Generated charts:")
    print("  1. monthly_activity.png - Commits and releases over time")
    print("  2. activity_patterns.png - Hourly and daily patterns")
    print("  3. contributors.png - Contributor distribution")
    print("  4. october_sprint.png - October 2025 detailed view")
    print("  5. growth_trajectory.png - Cumulative growth")
297 |
298 | if __name__ == "__main__":
299 | main()
300 |
```
--------------------------------------------------------------------------------
/docs/natural-memory-triggers/installation-guide.md:
--------------------------------------------------------------------------------
```markdown
1 | # Natural Memory Triggers v7.1.3 - Installation Guide
2 |
3 | This guide provides detailed installation instructions for Natural Memory Triggers, the intelligent automatic memory awareness system for Claude Code.
4 |
5 | ## Prerequisites
6 |
7 | Before installing Natural Memory Triggers, ensure you have:
8 |
9 | - ✅ **Claude Code CLI** installed and working (`claude --version`)
10 | - ✅ **Node.js 14+** for hook execution (`node --version`)
11 | - ✅ **MCP Memory Service** running (`curl -k https://localhost:8443/api/health`)
12 | - ✅ **Valid configuration** at `~/.claude/hooks/config.json`
13 |
14 | ## Installation Methods
15 |
16 | ### Method 1: Automated Installation (Recommended)
17 |
18 | The automated installer handles the complete setup with comprehensive testing:
19 |
20 | ```bash
21 | # Navigate to the claude-hooks directory
22 | cd mcp-memory-service/claude-hooks
23 |
24 | # Install with unified Python installer
25 | python install_hooks.py --natural-triggers
26 | ```
27 |
28 | **What the installer does:**
29 |
30 | 1. **System Verification**
31 | - Checks Claude Code CLI availability
32 | - Validates Node.js version compatibility
33 | - Tests MCP Memory Service connectivity
34 | - Verifies directory permissions
35 |
36 | 2. **Backup Operations**
37 | - Creates backup of existing `~/.claude/hooks/` directory
38 | - Preserves current configuration files
39 | - Backs up existing hook implementations
40 |
41 | 3. **Component Installation**
42 | - Copies Natural Memory Triggers core components
43 | - Installs multi-tier conversation monitor
44 | - Sets up performance manager and git analyzer
45 | - Installs CLI management controller
46 |
47 | 4. **Configuration Setup**
48 | - Merges new configuration sections with existing settings
49 | - Preserves user customizations
50 | - Adds Natural Memory Triggers specific settings
51 | - Configures performance profiles
52 |
53 | 5. **Testing and Validation**
54 | - Runs 18 comprehensive tests
55 | - Tests semantic analysis functionality
56 | - Validates CLI controller operations
57 | - Checks memory service integration
58 |
59 | 6. **Installation Report**
60 | - Provides detailed installation summary
61 | - Lists installed components and their versions
62 | - Shows configuration status and recommendations
63 | - Provides next steps and usage instructions
64 |
65 | ### Method 2: Manual Installation
66 |
67 | For users who prefer manual control or need custom configurations:
68 |
69 | #### Step 1: Directory Setup
70 |
71 | ```bash
72 | # Create required directory structure
73 | mkdir -p ~/.claude/hooks/{core,utilities,tests}
74 |
75 | # Verify directory creation
76 | ls -la ~/.claude/hooks/
77 | ```
78 |
79 | #### Step 2: Copy Core Components
80 |
81 | ```bash
82 | # Copy main hook implementation
83 | cp claude-hooks/core/mid-conversation.js ~/.claude/hooks/core/
84 |
85 | # Copy utility modules
86 | cp claude-hooks/utilities/tiered-conversation-monitor.js ~/.claude/hooks/utilities/
87 | cp claude-hooks/utilities/performance-manager.js ~/.claude/hooks/utilities/
88 | cp claude-hooks/utilities/git-analyzer.js ~/.claude/hooks/utilities/
89 | cp claude-hooks/utilities/mcp-client.js ~/.claude/hooks/utilities/
90 |
91 | # Copy CLI management system
92 | cp claude-hooks/memory-mode-controller.js ~/.claude/hooks/
93 |
94 | # Copy test suite
95 | cp claude-hooks/test-natural-triggers.js ~/.claude/hooks/
96 | ```
97 |
98 | #### Step 3: Configuration Setup
99 |
100 | ```bash
101 | # Copy base configuration if it doesn't exist
102 | if [ ! -f ~/.claude/hooks/config.json ]; then
103 | cp claude-hooks/config.template.json ~/.claude/hooks/config.json
104 | fi
105 |
106 | # Edit configuration file
107 | nano ~/.claude/hooks/config.json
108 | ```
109 |
110 | Add the following sections to your configuration:
111 |
112 | ```json
113 | {
114 | "naturalTriggers": {
115 | "enabled": true,
116 | "triggerThreshold": 0.6,
117 | "cooldownPeriod": 30000,
118 | "maxMemoriesPerTrigger": 5
119 | },
120 | "performance": {
121 | "defaultProfile": "balanced",
122 | "enableMonitoring": true,
123 | "autoAdjust": true,
124 | "profiles": {
125 | "speed_focused": {
126 | "maxLatency": 100,
127 | "enabledTiers": ["instant"],
128 | "backgroundProcessing": false,
129 | "degradeThreshold": 200,
130 | "description": "Fastest response, minimal memory awareness"
131 | },
132 | "balanced": {
133 | "maxLatency": 200,
134 | "enabledTiers": ["instant", "fast"],
135 | "backgroundProcessing": true,
136 | "degradeThreshold": 400,
137 | "description": "Moderate latency, smart memory triggers"
138 | },
139 | "memory_aware": {
140 | "maxLatency": 500,
141 | "enabledTiers": ["instant", "fast", "intensive"],
142 | "backgroundProcessing": true,
143 | "degradeThreshold": 1000,
144 | "description": "Full memory awareness, accept higher latency"
145 | },
146 | "adaptive": {
147 | "autoAdjust": true,
148 | "degradeThreshold": 800,
149 | "backgroundProcessing": true,
150 | "description": "Auto-adjust based on performance and user preferences"
151 | }
152 | }
153 | }
154 | }
155 | ```
156 |
157 | #### Step 4: Set File Permissions
158 |
159 | ```bash
160 | # Make hook files executable
161 | chmod +x ~/.claude/hooks/core/*.js
162 | chmod +x ~/.claude/hooks/memory-mode-controller.js
163 | chmod +x ~/.claude/hooks/test-natural-triggers.js
164 |
165 | # Set appropriate directory permissions
166 | chmod 755 ~/.claude/hooks
167 | chmod -R 644 ~/.claude/hooks/*.json
168 | ```
169 |
170 | #### Step 5: Test Installation
171 |
172 | ```bash
173 | # Run comprehensive test suite
174 | cd ~/.claude/hooks
175 | node test-natural-triggers.js
176 |
177 | # Test CLI controller
178 | node memory-mode-controller.js status
179 |
180 | # Test specific components
181 | node -e "
182 | const { TieredConversationMonitor } = require('./utilities/tiered-conversation-monitor');
183 | const monitor = new TieredConversationMonitor();
184 | console.log('✅ TieredConversationMonitor loaded successfully');
185 | "
186 | ```
187 |
188 | ## Installation Verification
189 |
190 | ### Test 1: System Components
191 |
192 | ```bash
193 | # Verify all components are in place
194 | ls ~/.claude/hooks/core/mid-conversation.js
195 | ls ~/.claude/hooks/utilities/tiered-conversation-monitor.js
196 | ls ~/.claude/hooks/utilities/performance-manager.js
197 | ls ~/.claude/hooks/utilities/git-analyzer.js
198 | ls ~/.claude/hooks/memory-mode-controller.js
199 | ```
200 |
201 | ### Test 2: Configuration Validation
202 |
203 | ```bash
204 | # Check configuration syntax
205 | cat ~/.claude/hooks/config.json | node -e "
206 | try {
207 | const config = JSON.parse(require('fs').readFileSync(0, 'utf8'));
208 | console.log('✅ Configuration JSON is valid');
209 | console.log('Natural Triggers enabled:', config.naturalTriggers?.enabled);
210 | console.log('Default profile:', config.performance?.defaultProfile);
211 | } catch (error) {
212 | console.error('❌ Configuration error:', error.message);
213 | }
214 | "
215 | ```
216 |
217 | ### Test 3: CLI Controller
218 |
219 | ```bash
220 | # Test CLI management system
221 | node ~/.claude/hooks/memory-mode-controller.js status
222 | node ~/.claude/hooks/memory-mode-controller.js profiles
223 | ```
224 |
225 | Expected output:
226 | ```
227 | 📊 Memory Hook Status
228 | Current Profile: balanced
229 | Description: Moderate latency, smart memory triggers
230 | Natural Triggers: enabled
231 | Sensitivity: 0.6
232 | Performance: 0ms avg latency, 0 degradation events
233 | ```
234 |
235 | ### Test 4: Memory Service Integration
236 |
237 | ```bash
238 | # Test memory service connectivity
239 | node ~/.claude/hooks/memory-mode-controller.js test "What did we decide about authentication?"
240 | ```
241 |
242 | Expected behavior:
243 | - Should attempt to analyze the test query
244 | - Should show tier processing (instant → fast → intensive)
245 | - Should either retrieve relevant memories or show "no relevant memories found"
246 | - Should complete without errors
247 |
248 | ## Post-Installation Configuration
249 |
250 | ### Performance Profile Selection
251 |
252 | Choose the appropriate profile for your workflow:
253 |
254 | ```bash
255 | # For quick coding sessions (minimal interruption)
256 | node memory-mode-controller.js profile speed_focused
257 |
258 | # For general development work (recommended)
259 | node memory-mode-controller.js profile balanced
260 |
261 | # For architecture and research work (maximum context)
262 | node memory-mode-controller.js profile memory_aware
263 |
264 | # For adaptive learning (system learns your preferences)
265 | node memory-mode-controller.js profile adaptive
266 | ```
267 |
268 | ### Sensitivity Tuning
269 |
270 | Adjust trigger sensitivity based on your preferences:
271 |
272 | ```bash
273 | # More triggers (lower threshold)
274 | node memory-mode-controller.js sensitivity 0.4
275 |
276 | # Balanced triggers (recommended)
277 | node memory-mode-controller.js sensitivity 0.6
278 |
279 | # Fewer triggers (higher threshold)
280 | node memory-mode-controller.js sensitivity 0.8
281 | ```
282 |
283 | ### Git Integration Setup
284 |
285 | For enhanced Git-aware context, ensure your repository has:
286 |
287 | - **Recent commit history** (Natural Memory Triggers analyzes last 14 days)
288 | - **Readable CHANGELOG.md** (parsed for version context)
289 | - **Proper git configuration** (for commit author and timestamps)
290 |
291 | ## Troubleshooting Installation Issues
292 |
293 | ### Issue 1: Node.js Not Found
294 |
295 | **Error**: `node: command not found`
296 |
297 | **Solution**:
298 | ```bash
299 | # Install Node.js (version 14 or higher)
300 | # macOS with Homebrew:
301 | brew install node
302 |
303 | # Ubuntu/Debian:
304 | sudo apt update && sudo apt install nodejs npm
305 |
306 | # Windows:
307 | # Download from https://nodejs.org/
308 |
309 | # Verify installation
310 | node --version
311 | npm --version
312 | ```
313 |
314 | ### Issue 2: Permission Errors
315 |
316 | **Error**: `Permission denied` when running hooks
317 |
318 | **Solution**:
319 | ```bash
320 | # Fix file permissions
321 | chmod +x ~/.claude/hooks/core/*.js
322 | chmod +x ~/.claude/hooks/memory-mode-controller.js
323 |
324 | # Fix directory permissions
325 | chmod 755 ~/.claude/hooks
326 | chmod -R 644 ~/.claude/hooks/*.json
327 | ```
328 |
329 | ### Issue 3: Memory Service Connection Failed
330 |
331 | **Error**: `Network error` or `ENOTFOUND`
332 |
333 | **Diagnosis**:
334 | ```bash
335 | # Test memory service directly
336 | curl -k https://localhost:8443/api/health
337 |
338 | # Check configuration
339 | cat ~/.claude/hooks/config.json | grep -A 5 "memoryService"
340 | ```
341 |
342 | **Solutions**:
343 | 1. **Start Memory Service**: `uv run memory server`
344 | 2. **Check API Key**: Ensure valid API key in configuration
345 | 3. **Firewall Settings**: Verify port 8443 is accessible
346 | 4. **SSL Issues**: Self-signed certificates may need special handling
347 |
348 | ### Issue 4: Configuration Conflicts
349 |
350 | **Error**: `Parse error: Expected property name or '}' in JSON`
351 |
352 | **Solution**:
353 | ```bash
354 | # Validate JSON syntax
355 | cat ~/.claude/hooks/config.json | python -m json.tool
356 |
357 | # If corrupted, restore from backup
358 | cp ~/.claude/hooks/config.json.backup ~/.claude/hooks/config.json
359 |
360 | # Or reset to defaults
361 | node memory-mode-controller.js reset
362 | ```
363 |
364 | ### Issue 5: Claude Code Integration Issues
365 |
366 | **Error**: Hooks not detected by Claude Code
367 |
368 | **Diagnosis**:
369 | ```bash
370 | # Check Claude Code settings
371 | cat ~/.claude/settings.json | grep -A 10 "hooks"
372 |
373 | # Verify hook files location
374 | ls -la ~/.claude/hooks/core/
375 | ```
376 |
377 | **Solutions**:
378 | 1. **Correct Location**: Ensure hooks are in `~/.claude/hooks/` not `~/.claude-code/hooks/`
379 | 2. **Settings Update**: Update `~/.claude/settings.json` with correct paths
380 | 3. **Restart Claude Code**: Some changes require restart
381 | 4. **Debug Mode**: Run `claude --debug hooks` to see hook loading messages
382 |
383 | ## Installation Verification Checklist
384 |
385 | - [ ] All core components copied to `~/.claude/hooks/`
386 | - [ ] Configuration file includes `naturalTriggers` and `performance` sections
387 | - [ ] File permissions set correctly (executable hooks, readable configs)
388 | - [ ] CLI controller responds to `status` command
389 | - [ ] Test suite passes all 18 tests
390 | - [ ] Memory service connectivity verified
391 | - [ ] Performance profile selected and applied
392 | - [ ] Git integration working (if applicable)
393 | - [ ] Claude Code detects and loads hooks
394 |
395 | ## Next Steps
396 |
397 | After successful installation:
398 |
399 | 1. **Read the User Guide**: Comprehensive usage instructions at [Natural Memory Triggers Guide](https://github.com/doobidoo/mcp-memory-service/wiki/Natural-Memory-Triggers-v7.1.0)
400 |
401 | 2. **Try the System**: Ask Claude Code questions like:
402 | - "What approach did we use for authentication?"
403 | - "How did we handle error handling in this project?"
404 | - "What were the main architectural decisions we made?"
405 |
406 | 3. **Monitor Performance**: Check system metrics periodically:
407 | ```bash
408 | node memory-mode-controller.js metrics
409 | ```
410 |
411 | 4. **Customize Settings**: Adjust profiles and sensitivity based on your workflow:
412 | ```bash
413 | node memory-mode-controller.js profile memory_aware
414 | node memory-mode-controller.js sensitivity 0.7
415 | ```
416 |
417 | 5. **Provide Feedback**: The adaptive profile learns from your usage patterns, so use the system regularly for best results.
418 |
419 | ---
420 |
421 | **Natural Memory Triggers v7.1.3** transforms Claude Code into an intelligent development assistant that automatically understands when you need context from your project history! 🚀
```
--------------------------------------------------------------------------------
/scripts/maintenance/recover_timestamps_from_cloudflare.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Timestamp Recovery Script - Recover corrupted timestamps from Cloudflare
4 |
5 | This script helps recover from the timestamp regression bug (v8.25.0-v8.27.0)
6 | where created_at timestamps were reset during metadata sync operations.
7 |
8 | If you use the hybrid backend and Cloudflare has the correct timestamps,
9 | this script will restore them to your local SQLite database.
10 |
11 | Usage:
12 | python scripts/maintenance/recover_timestamps_from_cloudflare.py --dry-run
13 | python scripts/maintenance/recover_timestamps_from_cloudflare.py # Apply fixes
14 | """
15 |
16 | import asyncio
17 | import sys
18 | import argparse
19 | import time
20 | from datetime import datetime
21 | from pathlib import Path
22 | from typing import List, Tuple
23 |
24 | # Add project root to path
25 | project_root = Path(__file__).parent.parent.parent
26 | sys.path.insert(0, str(project_root / "src"))
27 |
28 | from mcp_memory_service.storage.factory import create_storage_instance
29 | from mcp_memory_service.storage.hybrid import HybridMemoryStorage
30 | from mcp_memory_service.config import get_config
31 |
32 |
class TimestampRecovery:
    """Compare local SQLite timestamps against Cloudflare and restore them.

    Walks every memory present in both the local SQLite-vec database and the
    Cloudflare backend of a hybrid storage setup. Where the local created_at
    looks like a victim of the v8.25.0-v8.27.0 regression (timestamps reset
    to "now" during metadata sync), the Cloudflare values are written back to
    the local database. Supports a dry-run mode that only reports findings.
    """

    def __init__(self, hybrid_storage: HybridMemoryStorage, dry_run: bool = True):
        """Bind the two sides of the hybrid storage and reset counters.

        Args:
            hybrid_storage: Hybrid backend whose ``primary`` is the local
                SQLite-vec store (the side being repaired) and whose
                ``secondary`` is Cloudflare (the timestamp reference).
            dry_run: When True (the default), analyze and report only;
                no local rows are modified.
        """
        self.hybrid = hybrid_storage
        self.primary = hybrid_storage.primary  # SQLite-vec (local, repaired in place)
        self.secondary = hybrid_storage.secondary  # Cloudflare (reference timestamps)
        self.dry_run = dry_run

        # Counters surfaced in the final summary. In dry-run mode,
        # 'recovered' counts memories that *would* be recovered.
        self.stats = {
            'total_checked': 0,
            'mismatches_found': 0,
            'recovered': 0,
            'errors': 0,
            'skipped': 0
        }

    async def recover_all_timestamps(self) -> Tuple[bool, dict]:
        """
        Recover timestamps for all memories by comparing SQLite vs Cloudflare.

        Memories are matched across backends by content_hash; local-only
        memories are counted as skipped. A mismatch is any created_at pair
        differing by more than 1 second.

        Returns:
            Tuple of (success, stats_dict); success is True when no errors
            were recorded during the run.
        """
        print("="*70)
        print("⏰ TIMESTAMP RECOVERY FROM CLOUDFLARE")
        print("="*70)
        print(f"Mode: {'DRY RUN (no changes)' if self.dry_run else 'LIVE (will apply fixes)'}")
        print()

        try:
            # Get all memories from both backends
            print("1️⃣ Fetching memories from local SQLite...")
            local_memories = await self._get_all_local_memories()
            print(f"   Found {len(local_memories)} local memories")

            print("\n2️⃣ Fetching memories from Cloudflare...")
            cf_memories = await self._get_all_cloudflare_memories()
            print(f"   Found {len(cf_memories)} Cloudflare memories")

            # Build Cloudflare memory lookup (content_hash -> row wrapper)
            cf_lookup = {m.content_hash: m for m in cf_memories}

            print("\n3️⃣ Comparing timestamps...")
            mismatches = []

            for local_memory in local_memories:
                self.stats['total_checked'] += 1
                content_hash = local_memory.content_hash

                cf_memory = cf_lookup.get(content_hash)
                if not cf_memory:
                    # No Cloudflare counterpart -> nothing to compare against.
                    self.stats['skipped'] += 1
                    continue

                # Compare timestamps (allow 1 second tolerance)
                if abs(local_memory.created_at - cf_memory.created_at) > 1.0:
                    mismatches.append((local_memory, cf_memory))
                    self.stats['mismatches_found'] += 1

            if not mismatches:
                print("   ✅ No timestamp mismatches found!")
                return True, self.stats

            print(f"   ⚠️  Found {len(mismatches)} timestamp mismatches")

            # Analyze and fix mismatches
            print("\n4️⃣ Analyzing and fixing mismatches...")
            await self._fix_mismatches(mismatches)

            # Print summary
            print("\n" + "="*70)
            print("📊 RECOVERY SUMMARY")
            print("="*70)
            print(f"Total checked:     {self.stats['total_checked']}")
            print(f"Mismatches found:  {self.stats['mismatches_found']}")
            print(f"Recovered:         {self.stats['recovered']}")
            print(f"Errors:            {self.stats['errors']}")
            print(f"Skipped:           {self.stats['skipped']}")

            if self.dry_run:
                print("\n💡 This was a DRY RUN. Run without --dry-run to apply fixes.")
            else:
                print("\n✅ Recovery complete! Timestamps have been restored.")

            return self.stats['errors'] == 0, self.stats

        except Exception as e:
            print(f"\n❌ Recovery failed: {e}")
            import traceback
            traceback.print_exc()
            return False, self.stats

    async def _get_all_local_memories(self) -> List:
        """Get all memories from local SQLite.

        Returns lightweight row wrappers (not full Memory objects) carrying
        only the hash and timestamp columns needed for the comparison.

        Raises:
            ValueError: If the primary backend is not SQLite-vec (detected
                via the presence of a ``conn`` attribute).
        """
        if not hasattr(self.primary, 'conn'):
            raise ValueError("Primary storage must be SQLite-vec")

        cursor = self.primary.conn.execute('''
            SELECT content_hash, created_at, created_at_iso, updated_at, updated_at_iso
            FROM memories
            ORDER BY created_at
        ''')

        class LocalMemory:
            # Minimal row wrapper mirroring the selected columns.
            def __init__(self, content_hash, created_at, created_at_iso, updated_at, updated_at_iso):
                self.content_hash = content_hash
                self.created_at = created_at
                self.created_at_iso = created_at_iso
                self.updated_at = updated_at
                self.updated_at_iso = updated_at_iso

        memories = []
        for row in cursor.fetchall():
            memories.append(LocalMemory(*row))

        return memories

    async def _get_all_cloudflare_memories(self) -> List:
        """Get all memories from Cloudflare.

        Issues a raw D1 SQL query through the Cloudflare backend's internal
        request helper. Returns an empty list if the backend does not expose
        that helper or the query fails — the caller then sees zero Cloudflare
        memories and every local memory is skipped.

        NOTE(review): relies on Cloudflare-backend internals (_retry_request,
        d1_url) — confirm these remain stable before reusing elsewhere.
        """
        # Use search_by_tag with empty tag list to get all
        # (Cloudflare backend may not have a get_all method)
        try:
            # Try to get all via D1 query
            if hasattr(self.secondary, '_retry_request'):
                sql = '''
                    SELECT content_hash, created_at, created_at_iso,
                           updated_at, updated_at_iso
                    FROM memories
                    ORDER BY created_at
                '''
                payload = {"sql": sql, "params": []}
                response = await self.secondary._retry_request(
                    "POST",
                    f"{self.secondary.d1_url}/query",
                    json=payload
                )
                result = response.json()

                # NOTE(review): assumes result["result"] is non-empty on
                # success; an empty list would raise IndexError, which is
                # caught by the broad except below and reported as a warning.
                if result.get("success") and result.get("result", [{}])[0].get("results"):
                    class CFMemory:
                        # Minimal row wrapper mirroring the D1 result columns.
                        def __init__(self, content_hash, created_at, created_at_iso, updated_at, updated_at_iso):
                            self.content_hash = content_hash
                            self.created_at = created_at
                            self.created_at_iso = created_at_iso
                            self.updated_at = updated_at
                            self.updated_at_iso = updated_at_iso

                    memories = []
                    for row in result["result"][0]["results"]:
                        memories.append(CFMemory(
                            row["content_hash"],
                            row["created_at"],
                            row["created_at_iso"],
                            row["updated_at"],
                            row["updated_at_iso"]
                        ))

                    return memories

        except Exception as e:
            print(f"   ⚠️  Could not fetch Cloudflare memories: {e}")

        return []

    async def _fix_mismatches(self, mismatches: List[Tuple]) -> None:
        """Fix timestamp mismatches by updating local from Cloudflare.

        Only the first 5 mismatches are printed in detail; the rest are
        summarized. Updates stats counters in place.
        """
        for i, (local, cf) in enumerate(mismatches, 1):
            try:
                # Determine which is correct based on logic:
                # - Cloudflare should have the original created_at
                # - If local created_at is very recent but Cloudflare is old,
                #   it's likely the bug (reset to current time)

                local_age = time.time() - local.created_at
                cf_age = time.time() - cf.created_at

                # If local is < 24h old (86400s) but CF is > 7 days old
                # (604800s), the local value was likely reset by the bug.
                is_likely_corrupted = local_age < 86400 and cf_age > 604800

                if is_likely_corrupted or cf.created_at < local.created_at:
                    # Use Cloudflare timestamp (it's older/more likely correct)
                    if i <= 5:  # Show first 5
                        print(f"\n   {i}. {local.content_hash[:8]}:")
                        print(f"      Local:      {local.created_at_iso} ({local_age/86400:.1f} days ago)")
                        print(f"      Cloudflare: {cf.created_at_iso} ({cf_age/86400:.1f} days ago)")
                        print(f"      → Restoring from Cloudflare")

                    if not self.dry_run:
                        # Update local SQLite with Cloudflare timestamps;
                        # preserve_timestamps=False tells the backend to take
                        # the timestamps we provide instead of regenerating.
                        success, _ = await self.primary.update_memory_metadata(
                            local.content_hash,
                            {
                                'created_at': cf.created_at,
                                'created_at_iso': cf.created_at_iso,
                                'updated_at': cf.updated_at,
                                'updated_at_iso': cf.updated_at_iso,
                            },
                            preserve_timestamps=False  # Use provided timestamps
                        )

                        if success:
                            self.stats['recovered'] += 1
                        else:
                            self.stats['errors'] += 1
                            print(f"      ❌ Failed to update")
                    else:
                        self.stats['recovered'] += 1  # Would recover

                else:
                    # Local is older, keep it
                    if i <= 5:
                        print(f"\n   {i}. {local.content_hash[:8]}: Local older, keeping local")
                    self.stats['skipped'] += 1

            except Exception as e:
                print(f"   ❌ Error: {e}")
                self.stats['errors'] += 1

        if len(mismatches) > 5:
            print(f"\n   ... and {len(mismatches) - 5} more")
254 |
255 |
async def main():
    """Parse CLI options, validate the backend, and run timestamp recovery.

    Requires the hybrid storage backend (Cloudflare is the timestamp
    reference). Defaults to dry-run; pass --apply to write fixes.

    Exit codes:
        0 - recovery completed with no errors (or a clean dry run)
        1 - wrong backend, initialization failure, or recovery errors
    """
    parser = argparse.ArgumentParser(
        description="Recover corrupted timestamps from Cloudflare backup"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Preview changes without applying them (this is the default; use --apply to write fixes)"
    )
    parser.add_argument(
        "--apply",
        action="store_true",
        help="Apply fixes (overrides dry-run)"
    )

    args = parser.parse_args()

    # Dry-run is the default safety posture: only --apply enables writes.
    # (--dry-run is accepted for explicitness but is already implied.)
    dry_run = not args.apply

    try:
        # Initialize hybrid storage
        config = get_config()

        # Recovery only makes sense when Cloudflare is available as the
        # secondary source of truth, i.e. with the hybrid backend.
        if config.storage_backend != "hybrid":
            print("❌ This script requires hybrid backend")
            print(f"   Current backend: {config.storage_backend}")
            print("\n   To use hybrid backend, set in .env:")
            print("   MCP_MEMORY_STORAGE_BACKEND=hybrid")
            sys.exit(1)

        storage = await create_storage_instance(config.sqlite_db_path)

        if not isinstance(storage, HybridMemoryStorage):
            print("❌ Storage is not hybrid backend")
            sys.exit(1)

        # Run recovery
        recovery = TimestampRecovery(storage, dry_run=dry_run)
        success, stats = await recovery.recover_all_timestamps()

        # Close storage. close() may be sync or async depending on the
        # backend; calling an async close() without awaiting would silently
        # drop the coroutine and skip cleanup.
        if hasattr(storage, 'close'):
            close_result = storage.close()
            if asyncio.iscoroutine(close_result):
                await close_result

        # Exit with appropriate code
        sys.exit(0 if success else 1)

    except Exception as e:
        print(f"\n❌ Recovery failed: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
314 |
```
--------------------------------------------------------------------------------
/tests/unit/test_storage_interface_compatibility.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Unit tests for storage backend interface compatibility.
3 |
4 | These tests verify that all storage backends implement the same interface,
5 | catching issues like mismatched method signatures or missing methods.
6 |
7 | Added to prevent production bugs like v8.12.0 where:
8 | - count_all_memories() had different signatures across backends
9 | - Some backends had 'tags' parameter, others didn't
10 | - Database-level filtering wasn't uniformly implemented
11 | """
12 |
13 | import pytest
14 | import inspect
15 | from abc import ABC
16 | from typing import get_type_hints
17 |
18 |
def get_all_storage_classes():
    """Return (name, class) pairs for every concrete storage backend.

    Central registry used by all interface-compatibility tests below; a new
    backend only needs to be added here once to be covered by every test.

    Note: imports are local so module collection does not fail if an
    optional backend dependency is missing.
    """
    # Fix: removed unused import of MemoryStorage (base class is imported
    # where it is actually needed, in the individual tests).
    from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
    from mcp_memory_service.storage.cloudflare import CloudflareStorage
    from mcp_memory_service.storage.hybrid import HybridMemoryStorage

    return [
        ('SqliteVecMemoryStorage', SqliteVecMemoryStorage),
        ('CloudflareStorage', CloudflareStorage),
        ('HybridMemoryStorage', HybridMemoryStorage),
    ]
31 |
32 |
def test_base_class_is_abstract():
    """Verify MemoryStorage is a true abstract base class."""
    from mcp_memory_service.storage.base import MemoryStorage

    # Part of the ABC hierarchy...
    assert issubclass(MemoryStorage, ABC)

    # ...and impossible to instantiate directly.
    with pytest.raises(TypeError):
        MemoryStorage()
43 |
44 |
def test_all_backends_inherit_from_base():
    """Verify every concrete backend subclasses MemoryStorage."""
    from mcp_memory_service.storage.base import MemoryStorage

    for name, backend_cls in get_all_storage_classes():
        assert issubclass(backend_cls, MemoryStorage), \
            f"{name} must inherit from MemoryStorage"
52 |
53 |
def test_all_backends_implement_required_methods():
    """Verify each backend provides every abstract method of the base class."""
    from mcp_memory_service.storage.base import MemoryStorage

    # Names of all members the base class declares abstract.
    abstract_methods = {
        member_name
        for member_name, member in inspect.getmembers(MemoryStorage)
        if getattr(member, '__isabstractmethod__', False)
    }

    for name, backend_cls in get_all_storage_classes():
        for method_name in abstract_methods:
            assert hasattr(backend_cls, method_name), \
                f"{name} missing required method: {method_name}"
69 |
70 |
def test_store_signature_compatibility():
    """Verify store() takes the identical parameter list in every backend."""
    # Map backend name -> parameter names, excluding the implicit 'self'.
    param_lists = {
        name: list(inspect.signature(cls.store).parameters)[1:]
        for name, cls in get_all_storage_classes()
    }

    first_name, first_params = next(iter(param_lists.items()))

    for name, params in param_lists.items():
        assert params == first_params, \
            f"{name}.store parameters {params} don't match {first_name} {first_params}"
87 |
88 |
def test_get_all_memories_signature_compatibility():
    """Verify get_all_memories() takes the identical parameter list in every backend."""
    # Map backend name -> parameter names, excluding the implicit 'self'.
    param_lists = {
        name: list(inspect.signature(cls.get_all_memories).parameters)[1:]
        for name, cls in get_all_storage_classes()
    }

    first_name, first_params = next(iter(param_lists.items()))

    for name, params in param_lists.items():
        assert params == first_params, \
            f"{name}.get_all_memories parameters {params} don't match {first_name} {first_params}"
105 |
106 |
def test_count_all_memories_signature_compatibility():
    """Verify count_all_memories() takes the identical parameter list everywhere.

    This test specifically prevents the v8.12.0 bug where count_all_memories()
    had different signatures across backends (some had 'tags', others didn't).
    """
    # Only backends that define the method participate (excluding 'self').
    param_lists = {
        name: list(inspect.signature(cls.count_all_memories).parameters)[1:]
        for name, cls in get_all_storage_classes()
        if hasattr(cls, 'count_all_memories')
    }

    # Comparison is only meaningful with at least two implementations.
    if len(param_lists) > 1:
        first_name, first_params = next(iter(param_lists.items()))

        for name, params in param_lists.items():
            assert params == first_params, \
                f"{name}.count_all_memories parameters {params} don't match {first_name} {first_params}"
129 |
130 |
def test_retrieve_signature_compatibility():
    """Verify retrieve() takes the identical parameter list in every backend."""
    # Map backend name -> parameter names, excluding the implicit 'self'.
    param_lists = {
        name: list(inspect.signature(cls.retrieve).parameters)[1:]
        for name, cls in get_all_storage_classes()
    }

    first_name, first_params = next(iter(param_lists.items()))

    for name, params in param_lists.items():
        assert params == first_params, \
            f"{name}.retrieve parameters {params} don't match {first_name} {first_params}"
147 |
148 |
def test_delete_signature_compatibility():
    """Verify delete() takes the identical parameter list in every backend."""
    # Map backend name -> parameter names, excluding the implicit 'self'.
    param_lists = {
        name: list(inspect.signature(cls.delete).parameters)[1:]
        for name, cls in get_all_storage_classes()
    }

    first_name, first_params = next(iter(param_lists.items()))

    for name, params in param_lists.items():
        assert params == first_params, \
            f"{name}.delete parameters {params} don't match {first_name} {first_params}"
165 |
166 |
def test_get_stats_signature_compatibility():
    """Verify get_stats() takes the identical parameter list in every backend."""
    # Map backend name -> parameter names, excluding the implicit 'self'.
    param_lists = {
        name: list(inspect.signature(cls.get_stats).parameters)[1:]
        for name, cls in get_all_storage_classes()
    }

    first_name, first_params = next(iter(param_lists.items()))

    for name, params in param_lists.items():
        assert params == first_params, \
            f"{name}.get_stats parameters {params} don't match {first_name} {first_params}"
183 |
184 |
def test_all_backends_have_same_public_methods():
    """Verify all backends expose the full public interface of the base class.

    This catches methods that a backend forgot to implement.
    """
    from mcp_memory_service.storage.base import MemoryStorage

    def public_function_names(cls):
        # "Public" == plain function members without a leading underscore.
        return {
            fn_name
            for fn_name, fn in inspect.getmembers(cls, predicate=inspect.isfunction)
            if not fn_name.startswith('_')
        }

    base_methods = public_function_names(MemoryStorage)

    for name, backend_cls in get_all_storage_classes():
        missing = base_methods - public_function_names(backend_cls)
        assert not missing, \
            f"{name} missing public methods: {missing}"
208 |
209 |
def test_async_method_consistency():
    """Verify each method is either async in every backend or in none.

    A method that is a coroutine function in one backend but a plain
    function in another breaks callers that uniformly await it.
    """
    from mcp_memory_service.storage.base import MemoryStorage

    base_methods = [
        member_name
        for member_name, member in inspect.getmembers(MemoryStorage, predicate=inspect.isfunction)
        if not member_name.startswith('_')
    ]

    backends = get_all_storage_classes()
    all_backends = {name for name, _ in backends}

    for method_name in base_methods:
        # Backends that implement this method as a coroutine function.
        async_backends = {
            name
            for name, backend_cls in backends
            if hasattr(backend_cls, method_name)
            and inspect.iscoroutinefunction(getattr(backend_cls, method_name))
        }

        # Async anywhere must mean async everywhere.
        if async_backends:
            assert async_backends == all_backends, \
                f"{method_name} is async in {async_backends} but not in {all_backends - async_backends}"
239 |
240 |
def test_backends_handle_tags_parameter_consistently():
    """Verify the 'tags' parameter is uniformly present (or absent) per method.

    This specifically targets the v8.12.0 bug where count_all_memories()
    had 'tags' in some backends but not others.
    """
    for method_name in ('get_all_memories', 'count_all_memories'):
        # Backend name -> whether its implementation accepts 'tags'.
        has_tags_param = {
            name: 'tags' in inspect.signature(getattr(cls, method_name)).parameters
            for name, cls in get_all_storage_classes()
            if hasattr(cls, method_name)
        }

        if has_tags_param:
            first_name, first_value = next(iter(has_tags_param.items()))

            for name, has_tags in has_tags_param.items():
                assert has_tags == first_value, \
                    f"{name}.{method_name} 'tags' parameter inconsistent: {name}={has_tags}, {first_name}={first_value}"
265 |
266 |
def test_return_type_consistency():
    """Verify annotated return types agree across backends for key methods.

    This helps catch issues where one backend returns dict and another returns a custom class.
    """
    from mcp_memory_service.storage.base import MemoryStorage

    for method_name in ('get_stats', 'store', 'delete'):
        if not hasattr(MemoryStorage, method_name):
            continue

        return_types = {}
        for name, backend_cls in get_all_storage_classes():
            if not hasattr(backend_cls, method_name):
                continue
            try:
                hints = get_type_hints(getattr(backend_cls, method_name))
            except Exception:
                # Some methods may not have (resolvable) type hints.
                continue
            if 'return' in hints:
                return_types[name] = hints['return']

        # Comparison only meaningful with at least two annotated backends.
        if len(return_types) > 1:
            first_name, first_type = next(iter(return_types.items()))

            for name, return_type in return_types.items():
                # Allow for Coroutine wrappers in async methods
                assert return_type == first_type or str(return_type).startswith('typing.Coroutine'), \
                    f"{name}.{method_name} return type {return_type} doesn't match {first_name} {first_type}"


if __name__ == "__main__":
    # Allow running tests directly for quick verification
    pytest.main([__file__, "-v"])
308 |
```