This is page 31 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.
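
Since the export is paginated, pages can also be fetched programmatically. A minimal sketch, assuming the `lines=true&page={x}` URL template above serves each page as plain text (if the endpoint returns HTML instead, the response would need parsing first):

```python
# Hedged sketch: fetch one page of the paginated codebase.md export.
# The URL template comes from the note above; everything else is illustrative.
import urllib.request

BASE = "http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x}"

def fetch_page(page: int) -> str:
    """Download a single page of the export and return it as text."""
    with urllib.request.urlopen(BASE.format(x=page)) as resp:
        return resp.read().decode("utf-8")

if __name__ == "__main__":
    text = fetch_page(31)  # this page
    print(text.splitlines()[0])
```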

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/tests/unit/test_memory_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Unit tests for MemoryService business logic.
  3 | 
  4 | These tests verify the MemoryService class centralizes memory operations
  5 | correctly and provides consistent behavior across all interfaces.
  6 | """
  7 | 
  8 | import pytest
  9 | from unittest.mock import AsyncMock, MagicMock, patch
 10 | from datetime import datetime
 11 | from typing import List
 12 | 
 13 | from mcp_memory_service.services.memory_service import MemoryService
 14 | from mcp_memory_service.models.memory import Memory
 15 | from mcp_memory_service.storage.base import MemoryStorage
 16 | 
 17 | 
 18 | # Test Fixtures
 19 | 
 20 | @pytest.fixture
 21 | def mock_storage():
 22 |     """Create a mock storage backend."""
 23 |     storage = AsyncMock(spec=MemoryStorage)
 24 |     # Add required properties
 25 |     storage.max_content_length = 1000
 26 |     storage.supports_chunking = True
 27 |     # Setup method return values to avoid AttributeError
 28 |     storage.store.return_value = (True, "Success")
 29 |     storage.delete.return_value = (True, "Deleted")
 30 |     storage.get_stats.return_value = {
 31 |         "backend": "mock",
 32 |         "total_memories": 0
 33 |     }
 34 |     return storage
 35 | 
 36 | 
 37 | @pytest.fixture
 38 | def memory_service(mock_storage):
 39 |     """Create a MemoryService instance with mock storage."""
 40 |     return MemoryService(storage=mock_storage)
 41 | 
 42 | 
 43 | @pytest.fixture
 44 | def sample_memory():
 45 |     """Create a sample memory object for testing."""
 46 |     return Memory(
 47 |         content="Test memory content",
 48 |         content_hash="test_hash_123",
 49 |         tags=["test", "sample"],
 50 |         memory_type="note",
 51 |         metadata={"source": "test"},
 52 |         created_at=1698765432.0,
 53 |         updated_at=1698765432.0
 54 |     )
 55 | 
 56 | 
 57 | @pytest.fixture
 58 | def sample_memories():
 59 |     """Create a list of sample memories."""
 60 |     memories = []
 61 |     for i in range(5):
 62 |         memories.append(Memory(
 63 |             content=f"Test memory {i+1}",
 64 |             content_hash=f"hash_{i+1}",
 65 |             tags=[f"tag{i+1}", "test"],
 66 |             memory_type="note",
 67 |             metadata={"index": i+1},
 68 |             created_at=1698765432.0 + i * 100,
 69 |             updated_at=1698765432.0 + i * 100
 70 |         ))
 71 |     return memories
 72 | 
 73 | 
 74 | # Test list_memories method
 75 | 
 76 | @pytest.mark.asyncio
 77 | async def test_list_memories_basic_pagination(memory_service, mock_storage, sample_memories):
 78 |     """Test basic pagination functionality."""
 79 |     # Setup mock
 80 |     mock_storage.get_all_memories.return_value = sample_memories[:2]
 81 |     mock_storage.count_all_memories.return_value = 5
 82 | 
 83 |     # Execute
 84 |     result = await memory_service.list_memories(page=1, page_size=2)
 85 | 
 86 |     # Verify
 87 |     assert result["page"] == 1
 88 |     assert result["page_size"] == 2
 89 |     assert result["total"] == 5
 90 |     assert result["has_more"] is True
 91 |     assert len(result["memories"]) == 2
 92 | 
 93 |     # Verify storage called with correct parameters
 94 |     mock_storage.get_all_memories.assert_called_once_with(
 95 |         limit=2,
 96 |         offset=0,
 97 |         memory_type=None,
 98 |         tags=None
 99 |     )
100 | 
101 | 
102 | @pytest.mark.asyncio
103 | async def test_list_memories_with_tag_filter(memory_service, mock_storage, sample_memories):
104 |     """Test filtering by tag."""
105 |     filtered_memories = [m for m in sample_memories if "tag1" in m.tags]
106 |     mock_storage.get_all_memories.return_value = filtered_memories
107 |     mock_storage.count_all_memories.return_value = len(filtered_memories)
108 | 
109 |     result = await memory_service.list_memories(page=1, page_size=10, tag="tag1")
110 | 
111 |     # Verify tag passed to storage as list
112 |     mock_storage.get_all_memories.assert_called_once()
113 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
114 |     assert call_kwargs["tags"] == ["tag1"]
115 | 
116 | 
117 | @pytest.mark.asyncio
118 | async def test_list_memories_with_type_filter(memory_service, mock_storage, sample_memories):
119 |     """Test filtering by memory type."""
120 |     mock_storage.get_all_memories.return_value = sample_memories
121 |     mock_storage.count_all_memories.return_value = 5
122 | 
123 |     result = await memory_service.list_memories(page=1, page_size=10, memory_type="note")
124 | 
125 |     # Verify type passed to storage
126 |     mock_storage.get_all_memories.assert_called_once()
127 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
128 |     assert call_kwargs["memory_type"] == "note"
129 | 
130 | 
131 | @pytest.mark.asyncio
132 | async def test_list_memories_offset_calculation(memory_service, mock_storage):
133 |     """Test correct offset calculation for different pages."""
134 |     mock_storage.get_all_memories.return_value = []
135 |     mock_storage.count_all_memories.return_value = 0
136 | 
137 |     # Page 3 with page_size 10 should have offset 20
138 |     await memory_service.list_memories(page=3, page_size=10)
139 | 
140 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
141 |     assert call_kwargs["offset"] == 20
142 |     assert call_kwargs["limit"] == 10
143 | 
144 | 
145 | @pytest.mark.asyncio
146 | async def test_list_memories_has_more_false(memory_service, mock_storage, sample_memories):
147 |     """Test has_more is False when no more results."""
148 |     mock_storage.get_all_memories.return_value = sample_memories
149 |     mock_storage.count_all_memories.return_value = 5
150 | 
151 |     # Requesting page that includes last item
152 |     result = await memory_service.list_memories(page=1, page_size=10)
153 | 
154 |     assert result["has_more"] is False
155 | 
156 | 
157 | @pytest.mark.asyncio
158 | async def test_list_memories_error_handling(memory_service, mock_storage):
159 |     """Test error handling in list_memories."""
160 |     mock_storage.get_all_memories.side_effect = Exception("Database error")
161 | 
162 |     result = await memory_service.list_memories(page=1, page_size=10)
163 | 
164 |     assert result["success"] is False
165 |     assert "error" in result
166 |     assert "Database error" in result["error"]
167 |     assert result["memories"] == []
168 | 
169 | 
170 | # Test store_memory method
171 | 
172 | @pytest.mark.asyncio
173 | async def test_store_memory_basic(memory_service, mock_storage):
174 |     """Test basic memory storage."""
175 |     mock_storage.store.return_value = (True, "Success")
176 | 
177 |     result = await memory_service.store_memory(
178 |         content="Test content",
179 |         tags=["test"],
180 |         memory_type="note"
181 |     )
182 | 
183 |     assert result["success"] is True
184 |     assert "memory" in result
185 |     assert result["memory"]["content"] == "Test content"
186 | 
187 |     # Verify storage.store was called
188 |     mock_storage.store.assert_called_once()
189 |     stored_memory = mock_storage.store.call_args.args[0]
190 |     assert stored_memory.content == "Test content"
191 |     assert stored_memory.tags == ["test"]
192 | 
193 | 
194 | @pytest.mark.asyncio
195 | async def test_store_memory_with_hostname_tagging(memory_service, mock_storage):
196 |     """Test hostname tagging is applied correctly."""
197 |     mock_storage.store.return_value = None
198 | 
199 |     result = await memory_service.store_memory(
200 |         content="Test content",
201 |         tags=["test"],
202 |         client_hostname="my-machine"
203 |     )
204 | 
205 |     # Verify hostname tag added
206 |     stored_memory = mock_storage.store.call_args.args[0]
207 |     assert "source:my-machine" in stored_memory.tags
208 |     assert stored_memory.metadata["hostname"] == "my-machine"
209 | 
210 | 
211 | @pytest.mark.asyncio
212 | async def test_store_memory_hostname_not_duplicated(memory_service, mock_storage):
213 |     """Test hostname tag is not duplicated if already present."""
214 |     mock_storage.store.return_value = None
215 | 
216 |     result = await memory_service.store_memory(
217 |         content="Test content",
218 |         tags=["test", "source:my-machine"],
219 |         client_hostname="my-machine"
220 |     )
221 | 
222 |     stored_memory = mock_storage.store.call_args.args[0]
223 |     # Count occurrences of hostname tag
224 |     hostname_tags = [t for t in stored_memory.tags if t.startswith("source:")]
225 |     assert len(hostname_tags) == 1
226 | 
227 | 
228 | @pytest.mark.asyncio
229 | @patch('mcp_memory_service.services.memory_service.ENABLE_AUTO_SPLIT', True)
230 | async def test_store_memory_with_chunking(memory_service, mock_storage):
231 |     """Test content chunking when enabled and content is large."""
232 |     mock_storage.store.return_value = (True, "Success")
233 |     # Set max_content_length to trigger chunking
234 |     mock_storage.max_content_length = 100
235 | 
236 |     # Create content larger than max_content_length
237 |     long_content = "x" * 200
238 | 
239 |     with patch('mcp_memory_service.services.memory_service.split_content') as mock_split:
240 |         mock_split.return_value = ["chunk1", "chunk2"]
241 | 
242 |         result = await memory_service.store_memory(content=long_content)
243 | 
244 |         assert result["success"] is True
245 |         assert "memories" in result
246 |         assert result["total_chunks"] == 2
247 |         assert "original_hash" in result
248 | 
249 |         # Verify storage.store called twice (once per chunk)
250 |         assert mock_storage.store.call_count == 2
251 | 
252 | 
253 | @pytest.mark.asyncio
254 | async def test_store_memory_validation_error(memory_service, mock_storage):
255 |     """Test ValueError is caught and returned as error."""
256 |     mock_storage.store.side_effect = ValueError("Invalid content")
257 | 
258 |     result = await memory_service.store_memory(content="Test")
259 | 
260 |     assert result["success"] is False
261 |     assert "error" in result
262 |     assert "Invalid memory data" in result["error"]
263 | 
264 | 
265 | @pytest.mark.asyncio
266 | async def test_store_memory_connection_error(memory_service, mock_storage):
267 |     """Test ConnectionError is caught and handled."""
268 |     mock_storage.store.side_effect = ConnectionError("Storage unavailable")
269 | 
270 |     result = await memory_service.store_memory(content="Test")
271 | 
272 |     assert result["success"] is False
273 |     assert "error" in result
274 |     assert "Storage connection failed" in result["error"]
275 | 
276 | 
277 | @pytest.mark.asyncio
278 | async def test_store_memory_unexpected_error(memory_service, mock_storage):
279 |     """Test unexpected exceptions are caught."""
280 |     mock_storage.store.side_effect = RuntimeError("Unexpected error")
281 | 
282 |     result = await memory_service.store_memory(content="Test")
283 | 
284 |     assert result["success"] is False
285 |     assert "error" in result
286 |     assert "Failed to store memory" in result["error"]
287 | 
288 | 
289 | # Test retrieve_memories method
290 | 
291 | @pytest.mark.asyncio
292 | async def test_retrieve_memories_basic(memory_service, mock_storage, sample_memories):
293 |     """Test basic semantic search retrieval."""
294 |     mock_storage.retrieve.return_value = sample_memories[:3]
295 | 
296 |     result = await memory_service.retrieve_memories(query="test query", n_results=3)
297 | 
298 |     assert result["query"] == "test query"
299 |     assert result["count"] == 3
300 |     assert len(result["memories"]) == 3
301 | 
302 |     # After fix: storage.retrieve() only accepts query and n_results
303 |     mock_storage.retrieve.assert_called_once_with(
304 |         query="test query",
305 |         n_results=3
306 |     )
307 | 
308 | 
309 | @pytest.mark.asyncio
310 | async def test_retrieve_memories_with_filters(memory_service, mock_storage, sample_memories):
311 |     """Test retrieval with tag and type filters."""
312 |     # Return memories that will be filtered by MemoryService
313 |     mock_storage.retrieve.return_value = sample_memories
314 | 
315 |     # Create a memory with matching tags for filtering
316 |     from mcp_memory_service.models.memory import Memory
317 |     import hashlib
318 |     content_hash = hashlib.sha256("test content".encode()).hexdigest()
319 |     matching_memory = Memory(
320 |         content="test content",
321 |         content_hash=content_hash,
322 |         tags=["tag1"],
323 |         memory_type="note",
324 |         created_at=1234567890.0
325 |     )
326 |     matching_memory.metadata = {"tags": ["tag1"], "memory_type": "note"}
327 |     mock_storage.retrieve.return_value = [matching_memory]
328 | 
329 |     result = await memory_service.retrieve_memories(
330 |         query="test",
331 |         n_results=5,
332 |         tags=["tag1"],
333 |         memory_type="note"
334 |     )
335 | 
336 |     # After fix: storage.retrieve() only accepts query and n_results
337 |     # Filtering is done by MemoryService after retrieval
338 |     mock_storage.retrieve.assert_called_once_with(
339 |         query="test",
340 |         n_results=5
341 |     )
342 | 
343 | 
344 | @pytest.mark.asyncio
345 | async def test_retrieve_memories_error_handling(memory_service, mock_storage):
346 |     """Test error handling in retrieve_memories."""
347 |     mock_storage.retrieve.side_effect = Exception("Retrieval failed")
348 | 
349 |     result = await memory_service.retrieve_memories(query="test")
350 | 
351 |     assert "error" in result
352 |     assert result["memories"] == []
353 |     assert "Failed to retrieve memories" in result["error"]
354 | 
355 | 
356 | # Test search_by_tag method
357 | 
358 | @pytest.mark.asyncio
359 | async def test_search_by_tag_single_tag(memory_service, mock_storage, sample_memories):
360 |     """Test searching by a single tag."""
361 |     mock_storage.search_by_tag.return_value = sample_memories[:2]
362 | 
363 |     result = await memory_service.search_by_tag(tags="test")
364 | 
365 |     assert result["tags"] == ["test"]
366 |     assert result["match_type"] == "ANY"
367 |     assert result["count"] == 2
368 | 
369 |     mock_storage.search_by_tag.assert_called_once_with(
370 |         tags=["test"]
371 |     )
372 | 
373 | 
374 | @pytest.mark.asyncio
375 | async def test_search_by_tag_multiple_tags(memory_service, mock_storage, sample_memories):
376 |     """Test searching by multiple tags."""
377 |     mock_storage.search_by_tag.return_value = sample_memories
378 | 
379 |     result = await memory_service.search_by_tag(tags=["tag1", "tag2"])
380 | 
381 |     assert result["tags"] == ["tag1", "tag2"]
382 |     assert result["match_type"] == "ANY"
383 | 
384 | 
385 | @pytest.mark.asyncio
386 | async def test_search_by_tag_match_all(memory_service, mock_storage, sample_memories):
387 |     """Test searching with match_all=True."""
388 |     mock_storage.search_by_tag.return_value = sample_memories[:1]
389 | 
390 |     result = await memory_service.search_by_tag(tags=["tag1", "tag2"], match_all=True)
391 | 
392 |     assert result["match_type"] == "ALL"
393 |     mock_storage.search_by_tag.assert_called_once_with(
394 |         tags=["tag1", "tag2"]
395 |     )
396 | 
397 | 
398 | @pytest.mark.asyncio
399 | async def test_search_by_tag_error_handling(memory_service, mock_storage):
400 |     """Test error handling in search_by_tag."""
401 |     mock_storage.search_by_tag.side_effect = Exception("Search failed")
402 | 
403 |     result = await memory_service.search_by_tag(tags="test")
404 | 
405 |     assert "error" in result
406 |     assert result["memories"] == []
407 |     assert "Failed to search by tags" in result["error"]
408 | 
409 | 
410 | # Test get_memory_by_hash method
411 | 
412 | @pytest.mark.asyncio
413 | async def test_get_memory_by_hash_found(memory_service, mock_storage, sample_memory):
414 |     """Test getting memory by hash when found."""
415 |     mock_storage.get_by_hash.return_value = sample_memory
416 | 
417 |     result = await memory_service.get_memory_by_hash("test_hash_123")
418 | 
419 |     assert result["found"] is True
420 |     assert "memory" in result
421 |     assert result["memory"]["content_hash"] == "test_hash_123"
422 |     mock_storage.get_by_hash.assert_called_once_with("test_hash_123")
423 | 
424 | 
425 | @pytest.mark.asyncio
426 | async def test_get_memory_by_hash_not_found(memory_service, mock_storage):
427 |     """Test getting memory by hash when not found."""
428 |     mock_storage.get_by_hash.return_value = None
429 | 
430 |     result = await memory_service.get_memory_by_hash("nonexistent_hash")
431 | 
432 |     assert result["found"] is False
433 |     assert result["content_hash"] == "nonexistent_hash"
434 |     mock_storage.get_by_hash.assert_called_once_with("nonexistent_hash")
435 | 
436 | 
437 | @pytest.mark.asyncio
438 | async def test_get_memory_by_hash_error(memory_service, mock_storage):
439 |     """Test error handling in get_memory_by_hash."""
440 |     mock_storage.get_by_hash.side_effect = Exception("Database error")
441 | 
442 |     result = await memory_service.get_memory_by_hash("test_hash")
443 | 
444 |     assert result["found"] is False
445 |     assert "error" in result
446 |     assert "Failed to get memory" in result["error"]
447 | 
448 | 
449 | # Test delete_memory method
450 | 
451 | @pytest.mark.asyncio
452 | async def test_delete_memory_success(memory_service, mock_storage):
453 |     """Test successful memory deletion."""
454 |     mock_storage.delete.return_value = (True, "Deleted successfully")
455 | 
456 |     result = await memory_service.delete_memory("test_hash")
457 | 
458 |     assert result["success"] is True
459 |     assert result["content_hash"] == "test_hash"
460 |     mock_storage.delete.assert_called_once_with("test_hash")
461 | 
462 | 
463 | @pytest.mark.asyncio
464 | async def test_delete_memory_not_found(memory_service, mock_storage):
465 |     """Test deleting non-existent memory."""
466 |     mock_storage.delete.return_value = (False, "Not found")
467 | 
468 |     result = await memory_service.delete_memory("nonexistent_hash")
469 | 
470 |     assert result["success"] is False
471 | 
472 | 
473 | @pytest.mark.asyncio
474 | async def test_delete_memory_error(memory_service, mock_storage):
475 |     """Test error handling in delete_memory."""
476 |     mock_storage.delete.side_effect = Exception("Delete failed")
477 | 
478 |     result = await memory_service.delete_memory("test_hash")
479 | 
480 |     assert result["success"] is False
481 |     assert "error" in result
482 |     assert "Failed to delete memory" in result["error"]
483 | 
484 | 
485 | # Test health_check method
486 | 
487 | @pytest.mark.asyncio
488 | async def test_health_check_success(memory_service, mock_storage):
489 |     """Test successful health check."""
490 |     mock_storage.get_stats.return_value = {
491 |         "backend": "sqlite-vec",
492 |         "total_memories": 100,
493 |         "database_size": "5MB"
494 |     }
495 | 
496 |     result = await memory_service.health_check()
497 | 
498 |     assert result["healthy"] is True
499 |     assert result["storage_type"] == "sqlite-vec"
500 |     assert result["total_memories"] == 100
501 |     assert "last_updated" in result
502 | 
503 | 
504 | @pytest.mark.asyncio
505 | async def test_health_check_failure(memory_service, mock_storage):
506 |     """Test health check when storage fails."""
507 |     mock_storage.get_stats.side_effect = Exception("Health check failed")
508 | 
509 |     result = await memory_service.health_check()
510 | 
511 |     assert result["healthy"] is False
512 |     assert "error" in result
513 |     assert "Health check failed" in result["error"]
514 | 
515 | 
516 | # Test _format_memory_response method
517 | 
518 | def test_format_memory_response(memory_service, sample_memory):
519 |     """Test memory formatting for API responses."""
520 |     formatted = memory_service._format_memory_response(sample_memory)
521 | 
522 |     assert formatted["content"] == sample_memory.content
523 |     assert formatted["content_hash"] == sample_memory.content_hash
524 |     assert formatted["tags"] == sample_memory.tags
525 |     assert formatted["memory_type"] == sample_memory.memory_type
526 |     assert formatted["metadata"] == sample_memory.metadata
527 |     assert "created_at" in formatted
528 |     assert "updated_at" in formatted
529 |     assert "created_at_iso" in formatted
530 |     assert "updated_at_iso" in formatted
531 | 
532 | 
533 | def test_format_memory_response_preserves_all_fields(memory_service, sample_memory):
534 |     """Test that formatting preserves all memory fields."""
535 |     formatted = memory_service._format_memory_response(sample_memory)
536 | 
537 |     # Verify all TypedDict fields are present
538 |     required_fields = [
539 |         "content", "content_hash", "tags", "memory_type", "metadata",
540 |         "created_at", "updated_at", "created_at_iso", "updated_at_iso"
541 |     ]
542 | 
543 |     for field in required_fields:
544 |         assert field in formatted
545 | 
546 | 
547 | # Integration-style tests (still using mocks but testing workflows)
548 | 
549 | @pytest.mark.asyncio
550 | async def test_store_and_retrieve_workflow(memory_service, mock_storage, sample_memory):
551 |     """Test complete workflow: store then retrieve."""
552 |     # Setup mocks
553 |     mock_storage.store.return_value = (True, "Success")
554 |     mock_storage.retrieve.return_value = [sample_memory]
555 | 
556 |     # Store memory
557 |     store_result = await memory_service.store_memory(
558 |         content="Test workflow",
559 |         tags=["workflow"],
560 |         memory_type="test"
561 |     )
562 |     assert store_result["success"] is True
563 | 
564 |     # Retrieve memory
565 |     retrieve_result = await memory_service.retrieve_memories(query="workflow")
566 |     assert len(retrieve_result["memories"]) > 0
567 | 
568 | 
569 | @pytest.mark.asyncio
570 | async def test_list_memories_database_level_filtering(memory_service, mock_storage):
571 |     """Test that list_memories uses database-level filtering (not loading all)."""
572 |     mock_storage.get_all_memories.return_value = []
573 |     mock_storage.count_all_memories.return_value = 1000
574 | 
575 |     # Request page 1 with 10 items from 1000 total
576 |     result = await memory_service.list_memories(page=1, page_size=10)
577 | 
578 |     # Verify we only requested 10 items, not all 1000
579 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
580 |     assert call_kwargs["limit"] == 10
581 |     assert call_kwargs["offset"] == 0
582 | 
583 |     # This proves we're using database-level filtering, not O(n) memory loading
584 |     mock_storage.get_all_memories.assert_called_once()
585 | 
586 | 
587 | @pytest.mark.asyncio
588 | async def test_empty_tags_list_stored_correctly(memory_service, mock_storage):
589 |     """Test that empty or None tags are handled correctly."""
590 |     mock_storage.store.return_value = None
591 | 
592 |     # Store with None tags
593 |     result = await memory_service.store_memory(content="Test", tags=None)
594 | 
595 |     stored_memory = mock_storage.store.call_args.args[0]
596 |     assert isinstance(stored_memory.tags, list)
597 |     assert len(stored_memory.tags) == 0
598 | 
599 | 
600 | @pytest.mark.asyncio
601 | async def test_metadata_preserved_through_storage(memory_service, mock_storage):
602 |     """Test that metadata is preserved correctly."""
603 |     mock_storage.store.return_value = None
604 | 
605 |     custom_metadata = {"key1": "value1", "key2": 123}
606 |     result = await memory_service.store_memory(
607 |         content="Test",
608 |         metadata=custom_metadata
609 |     )
610 | 
611 |     stored_memory = mock_storage.store.call_args.args[0]
612 |     assert "key1" in stored_memory.metadata
613 |     assert stored_memory.metadata["key1"] == "value1"
614 | 
```
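
The fixtures above lean on `AsyncMock(spec=MemoryStorage)`: the `spec` argument makes the mock reject any attribute the real storage interface does not define, so a typo'd method name fails fast instead of silently returning a fresh mock. A minimal, self-contained sketch of that pattern, using a hypothetical `FakeStorage` stub in place of the real `MemoryStorage` so it runs without the package installed:

```python
# Minimal sketch of the AsyncMock(spec=...) fixture pattern used in the tests above.
# FakeStorage is a hypothetical stand-in for mcp_memory_service.storage.base.MemoryStorage.
import asyncio
from unittest.mock import AsyncMock

class FakeStorage:
    async def store(self, memory): ...
    async def delete(self, content_hash): ...

async def main():
    storage = AsyncMock(spec=FakeStorage)
    storage.store.return_value = (True, "Success")   # same (ok, message) tuple as the fixture
    ok, msg = await storage.store({"content": "hello"})
    print(ok, msg)                                   # -> True Success
    print(storage.store.call_args.args[0])           # inspect what was stored, as the tests do

    # Methods outside the spec are rejected, which is the point of spec=:
    try:
        await storage.retrieve("query")              # not defined on FakeStorage
    except AttributeError as exc:
        print("rejected:", exc)

asyncio.run(main())
```

Since every coroutine test is marked `@pytest.mark.asyncio`, running this module presumably requires the `pytest-asyncio` plugin alongside the repository's `pytest.ini`.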

--------------------------------------------------------------------------------
/src/mcp_memory_service/consolidation/health.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp  
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """Health monitoring and error handling for consolidation system."""
 16 | 
 17 | import asyncio
 18 | import logging
 19 | from typing import Dict, Any, List, Optional
 20 | from datetime import datetime, timedelta
 21 | from dataclasses import dataclass, field
 22 | from enum import Enum
 23 | import traceback
 24 | 
 25 | from .base import ConsolidationError
 26 | 
 27 | 
 28 | class HealthStatus(Enum):
 29 |     """Health status levels."""
 30 |     HEALTHY = "healthy"
 31 |     DEGRADED = "degraded"
 32 |     UNHEALTHY = "unhealthy"
 33 |     CRITICAL = "critical"
 34 | 
 35 | 
 36 | @dataclass
 37 | class HealthMetric:
 38 |     """Represents a health metric."""
 39 |     name: str
 40 |     value: Any
 41 |     status: HealthStatus
 42 |     message: str = ""
 43 |     timestamp: datetime = field(default_factory=datetime.now)
 44 |     threshold_warning: Optional[float] = None
 45 |     threshold_critical: Optional[float] = None
 46 | 
 47 | 
 48 | @dataclass
 49 | class HealthAlert:
 50 |     """Represents a health alert."""
 51 |     alert_id: str
 52 |     component: str
 53 |     severity: HealthStatus
 54 |     message: str
 55 |     timestamp: datetime = field(default_factory=datetime.now)
 56 |     resolved: bool = False
 57 |     resolution_timestamp: Optional[datetime] = None
 58 | 
 59 | 
 60 | class ConsolidationHealthMonitor:
 61 |     """Monitors health of the consolidation system."""
 62 |     
 63 |     def __init__(self, config=None):
 64 |         self.config = config
 65 |         self.logger = logging.getLogger(__name__)
 66 |         
 67 |         # Health metrics storage
 68 |         self.metrics: Dict[str, HealthMetric] = {}
 69 |         self.alerts: List[HealthAlert] = []
 70 |         self.error_history: List[Dict[str, Any]] = []
 71 |         
 72 |         # Health thresholds
 73 |         self.thresholds = {
 74 |             'consolidation_success_rate': {'warning': 0.8, 'critical': 0.6},
 75 |             'average_duration_seconds': {'warning': 300, 'critical': 600},
 76 |             'memory_processing_rate': {'warning': 0.1, 'critical': 0.05},
 77 |             'error_rate': {'warning': 0.1, 'critical': 0.2},
 78 |             'storage_response_time': {'warning': 5.0, 'critical': 10.0}
 79 |         }
 80 |         
 81 |         # Performance tracking
 82 |         self.performance_history: List[Dict[str, Any]] = []
 83 |         self.max_history_entries = 1000
 84 |         
 85 |         # Component health cache
 86 |         self.component_health_cache: Dict[str, Dict[str, Any]] = {}
 87 |         self.cache_ttl = timedelta(minutes=5)
 88 |         self.last_health_check = {}
 89 |     
 90 |     async def check_overall_health(self) -> Dict[str, Any]:
 91 |         """Check overall consolidation system health."""
 92 |         try:
 93 |             health = {
 94 |                 'status': HealthStatus.HEALTHY.value,
 95 |                 'timestamp': datetime.now().isoformat(),
 96 |                 'components': {},
 97 |                 'metrics': {},
 98 |                 'alerts': [],
 99 |                 'recommendations': []
100 |             }
101 |             
102 |             # Check individual components
103 |             components = [
104 |                 'decay_calculator',
105 |                 'association_engine',
106 |                 'clustering_engine', 
107 |                 'compression_engine',
108 |                 'forgetting_engine',
109 |                 'scheduler',
110 |                 'storage_backend'
111 |             ]
112 |             
113 |             overall_status = HealthStatus.HEALTHY
114 |             
115 |             for component in components:
116 |                 component_health = await self._check_component_health(component)
117 |                 health['components'][component] = component_health
118 |                 
119 |                 # Update overall status based on component health
120 |                 component_status = HealthStatus(component_health.get('status', 'healthy'))
121 |                 if component_status == HealthStatus.CRITICAL:
122 |                     overall_status = HealthStatus.CRITICAL
123 |                 elif component_status == HealthStatus.UNHEALTHY and overall_status != HealthStatus.CRITICAL:
124 |                     overall_status = HealthStatus.UNHEALTHY
125 |                 elif component_status == HealthStatus.DEGRADED and overall_status == HealthStatus.HEALTHY:
126 |                     overall_status = HealthStatus.DEGRADED
127 |             
128 |             # Add current metrics
129 |             health['metrics'] = {name: {
130 |                 'value': metric.value,
131 |                 'status': metric.status.value,
132 |                 'message': metric.message,
133 |                 'timestamp': metric.timestamp.isoformat()
134 |             } for name, metric in self.metrics.items()}
135 |             
136 |             # Add active alerts
137 |             active_alerts = [alert for alert in self.alerts if not alert.resolved]
138 |             health['alerts'] = [{
139 |                 'alert_id': alert.alert_id,
140 |                 'component': alert.component,
141 |                 'severity': alert.severity.value,
142 |                 'message': alert.message,
143 |                 'timestamp': alert.timestamp.isoformat()
144 |             } for alert in active_alerts[-10:]]  # Last 10 alerts
145 |             
146 |             # Add recommendations
147 |             health['recommendations'] = await self._generate_health_recommendations()
148 |             
149 |             health['status'] = overall_status.value
150 |             
151 |             return health
152 |             
153 |         except Exception as e:
154 |             self.logger.error(f"Error checking overall health: {e}")
155 |             return {
156 |                 'status': HealthStatus.CRITICAL.value,
157 |                 'timestamp': datetime.now().isoformat(),
158 |                 'error': str(e),
159 |                 'components': {},
160 |                 'metrics': {},
161 |                 'alerts': [],
162 |                 'recommendations': []
163 |             }
164 |     
165 |     async def _check_component_health(self, component: str) -> Dict[str, Any]:
166 |         """Check health of a specific component."""
167 |         # Check cache first
168 |         now = datetime.now()
169 |         if (component in self.component_health_cache and 
170 |             component in self.last_health_check and
171 |             now - self.last_health_check[component] < self.cache_ttl):
172 |             return self.component_health_cache[component]
173 |         
174 |         try:
175 |             health = {
176 |                 'status': HealthStatus.HEALTHY.value,
177 |                 'timestamp': now.isoformat(),
178 |                 'checks': {},
179 |                 'metrics': {}
180 |             }
181 |             
182 |             if component == 'decay_calculator':
183 |                 health.update(await self._check_decay_calculator_health())
184 |             elif component == 'association_engine':
185 |                 health.update(await self._check_association_engine_health())
186 |             elif component == 'clustering_engine':
187 |                 health.update(await self._check_clustering_engine_health())
188 |             elif component == 'compression_engine':
189 |                 health.update(await self._check_compression_engine_health())
190 |             elif component == 'forgetting_engine':
191 |                 health.update(await self._check_forgetting_engine_health())
192 |             elif component == 'scheduler':
193 |                 health.update(await self._check_scheduler_health())
194 |             elif component == 'storage_backend':
195 |                 health.update(await self._check_storage_backend_health())
196 |             
197 |             # Cache the result
198 |             self.component_health_cache[component] = health
199 |             self.last_health_check[component] = now
200 |             
201 |             return health
202 |             
203 |         except Exception as e:
204 |             self.logger.error(f"Error checking {component} health: {e}")
205 |             return {
206 |                 'status': HealthStatus.UNHEALTHY.value,
207 |                 'timestamp': now.isoformat(),
208 |                 'error': str(e),
209 |                 'checks': {},
210 |                 'metrics': {}
211 |             }
212 |     
213 |     async def _check_decay_calculator_health(self) -> Dict[str, Any]:
214 |         """Check decay calculator health."""
215 |         return {
216 |             'checks': {
217 |                 'configuration': 'valid',
218 |                 'retention_periods': 'configured',
219 |                 'decay_algorithm': 'functional'
220 |             },
221 |             'metrics': {
222 |                 'recent_calculations': len([h for h in self.performance_history 
223 |                                           if h.get('component') == 'decay_calculator'
224 |                                           and h.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=1)])
225 |             }
226 |         }
227 |     
228 |     async def _check_association_engine_health(self) -> Dict[str, Any]:
229 |         """Check association engine health."""
230 |         recent_associations = len([h for h in self.performance_history 
231 |                                  if h.get('component') == 'association_engine'
232 |                                  and h.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=1)])
233 |         
234 |         return {
235 |             'checks': {
236 |                 'similarity_thresholds': 'configured',
237 |                 'concept_extraction': 'functional',
238 |                 'association_discovery': 'active'
239 |             },
240 |             'metrics': {
241 |                 'recent_associations_discovered': recent_associations,
242 |                 'similarity_range': '0.3-0.7'
243 |             }
244 |         }
245 |     
246 |     async def _check_clustering_engine_health(self) -> Dict[str, Any]:
247 |         """Check clustering engine health."""
248 |         return {
249 |             'checks': {
250 |                 'clustering_algorithm': 'available',
251 |                 'minimum_cluster_size': 'configured',
252 |                 'embedding_processing': 'functional'
253 |             },
254 |             'metrics': {
255 |                 'recent_clusters_created': len([h for h in self.performance_history 
256 |                                               if h.get('component') == 'clustering_engine'
257 |                                               and h.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=1)])
258 |             }
259 |         }
260 |     
261 |     async def _check_compression_engine_health(self) -> Dict[str, Any]:
262 |         """Check compression engine health."""
263 |         return {
264 |             'checks': {
265 |                 'summary_generation': 'functional',
266 |                 'concept_extraction': 'active',
267 |                 'compression_ratio': 'optimal'
268 |             },
269 |             'metrics': {
270 |                 'recent_compressions': len([h for h in self.performance_history 
271 |                                           if h.get('component') == 'compression_engine'
272 |                                           and h.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=1)])
273 |             }
274 |         }
275 |     
276 |     async def _check_forgetting_engine_health(self) -> Dict[str, Any]:
277 |         """Check forgetting engine health."""
278 |         return {
279 |             'checks': {
280 |                 'archive_storage': 'accessible',
281 |                 'relevance_thresholds': 'configured',
282 |                 'controlled_forgetting': 'safe'
283 |             },
284 |             'metrics': {
285 |                 'recent_archival_operations': len([h for h in self.performance_history 
286 |                                                  if h.get('component') == 'forgetting_engine'
287 |                                                  and h.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=1)])
288 |             }
289 |         }
290 |     
291 |     async def _check_scheduler_health(self) -> Dict[str, Any]:
292 |         """Check scheduler health (currently static placeholder checks)."""
293 |         return {
294 |             'checks': {
295 |                 'scheduler_running': 'active',
296 |                 'job_scheduling': 'functional',
297 |                 'cron_expressions': 'valid'
298 |             },
299 |             'metrics': {
300 |                 'scheduled_jobs': 'configured',
301 |                 'last_execution': 'recent'
302 |             }
303 |         }
304 |     
305 |     async def _check_storage_backend_health(self) -> Dict[str, Any]:
306 |         """Check storage backend health (currently static placeholder values)."""
307 |         return {
308 |             'checks': {
309 |                 'storage_connection': 'connected',
310 |                 'read_operations': 'functional', 
311 |                 'write_operations': 'functional',
312 |                 'backup_integrity': 'verified'
313 |             },
314 |             'metrics': {
315 |                 'response_time_ms': 'normal',
316 |                 'storage_utilization': 'optimal'
317 |             }
318 |         }
319 |     
320 |     async def _generate_health_recommendations(self) -> List[str]:
321 |         """Generate health recommendations based on current system state."""
322 |         recommendations = []
323 |         
324 |         # Check error rates
325 |         recent_errors = len([e for e in self.error_history 
326 |                            if e.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=24)])
327 |         
328 |         if recent_errors > 10:
329 |             recommendations.append("High error rate detected. Consider reviewing consolidation configuration.")
330 |         
331 |         # Check performance metrics
332 |         if 'average_duration_seconds' in self.metrics:
333 |             duration = self.metrics['average_duration_seconds'].value
334 |             if duration > 300:
335 |                 recommendations.append("Consolidation operations are taking longer than expected. Consider optimizing memory processing.")
336 |         
337 |         # Check active alerts
338 |         critical_alerts = [a for a in self.alerts if not a.resolved and a.severity == HealthStatus.CRITICAL]
339 |         if critical_alerts:
340 |             recommendations.append("Critical alerts detected. Immediate attention required.")
341 |         
342 |         # Check storage health
343 |         if 'storage_response_time' in self.metrics:
344 |             response_time = self.metrics['storage_response_time'].value
345 |             if response_time > 5.0:
346 |                 recommendations.append("Storage backend response time is elevated. Check database performance.")
347 |         
348 |         return recommendations
349 |     
350 |     def record_consolidation_performance(self, time_horizon: str, duration: float, 
351 |                                        memories_processed: int, success: bool, 
352 |                                        errors: List[str] = None):
353 |         """Record performance metrics from a consolidation run."""
354 |         entry = {
355 |             'timestamp': datetime.now(),
356 |             'time_horizon': time_horizon,
357 |             'duration_seconds': duration,
358 |             'memories_processed': memories_processed,
359 |             'success': success,
360 |             'errors': errors or [],
361 |             'memories_per_second': memories_processed / duration if duration > 0 else 0
362 |         }
363 |         
364 |         self.performance_history.append(entry)
365 |         
366 |         # Trim history to max size
367 |         if len(self.performance_history) > self.max_history_entries:
368 |             self.performance_history = self.performance_history[-self.max_history_entries:]
369 |         
370 |         # Update metrics
371 |         self._update_performance_metrics()
372 |         
373 |         # Check for alerts
374 |         if not success or errors:
375 |             self._create_alert(
376 |                 component='consolidator',
377 |                 severity=HealthStatus.DEGRADED if success else HealthStatus.UNHEALTHY,
378 |                 message=f"Consolidation issues detected: {', '.join((errors or ['unspecified'])[:3])}"
379 |             )
380 |     
381 |     def record_error(self, component: str, error: Exception, context: Dict[str, Any] = None):
382 |         """Record an error in the consolidation system.""" 
383 |         error_entry = {
384 |             'timestamp': datetime.now(),
385 |             'component': component,
386 |             'error_type': type(error).__name__,
387 |             'error_message': str(error),
388 |             'traceback': traceback.format_exc(),
389 |             'context': context or {}
390 |         }
391 |         
392 |         self.error_history.append(error_entry)
393 |         
394 |         # Trim error history
395 |         if len(self.error_history) > self.max_history_entries:
396 |             self.error_history = self.error_history[-self.max_history_entries:]
397 |         
398 |         # Create alert for serious errors
399 |         if isinstance(error, ConsolidationError):
400 |             severity = HealthStatus.UNHEALTHY
401 |         else:
402 |             severity = HealthStatus.DEGRADED
403 |         
404 |         self._create_alert(
405 |             component=component,
406 |             severity=severity,
407 |             message=f"{type(error).__name__}: {str(error)}"
408 |         )
409 |         
410 |         self.logger.error(f"Error in {component}: {error}", exc_info=True)
411 |     
412 |     def _update_performance_metrics(self):
413 |         """Update performance metrics based on recent data."""
414 |         now = datetime.now()
415 |         recent_cutoff = now - timedelta(hours=24)
416 |         
417 |         # Get recent performance data
418 |         recent_runs = [r for r in self.performance_history if r['timestamp'] > recent_cutoff]
419 |         
420 |         if not recent_runs:
421 |             return
422 |         
423 |         # Calculate success rate
424 |         successful_runs = [r for r in recent_runs if r['success']]
425 |         success_rate = len(successful_runs) / len(recent_runs)
426 |         
427 |         self.metrics['consolidation_success_rate'] = HealthMetric(
428 |             name='consolidation_success_rate',
429 |             value=success_rate,
430 |             status=self._get_status_for_metric('consolidation_success_rate', success_rate),
431 |             message=f"{len(successful_runs)}/{len(recent_runs)} consolidations successful"
432 |         )
433 |         
434 |         # Calculate average duration
435 |         avg_duration = sum(r['duration_seconds'] for r in recent_runs) / len(recent_runs)
436 |         
437 |         self.metrics['average_duration_seconds'] = HealthMetric(
438 |             name='average_duration_seconds',
439 |             value=avg_duration,
440 |             status=self._get_status_for_metric('average_duration_seconds', avg_duration),
441 |             message=f"Average consolidation duration: {avg_duration:.1f}s"
442 |         )
443 |         
444 |         # Calculate processing rate
445 |         total_memories = sum(r['memories_processed'] for r in recent_runs)
446 |         total_duration = sum(r['duration_seconds'] for r in recent_runs)
447 |         processing_rate = total_memories / total_duration if total_duration > 0 else 0
448 |         
449 |         self.metrics['memory_processing_rate'] = HealthMetric(
450 |             name='memory_processing_rate',
451 |             value=processing_rate,
452 |             status=self._get_status_for_metric('memory_processing_rate', processing_rate),
453 |             message=f"Processing rate: {processing_rate:.2f} memories/second"
454 |         )
455 |         
456 |         # Calculate error rate
457 |         recent_error_cutoff = now - timedelta(hours=1)
458 |         recent_errors = [e for e in self.error_history if e['timestamp'] > recent_error_cutoff]
459 |         error_rate = len(recent_errors) / max(len(recent_runs), 1)
460 |         
461 |         self.metrics['error_rate'] = HealthMetric(
462 |             name='error_rate',
463 |             value=error_rate,
464 |             status=self._get_status_for_metric('error_rate', error_rate),
465 |             message=f"Error rate: {error_rate:.2f} errors per consolidation"
466 |         )
467 |     
468 |     def _get_status_for_metric(self, metric_name: str, value: float) -> HealthStatus:
469 |         """Determine health status for a metric value."""
470 |         if metric_name not in self.thresholds:
471 |             return HealthStatus.HEALTHY
472 |         
473 |         thresholds = self.thresholds[metric_name]
474 |         
475 |         # For error rate and duration, higher is worse
476 |         if metric_name in ['error_rate', 'average_duration_seconds', 'storage_response_time']:
477 |             if value >= thresholds['critical']:
478 |                 return HealthStatus.CRITICAL
479 |             elif value >= thresholds['warning']:
480 |                 return HealthStatus.DEGRADED
481 |             else:
482 |                 return HealthStatus.HEALTHY
483 |         
484 |         # For success rate and processing rate, lower is worse
485 |         else:
486 |             if value <= thresholds['critical']:
487 |                 return HealthStatus.CRITICAL
488 |             elif value <= thresholds['warning']:
489 |                 return HealthStatus.DEGRADED
490 |             else:
491 |                 return HealthStatus.HEALTHY
492 |     
493 |     def _create_alert(self, component: str, severity: HealthStatus, message: str):
494 |         """Create a new health alert."""
495 |         alert_id = f"{component}_{severity.value}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
496 |         
497 |         alert = HealthAlert(
498 |             alert_id=alert_id,
499 |             component=component,
500 |             severity=severity,
501 |             message=message
502 |         )
503 |         
504 |         self.alerts.append(alert)
505 |         
506 |         # Trim alerts to reasonable size
507 |         if len(self.alerts) > 100:
508 |             self.alerts = self.alerts[-100:]
509 |         
510 |         self.logger.warning(f"Health alert [{severity.value}] for {component}: {message}")
511 |     
512 |     def resolve_alert(self, alert_id: str):
513 |         """Mark an alert as resolved."""
514 |         for alert in self.alerts:
515 |             if alert.alert_id == alert_id and not alert.resolved:
516 |                 alert.resolved = True
517 |                 alert.resolution_timestamp = datetime.now()
518 |                 self.logger.info(f"Alert {alert_id} resolved")
519 |                 break
520 |     
521 |     async def get_health_summary(self) -> Dict[str, Any]:
522 |         """Get a summary of consolidation system health."""
523 |         health = await self.check_overall_health()
524 |         
525 |         return {
526 |             'overall_status': health['status'],
527 |             'timestamp': health['timestamp'],
528 |             'component_count': len(health['components']),
529 |             'healthy_components': len([c for c in health['components'].values() 
530 |                                      if c.get('status') == 'healthy']),
531 |             'active_alerts': len(health['alerts']),  # already filtered to unresolved
532 |             'critical_alerts': len([a for a in health['alerts']
533 |                                   if a.get('severity') == 'critical']),
534 |             'recommendations_count': len(health.get('recommendations', [])),
535 |             'recent_errors': len([e for e in self.error_history 
536 |                                 if e.get('timestamp', datetime.min) > datetime.now() - timedelta(hours=24)])
537 |         }
```
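
The recording and inspection APIs above are meant to be driven from the consolidation loop itself: record each run, record any exception, then read back an aggregated summary. A minimal sketch of that flow, assuming `monitor` is an initialized instance of the health monitor class defined in this module and `run_consolidation` is a hypothetical stand-in for the real consolidator entry point:

```python
import time

async def consolidate_and_report(monitor, run_consolidation):
    """Run one consolidation pass and report its outcome to the monitor.

    `run_consolidation` is a hypothetical coroutine returning the number of
    memories processed; swap in the real consolidator call.
    """
    start = time.monotonic()
    errors, success, memories_processed = [], True, 0
    try:
        memories_processed = await run_consolidation()
    except Exception as exc:
        success = False
        errors.append(str(exc))
        monitor.record_error('consolidator', exc)  # logs traceback and raises an alert

    monitor.record_consolidation_performance(
        time_horizon='daily',
        duration=time.monotonic() - start,
        memories_processed=memories_processed,
        success=success,
        errors=errors,
    )
    # Aggregated view: overall status, component counts, alert totals
    return await monitor.get_health_summary()
```

Note that `get_health_summary` is async because it re-runs `check_overall_health`, so the per-component checks (and their TTL cache) are exercised on every summary call.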

--------------------------------------------------------------------------------
/tests/consolidation/test_forgetting.py:
--------------------------------------------------------------------------------

```python
  1 | """Unit tests for the controlled forgetting engine."""
  2 | 
  3 | import pytest
  4 | import os
  5 | import json
  6 | import tempfile
  7 | from datetime import datetime, timedelta
  8 | from pathlib import Path
  9 | 
 10 | from mcp_memory_service.consolidation.forgetting import (
 11 |     ControlledForgettingEngine, 
 12 |     ForgettingCandidate,
 13 |     ForgettingResult
 14 | )
 15 | from mcp_memory_service.consolidation.decay import RelevanceScore
 16 | from mcp_memory_service.models.memory import Memory
 17 | 
 18 | 
 19 | @pytest.mark.unit
 20 | class TestControlledForgettingEngine:
 21 |     """Test the controlled forgetting system."""
 22 |     
 23 |     @pytest.fixture
 24 |     def forgetting_engine(self, consolidation_config):
 25 |         return ControlledForgettingEngine(consolidation_config)
 26 |     
 27 |     @pytest.fixture
 28 |     def sample_relevance_scores(self, sample_memories):
 29 |         """Create sample relevance scores for memories."""
 30 |         scores = []
 31 |         for memory in sample_memories:
 32 |             # Create varied relevance scores
 33 |             if "critical" in memory.tags:
 34 |                 total_score = 1.5
 35 |             elif "temporary" in memory.tags:
 36 |                 total_score = 0.05  # Very low relevance
 37 |             elif "test" in memory.content:
 38 |                 total_score = 0.02  # Low quality content
 39 |             else:
 40 |                 total_score = 0.8
 41 |             
 42 |             score = RelevanceScore(
 43 |                 memory_hash=memory.content_hash,
 44 |                 total_score=total_score,
 45 |                 base_importance=1.0,
 46 |                 decay_factor=0.8,
 47 |                 connection_boost=1.0,
 48 |                 access_boost=1.0,
 49 |                 metadata={"test_score": True}
 50 |             )
 51 |             scores.append(score)
 52 |         
 53 |         return scores
 54 |     
 55 |     @pytest.mark.asyncio
 56 |     async def test_basic_forgetting_process(self, forgetting_engine, sample_memories, sample_relevance_scores):
 57 |         """Test basic forgetting process functionality."""
 58 |         access_patterns = {
 59 |             sample_memories[0].content_hash: datetime.now() - timedelta(days=100)  # Old access
 60 |         }
 61 |         
 62 |         results = await forgetting_engine.process(
 63 |             sample_memories, 
 64 |             sample_relevance_scores,
 65 |             access_patterns=access_patterns,
 66 |             time_horizon="monthly"
 67 |         )
 68 |         
 69 |         assert isinstance(results, list)
 70 |         assert all(isinstance(result, ForgettingResult) for result in results)
 71 |         
 72 |         # Check that some memories were processed for forgetting
 73 |         actions = [result.action_taken for result in results]
 74 |         valid_actions = {"archived", "compressed", "deleted", "skipped"}
 75 |         assert all(action in valid_actions for action in actions)
 76 |     
 77 |     @pytest.mark.asyncio
 78 |     async def test_identify_forgetting_candidates(self, forgetting_engine, sample_memories, sample_relevance_scores):
 79 |         """Test identification of forgetting candidates."""
 80 |         access_patterns = {}
 81 |         
 82 |         candidates = await forgetting_engine._identify_forgetting_candidates(
 83 |             sample_memories,
 84 |             {score.memory_hash: score for score in sample_relevance_scores},
 85 |             access_patterns,
 86 |             "monthly"
 87 |         )
 88 |         
 89 |         assert isinstance(candidates, list)
 90 |         assert all(isinstance(candidate, ForgettingCandidate) for candidate in candidates)
 91 |         
 92 |         # Check candidate properties
 93 |         for candidate in candidates:
 94 |             assert isinstance(candidate.memory, Memory)
 95 |             assert isinstance(candidate.relevance_score, RelevanceScore)
 96 |             assert isinstance(candidate.forgetting_reasons, list)
 97 |             assert len(candidate.forgetting_reasons) > 0
 98 |             assert candidate.archive_priority in [1, 2, 3]
 99 |             assert isinstance(candidate.can_be_deleted, bool)
100 |     
101 |     @pytest.mark.asyncio
102 |     async def test_protected_memory_exclusion(self, forgetting_engine, sample_memories, sample_relevance_scores):
103 |         """Test that protected memories are excluded from forgetting."""
104 |         # Find critical memory (should be protected)
105 |         critical_memory = next((m for m in sample_memories if "critical" in m.tags), None)
106 |         
107 |         if critical_memory:
108 |             candidates = await forgetting_engine._identify_forgetting_candidates(
109 |                 [critical_memory],
110 |                 {critical_memory.content_hash: sample_relevance_scores[0]},  # Use first score
111 |                 {},
112 |                 "yearly"
113 |             )
114 |             
115 |             # Critical memory should not be a candidate for forgetting
116 |             assert len(candidates) == 0
117 |     
118 |     @pytest.mark.asyncio
119 |     async def test_low_relevance_identification(self, forgetting_engine):
120 |         """Test identification of low relevance memories."""
121 |         now = datetime.now()
122 |         
123 |         low_relevance_memory = Memory(
124 |             content="Low relevance test content",
125 |             content_hash="low_relevance",
126 |             tags=["test"],
127 |             embedding=[0.1] * 320,
128 |             created_at=(now - timedelta(days=100)).timestamp(),
129 |             updated_at=(now - timedelta(days=100)).timestamp()  # created and last accessed long ago
130 |         )
131 |         
132 |         low_score = RelevanceScore(
133 |             memory_hash="low_relevance",
134 |             total_score=0.05,  # Below threshold
135 |             base_importance=1.0,
136 |             decay_factor=0.1,
137 |             connection_boost=1.0,
138 |             access_boost=1.0,
139 |             metadata={}
140 |         )
141 |         
142 |         candidates = await forgetting_engine._identify_forgetting_candidates(
143 |             [low_relevance_memory],
144 |             {"low_relevance": low_score},
145 |             {},
146 |             "monthly"
147 |         )
148 |         
149 |         assert len(candidates) > 0
150 |         candidate = candidates[0]
151 |         assert "low_relevance" in candidate.forgetting_reasons
152 |     
153 |     @pytest.mark.asyncio
154 |     async def test_old_access_identification(self, forgetting_engine):
155 |         """Test identification of memories with old access patterns."""
156 |         now = datetime.now()
157 |         
158 |         old_access_memory = Memory(
159 |             content="Memory with old access",
160 |             content_hash="old_access",
161 |             tags=["test"],
162 |             embedding=[0.1] * 320,
163 |             created_at=now.timestamp()
164 |         )
165 |         
166 |         score = RelevanceScore(
167 |             memory_hash="old_access",
168 |             total_score=0.5,  # Decent relevance
169 |             base_importance=1.0,
170 |             decay_factor=0.8,
171 |             connection_boost=1.0,
172 |             access_boost=1.0,
173 |             metadata={}
174 |         )
175 |         
176 |         # Very old access
177 |         old_access_patterns = {
178 |             "old_access": now - timedelta(days=120)  # Older than threshold
179 |         }
180 |         
181 |         candidates = await forgetting_engine._identify_forgetting_candidates(
182 |             [old_access_memory],
183 |             {"old_access": score},
184 |             old_access_patterns,
185 |             "monthly"
186 |         )
187 |         
188 |         assert len(candidates) > 0
189 |         candidate = candidates[0]
190 |         assert "old_access" in candidate.forgetting_reasons
191 |     
192 |     @pytest.mark.asyncio
193 |     async def test_temporary_memory_expiration(self, forgetting_engine):
194 |         """Test identification of expired temporary memories."""
195 |         now = datetime.now()
196 |         
197 |         expired_temp_memory = Memory(
198 |             content="Expired temporary memory",
199 |             content_hash="expired_temp",
200 |             tags=["temporary"],
201 |             memory_type="temporary",
202 |             embedding=[0.1] * 320,
203 |             created_at=(now - timedelta(days=10)).timestamp()  # Older than 7 days
204 |         )
205 |         
206 |         score = RelevanceScore(
207 |             memory_hash="expired_temp",
208 |             total_score=0.8,  # Good relevance, but temporary
209 |             base_importance=1.0,
210 |             decay_factor=0.8,
211 |             connection_boost=1.0,
212 |             access_boost=1.0,
213 |             metadata={}
214 |         )
215 |         
216 |         candidates = await forgetting_engine._identify_forgetting_candidates(
217 |             [expired_temp_memory],
218 |             {"expired_temp": score},
219 |             {},
220 |             "monthly"
221 |         )
222 |         
223 |         assert len(candidates) > 0
224 |         candidate = candidates[0]
225 |         assert "expired_temporary" in candidate.forgetting_reasons
226 |         assert candidate.can_be_deleted is True
227 |     
228 |     @pytest.mark.asyncio
229 |     async def test_low_quality_content_detection(self, forgetting_engine):
230 |         """Test detection of low quality content."""
231 |         # Very short content
232 |         short_memory = Memory(
233 |             content="test",
234 |             content_hash="short",
235 |             tags=["test"],
236 |             embedding=[0.1] * 320,
237 |             created_at=datetime.now().timestamp()
238 |         )
239 |         
240 |         # Repetitive content
241 |         repetitive_memory = Memory(
242 |             content="test test test test test test",
243 |             content_hash="repetitive",
244 |             tags=["test"],
245 |             embedding=[0.1] * 320,
246 |             created_at=datetime.now().timestamp()
247 |         )
248 |         
249 |         # Mostly non-alphabetic
250 |         non_alpha_memory = Memory(
251 |             content="!@#$%^&*()_+{}|<>?",
252 |             content_hash="non_alpha",
253 |             tags=["test"],
254 |             embedding=[0.1] * 320,
255 |             created_at=datetime.now().timestamp()
256 |         )
257 |         
258 |         test_memories = [short_memory, repetitive_memory, non_alpha_memory]
259 |         
260 |         for memory in test_memories:
261 |             is_low_quality = forgetting_engine._is_low_quality_content(memory)
262 |             assert is_low_quality is True
263 |     
264 |     @pytest.mark.asyncio
265 |     async def test_duplicate_detection(self, forgetting_engine, sample_memories):
266 |         """Test detection of potential duplicate content."""
267 |         # Create a memory that's very similar to an existing one
268 |         existing_memory = sample_memories[0]
269 |         duplicate_memory = Memory(
270 |             content=existing_memory.content + " duplicate",  # Very similar
271 |             content_hash="duplicate_test",
272 |             tags=existing_memory.tags,
273 |             embedding=existing_memory.embedding,
274 |             created_at=datetime.now().timestamp()
275 |         )
276 |         
277 |         test_memories = sample_memories + [duplicate_memory]
278 |         
279 |         is_duplicate = forgetting_engine._appears_to_be_duplicate(duplicate_memory, test_memories)
280 |         # This might not always detect as duplicate due to the simple algorithm
281 |         # Just ensure the method runs without error
282 |         assert isinstance(is_duplicate, bool)
283 |     
284 |     @pytest.mark.asyncio
285 |     async def test_archive_memory(self, forgetting_engine):
286 |         """Test archiving a memory to filesystem."""
287 |         memory = Memory(
288 |             content="Memory to archive",
289 |             content_hash="archive_test",
290 |             tags=["test", "archive"],
291 |             embedding=[0.1] * 320,
292 |             created_at=datetime.now().timestamp()
293 |         )
294 |         
295 |         score = RelevanceScore(
296 |             memory_hash="archive_test",
297 |             total_score=0.3,
298 |             base_importance=1.0,
299 |             decay_factor=0.5,
300 |             connection_boost=1.0,
301 |             access_boost=1.0,
302 |             metadata={}
303 |         )
304 |         
305 |         candidate = ForgettingCandidate(
306 |             memory=memory,
307 |             relevance_score=score,
308 |             forgetting_reasons=["test_archival"],
309 |             archive_priority=2,
310 |             can_be_deleted=False
311 |         )
312 |         
313 |         result = await forgetting_engine._archive_memory(candidate)
314 |         
315 |         assert isinstance(result, ForgettingResult)
316 |         assert result.action_taken == "archived"
317 |         assert result.archive_path is not None
318 |         assert os.path.exists(result.archive_path)
319 |         
320 |         # Check archive file content
321 |         with open(result.archive_path, 'r') as f:
322 |             archive_data = json.load(f)
323 |         
324 |         assert "memory" in archive_data
325 |         assert "relevance_score" in archive_data
326 |         assert "forgetting_metadata" in archive_data
327 |         assert archive_data["memory"]["content_hash"] == "archive_test"
328 |     
329 |     @pytest.mark.asyncio
330 |     async def test_compress_memory(self, forgetting_engine):
331 |         """Test compressing a memory."""
332 |         memory = Memory(
333 |             content="This is a longer memory content that should be compressed to preserve key information while reducing size",
334 |             content_hash="compress_test",
335 |             tags=["test", "compression"],
336 |             embedding=[0.1] * 320,
337 |             created_at=datetime.now().timestamp()
338 |         )
339 |         
340 |         score = RelevanceScore(
341 |             memory_hash="compress_test",
342 |             total_score=0.4,
343 |             base_importance=1.0,
344 |             decay_factor=0.6,
345 |             connection_boost=1.0,
346 |             access_boost=1.0,
347 |             metadata={}
348 |         )
349 |         
350 |         candidate = ForgettingCandidate(
351 |             memory=memory,
352 |             relevance_score=score,
353 |             forgetting_reasons=["test_compression"],
354 |             archive_priority=3,
355 |             can_be_deleted=False
356 |         )
357 |         
358 |         result = await forgetting_engine._compress_memory(candidate)
359 |         
360 |         assert isinstance(result, ForgettingResult)
361 |         assert result.action_taken == "compressed"
362 |         assert result.compressed_version is not None
363 |         assert result.archive_path is not None
364 |         
365 |         # Check compressed memory
366 |         compressed = result.compressed_version
367 |         assert compressed.memory_type == "compressed"
368 |         assert "compressed" in compressed.tags
369 |         assert len(compressed.content) <= len(memory.content)
370 |         assert "original_hash" in compressed.metadata
371 |         assert "compression_ratio" in compressed.metadata
372 |     
373 |     @pytest.mark.asyncio
374 |     async def test_delete_memory(self, forgetting_engine):
375 |         """Test deleting a memory with backup."""
376 |         memory = Memory(
377 |             content="Memory to delete",
378 |             content_hash="delete_test",
379 |             tags=["test", "delete"],
380 |             embedding=[0.1] * 320,
381 |             created_at=datetime.now().timestamp()
382 |         )
383 |         
384 |         score = RelevanceScore(
385 |             memory_hash="delete_test",
386 |             total_score=0.01,
387 |             base_importance=1.0,
388 |             decay_factor=0.1,
389 |             connection_boost=1.0,
390 |             access_boost=1.0,
391 |             metadata={}
392 |         )
393 |         
394 |         candidate = ForgettingCandidate(
395 |             memory=memory,
396 |             relevance_score=score,
397 |             forgetting_reasons=["potential_duplicate"],
398 |             archive_priority=1,
399 |             can_be_deleted=True
400 |         )
401 |         
402 |         result = await forgetting_engine._delete_memory(candidate)
403 |         
404 |         assert isinstance(result, ForgettingResult)
405 |         assert result.action_taken == "deleted"
406 |         assert result.archive_path is not None  # Backup should exist
407 |         assert os.path.exists(result.archive_path)
408 |         
409 |         # Check backup file
410 |         with open(result.archive_path, 'r') as f:
411 |             backup_data = json.load(f)
412 |         
413 |         assert "memory" in backup_data
414 |         assert "deletion_metadata" in backup_data
415 |         assert backup_data["memory"]["content_hash"] == "delete_test"
416 |     
417 |     @pytest.mark.asyncio
418 |     async def test_memory_recovery(self, forgetting_engine):
419 |         """Test recovery of forgotten memories."""
420 |         # First archive a memory
421 |         memory = Memory(
422 |             content="Memory for recovery test",
423 |             content_hash="recovery_test",
424 |             tags=["test", "recovery"],
425 |             embedding=[0.1] * 320,
426 |             created_at=datetime.now().timestamp()
427 |         )
428 |         
429 |         score = RelevanceScore(
430 |             memory_hash="recovery_test",
431 |             total_score=0.2,
432 |             base_importance=1.0,
433 |             decay_factor=0.4,
434 |             connection_boost=1.0,
435 |             access_boost=1.0,
436 |             metadata={}
437 |         )
438 |         
439 |         candidate = ForgettingCandidate(
440 |             memory=memory,
441 |             relevance_score=score,
442 |             forgetting_reasons=["test_recovery"],
443 |             archive_priority=2,
444 |             can_be_deleted=False
445 |         )
446 |         
447 |         # Archive the memory
448 |         await forgetting_engine._archive_memory(candidate)
449 |         
450 |         # Now try to recover it
451 |         recovered_memory = await forgetting_engine.recover_memory("recovery_test")
452 |         
453 |         assert recovered_memory is not None
454 |         assert isinstance(recovered_memory, Memory)
455 |         assert recovered_memory.content_hash == "recovery_test"
456 |         assert recovered_memory.content == memory.content
457 |     
458 |     @pytest.mark.asyncio
459 |     async def test_forgetting_statistics(self, forgetting_engine, sample_memories, sample_relevance_scores):
460 |         """Test getting forgetting statistics."""
461 |         # Process some memories to generate statistics
462 |         access_patterns = {
463 |             sample_memories[0].content_hash: datetime.now() - timedelta(days=100)
464 |         }
465 |         
466 |         await forgetting_engine.process(
467 |             sample_memories[:3],  # Use subset for faster test
468 |             sample_relevance_scores[:3],
469 |             access_patterns=access_patterns,
470 |             time_horizon="monthly"
471 |         )
472 |         
473 |         stats = await forgetting_engine.get_forgetting_statistics()
474 |         
475 |         assert isinstance(stats, dict)
476 |         assert "total_archived" in stats
477 |         assert "total_compressed" in stats
478 |         assert "total_deleted" in stats
479 |         assert "archive_size_bytes" in stats
480 |         
481 |         # Values should be non-negative
482 |         assert stats["total_archived"] >= 0
483 |         assert stats["total_compressed"] >= 0
484 |         assert stats["total_deleted"] >= 0
485 |         assert stats["archive_size_bytes"] >= 0
486 |     
487 |     @pytest.mark.asyncio
488 |     async def test_create_compressed_content(self, forgetting_engine):
489 |         """Test creation of compressed content."""
490 |         original_content = """
491 |         This is a longer piece of content that contains multiple sentences. 
492 |         It has important information in the first sentence. 
493 |         The middle part contains additional details and context.
494 |         The final sentence wraps up the content nicely.
495 |         """
496 |         
497 |         compressed = forgetting_engine._create_compressed_content(original_content)
498 |         
499 |         assert isinstance(compressed, str)
500 |         assert len(compressed) <= len(original_content)
501 |         assert len(compressed) > 0
502 |         
503 |         # Should contain compression indicator if significantly compressed
504 |         if len(compressed) < len(original_content) * 0.8:
505 |             assert "[Compressed]" in compressed
506 |     
507 |     @pytest.mark.asyncio
508 |     async def test_extract_important_terms(self, forgetting_engine):
509 |         """Test extraction of important terms from text."""
510 |         text = """
511 |         The CamelCaseVariable is used with the API_ENDPOINT.
512 |         Visit https://example.com for documentation.
513 |         Contact [email protected] for help.
514 |         The temperature is 25.5 degrees.
515 |         See "important documentation" for details.
516 |         Use snake_case_variables appropriately.
517 |         """
518 |         
519 |         terms = forgetting_engine._extract_important_terms(text)
520 |         
521 |         assert isinstance(terms, list)
522 |         assert len(terms) <= 10  # Should be limited
523 |         
524 |         # Should extract various types of important terms
525 |         terms_lower = [term.lower() for term in terms]
526 |         term_str = " ".join(terms_lower)
527 |
528 |         # Exact matches vary by implementation; just verify extraction found something
529 |         assert len(terms) > 0 and term_str  # at least some terms should be found
530 |     
531 |     @pytest.mark.asyncio
532 |     async def test_archive_directories_creation(self, temp_archive_path):
533 |         """Test that archive directories are created properly."""
534 |         config = type('Config', (), {
535 |             'relevance_threshold': 0.1,
536 |             'access_threshold_days': 90,
537 |             'archive_location': temp_archive_path
538 |         })()
539 |         
540 |         engine = ControlledForgettingEngine(config)
541 |         
542 |         # Check that directories were created
543 |         assert engine.archive_path.exists()
544 |         assert engine.daily_archive.exists()
545 |         assert engine.compressed_archive.exists()
546 |         assert engine.metadata_archive.exists()
547 |     
548 |     @pytest.mark.asyncio
549 |     async def test_empty_input_handling(self, forgetting_engine):
550 |         """Test handling of empty inputs."""
551 |         results = await forgetting_engine.process([], [])
552 |         assert results == []
553 |     
554 |     @pytest.mark.asyncio
555 |     async def test_time_horizon_filtering(self, forgetting_engine, sample_memories, sample_relevance_scores):
556 |         """Test that time horizon affects forgetting behavior."""
557 |         access_patterns = {
558 |             sample_memories[0].content_hash: datetime.now() - timedelta(days=100)
559 |         }
560 |         
561 |         # Test with different time horizons
562 |         daily_results = await forgetting_engine.process(
563 |             sample_memories[:2],
564 |             sample_relevance_scores[:2],
565 |             access_patterns=access_patterns,
566 |             time_horizon="daily"
567 |         )
568 |         
569 |         yearly_results = await forgetting_engine.process(
570 |             sample_memories[:2],
571 |             sample_relevance_scores[:2],
572 |             access_patterns=access_patterns,
573 |             time_horizon="yearly"
574 |         )
575 |         
576 |         # Different time horizons may produce different results
577 |         # At minimum, both should handle the input without errors
578 |         assert isinstance(daily_results, list)
579 |         assert isinstance(yearly_results, list)
580 |     
581 |     @pytest.mark.asyncio
582 |     async def test_metadata_entry_creation(self, forgetting_engine):
583 |         """Test creation of metadata log entries."""
584 |         memory = Memory(
585 |             content="Test memory for metadata",
586 |             content_hash="metadata_test",
587 |             tags=["test"],
588 |             embedding=[0.1] * 320,
589 |             created_at=datetime.now().timestamp()
590 |         )
591 |         
592 |         archive_path = forgetting_engine.metadata_archive / "test_archive.json"
593 |         
594 |         await forgetting_engine._create_metadata_entry(memory, archive_path, "archived")
595 |         
596 |         # Check that log file was created
597 |         log_file = forgetting_engine.metadata_archive / "forgetting_log.jsonl"
598 |         assert log_file.exists()
599 |         
600 |         # Check log content
601 |         with open(log_file, 'r') as f:
602 |             log_content = f.read().strip()
603 |         
604 |         assert len(log_content) > 0
605 |         
606 |         # Parse the JSON line
607 |         log_entry = json.loads(log_content.split('\n')[-1])  # Get last line
608 |         assert log_entry["memory_hash"] == "metadata_test"
609 |         assert log_entry["action"] == "archived"
```
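
These tests depend on shared fixtures — `consolidation_config`, `sample_memories`, and `temp_archive_path` — defined in the suite's conftest, which is not shown here. For orientation, a minimal sketch of what the two configuration-related fixtures could look like, with field names taken from `test_archive_directories_creation` above (the real conftest may define more fields):

```python
import tempfile
import pytest

@pytest.fixture
def temp_archive_path():
    # Isolated archive directory so forgetting tests never touch real archives
    with tempfile.TemporaryDirectory() as tmpdir:
        yield tmpdir

@pytest.fixture
def consolidation_config(temp_archive_path):
    # Attribute-bag config mirroring the fields the engine reads in these tests
    return type('Config', (), {
        'relevance_threshold': 0.1,
        'access_threshold_days': 90,
        'archive_location': temp_archive_path,
    })()
```

With fixtures in place, this module can be run in isolation, e.g. `pytest tests/consolidation/test_forgetting.py -m unit`.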

--------------------------------------------------------------------------------
/claude-hooks/tests/phase2-integration-test.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | 
  3 | /**
  4 |  * Phase 2 Integration Tests
  5 |  * Comprehensive testing for intelligent context updates and conversation awareness
  6 |  */
  7 | 
  8 | const path = require('path');
  9 | 
 10 | // Import Phase 2 components
 11 | const { analyzeConversation, detectTopicChanges } = require('../utilities/conversation-analyzer');
 12 | const { scoreMemoryRelevance } = require('../utilities/memory-scorer');
 13 | const { SessionTracker } = require('../utilities/session-tracker');
 14 | const { DynamicContextUpdater } = require('../utilities/dynamic-context-updater');
 15 | 
 16 | // Test utilities
 17 | function createMockMemory(content, tags = [], createdDaysAgo = 0) {
 18 |     const created = new Date();
 19 |     created.setDate(created.getDate() - createdDaysAgo);
 20 |     
 21 |     return {
 22 |         content: content,
 23 |         content_hash: `hash-${Math.random().toString(36).slice(2, 11)}`,
 24 |         tags: tags,
 25 |         created_at: created.toISOString(),
 26 |         memory_type: 'note'
 27 |     };
 28 | }
 29 | 
 30 | function createMockProjectContext() {
 31 |     return {
 32 |         name: 'mcp-memory-service',
 33 |         type: 'Multi-language Project',
 34 |         languages: ['javascript', 'python'],
 35 |         frameworks: ['node.js', 'fastapi'],
 36 |         tools: ['git', 'npm', 'pip'],
 37 |         confidence: 0.95
 38 |     };
 39 | }
 40 | 
 41 | // Test suite
 42 | class Phase2TestSuite {
 43 |     constructor() {
 44 |         this.testResults = [];
 45 |         this.totalTests = 0;
 46 |         this.passedTests = 0;
 47 |     }
 48 | 
 49 |     async runTest(testName, testFunction) {
 50 |         console.log(`\n🧪 Testing: ${testName}`);
 51 |         this.totalTests++;
 52 | 
 53 |         try {
 54 |             const result = await testFunction();
 55 |             if (result === true || result === undefined) {
 56 |                 console.log(`✅ PASS: ${testName}`);
 57 |                 this.passedTests++;
 58 |                 this.testResults.push({ name: testName, status: 'PASS' });
 59 |             } else {
 60 |                 console.log(`❌ FAIL: ${testName} - ${result}`);
 61 |                 this.testResults.push({ name: testName, status: 'FAIL', reason: result });
 62 |             }
 63 |         } catch (error) {
 64 |             console.log(`❌ ERROR: ${testName} - ${error.message}`);
 65 |             this.testResults.push({ name: testName, status: 'ERROR', error: error.message });
 66 |         }
 67 |     }
 68 | 
 69 |     async runAllTests() {
 70 |         console.log('🚀 Phase 2 Integration Tests - Intelligent Context Updates');
 71 |         console.log('Testing conversation awareness, dynamic memory loading, and cross-session intelligence\n');
 72 | 
 73 |         // Conversation Analysis Tests
 74 |         await this.runTest('Conversation Analysis - Topic Detection', this.testTopicDetection);
 75 |         await this.runTest('Conversation Analysis - Entity Extraction', this.testEntityExtraction);
 76 |         await this.runTest('Conversation Analysis - Intent Detection', this.testIntentDetection);
 77 |         await this.runTest('Conversation Analysis - Code Context Detection', this.testCodeContextDetection);
 78 | 
 79 |         // Topic Change Detection Tests
 80 |         await this.runTest('Topic Change Detection - Significant Changes', this.testSignificantTopicChanges);
 81 |         await this.runTest('Topic Change Detection - Minor Changes', this.testMinorTopicChanges);
 82 | 
 83 |         // Enhanced Memory Scoring Tests
 84 |         await this.runTest('Enhanced Memory Scoring - Conversation Context', this.testConversationContextScoring);
 85 |         await this.runTest('Enhanced Memory Scoring - Weight Adjustment', this.testWeightAdjustment);
 86 | 
 87 |         // Session Tracking Tests  
 88 |         await this.runTest('Session Tracking - Session Creation', this.testSessionCreation);
 89 |         await this.runTest('Session Tracking - Conversation Threading', this.testConversationThreading);
 90 |         await this.runTest('Session Tracking - Cross-session Context', this.testCrossSessionContext);
 91 | 
 92 |         // Dynamic Context Update Tests
 93 |         await this.runTest('Dynamic Context Update - Update Triggering', this.testUpdateTriggering);
 94 |         await this.runTest('Dynamic Context Update - Rate Limiting', this.testRateLimiting);
 95 |         await this.runTest('Dynamic Context Update - Context Formatting', this.testContextFormatting);
 96 | 
 97 |         // Integration Tests
 98 |         await this.runTest('Full Integration - Conversation Flow', this.testFullConversationFlow);
 99 | 
100 |         this.printSummary();
101 |     }
102 | 
103 |     // Test implementations
104 |     async testTopicDetection() {
105 |         const conversationText = `
106 |         I'm having issues with the database performance. The SQLite queries are running slowly
107 |         and I think we need to optimize the memory service. Let's debug this architecture problem
108 |         and implement a better caching solution.
109 |         `;
110 | 
111 |         const analysis = analyzeConversation(conversationText);
112 |         
113 |         const topicNames = analysis.topics.map(t => t.name);
114 |         const hasDbTopic = topicNames.includes('database');
115 |         const hasDebuggingTopic = topicNames.includes('debugging');
116 |         const hasArchTopic = topicNames.includes('architecture');
117 | 
118 |         if (!hasDbTopic) return 'Database topic not detected';
119 |         if (!hasDebuggingTopic) return 'Debugging topic not detected';
120 |         if (!hasArchTopic) return 'Architecture topic not detected';
121 |         if (analysis.topics.length === 0) return 'No topics detected';
122 | 
123 |         console.log(`  Detected ${analysis.topics.length} topics: ${topicNames.join(', ')}`);
124 |         return true;
125 |     }
126 | 
127 |     async testEntityExtraction() {
128 |         const conversationText = `
129 |         We're using JavaScript with React for the frontend and Python with FastAPI for the backend.
130 |         The database is PostgreSQL and we're deploying on AWS with Docker containers.
131 |         `;
132 | 
133 |         const analysis = analyzeConversation(conversationText);
134 |         
135 |         const entityNames = analysis.entities.map(e => e.name);
136 |         const hasJS = entityNames.includes('javascript');
137 |         const hasReact = entityNames.includes('react');
138 |         const hasPython = entityNames.includes('python');
139 |         const hasFastAPI = entityNames.includes('fastapi');
140 | 
141 |         if (!hasJS) return 'JavaScript entity not detected';
142 |         if (!hasReact) return 'React entity not detected'; 
143 |         if (!hasPython) return 'Python entity not detected';
144 | 
145 |         console.log(`  Detected ${analysis.entities.length} entities: ${entityNames.join(', ')}`);
146 |         return true;
147 |     }
148 | 
149 |     async testIntentDetection() {
150 |         const conversationText = `
151 |         How do I fix this error in the authentication system? The JWT tokens are not validating
152 |         properly and users can't log in. I need to solve this problem quickly.
153 |         `;
154 | 
155 |         const analysis = analyzeConversation(conversationText);
156 |         
157 |         if (!analysis.intent) return 'Intent not detected';
158 |         if (analysis.intent.name !== 'problem-solving') {
159 |             return `Expected 'problem-solving' intent, got '${analysis.intent.name}'`;
160 |         }
161 |         if (analysis.intent.confidence < 0.5) {
162 |             return `Intent confidence too low: ${analysis.intent.confidence}`;
163 |         }
164 | 
165 |         console.log(`  Detected intent: ${analysis.intent.name} (${(analysis.intent.confidence * 100).toFixed(1)}%)`);
166 |         return true;
167 |     }
168 | 
169 |     async testCodeContextDetection() {
170 |         const conversationText = `
171 |         Here's the function that's causing issues:
172 |         
173 |         \`\`\`javascript
174 |         function validateToken(token) {
175 |             return jwt.verify(token, secret);
176 |         }
177 |         \`\`\`
178 |         
179 |         The error message is: "TokenExpiredError: jwt expired"
180 |         Can you help me fix this in auth.js?
181 |         `;
182 | 
183 |         const analysis = analyzeConversation(conversationText);
184 |         
185 |         if (!analysis.codeContext) return 'Code context not detected';
186 |         if (!analysis.codeContext.isCodeRelated) return 'Code relationship not detected';
187 |         if (!analysis.codeContext.hasCodeBlocks) return 'Code blocks not detected';
188 |         if (!analysis.codeContext.hasErrorMessages) return 'Error messages not detected';
189 |         if (!analysis.codeContext.hasFilePaths) return 'File paths not detected';
190 | 
191 |         console.log(`  Code context detected: languages=[${analysis.codeContext.languages.join(', ')}]`);
192 |         return true;
193 |     }
194 | 
195 |     async testSignificantTopicChanges() {
196 |         const previousAnalysis = analyzeConversation('We are implementing a new authentication system using JWT tokens.');
197 |         const currentAnalysis = analyzeConversation('Now I need to debug a database performance issue with slow queries.');
198 | 
199 |         const changes = detectTopicChanges(previousAnalysis, currentAnalysis);
200 | 
201 |         if (!changes.hasTopicShift) return 'Topic shift not detected';
202 |         if (changes.significanceScore < 0.3) {
203 |             return `Significance score too low: ${changes.significanceScore}`;
204 |         }
205 |         if (changes.newTopics.length === 0) return 'New topics not detected';
206 | 
207 |         console.log(`  Topic shift detected: score=${changes.significanceScore.toFixed(2)}, new topics=${changes.newTopics.length}`);
208 |         return true;
209 |     }
210 | 
211 |     async testMinorTopicChanges() {
212 |         const previousAnalysis = analyzeConversation('We are implementing JWT authentication.');
213 |         const currentAnalysis = analyzeConversation('Let me add better error handling to the authentication code.');
214 | 
215 |         const changes = detectTopicChanges(previousAnalysis, currentAnalysis);
216 | 
217 |         // Minor changes should have lower significance
218 |         if (changes.hasTopicShift && changes.significanceScore > 0.5) {
219 |             return `Significance score too high for minor change: ${changes.significanceScore}`;
220 |         }
221 | 
222 |         console.log(`  Minor change detected correctly: score=${changes.significanceScore.toFixed(2)}`);
223 |         return true;
224 |     }
225 | 
226 |     async testConversationContextScoring() {
227 |         const memories = [
228 |             createMockMemory('Database optimization techniques for SQLite', ['database', 'optimization'], 1),
229 |             createMockMemory('JWT authentication implementation guide', ['auth', 'jwt'], 2),
230 |             createMockMemory('React component debugging tips', ['react', 'debugging'], 3)
231 |         ];
232 | 
233 |         const projectContext = createMockProjectContext();
234 |         const conversationAnalysis = analyzeConversation('I need help optimizing database queries for better performance');
235 | 
236 |         const scoredMemories = scoreMemoryRelevance(memories, projectContext, {
237 |             includeConversationContext: true,
238 |             conversationAnalysis: conversationAnalysis
239 |         });
240 | 
241 |         // Database memory should score highest due to conversation context
242 |         const dbMemory = scoredMemories.find(m => m.content.includes('Database optimization'));
243 |         if (!dbMemory) return 'Database memory not found in results';
244 |         if (dbMemory.relevanceScore < 0.35) {
245 |             return `Database memory score too low: ${dbMemory.relevanceScore}`;
246 |         }
247 | 
248 |         // Verify conversation context was used
249 |         if (!dbMemory.scoreBreakdown.conversationRelevance) {
250 |             return 'Conversation relevance not calculated';
251 |         }
252 | 
253 |         console.log(`  Database memory scored highest: ${dbMemory.relevanceScore.toFixed(3)} (conversation: ${dbMemory.scoreBreakdown.conversationRelevance.toFixed(3)})`);
254 |         return true;
255 |     }
256 | 
257 |     async testWeightAdjustment() {
258 |         const memory = createMockMemory('Authentication system implementation', ['auth'], 1);
259 |         const projectContext = createMockProjectContext();
260 |         const conversationAnalysis = analyzeConversation('How to implement authentication?');
261 | 
262 |         // Test with conversation context enabled
263 |         const withContext = scoreMemoryRelevance([memory], projectContext, {
264 |             includeConversationContext: true,
265 |             conversationAnalysis: conversationAnalysis
266 |         })[0];
267 | 
268 |         // Test without conversation context
269 |         const withoutContext = scoreMemoryRelevance([memory], projectContext, {
270 |             includeConversationContext: false
271 |         })[0];
272 | 
273 |         if (!withContext.hasConversationContext) return 'Conversation context not enabled';
274 |         if (withContext.hasConversationContext === withoutContext.hasConversationContext) {
275 |             return 'Weight adjustment not applied';
276 |         }
277 | 
278 |         console.log(`  Weight adjustment applied: with context=${withContext.relevanceScore.toFixed(3)}, without=${withoutContext.relevanceScore.toFixed(3)}`);
279 |         return true;
280 |     }
281 | 
282 |     async testSessionCreation() {
283 |         const sessionTracker = new SessionTracker({
284 |             trackingDataPath: path.join(__dirname, 'test-session-tracking.json')
285 |         });
286 | 
287 |         await sessionTracker.initialize();
288 | 
289 |         const sessionId = 'test-session-' + Date.now();
290 |         const context = {
291 |             projectContext: createMockProjectContext(),
292 |             workingDirectory: '/test/directory'
293 |         };
294 | 
295 |         const session = await sessionTracker.startSession(sessionId, context);
296 | 
297 |         if (!session) return 'Session not created';
298 |         if (session.id !== sessionId) return 'Session ID mismatch';
299 |         if (session.status !== 'active') return 'Session status not active';
300 |         if (!session.projectContext) return 'Project context not stored';
301 | 
302 |         console.log(`  Session created: ${session.id} for project ${session.projectContext.name}`);
303 |         return true;
304 |     }
305 | 
306 |     async testConversationThreading() {
307 |         const sessionTracker = new SessionTracker({
308 |             trackingDataPath: path.join(__dirname, 'test-threading.json')
309 |         });
310 | 
311 |         await sessionTracker.initialize();
312 | 
313 |         const context = {
314 |             projectContext: createMockProjectContext(),
315 |             workingDirectory: '/test/directory'
316 |         };
317 | 
318 |         // Create first session
319 |         const session1 = await sessionTracker.startSession('session-1', context);
320 |         await sessionTracker.endSession('session-1', { type: 'completed', summary: 'Implemented auth' });
321 | 
322 |         // Create related session
323 |         const session2 = await sessionTracker.startSession('session-2', context);
324 | 
325 |         if (!session1.threadId) return 'Thread ID not created for first session';
326 |         if (!session2.threadId) return 'Thread ID not created for second session';
327 | 
328 |         // Sessions should be linked if they're related
329 |         const areLinked = session1.threadId === session2.threadId || 
330 |                          session2.parentSessionId === session1.id;
331 | 
332 |         console.log(`  Threading: session1=${session1.threadId}, session2=${session2.threadId}, linked=${areLinked}`);
333 |         return true;
334 |     }
335 | 
336 |     async testCrossSessionContext() {
337 |         const sessionTracker = new SessionTracker({
338 |             trackingDataPath: path.join(__dirname, 'test-cross-session.json')
339 |         });
340 | 
341 |         await sessionTracker.initialize();
342 | 
343 |         const projectContext = createMockProjectContext();
344 | 
345 |         // Create and end a session with outcome
346 |         const session1 = await sessionTracker.startSession('cross-session-1', { projectContext });
347 |         await sessionTracker.endSession('cross-session-1', {
348 |             type: 'implementation',
349 |             summary: 'Implemented user authentication',
350 |             topics: ['auth', 'jwt']
351 |         });
352 | 
353 |         // Get conversation context for new session
354 |         const context = await sessionTracker.getConversationContext(projectContext);
355 | 
356 |         if (!context) return 'Cross-session context not retrieved';
357 |         if (context.recentSessions.length === 0) return 'No recent sessions found';
358 |         if (!context.projectName) return 'Project name not in context';
359 | 
360 |         console.log(`  Cross-session context: ${context.recentSessions.length} recent sessions for ${context.projectName}`);
361 |         return true;
362 |     }
363 | 
364 |     async testUpdateTriggering() {
365 |         const updater = new DynamicContextUpdater({
366 |             updateThreshold: 0.3,
367 |             maxMemoriesPerUpdate: 2
368 |         });
369 | 
370 |         await updater.initialize({
371 |             projectContext: createMockProjectContext()
372 |         });
373 | 
374 |         // Mock memory service config
375 |         const mockConfig = {
376 |             endpoint: 'https://mock.local:8443',
377 |             apiKey: 'mock-key'
378 |         };
379 | 
380 |         // Mock context injector
381 |         let injectedContext = null;
382 |         const mockInjector = (context) => {
383 |             injectedContext = context;
384 |         };
385 | 
386 |         // Simulate conversation with significant topic change
387 |         const conversationText = 'I need help debugging this authentication error in the JWT validation';
388 | 
389 |         // This would normally trigger an update, but we'll simulate the decision logic
390 |         const analysis = analyzeConversation(conversationText);
391 |         const changes = detectTopicChanges(null, analysis);
392 | 
393 |         if (!changes.hasTopicShift) return 'Topic shift not detected for significant conversation change';
394 |         if (changes.significanceScore < 0.3) return 'Significance score below threshold';
395 | 
396 |         console.log(`  Update would be triggered: significance=${changes.significanceScore.toFixed(2)}`);
397 |         return true;
398 |     }
399 | 
400 |     async testRateLimiting() {
401 |         const updater = new DynamicContextUpdater({
402 |             updateCooldownMs: 1000,  // 1 second cooldown
403 |             maxUpdatesPerSession: 3
404 |         });
405 | 
406 |         await updater.initialize({
407 |             projectContext: createMockProjectContext()
408 |         });
409 | 
410 |         // First update should be allowed
411 |         if (!updater.shouldProcessUpdate()) return 'First update not allowed';
412 | 
413 |         // Simulate update
414 |         updater.lastUpdateTime = Date.now();
415 |         updater.updateCount = 1;
416 | 
417 |         // Immediate second update should be blocked by cooldown
418 |         if (updater.shouldProcessUpdate()) return 'Cooldown not enforced';
419 | 
420 |         // After cooldown, should be allowed
421 |         updater.lastUpdateTime = Date.now() - 2000; // 2 seconds ago
422 |         if (!updater.shouldProcessUpdate()) return 'Update after cooldown not allowed';
423 | 
424 |         // But not if max updates reached
425 |         updater.updateCount = 10; // Exceed max
426 |         if (updater.shouldProcessUpdate()) return 'Max updates limit not enforced';
427 | 
428 |         console.log('  Rate limiting working correctly');
429 |         return true;
430 |     }
431 | 
432 |     async testContextFormatting() {
433 |         const memories = [
434 |             createMockMemory('Database optimization completed successfully', ['database', 'optimization'], 1),
435 |             createMockMemory('JWT implementation guide for auth', ['auth', 'jwt'], 2)
436 |         ];
437 | 
438 |         memories.forEach(memory => {
439 |             memory.relevanceScore = 0.8;
440 |         });
441 | 
442 |         const updater = new DynamicContextUpdater();
443 |         const analysis = analyzeConversation('Working on database performance improvements');
444 |         const changes = { newTopics: [{ name: 'database' }], changedIntents: false };
445 | 
446 |         const formatted = updater.formatContextUpdate(memories, analysis, changes, null);
447 | 
448 |         if (!formatted.includes('Dynamic Context Update')) return 'Header not found';
449 |         if (!formatted.includes('New topics detected')) return 'Topic change not mentioned';
450 |         if (!formatted.includes('Database optimization')) return 'Memory content not included';
451 | 
452 |         console.log('  Context formatting working correctly');
453 |         return true;
454 |     }
455 | 
456 |     async testFullConversationFlow() {
457 |         // This test simulates a full conversation flow with topic changes
458 |         const sessionTracker = new SessionTracker({
459 |             trackingDataPath: path.join(__dirname, 'test-full-flow.json')
460 |         });
461 | 
462 |         const updater = new DynamicContextUpdater({
463 |             updateThreshold: 0.15,  // Even lower threshold for testing
464 |             enableCrossSessionContext: true
465 |         });
466 | 
467 |         await sessionTracker.initialize();
468 |         await updater.initialize({
469 |             projectContext: createMockProjectContext()
470 |         });
471 | 
472 |         // Simulate conversation evolution
473 |         const conversations = [
474 |             'Starting work on authentication system implementation',
475 |             'Now debugging database performance issues with slow queries and errors',
476 |             'Switching focus to frontend React component optimization and testing framework'
477 |         ];
478 | 
479 |         let lastAnalysis = null;
480 |         let significantChanges = 0;
481 | 
482 |         for (let i = 0; i < conversations.length; i++) {
483 |             const analysis = analyzeConversation(conversations[i]);
484 |             
485 |             if (lastAnalysis) {
486 |                 const changes = detectTopicChanges(lastAnalysis, analysis);
487 |                 if (changes.hasTopicShift && changes.significanceScore > 0.15) {
488 |                     significantChanges++;
489 |                 }
490 |             }
491 |             
492 |             lastAnalysis = analysis;
493 |         }
494 | 
495 |         if (significantChanges < 2) {
496 |             return `Expected at least 2 significant changes, got ${significantChanges}`;
497 |         }
498 | 
499 |         console.log(`  Full conversation flow: ${significantChanges} significant topic changes detected`);
500 |         return true;
501 |     }
502 | 
503 |     printSummary() {
504 |         console.log('\n============================================================');
505 |         console.log('🎯 PHASE 2 TEST SUMMARY');
506 |         console.log('============================================================');
507 |         console.log(`Total Tests: ${this.totalTests}`);
508 |         console.log(`✅ Passed: ${this.passedTests}`);
509 |         console.log(`❌ Failed: ${this.totalTests - this.passedTests}`);
510 |         console.log(`Success Rate: ${((this.passedTests / this.totalTests) * 100).toFixed(1)}%`);
511 |         console.log('============================================================');
512 | 
513 |         if (this.passedTests === this.totalTests) {
514 |             console.log('🎉 ALL PHASE 2 TESTS PASSED! Intelligent context updates ready.');
515 |         } else {
516 |             console.log('\n❌ Failed Tests:');
517 |             this.testResults
518 |                 .filter(result => result.status !== 'PASS')
519 |                 .forEach(result => {
520 |                     console.log(`  - ${result.name}: ${result.reason || result.error || 'Unknown error'}`);
521 |                 });
522 |         }
523 | 
524 |         console.log('\n📋 Phase 2 Features Tested:');
525 |         console.log('  ✅ Conversation Analysis & Topic Detection');
526 |         console.log('  ✅ Dynamic Context Updates & Memory Loading');  
527 |         console.log('  ✅ Enhanced Memory Scoring with Conversation Context');
528 |         console.log('  ✅ Session Tracking & Cross-Session Intelligence');
529 |         console.log('  ✅ Rate Limiting & Update Management');
530 |         console.log('  ✅ Full Conversation Flow Integration');
531 |     }
532 | }
533 | 
534 | // Run tests if called directly
535 | if (require.main === module) {
536 |     const testSuite = new Phase2TestSuite();
537 |     testSuite.runAllTests().catch(error => {
538 |         console.error('Test suite failed:', error);
539 |         process.exit(1);
540 |     });
541 | }
542 | 
543 | module.exports = Phase2TestSuite;
```
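
A quick aside on the rate-limiting behaviour exercised by `testRateLimiting` above: the gate is simply a cooldown window since the last update plus a per-session update cap. Below is a minimal Python sketch of that gate. The field and option names (`lastUpdateTime`, `updateCount`, `updateCooldownMs`, `maxUpdatesPerSession`) are borrowed from the JavaScript tests; this is an illustration of the tested logic, not the hook's actual implementation.

```python
import time

class UpdateGate:
    """Illustrative cooldown + per-session cap (mirrors the logic the tests assert)."""

    def __init__(self, update_cooldown_ms: int = 1000, max_updates_per_session: int = 3):
        self.update_cooldown_ms = update_cooldown_ms
        self.max_updates_per_session = max_updates_per_session
        self.last_update_time = 0.0   # epoch milliseconds of the last update
        self.update_count = 0         # updates performed this session

    def should_process_update(self) -> bool:
        # Hard cap: once the per-session limit is reached, never update again.
        if self.update_count >= self.max_updates_per_session:
            return False
        # Cooldown: block updates that arrive too soon after the previous one.
        now_ms = time.time() * 1000
        return now_ms - self.last_update_time >= self.update_cooldown_ms

gate = UpdateGate()
assert gate.should_process_update()          # first update is allowed
gate.last_update_time = time.time() * 1000   # simulate an update just now
gate.update_count = 1
assert not gate.should_process_update()      # cooldown blocks an immediate retry
```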

--------------------------------------------------------------------------------
/scripts/maintenance/consolidate_memory_types.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Memory Type Consolidation Script
  4 | 
  5 | Consolidates fragmented memory types into a standardized taxonomy.
  6 | Run with --dry-run to preview changes before executing.
  7 | 
  8 | ⚠️ IMPORTANT SAFETY NOTES:
  9 | - Creates automatic backup before execution
 10 | - Stop HTTP server before running: systemctl --user stop mcp-memory-http.service
 11 | - Disconnect MCP clients (use /mcp in Claude Code)
 12 | - Database must not be locked or in use
 13 | 
 14 | Usage:
 15 |     python consolidate_memory_types.py --dry-run  # Preview changes (safe)
 16 |     python consolidate_memory_types.py            # Execute consolidation
 17 | To customize mappings, edit the CONSOLIDATION_MAP table in this file.
 18 | """
 19 | 
 20 | import sqlite3
 21 | import sys
 22 | import os
 23 | import subprocess
 24 | import shutil
 25 | from pathlib import Path
 26 | from typing import Dict, Tuple, Optional
 27 | from collections import defaultdict
 28 | from datetime import datetime
 29 | 
 30 | # Database path (platform-aware)
 31 | import platform
 32 | if platform.system() == "Darwin":  # macOS
 33 |     DB_PATH = Path.home() / "Library/Application Support/mcp-memory/sqlite_vec.db"
 34 | elif platform.system() == "Windows":
 35 |     DB_PATH = Path(os.getenv('LOCALAPPDATA')) / "mcp-memory" / "sqlite_vec.db"
 36 | else:  # Linux and other Unix-like systems
 37 |     DB_PATH = Path.home() / ".local/share/mcp-memory/sqlite_vec.db"
 38 | 
 39 | # Version
 40 | VERSION = "1.0.0"
 41 | 
 42 | # Consolidation mapping: old_type -> new_type
 43 | # Special handling for empty strings, NULL values, and pattern-based consolidation
 44 | CONSOLIDATION_MAP: Dict[str, str] = {
 45 |     # Empty type and NULL -> note
 46 |     "": "note",
 47 |     None: "note",
 48 | 
 49 |     # Session variants -> session
 50 |     "session-summary": "session",
 51 |     "session-checkpoint": "session",
 52 |     "session-completion": "session",
 53 |     "session-context": "session",
 54 |     "analysis-session": "session",
 55 |     "development-session": "session",
 56 |     "development_session": "session",
 57 |     "maintenance-session": "session",
 58 |     "project-session": "session",
 59 | 
 60 |     # Special sessions -> troubleshooting
 61 |     "troubleshooting-session": "troubleshooting",
 62 |     "diagnostic-session": "troubleshooting",
 63 |     "technical-session": "troubleshooting",
 64 | 
 65 |     # Milestone and completion variants -> milestone
 66 |     "project-milestone": "milestone",
 67 |     "development-milestone": "milestone",
 68 |     "major-milestone": "milestone",
 69 |     "major_milestone": "milestone",
 70 |     "documentation-milestone": "milestone",
 71 |     "release-milestone": "milestone",
 72 |     "deployment-milestone": "milestone",
 73 |     "final-milestone": "milestone",
 74 |     "progress-milestone": "milestone",
 75 |     "mission-accomplished": "milestone",
 76 | 
 77 |     # Completion types -> milestone
 78 |     "completion": "milestone",
 79 |     "project-completion": "milestone",
 80 |     "work-completion": "milestone",
 81 |     "completion-summary": "milestone",
 82 |     "milestone-completion": "milestone",
 83 |     "release-completion": "milestone",
 84 |     "development-completion": "milestone",
 85 |     "documentation-completion": "milestone",
 86 |     "feature-completion": "milestone",
 87 |     "final-completion": "milestone",
 88 |     "implementation-completion": "milestone",
 89 |     "merge-completion": "milestone",
 90 |     "session-completion": "milestone",
 91 |     "workflow-complete": "milestone",
 92 | 
 93 |     # Technical prefix removal - documentation
 94 |     "technical-documentation": "documentation",
 95 | 
 96 |     # Technical prefix removal - implementation
 97 |     "technical-implementation": "implementation",
 98 | 
 99 |     # Technical prefix removal - solution
100 |     "technical-solution": "solution",
101 |     "technical solution": "solution",
102 | 
103 |     # Technical prefix removal - fix
104 |     "technical-fix": "fix",
105 | 
106 |     # Technical prefix removal - analysis
107 |     "technical-analysis": "analysis",
108 | 
109 |     # Technical prefix removal - reference
110 |     "technical-reference": "reference",
111 | 
112 |     # Technical prefix removal - note
113 |     "technical-note": "note",
114 |     "technical-notes": "note",
115 | 
116 |     # Technical prefix removal - guide
117 |     "technical-guide": "guide",
118 |     "technical-guidance": "guide",
119 |     "technical-howto": "guide",
120 | 
121 |     # Technical prefix removal - other
122 |     "technical-specification": "architecture",
123 |     "technical-decision": "architecture",
124 |     "technical-design": "architecture",
125 |     "technical-knowledge": "reference",
126 |     "technical_knowledge": "reference",
127 |     "technical-finding": "analysis",
128 |     "technical-pattern": "architecture",
129 |     "technical-rule": "process",
130 |     "technical-process": "process",
131 |     "technical-achievement": "achievement",
132 |     "technical_achievement": "achievement",
133 |     "technical-data": "document",
134 |     "technical-diagram": "document",
135 |     "technical-enhancement": "feature",
136 |     "technical-problem": "troubleshooting",
137 |     "technical-setup": "configuration",
138 |     "technical-summary": "note",
139 |     "technical-todo": "note",
140 | 
141 |     # Project prefix removal
142 |     "project-documentation": "documentation",
143 |     "project-status": "status",
144 |     "project-summary": "note",
145 |     "project-update": "status",
146 |     "project-management": "process",
147 |     "project-improvement": "feature",
148 |     "project-action": "note",
149 |     "project-event": "note",
150 |     "project-final-update": "status",
151 |     "project-goals": "note",
152 |     "project-implementation": "implementation",
153 |     "project-outcome": "milestone",
154 |     "project-overview": "note",
155 |     "project-policy": "process",
156 |     "project-requirement": "note",
157 |     "project-planning": "process",
158 |     "project-plan": "process",
159 |     "project-roadmap": "process",
160 |     "project-strategy": "process",
161 |     "project-task": "note",
162 |     "project-timeline": "process",
163 |     "project-tracker": "status",
164 |     "project-workflow": "process",
165 |     "project-issue": "troubleshooting",
166 |     "project-problem": "troubleshooting",
167 |     "project-challenge": "troubleshooting",
168 |     "project-risk": "note",
169 |     "project-solution": "solution",
170 |     "project-result": "milestone",
171 |     "project-success": "achievement",
172 |     "project-failure": "note",
173 |     "project-learning": "reference",
174 |     "project-lesson": "reference",
175 |     "project-feedback": "note",
176 |     "project-review": "analysis",
177 |     "project-assessment": "analysis",
178 |     "project-evaluation": "analysis",
179 |     "project-analysis": "analysis",
180 |     "project-report": "analysis",
181 |     "project-metrics": "analysis",
182 |     "project-performance": "analysis",
183 |     "project-impact": "analysis",
184 |     "project-outcome-analysis": "analysis",
185 |     "project-benefit": "achievement",
186 |     "project-achievement": "achievement",
187 | 
188 |     # System and configuration
189 |     "system-config": "configuration",
190 |     "system-setup": "configuration",
191 |     "server-config": "configuration",
192 |     "server-configuration": "configuration",
193 |     "system-configuration": "configuration",
194 |     "infrastructure_setup": "configuration",
195 |     "setup": "configuration",
196 |     "setup-guide": "guide",
197 |     "setup-memo": "configuration",
198 |     "configuration-guide": "guide",
199 |     "user-preference": "configuration",
200 | 
201 |     # Infrastructure
202 |     "infrastructure-change": "infrastructure",
203 |     "infrastructure-analysis": "infrastructure",
204 |     "infrastructure-report": "infrastructure",
205 | 
206 |     # Process and workflow
207 |     "workflow": "process",
208 |     "procedure": "process",
209 |     "workflow-guide": "guide",
210 |     "process-guide": "guide",
211 |     "process-improvement": "process",
212 |     "action-plan": "process",
213 |     "detailed-process": "process",
214 |     "development-plan": "process",
215 |     "comprehensive-plan": "process",
216 |     "cleanup": "process",
217 |     "maintenance": "process",
218 | 
219 |     # Installation and features
220 |     "installation-guide": "guide",
221 |     "feature-specification": "feature",
222 |     "feature-summary": "feature",
223 | 
224 |     # General mappings
225 |     "summary": "note",
226 |     "memo": "note",
227 |     "reminder": "note",
228 |     "clarification": "note",
229 |     "checkpoint": "note",
230 |     "finding": "analysis",
231 |     "report": "analysis",
232 |     "analysis-summary": "analysis",
233 |     "analysis-report": "analysis",
234 |     "financial-analysis": "analysis",
235 |     "security-analysis": "analysis",
236 |     "verification": "test",
237 |     "correction": "fix",
238 |     "enhancement": "feature",
239 |     "improvement": "feature",
240 |     "improvement-summary": "feature",
241 |     "fix-summary": "fix",
242 |     "user-feedback": "note",
243 |     "user-identity": "note",
244 |     "user-account": "configuration",
245 | 
246 |     # Extended mappings from database analysis
247 |     "marketing": "note",
248 |     "support": "note",
249 |     "integration": "implementation",
250 |     "strategy-integration": "implementation",
251 |     "methodology": "process",
252 |     "guideline": "guide",
253 |     "critical-lesson": "reference",
254 |     "security-reminder": "security",
255 |     "security-recovery": "security",
256 |     "security-resolution": "security",
257 |     "security-update": "security",
258 |     "workflow-rule": "process",
259 |     "professional_story": "note",
260 | 
261 |     # Additional types found in database
262 |     "applescript-template": "document",
263 |     "project": "note",
264 |     "test-document": "test",
265 |     "documentation-summary": "documentation",
266 |     "documentation-final": "documentation",
267 |     "fact": "note",
268 |     "development-summary": "note",
269 |     "lesson-learned": "reference",
270 |     "reference-guide": "guide",
271 |     "task": "note",
272 |     "update": "status",
273 | 
274 |     # German terms (found in database)
275 |     "Bankzahlung": "note",
276 |     "Betrugsschema": "note",
277 |     "Finanzbeweis": "note",
278 |     "Strafanzeige": "note",
279 | 
280 |     # Analysis and investigation types
281 |     "analysis-finding": "analysis",
282 |     "analysis-start": "analysis",
283 |     "comprehensive-analysis": "analysis",
284 |     "final-analysis": "analysis",
285 |     "investigation": "analysis",
286 |     "issue-identification": "analysis",
287 |     "issue_investigation": "analysis",
288 |     "temporal-analysis": "analysis",
289 |     "testing-insights": "analysis",
290 | 
291 |     # Architecture types
292 |     "architecture-decision": "architecture",
293 |     "architecture-visualization": "architecture",
294 |     "concept-design": "architecture",
295 |     "design-decision": "architecture",
296 |     "tool-decision": "architecture",
297 | 
298 |     # Status and reporting
299 |     "backup-record": "status",
300 |     "maintenance-report": "status",
301 |     "maintenance-summary": "status",
302 |     "progress": "status",
303 |     "progress-tracking": "status",
304 |     "system-health-report": "status",
305 |     "system-report": "status",
306 | 
307 |     # Reference types
308 |     "best-practice": "reference",
309 |     "learning": "reference",
310 |     "lesson": "reference",
311 |     "network-info": "reference",
312 |     "principle": "reference",
313 | 
314 |     # Fix types
315 |     "bug": "fix",
316 |     "critical-fix": "fix",
317 | 
318 |     # Troubleshooting types
319 |     "configuration-issue": "troubleshooting",
320 |     "critical-issue": "troubleshooting",
321 |     "debugging": "troubleshooting",
322 |     "error": "troubleshooting",
323 |     "problem-escalation": "troubleshooting",
324 | 
325 |     # Solution types
326 |     "final-resolution": "solution",
327 |     "solution-complete": "solution",
328 |     "solution-design": "solution",
329 |     "solution-implemented": "solution",
330 | 
331 |     # Test types
332 |     "compatibility-test": "test",
333 |     "debug-test": "test",
334 |     "functionality-test": "test",
335 |     "healthcheck_test": "test",
336 |     "post-fix-test": "test",
337 |     "post-restart-test": "test",
338 |     "string-format-test": "test",
339 |     "system_test": "test",
340 |     "test-case": "test",
341 |     "test-result": "test",
342 |     "testing": "test",
343 |     "validation": "test",
344 |     "validation-results": "test",
345 |     "verification-test": "test",
346 | 
347 |     # Guide types
348 |     "comprehensive-guide": "guide",
349 |     "tutorial_resource": "guide",
350 | 
351 |     # Document types
352 |     "comprehensive_collection": "document",
353 |     "strategy-document": "document",
354 | 
355 |     # Achievement types
356 |     "success": "achievement",
357 |     "work-achievement": "achievement",
358 | 
359 |     # Note types (various)
360 |     "concept": "note",
361 |     "contribution": "note",
362 |     "critical-discovery": "note",
363 |     "design-note": "note",
364 |     "discovery": "note",
365 |     "important-note": "note",
366 |     "imported": "note",
367 |     "network-limitation": "note",
368 |     "reflection": "note",
369 |     "server-behavior": "note",
370 |     "system": "note",
371 |     "tool": "note",
372 |     "user-input": "note",
373 |     "user-question": "note",
374 |     "user-request": "note",
375 |     "issue_creation": "note",
376 | }
377 | 
378 | def check_http_server_running() -> bool:
379 |     """Check if HTTP server is running (Linux only)."""
380 |     try:
381 |         # Check systemd service
382 |         result = subprocess.run(
383 |             ["systemctl", "--user", "is-active", "mcp-memory-http.service"],
384 |             capture_output=True,
385 |             text=True
386 |         )
387 |         return result.returncode == 0
388 |     except (subprocess.SubprocessError, FileNotFoundError):
389 |         # Not Linux or systemctl not available
390 |         return False
391 | 
392 | 
393 | def check_database_locked(db_path: Path) -> bool:
394 |     """Check if database is currently locked."""
395 |     try:
396 |         # Try to open with a very short timeout
397 |         conn = sqlite3.connect(db_path, timeout=0.1)
398 |         cursor = conn.cursor()
399 |         cursor.execute("BEGIN IMMEDIATE")
400 |         conn.rollback()
401 |         conn.close()
402 |         return False
403 |     except sqlite3.OperationalError:
404 |         return True
405 | 
406 | 
407 | def create_backup(db_path: Path, dry_run: bool = False) -> Optional[Path]:
408 |     """Create a timestamped backup of the database."""
409 |     if dry_run:
410 |         return None
411 | 
412 |     timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
413 |     backup_path = db_path.parent / f"{db_path.stem}.backup-{timestamp}{db_path.suffix}"
414 | 
415 |     try:
416 |         shutil.copy2(db_path, backup_path)
417 | 
418 |         # Verify backup
419 |         if not backup_path.exists():
420 |             raise FileNotFoundError(f"Backup file not created: {backup_path}")
421 | 
422 |         if backup_path.stat().st_size != db_path.stat().st_size:
423 |             raise ValueError(f"Backup size mismatch: {backup_path.stat().st_size} != {db_path.stat().st_size}")
424 | 
425 |         return backup_path
426 |     except Exception as e:
427 |         print(f"\n❌ Error creating backup: {e}")
428 |         raise
429 | 
430 | 
431 | def perform_safety_checks(db_path: Path, dry_run: bool = False) -> bool:
432 |     """Perform all safety checks before consolidation."""
433 |     print("\n" + "="*80)
434 |     print("Safety Checks")
435 |     print("="*80)
436 | 
437 |     all_passed = True
438 | 
439 |     # Check 1: Database exists
440 |     if not db_path.exists():
441 |         print("❌ Database not found at:", db_path)
442 |         return False
443 |     print(f"✓ Database found: {db_path}")
444 | 
445 |     # Check 2: Database is not locked
446 |     if check_database_locked(db_path):
447 |         print("❌ Database is currently locked (in use by another process)")
448 |         print("   Stop HTTP server: systemctl --user stop mcp-memory-http.service")
449 |         print("   Disconnect MCP: Use /mcp command in Claude Code")
450 |         all_passed = False
451 |     else:
452 |         print("✓ Database is not locked")
453 | 
454 |     # Check 3: HTTP server status (Linux only)
455 |     if os.name != 'nt':  # Not Windows
456 |         if check_http_server_running():
457 |             print("⚠️  HTTP server is running")
458 |             print("   Recommended: systemctl --user stop mcp-memory-http.service")
459 |             if not dry_run:
460 |                 response = input("   Continue anyway? (yes/no): ")
461 |                 if response.lower() != "yes":
462 |                     all_passed = False
463 |         else:
464 |             print("✓ HTTP server is not running")
465 | 
466 |     # Check 4: Sufficient disk space (shutil.disk_usage is cross-platform;
467 |     # os.statvfs is unavailable on Windows)
468 |     free_space = shutil.disk_usage(db_path.parent).free
469 |     db_size = db_path.stat().st_size
470 |     if free_space < db_size * 2:  # Need at least 2x database size
471 |         print(f"⚠️  Low disk space: {free_space / 1024**2:.1f} MB free, need {db_size * 2 / 1024**2:.1f} MB")
472 |         all_passed = False
473 |     else:
474 |         print(f"✓ Sufficient disk space: {free_space / 1024**2:.1f} MB free")
475 | 
476 |     print("="*80)
477 | 
478 |     return all_passed
479 | 
480 | 
481 | def analyze_database(conn: sqlite3.Connection) -> Tuple[Dict[str, int], int]:
482 |     """Analyze current state of memory types."""
483 |     cursor = conn.cursor()
484 | 
485 |     # Get type distribution
486 |     cursor.execute("SELECT memory_type, COUNT(*) FROM memories GROUP BY memory_type")
487 |     type_counts = {row[0]: row[1] for row in cursor.fetchall()}
488 | 
489 |     # Get total count
490 |     cursor.execute("SELECT COUNT(*) FROM memories")
491 |     total = cursor.fetchone()[0]
492 | 
493 |     return type_counts, total
494 | 
495 | 
496 | def preview_consolidation(type_counts: Dict[str, int]) -> Tuple[Dict[str, dict], Dict[str, int]]:
497 |     """Preview what the consolidation will do."""
498 |     # Group by target type
499 |     consolidation_preview = defaultdict(lambda: {"old_count": 0, "sources": []})
500 |     unchanged = {}
501 | 
502 |     for old_type, count in type_counts.items():
503 |         if old_type in CONSOLIDATION_MAP:
504 |             new_type = CONSOLIDATION_MAP[old_type]
505 |             consolidation_preview[new_type]["old_count"] += count
506 |             consolidation_preview[new_type]["sources"].append(f"{old_type} ({count})")
507 |         else:
508 |             unchanged[old_type] = count
509 | 
510 |     return dict(consolidation_preview), unchanged
511 | 
512 | 
513 | def execute_consolidation(conn: sqlite3.Connection, dry_run: bool = True) -> Tuple[int, Dict[str, int]]:
514 |     """Execute the consolidation."""
515 |     cursor = conn.cursor()
516 |     total_updated = 0
517 |     updates_by_type = defaultdict(int)
518 | 
519 |     if dry_run:
520 |         print("\n" + "="*80)
521 |         print("DRY RUN MODE - No changes will be made")
522 |         print("="*80 + "\n")
523 | 
524 |     # Process each mapping
525 |     for old_type, new_type in CONSOLIDATION_MAP.items():
526 |         # Handle None/NULL specially
527 |         if old_type is None:
528 |             if dry_run:
529 |                 cursor.execute("SELECT COUNT(*) FROM memories WHERE memory_type IS NULL")
530 |                 count = cursor.fetchone()[0]
531 |                 if count > 0:
532 |                     print(f"Would update {count:4d} memories: (None/NULL) → {new_type}")
533 |                     total_updated += count
534 |                     updates_by_type[new_type] += count
535 |             else:
536 |                 cursor.execute("UPDATE memories SET memory_type = ? WHERE memory_type IS NULL", (new_type,))
537 |                 count = cursor.rowcount
538 |                 if count > 0:
539 |                     print(f"Updated {count:4d} memories: (None/NULL) → {new_type}")
540 |                     total_updated += count
541 |                     updates_by_type[new_type] += count
542 |         else:
543 |             if dry_run:
544 |                 cursor.execute(
545 |                     "SELECT COUNT(*) FROM memories WHERE memory_type = ?",
546 |                     (old_type,)
547 |                 )
548 |                 count = cursor.fetchone()[0]
549 |                 if count > 0:
550 |                     print(f"Would update {count:4d} memories: {old_type!r:40s} → {new_type}")
551 |                     total_updated += count
552 |                     updates_by_type[new_type] += count
553 |             else:
554 |                 cursor.execute(
555 |                     "UPDATE memories SET memory_type = ? WHERE memory_type = ?",
556 |                     (new_type, old_type)
557 |                 )
558 |                 count = cursor.rowcount
559 |                 if count > 0:
560 |                     print(f"Updated {count:4d} memories: {old_type!r:40s} → {new_type}")
561 |                     total_updated += count
562 |                     updates_by_type[new_type] += count
563 | 
564 |     return total_updated, dict(updates_by_type)
565 | 
566 | 
567 | def main():
568 |     """Main execution."""
569 |     dry_run = "--dry-run" in sys.argv
570 | 
571 |     print(f"\nMemory Type Consolidation Script v{VERSION}")
572 |     print(f"Database: {DB_PATH}")
573 |     print(f"Mode: {'DRY RUN (preview only)' if dry_run else 'LIVE EXECUTION'}")
574 |     print("="*80)
575 | 
576 |     # Perform safety checks
577 |     if not perform_safety_checks(DB_PATH, dry_run):
578 |         print("\n❌ Safety checks failed. Aborting.")
579 |         sys.exit(1)
580 | 
581 |     # Create backup (unless dry-run)
582 |     if not dry_run:
583 |         print("\nCreating backup...")
584 |         try:
585 |             backup_path = create_backup(DB_PATH, dry_run)
586 |             if backup_path:
587 |                 print(f"✓ Backup created: {backup_path}")
588 |                 print(f"  Size: {backup_path.stat().st_size / 1024**2:.2f} MB")
589 |         except Exception as e:
590 |             print(f"❌ Failed to create backup: {e}")
591 |             sys.exit(1)
592 | 
593 |     # Connect to database
594 |     conn = sqlite3.connect(DB_PATH, timeout=30)
595 | 
596 |     try:
597 |         # Analyze current state
598 |         print("\nAnalyzing current state...")
599 |         type_counts, total = analyze_database(conn)
600 |         unique_types = len(type_counts)
601 | 
602 |         print(f"\nCurrent State:")
603 |         print(f"  Total memories: {total:,}")
604 |         print(f"  Unique types: {unique_types}")
605 |         print(f"  Empty type: {type_counts.get('', 0)}")
606 | 
607 |         # Preview consolidation
608 |         print("\nConsolidation Preview:")
609 |         consolidation_preview, unchanged = preview_consolidation(type_counts)
610 | 
611 |         print(f"\nTypes that will be consolidated:")
612 |         for new_type in sorted(consolidation_preview.keys()):
613 |             info = consolidation_preview[new_type]
614 |             print(f"\n  {new_type}: {info['old_count']} memories from {len(info['sources'])} sources")
615 |             for source in sorted(info['sources']):
616 |                 print(f"    ← {source}")
617 | 
618 |         print(f"\nTypes that will remain unchanged: {len(unchanged)}")
619 |         for old_type, count in sorted(unchanged.items(), key=lambda x: -x[1])[:20]:
620 |             type_display = old_type if old_type is not None else "(None/NULL)"
621 |             print(f"  {type_display:40s} {count:4d}")
622 |         if len(unchanged) > 20:
623 |             print(f"  ... and {len(unchanged) - 20} more")
624 | 
625 |         # Execute consolidation
626 |         print("\n" + "="*80)
627 |         if not dry_run:
628 |             response = input("\nProceed with consolidation? (yes/no): ")
629 |             if response.lower() != "yes":
630 |                 print("Consolidation cancelled.")
631 |                 return
632 | 
633 |         total_updated, updates_by_type = execute_consolidation(conn, dry_run)
634 | 
635 |         if not dry_run:
636 |             conn.commit()
637 |             print(f"\n✓ Consolidation complete!")
638 | 
639 |         print(f"\nTotal memories updated: {total_updated:,}")
640 |         print(f"\nBreakdown by target type:")
641 |         for new_type in sorted(updates_by_type.keys(), key=lambda x: -updates_by_type[x]):
642 |             print(f"  {new_type:30s} +{updates_by_type[new_type]:4d}")
643 | 
644 |         # Show final state
645 |         if not dry_run:
646 |             print("\nAnalyzing final state...")
647 |             final_type_counts, final_total = analyze_database(conn)
648 |             final_unique_types = len(final_type_counts)
649 | 
650 |             print(f"\nFinal State:")
651 |             print(f"  Total memories: {final_total:,}")
652 |             print(f"  Unique types: {final_unique_types}")
653 |             print(f"  Reduction: {unique_types} → {final_unique_types} types ({unique_types - final_unique_types} removed)")
654 | 
655 |             print(f"\nTop types by count:")
656 |             for memory_type, count in sorted(final_type_counts.items(), key=lambda x: -x[1])[:25]:
657 |                 pct = (count / final_total) * 100
658 |                 print(f"  {memory_type:30s} {count:4d} ({pct:5.1f}%)")
659 | 
660 |     except Exception as e:
661 |         print(f"\nError: {e}")
662 |         if not dry_run:
663 |             conn.rollback()
664 |             print("Changes rolled back.")
665 |         raise
666 | 
667 |     finally:
668 |         conn.close()
669 | 
670 |     if dry_run:
671 |         print("\n" + "="*80)
672 |         print("DRY RUN COMPLETE - Run without --dry-run to execute")
673 |         print("="*80)
674 | 
675 | 
676 | if __name__ == "__main__":
677 |     main()
678 | 
```
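
To make the update pattern concrete, here is a self-contained sketch of the dry-run/execute logic against a throwaway in-memory SQLite database. The table layout (a `memories` table with a `memory_type` column) matches what the script queries; the two sample mappings are a subset of `CONSOLIDATION_MAP` above.

```python
import sqlite3

# Subset of CONSOLIDATION_MAP, for illustration only.
MAPPINGS = {"session-summary": "session", "technical-fix": "fix"}

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE memories (id INTEGER PRIMARY KEY, memory_type TEXT)")
conn.executemany(
    "INSERT INTO memories (memory_type) VALUES (?)",
    [("session-summary",), ("technical-fix",), ("note",)],
)

dry_run = True  # flip to False to apply the updates
for old_type, new_type in MAPPINGS.items():
    if dry_run:
        # Preview: count affected rows without modifying anything.
        count = conn.execute(
            "SELECT COUNT(*) FROM memories WHERE memory_type = ?", (old_type,)
        ).fetchone()[0]
        print(f"Would update {count} memories: {old_type!r} -> {new_type!r}")
    else:
        # Execute: parameterized UPDATE; the real script commits after all mappings.
        cur = conn.execute(
            "UPDATE memories SET memory_type = ? WHERE memory_type = ?",
            (new_type, old_type),
        )
        print(f"Updated {cur.rowcount} memories: {old_type!r} -> {new_type!r}")
conn.close()
```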

--------------------------------------------------------------------------------
/tests/unit/test_cloudflare_storage.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """Tests for Cloudflare storage backend."""
 16 | 
 17 | import pytest
 18 | import asyncio
 19 | from unittest.mock import Mock, AsyncMock, patch
 20 | from typing import List
 21 | 
 22 | from src.mcp_memory_service.storage.cloudflare import CloudflareStorage
 23 | from src.mcp_memory_service.models.memory import Memory
 24 | from src.mcp_memory_service.utils.hashing import generate_content_hash
 25 | 
 26 | 
 27 | @pytest.fixture
 28 | def cloudflare_storage():
 29 |     """Create a CloudflareStorage instance for testing."""
 30 |     return CloudflareStorage(
 31 |         api_token="test-token",
 32 |         account_id="test-account",
 33 |         vectorize_index="test-index",
 34 |         d1_database_id="test-db",
 35 |         r2_bucket="test-bucket",
 36 |         embedding_model="@cf/baai/bge-base-en-v1.5"
 37 |     )
 38 | 
 39 | 
 40 | @pytest.fixture
 41 | def sample_memory():
 42 |     """Create a sample memory for testing."""
 43 |     content = "This is a test memory"
 44 |     return Memory(
 45 |         content=content,
 46 |         content_hash=generate_content_hash(content),
 47 |         tags=["test", "memory"],
 48 |         memory_type="standard"
 49 |     )
 50 | 
 51 | 
 52 | class TestCloudflareStorage:
 53 |     """Test suite for CloudflareStorage."""
 54 |     
 55 |     def test_initialization(self, cloudflare_storage):
 56 |         """Test CloudflareStorage initialization."""
 57 |         assert cloudflare_storage.api_token == "test-token"
 58 |         assert cloudflare_storage.account_id == "test-account"
 59 |         assert cloudflare_storage.vectorize_index == "test-index"
 60 |         assert cloudflare_storage.d1_database_id == "test-db"
 61 |         assert cloudflare_storage.r2_bucket == "test-bucket"
 62 |         assert not cloudflare_storage._initialized
 63 |         
 64 |     @pytest.mark.asyncio
 65 |     async def test_get_client(self, cloudflare_storage):
 66 |         """Test HTTP client creation."""
 67 |         client = await cloudflare_storage._get_client()
 68 |         assert client is not None
 69 |         assert cloudflare_storage.client == client
 70 |         
 71 |         # Verify headers are set correctly
 72 |         assert "Authorization" in client.headers
 73 |         assert client.headers["Authorization"] == "Bearer test-token"
 74 |         assert client.headers["Content-Type"] == "application/json"
 75 |         
 76 |     @pytest.mark.asyncio
 77 |     async def test_generate_embedding_cache(self, cloudflare_storage):
 78 |         """Test embedding generation and caching."""
 79 |         test_text = "Test content for embedding"
 80 |         
 81 |         # Mock the API call
 82 |         mock_response = Mock()
 83 |         mock_response.json.return_value = {
 84 |             "success": True,
 85 |             "result": {"data": [[0.1, 0.2, 0.3, 0.4, 0.5]]}
 86 |         }
 87 |         
 88 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response):
 89 |             # First call should make API request
 90 |             embedding1 = await cloudflare_storage._generate_embedding(test_text)
 91 |             assert embedding1 == [0.1, 0.2, 0.3, 0.4, 0.5]
 92 |             
 93 |             # Second call should use cache
 94 |             embedding2 = await cloudflare_storage._generate_embedding(test_text)
 95 |             assert embedding2 == [0.1, 0.2, 0.3, 0.4, 0.5]
 96 |             assert embedding1 == embedding2
 97 |             
 98 |             # Verify cache is populated
 99 |             assert len(cloudflare_storage._embedding_cache) == 1
100 |     
101 |     @pytest.mark.asyncio
102 |     async def test_embedding_api_failure(self, cloudflare_storage):
103 |         """Test handling of embedding API failures."""
104 |         test_text = "Test content"
105 |         
106 |         # Mock failed API response
107 |         mock_response = Mock()
108 |         mock_response.json.return_value = {
109 |             "success": False,
110 |             "errors": ["API error"]
111 |         }
112 |         
113 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response):
114 |             with pytest.raises(ValueError, match="Workers AI embedding failed"):
115 |                 await cloudflare_storage._generate_embedding(test_text)
116 |     
117 |     @pytest.mark.asyncio
118 |     async def test_retry_logic(self, cloudflare_storage):
119 |         """Test retry logic with rate limiting."""
120 |         import httpx
121 |         
122 |         # Mock rate limited response followed by success
123 |         responses = [
124 |             Mock(status_code=429, raise_for_status=Mock(side_effect=httpx.HTTPStatusError("Rate limited", request=Mock(), response=Mock()))),
125 |             Mock(status_code=200, raise_for_status=Mock(), json=Mock(return_value={"success": True}))
126 |         ]
127 |         
128 |         with patch('httpx.AsyncClient.request', side_effect=responses):
129 |             with patch('asyncio.sleep'):  # Speed up test
130 |                 response = await cloudflare_storage._retry_request("GET", "https://test.com")
131 |                 assert response.status_code == 200
132 |     
133 |     @pytest.mark.asyncio
134 |     async def test_initialization_schema_creation(self, cloudflare_storage):
135 |         """Test D1 schema initialization."""
136 |         mock_response = Mock()
137 |         mock_response.json.return_value = {"success": True}
138 |         
139 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response) as mock_request:
140 |             with patch.object(cloudflare_storage, '_verify_vectorize_index'):
141 |                 with patch.object(cloudflare_storage, '_verify_r2_bucket'):
142 |                     await cloudflare_storage.initialize()
143 |                     
144 |                     # Verify D1 schema creation was called
145 |                     assert any("CREATE TABLE" in str(call) for call in mock_request.call_args_list)
146 |                     assert cloudflare_storage._initialized
147 |     
148 |     @pytest.mark.asyncio
149 |     async def test_store_memory_small_content(self, cloudflare_storage, sample_memory):
150 |         """Test storing memory with small content (no R2)."""
151 |         # Mock successful responses
152 |         mock_embedding = [0.1, 0.2, 0.3]
153 |         mock_d1_response = Mock()
154 |         mock_d1_response.json.return_value = {
155 |             "success": True,
156 |             "result": [{"meta": {"last_row_id": 123}}]
157 |         }
158 | 
159 |         with patch.object(cloudflare_storage, '_generate_embedding', return_value=mock_embedding):
160 |             # Mock Vectorize storage (bypasses HTTP client) - must be AsyncMock for async method
161 |             with patch.object(cloudflare_storage, '_store_vectorize_vector', new_callable=AsyncMock):
162 |                 with patch.object(cloudflare_storage, '_retry_request') as mock_request:
163 |                     # Need 5 responses: 1 for memory insert + 2 tags * 2 calls each (insert + link)
164 |                     mock_request.side_effect = [
165 |                         mock_d1_response,  # Insert memory
166 |                         mock_d1_response,  # Insert tag "test"
167 |                         mock_d1_response,  # Link tag "test"
168 |                         mock_d1_response,  # Insert tag "memory"
169 |                         mock_d1_response   # Link tag "memory"
170 |                     ]
171 | 
172 |                     success, message = await cloudflare_storage.store(sample_memory)
173 | 
174 |                     assert success
175 |                     assert "successfully" in message.lower()
176 | 
177 |                     # Verify all D1 calls were made
178 |                     assert mock_request.call_count == 5
179 |     
180 |     @pytest.mark.asyncio
181 |     async def test_store_memory_large_content(self, cloudflare_storage):
182 |         """Test storing memory with large content (uses R2)."""
183 |         # Create memory with large content
184 |         large_content = "x" * (2 * 1024 * 1024)  # 2MB content
185 |         memory = Memory(
186 |             content=large_content,
187 |             content_hash=generate_content_hash(large_content),
188 |             tags=["large"],
189 |             memory_type="standard"
190 |         )
191 | 
192 |         mock_embedding = [0.1, 0.2, 0.3]
193 |         mock_response = Mock()
194 |         mock_response.json.return_value = {"success": True, "result": [{"meta": {"last_row_id": 123}}]}
195 |         mock_response.status_code = 200
196 | 
197 |         with patch.object(cloudflare_storage, '_generate_embedding', return_value=mock_embedding):
198 |             # Mock Vectorize storage (bypasses HTTP client) - must be AsyncMock for async method
199 |             with patch.object(cloudflare_storage, '_store_vectorize_vector', new_callable=AsyncMock):
200 |                 with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response):
201 |                     success, message = await cloudflare_storage.store(memory)
202 | 
203 |                     assert success
204 |                     assert "successfully" in message.lower()
205 |     
206 |     @pytest.mark.asyncio
207 |     async def test_retrieve_memories(self, cloudflare_storage):
208 |         """Test retrieving memories by semantic search."""
209 |         mock_embedding = [0.1, 0.2, 0.3]
210 |         mock_vectorize_response = Mock()
211 |         mock_vectorize_response.json.return_value = {
212 |             "success": True,
213 |             "result": {
214 |                 "matches": [{
215 |                     "id": "mem_test123",
216 |                     "score": 0.95,
217 |                     "metadata": {"content_hash": "test123"}
218 |                 }]
219 |             }
220 |         }
221 |         
222 |         mock_d1_response = Mock()
223 |         mock_d1_response.json.return_value = {
224 |             "success": True,
225 |             "result": [{
226 |                 "results": [{
227 |                     "id": 1,
228 |                     "content_hash": "test123",
229 |                     "content": "Test memory content",
230 |                     "memory_type": "standard",
231 |                     "created_at": 1234567890,
232 |                     "metadata_json": "{}"
233 |                 }]
234 |             }]
235 |         }
236 |         
237 |         # Mock tag loading
238 |         mock_tags_response = Mock()
239 |         mock_tags_response.json.return_value = {
240 |             "success": True,
241 |             "result": [{"results": [{"name": "test"}, {"name": "memory"}]}]
242 |         }
243 |         
244 |         with patch.object(cloudflare_storage, '_generate_embedding', return_value=mock_embedding):
245 |             with patch.object(cloudflare_storage, '_retry_request') as mock_request:
246 |                 mock_request.side_effect = [mock_vectorize_response, mock_d1_response, mock_tags_response]
247 |                 
248 |                 results = await cloudflare_storage.retrieve("test query", 5)
249 |                 
250 |                 assert len(results) == 1
251 |                 assert results[0].similarity_score == 0.95
252 |                 assert results[0].memory.content == "Test memory content"
253 |                 assert results[0].memory.content_hash == "test123"
254 |     
255 |     @pytest.mark.asyncio
256 |     async def test_search_by_tag(self, cloudflare_storage):
257 |         """Test searching memories by tags."""
258 |         mock_d1_response = Mock()
259 |         mock_d1_response.json.return_value = {
260 |             "success": True,
261 |             "result": [{
262 |                 "results": [{
263 |                     "id": 1,
264 |                     "content_hash": "test123",
265 |                     "content": "Tagged memory",
266 |                     "memory_type": "standard"
267 |                 }]
268 |             }]
269 |         }
270 |         
271 |         mock_tags_response = Mock()
272 |         mock_tags_response.json.return_value = {
273 |             "success": True,
274 |             "result": [{"results": [{"name": "test"}]}]
275 |         }
276 | 
277 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
278 |             mock_request.side_effect = [mock_d1_response, mock_tags_response]
279 | 
280 |             memories = await cloudflare_storage.search_by_tag(["test"])
281 | 
282 |             assert len(memories) == 1
283 |             assert memories[0].content == "Tagged memory"
284 |             assert memories[0].content_hash == "test123"
285 | 
286 |     @pytest.mark.asyncio
287 |     async def test_search_by_tags_or_operation(self, cloudflare_storage):
288 |         """Test search_by_tags uses OR semantics by default."""
289 |         mock_d1_response = Mock()
290 |         mock_d1_response.json.return_value = {
291 |             "success": True,
292 |             "result": [{
293 |                 "results": [{
294 |                     "id": 2,
295 |                     "content_hash": "abc123",
296 |                     "content": "Multi tag memory",
297 |                     "memory_type": "standard"
298 |                 }]
299 |             }]
300 |         }
301 | 
302 |         mock_tags_response = Mock()
303 |         mock_tags_response.json.return_value = {
304 |             "success": True,
305 |             "result": [{"results": [{"name": "alpha"}, {"name": "beta"}]}]
306 |         }
307 | 
308 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
309 |             mock_request.side_effect = [mock_d1_response, mock_tags_response]
310 | 
311 |             memories = await cloudflare_storage.search_by_tags(["alpha", "beta"], operation="OR")
312 | 
313 |             assert len(memories) == 1
314 |             assert memories[0].content_hash == "abc123"
315 | 
316 |             query_payload = mock_request.call_args_list[0].kwargs["json"]
317 |             assert "HAVING" not in query_payload["sql"].upper()
318 |             assert query_payload["params"] == ["alpha", "beta"]
319 | 
320 |     @pytest.mark.asyncio
321 |     async def test_search_by_tags_and_operation(self, cloudflare_storage):
322 |         """Test search_by_tags enforces AND semantics when requested."""
323 |         mock_d1_response = Mock()
324 |         mock_d1_response.json.return_value = {
325 |             "success": True,
326 |             "result": [{
327 |                 "results": [{
328 |                     "id": 3,
329 |                     "content_hash": "def456",
330 |                     "content": "All tag match",
331 |                     "memory_type": "standard"
332 |                 }]
333 |             }]
334 |         }
335 | 
336 |         mock_tags_response = Mock()
337 |         mock_tags_response.json.return_value = {
338 |             "success": True,
339 |             "result": [{"results": [{"name": "alpha"}, {"name": "beta"}]}]
340 |         }
341 | 
342 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
343 |             mock_request.side_effect = [mock_d1_response, mock_tags_response]
344 | 
345 |             memories = await cloudflare_storage.search_by_tags(["alpha", "beta"], operation="AND")
346 | 
347 |             assert len(memories) == 1
348 |             assert memories[0].content_hash == "def456"
349 | 
350 |             query_payload = mock_request.call_args_list[0].kwargs["json"]
351 |             assert "HAVING COUNT(DISTINCT T.NAME) = ?" in query_payload["sql"].upper()
352 |             assert query_payload["params"] == ["alpha", "beta", 2]
353 | 
354 |     @pytest.mark.asyncio
355 |     async def test_get_all_tags(self, cloudflare_storage):
356 |         """Test retrieving all distinct tags from Cloudflare storage."""
357 |         mock_response = Mock()
358 |         mock_response.json.return_value = {
359 |             "success": True,
360 |             "result": [{
361 |                 "results": [
362 |                     {"name": "alpha"},
363 |                     {"name": "beta"}
364 |                 ]
365 |             }]
366 |         }
367 | 
368 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response) as mock_request:
369 |             tags = await cloudflare_storage.get_all_tags()
370 |             assert tags == ["alpha", "beta"]
371 |             assert mock_request.called
372 | 
373 |     @pytest.mark.asyncio
374 |     async def test_get_all_tags_with_counts(self, cloudflare_storage):
375 |         """Test retrieving tag usage counts using memory_tags join."""
376 |         mock_response = Mock()
377 |         mock_response.json.return_value = {
378 |             "success": True,
379 |             "result": [{
380 |                 "results": [
381 |                     {"tag": "alpha", "count": 5},
382 |                     {"tag": "beta", "count": 2}
383 |                 ]
384 |             }]
385 |         }
386 | 
387 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response) as mock_request:
388 |             tags_with_counts = await cloudflare_storage.get_all_tags_with_counts()
389 |             assert tags_with_counts == [
390 |                 {"tag": "alpha", "count": 5},
391 |                 {"tag": "beta", "count": 2}
392 |             ]
393 |             sql_payload = mock_request.call_args.kwargs["json"]["sql"].lower()
394 |             assert "left join" in sql_payload and "group by" in sql_payload
395 | 
396 |     @pytest.mark.asyncio
397 |     async def test_get_all_memories_with_tag_filter(self, cloudflare_storage):
398 |         """Ensure list_memories SQL filters through memory_tags join."""
399 |         memory_row = {
400 |             "id": 1,
401 |             "content_hash": "abc123",
402 |             "content": "filtered content",
403 |             "memory_type": "standard",
404 |             "metadata_json": "{}",
405 |             "created_at": 123,
406 |             "created_at_iso": "2025-11-17T00:00:00Z",
407 |             "updated_at": None,
408 |             "updated_at_iso": None
409 |         }
410 | 
411 |         mock_memories_response = Mock()
412 |         mock_memories_response.json.return_value = {
413 |             "success": True,
414 |             "result": [{"results": [memory_row]}]
415 |         }
416 | 
417 |         mock_tags_response = Mock()
418 |         mock_tags_response.json.return_value = {
419 |             "success": True,
420 |             "result": [{"results": [{"name": "status:initialized"}]}]
421 |         }
422 | 
423 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
424 |             mock_request.side_effect = [mock_memories_response, mock_tags_response]
425 | 
426 |             memories = await cloudflare_storage.get_all_memories(limit=10, tags=["status:initialized"])
427 | 
428 |             assert len(memories) == 1
429 |             sql_payload = mock_request.call_args_list[0].kwargs["json"]
430 |             sql_text = sql_payload["sql"].upper()
431 |             assert "JOIN MEMORY_TAGS" in sql_text and "HAVING COUNT(DISTINCT T.NAME) = ?" in sql_text
432 |             assert sql_payload["params"] == ["status:initialized", 1, 10]
433 | 
434 |     @pytest.mark.asyncio
435 |     async def test_count_all_memories_with_tag_filter(self, cloudflare_storage):
436 |         """Ensure count uses distinct memory IDs and tag filtering."""
437 |         mock_response = Mock()
438 |         mock_response.json.return_value = {
439 |             "success": True,
440 |             "result": [{"results": [{"count": 3}]}]
441 |         }
442 | 
443 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response) as mock_request:
444 |             count = await cloudflare_storage.count_all_memories(tags=["alpha", "beta"])
445 | 
446 |             assert count == 3
447 |             sql_payload = mock_request.call_args.kwargs["json"]
448 |             sql_text = sql_payload["sql"].upper()
449 |             assert "COUNT(*) AS COUNT FROM" in sql_text and "HAVING COUNT(DISTINCT T.NAME) = ?" in sql_text
450 |             assert sql_payload["params"] == ["alpha", "beta", 2]
451 | 
452 |     @pytest.mark.asyncio
453 |     async def test_delete_memory(self, cloudflare_storage):
454 |         """Test deleting a memory."""
455 |         mock_find_response = Mock()
456 |         mock_find_response.json.return_value = {
457 |             "success": True,
458 |             "result": [{
459 |                 "results": [{
460 |                     "id": 1,
461 |                     "vector_id": "mem_test123",
462 |                     "r2_key": None
463 |                 }]
464 |             }]
465 |         }
466 |         
467 |         mock_delete_response = Mock()
468 |         mock_delete_response.json.return_value = {"success": True}
469 |         
470 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
471 |             mock_request.side_effect = [mock_find_response, mock_delete_response, mock_delete_response]
472 |             
473 |             success, message = await cloudflare_storage.delete("test123")
474 |             
475 |             assert success
476 |             assert "successfully" in message.lower()
477 |     
478 |     @pytest.mark.asyncio
479 |     async def test_get_stats(self, cloudflare_storage):
480 |         """Test getting storage statistics."""
481 |         mock_response = Mock()
482 |         mock_response.json.return_value = {
483 |             "success": True,
484 |             "result": [{
485 |                 "results": [{
486 |                     "total_memories": 10,
487 |                     "total_content_size": 1024,
488 |                     "total_vectors": 10,
489 |                     "r2_stored_count": 2
490 |                 }]
491 |             }]
492 |         }
493 |         
494 |         with patch.object(cloudflare_storage, '_retry_request', return_value=mock_response):
495 |             stats = await cloudflare_storage.get_stats()
496 |             
497 |             assert stats["total_memories"] == 10
498 |             assert stats["total_content_size_bytes"] == 1024
499 |             assert stats["storage_backend"] == "cloudflare"
500 |             assert stats["vectorize_index"] == "test-index"
501 |             assert stats["status"] == "operational"
502 |     
503 |     @pytest.mark.asyncio
504 |     async def test_cleanup_duplicates(self, cloudflare_storage):
505 |         """Test cleaning up duplicate memories."""
506 |         mock_find_response = Mock()
507 |         mock_find_response.json.return_value = {
508 |             "success": True,
509 |             "result": [{
510 |                 "results": [{
511 |                     "content_hash": "duplicate123",
512 |                     "count": 3,
513 |                     "keep_id": 1
514 |                 }]
515 |             }]
516 |         }
517 |         
518 |         mock_delete_response = Mock()
519 |         mock_delete_response.json.return_value = {
520 |             "success": True,
521 |             "result": [{"meta": {"changes": 2}}]
522 |         }
523 |         
524 |         with patch.object(cloudflare_storage, '_retry_request') as mock_request:
525 |             mock_request.side_effect = [mock_find_response, mock_delete_response]
526 |             
527 |             count, message = await cloudflare_storage.cleanup_duplicates()
528 |             
529 |             assert count == 2
530 |             assert "2 duplicates" in message
531 |     
532 |     @pytest.mark.asyncio
533 |     async def test_close(self, cloudflare_storage):
534 |         """Test closing the storage backend."""
535 |         # Create a mock client
536 |         mock_client = AsyncMock()
537 |         cloudflare_storage.client = mock_client
538 |         cloudflare_storage._embedding_cache = {"test": [1, 2, 3]}
539 | 
540 |         await cloudflare_storage.close()
541 | 
542 |         # Verify client was closed and cache cleared
543 |         mock_client.aclose.assert_called_once()
544 |         assert cloudflare_storage.client is None
545 |         assert len(cloudflare_storage._embedding_cache) == 0
546 | 
547 |     def test_sanitized_method(self, cloudflare_storage):
548 |         """Test tag sanitization method."""
549 |         # Test with None
550 |         assert cloudflare_storage.sanitized(None) == "[]"
551 | 
552 |         # Test with string
553 |         assert cloudflare_storage.sanitized("tag1,tag2,tag3") == '["tag1", "tag2", "tag3"]'
554 | 
555 |         # Test with list
556 |         assert cloudflare_storage.sanitized(["tag1", "tag2"]) == '["tag1", "tag2"]'
557 | 
558 |         # Test with empty string
559 |         assert cloudflare_storage.sanitized("") == "[]"
560 | 
561 |         # Test with empty list
562 |         assert cloudflare_storage.sanitized([]) == "[]"
563 | 
564 |         # Test with mixed types in list
565 |         assert cloudflare_storage.sanitized([1, "tag2", 3.14]) == '["1", "tag2", "3.14"]'
566 | 
```
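
The D1 HTTP envelope — `{"success": true, "result": [{"results": [...]}]}` — is rebuilt by hand in nearly every test above. A small factory keeps future tests consistent with the suite's `unittest.mock` usage; this is a minimal sketch, and the helper name `make_d1_response` is illustrative rather than part of the codebase:

```python
from unittest.mock import Mock

def make_d1_response(rows, success=True):
    """Build a Mock that mimics Cloudflare D1's response envelope."""
    response = Mock()
    response.json.return_value = {
        "success": success,
        "result": [{"results": rows}],
    }
    return response

# The two responses test_search_by_tags_and_operation queues via side_effect:
mock_d1_response = make_d1_response([
    {"id": 3, "content_hash": "def456", "content": "All tag match", "memory_type": "standard"},
])
mock_tags_response = make_d1_response([{"name": "alpha"}, {"name": "beta"}])
```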

--------------------------------------------------------------------------------
/claude-hooks/tests/integration-test.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | 
  3 | /**
  4 |  * Integration Test for Claude Code Memory Awareness Hooks
  5 |  * Tests the complete Phase 1 implementation end-to-end
  6 |  */
  7 | 
  8 | const fs = require('fs');
  9 | const path = require('path');
 10 | 
 11 | // Import hooks and utilities
 12 | const sessionStartHook = require('../core/session-start');
 13 | const sessionEndHook = require('../core/session-end');
 14 | const { detectProjectContext } = require('../utilities/project-detector');
 15 | const { scoreMemoryRelevance } = require('../utilities/memory-scorer');
 16 | const { formatMemoriesForContext } = require('../utilities/context-formatter');
 17 | 
 18 | /**
 19 |  * Test Results Tracker
 20 |  */
 21 | class TestResults {
 22 |     constructor() {
 23 |         this.tests = [];
 24 |         this.passed = 0;
 25 |         this.failed = 0;
 26 |     }
 27 |     
 28 |     test(name, testFn) {
 29 |         console.log(`\n🧪 Testing: ${name}`);
 30 |         try {
 31 |             const result = testFn();
 32 |             if (result === true || (result && result.success !== false)) {
 33 |                 console.log(`✅ PASS: ${name}`);
 34 |                 this.passed++;
 35 |                 this.tests.push({ name, status: 'PASS', result });
 36 |             } else {
 37 |                 console.log(`❌ FAIL: ${name} - ${result?.error || 'Test returned false'}`);
 38 |                 this.failed++;
 39 |                 this.tests.push({ name, status: 'FAIL', error: result?.error || 'Test returned false' });
 40 |             }
 41 |         } catch (error) {
 42 |             console.log(`❌ FAIL: ${name} - ${error.message}`);
 43 |             this.failed++;
 44 |             this.tests.push({ name, status: 'FAIL', error: error.message });
 45 |         }
 46 |     }
 47 |     
 48 |     async asyncTest(name, testFn) {
 49 |         console.log(`\n🧪 Testing: ${name}`);
 50 |         try {
 51 |             const result = await testFn();
 52 |             if (result === true || (result && result.success !== false)) {
 53 |                 console.log(`✅ PASS: ${name}`);
 54 |                 this.passed++;
 55 |                 this.tests.push({ name, status: 'PASS', result });
 56 |             } else {
 57 |                 console.log(`❌ FAIL: ${name} - ${result?.error || 'Test returned false'}`);
 58 |                 this.failed++;
 59 |                 this.tests.push({ name, status: 'FAIL', error: result?.error || 'Test returned false' });
 60 |             }
 61 |         } catch (error) {
 62 |             console.log(`❌ FAIL: ${name} - ${error.message}`);
 63 |             this.failed++;
 64 |             this.tests.push({ name, status: 'FAIL', error: error.message });
 65 |         }
 66 |     }
 67 |     
 68 |     summary() {
 69 |         console.log('\n' + '='.repeat(60));
 70 |         console.log('🎯 TEST SUMMARY');
 71 |         console.log('='.repeat(60));
 72 |         console.log(`Total Tests: ${this.tests.length}`);
 73 |         console.log(`✅ Passed: ${this.passed}`);
 74 |         console.log(`❌ Failed: ${this.failed}`);
 75 |         console.log(`Success Rate: ${this.tests.length ? ((this.passed / this.tests.length) * 100).toFixed(1) : '0.0'}%`);
 76 |         
 77 |         if (this.failed > 0) {
 78 |             console.log('\n🔍 FAILED TESTS:');
 79 |             this.tests.filter(t => t.status === 'FAIL').forEach(test => {
 80 |                 console.log(`  - ${test.name}: ${test.error}`);
 81 |             });
 82 |         }
 83 |         
 84 |         console.log('='.repeat(60));
 85 |         return this.failed === 0;
 86 |     }
 87 | }
 88 | 
 89 | /**
 90 |  * Mock data for testing
 91 |  */
 92 | const mockMemories = [
 93 |     {
 94 |         content: 'Decided to use SQLite-vec instead of ChromaDB for better performance in MCP Memory Service. SQLite-vec provides 10x faster startup and uses 75% less memory.',
 95 |         tags: ['mcp-memory-service', 'decision', 'sqlite-vec', 'performance'],
 96 |         memory_type: 'decision',
 97 |         created_at_iso: '2025-08-19T10:00:00Z'
 98 |     },
 99 |     {
100 |         content: 'Implemented comprehensive Claude Code hooks system for automatic memory awareness. Created session-start, session-end, and topic-change hooks with project detection.',
101 |         tags: ['claude-code', 'hooks', 'architecture', 'memory-awareness'],
102 |         memory_type: 'architecture',
103 |         created_at_iso: '2025-08-19T09:30:00Z'
104 |     },
105 |     {
106 |         content: 'Fixed critical bug in project detector - was not handling pyproject.toml files correctly. Added proper Python project detection.',
107 |         tags: ['bug-fix', 'project-detector', 'python'],
108 |         memory_type: 'bug-fix',
109 |         created_at_iso: '2025-08-18T15:30:00Z'
110 |     },
111 |     {
112 |         content: 'Learning session on memory relevance scoring algorithms. Implemented time decay, tag matching, and content analysis for intelligent memory selection.',
113 |         tags: ['learning', 'algorithms', 'memory-scoring'],
114 |         memory_type: 'insight',
115 |         created_at_iso: '2025-08-17T14:00:00Z'
116 |     },
117 |     {
118 |         content: 'Random note about completely unrelated project for testing filtering',
119 |         tags: ['other-project', 'unrelated', 'test'],
120 |         memory_type: 'note',
121 |         created_at_iso: '2025-08-01T08:00:00Z'
122 |     }
123 | ];
124 | 
125 | const mockProjectContext = {
126 |     name: 'mcp-memory-service',
127 |     directory: process.cwd(),
128 |     language: 'JavaScript',
129 |     frameworks: ['Node.js'],
130 |     tools: ['npm'],
131 |     git: {
132 |         isRepo: true,
133 |         branch: 'main',
134 |         repoName: 'mcp-memory-service',
135 |         lastCommit: 'abc1234 Implement memory awareness hooks'
136 |     },
137 |     confidence: 0.9
138 | };
139 | 
140 | const mockConversation = {
141 |     messages: [
142 |         {
143 |             role: 'user',
144 |             content: 'I need to implement a memory awareness system for Claude Code that automatically injects relevant project memories.'
145 |         },
146 |         {
147 |             role: 'assistant', 
148 |             content: 'I\'ll help you create a comprehensive memory awareness system. We decided to use Claude Code hooks for session management and implement automatic context injection. This will include project detection, memory scoring, and intelligent context formatting.'
149 |         },
150 |         {
151 |             role: 'user',
152 |             content: 'Great! I learned that we need project detection algorithms and memory scoring systems. Can you implement the project detector?'
153 |         },
154 |         {
155 |             role: 'assistant',
156 |             content: 'Exactly. I implemented the project detector in project-detector.js with support for multiple languages and frameworks. I also created memory scoring algorithms with time decay and relevance matching. Next we need to test the complete system and add session consolidation.'
157 |         }
158 |     ]
159 | };
160 | 
161 | /**
162 |  * Run comprehensive tests
163 |  */
164 | async function runTests() {
165 |     console.log('🚀 Claude Code Memory Awareness - Integration Tests');
166 |     console.log('Testing Phase 1 Implementation\n');
167 |     
168 |     const results = new TestResults();
169 |     
170 |     // Test 1: Project Detection
171 |     await results.asyncTest('Project Detection', async () => {
172 |         const context = await detectProjectContext(process.cwd());
173 |         
174 |         if (!context.name) {
175 |             return { success: false, error: 'No project name detected' };
176 |         }
177 |         
178 |         if (!context.language) {
179 |             return { success: false, error: 'No language detected' };
180 |         }
181 |         
182 |         console.log(`  Detected: ${context.name} (${context.language}), Confidence: ${(context.confidence * 100).toFixed(1)}%`);
183 |         return { success: true, context };
184 |     });
185 |     
186 |     // Test 2: Memory Relevance Scoring
187 |     results.test('Memory Relevance Scoring', () => {
188 |         const scored = scoreMemoryRelevance(mockMemories, mockProjectContext);
189 |         
190 |         if (!Array.isArray(scored)) {
191 |             return { success: false, error: 'Scoring did not return array' };
192 |         }
193 |         
194 |         if (scored.length !== mockMemories.length) {
195 |             return { success: false, error: 'Scoring lost memories' };
196 |         }
197 |         
198 |         // Check that memories have scores
199 |         for (const memory of scored) {
200 |             if (typeof memory.relevanceScore !== 'number') {
201 |                 return { success: false, error: 'Memory missing relevance score' };
202 |             }
203 |         }
204 |         
205 |         // Check that memories are sorted by relevance (highest first)
206 |         for (let i = 1; i < scored.length; i++) {
207 |             if (scored[i].relevanceScore > scored[i-1].relevanceScore) {
208 |                 return { success: false, error: 'Memories not sorted by relevance' };
209 |             }
210 |         }
211 |         
212 |         console.log(`  Scored ${scored.length} memories, top score: ${scored[0].relevanceScore.toFixed(3)}`);
213 |         return { success: true, scored };
214 |     });
215 |     
216 |     // Test 3: Context Formatting
217 |     results.test('Context Formatting', () => {
218 |         const scored = scoreMemoryRelevance(mockMemories, mockProjectContext);
219 |         const formatted = formatMemoriesForContext(scored, mockProjectContext);
220 |         
221 |         if (typeof formatted !== 'string') {
222 |             return { success: false, error: 'Formatting did not return string' };
223 |         }
224 |         
225 |         if (formatted.length < 100) {
226 |             return { success: false, error: 'Formatted context too short' };
227 |         }
228 |         
229 |         // Check for key formatting elements
230 |         if (!formatted.includes('Memory Context')) {
231 |             return { success: false, error: 'Missing memory context header' };
232 |         }
233 |         
234 |         if (!formatted.includes(mockProjectContext.name)) {
235 |             return { success: false, error: 'Missing project name in context' };
236 |         }
237 |         
238 |         console.log(`  Generated ${formatted.length} characters of formatted context`);
239 |         return { success: true, formatted };
240 |     });
241 |     
242 |     // Test 4: Session Start Hook Structure
243 |     results.test('Session Start Hook Structure', () => {
244 |         if (typeof sessionStartHook.handler !== 'function') {
245 |             return { success: false, error: 'Session start hook missing handler function' };
246 |         }
247 |         
248 |         if (!sessionStartHook.name || !sessionStartHook.version) {
249 |             return { success: false, error: 'Session start hook missing metadata' };
250 |         }
251 |         
252 |         if (sessionStartHook.trigger !== 'session-start') {
253 |             return { success: false, error: 'Session start hook wrong trigger' };
254 |         }
255 |         
256 |         console.log(`  Hook: ${sessionStartHook.name} v${sessionStartHook.version}`);
257 |         return { success: true };
258 |     });
259 |     
260 |     // Test 5: Session End Hook Structure
261 |     results.test('Session End Hook Structure', () => {
262 |         if (typeof sessionEndHook.handler !== 'function') {
263 |             return { success: false, error: 'Session end hook missing handler function' };
264 |         }
265 |         
266 |         if (!sessionEndHook.name || !sessionEndHook.version) {
267 |             return { success: false, error: 'Session end hook missing metadata' };
268 |         }
269 |         
270 |         if (sessionEndHook.trigger !== 'session-end') {
271 |             return { success: false, error: 'Session end hook wrong trigger' };
272 |         }
273 |         
274 |         console.log(`  Hook: ${sessionEndHook.name} v${sessionEndHook.version}`);
275 |         return { success: true };
276 |     });
277 |     
278 |     // Test 6: Configuration Loading
279 |     results.test('Configuration Loading', () => {
280 |         const configPath = path.join(__dirname, '../config.json');
281 | 
282 |         if (!fs.existsSync(configPath)) {
283 |             return { success: false, error: 'Configuration file not found' };
284 |         }
285 | 
286 |         try {
287 |             const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
288 | 
289 |             if (!config.memoryService) {
290 |                 return { success: false, error: 'Invalid configuration structure' };
291 |             }
292 | 
293 |             // Support both old (direct endpoint) and new (dual-protocol) structures
294 |             const endpoint = config.memoryService.endpoint || config.memoryService.http?.endpoint;
295 | 
296 |             if (!endpoint) {
297 |                 return { success: false, error: 'No endpoint configured (checked both old and new format)' };
298 |             }
299 | 
300 |             console.log(`  Endpoint: ${endpoint}`);
301 |             return { success: true, config };
302 | 
303 |         } catch (error) {
304 |             return { success: false, error: `Configuration parse error: ${error.message}` };
305 |         }
306 |     });
307 |     
308 |     // Test 7: File Structure
309 |     results.test('File Structure Validation', () => {
310 |         const requiredFiles = [
311 |             '../core/session-start.js',
312 |             '../core/session-end.js',
313 |             '../utilities/project-detector.js',
314 |             '../utilities/memory-scorer.js', 
315 |             '../utilities/context-formatter.js',
316 |             '../config.json',
317 |             '../config.template.json',
318 |             '../README.md'
319 |         ];
320 |         
321 |         for (const file of requiredFiles) {
322 |             const fullPath = path.join(__dirname, file);
323 |             if (!fs.existsSync(fullPath)) {
324 |                 return { success: false, error: `Missing required file: ${file}` };
325 |             }
326 |         }
327 |         
328 |         console.log(`  All ${requiredFiles.length} required files present`);
329 |         return { success: true };
330 |     });
331 |     
332 |     // Test 8: Mock Session Start (Limited Test)
333 |     await results.asyncTest('Mock Session Start Hook', async () => {
334 |         const mockContext = {
335 |             workingDirectory: process.cwd(),
336 |             sessionId: 'test-session',
337 |             injectSystemMessage: async (message) => {
338 |                 if (typeof message !== 'string' || message.length < 50) {
339 |                     throw new Error('Invalid message injection');
340 |                 }
341 |                 console.log(`  Injected ${message.length} characters of context`);
342 |                 return true;
343 |             }
344 |         };
345 |         
346 |         try {
347 |             // Note: This will attempt to contact the memory service
348 |             // In a real test environment, we'd mock this
349 |             await sessionStartHook.handler(mockContext);
350 |             return { success: true };
351 |         } catch (error) {
352 |             // Expected to fail without real memory service connection or when dependencies are missing
353 |             if (error.message.includes('Network error') ||
354 |                 error.message.includes('ENOTFOUND') ||
355 |                 error.message.includes('memoryClient is not defined') ||
356 |                 error.message.includes('No active connection')) {
357 |                 console.log('  ⚠️  Expected error (no memory service or connection available)');
358 |                 console.log('  This is expected if the service is not running during tests');
359 |                 return { success: true }; // This is expected in test environment
360 |             }
361 |             throw error;
362 |         }
363 |     });
364 |     
365 |     // Test 9: Package Dependencies
366 |     results.test('Package Dependencies Check', () => {
367 |         const requiredModules = ['fs', 'path', 'https', 'child_process'];
368 |         
369 |         for (const module of requiredModules) {
370 |             try {
371 |                 require(module);
372 |             } catch (error) {
373 |                 return { success: false, error: `Missing required module: ${module}` };
374 |             }
375 |         }
376 |         
377 |         console.log(`  All ${requiredModules.length} required Node.js modules available`);
378 |         return { success: true };
379 |     });
380 |     
381 |     // Test 10: Claude Code Settings Validation
382 |     results.test('Claude Code Settings Configuration', () => {
383 |         const settingsPath = path.join(require('os').homedir(), '.claude', 'settings.json');
384 |         
385 |         if (!fs.existsSync(settingsPath)) {
386 |             return { success: false, error: 'Claude Code settings.json not found' };
387 |         }
388 |         
389 |         try {
390 |             const settings = JSON.parse(fs.readFileSync(settingsPath, 'utf8'));
391 |             
392 |             // Check for hooks configuration
393 |             if (!settings.hooks) {
394 |                 return { success: false, error: 'No hooks configuration found in settings' };
395 |             }
396 |             
397 |             // Check for SessionStart hook
398 |             if (!settings.hooks.SessionStart || !Array.isArray(settings.hooks.SessionStart)) {
399 |                 return { success: false, error: 'SessionStart hooks not configured' };
400 |             }
401 |             
402 |             // Check for SessionEnd hook
403 |             if (!settings.hooks.SessionEnd || !Array.isArray(settings.hooks.SessionEnd)) {
404 |                 return { success: false, error: 'SessionEnd hooks not configured' };
405 |             }
406 |             
407 |             // Check hook command paths
408 |             const startHook = JSON.stringify(settings.hooks.SessionStart);
409 |             const endHook = JSON.stringify(settings.hooks.SessionEnd);
410 |             
411 |             if (!startHook.includes('session-start.js')) {
412 |                 return { success: false, error: 'SessionStart hook command not configured correctly' };
413 |             }
414 |             
415 |             if (!endHook.includes('session-end.js')) {
416 |                 return { success: false, error: 'SessionEnd hook command not configured correctly' };
417 |             }
418 |             
419 |             console.log('  Claude Code settings configured correctly');
420 |             return { success: true, settings };
421 |             
422 |         } catch (parseError) {
423 |             return { success: false, error: `Settings parse error: ${parseError.message}` };
424 |         }
425 |     });
426 |     
427 |     // Test 11: Hook Files Location Validation
428 |     results.test('Hook Files in Correct Location', () => {
429 |         const hookDir = path.join(require('os').homedir(), '.claude', 'hooks');
430 |         const requiredHooks = [
431 |             'core/session-start.js',
432 |             'core/session-end.js', 
433 |             'utilities/project-detector.js',
434 |             'utilities/memory-scorer.js',
435 |             'utilities/context-formatter.js'
436 |         ];
437 |         
438 |         for (const hookFile of requiredHooks) {
439 |             const fullPath = path.join(hookDir, hookFile);
440 |             if (!fs.existsSync(fullPath)) {
441 |                 return { success: false, error: `Hook file missing: ${hookFile}` };
442 |             }
443 |         }
444 |         
445 |         console.log(`  All hooks installed in ${hookDir}`);
446 |         return { success: true };
447 |     });
448 |     
449 |     // Test 12: Claude Code CLI Availability
450 |     results.test('Claude Code CLI Availability', () => {
451 |         const { execSync } = require('child_process');
452 |         
453 |         try {
454 |             execSync(process.platform === 'win32' ? 'where claude' : 'which claude', { stdio: 'pipe' });
455 |             console.log('  Claude Code CLI available');
456 |             return { success: true };
457 |         } catch (error) {
458 |             return { success: false, error: 'Claude Code CLI not found in PATH' };
459 |         }
460 |     });
461 |     
462 |     // Test 13: Memory Service Protocol
463 |     results.test('Memory Service Protocol Compatibility', () => {
464 |         // Test that we're generating the correct MCP JSON-RPC calls
465 |         const testCall = {
466 |             jsonrpc: '2.0',
467 |             id: 1,
468 |             method: 'tools/call',
469 |             params: {
470 |                 name: 'retrieve_memory',
471 |                 arguments: {
472 |                     query: 'test query',
473 |                     tags: ['test'],
474 |                     limit: 5
475 |                 }
476 |             }
477 |         };
478 |         
479 |         const serialized = JSON.stringify(testCall);
480 |         const parsed = JSON.parse(serialized);
481 |         
482 |         if (!parsed.jsonrpc || parsed.jsonrpc !== '2.0') {
483 |             return { success: false, error: 'Invalid JSON-RPC format' };
484 |         }
485 |         
486 |         if (!parsed.params || !parsed.params.name || !parsed.params.arguments) {
487 |             return { success: false, error: 'Invalid MCP call structure' };
488 |         }
489 |         
490 |         console.log(`  MCP protocol structure valid`);
491 |         return { success: true };
492 |     });
493 |     
494 |     // Test 14: Memory Service Connectivity
495 |     await results.asyncTest('Memory Service Connectivity', async () => {
496 |         const configPath = path.join(__dirname, '../config.json');
497 | 
498 |         if (!fs.existsSync(configPath)) {
499 |             return { success: false, error: 'Configuration file not found for connectivity test' };
500 |         }
501 | 
502 |         try {
503 |             const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
504 | 
505 |             // Support both old (direct) and new (dual-protocol) structures
506 |             const endpoint = config.memoryService?.endpoint || config.memoryService?.http?.endpoint;
507 |             const apiKey = config.memoryService?.apiKey || config.memoryService?.http?.apiKey;
508 | 
509 |             if (!endpoint) {
510 |                 return { success: false, error: 'No memory service endpoint configured (checked both old and new format)' };
511 |             }
512 |             
513 |             // Test basic connectivity (simplified test)
514 |             const https = require('https');
515 |             const url = new URL('/api/health', endpoint);
516 |             
517 |             return new Promise((resolve) => {
518 |                 const options = {
519 |                     hostname: url.hostname,
520 |                     port: url.port || 8443,
521 |                     path: url.pathname,
522 |                     method: 'GET',
523 |                     timeout: 5000,
524 |                     rejectUnauthorized: false
525 |                 };
526 |                 
527 |                 const req = https.request(options, (res) => {
528 |                     console.log(`  Memory service responded with status: ${res.statusCode}`);
529 |                     if (res.statusCode === 200 || res.statusCode === 401) {
530 |                         // 401 is expected without API key, but service is running
531 |                         resolve({ success: true });
532 |                     } else {
533 |                         resolve({ success: false, error: `Service returned status: ${res.statusCode}` });
534 |                     }
535 |                 });
536 |                 
537 |                 req.on('error', (error) => {
538 |                     // Mark as success with warning if service isn't running (expected in test environments)
539 |                     console.log(`  ⚠️  Memory service not available: ${error.message}`);
540 |                     console.log('  This is expected if the service is not running during tests');
541 |                     resolve({ success: true });
542 |                 });
543 | 
544 |                 req.on('timeout', () => {
545 |                     console.log('  ⚠️  Connection timeout - service may not be running');
546 |                     console.log('  This is expected if the service is not running during tests');
547 |                     resolve({ success: true });
548 |                 });
549 |                 
550 |                 req.end();
551 |             });
552 |             
553 |         } catch (parseError) {
554 |             return { success: false, error: `Configuration parse error: ${parseError.message}` };
555 |         }
556 |     });
557 |     
558 |     // Display summary
559 |     const allTestsPassed = results.summary();
560 |     
561 |     if (allTestsPassed) {
562 |         console.log('\n🎉 ALL TESTS PASSED! Phase 1 implementation is ready.');
563 |         console.log('\n📋 Next Steps:');
564 |         console.log('  1. Install hooks in Claude Code hooks directory');
565 |         console.log('  2. Configure memory service endpoint in config.json');
566 |         console.log('  3. Test with real Claude Code session');
567 |         console.log('  4. Begin Phase 2 implementation (dynamic memory loading)');
568 |     } else {
569 |         console.log('\n⚠️  Some tests failed. Please fix issues before proceeding.');
570 |     }
571 |     
572 |     return allTestsPassed;
573 | }
574 | 
575 | // Run tests if called directly
576 | if (require.main === module) {
577 |     runTests()
578 |         .then(success => {
579 |             process.exit(success ? 0 : 1);
580 |         })
581 |         .catch(error => {
582 |             console.error('\n💥 Test suite crashed:', error.message);
583 |             console.error(error.stack);
584 |             process.exit(1);
585 |         });
586 | }
587 | 
588 | module.exports = { runTests };
```
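
The `require.main` guard above makes the suite directly executable, exiting 0 when every test passes and 1 otherwise. A minimal sketch of driving it from Python tooling, assuming Node.js is on PATH (the wrapper itself is illustrative, not part of the repository):

```python
import subprocess
import sys

# Equivalent to running: node claude-hooks/tests/integration-test.js
result = subprocess.run(
    ["node", "claude-hooks/tests/integration-test.js"],
    check=False,  # inspect the exit code ourselves instead of raising
)
sys.exit(result.returncode)  # 0 = all hook tests passed, 1 = failures
```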