This is page 45 of 62. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── commands
│   │   ├── README.md
│   │   ├── refactor-function
│   │   ├── refactor-function-prod
│   │   └── refactor-function.md
│   ├── consolidation-fix-handoff.md
│   ├── consolidation-hang-fix-summary.md
│   ├── directives
│   │   ├── agents.md
│   │   ├── code-quality-workflow.md
│   │   ├── consolidation-details.md
│   │   ├── development-setup.md
│   │   ├── hooks-configuration.md
│   │   ├── memory-first.md
│   │   ├── memory-tagging.md
│   │   ├── pr-workflow.md
│   │   ├── quality-system-details.md
│   │   ├── README.md
│   │   ├── refactoring-checklist.md
│   │   ├── storage-backends.md
│   │   └── version-management.md
│   ├── prompts
│   │   └── hybrid-cleanup-integration.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .coveragerc
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-branch-automation.yml
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── dockerfile-lint.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── publish-dual.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .metrics
│   ├── baseline_cc_install_hooks.txt
│   ├── baseline_mi_install_hooks.txt
│   ├── baseline_nesting_install_hooks.txt
│   ├── BASELINE_REPORT.md
│   ├── COMPLEXITY_COMPARISON.txt
│   ├── QUICK_REFERENCE.txt
│   ├── README.md
│   ├── REFACTORED_BASELINE.md
│   ├── REFACTORING_COMPLETION_REPORT.md
│   └── TRACKING_TABLE.md
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── ai-optimized-tool-descriptions.py
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── auto-capture-hook.js
│   │   ├── auto-capture-hook.ps1
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── permission-request.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-AUTO-CAPTURE.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-PERMISSION-REQUEST.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-permission-request.js
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── auto-capture-patterns.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-cache.json
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   ├── user-override-detector.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── COMMIT_MESSAGE.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── graph-database-design.md
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── demo-recording-script.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-280-post-mortem.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   ├── quality-system-configs.md
│   │   └── tag-schema.json
│   ├── features
│   │   └── association-quality-boost.md
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── memory-quality-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   ├── dashboard-placeholder.md
│   │   └── update-restart-demo.png
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LIGHTWEIGHT_ONNX_SETUP.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   ├── code-execution-api-quick-start.md
│   │   └── graph-migration-guide.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quality-system-ui-implementation.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── refactoring
│   │   └── phase-3-3-analysis.md
│   ├── releases
│   │   └── v8.72.0-testing.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── database-transfer-migration.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── memory-management.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   ├── tutorials
│   │   ├── advanced-techniques.md
│   │   ├── data-analysis.md
│   │   └── demo-session-walkthrough.md
│   ├── wiki-documentation-plan.md
│   └── wiki-Graph-Database-Architecture.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── IMPLEMENTATION_SUMMARY.md
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── PR_DESCRIPTION.md
├── pyproject-lite.toml
├── pyproject.toml
├── pytest.ini
├── README.md
├── release-notes-v8.61.0.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── ci
│   │   ├── check_dockerfile_args.sh
│   │   └── validate_imports.sh
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── add_project_tags.py
│   │   ├── apply_quality_boost_retroactively.py
│   │   ├── assign_memory_types.py
│   │   ├── auto_retag_memory_merge.py
│   │   ├── auto_retag_memory.py
│   │   ├── backfill_graph_table.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_association_memories_hybrid.py
│   │   ├── cleanup_association_memories.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_low_quality.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── delete_test_memories.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   ├── retag_valuable_memories.py
│   │   ├── scan_todos.sh
│   │   ├── soft_delete_test_memories.py
│   │   └── sync_status.py
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── pre_pr_check.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks_on_files.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── bulk_evaluate_onnx.py
│   │   ├── check_test_scores.py
│   │   ├── debug_deberta_scoring.py
│   │   ├── export_deberta_onnx.py
│   │   ├── fix_dead_code_install.sh
│   │   ├── migrate_to_deberta.py
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── rescore_deberta.py
│   │   ├── rescore_fallback.py
│   │   ├── reset_onnx_scores.py
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── memory_wrapper_cleanup.ps1
│   │   ├── memory_wrapper_cleanup.py
│   │   ├── memory_wrapper_cleanup.sh
│   │   ├── README_CLEANUP_WRAPPER.md
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── http_server_manager.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   ├── update_service.sh
│   │   └── windows
│   │       ├── add_watchdog_trigger.ps1
│   │       ├── install_scheduled_task.ps1
│   │       ├── manage_service.ps1
│   │       ├── run_http_server_background.ps1
│   │       ├── uninstall_scheduled_task.ps1
│   │       └── update_and_restart.ps1
│   ├── setup-lightweight.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── update_and_restart.sh
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── detect_platform.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── README_detect_platform.md
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── check_handler_coverage.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_graph_tools.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── _version.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── quality
│       │   ├── __init__.py
│       │   ├── ai_evaluator.py
│       │   ├── async_scorer.py
│       │   ├── config.py
│       │   ├── implicit_signals.py
│       │   ├── metadata_codec.py
│       │   ├── onnx_ranker.py
│       │   └── scorer.py
│       ├── server
│       │   ├── __init__.py
│       │   ├── __main__.py
│       │   ├── cache_manager.py
│       │   ├── client_detection.py
│       │   ├── environment.py
│       │   ├── handlers
│       │   │   ├── __init__.py
│       │   │   ├── consolidation.py
│       │   │   ├── documents.py
│       │   │   ├── graph.py
│       │   │   ├── memory.py
│       │   │   ├── quality.py
│       │   │   └── utility.py
│       │   └── logging_config.py
│       ├── server_impl.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── graph.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   ├── migrations
│       │   │   └── 008_add_graph_table.sql
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── directory_ingestion.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── health_check.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── quality_analytics.py
│       │   ├── startup_orchestrator.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── quality.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── i18n
│               │   ├── de.json
│               │   ├── en.json
│               │   ├── es.json
│               │   ├── fr.json
│               │   ├── ja.json
│               │   ├── ko.json
│               │   └── zh.json
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── TESTING_NOTES.md
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   ├── test_forgetting.py
│   │   └── test_graph_modes.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── conftest.py
│   │   ├── HANDLER_COVERAGE_REPORT.md
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_all_memory_handlers.py
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── storage
│   │   ├── conftest.py
│   │   └── test_graph_storage.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_deberta_quality.py
│   ├── test_fallback_quality.py
│   ├── test_graph_traversal.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_lightweight_onnx.py
│   ├── test_memory_ops.py
│   ├── test_memory_wrapper_cleanup.py
│   ├── test_quality_integration.py
│   ├── test_quality_system.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_imports.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       ├── test_tag_time_filtering.py
│       └── test_uv_no_pip_installer_fallback.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
├── uv.lock
└── verify_compression.sh
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/utils/time_parser.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Natural language time expression parser for MCP Memory Service.
 17 | 
 18 | This module provides utilities to parse and understand various time expressions
 19 | for retrieving memories based on when they were stored.
 20 | """
 21 | import re
 22 | import logging
 23 | from datetime import datetime, timedelta, date, time
 24 | from typing import Tuple, Optional, Dict, Any, List
 25 | 
 26 | logger = logging.getLogger(__name__)
 27 | 
 28 | # Named time periods and their approximate date ranges
 29 | NAMED_PERIODS = {
 30 |     # Holidays (US/Western-centric, would need localization for global use)
 31 |     "christmas": {"month": 12, "day": 25, "window": 3},
 32 |     "new year": {"month": 1, "day": 1, "window": 3},
 33 |     "valentine": {"month": 2, "day": 14, "window": 1},
 34 |     "halloween": {"month": 10, "day": 31, "window": 3},
 35 |     "thanksgiving": {"month": 11, "day": -1, "window": 3},  # -1 means fourth Thursday
 36 |     
 37 |     # Seasons (Northern Hemisphere)
 38 |     "spring": {"start_month": 3, "start_day": 20, "end_month": 6, "end_day": 20},
 39 |     "summer": {"start_month": 6, "start_day": 21, "end_month": 9, "end_day": 22},
 40 |     "fall": {"start_month": 9, "start_day": 23, "end_month": 12, "end_day": 20},
 41 |     "autumn": {"start_month": 9, "start_day": 23, "end_month": 12, "end_day": 20},
 42 |     "winter": {"start_month": 12, "start_day": 21, "end_month": 3, "end_day": 19},
 43 | }
 44 | 
 45 | # Time of day mappings (24-hour format)
 46 | TIME_OF_DAY = {
 47 |     "morning": (5, 11),    # 5:00 AM - 11:59 AM
 48 |     "noon": (12, 12),      # 12:00 PM
 49 |     "afternoon": (13, 17), # 1:00 PM - 5:59 PM
 50 |     "evening": (18, 21),   # 6:00 PM - 9:59 PM
 51 |     "night": (22, 4),      # 10:00 PM - 4:59 AM (wraps around midnight)
 52 |     "midnight": (0, 0),    # 12:00 AM
 53 | }
 54 | 
 55 | # Regular expressions for various time patterns
 56 | PATTERNS = {
 57 |     "relative_days": re.compile(r'(?:(\d+)\s+days?\s+ago)|(?:yesterday)|(?:today)'),
 58 |     "relative_weeks": re.compile(r'(\d+)\s+weeks?\s+ago'),
 59 |     "relative_months": re.compile(r'(\d+)\s+months?\s+ago'),
 60 |     "relative_years": re.compile(r'(\d+)\s+years?\s+ago'),
 61 |     "last_n_periods": re.compile(r'last\s+(\d+)\s+(days?|weeks?|months?|years?)'),
 62 |     "last_period": re.compile(r'last\s+(day|week|month|year|summer|spring|winter|fall|autumn)'),
 63 |     "this_period": re.compile(r'this\s+(day|week|month|year|summer|spring|winter|fall|autumn)'),
 64 |     "month_name": re.compile(r'(january|february|march|april|may|june|july|august|september|october|november|december)'),
 65 |     "date_range": re.compile(r'between\s+(.+?)\s+and\s+(.+?)(?:\s|$)'),
 66 |     "time_of_day": re.compile(r'(morning|afternoon|evening|night|noon|midnight)'),
 67 |     "recent": re.compile(r'recent|lately|recently'),
 68 |     "specific_date": re.compile(r'(\d{1,2})[/-](\d{1,2})(?:[/-](\d{2,4}))?'),
 69 |     "full_date": re.compile(r'(\d{4})-(\d{1,2})-(\d{1,2})'),
 70 |     "named_period": re.compile(r'(spring|summer|winter|fall|autumn|christmas|new\s*year|valentine|halloween|thanksgiving|spring\s*break|summer\s*break|winter\s*break)'),    "half_year": re.compile(r'(first|second)\s+half\s+of\s+(\d{4})'),
 71 |     "quarter": re.compile(r'(first|second|third|fourth|1st|2nd|3rd|4th)\s+quarter(?:\s+of\s+(\d{4}))?'),
 72 | }
 73 | 
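# Note: parse_time_expression() below consults these patterns in a fixed order
# (date ranges first, then explicit dates, then relative expressions), so a query
# like "between christmas and new year" is captured by "date_range" before the
# individual "named_period" matches would apply.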
 74 | def _calculate_season_date_range(
 75 |     period: str,
 76 |     season_info: Dict[str, int],
 77 |     base_year: int,
 78 |     current_month: Optional[int] = None
 79 | ) -> Tuple[datetime, datetime]:
 80 |     """
 81 |     Calculate start and end dates for a season, handling winter's year boundary.
 82 | 
 83 |     Args:
 84 |         period: Season name ("winter", "spring", "summer", "fall"/"autumn")
 85 |         season_info: Dictionary with start_month, start_day, end_month, end_day
 86 |         base_year: The year to use as reference for calculation
 87 |         current_month: Current month (1-12) for context-aware winter calculation (optional)
 88 | 
 89 |     Returns:
 90 |         Tuple of (start_datetime, end_datetime) for the season
 91 |     """
 92 |     if period == "winter":
 93 |         # Winter spans year boundary (Dec -> Mar)
 94 |         # Determine start year based on current month context
 95 |         if current_month is not None and current_month <= 3:
 96 |             # We're in Jan-Mar, so winter started the previous year
 97 |             start_year = base_year - 1
 98 |             end_year = base_year
 99 |         else:
100 |             # We're in any other month, winter starts this year
101 |             start_year = base_year
102 |             end_year = base_year + 1
103 | 
104 |         start_dt = datetime(start_year, season_info["start_month"], season_info["start_day"])
105 |         end_dt = datetime(end_year, season_info["end_month"], season_info["end_day"], 23, 59, 59)
106 |     else:
107 |         # All other seasons fall within a single calendar year
108 |         start_dt = datetime(base_year, season_info["start_month"], season_info["start_day"])
109 |         end_dt = datetime(base_year, season_info["end_month"], season_info["end_day"], 23, 59, 59)
110 | 
111 |     return start_dt, end_dt
112 | 
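# Worked example (values assume "today" is in February 2024):
#   _calculate_season_date_range("winter", NAMED_PERIODS["winter"], 2024, current_month=2)
#   -> (datetime(2023, 12, 21), datetime(2024, 3, 19, 23, 59, 59)),
#   i.e. the winter already in progress, which began the previous December.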
113 | def parse_time_expression(query: str) -> Tuple[Optional[float], Optional[float]]:
114 |     """
115 |     Parse a natural language time expression and return timestamp range.
116 |     
117 |     Args:
118 |         query: A natural language query with time expressions
119 |         
120 |     Returns:
121 |         Tuple of (start_timestamp, end_timestamp), either may be None
122 |     """
123 |     query = query.lower().strip()
124 |     
125 |     # Check for multiple patterns in a single query
126 |     try:
127 |         # First check for date ranges like "between X and Y"
128 |         date_range_match = PATTERNS["date_range"].search(query)
129 |         if date_range_match:
130 |             start_expr = date_range_match.group(1)
131 |             end_expr = date_range_match.group(2)
132 |             start_ts, _ = parse_time_expression(start_expr)
133 |             _, end_ts = parse_time_expression(end_expr)
134 |             return start_ts, end_ts
135 |         
136 |         # Check for full ISO dates (YYYY-MM-DD) FIRST
137 |         full_date_match = PATTERNS["full_date"].search(query)
138 |         if full_date_match:
139 |             year, month, day = full_date_match.groups()
140 |             try:
141 |                 specific_date = date(int(year), int(month), int(day))
142 |                 start_dt = datetime.combine(specific_date, time.min)
143 |                 end_dt = datetime.combine(specific_date, time.max)
144 |                 return start_dt.timestamp(), end_dt.timestamp()
145 |             except ValueError as e:
146 |                 logger.warning(f"Invalid date: {e}")
147 |                 return None, None
148 |             
149 |         # Check for specific dates (MM/DD/YYYY)
150 |         specific_date_match = PATTERNS["specific_date"].search(query)
151 |         if specific_date_match:
152 |             month, day, year = specific_date_match.groups()
153 |             month = int(month)
154 |             day = int(day)
155 |             current_year = datetime.now().year
156 |             year = int(year) if year else current_year
157 |             # Handle 2-digit years
158 |             if year and year < 100:
159 |                 year = 2000 + year if year < 50 else 1900 + year
160 |                 
161 |             try:
162 |                 specific_date = date(year, month, day)
163 |                 start_dt = datetime.combine(specific_date, time.min)
164 |                 end_dt = datetime.combine(specific_date, time.max)
165 |                 return start_dt.timestamp(), end_dt.timestamp()
166 |             except ValueError as e:
167 |                 logger.warning(f"Invalid date: {e}")
168 |                 return None, None
169 |         
170 |         # Relative days: "X days ago", "yesterday", "today"
171 |         days_ago_match = PATTERNS["relative_days"].search(query)
172 |         if days_ago_match:
173 |             if "yesterday" in query:
174 |                 days = 1
175 |             elif "today" in query:
176 |                 days = 0
177 |             else:
178 |                 days = int(days_ago_match.group(1))
179 |                 
180 |             target_date = date.today() - timedelta(days=days)
181 |             
182 |             # Check for time of day modifiers
183 |             time_of_day_match = PATTERNS["time_of_day"].search(query)
184 |             if time_of_day_match:
185 |                 # Narrow the range based on time of day
186 |                 return get_time_of_day_range(target_date, time_of_day_match.group(1))
187 |             else:
188 |                 # Return the full day
189 |                 start_dt = datetime.combine(target_date, time.min)
190 |                 end_dt = datetime.combine(target_date, time.max)
191 |                 return start_dt.timestamp(), end_dt.timestamp()
192 |         
193 |         # Relative weeks: "X weeks ago"
194 |         weeks_ago_match = PATTERNS["relative_weeks"].search(query)
195 |         if weeks_ago_match:
196 |             weeks = int(weeks_ago_match.group(1))
197 |             target_date = date.today() - timedelta(weeks=weeks)
198 |             # Get the start of the week (Monday)
199 |             start_date = target_date - timedelta(days=target_date.weekday())
200 |             end_date = start_date + timedelta(days=6)
201 |             start_dt = datetime.combine(start_date, time.min)
202 |             end_dt = datetime.combine(end_date, time.max)
203 |             return start_dt.timestamp(), end_dt.timestamp()
204 |         
205 |         # Relative months: "X months ago"
206 |         months_ago_match = PATTERNS["relative_months"].search(query)
207 |         if months_ago_match:
208 |             months = int(months_ago_match.group(1))
209 |             current = datetime.now()
210 |             # Calculate target month
211 |             year = current.year
212 |             month = current.month - months
213 |             
214 |             # Adjust year if month goes negative
215 |             while month <= 0:
216 |                 year -= 1
217 |                 month += 12
218 |                 
219 |             # Get first and last day of the month
220 |             first_day = date(year, month, 1)
221 |             if month == 12:
222 |                 last_day = date(year + 1, 1, 1) - timedelta(days=1)
223 |             else:
224 |                 last_day = date(year, month + 1, 1) - timedelta(days=1)
225 |                 
226 |             start_dt = datetime.combine(first_day, time.min)
227 |             end_dt = datetime.combine(last_day, time.max)
228 |             return start_dt.timestamp(), end_dt.timestamp()
229 |         
230 |         # Relative years: "X years ago"
231 |         years_ago_match = PATTERNS["relative_years"].search(query)
232 |         if years_ago_match:
233 |             years = int(years_ago_match.group(1))
234 |             current_year = datetime.now().year
235 |             target_year = current_year - years
236 |             start_dt = datetime(target_year, 1, 1, 0, 0, 0)
237 |             end_dt = datetime(target_year, 12, 31, 23, 59, 59)
238 |             return start_dt.timestamp(), end_dt.timestamp()
239 | 
240 |         # "Last N X" expressions (e.g., "last 3 days", "last 2 weeks")
241 |         # Check this BEFORE "last_period" to match more specific pattern first
242 |         last_n_periods_match = PATTERNS["last_n_periods"].search(query)
243 |         if last_n_periods_match:
244 |             n = int(last_n_periods_match.group(1))
245 |             period = last_n_periods_match.group(2)
246 |             return get_last_n_periods_range(n, period)
247 | 
248 |         # "Last X" expressions (e.g., "last week", "last month")
249 |         last_period_match = PATTERNS["last_period"].search(query)
250 |         if last_period_match:
251 |             period = last_period_match.group(1)
252 |             return get_last_period_range(period)
253 |         
254 |         # "This X" expressions
255 |         this_period_match = PATTERNS["this_period"].search(query)
256 |         if this_period_match:
257 |             period = this_period_match.group(1)
258 |             return get_this_period_range(period)
259 |         
260 |         # Month names
261 |         month_match = PATTERNS["month_name"].search(query)
262 |         if month_match:
263 |             month_name = month_match.group(1)
264 |             return get_month_range(month_name)
265 |         
266 |         # Named periods (holidays, etc.)
267 |         named_period_match = PATTERNS["named_period"].search(query)
268 |         if named_period_match:
269 |             period_name = named_period_match.group(1)  # Use the raw match; get_named_period_range() normalizes it
270 |             return get_named_period_range(period_name)
271 |         
272 |         # Half year expressions
273 |         half_year_match = PATTERNS["half_year"].search(query)
274 |         if half_year_match:
275 |             half = half_year_match.group(1)
276 |             year_str = half_year_match.group(2)
277 |             year = int(year_str) if year_str else datetime.now().year
278 |             
279 |             if half.lower() == "first":
280 |                 start_dt = datetime(year, 1, 1, 0, 0, 0)
281 |                 end_dt = datetime(year, 6, 30, 23, 59, 59)
282 |             else:  # "second"
283 |                 start_dt = datetime(year, 7, 1, 0, 0, 0)
284 |                 end_dt = datetime(year, 12, 31, 23, 59, 59)
285 |                 
286 |             return start_dt.timestamp(), end_dt.timestamp()
287 |         
288 |         # Quarter expressions
289 |         quarter_match = PATTERNS["quarter"].search(query)
290 |         if quarter_match:
291 |             quarter = quarter_match.group(1).lower()
292 |             year_str = quarter_match.group(2)
293 |             year = int(year_str) if year_str else datetime.now().year
294 |             
295 |             # Map textual quarter to number
296 |             quarter_num = {"first": 1, "1st": 1, "second": 2, "2nd": 2, 
297 |                           "third": 3, "3rd": 3, "fourth": 4, "4th": 4}[quarter]
298 |             
299 |             # Calculate quarter start and end dates
300 |             quarter_month = (quarter_num - 1) * 3 + 1
301 |             start_dt = datetime(year, quarter_month, 1, 0, 0, 0)
302 |             
303 |             if quarter_month + 3 > 12:
304 |                 end_dt = datetime(year + 1, 1, 1, 0, 0, 0) - timedelta(seconds=1)
305 |             else:
306 |                 end_dt = datetime(year, quarter_month + 3, 1, 0, 0, 0) - timedelta(seconds=1)
307 |                 
308 |             return start_dt.timestamp(), end_dt.timestamp()
309 |         
310 |         # Recent/fuzzy time expressions
311 |         recent_match = PATTERNS["recent"].search(query)
312 |         if recent_match:
313 |             # Default to last 7 days for "recent"
314 |             end_dt = datetime.now()
315 |             start_dt = end_dt - timedelta(days=7)
316 |             return start_dt.timestamp(), end_dt.timestamp()
317 |             
318 |         # If no time expression is found, return None for both timestamps
319 |         return None, None
320 |         
321 |     except Exception as e:
322 |         logger.error(f"Error parsing time expression: {e}")
323 |         return None, None
324 | 
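# Example (illustrative): parse_time_expression("2024-03-15") hits the
# "full_date" pattern and returns the timestamps for 2024-03-15 00:00:00 and
# 2024-03-15 23:59:59.999999 (datetime.combine with time.min / time.max).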
325 | def get_time_of_day_range(target_date: date, time_period: str) -> Tuple[float, float]:
326 |     """Get timestamp range for a specific time of day on a given date."""
327 |     if time_period in TIME_OF_DAY:
328 |         start_hour, end_hour = TIME_OF_DAY[time_period]
329 |         
330 |         # Handle periods that wrap around midnight
331 |         if start_hour > end_hour:  # e.g., "night" = (22, 4)
332 |             # For periods that span midnight, we need to handle specially
333 |             if time_period == "night":
334 |                 start_dt = datetime.combine(target_date, time(start_hour, 0))
335 |                 end_dt = datetime.combine(target_date + timedelta(days=1), time(end_hour, 59, 59))
336 |             else:
337 |                 # Default handling for other wrapping periods
338 |                 start_dt = datetime.combine(target_date, time(start_hour, 0))
339 |                 end_dt = datetime.combine(target_date + timedelta(days=1), time(end_hour, 59, 59))
340 |         else:
341 |             # Normal periods within a single day
342 |             start_dt = datetime.combine(target_date, time(start_hour, 0))
343 |             if end_hour == start_hour:  # Single-hour periods (noon, midnight) share the same end-of-hour formula
344 |                 end_dt = datetime.combine(target_date, time(end_hour, 59, 59))
345 |             else:
346 |                 end_dt = datetime.combine(target_date, time(end_hour, 59, 59))
347 |                 
348 |         return start_dt.timestamp(), end_dt.timestamp()
349 |     else:
350 |         # Fallback to full day
351 |         start_dt = datetime.combine(target_date, time.min)
352 |         end_dt = datetime.combine(target_date, time.max)
353 |         return start_dt.timestamp(), end_dt.timestamp()
354 | 
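# Example (illustrative): a query like "yesterday night" combines the
# relative-day and time-of-day patterns; get_time_of_day_range(<yesterday>, "night")
# spans 22:00:00 on that date through 04:59:59 the following morning.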
355 | def get_last_period_range(period: str) -> Tuple[float, float]:
356 |     """Get timestamp range for 'last X' expressions."""
357 |     now = datetime.now()
358 |     today = date.today()
359 |     
360 |     if period == "day":
361 |         # Last day = yesterday
362 |         yesterday = today - timedelta(days=1)
363 |         start_dt = datetime.combine(yesterday, time.min)
364 |         end_dt = datetime.combine(yesterday, time.max)
365 |     elif period == "week":
366 |         # Last week = previous calendar week (Mon-Sun)
367 |         # Find last Monday
368 |         last_monday = today - timedelta(days=today.weekday() + 7)
369 |         # Find last Sunday
370 |         last_sunday = last_monday + timedelta(days=6)
371 |         start_dt = datetime.combine(last_monday, time.min)
372 |         end_dt = datetime.combine(last_sunday, time.max)
373 |     elif period == "month":
374 |         # Last month = previous calendar month
375 |         first_of_this_month = date(today.year, today.month, 1)
376 |         if today.month == 1:
377 |             last_month = 12
378 |             last_month_year = today.year - 1
379 |         else:
380 |             last_month = today.month - 1
381 |             last_month_year = today.year
382 |             
383 |         first_of_last_month = date(last_month_year, last_month, 1)
384 |         last_of_last_month = first_of_this_month - timedelta(days=1)
385 |         
386 |         start_dt = datetime.combine(first_of_last_month, time.min)
387 |         end_dt = datetime.combine(last_of_last_month, time.max)
388 |     elif period == "year":
389 |         # Last year = previous calendar year
390 |         last_year = today.year - 1
391 |         start_dt = datetime(last_year, 1, 1, 0, 0, 0)
392 |         end_dt = datetime(last_year, 12, 31, 23, 59, 59)
393 |     elif period in ["summer", "spring", "winter", "fall", "autumn"]:
394 |         # Last season
395 |         season_info = NAMED_PERIODS[period]
396 |         current_year = today.year
397 |         
398 |         # Determine if we're currently in this season
399 |         current_month = today.month
400 |         current_day = today.day
401 |         is_current_season = False
402 |         
403 |         # Check if today falls within the season's date range
404 |         if period in ["winter"]:  # Winter spans year boundary
405 |             if (current_month >= season_info["start_month"] or 
406 |                 (current_month <= season_info["end_month"] and 
407 |                  current_day <= season_info["end_day"])):
408 |                 is_current_season = True
409 |         else:
410 |             if (current_month >= season_info["start_month"] and current_month <= season_info["end_month"] and
411 |                 current_day >= season_info["start_day"] if current_month == season_info["start_month"] else True and
412 |                 current_day <= season_info["end_day"] if current_month == season_info["end_month"] else True):
413 |                 is_current_season = True
414 |         
415 |         # If we're currently in the season, get last year's season
416 |         if is_current_season:
417 |             year = current_year - 1
418 |         else:
419 |             year = current_year
420 |             
421 |         # Calculate season date range (handles winter's year boundary)
422 |         context_month = current_month if is_current_season else None
423 |         start_dt, end_dt = _calculate_season_date_range(period, season_info, year, context_month)
424 |     else:
425 |         # Fallback - last 24 hours
426 |         end_dt = now
427 |         start_dt = end_dt - timedelta(days=1)
428 |         
429 |     return start_dt.timestamp(), end_dt.timestamp()
430 | 
431 | def get_last_n_periods_range(n: int, period: str) -> Tuple[float, float]:
432 |     """Get timestamp range for 'last N X' expressions (e.g., 'last 3 days')."""
433 |     now = datetime.now()
434 |     today = date.today()
435 | 
436 |     # Normalize period to singular form
437 |     period = period.rstrip('s')  # Remove trailing 's' if present
438 | 
439 |     if period == "day":
440 |         # Last N days means from N days ago 00:00 until now
441 |         start_date = today - timedelta(days=n)
442 |         start_dt = datetime.combine(start_date, time.min)
443 |         end_dt = now
444 |     elif period == "week":
445 |         # Last N weeks means from N weeks ago Monday 00:00 until now
446 |         start_date = today - timedelta(weeks=n)
447 |         # Get Monday of that week
448 |         start_date = start_date - timedelta(days=start_date.weekday())
449 |         start_dt = datetime.combine(start_date, time.min)
450 |         end_dt = now
451 |     elif period == "month":
452 |         # Last N months means from N months ago first day 00:00 until now
453 |         current = datetime.now()
454 |         year = current.year
455 |         month = current.month - n
456 | 
457 |         # Handle year boundary
458 |         while month <= 0:
459 |             month += 12
460 |             year -= 1
461 | 
462 |         start_date = date(year, month, 1)
463 |         start_dt = datetime.combine(start_date, time.min)
464 |         end_dt = now
465 |     elif period == "year":
466 |         # Last N years means from N years ago Jan 1 00:00 until now
467 |         start_year = today.year - n
468 |         start_dt = datetime(start_year, 1, 1, 0, 0, 0)
469 |         end_dt = now
470 |     else:
471 |         # Fallback - interpret as days
472 |         start_date = today - timedelta(days=n)
473 |         start_dt = datetime.combine(start_date, time.min)
474 |         end_dt = now
475 | 
476 |     return start_dt.timestamp(), end_dt.timestamp()
477 | 
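# Example (illustrative): on 2024-06-10, get_last_n_periods_range(3, "days")
# covers 2024-06-07 00:00:00 through "now" -- a rolling window, in contrast to
# get_last_period_range("week"), which returns the previous *calendar* week.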
478 | def get_this_period_range(period: str) -> Tuple[float, float]:
479 |     """Get timestamp range for 'this X' expressions."""
480 |     now = datetime.now()
481 |     today = date.today()
482 |     
483 |     if period == "day":
484 |         # This day = today
485 |         start_dt = datetime.combine(today, time.min)
486 |         end_dt = datetime.combine(today, time.max)
487 |     elif period == "week":
488 |         # This week = current calendar week (Mon-Sun)
489 |         # Find this Monday
490 |         monday = today - timedelta(days=today.weekday())
491 |         sunday = monday + timedelta(days=6)
492 |         start_dt = datetime.combine(monday, time.min)
493 |         end_dt = datetime.combine(sunday, time.max)
494 |     elif period == "month":
495 |         # This month = current calendar month
496 |         first_of_month = date(today.year, today.month, 1)
497 |         if today.month == 12:
498 |             first_of_next_month = date(today.year + 1, 1, 1)
499 |         else:
500 |             first_of_next_month = date(today.year, today.month + 1, 1)
501 |             
502 |         last_of_month = first_of_next_month - timedelta(days=1)
503 |         
504 |         start_dt = datetime.combine(first_of_month, time.min)
505 |         end_dt = datetime.combine(last_of_month, time.max)
506 |     elif period == "year":
507 |         # This year = current calendar year
508 |         start_dt = datetime(today.year, 1, 1, 0, 0, 0)
509 |         end_dt = datetime(today.year, 12, 31, 23, 59, 59)
510 |     elif period in ["summer", "spring", "winter", "fall", "autumn"]:
511 |         # This season
512 |         season_info = NAMED_PERIODS[period]
513 |         current_year = today.year
514 |         
515 |         # Calculate season date range (handles winter's year boundary)
516 |         start_dt, end_dt = _calculate_season_date_range(period, season_info, current_year, today.month)
517 |     else:
518 |         # Fallback - current 24 hours
519 |         end_dt = now
520 |         start_dt = datetime.combine(today, time.min)
521 |         
522 |     return start_dt.timestamp(), end_dt.timestamp()
523 | 
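# Example (illustrative): on 2024-06-10, get_this_period_range("month") returns
# timestamps for 2024-06-01 00:00:00 through 2024-06-30 23:59:59.999999.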
524 | def get_month_range(month_name: str) -> Tuple[Optional[float], Optional[float]]:
525 |     """Get timestamp range for a named month."""
526 |     # Map month name to number
527 |     month_map = {
528 |         "january": 1, "february": 2, "march": 3, "april": 4,
529 |         "may": 5, "june": 6, "july": 7, "august": 8,
530 |         "september": 9, "october": 10, "november": 11, "december": 12
531 |     }
532 |     
533 |     if month_name in month_map:
534 |         month_num = month_map[month_name]
535 |         current_year = datetime.now().year
536 |         
537 |         # If the month is in the future for this year, use last year
538 |         current_month = datetime.now().month
539 |         year = current_year if month_num <= current_month else current_year - 1
540 |         
541 |         # Get first and last day of the month
542 |         first_day = date(year, month_num, 1)
543 |         if month_num == 12:
544 |             last_day = date(year + 1, 1, 1) - timedelta(days=1)
545 |         else:
546 |             last_day = date(year, month_num + 1, 1) - timedelta(days=1)
547 |             
548 |         start_dt = datetime.combine(first_day, time.min)
549 |         end_dt = datetime.combine(last_day, time.max)
550 |         return start_dt.timestamp(), end_dt.timestamp()
551 |     else:
552 |         return None, None
553 | 
554 | def get_named_period_range(period_name: str) -> Tuple[Optional[float], Optional[float]]:
555 |     """Get timestamp range for named periods like holidays."""
556 |     period_name = period_name.lower().replace("_", " ")
557 |     current_year = datetime.now().year
558 |     current_month = datetime.now().month
559 |     current_day = datetime.now().day
560 |     
561 |     if period_name in NAMED_PERIODS:
562 |         info = NAMED_PERIODS[period_name]
563 |         # Found matching period
564 |         # Determine if the period is in the past or future for this year
565 |         if "month" in info and "day" in info:
566 |             # Simple fixed-date holiday
567 |             month = info["month"]
568 |             day = info["day"]
569 |             window = info.get("window", 1)  # Default 1-day window
570 |             
571 |             # Special case for Thanksgiving (fourth Thursday in November);
572 |             # its date shifts from year to year, so resolve it per candidate year
573 |             def fourth_thursday(year: int) -> int:
574 |                 first = date(year, 11, 1)
575 |                 thursday = first + timedelta(days=((3 - first.weekday()) % 7))
576 |                 return (thursday + timedelta(weeks=3)).day
577 | 
578 |             is_thanksgiving = (day == -1 and month == 11)
579 |             if is_thanksgiving:
580 |                 day = fourth_thursday(current_year)
581 | 
582 |             # Check if the holiday has passed this year
583 |             is_past = (current_month > month or
584 |                        (current_month == month and current_day > day + window))
585 |             year = current_year if not is_past else current_year - 1
586 |             target_date = date(year, month, fourth_thursday(year) if is_thanksgiving else day)
587 |             
588 |             # Create date range with window
589 |             start_date = target_date - timedelta(days=window)
590 |             end_date = target_date + timedelta(days=window)
591 |             
592 |             start_dt = datetime.combine(start_date, time.min)
593 |             end_dt = datetime.combine(end_date, time.max)
594 |             return start_dt.timestamp(), end_dt.timestamp()
595 |             
596 |         elif "start_month" in info and "end_month" in info:
597 |             # Season or date range
598 |             start_month = info["start_month"]
599 |             start_day = info["start_day"]
600 |             end_month = info["end_month"]
601 |             end_day = info["end_day"]
602 |             
603 |             # Determine year based on current date
604 |             if start_month > end_month:  # Period crosses year boundary
605 |                 if current_month < end_month or (current_month == end_month and current_day <= end_day):
606 |                     # We're in the end part of the period that started last year
607 |                     start_dt = datetime(current_year - 1, start_month, start_day)
608 |                     end_dt = datetime(current_year, end_month, end_day, 23, 59, 59)
609 |                 else:
610 |                     # The period is either coming up this year or happened earlier this year
611 |                     if current_month > start_month or (current_month == start_month and current_day >= start_day):
612 |                         # Period already started this year
613 |                         start_dt = datetime(current_year, start_month, start_day)
614 |                         end_dt = datetime(current_year + 1, end_month, end_day, 23, 59, 59)
615 |                     else:
616 |                         # Period from last year
617 |                         start_dt = datetime(current_year - 1, start_month, start_day)
618 |                         end_dt = datetime(current_year, end_month, end_day, 23, 59, 59)
619 |             else:
620 |                 # Period within a single year
621 |                 # Check if period has already occurred this year
622 |                 if (current_month > end_month or 
623 |                     (current_month == end_month and current_day > end_day)):
624 |                     # Period already passed this year
625 |                     start_dt = datetime(current_year, start_month, start_day)
626 |                     end_dt = datetime(current_year, end_month, end_day, 23, 59, 59)
627 |                 else:
628 |                     # Check if current date is within the period
629 |                     is_within_period = (
630 |                         (current_month > start_month or 
631 |                             (current_month == start_month and current_day >= start_day))
632 |                         and
633 |                         (current_month < end_month or 
634 |                             (current_month == end_month and current_day <= end_day))
635 |                     )
636 |                     
637 |                     if is_within_period:
638 |                         # We're in the period this year
639 |                         start_dt = datetime(current_year, start_month, start_day)
640 |                         end_dt = datetime(current_year, end_month, end_day, 23, 59, 59)
641 |                     else:
642 |                         # Period from last year
643 |                         start_dt = datetime(current_year - 1, start_month, start_day)
644 |                         end_dt = datetime(current_year - 1, end_month, end_day, 23, 59, 59)
645 |             
646 |             return start_dt.timestamp(), end_dt.timestamp()
647 |     
648 |     # If no match found
649 |     return None, None
650 | 
651 | # Helper function to detect time expressions in a general query
652 | def extract_time_expression(query: str) -> Tuple[str, Tuple[Optional[float], Optional[float]]]:
653 |     """
654 |     Extract time-related expressions from a query and return the timestamps.
655 |     
656 |     Args:
657 |         query: A natural language query that may contain time expressions
658 |         
659 |     Returns:
660 |         Tuple of (cleaned_query, (start_timestamp, end_timestamp))
661 |         The cleaned_query has time expressions removed
662 |     """
663 |     # Check for time expressions
664 |     time_expressions = [
665 |         r'\b\d+\s+days?\s+ago\b',
666 |         r'\byesterday\b',
667 |         r'\btoday\b',
668 |         r'\b\d+\s+weeks?\s+ago\b',
669 |         r'\b\d+\s+months?\s+ago\b',
670 |         r'\b\d+\s+years?\s+ago\b',
671 |         r'\blast\s+(day|week|month|year|summer|spring|winter|fall|autumn)\b',
672 |         r'\bthis\s+(day|week|month|year|summer|spring|winter|fall|autumn)\b',
673 |         r'\b(january|february|march|april|may|june|july|august|september|october|november|december)\b',
674 |         r'\bbetween\s+.+?\s+and\s+.+?(?:\s|$)',
675 |         r'\bin\s+the\s+(morning|afternoon|evening|night|noon|midnight)\b',
676 |         r'\b(recent|recently|lately)\b',
677 |         r'\b\d{1,2}[/-]\d{1,2}(?:[/-]\d{2,4})?\b',
678 |         r'\b\d{4}-\d{1,2}-\d{1,2}\b',
679 |         r'\b(spring|summer|winter|fall|autumn|christmas|new\s*year|valentine|halloween|thanksgiving|spring\s*break|summer\s*break|winter\s*break)\b',
680 |         r'\b(first|second)\s+half\s+of\s+\d{4}\b',
681 |         r'\b(first|second|third|fourth|1st|2nd|3rd|4th)\s+quarter(?:\s+of\s+\d{4})?\b',
682 |         r'\bfrom\s+.+\s+to\s+.+\b'
683 |     ]
684 |     
685 |     # Combine all patterns
686 |     combined_pattern = '|'.join(f'({expr})' for expr in time_expressions)
687 |     combined_regex = re.compile(combined_pattern, re.IGNORECASE)
688 |     
689 |     # Find all matches
690 |     matches = list(combined_regex.finditer(query))
691 |     if not matches:
692 |         return query, (None, None)
693 |     
694 |     # Extract the time expressions
695 |     matched_expressions = []
696 |     for match in matches:
697 |         span = match.span()
698 |         expression = query[span[0]:span[1]]
699 |         matched_expressions.append(expression)
700 |     
701 |     # Parse time expressions to get timestamps
702 |     full_time_expression = ' '.join(matched_expressions)
703 |     start_ts, end_ts = parse_time_expression(full_time_expression)
704 |     
705 |     # Remove time expressions from the query
706 |     cleaned_query = query
707 |     for expr in matched_expressions:
708 |         cleaned_query = cleaned_query.replace(expr, '')
709 |     
710 |     # Clean up multiple spaces
711 |     cleaned_query = re.sub(r'\s+', ' ', cleaned_query).strip()
712 |     
713 |     return cleaned_query, (start_ts, end_ts)
```
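
Taken together, `parse_time_expression` and `extract_time_expression` let a caller split a natural-language query into its semantic part and an optional timestamp window. A minimal sketch of the intended call pattern (illustrative only; the import path is an assumption, adjust it to wherever this module lives in the package):

```python
# Illustrative usage sketch -- the import path below is assumed, not confirmed.
from mcp_memory_service.utils.time_parser import extract_time_expression

query = "database migration notes last week"
cleaned, (start_ts, end_ts) = extract_time_expression(query)

# cleaned  -> "database migration notes" (the time phrase is stripped)
# start_ts/end_ts -> epoch bounds for the matched period ("last week"),
#                    or (None, None) if no time expression was found
if start_ts is not None:
    print(f"search '{cleaned}' between {start_ts:.0f} and {end_ts:.0f}")
else:
    print(f"search '{cleaned}' with no time filter")
```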

--------------------------------------------------------------------------------
/docs/examples/analysis-scripts.js:
--------------------------------------------------------------------------------

```javascript
   1 | /**
   2 |  * Memory Analysis Scripts
   3 |  * 
   4 |  * A collection of JavaScript functions for analyzing and extracting insights
   5 |  * from MCP Memory Service data. These scripts demonstrate practical approaches
   6 |  * to memory data analysis, pattern recognition, and visualization preparation.
   7 |  * 
   8 |  * Usage: Import individual functions or use as reference for building
   9 |  * custom analysis pipelines.
  10 |  */
  11 | 
  12 | // =============================================================================
  13 | // TEMPORAL ANALYSIS FUNCTIONS
  14 | // =============================================================================
  15 | 
  16 | /**
  17 |  * Analyze memory distribution over time periods
  18 |  * @param {Array} memories - Array of memory objects with timestamps
  19 |  * @returns {Object} Distribution data organized by time periods
  20 |  */
  21 | function analyzeTemporalDistribution(memories) {
  22 |   const distribution = {
  23 |     monthly: {},
  24 |     weekly: {},
  25 |     daily: {},
  26 |     hourly: {}
  27 |   };
  28 | 
  29 |   memories.forEach(memory => {
  30 |     const date = new Date(memory.timestamp);
  31 |     
  32 |     // Monthly distribution
  33 |     const monthKey = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}`;
  34 |     if (!distribution.monthly[monthKey]) {
  35 |       distribution.monthly[monthKey] = [];
  36 |     }
  37 |     distribution.monthly[monthKey].push(memory);
  38 | 
  39 |     // Weekly distribution (week of year)
  40 |     const weekKey = `${date.getFullYear()}-W${getWeekNumber(date)}`;
  41 |     if (!distribution.weekly[weekKey]) {
  42 |       distribution.weekly[weekKey] = [];
  43 |     }
  44 |     distribution.weekly[weekKey].push(memory);
  45 | 
  46 |     // Daily distribution (day of week)
  47 |     const dayKey = date.toLocaleDateString('en-US', { weekday: 'long' });
  48 |     if (!distribution.daily[dayKey]) {
  49 |       distribution.daily[dayKey] = [];
  50 |     }
  51 |     distribution.daily[dayKey].push(memory);
  52 | 
  53 |     // Hourly distribution
  54 |     const hourKey = date.getHours();
  55 |     if (!distribution.hourly[hourKey]) {
  56 |       distribution.hourly[hourKey] = [];
  57 |     }
  58 |     distribution.hourly[hourKey].push(memory);
  59 |   });
  60 | 
  61 |   return distribution;
  62 | }
  63 | 
  64 | /**
  65 |  * Calculate week number for a given date
  66 |  * @param {Date} date - Date object
  67 |  * @returns {number} Week number
  68 |  */
  69 | function getWeekNumber(date) {
  70 |   const firstDayOfYear = new Date(date.getFullYear(), 0, 1);
  71 |   const pastDaysOfYear = (date - firstDayOfYear) / 86400000;
  72 |   return Math.ceil((pastDaysOfYear + firstDayOfYear.getDay() + 1) / 7);
  73 | }
  74 | 
  75 | /**
  76 |  * Prepare temporal data for chart visualization
  77 |  * @param {Object} distribution - Distribution object from analyzeTemporalDistribution
  78 |  * @param {string} period - Time period ('monthly', 'weekly', 'daily', 'hourly')
  79 |  * @returns {Array} Chart-ready data array
  80 |  */
  81 | function prepareTemporalChartData(distribution, period = 'monthly') {
  82 |   const data = distribution[period];
  83 |   
  84 |   const chartData = Object.entries(data)
  85 |     .map(([key, memories]) => ({
  86 |       period: formatPeriodLabel(key, period),
  87 |       count: memories.length,
  88 |       memories: memories,
  89 |       key: key
  90 |     }))
  91 |     .sort((a, b) => a.key.localeCompare(b.key));
  92 | 
  93 |   return chartData;
  94 | }
  95 | 
  96 | /**
  97 |  * Format period labels for display
  98 |  * @param {string} key - Period key
  99 |  * @param {string} period - Period type
 100 |  * @returns {string} Formatted label
 101 |  */
 102 | function formatPeriodLabel(key, period) {
 103 |   switch (period) {
 104 |     case 'monthly':
 105 |       const [year, month] = key.split('-');
 106 |       const monthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
 107 |                          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
 108 |       return `${monthNames[parseInt(month) - 1]} ${year}`;
 109 |     
 110 |     case 'weekly':
 111 |       return key; // Already formatted as YYYY-WXX
 112 |     
 113 |     case 'daily':
 114 |       return key; // Day names are already formatted
 115 |     
 116 |     case 'hourly':
 117 |       const hour = parseInt(key);
 118 |       return `${hour}:00`;
 119 |     
 120 |     default:
 121 |       return key;
 122 |   }
 123 | }
 124 | 
 125 | // =============================================================================
 126 | // TAG ANALYSIS FUNCTIONS
 127 | // =============================================================================
 128 | 
 129 | /**
 130 |  * Analyze tag usage frequency and patterns
 131 |  * @param {Array} memories - Array of memory objects
 132 |  * @returns {Object} Tag analysis results
 133 |  */
 134 | function analyzeTagUsage(memories) {
 135 |   const tagFrequency = {};
 136 |   const tagCombinations = {};
 137 |   const categoryDistribution = {};
 138 | 
 139 |   memories.forEach(memory => {
 140 |     const tags = memory.tags || [];
 141 |     
 142 |     // Tag frequency analysis
 143 |     tags.forEach(tag => {
 144 |       tagFrequency[tag] = (tagFrequency[tag] || 0) + 1;
 145 |       
 146 |       // Categorize tags
 147 |       const category = categorizeTag(tag);
 148 |       if (!categoryDistribution[category]) {
 149 |         categoryDistribution[category] = {};
 150 |       }
 151 |       categoryDistribution[category][tag] = (categoryDistribution[category][tag] || 0) + 1;
 152 |     });
 153 | 
 154 |     // Tag combination analysis
 155 |     if (tags.length > 1) {
 156 |       for (let i = 0; i < tags.length; i++) {
 157 |         for (let j = i + 1; j < tags.length; j++) {
 158 |           const combo = [tags[i], tags[j]].sort().join(' + ');
 159 |           tagCombinations[combo] = (tagCombinations[combo] || 0) + 1;
 160 |         }
 161 |       }
 162 |     }
 163 |   });
 164 | 
 165 |   return {
 166 |     frequency: Object.entries(tagFrequency)
 167 |       .sort(([,a], [,b]) => b - a),
 168 |     combinations: Object.entries(tagCombinations)
 169 |       .sort(([,a], [,b]) => b - a)
 170 |       .slice(0, 20), // Top 20 combinations
 171 |     categories: categoryDistribution,
 172 |     totalTags: Object.keys(tagFrequency).length,
 173 |     averageTagsPerMemory: memories.reduce((sum, m) => sum + (m.tags?.length || 0), 0) / memories.length
 174 |   };
 175 | }
 176 | 
 177 | /**
 178 |  * Categorize a tag based on common patterns
 179 |  * @param {string} tag - Tag to categorize
 180 |  * @returns {string} Category name
 181 |  */
 182 | function categorizeTag(tag) {
 183 |   const patterns = {
 184 |     'projects': /^(mcp-memory-service|memory-dashboard|github-integration|mcp-protocol)/,
 185 |     'technologies': /^(python|react|typescript|chromadb|git|docker|aws|npm)/,
 186 |     'activities': /^(testing|debugging|development|documentation|deployment|maintenance)/,
 187 |     'status': /^(resolved|in-progress|blocked|verified|completed|experimental)/,
 188 |     'content-types': /^(concept|architecture|tutorial|reference|example|guide)/,
 189 |     'temporal': /^(january|february|march|april|may|june|q1|q2|2025)/,
 190 |     'priorities': /^(urgent|high-priority|low-priority|critical)/
 191 |   };
 192 | 
 193 |   for (const [category, pattern] of Object.entries(patterns)) {
 194 |     if (pattern.test(tag)) {
 195 |       return category;
 196 |     }
 197 |   }
 198 | 
 199 |   return 'other';
 200 | }
 201 | 
 202 | /**
 203 |  * Find tag inconsistencies and suggest improvements
 204 |  * @param {Array} memories - Array of memory objects
 205 |  * @returns {Object} Consistency analysis results
 206 |  */
 207 | function analyzeTagConsistency(memories) {
 208 |   const inconsistencies = [];
 209 |   const suggestions = [];
 210 |   const patterns = {};
 211 | 
 212 |   memories.forEach((memory, index) => {
 213 |     const content = memory.content || '';
 214 |     const tags = memory.tags || [];
 215 | 
 216 |     // Common content patterns that should have corresponding tags
 217 |     const contentPatterns = {
 218 |       'test': /\b(test|testing|TEST)\b/i,
 219 |       'bug': /\b(bug|issue|error|problem)\b/i,
 220 |       'debug': /\b(debug|debugging|fix|fixed)\b/i,
 221 |       'documentation': /\b(document|guide|tutorial|readme)\b/i,
 222 |       'concept': /\b(concept|idea|design|architecture)\b/i,
 223 |       'implementation': /\b(implement|implementation|develop|development)\b/i
 224 |     };
 225 | 
 226 |     Object.entries(contentPatterns).forEach(([expectedTag, pattern]) => {
 227 |       if (pattern.test(content)) {
 228 |         const hasRelatedTag = tags.some(tag => 
 229 |           tag.includes(expectedTag) || 
 230 |           expectedTag.includes(tag.split('-')[0])
 231 |         );
 232 | 
 233 |         if (!hasRelatedTag) {
 234 |           inconsistencies.push({
 235 |             memoryIndex: index,
 236 |             type: 'missing-tag',
 237 |             expectedTag: expectedTag,
 238 |             content: content.substring(0, 100) + '...',
 239 |             currentTags: tags
 240 |           });
 241 |         }
 242 |       }
 243 |     });
 244 | 
 245 |     // Check for overly generic tags
 246 |     const genericTags = ['test', 'memory', 'note', 'temp', 'example'];
 247 |     const hasGenericOnly = tags.length > 0 && 
 248 |       tags.every(tag => genericTags.includes(tag));
 249 | 
 250 |     if (hasGenericOnly) {
 251 |       suggestions.push({
 252 |         memoryIndex: index,
 253 |         type: 'improve-specificity',
 254 |         suggestion: 'Replace generic tags with specific categories',
 255 |         currentTags: tags,
 256 |         content: content.substring(0, 100) + '...'
 257 |       });
 258 |     }
 259 |   });
 260 | 
 261 |   return {
 262 |     inconsistencies,
 263 |     suggestions,
 264 |     consistencyScore: ((memories.length - inconsistencies.length) / memories.length) * 100,
 265 |     totalIssues: inconsistencies.length + suggestions.length
 266 |   };
 267 | }
 268 | 
 269 | // =============================================================================
 270 | // CONTENT ANALYSIS FUNCTIONS
 271 | // =============================================================================
 272 | 
 273 | /**
 274 |  * Analyze content patterns and themes
 275 |  * @param {Array} memories - Array of memory objects
 276 |  * @returns {Object} Content analysis results
 277 |  */
 278 | function analyzeContentPatterns(memories) {
 279 |   const themes = {};
 280 |   const contentTypes = {};
 281 |   const wordFrequency = {};
 282 |   const lengthDistribution = {};
 283 | 
 284 |   memories.forEach(memory => {
 285 |     const content = memory.content || '';
 286 |     const words = extractKeywords(content);
 287 |     const contentType = detectContentType(content);
 288 | 
 289 |     // Theme analysis based on keywords
 290 |     words.forEach(word => {
 291 |       wordFrequency[word] = (wordFrequency[word] || 0) + 1;
 292 |     });
 293 | 
 294 |     // Content type distribution
 295 |     contentTypes[contentType] = (contentTypes[contentType] || 0) + 1;
 296 | 
 297 |     // Length distribution
 298 |     const lengthCategory = categorizeContentLength(content.length);
 299 |     lengthDistribution[lengthCategory] = (lengthDistribution[lengthCategory] || 0) + 1;
 300 |   });
 301 | 
 302 |   // Extract top themes from word frequency
 303 |   const topWords = Object.entries(wordFrequency)
 304 |     .sort(([,a], [,b]) => b - a)
 305 |     .slice(0, 50);
 306 | 
 307 |   return {
 308 |     themes: extractThemes(topWords),
 309 |     contentTypes,
 310 |     lengthDistribution,
 311 |     wordFrequency: topWords,
 312 |     averageLength: memories.reduce((sum, m) => sum + (m.content?.length || 0), 0) / memories.length
 313 |   };
 314 | }
 315 | 
 316 | /**
 317 |  * Extract keywords from content
 318 |  * @param {string} content - Memory content
 319 |  * @returns {Array} Array of keywords
 320 |  */
 321 | function extractKeywords(content) {
 322 |   const stopWords = new Set([
 323 |     'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with',
 324 |     'by', 'from', 'up', 'about', 'into', 'through', 'during', 'before', 'after',
 325 |     'above', 'below', 'between', 'among', 'is', 'are', 'was', 'were', 'be', 'been',
 326 |     'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should',
 327 |     'may', 'might', 'must', 'can', 'this', 'that', 'these', 'those'
 328 |   ]);
 329 | 
 330 |   return content
 331 |     .toLowerCase()
 332 |     .replace(/[^\w\s-]/g, ' ') // Remove punctuation except hyphens
 333 |     .split(/\s+/)
 334 |     .filter(word => 
 335 |       word.length > 2 && 
 336 |       !stopWords.has(word) &&
 337 |       !word.match(/^\d+$/) // Exclude pure numbers
 338 |     );
 339 | }
 340 | 
 341 | /**
 342 |  * Detect content type based on patterns
 343 |  * @param {string} content - Memory content
 344 |  * @returns {string} Content type
 345 |  */
 346 | function detectContentType(content) {
 347 |   const patterns = {
 348 |     'code': /```|function\s*\(|class\s+\w+|import\s+\w+/,
 349 |     'documentation': /^#+\s|README|GUIDE|TUTORIAL/i,
 350 |     'issue': /issue|bug|error|problem|fix|resolved/i,
 351 |     'concept': /concept|idea|design|architecture|approach/i,
 352 |     'test': /test|testing|verify|validation|TEST/i,
 353 |     'configuration': /config|setup|installation|environment/i,
 354 |     'analysis': /analysis|report|summary|statistics|metrics/i
 355 |   };
 356 | 
 357 |   for (const [type, pattern] of Object.entries(patterns)) {
 358 |     if (pattern.test(content)) {
 359 |       return type;
 360 |     }
 361 |   }
 362 | 
 363 |   return 'general';
 364 | }
 365 | 
 366 | /**
 367 |  * Categorize content by length
 368 |  * @param {number} length - Content length in characters
 369 |  * @returns {string} Length category
 370 |  */
 371 | function categorizeContentLength(length) {
 372 |   if (length < 100) return 'very-short';
 373 |   if (length < 500) return 'short';
 374 |   if (length < 1500) return 'medium';
 375 |   if (length < 3000) return 'long';
 376 |   return 'very-long';
 377 | }
 378 | 
 379 | /**
 380 |  * Extract themes from word frequency data
 381 |  * @param {Array} topWords - Array of [word, frequency] pairs
 382 |  * @returns {Object} Organized themes
 383 |  */
 384 | function extractThemes(topWords) {
 385 |   const themeCategories = {
 386 |     technology: ['python', 'react', 'typescript', 'chromadb', 'git', 'docker', 'api', 'database'],
 387 |     development: ['development', 'implementation', 'code', 'programming', 'build', 'deploy'],
 388 |     testing: ['test', 'testing', 'debug', 'debugging', 'verification', 'quality'],
 389 |     project: ['project', 'service', 'system', 'application', 'platform', 'tool'],
 390 |     process: ['process', 'workflow', 'methodology', 'procedure', 'approach', 'strategy']
 391 |   };
 392 | 
 393 |   const themes = {};
 394 |   const wordMap = new Map(topWords);
 395 | 
 396 |   Object.entries(themeCategories).forEach(([theme, keywords]) => {
 397 |     themes[theme] = keywords
 398 |       .filter(keyword => wordMap.has(keyword))
 399 |       .map(keyword => ({ word: keyword, frequency: wordMap.get(keyword) }))
 400 |       .sort((a, b) => b.frequency - a.frequency);
 401 |   });
 402 | 
 403 |   return themes;
 404 | }
 405 | 
 406 | // =============================================================================
 407 | // QUALITY ANALYSIS FUNCTIONS
 408 | // =============================================================================
 409 | 
 410 | /**
 411 |  * Assess overall memory quality and organization
 412 |  * @param {Array} memories - Array of memory objects
 413 |  * @returns {Object} Quality assessment results
 414 |  */
 415 | function assessMemoryQuality(memories) {
 416 |   const metrics = {
 417 |     tagging: assessTaggingQuality(memories),
 418 |     content: assessContentQuality(memories),
 419 |     organization: assessOrganizationQuality(memories),
 420 |     searchability: assessSearchabilityQuality(memories)
 421 |   };
 422 | 
 423 |   // Calculate overall quality score
 424 |   const overallScore = Object.values(metrics)
 425 |     .reduce((sum, metric) => sum + metric.score, 0) / Object.keys(metrics).length;
 426 | 
 427 |   return {
 428 |     overallScore: Math.round(overallScore),
 429 |     metrics,
 430 |     recommendations: generateQualityRecommendations(metrics),
 431 |     totalMemories: memories.length
 432 |   };
 433 | }
 434 | 
 435 | /**
 436 |  * Assess tagging quality
 437 |  * @param {Array} memories - Array of memory objects
 438 |  * @returns {Object} Tagging quality assessment
 439 |  */
 440 | function assessTaggingQuality(memories) {
 441 |   let taggedCount = 0;
 442 |   let wellTaggedCount = 0;
 443 |   let totalTags = 0;
 444 | 
 445 |   memories.forEach(memory => {
 446 |     const tags = memory.tags || [];
 447 |     totalTags += tags.length;
 448 | 
 449 |     if (tags.length > 0) {
 450 |       taggedCount++;
 451 |       
 452 |       // Well-tagged: has 3+ tags from different categories
 453 |       if (tags.length >= 3) {
 454 |         const categories = new Set(tags.map(tag => categorizeTag(tag)));
 455 |         if (categories.size >= 2) {
 456 |           wellTaggedCount++;
 457 |         }
 458 |       }
 459 |     }
 460 |   });
 461 | 
 462 |   const taggedPercentage = (taggedCount / memories.length) * 100;
 463 |   const wellTaggedPercentage = (wellTaggedCount / memories.length) * 100;
 464 |   const averageTagsPerMemory = totalTags / memories.length;
 465 | 
 466 |   let score = 0;
 467 |   if (taggedPercentage >= 90) score += 40;
 468 |   else if (taggedPercentage >= 70) score += 30;
 469 |   else if (taggedPercentage >= 50) score += 20;
 470 | 
 471 |   if (wellTaggedPercentage >= 70) score += 30;
 472 |   else if (wellTaggedPercentage >= 50) score += 20;
 473 |   else if (wellTaggedPercentage >= 30) score += 10;
 474 | 
 475 |   if (averageTagsPerMemory >= 4) score += 30;
 476 |   else if (averageTagsPerMemory >= 3) score += 20;
 477 |   else if (averageTagsPerMemory >= 2) score += 10;
 478 | 
 479 |   return {
 480 |     score,
 481 |     taggedPercentage: Math.round(taggedPercentage),
 482 |     wellTaggedPercentage: Math.round(wellTaggedPercentage),
 483 |     averageTagsPerMemory: Math.round(averageTagsPerMemory * 10) / 10,
 484 |     issues: {
 485 |       untagged: memories.length - taggedCount,
 486 |       poorlyTagged: taggedCount - wellTaggedCount
 487 |     }
 488 |   };
 489 | }
 490 | 
 491 | /**
 492 |  * Assess content quality
 493 |  * @param {Array} memories - Array of memory objects
 494 |  * @returns {Object} Content quality assessment
 495 |  */
 496 | function assessContentQuality(memories) {
 497 |   let substantialContent = 0;
 498 |   let hasDescription = 0;
 499 |   let totalLength = 0;
 500 | 
 501 |   memories.forEach(memory => {
 502 |     const content = memory.content || '';
 503 |     totalLength += content.length;
 504 | 
 505 |     if (content.length >= 50) {
 506 |       substantialContent++;
 507 |     }
 508 | 
 509 |     if (content.length >= 200) {
 510 |       hasDescription++;
 511 |     }
 512 |   });
 513 | 
 514 |   const substantialPercentage = (substantialContent / memories.length) * 100;
 515 |   const descriptivePercentage = (hasDescription / memories.length) * 100;
 516 |   const averageLength = totalLength / memories.length;
 517 | 
 518 |   let score = 0;
 519 |   if (substantialPercentage >= 90) score += 50;
 520 |   else if (substantialPercentage >= 70) score += 35;
 521 |   else if (substantialPercentage >= 50) score += 20;
 522 | 
 523 |   if (descriptivePercentage >= 60) score += 30;
 524 |   else if (descriptivePercentage >= 40) score += 20;
 525 |   else if (descriptivePercentage >= 20) score += 10;
 526 | 
 527 |   if (averageLength >= 300) score += 20;
 528 |   else if (averageLength >= 150) score += 10;
 529 | 
 530 |   return {
 531 |     score,
 532 |     substantialPercentage: Math.round(substantialPercentage),
 533 |     descriptivePercentage: Math.round(descriptivePercentage),
 534 |     averageLength: Math.round(averageLength),
 535 |     issues: {
 536 |       tooShort: memories.length - substantialContent,
 537 |       lackingDescription: memories.length - hasDescription
 538 |     }
 539 |   };
 540 | }
 541 | 
 542 | /**
 543 |  * Assess organization quality
 544 |  * @param {Array} memories - Array of memory objects
 545 |  * @returns {Object} Organization quality assessment
 546 |  */
 547 | function assessOrganizationQuality(memories) {
 548 |   const tagAnalysis = analyzeTagUsage(memories);
 549 |   const categories = Object.keys(tagAnalysis.categories);
 550 |   const topTags = tagAnalysis.frequency.slice(0, 10);
 551 | 
 552 |   // Check for balanced tag distribution
 553 |   const tagDistribution = tagAnalysis.frequency.map(([, count]) => count);
 554 |   const maxUsage = Math.max(...tagDistribution);
 555 |   const minUsage = Math.min(...tagDistribution);
 556 |   const distributionBalance = minUsage / maxUsage;
 557 | 
 558 |   let score = 0;
 559 |   
 560 |   // Category diversity
 561 |   if (categories.length >= 5) score += 30;
 562 |   else if (categories.length >= 3) score += 20;
 563 |   else if (categories.length >= 2) score += 10;
 564 | 
 565 |   // Tag usage balance
 566 |   if (distributionBalance >= 0.3) score += 25;
 567 |   else if (distributionBalance >= 0.2) score += 15;
 568 |   else if (distributionBalance >= 0.1) score += 5;
 569 | 
 570 |   // Consistent tag combinations
 571 |   if (tagAnalysis.combinations.length >= 10) score += 25;
 572 |   else if (tagAnalysis.combinations.length >= 5) score += 15;
 573 | 
 574 |   // Avoid over-concentration
 575 |   const topTagUsagePercentage = (topTags[0]?.[1] || 0) / memories.length * 100;
 576 |   if (topTagUsagePercentage <= 30) score += 20;
 577 |   else if (topTagUsagePercentage <= 40) score += 10;
 578 | 
 579 |   return {
 580 |     score,
 581 |     categoryCount: categories.length,
 582 |     tagDistributionBalance: Math.round(distributionBalance * 100),
 583 |     topTagUsagePercentage: Math.round(topTagUsagePercentage),
 584 |     consistentCombinations: tagAnalysis.combinations.length,
 585 |     issues: {
 586 |       fewCategories: categories.length < 3,
 587 |       imbalancedDistribution: distributionBalance < 0.2,
 588 |       overConcentration: topTagUsagePercentage > 40
 589 |     }
 590 |   };
 591 | }
 592 | 
 593 | /**
 594 |  * Assess searchability quality
 595 |  * @param {Array} memories - Array of memory objects
 596 |  * @returns {Object} Searchability quality assessment
 597 |  */
 598 | function assessSearchabilityQuality(memories) {
 599 |   const contentAnalysis = analyzeContentPatterns(memories);
 600 |   const tagAnalysis = analyzeTagUsage(memories);
 601 | 
 602 |   // Calculate searchability metrics
 603 |   const keywordDiversity = Object.keys(contentAnalysis.wordFrequency).length;
 604 |   const tagDiversity = tagAnalysis.totalTags;
 605 |   const averageTagsPerMemory = tagAnalysis.averageTagsPerMemory;
 606 | 
 607 |   let score = 0;
 608 | 
 609 |   // Keyword diversity
 610 |   if (keywordDiversity >= 100) score += 25;
 611 |   else if (keywordDiversity >= 50) score += 15;
 612 |   else if (keywordDiversity >= 25) score += 5;
 613 | 
 614 |   // Tag diversity
 615 |   if (tagDiversity >= 50) score += 25;
 616 |   else if (tagDiversity >= 30) score += 15;
 617 |   else if (tagDiversity >= 15) score += 5;
 618 | 
 619 |   // Tag coverage
 620 |   if (averageTagsPerMemory >= 4) score += 25;
 621 |   else if (averageTagsPerMemory >= 3) score += 15;
 622 |   else if (averageTagsPerMemory >= 2) score += 5;
 623 | 
 624 |   // Content type diversity
 625 |   const contentTypes = Object.keys(contentAnalysis.contentTypes).length;
 626 |   if (contentTypes >= 5) score += 25;
 627 |   else if (contentTypes >= 3) score += 15;
 628 |   else if (contentTypes >= 2) score += 5;
 629 | 
 630 |   return {
 631 |     score,
 632 |     keywordDiversity,
 633 |     tagDiversity,
 634 |     averageTagsPerMemory: Math.round(averageTagsPerMemory * 10) / 10,
 635 |     contentTypeDiversity: contentTypes,
 636 |     issues: {
 637 |       lowKeywordDiversity: keywordDiversity < 25,
 638 |       lowTagDiversity: tagDiversity < 15,
 639 |       poorTagCoverage: averageTagsPerMemory < 2
 640 |     }
 641 |   };
 642 | }
 643 | 
 644 | /**
 645 |  * Generate quality improvement recommendations
 646 |  * @param {Object} metrics - Quality metrics object
 647 |  * @returns {Array} Array of recommendations
 648 |  */
 649 | function generateQualityRecommendations(metrics) {
 650 |   const recommendations = [];
 651 | 
 652 |   // Tagging recommendations
 653 |   if (metrics.tagging.taggedPercentage < 90) {
 654 |     recommendations.push({
 655 |       category: 'tagging',
 656 |       priority: 'high',
 657 |       issue: `${metrics.tagging.issues.untagged} memories are untagged`,
 658 |       action: 'Run memory maintenance session to tag untagged memories',
 659 |       expectedImprovement: 'Improve searchability and organization'
 660 |     });
 661 |   }
 662 | 
 663 |   if (metrics.tagging.averageTagsPerMemory < 3) {
 664 |     recommendations.push({
 665 |       category: 'tagging',
 666 |       priority: 'medium',
 667 |       issue: 'Low average tags per memory',
 668 |       action: 'Add more specific and categorical tags to existing memories',
 669 |       expectedImprovement: 'Better categorization and discoverability'
 670 |     });
 671 |   }
 672 | 
 673 |   // Content recommendations
 674 |   if (metrics.content.substantialPercentage < 80) {
 675 |     recommendations.push({
 676 |       category: 'content',
 677 |       priority: 'medium',
 678 |       issue: `${metrics.content.issues.tooShort} memories have minimal content`,
 679 |       action: 'Expand brief memories with more context and details',
 680 |       expectedImprovement: 'Increased information value and searchability'
 681 |     });
 682 |   }
 683 | 
 684 |   // Organization recommendations
 685 |   if (metrics.organization.categoryCount < 3) {
 686 |     recommendations.push({
 687 |       category: 'organization',
 688 |       priority: 'high',
 689 |       issue: 'Limited tag category diversity',
 690 |       action: 'Implement standardized tag schema with multiple categories',
 691 |       expectedImprovement: 'Better knowledge organization structure'
 692 |     });
 693 |   }
 694 | 
 695 |   if (metrics.organization.tagDistributionBalance < 20) {
 696 |     recommendations.push({
 697 |       category: 'organization',
 698 |       priority: 'medium',
 699 |       issue: 'Imbalanced tag usage distribution',
 700 |       action: 'Review and balance tag usage across content types',
 701 |       expectedImprovement: 'More consistent knowledge organization'
 702 |     });
 703 |   }
 704 | 
 705 |   // Searchability recommendations
 706 |   if (metrics.searchability.tagDiversity < 30) {
 707 |     recommendations.push({
 708 |       category: 'searchability',
 709 |       priority: 'medium',
 710 |       issue: 'Limited tag vocabulary',
 711 |       action: 'Expand tag vocabulary with more specific and varied tags',
 712 |       expectedImprovement: 'Enhanced search precision and recall'
 713 |     });
 714 |   }
 715 | 
 716 |   return recommendations.sort((a, b) => {
 717 |     const priorityOrder = { 'high': 3, 'medium': 2, 'low': 1 };
 718 |     return priorityOrder[b.priority] - priorityOrder[a.priority];
 719 |   });
 720 | }
 721 | 
 722 | // =============================================================================
 723 | // VISUALIZATION DATA PREPARATION
 724 | // =============================================================================
 725 | 
 726 | /**
 727 |  * Prepare comprehensive data package for visualizations
 728 |  * @param {Array} memories - Array of memory objects
 729 |  * @returns {Object} Complete visualization data package
 730 |  */
 731 | function prepareVisualizationData(memories) {
 732 |   const temporal = analyzeTemporalDistribution(memories);
 733 |   const tags = analyzeTagUsage(memories);
 734 |   const content = analyzeContentPatterns(memories);
 735 |   const quality = assessMemoryQuality(memories);
 736 | 
 737 |   return {
 738 |     metadata: {
 739 |       totalMemories: memories.length,
 740 |       analysisDate: new Date().toISOString(),
 741 |       dataVersion: '1.0'
 742 |     },
 743 |     
 744 |     // Chart data for different visualizations
 745 |     charts: {
 746 |       temporalDistribution: prepareTemporalChartData(temporal, 'monthly'),
 747 |       weeklyPattern: prepareTemporalChartData(temporal, 'weekly'),
 748 |       dailyPattern: prepareTemporalChartData(temporal, 'daily'),
 749 |       hourlyPattern: prepareTemporalChartData(temporal, 'hourly'),
 750 |       
 751 |       tagFrequency: tags.frequency.slice(0, 20).map(([tag, count]) => ({
 752 |         tag,
 753 |         count,
 754 |         category: categorizeTag(tag)
 755 |       })),
 756 |       
 757 |       tagCombinations: tags.combinations.slice(0, 10).map(([combo, count]) => ({
 758 |         combination: combo,
 759 |         count,
 760 |         tags: combo.split(' + ')
 761 |       })),
 762 |       
 763 |       contentTypes: Object.entries(content.contentTypes).map(([type, count]) => ({
 764 |         type,
 765 |         count,
 766 |         percentage: Math.round((count / memories.length) * 100)
 767 |       })),
 768 |       
 769 |       contentLengths: Object.entries(content.lengthDistribution).map(([category, count]) => ({
 770 |         category,
 771 |         count,
 772 |         percentage: Math.round((count / memories.length) * 100)
 773 |       }))
 774 |     },
 775 |     
 776 |     // Summary statistics
 777 |     statistics: {
 778 |       temporal: {
 779 |         peakMonth: findPeakPeriod(temporal.monthly),
 780 |         mostActiveDay: findPeakPeriod(temporal.daily),
 781 |         mostActiveHour: findPeakPeriod(temporal.hourly)
 782 |       },
 783 |       
 784 |       tags: {
 785 |         totalUniqueTags: tags.totalTags,
 786 |         averageTagsPerMemory: Math.round(tags.averageTagsPerMemory * 10) / 10,
 787 |         mostUsedTag: tags.frequency[0],
 788 |         categoryDistribution: Object.keys(tags.categories).length
 789 |       },
 790 |       
 791 |       content: {
 792 |         averageLength: Math.round(content.averageLength),
 793 |         mostCommonType: Object.entries(content.contentTypes)
 794 |           .sort(([,a], [,b]) => b - a)[0],
 795 |         keywordCount: Object.keys(content.wordFrequency).length
 796 |       },
 797 |       
 798 |       quality: {
 799 |         overallScore: quality.overallScore,
 800 |         taggedPercentage: quality.metrics.tagging.taggedPercentage,
 801 |         organizationScore: quality.metrics.organization.score,
 802 |         recommendationCount: quality.recommendations.length
 803 |       }
 804 |     },
 805 |     
 806 |     // Raw analysis data for advanced processing
 807 |     rawData: {
 808 |       temporal,
 809 |       tags,
 810 |       content,
 811 |       quality
 812 |     }
 813 |   };
 814 | }
 815 | 
 816 | /**
 817 |  * Find peak period from distribution data
 818 |  * @param {Object} distribution - Distribution object
 819 |  * @returns {Object} Peak period information
 820 |  */
 821 | function findPeakPeriod(distribution) {
 822 |   const entries = Object.entries(distribution);
 823 |   if (entries.length === 0) return null;
 824 | 
 825 |   const peak = entries.reduce((max, [period, memories]) => 
 826 |     memories.length > max.count ? { period, count: memories.length } : max,
 827 |     { period: null, count: 0 }
 828 |   );
 829 | 
 830 |   return peak;
 831 | }
 832 | 
 833 | // =============================================================================
 834 | // EXPORT FUNCTIONS
 835 | // =============================================================================
 836 | 
 837 | /**
 838 |  * Export analysis results to various formats
 839 |  * @param {Object} analysisData - Complete analysis data
 840 |  * @param {string} format - Export format ('json', 'csv', 'summary')
 841 |  * @returns {string} Formatted export data
 842 |  */
 843 | function exportAnalysisData(analysisData, format = 'json') {
 844 |   switch (format) {
 845 |     case 'json':
 846 |       return JSON.stringify(analysisData, null, 2);
 847 |     
 848 |     case 'csv':
 849 |       return exportToCSV(analysisData);
 850 |     
 851 |     case 'summary':
 852 |       return generateSummaryReport(analysisData);
 853 |     
 854 |     default:
 855 |       throw new Error(`Unsupported export format: ${format}`);
 856 |   }
 857 | }
 858 | 
 859 | /**
 860 |  * Export key metrics to CSV format
 861 |  * @param {Object} analysisData - Analysis data
 862 |  * @returns {string} CSV formatted data
 863 |  */
 864 | function exportToCSV(analysisData) {
 865 |   const csvSections = [];
 866 | 
 867 |   // Temporal data
 868 |   csvSections.push('TEMPORAL DISTRIBUTION');
 869 |   csvSections.push('Month,Count');
 870 |   analysisData.charts.temporalDistribution.forEach(item => {
 871 |     csvSections.push(`${item.period},${item.count}`);
 872 |   });
 873 |   csvSections.push('');
 874 | 
 875 |   // Tag frequency
 876 |   csvSections.push('TAG FREQUENCY');
 877 |   csvSections.push('Tag,Count,Category');
 878 |   analysisData.charts.tagFrequency.forEach(item => {
 879 |     csvSections.push(`${item.tag},${item.count},${item.category}`);
 880 |   });
 881 |   csvSections.push('');
 882 | 
 883 |   // Content types
 884 |   csvSections.push('CONTENT TYPES');
 885 |   csvSections.push('Type,Count,Percentage');
 886 |   analysisData.charts.contentTypes.forEach(item => {
 887 |     csvSections.push(`${item.type},${item.count},${item.percentage}%`);
 888 |   });
 889 | 
 890 |   return csvSections.join('\n');
 891 | }
 892 | 
 893 | /**
 894 |  * Generate a human-readable summary report
 895 |  * @param {Object} analysisData - Analysis data
 896 |  * @returns {string} Summary report
 897 |  */
 898 | function generateSummaryReport(analysisData) {
 899 |   const stats = analysisData.statistics;
 900 |   const quality = analysisData.rawData.quality;
 901 | 
 902 |   return `
 903 | MEMORY ANALYSIS SUMMARY REPORT
 904 | Generated: ${new Date().toLocaleDateString()}
 905 | 
 906 | DATABASE OVERVIEW:
 907 | - Total Memories: ${analysisData.metadata.totalMemories}
 908 | - Overall Quality Score: ${stats.quality.overallScore}/100
 909 | - Tagged Memories: ${stats.quality.taggedPercentage}%
 910 | 
 911 | TEMPORAL PATTERNS:
 912 | - Peak Activity: ${stats.temporal.peakMonth?.period} (${stats.temporal.peakMonth?.count} memories)
 913 | - Most Active Day: ${stats.temporal.mostActiveDay?.period}
 914 | - Most Active Hour: ${stats.temporal.mostActiveHour?.period}:00
 915 | 
 916 | TAG ANALYSIS:
 917 | - Unique Tags: ${stats.tags.totalUniqueTags}
 918 | - Average Tags per Memory: ${stats.tags.averageTagsPerMemory}
 919 | - Most Used Tag: ${stats.tags.mostUsedTag?.[0]} (${stats.tags.mostUsedTag?.[1]} uses)
 920 | - Tag Categories: ${stats.tags.categoryDistribution}
 921 | 
 922 | CONTENT INSIGHTS:
 923 | - Average Length: ${stats.content.averageLength} characters
 924 | - Most Common Type: ${stats.content.mostCommonType?.[0]}
 925 | - Unique Keywords: ${stats.content.keywordCount}
 926 | 
 927 | QUALITY RECOMMENDATIONS:
 928 | ${quality.recommendations.slice(0, 3).map(rec => 
 929 |   `- ${rec.priority.toUpperCase()}: ${rec.action}`
 930 | ).join('\n')}
 931 | 
 932 | For detailed analysis, use the full JSON export or visualization tools.
 933 | `.trim();
 934 | }
 935 | 
 936 | // =============================================================================
 937 | // MAIN ANALYSIS PIPELINE
 938 | // =============================================================================
 939 | 
 940 | /**
 941 |  * Run complete analysis pipeline on memory data
 942 |  * @param {Array} memories - Array of memory objects
 943 |  * @returns {Object} Complete analysis results
 944 |  */
 945 | async function runCompleteAnalysis(memories) {
 946 |   console.log('Starting comprehensive memory analysis...');
 947 |   
 948 |   const startTime = Date.now();
 949 |   
 950 |   try {
 951 |     // Run all analysis functions
 952 |     const results = prepareVisualizationData(memories);
 953 |     
 954 |     const endTime = Date.now();
 955 |     const duration = endTime - startTime;
 956 |     
 957 |     console.log(`Analysis complete in ${duration}ms`);
 958 |     console.log(`Analyzed ${memories.length} memories`);
 959 |     console.log(`Overall quality score: ${results.statistics.quality.overallScore}/100`);
 960 |     
 961 |     return {
 962 |       ...results,
 963 |       meta: {
 964 |         analysisDuration: duration,
 965 |         analysisTimestamp: new Date().toISOString(),
 966 |         version: '1.0'
 967 |       }
 968 |     };
 969 |     
 970 |   } catch (error) {
 971 |     console.error('Analysis failed:', error);
 972 |     throw error;
 973 |   }
 974 | }
 975 | 
 976 | // Export all functions for use in other modules
 977 | if (typeof module !== 'undefined' && module.exports) {
 978 |   module.exports = {
 979 |     // Temporal analysis
 980 |     analyzeTemporalDistribution,
 981 |     prepareTemporalChartData,
 982 |     
 983 |     // Tag analysis
 984 |     analyzeTagUsage,
 985 |     analyzeTagConsistency,
 986 |     categorizeTag,
 987 |     
 988 |     // Content analysis
 989 |     analyzeContentPatterns,
 990 |     detectContentType,
 991 |     extractKeywords,
 992 |     
 993 |     // Quality analysis
 994 |     assessMemoryQuality,
 995 |     generateQualityRecommendations,
 996 |     
 997 |     // Visualization
 998 |     prepareVisualizationData,
 999 |     
1000 |     // Export utilities
1001 |     exportAnalysisData,
1002 |     generateSummaryReport,
1003 |     
1004 |     // Main pipeline
1005 |     runCompleteAnalysis
1006 |   };
1007 | }
1008 | 
1009 | /**
1010 |  * Usage Examples:
1011 |  * 
1012 |  * // Basic usage with MCP Memory Service data
1013 |  * const memories = await retrieve_memory({ query: "all memories", n_results: 500 });
1014 |  * const analysis = await runCompleteAnalysis(memories);
1015 |  * 
1016 |  * // Specific analyses
1017 |  * const temporalData = analyzeTemporalDistribution(memories);
1018 |  * const tagAnalysis = analyzeTagUsage(memories);
1019 |  * const qualityReport = assessMemoryQuality(memories);
1020 |  * 
1021 |  * // Export results
1022 |  * const jsonExport = exportAnalysisData(analysis, 'json');
1023 |  * const csvExport = exportAnalysisData(analysis, 'csv');
1024 |  * const summary = exportAnalysisData(analysis, 'summary');
1025 |  * 
1026 |  * // Prepare data for React charts
1027 |  * const chartData = prepareVisualizationData(memories);
1028 |  * // Use chartData.charts.temporalDistribution with the React component
1029 |  */
```
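
These scripts assume plain memory objects carrying `timestamp`, `tags`, and `content` fields; nothing else about the MCP Memory Service is required. If the same quick check is wanted server-side, the frequency portion of `analyzeTagUsage` ports to a few lines of Python. A sketch (not part of this repository, shown only to document the expected data shape):

```python
from collections import Counter

# Memory objects in the shape the JS scripts above expect:
# dicts with "timestamp", "tags", and "content" keys.
memories = [
    {"timestamp": "2025-01-15T10:00:00Z", "tags": ["python", "testing"], "content": "..."},
    {"timestamp": "2025-01-16T14:30:00Z", "tags": ["python", "debugging"], "content": "..."},
]

# Equivalent of analyzeTagUsage's tag frequency, sorted most-used first
tag_frequency = Counter(tag for m in memories for tag in m.get("tags", []))
for tag, count in tag_frequency.most_common():
    print(f"{tag}: {count}")
```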

--------------------------------------------------------------------------------
/src/mcp_memory_service/server/handlers/memory.py:
--------------------------------------------------------------------------------
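
Note on the listing below: each handler takes a `server` object and relies on it exposing `_ensure_storage_initialized()` and a `memory_service` attribute; the retrieval handlers additionally call `server.record_query_time()`. A minimal, test-style sketch of exercising `handle_store_memory` against a stub that satisfies this contract (assuming the package is installed so the module path matches the file path below):

```python
import asyncio
from unittest.mock import AsyncMock, MagicMock

from mcp_memory_service.server.handlers.memory import handle_store_memory

# Stub satisfying the handler's implicit server contract
server = MagicMock()
server._ensure_storage_initialized = AsyncMock()
server.memory_service.store_memory = AsyncMock(
    return_value={"success": True, "memory": {"content_hash": "abc123def456"}}
)

result = asyncio.run(handle_store_memory(server, {"content": "hello world"}))
print(result[0].text)  # Memory stored successfully (hash: abc123de...)
```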

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Memory handler functions for MCP server.
 17 | 
 18 | Core CRUD operations, retrieval, search, deletion, and timeframe-based operations.
 19 | Extracted from server_impl.py during the Phase 2.1 refactoring.
 20 | """
 21 | 
 22 | import asyncio
 23 | import logging
 24 | import time
 25 | import traceback
 26 | import uuid
 27 | from datetime import datetime
 28 | from typing import List
 29 | 
 30 | from mcp import types
 31 | 
 32 | logger = logging.getLogger(__name__)
 33 | 
 34 | 
 35 | async def handle_store_memory(server, arguments: dict) -> List[types.TextContent]:
 36 |     content = arguments.get("content")
 37 |     metadata = arguments.get("metadata", {})
 38 | 
 39 |     if not content:
 40 |         return [types.TextContent(type="text", text="Error: Content is required")]
 41 | 
 42 |     try:
 43 |         # Initialize storage lazily when needed (also initializes memory_service)
 44 |         await server._ensure_storage_initialized()
 45 | 
 46 |         # Extract parameters for MemoryService call
 47 |         tags = metadata.get("tags", "")
 48 |         memory_type = metadata.get("type", "note")  # HTTP server uses metadata.type
 49 |         client_hostname = arguments.get("client_hostname")
 50 | 
 51 |         # Call shared MemoryService business logic
 52 |         result = await server.memory_service.store_memory(
 53 |             content=content,
 54 |             tags=tags,
 55 |             memory_type=memory_type,
 56 |             metadata=metadata,
 57 |             client_hostname=client_hostname
 58 |         )
 59 | 
 60 |         # Convert MemoryService result to MCP response format
 61 |         if not result.get("success"):
 62 |             error_msg = result.get("error", "Unknown error")
 63 |             return [types.TextContent(type="text", text=f"Error storing memory: {error_msg}")]
 64 | 
 65 |         if "memories" in result:
 66 |             # Chunked response - multiple memories created
 67 |             num_chunks = len(result["memories"])
 68 |             original_hash = result.get("original_hash", "unknown")
 69 |             message = f"Successfully stored {num_chunks} memory chunks (original hash: {original_hash[:8]}...)"
 70 |         else:
 71 |             # Single memory response
 72 |             memory_hash = result["memory"]["content_hash"]
 73 |             message = f"Memory stored successfully (hash: {memory_hash[:8]}...)"
 74 | 
 75 |         return [types.TextContent(type="text", text=message)]
 76 | 
 77 |     except Exception as e:
 78 |         logger.error(f"Error storing memory: {str(e)}\n{traceback.format_exc()}")
 79 |         return [types.TextContent(type="text", text=f"Error storing memory: {str(e)}")]
 80 | 
 81 | 
 82 | async def handle_retrieve_memory(server, arguments: dict) -> List[types.TextContent]:
 83 |     query = arguments.get("query")
 84 |     n_results = arguments.get("n_results", 5)
 85 | 
 86 |     if not query:
 87 |         return [types.TextContent(type="text", text="Error: Query is required")]
 88 | 
 89 |     try:
 90 |         # Initialize storage lazily when needed (also initializes memory_service)
 91 |         await server._ensure_storage_initialized()
 92 | 
 93 |         # Track performance
 94 |         start_time = time.time()
 95 | 
 96 |         # Call shared MemoryService business logic
 97 |         result = await server.memory_service.retrieve_memories(
 98 |             query=query,
 99 |             n_results=n_results
100 |         )
101 | 
102 |         query_time_ms = (time.time() - start_time) * 1000
103 | 
104 |         # Record query time for performance monitoring
105 |         server.record_query_time(query_time_ms)
106 | 
107 |         if result.get("error"):
108 |             return [types.TextContent(type="text", text=f"Error retrieving memories: {result['error']}")]
109 | 
110 |         memories = result.get("memories", [])
111 |         if not memories:
112 |             return [types.TextContent(type="text", text="No matching memories found")]
113 | 
114 |         # Format results in HTTP server style (different from MCP server)
115 |         formatted_results = []
116 |         for i, memory in enumerate(memories):
117 |             memory_info = [f"Memory {i+1}:"]
118 |             # HTTP server uses created_at instead of timestamp
119 |             created_at = memory.get("created_at")
120 |             if created_at:
121 |                 # Parse ISO string and format
122 |                 try:
123 |                     # Handle both float (timestamp) and string (ISO format) types
124 |                     if isinstance(created_at, (int, float)):
125 |                         dt = datetime.fromtimestamp(created_at)
126 |                     else:
127 |                         dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
128 |                     memory_info.append(f"Timestamp: {dt.strftime('%Y-%m-%d %H:%M:%S')}")
129 |                 except (ValueError, TypeError):
130 |                     memory_info.append(f"Timestamp: {created_at}")
131 | 
132 |             memory_info.extend([
133 |                 f"Content: {memory['content']}",
134 |                 f"Hash: {memory['content_hash']}",
135 |                 f"Relevance Score: {memory['similarity_score']:.2f}"
136 |             ])
137 |             tags = memory.get("tags", [])
138 |             if tags:
139 |                 memory_info.append(f"Tags: {', '.join(tags)}")
140 |             memory_info.append("---")
141 |             formatted_results.append("\n".join(memory_info))
142 | 
143 |         return [types.TextContent(
144 |             type="text",
145 |             text="Found the following memories:\n\n" + "\n".join(formatted_results)
146 |         )]
147 |     except Exception as e:
148 |         logger.error(f"Error retrieving memories: {str(e)}\n{traceback.format_exc()}")
149 |         return [types.TextContent(type="text", text=f"Error retrieving memories: {str(e)}")]
150 | 
151 | 
152 | async def handle_retrieve_with_quality_boost(server, arguments: dict) -> List[types.TextContent]:
153 |     """Handle quality-boosted memory retrieval with reranking."""
154 |     query = arguments.get("query")
155 |     n_results = arguments.get("n_results", 10)
156 |     quality_weight = arguments.get("quality_weight", 0.3)
157 | 
158 |     if not query:
159 |         return [types.TextContent(type="text", text="Error: Query is required")]
160 | 
161 |     # Validate quality_weight
162 |     if not 0.0 <= quality_weight <= 1.0:
163 |         return [types.TextContent(
164 |             type="text",
165 |             text=f"Error: quality_weight must be 0.0-1.0, got {quality_weight}"
166 |         )]
167 | 
168 |     try:
169 |         # Initialize storage
170 |         storage = await server._ensure_storage_initialized()
171 | 
172 |         # Track performance
173 |         start_time = time.time()
174 | 
175 |         # Call quality-boosted retrieval
176 |         results = await storage.retrieve_with_quality_boost(
177 |             query=query,
178 |             n_results=n_results,
179 |             quality_boost=True,
180 |             quality_weight=quality_weight
181 |         )
182 | 
183 |         query_time_ms = (time.time() - start_time) * 1000
184 | 
185 |         # Record query time for performance monitoring
186 |         server.record_query_time(query_time_ms)
187 | 
188 |         if not results:
189 |             return [types.TextContent(type="text", text="No matching memories found")]
190 | 
191 |         # Format results with quality information
192 |         response_parts = [
193 |             f"# Quality-Boosted Search Results",
194 |             f"Query: {query}",
195 |             f"Quality Weight: {quality_weight:.1%} (Semantic: {1-quality_weight:.1%})",
196 |             f"Results: {len(results)}",
197 |             f"Search Time: {query_time_ms:.0f}ms",
198 |             ""
199 |         ]
200 | 
201 |         for i, result in enumerate(results, 1):
202 |             memory = result.memory
203 |             semantic_score = result.debug_info.get('original_semantic_score', 0) if result.debug_info else result.relevance_score
204 |             quality_score = result.debug_info.get('quality_score', 0.5) if result.debug_info else memory.quality_score
205 |             composite_score = result.relevance_score
206 | 
207 |             # Format timestamp
208 |             created_at = memory.created_at
209 |             if created_at:
210 |                 try:
211 |                     dt = datetime.fromtimestamp(created_at)
212 |                     timestamp_str = dt.strftime('%Y-%m-%d %H:%M:%S')
213 |                 except (ValueError, TypeError):
214 |                     timestamp_str = str(created_at)
215 |             else:
216 |                 timestamp_str = "N/A"
217 | 
218 |             memory_info = [
219 |                 f"## Result {i} (Score: {composite_score:.3f})",
220 |                 f"- Semantic: {semantic_score:.3f}",
221 |                 f"- Quality: {quality_score:.3f}",
222 |                 f"- Timestamp: {timestamp_str}",
223 |                 f"- Hash: {memory.content_hash[:12]}...",
224 |                 f"- Content: {memory.content[:200]}{'...' if len(memory.content) > 200 else ''}",
225 |             ]
226 | 
227 |             if memory.tags:
228 |                 memory_info.append(f"- Tags: {', '.join(memory.tags)}")
229 | 
230 |             response_parts.append("\n".join(memory_info))
231 |             response_parts.append("")
232 | 
233 |         return [types.TextContent(type="text", text="\n".join(response_parts))]
234 | 
235 |     except Exception as e:
236 |         logger.error(f"Error in quality-boosted retrieval: {str(e)}\n{traceback.format_exc()}")
237 |         return [types.TextContent(
238 |             type="text",
239 |             text=f"Error retrieving memories with quality boost: {str(e)}"
240 |         )]
241 | 
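# Illustrative note (not part of this module): the composite score reported
# above is assumed to be a linear blend of the two components, i.e.
#   composite = (1 - quality_weight) * semantic + quality_weight * quality
# e.g. with quality_weight=0.3, semantic=0.82, quality=0.50:
#   0.7 * 0.82 + 0.3 * 0.50 = 0.724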
242 | 
243 | async def handle_search_by_tag(server, arguments: dict) -> List[types.TextContent]:
244 |     from ...services.memory_service import normalize_tags
245 | 
246 |     tags = normalize_tags(arguments.get("tags", []))
247 | 
248 |     if not tags:
249 |         return [types.TextContent(type="text", text="Error: Tags are required")]
250 | 
251 |     try:
252 |         # Initialize storage lazily when needed (also initializes memory_service)
253 |         await server._ensure_storage_initialized()
254 | 
255 |         # Call shared MemoryService business logic
256 |         result = await server.memory_service.search_by_tag(tags=tags)
257 | 
258 |         if result.get("error"):
259 |             return [types.TextContent(type="text", text=f"Error searching by tags: {result['error']}")]
260 | 
261 |         memories = result.get("memories", [])
262 |         if not memories:
263 |             return [types.TextContent(
264 |                 type="text",
265 |                 text=f"No memories found with tags: {', '.join(tags)}"
266 |             )]
267 | 
268 |         formatted_results = []
269 |         for i, memory in enumerate(memories):
270 |             memory_info = [f"Memory {i+1}:"]
271 |             created_at = memory.get("created_at")
272 |             if created_at:
273 |                 try:
274 |                     # Handle both float (timestamp) and string (ISO format) types
275 |                     if isinstance(created_at, (int, float)):
276 |                         dt = datetime.fromtimestamp(created_at)
277 |                     else:
278 |                         dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
279 |                     memory_info.append(f"Timestamp: {dt.strftime('%Y-%m-%d %H:%M:%S')}")
280 |                 except (ValueError, TypeError):
281 |                     memory_info.append(f"Timestamp: {created_at}")
282 | 
283 |             memory_info.extend([
284 |                 f"Content: {memory['content']}",
285 |                 f"Hash: {memory['content_hash']}",
286 |                 f"Tags: {', '.join(memory.get('tags', []))}"
287 |             ])
288 |             memory_type = memory.get("memory_type")
289 |             if memory_type:
290 |                 memory_info.append(f"Type: {memory_type}")
291 |             memory_info.append("---")
292 |             formatted_results.append("\n".join(memory_info))
293 | 
294 |         return [types.TextContent(
295 |             type="text",
296 |             text="Found the following memories:\n\n" + "\n".join(formatted_results)
297 |         )]
298 |     except Exception as e:
299 |         logger.error(f"Error searching by tags: {str(e)}\n{traceback.format_exc()}")
300 |         return [types.TextContent(type="text", text=f"Error searching by tags: {str(e)}")]
301 | 
302 | 
303 | async def handle_delete_memory(server, arguments: dict) -> List[types.TextContent]:
304 |     content_hash = arguments.get("content_hash")
305 | 
306 |     try:
307 |         # Initialize storage lazily when needed (also initializes memory_service)
308 |         await server._ensure_storage_initialized()
309 | 
310 |         # Call shared MemoryService business logic
311 |         result = await server.memory_service.delete_memory(content_hash)
312 | 
313 |         # Handle response based on success/failure format
314 |         if result["success"]:
315 |             return [types.TextContent(type="text", text=f"Memory deleted successfully: {result['content_hash'][:16]}...")]
316 |         else:
317 |             return [types.TextContent(type="text", text=f"Failed to delete memory: {result.get('error', 'Unknown error')}")]
318 |     except Exception as e:
319 |         logger.error(f"Error deleting memory: {str(e)}\n{traceback.format_exc()}")
320 |         return [types.TextContent(type="text", text=f"Error deleting memory: {str(e)}")]
321 | 
322 | 
323 | async def handle_delete_by_tag(server, arguments: dict) -> List[types.TextContent]:
324 |     """Handler for deleting memories by tags."""
325 |     from ...services.memory_service import normalize_tags
326 | 
327 |     tags = arguments.get("tags", [])
328 | 
329 |     if not tags:
330 |         return [types.TextContent(type="text", text="Error: Tags array is required")]
331 | 
332 |     # Normalize tags (handles comma-separated strings and arrays)
333 |     tags = normalize_tags(tags)
334 | 
335 |     try:
336 |         # Initialize storage lazily when needed
337 |         storage = await server._ensure_storage_initialized()
338 |         # Use delete_by_tags (plural) since tags is a list after normalize_tags
339 |         count, message = await storage.delete_by_tags(tags)
340 |         return [types.TextContent(type="text", text=message)]
341 |     except Exception as e:
342 |         logger.error(f"Error deleting by tag: {str(e)}\n{traceback.format_exc()}")
343 |         return [types.TextContent(type="text", text=f"Error deleting by tag: {str(e)}")]
344 | 
345 | 
346 | async def handle_delete_by_tags(server, arguments: dict) -> List[types.TextContent]:
347 |     """Handler for explicit multiple tag deletion with progress tracking."""
348 |     from ...services.memory_service import normalize_tags
349 | 
350 |     tags = normalize_tags(arguments.get("tags", []))
351 | 
352 |     if not tags:
353 |         return [types.TextContent(type="text", text="Error: Tags array is required")]
354 | 
355 |     try:
356 |         # Initialize storage lazily when needed
357 |         storage = await server._ensure_storage_initialized()
358 | 
359 |         # Generate operation ID for progress tracking
360 |         operation_id = f"delete_by_tags_{uuid.uuid4().hex[:8]}"
361 | 
362 |         # Send initial progress notification
363 |         await server.send_progress_notification(operation_id, 0, f"Starting deletion of memories with tags: {', '.join(tags)}")
364 | 
365 |         # Execute deletion with progress updates
366 |         await server.send_progress_notification(operation_id, 25, "Searching for memories to delete...")
367 | 
368 |         # If storage supports progress callbacks, use them
369 |         if hasattr(storage, 'delete_by_tags_with_progress'):
370 |             count, message = await storage.delete_by_tags_with_progress(
371 |                 tags,
372 |                 progress_callback=lambda p, msg: asyncio.create_task(  # fire-and-forget notification task
373 |                     server.send_progress_notification(operation_id, 25 + (p * 0.7), msg)  # scale sub-progress p (assumed 0-100) into the 25-95 band
374 |                 )
375 |             )
376 |         else:
377 |             await server.send_progress_notification(operation_id, 50, "Deleting memories...")
378 |             count, message = await storage.delete_by_tags(tags)
379 |             await server.send_progress_notification(operation_id, 90, f"Deleted {count} memories")
380 | 
381 |         # Complete the operation
382 |         await server.send_progress_notification(operation_id, 100, f"Deletion completed: {message}")
383 | 
384 |         return [types.TextContent(type="text", text=f"{message} (Operation ID: {operation_id})")]
385 |     except Exception as e:
386 |         logger.error(f"Error deleting by tags: {str(e)}\n{traceback.format_exc()}")
387 |         return [types.TextContent(type="text", text=f"Error deleting by tags: {str(e)}")]
388 | 
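# Sketch of the optional capability probed above (signature inferred from the
# call site, not a documented interface):
#   async def delete_by_tags_with_progress(self, tags, progress_callback) -> tuple[int, str]
# where progress_callback(percent, message) is expected to receive a 0-100
# percent that the handler rescales into its 25-95 progress band.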
389 | 
390 | async def handle_delete_by_all_tags(server, arguments: dict) -> List[types.TextContent]:
391 |     """Handler for deleting memories that contain ALL specified tags."""
392 |     from ...services.memory_service import normalize_tags
393 | 
394 |     tags = normalize_tags(arguments.get("tags", []))
395 | 
396 |     if not tags:
397 |         return [types.TextContent(type="text", text="Error: Tags array is required")]
398 | 
399 |     try:
400 |         # Initialize storage lazily when needed
401 |         storage = await server._ensure_storage_initialized()
402 |         count, message = await storage.delete_by_all_tags(tags)
403 |         return [types.TextContent(type="text", text=message)]
404 |     except Exception as e:
405 |         logger.error(f"Error deleting by all tags: {str(e)}\n{traceback.format_exc()}")
406 |         return [types.TextContent(type="text", text=f"Error deleting by all tags: {str(e)}")]
407 | 
408 | 
409 | async def handle_cleanup_duplicates(server, arguments: dict) -> List[types.TextContent]:
410 |     try:
411 |         # Initialize storage lazily when needed
412 |         storage = await server._ensure_storage_initialized()
413 |         count, message = await storage.cleanup_duplicates()
414 |         return [types.TextContent(type="text", text=message)]
415 |     except Exception as e:
416 |         logger.error(f"Error cleaning up duplicates: {str(e)}\n{traceback.format_exc()}")
417 |         return [types.TextContent(type="text", text=f"Error cleaning up duplicates: {str(e)}")]
418 | 
419 | 
420 | async def handle_update_memory_metadata(server, arguments: dict) -> List[types.TextContent]:
421 |     """Handle memory metadata update requests."""
422 |     try:
423 |         from ...services.memory_service import normalize_tags
424 | 
425 |         content_hash = arguments.get("content_hash")
426 |         updates = arguments.get("updates")
427 |         preserve_timestamps = arguments.get("preserve_timestamps", True)
428 | 
429 |         if not content_hash:
430 |             return [types.TextContent(type="text", text="Error: content_hash is required")]
431 | 
432 |         if not updates:
433 |             return [types.TextContent(type="text", text="Error: updates dictionary is required")]
434 | 
435 |         if not isinstance(updates, dict):
436 |             return [types.TextContent(type="text", text="Error: updates must be a dictionary")]
437 | 
438 |         # Normalize tags if present in updates
439 |         if "tags" in updates:
440 |             updates["tags"] = normalize_tags(updates["tags"])
441 | 
442 |         # Initialize storage lazily when needed
443 |         storage = await server._ensure_storage_initialized()
444 | 
445 |         # Call the storage method
446 |         success, message = await storage.update_memory_metadata(
447 |             content_hash=content_hash,
448 |             updates=updates,
449 |             preserve_timestamps=preserve_timestamps
450 |         )
451 | 
452 |         if success:
453 |             logger.info(f"Successfully updated metadata for memory {content_hash}")
454 |             return [types.TextContent(
455 |                 type="text",
456 |                 text=f"Successfully updated memory metadata. {message}"
457 |             )]
458 |         else:
459 |             logger.warning(f"Failed to update metadata for memory {content_hash}: {message}")
460 |             return [types.TextContent(type="text", text=f"Failed to update memory metadata: {message}")]
461 | 
462 |     except Exception as e:
463 |         error_msg = f"Error updating memory metadata: {str(e)}"
464 |         logger.error(f"{error_msg}\n{traceback.format_exc()}")
465 |         return [types.TextContent(type="text", text=error_msg)]
466 | 
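# Example argument shape for this handler (illustrative values only):
#   {
#       "content_hash": "abc123...",
#       "updates": {"tags": "work,urgent", "memory_type": "task"},
#       "preserve_timestamps": True,
#   }
# normalize_tags() turns the comma-separated string into ["work", "urgent"]
# before the update reaches storage.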
467 | 
468 | async def handle_debug_retrieve(server, arguments: dict) -> List[types.TextContent]:
469 |     query = arguments.get("query")
470 |     n_results = arguments.get("n_results", 5)
471 |     similarity_threshold = arguments.get("similarity_threshold", 0.0)
472 | 
473 |     if not query:
474 |         return [types.TextContent(type="text", text="Error: Query is required")]
475 | 
476 |     try:
477 |         # Initialize storage lazily when needed
478 |         storage = await server._ensure_storage_initialized()
479 | 
480 |         from ..utils.debug import debug_retrieve_memory
481 |         results = await debug_retrieve_memory(
482 |             storage,
483 |             query,
484 |             n_results,
485 |             similarity_threshold
486 |         )
487 | 
488 |         if not results:
489 |             return [types.TextContent(type="text", text="No matching memories found")]
490 | 
491 |         formatted_results = []
492 |         for i, result in enumerate(results):
493 |             memory_info = [
494 |                 f"Memory {i+1}:",
495 |                 f"Content: {result.memory.content}",
496 |                 f"Hash: {result.memory.content_hash}",
497 |                 f"Similarity Score: {result.relevance_score:.4f}"
498 |             ]
499 | 
500 |             # Add debug info if available
501 |             if result.debug_info:
502 |                 if 'raw_distance' in result.debug_info:
503 |                     memory_info.append(f"Raw Distance: {result.debug_info['raw_distance']:.4f}")
504 |                 if 'backend' in result.debug_info:
505 |                     memory_info.append(f"Backend: {result.debug_info['backend']}")
506 |                 if 'query' in result.debug_info:
507 |                     memory_info.append(f"Query: {result.debug_info['query']}")
508 |                 if 'similarity_threshold' in result.debug_info:
509 |                     memory_info.append(f"Threshold: {result.debug_info['similarity_threshold']:.2f}")
510 | 
511 |             if result.memory.tags:
512 |                 memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
513 |             memory_info.append("---")
514 |             formatted_results.append("\n".join(memory_info))
515 | 
516 |         return [types.TextContent(
517 |             type="text",
518 |             text="Found the following memories:\n\n" + "\n".join(formatted_results)
519 |         )]
520 |     except Exception as e:
521 |         return [types.TextContent(type="text", text=f"Error in debug retrieve: {str(e)}")]
522 | 
523 | 
524 | async def handle_exact_match_retrieve(server, arguments: dict) -> List[types.TextContent]:
525 |     content = arguments.get("content")
526 |     if not content:
527 |         return [types.TextContent(type="text", text="Error: Content is required")]
528 | 
529 |     try:
530 |         # Initialize storage lazily when needed
531 |         storage = await server._ensure_storage_initialized()
532 | 
533 |         from ..utils.debug import exact_match_retrieve
534 |         memories = await exact_match_retrieve(storage, content)
535 | 
536 |         if not memories:
537 |             return [types.TextContent(type="text", text="No exact matches found")]
538 | 
539 |         formatted_results = []
540 |         for i, memory in enumerate(memories):
541 |             memory_info = [
542 |                 f"Memory {i+1}:",
543 |                 f"Content: {memory.content}",
544 |                 f"Hash: {memory.content_hash}"
545 |             ]
546 | 
547 |             if memory.tags:
548 |                 memory_info.append(f"Tags: {', '.join(memory.tags)}")
549 |             memory_info.append("---")
550 |             formatted_results.append("\n".join(memory_info))
551 | 
552 |         return [types.TextContent(
553 |             type="text",
554 |             text="Found the following exact matches:\n\n" + "\n".join(formatted_results)
555 |         )]
556 |     except Exception as e:
557 |         return [types.TextContent(type="text", text=f"Error in exact match retrieve: {str(e)}")]
558 | 
559 | 
560 | async def handle_get_raw_embedding(server, arguments: dict) -> List[types.TextContent]:
561 |     content = arguments.get("content")
562 |     if not content:
563 |         return [types.TextContent(type="text", text="Error: Content is required")]
564 | 
565 |     try:
566 |         # Initialize storage lazily when needed
567 |         storage = await server._ensure_storage_initialized()
568 | 
569 |         from ..utils.debug import get_raw_embedding
570 |         result = await asyncio.to_thread(get_raw_embedding, storage, content)
571 | 
572 |         if result["status"] == "success":
573 |             embedding = result["embedding"]
574 |             dimension = result["dimension"]
575 |             # Show first 10 and last 10 values for readability
576 |             if len(embedding) > 20:
577 |                 embedding_str = f"[{', '.join(f'{x:.6f}' for x in embedding[:10])}, ..., {', '.join(f'{x:.6f}' for x in embedding[-10:])}]"
578 |             else:
579 |                 embedding_str = f"[{', '.join(f'{x:.6f}' for x in embedding)}]"
580 | 
581 |             return [types.TextContent(
582 |                 type="text",
583 |                 text=f"Embedding generated successfully:\n"
584 |                      f"Dimension: {dimension}\n"
585 |                      f"Vector: {embedding_str}"
586 |             )]
587 |         else:
588 |             return [types.TextContent(
589 |                 type="text",
590 |                 text=f"Failed to generate embedding: {result['error']}"
591 |             )]
592 | 
593 |     except Exception as e:
594 |         return [types.TextContent(type="text", text=f"Error getting raw embedding: {str(e)}")]
595 | 
596 | 
597 | async def handle_recall_memory(server, arguments: dict) -> List[types.TextContent]:
598 |     """
599 |     Handle memory recall requests with natural language time expressions.
600 | 
601 |     This handler parses natural language time expressions from the query,
602 |     extracts time ranges, and combines them with optional semantic search.
603 |     """
604 |     from ...utils.time_parser import extract_time_expression, parse_time_expression
605 | 
606 |     query = arguments.get("query", "")
607 |     n_results = arguments.get("n_results", 5)
608 | 
609 |     if not query:
610 |         return [types.TextContent(type="text", text="Error: Query is required")]
611 | 
612 |     try:
613 |         # Initialize storage lazily when needed
614 |         storage = await server._ensure_storage_initialized()
615 | 
616 |         # Parse natural language time expressions
617 |         cleaned_query, (start_timestamp, end_timestamp) = extract_time_expression(query)
618 | 
619 |         # Log the parsed timestamps and clean query
620 |         logger.info(f"Original query: {query}")
621 |         logger.info(f"Cleaned query for semantic search: {cleaned_query}")
622 |         logger.info(f"Parsed time range: {start_timestamp} to {end_timestamp}")
623 | 
624 |         # Log more detailed timestamp information for debugging
625 |         if start_timestamp is not None:
626 |             start_dt = datetime.fromtimestamp(start_timestamp)
627 |             logger.info(f"Start timestamp: {start_timestamp} ({start_dt.strftime('%Y-%m-%d %H:%M:%S')})")
628 |         if end_timestamp is not None:
629 |             end_dt = datetime.fromtimestamp(end_timestamp)
630 |             logger.info(f"End timestamp: {end_timestamp} ({end_dt.strftime('%Y-%m-%d %H:%M:%S')})")
631 | 
632 |         if start_timestamp is None and end_timestamp is None:
633 |             # No time expression found, try direct parsing
634 |             logger.info("No time expression found in query, trying direct parsing")
635 |             start_timestamp, end_timestamp = parse_time_expression(query)
636 |             logger.info(f"Direct parse result: {start_timestamp} to {end_timestamp}")
637 | 
638 |         # Format human-readable time range for response
639 |         time_range_str = ""
640 |         if start_timestamp is not None and end_timestamp is not None:
641 |             start_dt = datetime.fromtimestamp(start_timestamp)
642 |             end_dt = datetime.fromtimestamp(end_timestamp)
643 |             time_range_str = f" from {start_dt.strftime('%Y-%m-%d %H:%M')} to {end_dt.strftime('%Y-%m-%d %H:%M')}"
644 | 
645 |         # Retrieve memories with timestamp filter and optional semantic search
646 |         # If cleaned_query is empty or just whitespace after removing time expressions,
647 |         # we should perform time-based retrieval only
648 |         semantic_query = cleaned_query.strip() if cleaned_query.strip() else None
649 | 
650 |         # Use the enhanced recall method that combines semantic search with time filtering,
651 |         # or just time filtering if no semantic query
652 |         results = await storage.recall(
653 |             query=semantic_query,
654 |             n_results=n_results,
655 |             start_timestamp=start_timestamp,
656 |             end_timestamp=end_timestamp
657 |         )
658 | 
659 |         if not results:
660 |             no_results_msg = f"No memories found{time_range_str}"
661 |             return [types.TextContent(type="text", text=no_results_msg)]
662 | 
663 |         # Format results
664 |         formatted_results = []
665 |         for i, result in enumerate(results):
666 |             memory_dt = result.memory.timestamp
667 | 
668 |             memory_info = [
669 |                 f"Memory {i+1}:",
670 |             ]
671 | 
672 |             # Add timestamp if available
673 |             if memory_dt:
674 |                 memory_info.append(f"Timestamp: {memory_dt.strftime('%Y-%m-%d %H:%M:%S')}")
675 | 
676 |             # Add other memory information
677 |             memory_info.extend([
678 |                 f"Content: {result.memory.content}",
679 |                 f"Hash: {result.memory.content_hash}"
680 |             ])
681 | 
682 |             # Add relevance score if available (may not be for time-only queries)
683 |             if hasattr(result, 'relevance_score') and result.relevance_score is not None:
684 |                 memory_info.append(f"Relevance Score: {result.relevance_score:.2f}")
685 | 
686 |             # Add tags if available
687 |             if result.memory.tags:
688 |                 memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
689 | 
690 |             memory_info.append("---")
691 |             formatted_results.append("\n".join(memory_info))
692 | 
693 |         # Include time range in response if available
694 |         found_msg = f"Found {len(results)} memories{time_range_str}:"
695 |         return [types.TextContent(
696 |             type="text",
697 |             text=f"{found_msg}\n\n" + "\n".join(formatted_results)
698 |         )]
699 | 
700 |     except Exception as e:
701 |         logger.error(f"Error in recall_memory: {str(e)}\n{traceback.format_exc()}")
702 |         return [types.TextContent(type="text", text=f"Error recalling memories: {str(e)}")]
703 | 
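# Plausible walk-through (exact behavior depends on time_parser): for a query
# like "what did I fix last week", extract_time_expression() would return the
# cleaned query "what did I fix" plus (start_timestamp, end_timestamp) covering
# the previous week, so storage.recall() combines semantic search on the
# cleaned text with that time window.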
704 | 
705 | async def handle_recall_by_timeframe(server, arguments: dict) -> List[types.TextContent]:
706 |     """Handle recall by timeframe requests."""
707 |     try:
708 |         # Initialize storage lazily when needed
709 |         storage = await server._ensure_storage_initialized()
710 | 
711 |         start_date = datetime.fromisoformat(arguments["start_date"]).date()
712 |         end_date = datetime.fromisoformat(arguments.get("end_date", arguments["start_date"])).date()
713 |         n_results = arguments.get("n_results", 5)
714 | 
715 |         # Get timestamp range
716 |         start_timestamp = datetime(start_date.year, start_date.month, start_date.day).timestamp()
717 |         end_timestamp = datetime(end_date.year, end_date.month, end_date.day, 23, 59, 59).timestamp()
718 | 
719 |         # Log the timestamp values for debugging
720 |         logger.info(f"Recall by timeframe: {start_date} to {end_date}")
721 |         logger.info(f"Start timestamp: {start_timestamp} ({datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d %H:%M:%S')})")
722 |         logger.info(f"End timestamp: {end_timestamp} ({datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d %H:%M:%S')})")
723 | 
724 |         # Retrieve memories with proper parameters - query is None because this is pure time-based filtering
725 |         results = await storage.recall(
726 |             query=None,
727 |             n_results=n_results,
728 |             start_timestamp=start_timestamp,
729 |             end_timestamp=end_timestamp
730 |         )
731 | 
732 |         if not results:
733 |             return [types.TextContent(type="text", text=f"No memories found from {start_date} to {end_date}")]
734 | 
735 |         formatted_results = []
736 |         for i, result in enumerate(results):
737 |             memory_timestamp = result.memory.timestamp
738 |             memory_info = [
739 |                 f"Memory {i+1}:",
740 |             ]
741 | 
742 |             # Add timestamp if available
743 |             if memory_timestamp:
744 |                 memory_info.append(f"Timestamp: {memory_timestamp.strftime('%Y-%m-%d %H:%M:%S')}")
745 | 
746 |             memory_info.extend([
747 |                 f"Content: {result.memory.content}",
748 |                 f"Hash: {result.memory.content_hash}"
749 |             ])
750 | 
751 |             if result.memory.tags:
752 |                 memory_info.append(f"Tags: {', '.join(result.memory.tags)}")
753 |             memory_info.append("---")
754 |             formatted_results.append("\n".join(memory_info))
755 | 
756 |         return [types.TextContent(
757 |             type="text",
758 |             text=f"Found {len(results)} memories from {start_date} to {end_date}:\n\n" + "\n".join(formatted_results)
759 |         )]
760 | 
761 |     except Exception as e:
762 |         logger.error(f"Error in recall_by_timeframe: {str(e)}\n{traceback.format_exc()}")
763 |         return [types.TextContent(
764 |             type="text",
765 |             text=f"Error recalling memories: {str(e)}"
766 |         )]
767 | 
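# Illustrative arguments for handle_recall_by_timeframe above (ISO dates;
# end_date is optional and defaults to start_date):
#   {"start_date": "2025-06-01", "end_date": "2025-06-07", "n_results": 5}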
768 | 
769 | async def handle_delete_by_timeframe(server, arguments: dict) -> List[types.TextContent]:
770 |     """Handle delete by timeframe requests."""
771 |     try:
772 |         # Initialize storage lazily when needed
773 |         storage = await server._ensure_storage_initialized()
774 | 
775 |         start_date = datetime.fromisoformat(arguments["start_date"]).date()
776 |         end_date = datetime.fromisoformat(arguments.get("end_date", arguments["start_date"])).date()
777 |         tag = arguments.get("tag")
778 | 
779 |         count, message = await storage.delete_by_timeframe(start_date, end_date, tag)
780 |         return [types.TextContent(
781 |             type="text",
782 |             text=f"Deleted {count} memories: {message}"
783 |         )]
784 | 
785 |     except Exception as e:
786 |         return [types.TextContent(
787 |             type="text",
788 |             text=f"Error deleting memories: {str(e)}"
789 |         )]
790 | 
791 | 
792 | async def handle_delete_before_date(server, arguments: dict) -> List[types.TextContent]:
793 |     """Handle delete before date requests."""
794 |     try:
795 |         # Initialize storage lazily when needed
796 |         storage = await server._ensure_storage_initialized()
797 | 
798 |         before_date = datetime.fromisoformat(arguments["before_date"]).date()
799 |         tag = arguments.get("tag")
800 | 
801 |         count, message = await storage.delete_before_date(before_date, tag)
802 |         return [types.TextContent(
803 |             type="text",
804 |             text=f"Deleted {count} memories: {message}"
805 |         )]
806 | 
807 |     except Exception as e:
808 |         return [types.TextContent(
809 |             type="text",
810 |             text=f"Error deleting memories: {str(e)}"
811 |         )]
812 | 
```
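
The dual-type `created_at` handling above (epoch float vs. ISO-8601 string) is duplicated in `handle_retrieve_memory` and `handle_search_by_tag`. A minimal sketch of a shared helper that both could call, using a hypothetical name `format_created_at` and mirroring the existing logic:

```python
from datetime import datetime
from typing import Union

def format_created_at(created_at: Union[int, float, str]) -> str:
    """Render created_at as 'YYYY-MM-DD HH:MM:SS', accepting both epoch
    numbers and ISO-8601 strings (including a trailing 'Z')."""
    try:
        if isinstance(created_at, (int, float)):
            dt = datetime.fromtimestamp(created_at)
        else:
            dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
        return dt.strftime('%Y-%m-%d %H:%M:%S')
    except (ValueError, TypeError):
        # Fall back to the raw value, as the handlers above do.
        return str(created_at)
```

Each handler's timestamp block would then reduce to `memory_info.append(f"Timestamp: {format_created_at(created_at)}")`.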

--------------------------------------------------------------------------------
/tests/integration/test_api_with_memory_service.py:
--------------------------------------------------------------------------------

```python
   1 | """
   2 | Integration tests for API endpoints using MemoryService.
   3 | 
   4 | These tests verify that the API layer correctly integrates with
   5 | MemoryService for all memory operations and maintains consistent behavior.
   6 | """
   7 | 
   8 | import pytest
   9 | import pytest_asyncio
  10 | import tempfile
  11 | import os
  12 | from unittest.mock import AsyncMock, MagicMock, patch
  13 | from fastapi.testclient import TestClient
  14 | 
  15 | from mcp_memory_service.web.dependencies import set_storage, get_memory_service
  16 | from mcp_memory_service.services.memory_service import MemoryService
  17 | from mcp_memory_service.models.memory import Memory, MemoryQueryResult
  18 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
  19 | 
  20 | 
  21 | # Test Fixtures
  22 | 
  23 | @pytest.fixture
  24 | def temp_db():
  25 |     """Create a temporary database for testing."""
  26 |     with tempfile.TemporaryDirectory() as tmpdir:
  27 |         db_path = os.path.join(tmpdir, "test.db")
  28 |         yield db_path
  29 | 
  30 | 
  31 | @pytest_asyncio.fixture
  32 | async def initialized_storage(temp_db):
  33 |     """Create and initialize a real SQLite storage backend."""
  34 |     storage = SqliteVecMemoryStorage(temp_db)
  35 |     await storage.initialize()
  36 |     yield storage
  37 |     storage.close()
  38 | 
  39 | 
  40 | @pytest.fixture
  41 | def test_app(initialized_storage, monkeypatch):
  42 |     """Create a FastAPI test application with initialized storage."""
  43 |     # Disable authentication for tests
  44 |     monkeypatch.setenv('MCP_API_KEY', '')
  45 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
  46 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
  47 | 
  48 |     # Import here to avoid circular dependencies
  49 |     from mcp_memory_service.web.server import app
  50 | 
  51 |     # Set storage for the app
  52 |     set_storage(initialized_storage)
  53 | 
  54 |     client = TestClient(app)
  55 |     yield client
  56 | 
  57 | 
  58 | @pytest.fixture
  59 | def mock_storage():
  60 |     """Create a mock storage for isolated testing."""
  61 |     storage = AsyncMock()
  62 |     storage.max_content_length = None  # Set max_content_length to None to avoid comparison errors
  63 |     return storage
  64 | 
  65 | 
  66 | @pytest.fixture
  67 | def mock_memory_service(mock_storage):
  68 |     """Create a MemoryService with mock storage."""
  69 |     return MemoryService(storage=mock_storage)
  70 | 
  71 | 
  72 | @pytest.fixture
  73 | def sample_memory():
  74 |     """Create a sample memory for testing."""
  75 |     return Memory(
  76 |         content="Integration test memory",
  77 |         content_hash="test_hash_123",
  78 |         tags=["integration", "test"],
  79 |         memory_type="note",
  80 |         metadata={"source": "test"},
  81 |         created_at=1698765432.0,
  82 |         updated_at=1698765432.0
  83 |     )
  84 | 
  85 | 
  86 | # Test API Store Memory Endpoint
  87 | 
  88 | @pytest.mark.asyncio
  89 | async def test_api_store_memory_uses_service(mock_storage, unique_content):
  90 |     """Test that POST /api/memories uses MemoryService."""
  91 |     mock_storage.store.return_value = (True, "Memory stored successfully")
  92 | 
  93 |     # Create service
  94 |     service = MemoryService(storage=mock_storage)
  95 | 
  96 |     # Simulate API call through service
  97 |     result = await service.store_memory(
  98 |         content=unique_content("Test API storage"),
  99 |         tags=["api", "test"],
 100 |         memory_type="note"
 101 |     )
 102 | 
 103 |     assert result["success"] is True
 104 |     assert "memory" in result
 105 |     mock_storage.store.assert_called_once()
 106 | 
 107 | 
 108 | @pytest.mark.asyncio
 109 | async def test_api_store_memory_hostname_from_header(mock_storage, unique_content):
 110 |     """Test that X-Client-Hostname header is processed correctly."""
 111 |     mock_storage.store.return_value = (True, "Memory stored successfully")
 112 | 
 113 |     service = MemoryService(storage=mock_storage)
 114 | 
 115 |     # Simulate API call with hostname
 116 |     result = await service.store_memory(
 117 |         content=unique_content("Test with hostname"),
 118 |         tags=["test"],
 119 |         client_hostname="client-machine"
 120 |     )
 121 | 
 122 |     # Verify hostname tag was added
 123 |     stored_memory = mock_storage.store.call_args.args[0]
 124 |     assert "source:client-machine" in stored_memory.tags
 125 |     assert stored_memory.metadata["hostname"] == "client-machine"
 126 | 
 127 | 
 128 | @pytest.mark.asyncio
 129 | async def test_api_store_memory_hostname_from_request_body(mock_storage, unique_content):
 130 |     """Test that client_hostname in request body works."""
 131 |     mock_storage.store.return_value = (True, "Memory stored successfully")
 132 | 
 133 |     service = MemoryService(storage=mock_storage)
 134 | 
 135 |     # Simulate API call with hostname in body
 136 |     result = await service.store_memory(
 137 |         content=unique_content("Test"),
 138 |         client_hostname="body-hostname"
 139 |     )
 140 | 
 141 |     stored_memory = mock_storage.store.call_args.args[0]
 142 |     assert "source:body-hostname" in stored_memory.tags
 143 | 
 144 | 
 145 | # Test API List Memories Endpoint
 146 | 
 147 | @pytest.mark.asyncio
 148 | async def test_api_list_memories_uses_database_filtering(mock_storage):
 149 |     """Test that GET /api/memories uses database-level filtering."""
 150 |     # Setup mock to return limited results
 151 |     mock_storage.get_all_memories.return_value = []
 152 |     mock_storage.count_all_memories.return_value = 1000
 153 | 
 154 |     service = MemoryService(storage=mock_storage)
 155 | 
 156 |     # Request page 1 with 10 items from 1000 total
 157 |     result = await service.list_memories(page=1, page_size=10)
 158 | 
 159 |     # CRITICAL: Verify only 10 items requested, not all 1000
 160 |     # This proves database-level filtering, not O(n) loading
 161 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 162 |     assert call_kwargs["limit"] == 10
 163 |     assert call_kwargs["offset"] == 0
 164 |     assert result["total"] == 1000
 165 |     assert result["has_more"] is True
 166 | 
 167 | 
 168 | @pytest.mark.asyncio
 169 | async def test_api_list_memories_pagination_through_service(mock_storage):
 170 |     """Test end-to-end pagination workflow."""
 171 |     # Create mock memories
 172 |     memories = [
 173 |         Memory(
 174 |             content=f"Memory {i}",
 175 |             content_hash=f"hash_{i}",
 176 |             tags=["test"],
 177 |             memory_type="note",
 178 |             metadata={},
 179 |             created_at=1698765432.0 + i,
 180 |             updated_at=1698765432.0 + i
 181 |         )
 182 |         for i in range(25)
 183 |     ]
 184 | 
 185 |     # Page 1: First 10 memories
 186 |     mock_storage.get_all_memories.return_value = memories[:10]
 187 |     mock_storage.count_all_memories.return_value = 25
 188 | 
 189 |     service = MemoryService(storage=mock_storage)
 190 |     page1 = await service.list_memories(page=1, page_size=10)
 191 | 
 192 |     assert page1["page"] == 1
 193 |     assert page1["page_size"] == 10
 194 |     assert page1["total"] == 25
 195 |     assert page1["has_more"] is True
 196 |     assert len(page1["memories"]) == 10
 197 | 
 198 |     # Page 2: Next 10 memories
 199 |     mock_storage.get_all_memories.return_value = memories[10:20]
 200 |     page2 = await service.list_memories(page=2, page_size=10)
 201 | 
 202 |     assert page2["page"] == 2
 203 |     assert page2["has_more"] is True
 204 | 
 205 |     # Page 3: Last 5 memories
 206 |     mock_storage.get_all_memories.return_value = memories[20:25]
 207 |     page3 = await service.list_memories(page=3, page_size=10)
 208 | 
 209 |     assert page3["page"] == 3
 210 |     assert page3["has_more"] is False
 211 |     assert len(page3["memories"]) == 5
 212 | 
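# has_more is presumably computed as page * page_size < total:
#   page 1: 1 * 10 < 25 -> True;  page 3: 3 * 10 < 25 -> False.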
 213 | 
 214 | @pytest.mark.asyncio
 215 | async def test_api_list_memories_tag_filter(mock_storage):
 216 |     """Test filtering by tag through API."""
 217 |     mock_storage.get_all_memories.return_value = []
 218 |     mock_storage.count_all_memories.return_value = 0
 219 | 
 220 |     service = MemoryService(storage=mock_storage)
 221 | 
 222 |     result = await service.list_memories(page=1, page_size=10, tag="important")
 223 | 
 224 |     # Verify tag passed to storage as list
 225 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 226 |     assert call_kwargs["tags"] == ["important"]
 227 | 
 228 | 
 229 | @pytest.mark.asyncio
 230 | async def test_api_list_memories_type_filter(mock_storage):
 231 |     """Test filtering by memory type through API."""
 232 |     mock_storage.get_all_memories.return_value = []
 233 |     mock_storage.count_all_memories.return_value = 0
 234 | 
 235 |     service = MemoryService(storage=mock_storage)
 236 | 
 237 |     result = await service.list_memories(page=1, page_size=10, memory_type="reference")
 238 | 
 239 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 240 |     assert call_kwargs["memory_type"] == "reference"
 241 | 
 242 | 
 243 | @pytest.mark.asyncio
 244 | async def test_api_list_memories_combined_filters(mock_storage):
 245 |     """Test combining tag and type filters."""
 246 |     mock_storage.get_all_memories.return_value = []
 247 |     mock_storage.count_all_memories.return_value = 0
 248 | 
 249 |     service = MemoryService(storage=mock_storage)
 250 | 
 251 |     result = await service.list_memories(
 252 |         page=1,
 253 |         page_size=10,
 254 |         tag="work",
 255 |         memory_type="task"
 256 |     )
 257 | 
 258 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 259 |     assert call_kwargs["tags"] == ["work"]
 260 |     assert call_kwargs["memory_type"] == "task"
 261 | 
 262 | 
 263 | # Test API Search Endpoints
 264 | 
 265 | @pytest.mark.asyncio
 266 | async def test_api_semantic_search_uses_service(mock_storage, sample_memory):
 267 |     """Test POST /api/search uses MemoryService."""
 268 |     mock_storage.retrieve.return_value = [
 269 |         MemoryQueryResult(memory=sample_memory, relevance_score=0.9)
 270 |     ]
 271 | 
 272 |     service = MemoryService(storage=mock_storage)
 273 | 
 274 |     result = await service.retrieve_memories(query="test query", n_results=5)
 275 | 
 276 |     assert result["query"] == "test query"
 277 |     assert result["count"] == 1
 278 |     mock_storage.retrieve.assert_called_once()
 279 | 
 280 | 
 281 | @pytest.mark.asyncio
 282 | async def test_api_tag_search_uses_service(mock_storage, sample_memory):
 283 |     """Test POST /api/search/by-tag uses MemoryService."""
 284 |     mock_storage.search_by_tag.return_value = [sample_memory]
 285 | 
 286 |     service = MemoryService(storage=mock_storage)
 287 | 
 288 |     result = await service.search_by_tag(tags=["test"], match_all=False)
 289 | 
 290 |     assert result["tags"] == ["test"]
 291 |     assert result["match_type"] == "ANY"
 292 |     assert result["count"] == 1
 293 | 
 294 | 
 295 | @pytest.mark.asyncio
 296 | async def test_api_time_search_uses_service(mock_storage, sample_memory):
 297 |     """Test POST /api/search/by-time flow (if applicable)."""
 298 |     # Note: Time search might use retrieve_memories with time filters
 299 |     mock_storage.retrieve.return_value = [
 300 |         MemoryQueryResult(memory=sample_memory, relevance_score=0.9)
 301 |     ]
 302 | 
 303 |     service = MemoryService(storage=mock_storage)
 304 | 
 305 |     # Simulate time-based search
 306 |     result = await service.retrieve_memories(query="last week", n_results=10)
 307 | 
 308 |     assert "memories" in result
 309 | 
 310 | 
 311 | # Test API Delete Endpoint
 312 | 
 313 | @pytest.mark.asyncio
 314 | async def test_api_delete_memory_uses_service(mock_storage):
 315 |     """Test DELETE /api/memories/{hash} uses MemoryService."""
 316 |     mock_storage.delete.return_value = (True, "Memory deleted successfully")
 317 | 
 318 |     service = MemoryService(storage=mock_storage)
 319 | 
 320 |     result = await service.delete_memory("test_hash_123")
 321 | 
 322 |     assert result["success"] is True
 323 |     assert result["content_hash"] == "test_hash_123"
 324 |     mock_storage.delete.assert_called_once_with("test_hash_123")
 325 | 
 326 | 
 327 | @pytest.mark.asyncio
 328 | async def test_api_delete_memory_not_found(mock_storage):
 329 |     """Test deleting non-existent memory returns proper response."""
 330 |     mock_storage.delete.return_value = (False, "Memory not found")
 331 | 
 332 |     service = MemoryService(storage=mock_storage)
 333 | 
 334 |     result = await service.delete_memory("nonexistent")
 335 | 
 336 |     assert result["success"] is False
 337 | 
 338 | 
 339 | # Test API Get Memory Endpoint
 340 | 
 341 | @pytest.mark.asyncio
 342 | async def test_api_get_memory_by_hash_uses_service(mock_storage, sample_memory):
 343 |     """Test GET /api/memories/{hash} uses MemoryService."""
 344 |     mock_storage.get_by_hash.return_value = sample_memory
 345 | 
 346 |     service = MemoryService(storage=mock_storage)
 347 | 
 348 |     result = await service.get_memory_by_hash("test_hash_123")
 349 | 
 350 |     assert result["found"] is True
 351 |     assert result["memory"]["content_hash"] == "test_hash_123"
 352 |     mock_storage.get_by_hash.assert_called_once_with("test_hash_123")
 353 | 
 354 | 
 355 | # Test Dependency Injection
 356 | 
 357 | def test_get_memory_service_dependency_injection():
 358 |     """Test that get_memory_service creates service with correct storage."""
 359 |     from mcp_memory_service.web.dependencies import get_memory_service
 360 | 
 361 |     # Create mock storage
 362 |     mock_storage = MagicMock()
 363 | 
 364 |     # No dependency override is needed here: get_memory_service accepts
 365 |     # the storage instance directly, so the mock can be injected
 366 |     # explicitly.
 367 | 
 368 |     # Get service
 369 |     service = get_memory_service(storage=mock_storage)
 370 | 
 371 |     assert isinstance(service, MemoryService)
 372 |     assert service.storage == mock_storage
 373 | 
 374 | 
 375 | # Performance and Scaling Tests
 376 | 
 377 | @pytest.mark.asyncio
 378 | async def test_list_memories_performance_with_large_dataset(mock_storage):
 379 |     """
 380 |     Test that list_memories remains efficient with large datasets.
 381 | 
 382 |     This verifies the fix for O(n) memory loading anti-pattern.
 383 |     """
 384 |     # Simulate 10,000 memories in database
 385 |     mock_storage.get_all_memories.return_value = []
 386 |     mock_storage.count_all_memories.return_value = 10000
 387 | 
 388 |     service = MemoryService(storage=mock_storage)
 389 | 
 390 |     # Request just 20 items
 391 |     result = await service.list_memories(page=1, page_size=20)
 392 | 
 393 |     # CRITICAL: Verify we only queried for 20 items, not all 10,000
 394 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 395 |     assert call_kwargs["limit"] == 20
 396 |     assert call_kwargs["offset"] == 0
 397 | 
 398 |     # This proves database-level filtering prevents loading 10,000 records
 399 |     assert result["total"] == 10000
 400 |     assert result["has_more"] is True
 401 | 
 402 | 
 403 | @pytest.mark.asyncio
 404 | async def test_tag_filter_performance(mock_storage):
 405 |     """Test that tag filtering happens at database level."""
 406 |     mock_storage.get_all_memories.return_value = []
 407 |     mock_storage.count_all_memories.return_value = 50
 408 | 
 409 |     service = MemoryService(storage=mock_storage)
 410 | 
 411 |     result = await service.list_memories(page=1, page_size=10, tag="important")
 412 | 
 413 |     # Verify tag filter passed to database query
 414 |     call_kwargs = mock_storage.get_all_memories.call_args.kwargs
 415 |     assert call_kwargs["tags"] == ["important"]
 416 | 
 417 |     # Result should only reflect filtered count
 418 |     assert result["total"] == 50  # Only memories matching tag
 419 | 
 420 | 
 421 | # Error Handling Tests
 422 | 
 423 | @pytest.mark.asyncio
 424 | async def test_api_handles_storage_errors_gracefully(mock_storage):
 425 |     """Test that API returns proper errors when storage fails."""
 426 |     mock_storage.get_all_memories.side_effect = Exception("Database connection lost")
 427 | 
 428 |     service = MemoryService(storage=mock_storage)
 429 | 
 430 |     result = await service.list_memories(page=1, page_size=10)
 431 | 
 432 |     assert result["success"] is False
 433 |     assert "error" in result
 434 |     assert "Database connection lost" in result["error"]
 435 | 
 436 | 
 437 | @pytest.mark.asyncio
 438 | async def test_api_validates_input_through_service(mock_storage, unique_content):
 439 |     """Test that validation errors from storage are handled."""
 440 |     mock_storage.store.side_effect = ValueError("Invalid content format")
 441 | 
 442 |     service = MemoryService(storage=mock_storage)
 443 | 
 444 |     result = await service.store_memory(content=unique_content("invalid"))
 445 | 
 446 |     assert result["success"] is False
 447 |     assert "Invalid memory data" in result["error"]
 448 | 
 449 | 
 450 | # Consistency Tests
 451 | 
 452 | @pytest.mark.asyncio
 453 | async def test_api_and_mcp_use_same_service_logic(mock_storage, unique_content):
 454 |     """
 455 |     Test that API and MCP tools use the same MemoryService logic.
 456 | 
 457 |     This verifies the DRY principle - both interfaces share the same
 458 |     business logic through MemoryService.
 459 |     """
 460 |     service = MemoryService(storage=mock_storage)
 461 | 
 462 |     # Store through service (used by both API and MCP)
 463 |     mock_storage.store.return_value = (True, "Memory stored successfully")
 464 |     result1 = await service.store_memory(content=unique_content("Test"), tags=["shared"])
 465 | 
 466 |     # Retrieve through service (used by both API and MCP)
 467 |     mock_storage.retrieve.return_value = []
 468 |     result2 = await service.retrieve_memories(query="test")
 469 | 
 470 |     # Both operations used the same service
 471 |     assert result1["success"] is True
 472 |     assert "memories" in result2
 473 | 
 474 | 
 475 | @pytest.mark.asyncio
 476 | async def test_response_format_consistency(mock_storage, sample_memory):
 477 |     """Test that all service methods return consistently formatted responses."""
 478 |     mock_storage.get_all_memories.return_value = [sample_memory]
 479 |     mock_storage.count_all_memories.return_value = 1
 480 |     mock_storage.retrieve.return_value = [
 481 |         MemoryQueryResult(memory=sample_memory, relevance_score=0.9)
 482 |     ]
 483 |     mock_storage.search_by_tag.return_value = [sample_memory]
 484 | 
 485 |     service = MemoryService(storage=mock_storage)
 486 | 
 487 |     # Get responses from different methods
 488 |     list_result = await service.list_memories(page=1, page_size=10)
 489 |     retrieve_result = await service.retrieve_memories(query="test")
 490 |     tag_result = await service.search_by_tag(tags="test")
 491 | 
 492 |     # All should have consistently formatted memories
 493 |     list_memory = list_result["memories"][0]
 494 |     retrieve_memory = retrieve_result["memories"][0]
 495 |     tag_memory = tag_result["memories"][0]
 496 | 
 497 |     # Verify all have same format
 498 |     required_fields = ["content", "content_hash", "tags", "memory_type", "created_at"]
 499 |     for field in required_fields:
 500 |         assert field in list_memory
 501 |         assert field in retrieve_memory
 502 |         assert field in tag_memory
 503 | 
 504 | 
 505 | # Real Storage Integration Test (End-to-End)
 506 | 
 507 | @pytest.mark.asyncio
 508 | @pytest.mark.integration
 509 | async def test_end_to_end_workflow_with_real_storage(temp_db):
 510 |     """
 511 |     End-to-end test with real SQLite storage (not mocked).
 512 | 
 513 |     This verifies the complete integration stack works correctly.
 514 |     """
 515 |     # Create real storage
 516 |     storage = SqliteVecMemoryStorage(temp_db)
 517 |     await storage.initialize()
 518 | 
 519 |     try:
 520 |         # Create service with real storage
 521 |         service = MemoryService(storage=storage)
 522 | 
 523 |         # Store a memory
 524 |         store_result = await service.store_memory(
 525 |             content="End-to-end test memory",
 526 |             tags=["e2e", "integration"],
 527 |             memory_type="test"
 528 |         )
 529 |         assert store_result["success"] is True
 530 | 
 531 |         # List memories
 532 |         list_result = await service.list_memories(page=1, page_size=10)
 533 |         assert len(list_result["memories"]) > 0
 534 | 
 535 |         # Search by tag
 536 |         tag_result = await service.search_by_tag(tags="e2e")
 537 |         assert len(tag_result["memories"]) > 0
 538 | 
 539 |         # Get specific memory
 540 |         content_hash = store_result["memory"]["content_hash"]
 541 |         get_result = await service.get_memory_by_hash(content_hash)
 542 |         assert get_result["found"] is True
 543 | 
 544 |         # Delete memory
 545 |         delete_result = await service.delete_memory(content_hash)
 546 |         assert delete_result["success"] is True
 547 | 
 548 |         # Verify deleted
 549 |         get_after_delete = await service.get_memory_by_hash(content_hash)
 550 |         assert get_after_delete["found"] is False
 551 | 
 552 |     finally:
 553 |         storage.close()
 554 | 
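# To run only these end-to-end cases (assuming the `integration` marker is
# registered in the project's pytest configuration):
#   pytest -m integration tests/integration/test_api_with_memory_service.py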
 555 | 
 556 | # Real HTTP API Integration Tests with TestClient
 557 | 
 558 | @pytest.mark.asyncio
 559 | @pytest.mark.integration
 560 | async def test_http_api_store_memory_endpoint(temp_db, unique_content, monkeypatch):
 561 |     """
 562 |     Test POST /api/memories endpoint with real HTTP request.
 563 | 
 564 |     Uses TestClient to make actual HTTP request to FastAPI app.
 565 |     """
 566 |     # Disable authentication for tests
 567 |     monkeypatch.setenv('MCP_API_KEY', '')
 568 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 569 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 570 | 
 571 |     # Create real storage
 572 |     storage = SqliteVecMemoryStorage(temp_db)
 573 |     await storage.initialize()
 574 | 
 575 |     try:
 576 |         # Import app and set storage
 577 |         from mcp_memory_service.web.app import app
 578 |         set_storage(storage)
 579 | 
 580 |         # Create TestClient
 581 |         client = TestClient(app)
 582 | 
 583 |         # Generate unique content
 584 |         content = unique_content("HTTP API test memory")
 585 | 
 586 |         # Make HTTP POST request
 587 |         response = client.post(
 588 |             "/api/memories",
 589 |             json={
 590 |                 "content": content,
 591 |                 "tags": ["http", "api", "test"],
 592 |                 "memory_type": "note"
 593 |             }
 594 |         )
 595 | 
 596 |         # Verify response
 597 |         assert response.status_code == 200
 598 |         data = response.json()
 599 |         assert data["success"] is True
 600 |         assert "memory" in data
 601 |         assert data["memory"]["content"] == content
 602 |         assert "http" in data["memory"]["tags"]
 603 | 
 604 |     finally:
 605 |         storage.close()
 606 | 
 607 | 
 608 | @pytest.mark.asyncio
 609 | @pytest.mark.integration
 610 | async def test_http_api_list_memories_endpoint(temp_db, unique_content, monkeypatch):
 611 |     """
 612 |     Test GET /api/memories endpoint with real HTTP request.
 613 | 
 614 |     Verifies pagination and filtering work through HTTP API.
 615 |     """
 616 |     # Disable authentication for tests
 617 |     monkeypatch.setenv('MCP_API_KEY', '')
 618 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 619 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 620 | 
 621 |     storage = SqliteVecMemoryStorage(temp_db)
 622 |     await storage.initialize()
 623 | 
 624 |     try:
 625 |         from mcp_memory_service.web.app import app
 626 |         set_storage(storage)
 627 | 
 628 |         # Store test memories first
 629 |         service = MemoryService(storage=storage)
 630 |         for i in range(5):
 631 |             await service.store_memory(
 632 |                 content=unique_content(f"Test memory {i}"),
 633 |                 tags=["test"],
 634 |                 memory_type="note"
 635 |             )
 636 | 
 637 |         # Make HTTP GET request
 638 |         client = TestClient(app)
 639 |         response = client.get("/api/memories?page=1&page_size=10")
 640 | 
 641 |         # Verify response
 642 |         assert response.status_code == 200
 643 |         data = response.json()
 644 |         assert "memories" in data
 645 |         assert len(data["memories"]) == 5
 646 |         assert data["total"] == 5
 647 |         assert data["page"] == 1
 648 | 
 649 |     finally:
 650 |         storage.close()
 651 | 
 652 | 
 653 | @pytest.mark.asyncio
 654 | @pytest.mark.integration
 655 | async def test_http_api_search_endpoint(temp_db, unique_content, monkeypatch):
 656 |     """
 657 |     Test POST /api/search endpoint with real HTTP request.
 658 | 
 659 |     Verifies semantic search works through HTTP API.
 660 |     """
 661 |     # Disable authentication for tests
 662 |     monkeypatch.setenv('MCP_API_KEY', '')
 663 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 664 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 665 | 
 666 |     storage = SqliteVecMemoryStorage(temp_db)
 667 |     await storage.initialize()
 668 | 
 669 |     try:
 670 |         from mcp_memory_service.web.app import app
 671 |         set_storage(storage)
 672 | 
 673 |         # Store searchable memory
 674 |         service = MemoryService(storage=storage)
 675 |         await service.store_memory(
 676 |             content=unique_content("Python programming language tutorial"),
 677 |             tags=["python", "tutorial"],
 678 |             memory_type="reference"
 679 |         )
 680 | 
 681 |         # Make HTTP POST request for search
 682 |         client = TestClient(app)
 683 |         response = client.post(
 684 |             "/api/search",
 685 |             json={"query": "python tutorial", "limit": 5}
 686 |         )
 687 | 
 688 |         # Verify response
 689 |         assert response.status_code == 200
 690 |         data = response.json()
 691 |         assert "results" in data
 692 |         assert data["query"] == "python tutorial"
 693 | 
 694 |     finally:
 695 |         storage.close()
 696 | 
 697 | 
 698 | @pytest.mark.asyncio
 699 | @pytest.mark.integration
 700 | async def test_http_api_search_by_tag_endpoint(temp_db, unique_content, monkeypatch):
 701 |     """
 702 |     Test POST /api/search/by-tag endpoint with real HTTP request.
 703 | 
 704 |     Verifies tag search works through HTTP API.
 705 |     """
 706 |     # Disable authentication for tests
 707 |     monkeypatch.setenv('MCP_API_KEY', '')
 708 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 709 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 710 | 
 711 |     storage = SqliteVecMemoryStorage(temp_db)
 712 |     await storage.initialize()
 713 | 
 714 |     try:
 715 |         from mcp_memory_service.web.app import app
 716 |         set_storage(storage)
 717 | 
 718 |         # Store memories with tags
 719 |         service = MemoryService(storage=storage)
 720 |         await service.store_memory(
 721 |             content=unique_content("Important work item"),
 722 |             tags=["important", "work"],
 723 |             memory_type="task"
 724 |         )
 725 |         await service.store_memory(
 726 |             content=unique_content("Personal note"),
 727 |             tags=["personal"],
 728 |             memory_type="note"
 729 |         )
 730 | 
 731 |         # Search by tag via HTTP
 732 |         client = TestClient(app)
 733 |         response = client.post(
 734 |             "/api/search/by-tag",
 735 |             json={"tags": ["important"], "limit": 10}
 736 |         )
 737 | 
 738 |         # Verify response
 739 |         assert response.status_code == 200
 740 |         data = response.json()
 741 |         assert len(data["results"]) == 1
 742 |         assert "important" in data["results"][0]["memory"]["tags"]
 743 | 
 744 |     finally:
 745 |         storage.close()
 746 | 
 747 | 
 748 | @pytest.mark.asyncio
 749 | @pytest.mark.integration
 750 | async def test_http_api_get_memory_by_hash_endpoint(temp_db, unique_content, monkeypatch):
 751 |     """
 752 |     Test GET /api/memories/{hash} endpoint with real HTTP request.
 753 | 
 754 |     Verifies retrieving specific memory by hash works.
 755 |     """
 756 |     # Disable authentication for tests
 757 |     monkeypatch.setenv('MCP_API_KEY', '')
 758 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 759 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 760 | 
 761 |     storage = SqliteVecMemoryStorage(temp_db)
 762 |     await storage.initialize()
 763 | 
 764 |     try:
 765 |         from mcp_memory_service.web.app import app
 766 |         set_storage(storage)
 767 | 
 768 |         # Store a memory
 769 |         service = MemoryService(storage=storage)
 770 |         content = unique_content("Memory to retrieve")
 771 |         store_result = await service.store_memory(
 772 |             content=content,
 773 |             tags=["test"],
 774 |             memory_type="note"
 775 |         )
 776 |         content_hash = store_result["memory"]["content_hash"]
 777 | 
 778 |         # Retrieve via HTTP
 779 |         client = TestClient(app)
 780 |         response = client.get(f"/api/memories/{content_hash}")
 781 | 
 782 |         # Verify response
 783 |         assert response.status_code == 200
 784 |         data = response.json()
 785 |         assert data["content"] == content
 786 |         assert data["content_hash"] == content_hash
 787 | 
 788 |     finally:
 789 |         storage.close()
 790 | 
 791 | 
 792 | @pytest.mark.asyncio
 793 | @pytest.mark.integration
 794 | async def test_http_api_delete_memory_endpoint(temp_db, unique_content, monkeypatch):
 795 |     """
 796 |     Test DELETE /api/memories/{hash} endpoint with real HTTP request.
 797 | 
 798 |     Verifies deletion works through HTTP API.
 799 |     """
 800 |     # Disable authentication for tests
 801 |     monkeypatch.setenv('MCP_API_KEY', '')
 802 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 803 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 804 | 
 805 |     storage = SqliteVecMemoryStorage(temp_db)
 806 |     await storage.initialize()
 807 | 
 808 |     try:
 809 |         from mcp_memory_service.web.app import app
 810 |         set_storage(storage)
 811 | 
 812 |         # Store a memory
 813 |         service = MemoryService(storage=storage)
 814 |         store_result = await service.store_memory(
 815 |             content=unique_content("Memory to delete"),
 816 |             tags=["test"],
 817 |             memory_type="note"
 818 |         )
 819 |         content_hash = store_result["memory"]["content_hash"]
 820 | 
 821 |         # Delete via HTTP
 822 |         client = TestClient(app)
 823 |         response = client.delete(f"/api/memories/{content_hash}")
 824 | 
 825 |         # Verify response
 826 |         assert response.status_code == 200
 827 |         data = response.json()
 828 |         assert data["success"] is True
 829 | 
 830 |         # Verify memory is gone
 831 |         get_response = client.get(f"/api/memories/{content_hash}")
 832 |         assert get_response.status_code == 404
 833 | 
 834 |     finally:
 835 |         storage.close()
 836 | 
 837 | 
 838 | @pytest.mark.asyncio
 839 | @pytest.mark.integration
 840 | async def test_http_api_pagination_with_real_data(temp_db, unique_content, monkeypatch):
 841 |     """
 842 |     Test pagination through HTTP API with multiple pages.
 843 | 
 844 |     Verifies database-level pagination prevents O(n) loading.
 845 |     """
 846 |     # Disable authentication for tests
 847 |     monkeypatch.setenv('MCP_API_KEY', '')
 848 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 849 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 850 | 
 851 |     storage = SqliteVecMemoryStorage(temp_db)
 852 |     await storage.initialize()
 853 | 
 854 |     try:
 855 |         from mcp_memory_service.web.app import app
 856 |         set_storage(storage)
 857 | 
 858 |         # Store 25 memories
 859 |         service = MemoryService(storage=storage)
 860 |         for i in range(25):
 861 |             await service.store_memory(
 862 |                 content=unique_content(f"Pagination test {i}"),
 863 |                 tags=["pagination"],
 864 |                 memory_type="note"
 865 |             )
 866 | 
 867 |         client = TestClient(app)
 868 | 
 869 |         # Page 1: First 10
 870 |         response1 = client.get("/api/memories?page=1&page_size=10")
 871 |         assert response1.status_code == 200
 872 |         data1 = response1.json()
 873 |         assert len(data1["memories"]) == 10
 874 |         assert data1["total"] == 25
 875 |         assert data1["has_more"] is True
 876 | 
 877 |         # Page 2: Next 10
 878 |         response2 = client.get("/api/memories?page=2&page_size=10")
 879 |         data2 = response2.json()
 880 |         assert len(data2["memories"]) == 10
 881 |         assert data2["has_more"] is True
 882 | 
 883 |         # Page 3: Last 5
 884 |         response3 = client.get("/api/memories?page=3&page_size=10")
 885 |         data3 = response3.json()
 886 |         assert len(data3["memories"]) == 5
 887 |         assert data3["has_more"] is False
 888 | 
 889 |     finally:
 890 |         storage.close()
 891 | 
 892 | 
 893 | @pytest.mark.asyncio
 894 | @pytest.mark.integration
 895 | async def test_http_api_error_handling_invalid_json(temp_db, monkeypatch):
 896 |     """
 897 |     Test that HTTP API handles malformed JSON gracefully.
 898 | 
 899 |     This would have caught v8.12.0 syntax errors.
 900 |     """
 901 |     # Disable authentication for tests
 902 |     monkeypatch.setenv('MCP_API_KEY', '')
 903 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 904 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 905 | 
 906 |     storage = SqliteVecMemoryStorage(temp_db)
 907 |     await storage.initialize()
 908 | 
 909 |     try:
 910 |         from mcp_memory_service.web.app import app
 911 |         set_storage(storage)
 912 | 
 913 |         client = TestClient(app)
 914 | 
 915 |         # Send malformed JSON
 916 |         response = client.post(
 917 |             "/api/memories",
 918 |             content="{'this': 'is not valid json}",  # raw body; invalid JSON (single quotes, unclosed string)
 919 |             headers={"Content-Type": "application/json"}
 920 |         )
 921 | 
 922 |         # Should return 400 or 422, not 500
 923 |         assert response.status_code in [400, 422]
 924 | 
 925 |     finally:
 926 |         storage.close()
 927 | 
 928 | 
 929 | @pytest.mark.asyncio
 930 | @pytest.mark.integration
 931 | async def test_http_api_client_hostname_header(temp_db, unique_content, monkeypatch):
 932 |     """
 933 |     Test that X-Client-Hostname header is processed correctly.
 934 | 
 935 |     Verifies hostname tagging works through real HTTP request.
 936 |     NOTE: Hostname tagging is only enabled when MCP_MEMORY_INCLUDE_HOSTNAME=true.
 937 |     """
 938 |     # Disable authentication for tests
 939 |     monkeypatch.setenv('MCP_API_KEY', '')
 940 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 941 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 942 | 
 943 |     # Enable hostname tagging for this test
 944 |     monkeypatch.setenv('MCP_MEMORY_INCLUDE_HOSTNAME', 'true')
 945 | 
 946 |     # Must reload config module to pick up new env var
 947 |     import importlib
 948 |     from mcp_memory_service import config
 949 |     importlib.reload(config)
 950 | 
 951 |     # Also patch the already-imported INCLUDE_HOSTNAME in memories.py
 952 |     from mcp_memory_service.web.api import memories
 953 |     monkeypatch.setattr(memories, 'INCLUDE_HOSTNAME', True)
 954 | 
 955 |     storage = SqliteVecMemoryStorage(temp_db)
 956 |     await storage.initialize()
 957 | 
 958 |     try:
 959 |         from mcp_memory_service.web.app import app
 960 |         set_storage(storage)
 961 | 
 962 |         client = TestClient(app)
 963 | 
 964 |         # Send request with hostname header
 965 |         response = client.post(
 966 |             "/api/memories",
 967 |             json={
 968 |                 "content": unique_content("Test with hostname"),
 969 |                 "tags": ["test"]
 970 |             },
 971 |             headers={"X-Client-Hostname": "test-machine"}
 972 |         )
 973 | 
 974 |         # Verify hostname tag added
 975 |         assert response.status_code == 200
 976 |         data = response.json()
 977 |         assert "source:test-machine" in data["memory"]["tags"]
 978 |         assert data["memory"]["metadata"]["hostname"] == "test-machine"
 979 | 
 980 |     finally:
 981 |         storage.close()
 982 | 
 983 | 
 984 | @pytest.mark.asyncio
 985 | @pytest.mark.integration
 986 | async def test_http_api_complete_crud_workflow(temp_db, unique_content, monkeypatch):
 987 |     """
 988 |     Complete end-to-end create/read/search/delete workflow through the real HTTP API.
 989 | 
 990 |     This verifies the entire HTTP API stack works correctly.
 991 |     """
 992 |     # Disable authentication for tests
 993 |     monkeypatch.setenv('MCP_API_KEY', '')
 994 |     monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
 995 |     monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')
 996 | 
 997 |     storage = SqliteVecMemoryStorage(temp_db)
 998 |     await storage.initialize()
 999 | 
1000 |     try:
1001 |         from mcp_memory_service.web.app import app
1002 |         set_storage(storage)
1003 | 
1004 |         client = TestClient(app)
1005 | 
1006 |         # Generate unique content
1007 |         content = unique_content("CRUD test memory")
1008 | 
1009 |         # CREATE: Store a memory
1010 |         create_response = client.post(
1011 |             "/api/memories",
1012 |             json={
1013 |                 "content": content,
1014 |                 "tags": ["crud", "test"],
1015 |                 "memory_type": "note"
1016 |             }
1017 |         )
1018 |         assert create_response.status_code == 200
1019 |         content_hash = create_response.json()["memory"]["content_hash"]
1020 | 
1021 |         # READ: List all memories
1022 |         list_response = client.get("/api/memories")
1023 |         assert list_response.status_code == 200
1024 |         assert len(list_response.json()["memories"]) > 0
1025 | 
1026 |         # READ: Get specific memory
1027 |         get_response = client.get(f"/api/memories/{content_hash}")
1028 |         assert get_response.status_code == 200
1029 |         assert get_response.json()["content"] == content
1030 | 
1031 |         # SEARCH: Locate the memory via semantic search (no UPDATE endpoint in this workflow)
1032 |         search_response = client.post(
1033 |             "/api/search",
1034 |             json={"query": "CRUD test", "limit": 5}
1035 |         )
1036 |         assert search_response.status_code == 200
1037 |         assert len(search_response.json()["results"]) > 0
1038 | 
1039 |         # DELETE: Remove memory
1040 |         delete_response = client.delete(f"/api/memories/{content_hash}")
1041 |         assert delete_response.status_code == 200
1042 |         assert delete_response.json()["success"] is True
1043 | 
1044 |         # VERIFY: Memory is gone
1045 |         verify_response = client.get(f"/api/memories/{content_hash}")
1046 |         assert verify_response.status_code == 404
1047 | 
1048 |     finally:
1049 |         storage.close()
1050 | 
```
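
The integration tests on this page all share the same scaffolding: disable authentication via environment variables, initialize `SqliteVecMemoryStorage` against `temp_db`, wire it into the app with `set_storage`, and close the storage in a `finally` block. A minimal sketch of a `conftest.py` fixture that would consolidate this boilerplate follows; the fixture name `http_client` is hypothetical, and the import paths are assumptions meant to match the ones at the top of this test file (not visible on this page).

```python
# conftest.py (sketch) - consolidates the per-test setup repeated above.
import pytest_asyncio
from fastapi.testclient import TestClient

# Assumed import paths; adjust to match the test module's own imports.
from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
from mcp_memory_service.web.dependencies import set_storage


@pytest_asyncio.fixture
async def http_client(temp_db, monkeypatch):
    """Yield a TestClient bound to a fresh storage with auth disabled."""
    # Same anonymous-access settings each test above sets inline.
    monkeypatch.setenv('MCP_API_KEY', '')
    monkeypatch.setenv('MCP_OAUTH_ENABLED', 'false')
    monkeypatch.setenv('MCP_ALLOW_ANONYMOUS_ACCESS', 'true')

    storage = SqliteVecMemoryStorage(temp_db)
    await storage.initialize()
    try:
        # Import the app only after the env vars are set, as the tests do.
        from mcp_memory_service.web.app import app
        set_storage(storage)
        yield TestClient(app)
    finally:
        storage.close()
```

With such a fixture, each test shrinks to its request/assert body, e.g. `response = http_client.get("/api/memories?page=1&page_size=10")`.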