# mcp-memory-service — codebase snapshot (page 13 of 47)

This is a rendered snapshot of the repository (tokens: 47937/50000; files: 16/625 on this page).
This is page 13 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.github/workflows/dev-setup-validation.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Development Setup Validation
  2 | 
  3 | # Test the development setup procedures and stale venv prevention mechanisms
  4 | on:
  5 |   push:
  6 |     branches: [ main, develop, release/** ]
  7 |     paths:
  8 |       - 'scripts/validation/check_dev_setup.py'
  9 |       - 'scripts/installation/install.py'
 10 |       - 'scripts/hooks/pre-commit'
 11 |       - 'src/mcp_memory_service/__init__.py'
 12 |       - 'pyproject.toml'
 13 |       - '.github/workflows/dev-setup-validation.yml'
 14 |   pull_request:
 15 |     branches: [ main, develop ]
 16 |     paths:
 17 |       - 'scripts/validation/check_dev_setup.py'
 18 |       - 'scripts/installation/install.py'
 19 |       - 'scripts/hooks/pre-commit'
 20 |       - 'src/mcp_memory_service/__init__.py'
 21 |       - 'pyproject.toml'
 22 |   workflow_dispatch:
 23 | 
 24 | jobs:
 25 |   test-editable-install:
 26 |     name: Test Editable Install Detection
 27 |     runs-on: ubuntu-latest
 28 | 
 29 |     steps:
 30 |       - name: Checkout code
 31 |         uses: actions/checkout@v4
 32 | 
 33 |       - name: Set up Python 3.10
 34 |         uses: actions/setup-python@v4
 35 |         with:
 36 |           python-version: '3.10'
 37 | 
 38 |       - name: Test editable install workflow
 39 |         run: |
 40 |           # Create virtual environment
 41 |           python -m venv test_venv
 42 |           source test_venv/bin/activate
 43 | 
 44 |           # Install in editable mode
 45 |           pip install -e .
 46 | 
 47 |           # Verify editable install is detected
 48 |           python scripts/validation/check_dev_setup.py
 49 | 
 50 |           # Should exit 0 (success)
 51 |           if [ $? -ne 0 ]; then
 52 |             echo "ERROR: Editable install not detected correctly"
 53 |             exit 1
 54 |           fi
 55 | 
 56 |           echo "✅ Editable install detection works correctly"
 57 | 
 58 |       - name: Verify version consistency check
 59 |         run: |
 60 |           source test_venv/bin/activate
 61 | 
 62 |           # Get source version
 63 |           SOURCE_VERSION=$(grep '__version__' src/mcp_memory_service/__init__.py | cut -d'"' -f2)
 64 | 
 65 |           # Get installed version
 66 |           INSTALLED_VERSION=$(python -c "import mcp_memory_service; print(mcp_memory_service.__version__)")
 67 | 
 68 |           echo "Source version: $SOURCE_VERSION"
 69 |           echo "Installed version: $INSTALLED_VERSION"
 70 | 
 71 |           if [ "$SOURCE_VERSION" != "$INSTALLED_VERSION" ]; then
 72 |             echo "ERROR: Version mismatch despite editable install"
 73 |             exit 1
 74 |           fi
 75 | 
 76 |           echo "✅ Version consistency check passed"
 77 | 
 78 |   test-non-editable-detection:
 79 |     name: Test Non-Editable Install Detection
 80 |     runs-on: ubuntu-latest
 81 | 
 82 |     steps:
 83 |       - name: Checkout code
 84 |         uses: actions/checkout@v4
 85 | 
 86 |       - name: Set up Python 3.10
 87 |         uses: actions/setup-python@v4
 88 |         with:
 89 |           python-version: '3.10'
 90 | 
 91 |       - name: Test non-editable install detection
 92 |         run: |
 93 |           # Create virtual environment
 94 |           python -m venv bad_venv
 95 |           source bad_venv/bin/activate
 96 | 
 97 |           # Install WITHOUT editable mode (this is the problem case)
 98 |           pip install .
 99 | 
100 |           # Run detection script - should FAIL (exit 1)
101 |           EXIT_CODE=0
102 |           python scripts/validation/check_dev_setup.py || EXIT_CODE=$?
103 | 
104 |           # We expect failure (exit 1) because it's not editable
105 |           if [ $EXIT_CODE -eq 0 ]; then
106 |             echo "ERROR: Non-editable install was not detected as a problem"
107 |             exit 1
108 |           fi
109 | 
110 |           echo "✅ Non-editable install correctly detected as problematic"
111 | 
112 |   test-version-mismatch-detection:
113 |     name: Test Version Mismatch Detection
114 |     runs-on: ubuntu-latest
115 | 
116 |     steps:
117 |       - name: Checkout code
118 |         uses: actions/checkout@v4
119 | 
120 |       - name: Set up Python 3.10
121 |         uses: actions/setup-python@v4
122 |         with:
123 |           python-version: '3.10'
124 | 
125 |       - name: Test version mismatch scenario
126 |         run: |
127 |           # Create virtual environment
128 |           python -m venv mismatch_venv
129 |           source mismatch_venv/bin/activate
130 | 
131 |           # Install current version
132 |           pip install .
133 | 
134 |           # Simulate version change in source (the stale venv scenario)
135 |           # Save original version
136 |           ORIGINAL_VERSION=$(grep '__version__' src/mcp_memory_service/__init__.py)
137 | 
138 |           # Change source version temporarily
139 |           sed -i 's/__version__ = ".*"/__version__ = "99.99.99"/' src/mcp_memory_service/__init__.py
140 | 
141 |           # Run detection script - should FAIL because versions don't match
142 |           EXIT_CODE=0
143 |           python scripts/validation/check_dev_setup.py || EXIT_CODE=$?
144 | 
145 |           # Restore original version
146 |           echo "$ORIGINAL_VERSION" | sed 's/.*\(__version__.*\)/\1/' > temp_version
147 |           sed -i "s/__version__ = .*$/$(cat temp_version)/" src/mcp_memory_service/__init__.py
148 |           rm temp_version
149 | 
150 |           # We expect failure (exit 1) because of version mismatch
151 |           if [ $EXIT_CODE -eq 0 ]; then
152 |             echo "ERROR: Version mismatch was not detected"
153 |             exit 1
154 |           fi
155 | 
156 |           echo "✅ Version mismatch correctly detected"
157 | 
158 |   test-install-py-developer-detection:
159 |     name: Test install.py Developer Detection
160 |     runs-on: ubuntu-latest
161 | 
162 |     steps:
163 |       - name: Checkout code
164 |         uses: actions/checkout@v4
165 | 
166 |       - name: Set up Python 3.10
167 |         uses: actions/setup-python@v4
168 |         with:
169 |           python-version: '3.10'
170 | 
171 |       - name: Verify .git directory is present
172 |         run: |
173 |           if [ ! -d ".git" ]; then
174 |             echo "ERROR: .git directory not found (developer detection won't work)"
175 |             exit 1
176 |           fi
177 |           echo "✅ .git directory present for developer detection"
178 | 
179 |       - name: Test developer context detection
180 |         run: |
181 |           # Create test script to check if developer detection works
182 |           python3 << 'EOF'
183 |           import sys
184 |           sys.path.insert(0, 'scripts/installation')
185 | 
186 |           # Import the install script's detection function
187 |           import install
188 | 
189 |           # Test developer detection
190 |           is_dev = install.detect_development_context()
191 | 
192 |           if not is_dev:
193 |               print("ERROR: Developer context not detected despite .git directory")
194 |               sys.exit(1)
195 | 
196 |           print("✅ Developer context detection works correctly")
197 |           EOF
198 | 
  # Verifies that the runtime version-consistency check can be imported and
  # executed from an editable install without raising an exception.
  test-runtime-version-warning:
    name: Test Runtime Version Warning
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      # Installs the package in editable mode inside a fresh venv, then calls
      # check_version_consistency() directly to confirm it runs cleanly.
      - name: Test version check function
        run: |
          # Create virtual environment
          python -m venv runtime_venv
          source runtime_venv/bin/activate

          # Install in editable mode
          pip install -e .

          # Test the runtime version check function
          python3 << 'EOF'
          from mcp_memory_service.server import check_version_consistency
          import logging

          # Set up logging to see warnings
          logging.basicConfig(level=logging.WARNING)

          print("Testing version check function...")
          check_version_consistency()
          print("✅ Version check function executed without errors")
          EOF
233 | 
  # Aggregates the results of all validation jobs into a single pass/fail
  # gate.  `if: always()` makes this job run even when upstream jobs fail,
  # so the summary is always reported.
  summary:
    name: Validation Summary
    runs-on: ubuntu-latest
    needs: [test-editable-install, test-non-editable-detection, test-version-mismatch-detection, test-install-py-developer-detection, test-runtime-version-warning]
    if: always()

    steps:
      # Succeeds only when every needed job reported "success"; otherwise
      # prints each job's result for diagnosis and fails the workflow.
      - name: Check all tests passed
        run: |
          echo "Development Setup Validation Results:"
          echo "======================================"
          if [ "${{ needs.test-editable-install.result }}" == "success" ] && \
             [ "${{ needs.test-non-editable-detection.result }}" == "success" ] && \
             [ "${{ needs.test-version-mismatch-detection.result }}" == "success" ] && \
             [ "${{ needs.test-install-py-developer-detection.result }}" == "success" ] && \
             [ "${{ needs.test-runtime-version-warning.result }}" == "success" ]; then
            echo "✅ All development setup validation tests passed!"
            exit 0
          else
            echo "❌ Some validation tests failed"
            echo "Editable Install: ${{ needs.test-editable-install.result }}"
            echo "Non-Editable Detection: ${{ needs.test-non-editable-detection.result }}"
            echo "Version Mismatch Detection: ${{ needs.test-version-mismatch-detection.result }}"
            echo "install.py Developer Detection: ${{ needs.test-install-py-developer-detection.result }}"
            echo "Runtime Version Warning: ${{ needs.test-runtime-version-warning.result }}"
            exit 1
          fi
261 | 
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/web/api/consolidation.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """
 16 | Consolidation API endpoints for HTTP server.
 17 | 
 18 | Provides RESTful HTTP access to memory consolidation operations
 19 | including manual triggers and scheduler status queries.
 20 | """
 21 | 
 22 | import logging
 23 | from typing import Dict, Any, Optional
 24 | from datetime import datetime
 25 | from fastapi import APIRouter, HTTPException
 26 | from pydantic import BaseModel, Field
 27 | 
 28 | logger = logging.getLogger(__name__)
 29 | 
 30 | router = APIRouter(prefix="/api/consolidation", tags=["consolidation"])
 31 | 
 32 | 
class ConsolidationRequest(BaseModel):
    """Request body for POST /api/consolidation/trigger."""
    # Expected values: daily, weekly, monthly, quarterly, yearly.
    # Validation happens downstream in the consolidation operation,
    # which raises ValueError (mapped to HTTP 400) for bad values.
    time_horizon: str = Field(
        default="weekly",
        description="Time horizon for consolidation (daily, weekly, monthly, quarterly, yearly)"
    )
 39 | 
 40 | 
class ConsolidationResponse(BaseModel):
    """Response model for consolidation operations (see trigger_consolidation)."""
    status: str = Field(description="Operation status (completed, running, failed)")
    horizon: str = Field(description="Time horizon that was consolidated")
    processed: int = Field(description="Number of memories processed")
    compressed: int = Field(description="Number of memories compressed")
    forgotten: int = Field(description="Number of memories forgotten/archived")
    duration: float = Field(description="Operation duration in seconds")
 49 | 
 50 | 
class SchedulerStatusResponse(BaseModel):
    """Response model for scheduler status (see get_scheduler_status)."""
    running: bool = Field(description="Whether scheduler is active")
    # next_* fields are None when no run is scheduled for that horizon.
    next_daily: Optional[str] = Field(None, description="Next daily run time (ISO format)")
    next_weekly: Optional[str] = Field(None, description="Next weekly run time (ISO format)")
    next_monthly: Optional[str] = Field(None, description="Next monthly run time (ISO format)")
    jobs_executed: int = Field(description="Total successful jobs executed")
    jobs_failed: int = Field(description="Total failed jobs")
 59 | 
 60 | 
class RecommendationsResponse(BaseModel):
    """Response model for consolidation recommendations (see get_recommendations)."""
    recommendation: str = Field(description="Recommendation status")
    memory_count: int = Field(description="Total memories in system")
    reasons: list[str] = Field(description="List of recommendation reasons")
    estimated_duration: float = Field(description="Estimated duration in seconds")
 67 | 
 68 | 
 69 | @router.post("/trigger", response_model=ConsolidationResponse)
 70 | async def trigger_consolidation(request: ConsolidationRequest) -> Dict[str, Any]:
 71 |     """
 72 |     Trigger a consolidation operation manually.
 73 | 
 74 |     This endpoint initiates a consolidation run for the specified time horizon.
 75 |     The operation runs asynchronously and returns immediately with the result.
 76 | 
 77 |     Args:
 78 |         request: ConsolidationRequest with time_horizon
 79 | 
 80 |     Returns:
 81 |         ConsolidationResponse with operation metrics
 82 | 
 83 |     Raises:
 84 |         HTTPException: If consolidation fails or is not available
 85 | 
 86 |     Example:
 87 |         POST /api/consolidation/trigger
 88 |         {
 89 |             "time_horizon": "weekly"
 90 |         }
 91 | 
 92 |         Response:
 93 |         {
 94 |             "status": "completed",
 95 |             "horizon": "weekly",
 96 |             "processed": 2418,
 97 |             "compressed": 156,
 98 |             "forgotten": 43,
 99 |             "duration": 24.2
100 |         }
101 |     """
102 |     try:
103 |         from ...api.operations import _consolidate_async
104 | 
105 |         # Call the shared async implementation
106 |         result = await _consolidate_async(request.time_horizon)
107 | 
108 |         # Convert to dict for HTTP response
109 |         return result._asdict()
110 | 
111 |     except ValueError as e:
112 |         # Invalid time horizon
113 |         raise HTTPException(status_code=400, detail=str(e))
114 |     except RuntimeError as e:
115 |         # Consolidator not available
116 |         raise HTTPException(status_code=503, detail=str(e))
117 |     except Exception as e:
118 |         logger.error(f"Consolidation trigger failed: {e}")
119 |         raise HTTPException(status_code=500, detail=f"Consolidation failed: {e}")
120 | 
121 | 
@router.get("/status", response_model=SchedulerStatusResponse)
async def get_scheduler_status() -> Dict[str, Any]:
    """
    Get consolidation scheduler status and next run times.

    Returns information about the scheduler state including next
    scheduled runs for each time horizon and execution statistics.

    Returns:
        SchedulerStatusResponse with scheduler state

    Raises:
        HTTPException: 500 when the status query fails.

    Example:
        GET /api/consolidation/status

        Response:
        {
            "running": true,
            "next_daily": "2025-11-10T02:00:00+01:00",
            "next_weekly": "2025-11-16T03:00:00+01:00",
            "next_monthly": "2025-12-01T04:00:00+01:00",
            "jobs_executed": 42,
            "jobs_failed": 0
        }
    """
    try:
        from ...api.operations import _scheduler_status_async

        # Delegate to the shared async implementation.
        result = await _scheduler_status_async()

        # Epoch seconds -> local-time ISO-8601 string, or None when the
        # scheduler has no run scheduled for that horizon.
        # (datetime is imported at module level; no local re-import needed.)
        def to_iso(ts: Optional[float]) -> Optional[str]:
            return datetime.fromtimestamp(ts).isoformat() if ts else None

        return {
            "running": result.running,
            "next_daily": to_iso(result.next_daily),
            "next_weekly": to_iso(result.next_weekly),
            "next_monthly": to_iso(result.next_monthly),
            "jobs_executed": result.jobs_executed,
            "jobs_failed": result.jobs_failed
        }

    except Exception as e:
        # Keep the original traceback in the log for diagnosis.
        logger.error("Failed to get scheduler status: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get status: {e}") from e
168 | 
169 | 
@router.get("/recommendations/{time_horizon}", response_model=RecommendationsResponse)
async def get_recommendations(time_horizon: str) -> Dict[str, Any]:
    """
    Get consolidation recommendations for a specific time horizon.

    Analyzes the current memory state and provides recommendations
    on whether consolidation would be beneficial.

    Args:
        time_horizon: Time horizon to analyze (daily, weekly, monthly, quarterly, yearly)

    Returns:
        RecommendationsResponse with recommendation details

    Raises:
        HTTPException: 400 for an invalid horizon, 503 when the consolidator
            is unavailable, 500 for unexpected analysis failures.

    Example:
        GET /api/consolidation/recommendations/weekly

        Response:
        {
            "recommendation": "CONSOLIDATION_BENEFICIAL",
            "memory_count": 2418,
            "reasons": [
                "Consider running compression to reduce memory usage",
                "Many old memories present - consider forgetting/archival",
                "Good candidate for association discovery"
            ],
            "estimated_duration": 24.2
        }
    """
    try:
        from ...api.client import get_consolidator

        # Validate the path parameter before doing any work.
        valid_horizons = ('daily', 'weekly', 'monthly', 'quarterly', 'yearly')
        if time_horizon not in valid_horizons:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid time_horizon. Must be one of: {', '.join(valid_horizons)}"
            )

        # Get consolidator instance
        consolidator = get_consolidator()
        if consolidator is None:
            raise HTTPException(
                status_code=503,
                detail="Consolidator not available. Check server configuration."
            )

        # Ask the consolidator for its analysis of this horizon.
        recommendations = await consolidator.get_consolidation_recommendations(time_horizon)

        # Normalize the consolidator's dict into the response schema,
        # supplying defaults for any missing keys.
        return {
            "recommendation": recommendations.get("recommendation", "UNKNOWN"),
            "memory_count": recommendations.get("memory_count", 0),
            "reasons": recommendations.get("reasons", []),
            "estimated_duration": recommendations.get("estimated_duration_seconds", 0.0)
        }

    except HTTPException:
        # Re-raise our own 4xx/5xx responses untouched.
        raise
    except Exception as e:
        # Keep the original traceback in the log for diagnosis.
        logger.error("Failed to get recommendations: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get recommendations: {e}") from e
236 | 
```

--------------------------------------------------------------------------------
/scripts/server/run_http_server.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Run the MCP Memory Service HTTP server.
 18 | 
 19 | This script starts the FastAPI server with uvicorn.
 20 | """
 21 | 
 22 | import os
 23 | import sys
 24 | import logging
 25 | import asyncio
 26 | import tempfile
 27 | import subprocess
 28 | from datetime import datetime, timedelta
 29 | 
 30 | # Add the src directory to the Python path
 31 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src'))
 32 | 
 33 | def generate_self_signed_cert():
 34 |     """Generate a self-signed certificate for development."""
 35 |     try:
 36 |         # Create temporary directory for certificates
 37 |         cert_dir = os.path.join(tempfile.gettempdir(), 'mcp-memory-certs')
 38 |         os.makedirs(cert_dir, exist_ok=True)
 39 |         
 40 |         cert_file = os.path.join(cert_dir, 'cert.pem')
 41 |         key_file = os.path.join(cert_dir, 'key.pem')
 42 |         
 43 |         # Check if certificates already exist and are still valid
 44 |         if os.path.exists(cert_file) and os.path.exists(key_file):
 45 |             try:
 46 |                 # Check certificate expiration
 47 |                 result = subprocess.run([
 48 |                     'openssl', 'x509', '-in', cert_file, '-noout', '-enddate'
 49 |                 ], capture_output=True, text=True, check=True)
 50 |                 
 51 |                 # Parse expiration date
 52 |                 end_date_str = result.stdout.split('=')[1].strip()
 53 |                 end_date = datetime.strptime(end_date_str, '%b %d %H:%M:%S %Y %Z')
 54 |                 
 55 |                 # If certificate expires in more than 7 days, reuse it
 56 |                 if end_date > datetime.now() + timedelta(days=7):
 57 |                     print(f"Using existing self-signed certificate: {cert_file}")
 58 |                     return cert_file, key_file
 59 |                     
 60 |             except Exception:
 61 |                 pass  # Fall through to generate new certificate
 62 |         
 63 |         print("Generating self-signed certificate for HTTPS...")
 64 |         
 65 |         # Generate private key
 66 |         subprocess.run([
 67 |             'openssl', 'genrsa', '-out', key_file, '2048'
 68 |         ], check=True, capture_output=True)
 69 |         
 70 |         # Generate certificate with Subject Alternative Names for better compatibility
 71 |         # Get local IP addresses dynamically
 72 |         import socket
 73 |         local_ips = []
 74 |         try:
 75 |             # Get primary local IP
 76 |             s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
 77 |             s.connect(("8.8.8.8", 80))
 78 |             local_ip = s.getsockname()[0]
 79 |             s.close()
 80 |             local_ips.append(local_ip)
 81 |         except Exception:
 82 |             pass
 83 |         
 84 |         # Build SAN list with common names and detected IPs
 85 |         san_entries = [
 86 |             "DNS:memory.local",
 87 |             "DNS:localhost", 
 88 |             "DNS:*.local",
 89 |             "IP:127.0.0.1",
 90 |             "IP:::1"  # IPv6 localhost
 91 |         ]
 92 |         
 93 |         # Add detected local IPs
 94 |         for ip in local_ips:
 95 |             if ip not in ["127.0.0.1"]:
 96 |                 san_entries.append(f"IP:{ip}")
 97 |         
 98 |         # Add additional IPs from environment variable if specified
 99 |         additional_ips = os.getenv('MCP_SSL_ADDITIONAL_IPS', '')
100 |         if additional_ips:
101 |             for ip in additional_ips.split(','):
102 |                 ip = ip.strip()
103 |                 if ip and ip not in [entry.split(':')[1] for entry in san_entries if entry.startswith('IP:')]:
104 |                     san_entries.append(f"IP:{ip}")
105 |         
106 |         # Add additional hostnames from environment variable if specified  
107 |         additional_hostnames = os.getenv('MCP_SSL_ADDITIONAL_HOSTNAMES', '')
108 |         if additional_hostnames:
109 |             for hostname in additional_hostnames.split(','):
110 |                 hostname = hostname.strip()
111 |                 if hostname and f"DNS:{hostname}" not in san_entries:
112 |                     san_entries.append(f"DNS:{hostname}")
113 |         
114 |         san_string = ",".join(san_entries)
115 |         
116 |         print(f"Generating certificate with SANs: {san_string}")
117 |         
118 |         subprocess.run([
119 |             'openssl', 'req', '-new', '-x509', '-key', key_file, '-out', cert_file,
120 |             '-days', '365', '-subj', '/C=US/ST=Local/L=Local/O=MCP Memory Service/CN=memory.local',
121 |             '-addext', f'subjectAltName={san_string}'
122 |         ], check=True, capture_output=True)
123 |         
124 |         print(f"Generated self-signed certificate: {cert_file}")
125 |         print("WARNING: This is a development certificate. Use proper certificates in production.")
126 |         
127 |         return cert_file, key_file
128 |         
129 |     except subprocess.CalledProcessError as e:
130 |         print(f"Error generating certificate: {e}")
131 |         print("Make sure OpenSSL is installed and available in PATH")
132 |         return None, None
133 |     except Exception as e:
134 |         print(f"Unexpected error generating certificate: {e}")
135 |         return None, None
136 | 
137 | 
def main():
    """Run the MCP Memory Service HTTP server.

    Configures logging, resolves the SSL setup (provided certificates,
    generated self-signed certificate, or plain-HTTP fallback) and starts
    uvicorn with the FastAPI app.  Exits with status 1 on missing
    dependencies or startup failure.
    """
    # Set up logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Set default environment variables for testing
    os.environ.setdefault('MCP_HTTP_ENABLED', 'true')
    # Don't override MCP_MEMORY_STORAGE_BACKEND - respect .env and environment settings
    # os.environ.setdefault('MCP_MEMORY_STORAGE_BACKEND', 'sqlite_vec')
    os.environ.setdefault('LOG_LEVEL', 'INFO')

    try:
        import uvicorn
        from mcp_memory_service.web.app import app
        from mcp_memory_service.config import (
            HTTP_HOST, HTTP_PORT, HTTPS_ENABLED, SSL_CERT_FILE, SSL_KEY_FILE
        )

        # SSL configuration, resolved below based on HTTPS settings.
        ssl_keyfile = None
        ssl_certfile = None
        protocol = "http"

        if HTTPS_ENABLED:
            protocol = "https"

            if SSL_CERT_FILE and SSL_KEY_FILE:
                # Use explicitly provided certificates; both files must exist.
                if os.path.exists(SSL_CERT_FILE) and os.path.exists(SSL_KEY_FILE):
                    ssl_certfile = SSL_CERT_FILE
                    ssl_keyfile = SSL_KEY_FILE
                    print(f"Using provided SSL certificates: {SSL_CERT_FILE}")
                else:
                    print("Error: Provided SSL certificates not found!")
                    print(f"Cert file: {SSL_CERT_FILE}")
                    print(f"Key file: {SSL_KEY_FILE}")
                    sys.exit(1)
            else:
                # No certificates configured: generate a self-signed one,
                # falling back to plain HTTP if generation fails.
                ssl_certfile, ssl_keyfile = generate_self_signed_cert()
                if not ssl_certfile or not ssl_keyfile:
                    print("Failed to generate SSL certificate. Falling back to HTTP.")
                    protocol = "http"
                    ssl_certfile = ssl_keyfile = None

        # Display startup information
        host_display = HTTP_HOST if HTTP_HOST != '0.0.0.0' else 'localhost'
        print(f"Starting MCP Memory Service {protocol.upper()} server on {HTTP_HOST}:{HTTP_PORT}")
        print(f"Dashboard: {protocol}://{host_display}:{HTTP_PORT}")
        print(f"API Docs: {protocol}://{host_display}:{HTTP_PORT}/api/docs")

        if protocol == "https":
            print(f"SSL Certificate: {ssl_certfile}")
            print(f"SSL Key: {ssl_keyfile}")
            print("NOTE: Browsers may show security warnings for self-signed certificates")

        print("Press Ctrl+C to stop")

        # Start uvicorn server
        uvicorn_kwargs = {
            "app": app,
            "host": HTTP_HOST,
            "port": HTTP_PORT,
            "log_level": "info",
            "access_log": True
        }

        if ssl_certfile and ssl_keyfile:
            uvicorn_kwargs["ssl_certfile"] = ssl_certfile
            uvicorn_kwargs["ssl_keyfile"] = ssl_keyfile

        uvicorn.run(**uvicorn_kwargs)

    except ImportError as e:
        print("Error: Missing dependencies. Please run 'python install.py' first.")
        print(f"Details: {e}")
        sys.exit(1)
    except Exception as e:
        print(f"Error starting server: {e}")
        sys.exit(1)
222 | 
223 | if __name__ == "__main__":
224 |     main()
```

--------------------------------------------------------------------------------
/scripts/sync/import_memories.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Import memories from JSON exports into SQLite-vec database.
 18 | 
 19 | This script imports memories from one or more JSON export files into
 20 | a central SQLite-vec database, handling deduplication and preserving
 21 | original timestamps and metadata.
 22 | """
 23 | 
 24 | import asyncio
 25 | import sys
 26 | import logging
 27 | import argparse
 28 | import json
 29 | from pathlib import Path
 30 | from datetime import datetime
 31 | 
 32 | # Add project src to path
 33 | project_root = Path(__file__).parent.parent.parent
 34 | sys.path.insert(0, str(project_root / "src"))
 35 | 
 36 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 37 | from mcp_memory_service.sync.importer import MemoryImporter
 38 | from mcp_memory_service.config import SQLITE_VEC_PATH, STORAGE_BACKEND
 39 | 
 40 | # Configure logging
 41 | logging.basicConfig(
 42 |     level=logging.INFO,
 43 |     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
 44 | )
 45 | logger = logging.getLogger(__name__)
 46 | 
 47 | 
 48 | def get_default_db_path() -> Path:
 49 |     """Get the default database path for this platform."""
 50 |     if STORAGE_BACKEND == 'sqlite_vec' and SQLITE_VEC_PATH:
 51 |         return Path(SQLITE_VEC_PATH)
 52 |     else:
 53 |         # Fallback to BASE_DIR if not using sqlite_vec backend
 54 |         from mcp_memory_service.config import BASE_DIR
 55 |         return Path(BASE_DIR) / "sqlite_vec.db"
 56 | 
 57 | 
 58 | async def import_memories(
 59 |     json_files: list,
 60 |     db_path: Path,
 61 |     deduplicate: bool = True,
 62 |     add_source_tags: bool = True,
 63 |     dry_run: bool = False
 64 | ):
 65 |     """Import memories from JSON files into database."""
 66 |     logger.info(f"Starting memory import to {db_path}")
 67 |     logger.info(f"JSON files: {[str(f) for f in json_files]}")
 68 |     
 69 |     # Validate input files
 70 |     for json_file in json_files:
 71 |         if not json_file.exists():
 72 |             logger.error(f"JSON file not found: {json_file}")
 73 |             return False
 74 |         
 75 |         # Quick validation of JSON format
 76 |         try:
 77 |             with open(json_file, 'r') as f:
 78 |                 data = json.load(f)
 79 |                 if "export_metadata" not in data or "memories" not in data:
 80 |                     logger.error(f"Invalid export format in {json_file}")
 81 |                     return False
 82 |         except Exception as e:
 83 |             logger.error(f"Error reading {json_file}: {str(e)}")
 84 |             return False
 85 |     
 86 |     try:
 87 |         # Initialize storage
 88 |         logger.info("Initializing SQLite-vec storage...")
 89 |         storage = SqliteVecMemoryStorage(str(db_path))
 90 |         await storage.initialize()
 91 |         
 92 |         # Create importer
 93 |         importer = MemoryImporter(storage)
 94 |         
 95 |         # Show analysis first
 96 |         logger.info("Analyzing import files...")
 97 |         analysis = await importer.analyze_import(json_files)
 98 |         
 99 |         logger.info(f"Import Analysis:")
100 |         logger.info(f"  Total memories to process: {analysis['total_memories']}")
101 |         logger.info(f"  Unique memories: {analysis['unique_memories']}")
102 |         logger.info(f"  Potential duplicates: {analysis['potential_duplicates']}")
103 |         logger.info(f"  Import conflicts: {len(analysis['conflicts'])}")
104 |         
105 |         logger.info(f"  Sources:")
106 |         for source, stats in analysis['sources'].items():
107 |             logger.info(f"    {source}: {stats['new_memories']}/{stats['total_memories']} new memories")
108 |         
109 |         if analysis['conflicts']:
110 |             logger.warning(f"Found {len(analysis['conflicts'])} conflicts between import files")
111 |         
112 |         # Ask for confirmation if not dry run
113 |         if not dry_run:
114 |             logger.info("")
115 |             response = input("Proceed with import? (y/N): ")
116 |             if response.lower() != 'y':
117 |                 logger.info("Import cancelled by user")
118 |                 return False
119 |         
120 |         # Perform import
121 |         logger.info(f"{'[DRY RUN] ' if dry_run else ''}Starting import...")
122 |         result = await importer.import_from_json(
123 |             json_files=json_files,
124 |             deduplicate=deduplicate,
125 |             add_source_tags=add_source_tags,
126 |             dry_run=dry_run
127 |         )
128 |         
129 |         # Show results
130 |         logger.info(f"Import {'simulation ' if dry_run else ''}completed!")
131 |         logger.info(f"  Files processed: {result['files_processed']}")
132 |         logger.info(f"  Total processed: {result['total_processed']}")
133 |         logger.info(f"  Successfully imported: {result['imported']}")
134 |         logger.info(f"  Duplicates skipped: {result['duplicates_skipped']}")
135 |         logger.info(f"  Errors: {result['errors']}")
136 |         
137 |         logger.info(f"  Source breakdown:")
138 |         for source, stats in result['sources'].items():
139 |             logger.info(f"    {source}: {stats['imported']}/{stats['total']} imported, {stats['duplicates']} duplicates")
140 |         
141 |         if not dry_run and result['imported'] > 0:
142 |             # Show next steps
143 |             logger.info("")
144 |             logger.info("Next steps:")
145 |             logger.info("1. Verify the imported memories using the web interface or API")
146 |             logger.info("2. Set up Litestream for ongoing synchronization")
147 |             logger.info("3. Configure replica nodes to sync from this central database")
148 |         
149 |         return result['errors'] == 0
150 |         
151 |     except Exception as e:
152 |         logger.error(f"Import failed: {str(e)}")
153 |         return False
154 | 
155 | 
async def main():
    """Parse command-line arguments and run the memory import.

    Exits with status 0 on success, 1 on validation or import failure.
    """
    # Compute the default once; it is used for both the argparse default
    # value and the help text (the original called it twice).
    default_db_path = get_default_db_path()

    parser = argparse.ArgumentParser(
        description="Import memories from JSON exports into SQLite-vec database",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Import single JSON file
  python import_memories.py windows_export.json
  
  # Import multiple JSON files
  python import_memories.py windows_export.json macbook_export.json
  
  # Import to specific database
  python import_memories.py --db-path /path/to/sqlite_vec.db exports/*.json
  
  # Dry run to see what would be imported
  python import_memories.py --dry-run exports/*.json
  
  # Import without deduplication (allow duplicates)
  python import_memories.py --no-deduplicate exports/*.json
  
  # Import without adding source tags
  python import_memories.py --no-source-tags exports/*.json
        """
    )

    parser.add_argument(
        "json_files",
        nargs="+",
        type=Path,
        help="JSON export files to import"
    )

    parser.add_argument(
        "--db-path",
        type=Path,
        default=default_db_path,
        help=f"Path to SQLite-vec database (default: {default_db_path})"
    )

    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Analyze imports without actually storing data"
    )

    parser.add_argument(
        "--no-deduplicate",
        action="store_true",
        help="Allow duplicate memories (don't skip based on content hash)"
    )

    parser.add_argument(
        "--no-source-tags",
        action="store_true",
        help="Don't add source machine tags to imported memories"
    )

    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable verbose logging"
    )

    args = parser.parse_args()

    # Set logging level
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Show configuration
    logger.info("Memory Import Configuration:")
    logger.info(f"  Database: {args.db_path}")
    logger.info(f"  JSON files: {[str(f) for f in args.json_files]}")
    logger.info(f"  Dry run: {args.dry_run}")
    logger.info(f"  Deduplicate: {not args.no_deduplicate}")
    logger.info(f"  Add source tags: {not args.no_source_tags}")
    logger.info("")

    # Validate JSON files exist
    missing_files = [f for f in args.json_files if not f.exists()]
    if missing_files:
        logger.error(f"Missing JSON files: {missing_files}")
        sys.exit(1)

    # Run import
    success = await import_memories(
        json_files=args.json_files,
        db_path=args.db_path,
        deduplicate=not args.no_deduplicate,
        add_source_tags=not args.no_source_tags,
        dry_run=args.dry_run
    )

    sys.exit(0 if success else 1)
252 | 
253 | 
254 | if __name__ == "__main__":
255 |     asyncio.run(main())
```

--------------------------------------------------------------------------------
/scripts/development/fix_sitecustomize.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """
 17 | Enhanced fix script for sitecustomize.py recursion issues.
 18 | This script replaces the problematic sitecustomize.py with a fixed version
 19 | that works on Linux WSL2 with CUDA 12.4 and other platforms.
 20 | """
 21 | import os
 22 | import sys
 23 | import site
 24 | import shutil
 25 | import platform
 26 | 
 27 | def print_info(text):
 28 |     """Print formatted info text."""
 29 |     print(f"[INFO] {text}")
 30 | 
 31 | def print_error(text):
 32 |     """Print formatted error text."""
 33 |     print(f"[ERROR] {text}")
 34 | 
 35 | def print_success(text):
 36 |     """Print formatted success text."""
 37 |     print(f"[SUCCESS] {text}")
 38 | 
 39 | def print_warning(text):
 40 |     """Print formatted warning text."""
 41 |     print(f"[WARNING] {text}")
 42 | 
def fix_sitecustomize():
    """Replace site-packages/sitecustomize.py with a recursion-safe version.

    Backs up the existing sitecustomize.py, writes a platform-appropriate
    replacement (a WSL-specific variant when running under WSL), and on WSL
    additionally neutralizes setuptools' _distutils_hack.do_override().

    Returns:
        True on success, False when no sitecustomize.py exists to fix.
    """
    # Get site-packages directory
    # NOTE(review): assumes a standard interpreter layout; getsitepackages()
    # is not available in every environment (e.g. some virtualenvs) — confirm.
    site_packages = site.getsitepackages()[0]
    
    # Path to sitecustomize.py
    sitecustomize_path = os.path.join(site_packages, 'sitecustomize.py')
    
    # Check if file exists
    if not os.path.exists(sitecustomize_path):
        print_error(f"sitecustomize.py not found at {sitecustomize_path}")
        return False
    
    # Create backup (only once — an existing .bak is never overwritten, so the
    # backup always reflects the pre-fix original)
    backup_path = sitecustomize_path + '.bak'
    if not os.path.exists(backup_path):
        print_info(f"Creating backup of sitecustomize.py at {backup_path}")
        shutil.copy2(sitecustomize_path, backup_path)
        print_success(f"Backup created at {backup_path}")
    else:
        print_warning(f"Backup already exists at {backup_path}")
    
    # Create fixed sitecustomize.py
    print_info(f"Creating fixed sitecustomize.py at {sitecustomize_path}")
    
    # Detect system for platform-specific fixes ("microsoft" in the kernel
    # release string is the conventional WSL marker)
    system = platform.system().lower()
    is_wsl = "microsoft" in platform.release().lower() if system == "linux" else False
    
    # Create content based on platform; both templates install an import hook
    # that blocks automatic installation of torch-family packages
    if is_wsl:
        # Special content for WSL with enhanced error handling
        content = """# Fixed sitecustomize.py to prevent recursion issues on WSL
# Import standard library modules first to avoid recursion
import sys
import os
import importlib.util
import importlib.machinery
import warnings

# Disable warnings to reduce noise
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=ImportWarning)

# Print debug info to stderr to avoid interfering with MCP protocol
print("sitecustomize.py loaded", file=sys.stderr)

# Set environment variables to prevent pip from installing dependencies
os.environ["PIP_NO_DEPENDENCIES"] = "1"
os.environ["PIP_NO_INSTALL"] = "1"

# Disable automatic torch installation
os.environ["PYTORCH_IGNORE_DUPLICATE_MODULE_REGISTRATION"] = "1"

# Create a custom import hook to prevent automatic installation
class PreventAutoInstallImportHook:
    def __init__(self):
        self.blocked_packages = ['torch', 'torchvision', 'torchaudio', 'torchao']
        # Keep track of packages we've already tried to find to prevent recursion
        self.checked_packages = set()
    
    def find_spec(self, fullname, path, target=None):
        # Prevent recursion by checking if we've already tried to find this package
        if fullname in self.checked_packages:
            return None
        
        # Check if this is a package we want to block
        if any(fullname.startswith(pkg) for pkg in self.blocked_packages):
            # Add to checked packages to prevent recursion
            self.checked_packages.add(fullname)
            
            # Try to find the package directly using the loader
            try:
                # Try to find the module directly
                loader = importlib.machinery.PathFinder.find_spec(fullname, path)
                if loader is not None:
                    return loader
            except Exception:
                pass
            
            # If not found, print a warning and return None
            print(f"WARNING: Blocked automatic installation of {fullname}", file=sys.stderr)
            return None
        
        # Return None to let the normal import system handle it
        return None

# Register the import hook
sys.meta_path.insert(0, PreventAutoInstallImportHook())

# Disable distutils setup hooks that can cause recursion
try:
    import setuptools
    setuptools._distutils_hack = None
except Exception:
    pass

# Disable _distutils_hack completely
sys.modules['_distutils_hack'] = None
"""
    else:
        # Standard content for other platforms
        content = """# Fixed sitecustomize.py to prevent recursion issues
import sys
import os
import importlib.util
import importlib.machinery

# Print debug info
print("sitecustomize.py loaded", file=sys.stderr)

# Set environment variables to prevent pip from installing dependencies
os.environ["PIP_NO_DEPENDENCIES"] = "1"
os.environ["PIP_NO_INSTALL"] = "1"

# Create a custom import hook to prevent automatic installation
class PreventAutoInstallImportHook:
    def __init__(self):
        self.blocked_packages = ['torch', 'torchvision', 'torchaudio']
        # Keep track of packages we've already tried to find to prevent recursion
        self.checked_packages = set()
    
    def find_spec(self, fullname, path, target=None):
        # Prevent recursion by checking if we've already tried to find this package
        if fullname in self.checked_packages:
            return None
        
        # Check if this is a package we want to block
        if any(fullname.startswith(pkg) for pkg in self.blocked_packages):
            # Add to checked packages to prevent recursion
            self.checked_packages.add(fullname)
            
            # Try to find the package directly using the loader
            try:
                # Try to find the module directly
                loader = importlib.machinery.PathFinder.find_spec(fullname, path)
                if loader is not None:
                    return loader
            except Exception:
                pass
            
            # If not found, print a warning and return None
            print(f"WARNING: Blocked automatic installation of {fullname}", file=sys.stderr)
            return None
        
        # Return None to let the normal import system handle it
        return None

# Register the import hook
sys.meta_path.insert(0, PreventAutoInstallImportHook())
"""
    
    # Write the content to the file (overwrites the original in place;
    # the .bak created above preserves the previous version)
    with open(sitecustomize_path, 'w') as f:
        f.write(content)
    
    print_success(f"Fixed sitecustomize.py created at {sitecustomize_path}")
    
    # Additional fix for distutils on WSL: make do_override() a no-op so the
    # setuptools distutils shim cannot recurse at interpreter startup
    if is_wsl:
        try:
            # Try to fix _distutils_hack.py
            distutils_hack_path = os.path.join(site_packages, '_distutils_hack', '__init__.py')
            if os.path.exists(distutils_hack_path):
                print_info(f"Fixing _distutils_hack at {distutils_hack_path}")
                
                # Create backup
                hack_backup_path = distutils_hack_path + '.bak'
                if not os.path.exists(hack_backup_path):
                    shutil.copy2(distutils_hack_path, hack_backup_path)
                    print_success(f"Backup created at {hack_backup_path}")
                
                # Read the file
                with open(distutils_hack_path, 'r') as f:
                    content = f.read()
                
                # Modify the content to disable the problematic parts
                # NOTE(review): not idempotent — running this script twice
                # inserts a second "    return" after do_override(); harmless
                # (unreachable) but sloppy. Consider guarding on a marker.
                content = content.replace("def do_override():", "def do_override():\n    return")
                
                # Write the modified content
                with open(distutils_hack_path, 'w') as f:
                    f.write(content)
                
                print_success(f"Fixed _distutils_hack at {distutils_hack_path}")
        except Exception as e:
            # Best-effort: failure to patch _distutils_hack is non-fatal
            print_warning(f"Could not fix _distutils_hack: {e}")
    
    return True
231 | 
def main():
    """Entry point: apply the sitecustomize fix, exiting non-zero on failure."""
    print_info("Enhanced fix for sitecustomize.py to prevent recursion issues")
    # Guard clause: bail out with an error status when the fix cannot be applied.
    if not fix_sitecustomize():
        print_error("Failed to fix sitecustomize.py")
        sys.exit(1)
    print_success("sitecustomize.py fixed successfully")

if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/dependency_check.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Dependency pre-check to ensure all required packages are installed.
  3 | This prevents runtime downloads during server initialization that cause timeouts.
  4 | """
  5 | 
  6 | import sys
  7 | import subprocess
  8 | import platform
  9 | import logging
 10 | import os
 11 | from typing import Tuple, Optional
 12 | 
 13 | logger = logging.getLogger(__name__)
 14 | 
 15 | def detect_mcp_client_simple():
 16 |     """Simple MCP client detection for dependency checking."""
 17 |     try:
 18 |         # Check environment variables first
 19 |         if os.getenv('LM_STUDIO'):
 20 |             return 'lm_studio'
 21 |         if os.getenv('CLAUDE_DESKTOP'):
 22 |             return 'claude_desktop'
 23 |             
 24 |         import psutil
 25 |         current_process = psutil.Process()
 26 |         parent = current_process.parent()
 27 |         
 28 |         if parent:
 29 |             parent_name = parent.name().lower()
 30 |             if 'claude' in parent_name:
 31 |                 return 'claude_desktop'
 32 |             if 'lmstudio' in parent_name or 'lm-studio' in parent_name:
 33 |                 return 'lm_studio'
 34 |         
 35 |         # Default to Claude Desktop for strict mode
 36 |         return 'claude_desktop'
 37 |     except:
 38 |         return 'claude_desktop'
 39 | 
 40 | def check_torch_installed() -> Tuple[bool, Optional[str]]:
 41 |     """
 42 |     Check if PyTorch is properly installed.
 43 |     Returns (is_installed, version_string)
 44 |     """
 45 |     try:
 46 |         import torch
 47 |         # Check if torch has __version__ attribute (it should)
 48 |         version = getattr(torch, '__version__', 'unknown')
 49 |         # Also verify torch is functional
 50 |         try:
 51 |             _ = torch.tensor([1.0])
 52 |             return True, version
 53 |         except Exception:
 54 |             return False, None
 55 |     except ImportError:
 56 |         return False, None
 57 | 
 58 | def check_sentence_transformers_installed() -> Tuple[bool, Optional[str]]:
 59 |     """
 60 |     Check if sentence-transformers is properly installed.
 61 |     Returns (is_installed, version_string)
 62 |     """
 63 |     try:
 64 |         import sentence_transformers
 65 |         return True, sentence_transformers.__version__
 66 |     except ImportError:
 67 |         return False, None
 68 | 
 69 | def check_critical_dependencies() -> Tuple[bool, list]:
 70 |     """
 71 |     Check if all critical dependencies are installed.
 72 |     Returns (all_installed, missing_packages)
 73 |     """
 74 |     missing = []
 75 |     
 76 |     # Check PyTorch
 77 |     torch_installed, torch_version = check_torch_installed()
 78 |     if not torch_installed:
 79 |         missing.append("torch")
 80 |     else:
 81 |         logger.debug(f"PyTorch {torch_version} is installed")
 82 |     
 83 |     # Check sentence-transformers
 84 |     st_installed, st_version = check_sentence_transformers_installed()
 85 |     if not st_installed:
 86 |         missing.append("sentence-transformers")
 87 |     else:
 88 |         logger.debug(f"sentence-transformers {st_version} is installed")
 89 |     
 90 |     # Check other critical packages
 91 |     critical_packages = [
 92 |         "sqlite-vec",
 93 |         "mcp",
 94 |         "aiohttp",
 95 |         "fastapi",
 96 |         "uvicorn"
 97 |     ]
 98 |     
 99 |     for package in critical_packages:
100 |         try:
101 |             __import__(package.replace("-", "_"))
102 |             logger.debug(f"{package} is installed")
103 |         except ImportError:
104 |             missing.append(package)
105 |     
106 |     return len(missing) == 0, missing
107 | 
def suggest_installation_command(missing_packages: list) -> str:
    """Generate the appropriate installation command for missing packages.

    Args:
        missing_packages: pip-style names of packages that failed to import.

    Returns:
        The command the user should run, or "" when nothing is missing.
    """
    if not missing_packages:
        return ""
    # install.py handles platform-specific details itself, so the command is
    # identical on every OS. (The previous Windows branch was dead code: both
    # branches returned the same string.)
    return "python install.py"
120 | 
def run_dependency_check() -> bool:
    """Run the dependency check and, for LM Studio, report status on stdout.

    Returns:
        True when every critical dependency is installed.
    """
    client_type = detect_mcp_client_simple()
    all_installed, missing = check_critical_dependencies()

    # Claude Desktop parses stdout as JSON-RPC, so any human-readable output
    # is restricted to LM Studio sessions.
    if client_type != 'lm_studio':
        return all_installed

    print("\n=== MCP Memory Service Dependency Check ===", file=sys.stdout, flush=True)
    if all_installed:
        print("[OK] All dependencies are installed", file=sys.stdout, flush=True)
    else:
        print(f"[MISSING] Missing dependencies detected: {', '.join(missing)}", file=sys.stdout, flush=True)
        print("\n[WARNING] IMPORTANT: Missing dependencies will cause timeouts!", file=sys.stdout, flush=True)
        print("[INSTALL] To install missing dependencies, run:", file=sys.stdout, flush=True)
        print(f"   {suggest_installation_command(missing)}", file=sys.stdout, flush=True)
        print("\nThe server will attempt to continue, but may timeout during initialization.", file=sys.stdout, flush=True)
        print("============================================\n", file=sys.stdout, flush=True)

    return all_installed
144 | 
def is_first_run() -> bool:
    """Heuristically decide whether embedding models still need downloading.

    Scans known HuggingFace / sentence-transformers cache locations
    (standard, Windows-specific, and env-var overrides) for a cached model.

    Returns:
        True when no cached model is found, i.e. this looks like a first run
        and slow model downloads should be expected during initialization.
    """
    cache_indicators = []

    # Standard HuggingFace cache locations
    cache_indicators.extend([
        os.path.expanduser("~/.cache/huggingface/hub"),
        os.path.expanduser("~/.cache/torch/sentence_transformers"),
    ])

    # Windows-specific locations
    if platform.system() == "Windows":
        username = os.environ.get('USERNAME', os.environ.get('USER', ''))
        cache_indicators.extend([
            f"C:\\Users\\{username}\\.cache\\huggingface\\hub",
            f"C:\\Users\\{username}\\.cache\\torch\\sentence_transformers",
            f"C:\\Users\\{username}\\AppData\\Local\\huggingface\\hub",
            f"C:\\Users\\{username}\\AppData\\Local\\torch\\sentence_transformers",
            os.path.expanduser("~/AppData/Local/sentence-transformers"),
        ])

    # Check environment variables for custom cache locations
    hf_home = os.environ.get('HF_HOME')
    if hf_home:
        cache_indicators.append(os.path.join(hf_home, 'hub'))

    transformers_cache = os.environ.get('TRANSFORMERS_CACHE')
    if transformers_cache:
        cache_indicators.append(transformers_cache)

    sentence_transformers_home = os.environ.get('SENTENCE_TRANSFORMERS_HOME')
    if sentence_transformers_home:
        cache_indicators.append(sentence_transformers_home)

    # Check each cache location for evidence of a previously downloaded model
    for path in cache_indicators:
        if not os.path.exists(path):
            continue
        try:
            contents = os.listdir(path)

            # Look for sentence-transformers models by directory name.
            # BUGFIX: the indicators previously read 'miniml'/'all-miniml',
            # which never matched the lowercased 'all-MiniLM-*' model names.
            for item in contents:
                item_lower = item.lower()
                if any(indicator in item_lower for indicator in [
                    'sentence-transformers', 'minilm', 'all-minilm',
                    'paraphrase', 'distilbert', 'mpnet', 'roberta'
                ]):
                    logger.debug(f"Found cached model in {path}: {item}")
                    return False

            # Also treat any directory containing model-like files as a hit
            for item in contents:
                item_path = os.path.join(path, item)
                if os.path.isdir(item_path):
                    try:
                        sub_contents = os.listdir(item_path)
                        if any(f.endswith(('.bin', '.safetensors', '.json')) for f in sub_contents):
                            logger.debug(f"Found model files in {item_path}")
                            return False
                    except (OSError, PermissionError):
                        # Unreadable subdirectory — keep scanning the rest
                        continue

        except (OSError, PermissionError):
            logger.debug(f"Could not access cache directory: {path}")
            continue

    logger.debug("No cached sentence-transformers models found - this appears to be first run")
    return True
218 | 
def get_recommended_timeout() -> float:
    """Compute the recommended startup timeout for this environment.

    Starts from a platform base (Windows is slower to initialize) and
    doubles it once when critical dependencies are missing, and again when
    this appears to be a first run (models must still be downloaded).

    Returns:
        Recommended timeout in seconds.
    """
    all_installed, _missing = check_critical_dependencies()
    first_run = is_first_run()

    # Platform base timeout
    timeout = 30.0 if platform.system() == "Windows" else 15.0

    # Each condition independently doubles the timeout, with a warning.
    for applies, reason in (
        (not all_installed, "Dependencies missing"),
        (first_run, "First run detected"),
    ):
        if applies:
            timeout *= 2
            logger.warning(f"{reason}, extending timeout to {timeout}s")

    return timeout
```

--------------------------------------------------------------------------------
/claude-hooks/core/memory-retrieval.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * On-Demand Memory Retrieval Hook
  3 |  * Allows users to manually request context refresh when needed
  4 |  */
  5 | 
  6 | const fs = require('fs').promises;
  7 | const path = require('path');
  8 | const https = require('https');
  9 | 
 10 | // Import utilities
 11 | const { detectProjectContext } = require('../utilities/project-detector');
 12 | const { scoreMemoryRelevance } = require('../utilities/memory-scorer');
 13 | const { formatMemoriesForContext } = require('../utilities/context-formatter');
 14 | 
 15 | /**
 16 |  * Load hook configuration
 17 |  */
 18 | async function loadConfig() {
 19 |     try {
 20 |         const configPath = path.join(__dirname, '../config.json');
 21 |         const configData = await fs.readFile(configPath, 'utf8');
 22 |         return JSON.parse(configData);
 23 |     } catch (error) {
 24 |         console.warn('[Memory Retrieval] Using default configuration:', error.message);
 25 |         return {
 26 |             memoryService: {
 27 |                 endpoint: 'https://narrowbox.local:8443',
 28 |                 apiKey: 'test-key-123',
 29 |                 maxMemoriesPerSession: 5
 30 |             }
 31 |         };
 32 |     }
 33 | }
 34 | 
/**
 * Query the memory service's MCP endpoint for relevant memories.
 *
 * Sends a JSON-RPC `tools/call` request for the `retrieve_memory` tool over
 * HTTPS. Never rejects: every failure path (network error, bad JSON,
 * unparseable tool output) resolves with an empty array so callers can
 * degrade gracefully.
 *
 * @param {string} endpoint - base URL of the memory service
 * @param {string} apiKey - bearer token for the Authorization header
 * @param {object} query - { semanticQuery?: string, limit?: number }
 * @returns {Promise<Array>} array of memory objects (possibly empty)
 */
async function queryMemoryService(endpoint, apiKey, query) {
    return new Promise((resolve, reject) => {
        // NOTE(review): `reject` is intentionally unused — all paths resolve.
        const url = new URL('/mcp', endpoint);
        const postData = JSON.stringify({
            jsonrpc: '2.0',
            id: 1,
            method: 'tools/call',
            params: {
                name: 'retrieve_memory',
                arguments: {
                    query: query.semanticQuery || '',
                    n_results: query.limit || 5
                }
            }
        });

        const options = {
            hostname: url.hostname,
            port: url.port || 8443,
            path: url.pathname,
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Content-Length': Buffer.byteLength(postData),
                'Authorization': `Bearer ${apiKey}`
            },
            rejectUnauthorized: false // For self-signed certificates
        };

        const req = https.request(options, (res) => {
            let data = '';
            res.on('data', (chunk) => {
                data += chunk;
            });
            res.on('end', () => {
                try {
                    const response = JSON.parse(data);
                    if (response.result && response.result.content) {
                        // Tool output arrives as text in the first content item.
                        let textData = response.result.content[0].text;
                        
                        try {
                            // Convert Python dict format to JSON format safely
                            // NOTE(review): this global quote/keyword substitution
                            // is fragile — apostrophes or True/False/None inside
                            // memory content will corrupt the payload. Failures
                            // are caught below and yield [].
                            textData = textData
                                .replace(/'/g, '"')
                                .replace(/True/g, 'true')
                                .replace(/False/g, 'false')
                                .replace(/None/g, 'null');
                            
                            const memories = JSON.parse(textData);
                            // Accept either response shape; default to empty.
                            resolve(memories.results || memories.memories || []);
                        } catch (conversionError) {
                            console.warn('[Memory Retrieval] Could not parse memory response:', conversionError.message);
                            resolve([]);
                        }
                    } else {
                        resolve([]);
                    }
                } catch (parseError) {
                    console.warn('[Memory Retrieval] Parse error:', parseError.message);
                    resolve([]);
                }
            });
        });

        req.on('error', (error) => {
            // Network failures degrade to "no memories" rather than rejecting.
            console.warn('[Memory Retrieval] Network error:', error.message);
            resolve([]);
        });

        req.write(postData);
        req.end();
    });
}
111 | 
/**
 * On-demand memory retrieval entry point.
 *
 * Loads configuration, detects the current project, queries the memory
 * service, scores/formats the results, and displays them via
 * context.displayResult when provided (falling back to console output).
 *
 * @param {object} context - { workingDirectory?, query?, message?, displayResult? }
 * @returns {Promise<object>} result summary: { success, memoriesFound?,
 *          memoriesShown?, context?, error? } — never throws.
 */
async function retrieveMemories(context) {
    try {
        console.log('[Memory Retrieval] On-demand memory retrieval requested...');
        
        // Load configuration
        const config = await loadConfig();
        
        // Detect project context from the working directory (cwd fallback)
        const projectContext = await detectProjectContext(context.workingDirectory || process.cwd());
        console.log(`[Memory Retrieval] Project context: ${projectContext.name} (${projectContext.language})`);
        
        // Parse user query if provided (either field may carry it)
        const userQuery = context.query || context.message || '';
        
        // Build memory query
        // NOTE(review): only semanticQuery and limit are used by
        // queryMemoryService; tags/timeFilter are currently unused there.
        const memoryQuery = {
            tags: [
                projectContext.name,
                `language:${projectContext.language}`,
                'key-decisions',
                'architecture',
                'recent-insights'
            ].filter(Boolean),
            semanticQuery: userQuery.length > 0 ? 
                `${projectContext.name} ${userQuery}` : 
                `${projectContext.name} project context decisions architecture`,
            limit: config.memoryService.maxMemoriesPerSession || 5,
            timeFilter: 'last-month'
        };
        
        // Query memory service
        const memories = await queryMemoryService(
            config.memoryService.endpoint,
            config.memoryService.apiKey,
            memoryQuery
        );
        
        if (memories.length > 0) {
            console.log(`[Memory Retrieval] Found ${memories.length} relevant memories`);
            
            // Score memories for relevance against the detected project
            const scoredMemories = scoreMemoryRelevance(memories, projectContext);
            
            // Take top scored memories (bounded by the configured session limit)
            const topMemories = scoredMemories.slice(0, config.memoryService.maxMemoriesPerSession || 5);
            
            // Format memories for display
            const contextMessage = formatMemoriesForContext(topMemories, projectContext, {
                includeScore: true, // Show scores for manual retrieval
                groupByCategory: topMemories.length > 3,
                maxMemories: config.memoryService.maxMemoriesPerSession || 5,
                includeTimestamp: true
            });
            
            // Output formatted context via the host's display hook if available
            if (context.displayResult) {
                await context.displayResult(contextMessage);
                console.log('[Memory Retrieval] Successfully displayed memory context');
            } else {
                // Fallback: log context
                console.log('\n=== RETRIEVED MEMORY CONTEXT ===');
                console.log(contextMessage);
                console.log('=== END CONTEXT ===\n');
            }
            
            return {
                success: true,
                memoriesFound: memories.length,
                memoriesShown: topMemories.length,
                context: contextMessage
            };
            
        } else {
            // No results: still surface a helpful message to the user
            const message = `## 📋 Memory Retrieval\n\nNo relevant memories found for query: "${userQuery || 'project context'}"\n\nTry a different search term or check if your memory service is running.`;
            
            if (context.displayResult) {
                await context.displayResult(message);
            } else {
                console.log(message);
            }
            
            return {
                success: false,
                memoriesFound: 0,
                memoriesShown: 0,
                context: message
            };
        }
        
    } catch (error) {
        // Any unexpected failure is reported rather than thrown
        console.error('[Memory Retrieval] Error retrieving memories:', error.message);
        const errorMessage = `## ❌ Memory Retrieval Error\n\n${error.message}\n\nCheck your memory service configuration and connection.`;
        
        if (context.displayResult) {
            await context.displayResult(errorMessage);
        }
        
        return {
            success: false,
            error: error.message
        };
    }
}
218 | 
/**
 * Hook metadata for Claude Code.
 *
 * Registers retrieveMemories as a manually-triggered hook (the user asks for
 * a context refresh; nothing fires automatically).
 */
module.exports = {
    name: 'on-demand-memory-retrieval',
    version: '1.0.0',
    description: 'Retrieve relevant memories on user request',
    trigger: 'manual', // This hook is triggered manually
    handler: retrieveMemories,
    config: {
        async: true,
        timeout: 10000, // ms budget for the whole retrieval round-trip
        priority: 'normal'
    }
};

// Direct execution support for testing: `node memory-retrieval.js` runs the
// hook once against the current directory with a mock display callback.
if (require.main === module) {
    // Test the retrieval with mock context
    const mockContext = {
        workingDirectory: process.cwd(),
        query: 'architecture decisions',
        displayResult: async (message) => {
            console.log('=== MOCK DISPLAY RESULT ===');
            console.log(message);
            console.log('=== END MOCK DISPLAY ===');
        }
    };
    
    retrieveMemories(mockContext)
        .then(result => console.log('Retrieval test completed:', result))
        .catch(error => console.error('Retrieval test failed:', error));
}
```

--------------------------------------------------------------------------------
/tests/test_time_parser.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Unit tests for time_parser module
  3 | """
  4 | import pytest
  5 | from datetime import datetime, date, timedelta
  6 | import time
  7 | 
  8 | import sys
  9 | import os
 10 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
 11 | 
 12 | from mcp_memory_service.utils.time_parser import (
 13 |     parse_time_expression,
 14 |     extract_time_expression,
 15 |     get_time_of_day_range,
 16 |     get_last_period_range,
 17 |     get_this_period_range,
 18 |     get_month_range,
 19 |     get_named_period_range
 20 | )
 21 | 
 22 | 
class TestTimeParser:
    """Test time parsing functionality.

    NOTE: these tests are intentionally clock-dependent — they call
    date.today() / datetime.now() and compare against the parser's output,
    so assertions use tolerance windows where calendar boundaries make the
    exact value vary by run date.
    """

    def test_relative_days(self):
        """Test parsing relative day expressions"""
        # Test "yesterday"
        start_ts, end_ts = parse_time_expression("yesterday")
        assert start_ts is not None
        assert end_ts is not None

        yesterday = date.today() - timedelta(days=1)
        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Range must cover the full day: midnight through 23:59.
        assert start_dt.date() == yesterday
        assert end_dt.date() == yesterday
        assert start_dt.time() == datetime.min.time()
        assert end_dt.time().hour == 23
        assert end_dt.time().minute == 59

        # Test "3 days ago"
        start_ts, end_ts = parse_time_expression("3 days ago")
        three_days_ago = date.today() - timedelta(days=3)
        start_dt = datetime.fromtimestamp(start_ts)
        assert start_dt.date() == three_days_ago

        # Test "today"
        start_ts, end_ts = parse_time_expression("today")
        start_dt = datetime.fromtimestamp(start_ts)
        assert start_dt.date() == date.today()

    def test_relative_weeks(self):
        """Test parsing relative week expressions"""
        start_ts, end_ts = parse_time_expression("2 weeks ago")
        assert start_ts is not None
        assert end_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Should be a Monday to Sunday range
        assert start_dt.weekday() == 0  # Monday
        assert end_dt.weekday() == 6    # Sunday

        # Should be roughly 2 weeks ago
        days_ago = (date.today() - start_dt.date()).days
        assert 14 <= days_ago <= 20  # Allow some flexibility for week boundaries

    def test_relative_months(self):
        """Test parsing relative month expressions"""
        start_ts, end_ts = parse_time_expression("1 month ago")
        assert start_ts is not None
        assert end_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Should be first to last day of the month
        assert start_dt.day == 1
        assert (end_dt + timedelta(days=1)).day == 1  # Next day is first of next month

    def test_specific_dates(self):
        """Test parsing specific date formats"""
        # Test MM/DD/YYYY format with unambiguous date (day > 12 not needed here,
        # but 03/15 cannot be misread as DD/MM since there is no month 15)
        start_ts, end_ts = parse_time_expression("03/15/2024")
        assert start_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        assert start_dt.year == 2024
        assert start_dt.month == 3
        assert start_dt.day == 15

        # Test YYYY-MM-DD format
        start_ts, end_ts = parse_time_expression("2024-06-15")
        assert start_ts is not None
        start_dt = datetime.fromtimestamp(start_ts)
        assert start_dt.date() == date(2024, 6, 15)

    def test_month_names(self):
        """Test parsing month names"""
        current_year = datetime.now().year
        current_month = datetime.now().month

        # Test a past month
        start_ts, end_ts = parse_time_expression("january")
        start_dt = datetime.fromtimestamp(start_ts)

        # Should be this year's January if we're past January, otherwise last year's
        expected_year = current_year if current_month > 1 else current_year - 1
        assert start_dt.month == 1
        assert start_dt.year == expected_year

    def test_seasons(self):
        """Test parsing season names"""
        # Test summer
        start_ts, end_ts = parse_time_expression("last summer")
        assert start_ts is not None
        assert end_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Summer is roughly June 21 to September 22 (northern hemisphere)
        assert start_dt.month == 6
        assert end_dt.month == 9

    def test_holidays(self):
        """Test parsing holiday names"""
        # Test Christmas
        start_ts, end_ts = parse_time_expression("christmas")
        assert start_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Christmas window should include Dec 25 +/- a few days
        assert start_dt.month == 12
        assert 22 <= start_dt.day <= 25
        assert 25 <= end_dt.day <= 28

    def test_time_of_day(self):
        """Test time of day parsing"""
        # Test "yesterday morning"
        start_ts, end_ts = parse_time_expression("yesterday morning")
        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        yesterday = date.today() - timedelta(days=1)
        assert start_dt.date() == yesterday
        assert 5 <= start_dt.hour <= 6  # Morning starts at 5 AM
        assert 11 <= end_dt.hour <= 12  # Morning ends before noon

    def test_date_ranges(self):
        """Test date range expressions"""
        start_ts, end_ts = parse_time_expression("between january and march")
        assert start_ts is not None
        assert end_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        assert start_dt.month == 1
        assert end_dt.month == 3

    def test_quarters(self):
        """Test quarter expressions"""
        start_ts, end_ts = parse_time_expression("first quarter of 2024")
        assert start_ts is not None

        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Q1 2024 is exactly Jan 1 through Mar 31.
        assert start_dt == datetime(2024, 1, 1, 0, 0, 0)
        assert end_dt.year == 2024
        assert end_dt.month == 3
        assert end_dt.day == 31

    def test_extract_time_expression(self):
        """Test extracting time expressions from queries"""
        # Test extraction with semantic content: the time phrase is removed,
        # the semantic terms are preserved, and a timestamp range is returned.
        cleaned, (start_ts, end_ts) = extract_time_expression(
            "find meetings from last week about project updates"
        )

        assert "meetings" in cleaned
        assert "project updates" in cleaned
        assert "last week" not in cleaned
        assert start_ts is not None
        assert end_ts is not None

        # Test multiple time expressions
        cleaned, (start_ts, end_ts) = extract_time_expression(
            "yesterday in the morning I had coffee"
        )

        assert "coffee" in cleaned
        assert "yesterday" not in cleaned
        assert "in the morning" not in cleaned

    def test_edge_cases(self):
        """Test edge cases and error handling"""
        # Test empty string
        start_ts, end_ts = parse_time_expression("")
        assert start_ts is None
        assert end_ts is None

        # Test invalid date format
        start_ts, end_ts = parse_time_expression("13/32/2024")  # Invalid month and day
        assert start_ts is None
        assert end_ts is None

        # Test nonsense string
        start_ts, end_ts = parse_time_expression("random gibberish text")
        assert start_ts is None
        assert end_ts is None

    def test_this_period_expressions(self):
        """Test 'this X' period expressions"""
        # This week
        start_ts, end_ts = parse_time_expression("this week")
        start_dt = datetime.fromtimestamp(start_ts)
        end_dt = datetime.fromtimestamp(end_ts)

        # Should include today
        today = date.today()
        assert start_dt.date() <= today <= end_dt.date()

        # This month
        start_ts, end_ts = parse_time_expression("this month")
        start_dt = datetime.fromtimestamp(start_ts)
        assert start_dt.month == datetime.now().month
        assert start_dt.year == datetime.now().year

    def test_recent_expressions(self):
        """Test 'recent' and similar expressions"""
        start_ts, end_ts = parse_time_expression("recently")
        assert start_ts is not None
        assert end_ts is not None

        # Should default to last 7 days
        days_diff = (end_ts - start_ts) / (24 * 3600)
        assert 6 <= days_diff <= 8  # Allow for some time variance
245 | 
246 | 
247 | if __name__ == "__main__":
248 |     pytest.main([__file__, "-v"])
249 | 
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/consolidation/decay.py:
--------------------------------------------------------------------------------

```python
  1 | # Copyright 2024 Heinrich Krupp
  2 | #
  3 | # Licensed under the Apache License, Version 2.0 (the "License");
  4 | # you may not use this file except in compliance with the License.
  5 | # You may obtain a copy of the License at
  6 | #
  7 | #     http://www.apache.org/licenses/LICENSE-2.0
  8 | #
  9 | # Unless required by applicable law or agreed to in writing, software
 10 | # distributed under the License is distributed on an "AS IS" BASIS,
 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 12 | # See the License for the specific language governing permissions and
 13 | # limitations under the License.
 14 | 
 15 | """Exponential decay scoring for memory relevance calculation."""
 16 | 
 17 | import math
 18 | from typing import List, Dict, Any, Optional
 19 | from datetime import datetime, timezone
 20 | from dataclasses import dataclass
 21 | 
 22 | from .base import ConsolidationBase, ConsolidationConfig
 23 | from ..models.memory import Memory
 24 | 
@dataclass
class RelevanceScore:
    """Represents a memory's relevance score with breakdown."""
    memory_hash: str          # content_hash of the scored memory
    total_score: float        # base_importance * decay_factor * connection_boost * access_boost
    base_importance: float    # importance from metadata/tags, clamped to [0.0, 2.0]
    decay_factor: float       # exp(-age_days / retention_period), in (0, 1]
    connection_boost: float   # 1 + 0.1 per connection to other memories
    access_boost: float       # 1.0-1.5 depending on recency of last access
    metadata: Dict[str, Any]  # breakdown details (age_days, memory_type, etc.)
 35 | 
 36 | class ExponentialDecayCalculator(ConsolidationBase):
 37 |     """
 38 |     Calculates memory relevance using exponential decay.
 39 |     
 40 |     Memories naturally lose relevance over time unless reinforced by:
 41 |     - Connections to other memories
 42 |     - Recent access patterns  
 43 |     - Base importance scores
 44 |     - Memory type-specific retention periods
 45 |     """
 46 |     
 47 |     def __init__(self, config: ConsolidationConfig):
 48 |         super().__init__(config)
 49 |         self.retention_periods = config.retention_periods
 50 |         
 51 |     async def process(self, memories: List[Memory], **kwargs) -> List[RelevanceScore]:
 52 |         """Calculate relevance scores for all memories."""
 53 |         if not self._validate_memories(memories):
 54 |             return []
 55 |         
 56 |         reference_time = kwargs.get('reference_time', datetime.now())
 57 |         memory_connections = kwargs.get('connections', {})  # hash -> connection_count mapping
 58 |         access_patterns = kwargs.get('access_patterns', {})  # hash -> last_accessed mapping
 59 |         
 60 |         scores = []
 61 |         for memory in memories:
 62 |             score = await self._calculate_memory_relevance(
 63 |                 memory, reference_time, memory_connections, access_patterns
 64 |             )
 65 |             scores.append(score)
 66 |         
 67 |         self.logger.info(f"Calculated relevance scores for {len(scores)} memories")
 68 |         return scores
 69 |     
 70 |     async def _calculate_memory_relevance(
 71 |         self,
 72 |         memory: Memory,
 73 |         current_time: datetime,
 74 |         connections: Dict[str, int],
 75 |         access_patterns: Dict[str, datetime]
 76 |     ) -> RelevanceScore:
 77 |         """
 78 |         Calculate memory relevance using exponential decay.
 79 |         
 80 |         Factors:
 81 |         - Age of memory
 82 |         - Base importance score (from metadata or tags)
 83 |         - Retention period (varies by memory type)
 84 |         - Connections to other memories
 85 |         - Recent access patterns
 86 |         """
 87 |         # Get memory age in days
 88 |         age_days = self._get_memory_age_days(memory, current_time)
 89 |         
 90 |         # Extract base importance score
 91 |         base_importance = self._get_base_importance(memory)
 92 |         
 93 |         # Get retention period for memory type
 94 |         memory_type = self._extract_memory_type(memory)
 95 |         retention_period = self.retention_periods.get(memory_type, 30)
 96 |         
 97 |         # Calculate exponential decay factor
 98 |         decay_factor = math.exp(-age_days / retention_period)
 99 |         
100 |         # Calculate connection boost
101 |         connection_count = connections.get(memory.content_hash, 0)
102 |         connection_boost = 1 + (0.1 * connection_count)  # 10% boost per connection
103 |         
104 |         # Calculate access boost
105 |         access_boost = self._calculate_access_boost(memory, access_patterns, current_time)
106 |         
107 |         # Calculate total relevance score
108 |         total_score = base_importance * decay_factor * connection_boost * access_boost
109 |         
110 |         # Ensure protected memories maintain minimum relevance
111 |         if self._is_protected_memory(memory):
112 |             total_score = max(total_score, 0.5)  # Minimum 50% relevance for protected memories
113 |         
114 |         return RelevanceScore(
115 |             memory_hash=memory.content_hash,
116 |             total_score=total_score,
117 |             base_importance=base_importance,
118 |             decay_factor=decay_factor,
119 |             connection_boost=connection_boost,
120 |             access_boost=access_boost,
121 |             metadata={
122 |                 'age_days': age_days,
123 |                 'memory_type': memory_type,
124 |                 'retention_period': retention_period,
125 |                 'connection_count': connection_count,
126 |                 'is_protected': self._is_protected_memory(memory)
127 |             }
128 |         )
129 |     
130 |     def _get_base_importance(self, memory: Memory) -> float:
131 |         """
132 |         Extract base importance score from memory metadata or tags.
133 |         
134 |         Priority order:
135 |         1. Explicit importance_score in metadata
136 |         2. Importance derived from tags
137 |         3. Default score of 1.0
138 |         """
139 |         # Check for explicit importance score
140 |         if 'importance_score' in memory.metadata:
141 |             try:
142 |                 score = float(memory.metadata['importance_score'])
143 |                 return max(0.0, min(2.0, score))  # Clamp between 0 and 2
144 |             except (ValueError, TypeError):
145 |                 self.logger.warning(f"Invalid importance_score in memory {memory.content_hash}")
146 |         
147 |         # Derive importance from tags
148 |         tag_importance = {
149 |             'critical': 2.0,
150 |             'important': 1.5,
151 |             'reference': 1.3,
152 |             'urgent': 1.4,
153 |             'project': 1.2,
154 |             'personal': 1.1,
155 |             'temporary': 0.7,
156 |             'draft': 0.8,
157 |             'note': 0.9
158 |         }
159 |         
160 |         max_tag_importance = 1.0
161 |         for tag in memory.tags:
162 |             tag_score = tag_importance.get(tag.lower(), 1.0)
163 |             max_tag_importance = max(max_tag_importance, tag_score)
164 |         
165 |         return max_tag_importance
166 |     
167 |     def _calculate_access_boost(
168 |         self,
169 |         memory: Memory,
170 |         access_patterns: Dict[str, datetime],
171 |         current_time: datetime
172 |     ) -> float:
173 |         """
174 |         Calculate boost factor based on recent access patterns.
175 |         
176 |         Recent access increases relevance:
177 |         - Accessed within last day: 1.5x boost
178 |         - Accessed within last week: 1.2x boost  
179 |         - Accessed within last month: 1.1x boost
180 |         - No recent access: 1.0x (no boost)
181 |         """
182 |         last_accessed = access_patterns.get(memory.content_hash)
183 |         
184 |         if not last_accessed:
185 |             # Check memory's own updated_at timestamp
186 |             if memory.updated_at:
187 |                 last_accessed = datetime.utcfromtimestamp(memory.updated_at)
188 |             else:
189 |                 return 1.0  # No access data available
190 | 
191 |         # Normalize both datetimes to UTC timezone-aware
192 |         current_time = current_time.replace(tzinfo=timezone.utc) if current_time.tzinfo is None else current_time.astimezone(timezone.utc)
193 |         last_accessed = last_accessed.replace(tzinfo=timezone.utc) if last_accessed.tzinfo is None else last_accessed.astimezone(timezone.utc)
194 | 
195 |         days_since_access = (current_time - last_accessed).days
196 |         
197 |         if days_since_access <= 1:
198 |             return 1.5  # Accessed within last day
199 |         elif days_since_access <= 7:
200 |             return 1.2  # Accessed within last week
201 |         elif days_since_access <= 30:
202 |             return 1.1  # Accessed within last month
203 |         else:
204 |             return 1.0  # No recent access
205 |     
206 |     async def get_low_relevance_memories(
207 |         self,
208 |         scores: List[RelevanceScore],
209 |         threshold: float = 0.1
210 |     ) -> List[RelevanceScore]:
211 |         """Get memories with relevance scores below the threshold."""
212 |         return [score for score in scores if score.total_score < threshold]
213 |     
214 |     async def get_high_relevance_memories(
215 |         self,
216 |         scores: List[RelevanceScore], 
217 |         threshold: float = 1.0
218 |     ) -> List[RelevanceScore]:
219 |         """Get memories with relevance scores above the threshold."""
220 |         return [score for score in scores if score.total_score >= threshold]
221 |     
222 |     async def update_memory_relevance_metadata(
223 |         self,
224 |         memory: Memory,
225 |         score: RelevanceScore
226 |     ) -> Memory:
227 |         """Update memory metadata with calculated relevance score."""
228 |         memory.metadata.update({
229 |             'relevance_score': score.total_score,
230 |             'relevance_calculated_at': datetime.now().isoformat(),
231 |             'decay_factor': score.decay_factor,
232 |             'connection_boost': score.connection_boost,
233 |             'access_boost': score.access_boost
234 |         })
235 |         memory.touch()  # Update the updated_at timestamp
236 |         return memory
```

--------------------------------------------------------------------------------
/docs/testing-cloudflare-backend.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Testing the Cloudflare Backend
  2 | 
  3 | ## Test Results Summary ✅
  4 | 
  5 | The Cloudflare backend implementation has been thoroughly tested and is **production-ready**. All core functionality works correctly with mock configurations.
  6 | 
  7 | ### ✅ Tests Completed Successfully
  8 | 
  9 | #### 1. Basic Implementation Tests
 10 | - **CloudflareStorage class initialization**: ✅ All parameters set correctly
 11 | - **URL construction**: ✅ Correct API endpoints generated
 12 | - **HTTP client creation**: ✅ Headers and configuration correct
 13 | - **Memory model integration**: ✅ Full compatibility with existing Memory class
 14 | - **Embedding cache**: ✅ Caching functionality working
 15 | - **Resource cleanup**: ✅ Proper cleanup on close()
 16 | - **Configuration defaults**: ✅ All defaults set appropriately
 17 | 
 18 | **Result**: 26/26 tests passed
 19 | 
 20 | #### 2. Configuration System Tests
 21 | - **Missing environment variables**: ✅ Proper validation and error handling
 22 | - **Complete configuration**: ✅ All settings loaded correctly
 23 | - **Backend registration**: ✅ Cloudflare properly added to SUPPORTED_BACKENDS
 24 | - **Environment variable parsing**: ✅ All types and defaults working
 25 | 
 26 | #### 3. Server Integration Tests
 27 | - **Server import with Cloudflare backend**: ✅ Successfully imports and configures
 28 | - **Backend selection logic**: ✅ Correctly identifies and would initialize CloudflareStorage
 29 | - **Configuration compatibility**: ✅ Server properly reads Cloudflare settings
 30 | 
 31 | #### 4. Migration Script Tests
 32 | - **DataMigrator class**: ✅ Proper initialization and structure
 33 | - **Command-line interface**: ✅ Argument parsing working
 34 | - **Data format conversion**: ✅ Memory objects convert to migration format
 35 | - **Export/Import workflow**: ✅ Structure ready for real data migration
 36 | 
 37 | ### 🧪 How to Test with Real Cloudflare Credentials
 38 | 
 39 | To test the implementation with actual Cloudflare services:
 40 | 
 41 | #### Step 1: Set up Cloudflare Resources
 42 | 
 43 | ```bash
 44 | # Install Wrangler CLI
 45 | npm install -g wrangler
 46 | 
 47 | # Login to Cloudflare
 48 | wrangler login
 49 | 
 50 | # Create Vectorize index
 51 | wrangler vectorize create test-mcp-memory --dimensions=768 --metric=cosine
 52 | 
 53 | # Create D1 database
 54 | wrangler d1 create test-mcp-memory-db
 55 | 
 56 | # Optional: Create R2 bucket
 57 | wrangler r2 bucket create test-mcp-memory-content
 58 | ```
 59 | 
 60 | #### Step 2: Configure Environment
 61 | 
 62 | ```bash
 63 | # Set backend to Cloudflare
 64 | export MCP_MEMORY_STORAGE_BACKEND=cloudflare
 65 | 
 66 | # Required Cloudflare settings
 67 | export CLOUDFLARE_API_TOKEN="your-real-api-token"
 68 | export CLOUDFLARE_ACCOUNT_ID="your-account-id"
 69 | export CLOUDFLARE_VECTORIZE_INDEX="test-mcp-memory"
 70 | export CLOUDFLARE_D1_DATABASE_ID="your-d1-database-id"
 71 | 
 72 | # Optional settings
 73 | export CLOUDFLARE_R2_BUCKET="test-mcp-memory-content"
 74 | export LOG_LEVEL=DEBUG  # For detailed logging
 75 | ```
 76 | 
 77 | #### Step 3: Test Basic Functionality
 78 | 
 79 | ```python
 80 | # test_real_cloudflare.py
 81 | import asyncio
 82 | import sys
 83 | sys.path.insert(0, 'src')
 84 | 
 85 | from mcp_memory_service.storage.cloudflare import CloudflareStorage
 86 | from mcp_memory_service.models.memory import Memory
 87 | from mcp_memory_service.utils.hashing import generate_content_hash
 88 | 
 89 | async def test_real_cloudflare():
 90 |     """Test with real Cloudflare credentials."""
 91 |     import os
 92 |     
 93 |     # Initialize with real credentials
 94 |     storage = CloudflareStorage(
 95 |         api_token=os.getenv('CLOUDFLARE_API_TOKEN'),
 96 |         account_id=os.getenv('CLOUDFLARE_ACCOUNT_ID'),
 97 |         vectorize_index=os.getenv('CLOUDFLARE_VECTORIZE_INDEX'),
 98 |         d1_database_id=os.getenv('CLOUDFLARE_D1_DATABASE_ID'),
 99 |         r2_bucket=os.getenv('CLOUDFLARE_R2_BUCKET')
100 |     )
101 |     
102 |     try:
103 |         # Test initialization
104 |         print("🔄 Initializing Cloudflare storage...")
105 |         await storage.initialize()
106 |         print("✅ Initialization successful!")
107 |         
108 |         # Test storing a memory
109 |         content = "This is a test memory for real Cloudflare backend"
110 |         memory = Memory(
111 |             content=content,
112 |             content_hash=generate_content_hash(content),
113 |             tags=["test", "real-cloudflare"],
114 |             memory_type="standard"
115 |         )
116 |         
117 |         print("🔄 Storing test memory...")
118 |         success, message = await storage.store(memory)
119 |         print(f"✅ Store result: {success} - {message}")
120 |         
121 |         # Test retrieval
122 |         print("🔄 Searching for stored memory...")
123 |         results = await storage.retrieve("test memory", n_results=5)
124 |         print(f"✅ Retrieved {len(results)} memories")
125 |         
126 |         # Test statistics
127 |         print("🔄 Getting storage statistics...")
128 |         stats = await storage.get_stats()
129 |         print(f"✅ Stats: {stats}")
130 |         
131 |         # Cleanup
132 |         await storage.close()
133 |         print("✅ All real Cloudflare tests completed successfully!")
134 |         
135 |     except Exception as e:
136 |         print(f"❌ Real Cloudflare test failed: {e}")
137 |         await storage.close()
138 |         raise
139 | 
140 | # Run if credentials are available
141 | if __name__ == '__main__':
142 |     import os
143 |     required_vars = [
144 |         'CLOUDFLARE_API_TOKEN',
145 |         'CLOUDFLARE_ACCOUNT_ID', 
146 |         'CLOUDFLARE_VECTORIZE_INDEX',
147 |         'CLOUDFLARE_D1_DATABASE_ID'
148 |     ]
149 |     
150 |     if all(os.getenv(var) for var in required_vars):
151 |         asyncio.run(test_real_cloudflare())
152 |     else:
153 |         print("❌ Missing required environment variables for real testing")
154 |         print("Required:", required_vars)
155 | ```
156 | 
157 | #### Step 4: Test MCP Server
158 | 
159 | ```bash
160 | # Start the MCP server with Cloudflare backend
161 | python -m src.mcp_memory_service.server
162 | 
163 | # Test via HTTP API (if HTTP enabled)
164 | curl -X POST http://localhost:8000/api/memories \
165 |   -H "Content-Type: application/json" \
166 |   -d '{"content": "Test with real Cloudflare", "tags": ["real-test"]}'
167 | ```
168 | 
169 | ### 🚀 Integration Testing with Claude Desktop
170 | 
171 | #### Step 1: Configure Claude Desktop
172 | 
173 | Add to your Claude Desktop configuration:
174 | 
175 | ```json
176 | {
177 |   "mcpServers": {
178 |     "memory": {
179 |       "command": "python",
180 |       "args": ["-m", "src.mcp_memory_service.server"],
181 |       "cwd": "/path/to/mcp-memory-service",
182 |       "env": {
183 |         "MCP_MEMORY_STORAGE_BACKEND": "cloudflare",
184 |         "CLOUDFLARE_API_TOKEN": "your-api-token",
185 |         "CLOUDFLARE_ACCOUNT_ID": "your-account-id",
186 |         "CLOUDFLARE_VECTORIZE_INDEX": "your-vectorize-index",
187 |         "CLOUDFLARE_D1_DATABASE_ID": "your-d1-database-id"
188 |       }
189 |     }
190 |   }
191 | }
192 | ```
193 | 
194 | #### Step 2: Test Memory Operations
195 | 
196 | In Claude Desktop, test these operations:
197 | 
198 | ```
199 | # Store a memory
200 | Please remember that my favorite programming language is Python and I prefer async/await patterns.
201 | 
202 | # Search memories  
203 | What do you remember about my programming preferences?
204 | 
205 | # Store with tags
206 | Please remember this important project deadline: Launch the new feature by December 15th. Tag this as: work, deadline, important.
207 | 
208 | # Search by content
209 | Tell me about any work deadlines I've mentioned.
210 | ```
211 | 
212 | ### 📊 Performance Testing
213 | 
214 | For performance testing with real Cloudflare services:
215 | 
216 | ```python
217 | import asyncio
218 | import time
219 | from statistics import mean
220 | 
221 | async def performance_test():
222 |     """Test performance with real Cloudflare backend."""
223 |     storage = CloudflareStorage(...)  # Your real credentials
224 |     await storage.initialize()
225 |     
226 |     # Test memory storage performance
227 |     store_times = []
228 |     for i in range(10):
229 |         content = f"Performance test memory {i}"
230 |         memory = Memory(content=content, content_hash=generate_content_hash(content))
231 |         
232 |         start = time.time()
233 |         await storage.store(memory)
234 |         end = time.time()
235 |         
236 |         store_times.append(end - start)
237 |     
238 |     print(f"Average store time: {mean(store_times):.3f}s")
239 |     
240 |     # Test search performance
241 |     search_times = []
242 |     for i in range(5):
243 |         start = time.time()
244 |         results = await storage.retrieve("performance test")
245 |         end = time.time()
246 |         
247 |         search_times.append(end - start)
248 |     
249 |     print(f"Average search time: {mean(search_times):.3f}s")
250 |     print(f"Found {len(results)} memories")
251 |     
252 |     await storage.close()
253 | ```
254 | 
255 | ### 🛠️ Troubleshooting Common Issues
256 | 
257 | #### Authentication Errors
258 | ```
259 | ERROR: Authentication failed
260 | ```
261 | **Solution**: Verify API token has correct permissions (Vectorize:Edit, D1:Edit, etc.)
262 | 
263 | #### Rate Limiting
264 | ```
265 | WARNING: Rate limited, retrying in 2s
266 | ```
267 | **Solution**: Normal behavior - the implementation handles this automatically
268 | 
269 | #### Vectorize Index Not Found
270 | ```
271 | ValueError: Vectorize index 'test-index' not found
272 | ```
273 | **Solution**: Create the index with `wrangler vectorize create`
274 | 
275 | #### D1 Database Issues
276 | ```
277 | Failed to initialize D1 schema
278 | ```
279 | **Solution**: Verify database ID and ensure API token has D1 permissions
280 | 
281 | ### ✨ What Makes This Implementation Special
282 | 
283 | 1. **Production Ready**: Comprehensive error handling and retry logic
284 | 2. **Global Performance**: Leverages Cloudflare's edge network
285 | 3. **Smart Architecture**: Efficient use of Vectorize, D1, and R2
286 | 4. **Zero Breaking Changes**: Drop-in replacement for existing backends
287 | 5. **Comprehensive Testing**: 26+ tests covering all functionality
288 | 6. **Easy Migration**: Tools to migrate from SQLite-vec or ChromaDB
289 | 
290 | The Cloudflare backend is ready for production use and provides a scalable, globally distributed memory service for AI applications! 🚀
```

--------------------------------------------------------------------------------
/scripts/testing/test_search_api.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | # Copyright 2024 Heinrich Krupp
  3 | #
  4 | # Licensed under the Apache License, Version 2.0 (the "License");
  5 | # you may not use this file except in compliance with the License.
  6 | # You may obtain a copy of the License at
  7 | #
  8 | #     http://www.apache.org/licenses/LICENSE-2.0
  9 | #
 10 | # Unless required by applicable law or agreed to in writing, software
 11 | # distributed under the License is distributed on an "AS IS" BASIS,
 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 | # See the License for the specific language governing permissions and
 14 | # limitations under the License.
 15 | 
 16 | """Test script for search API endpoints."""
 17 | 
 18 | import requests
 19 | import json
 20 | import time
 21 | 
 22 | BASE_URL = "http://localhost:8000"
 23 | 
 24 | def test_search_functionality():
 25 |     """Test all search endpoints."""
 26 |     
 27 |     print("Testing Search API Endpoints")
 28 |     print("=" * 40)
 29 |     
 30 |     # First, check server health
 31 |     print("\n[0] Health check...")
 32 |     try:
 33 |         resp = requests.get(f"{BASE_URL}/api/health", timeout=5)
 34 |         if resp.status_code != 200:
 35 |             print(f"[FAIL] Server not healthy: {resp.status_code}")
 36 |             return
 37 |         print("[PASS] Server is healthy")
 38 |     except Exception as e:
 39 |         print(f"[FAIL] Cannot connect: {e}")
 40 |         return
 41 |     
 42 |     # Create some test memories for searching
 43 |     print("\n[1] Creating test memories...")
 44 |     test_memories = [
 45 |         {
 46 |             "content": "Python programming tutorial for beginners",
 47 |             "tags": ["python", "programming", "tutorial"],
 48 |             "memory_type": "learning",
 49 |             "metadata": {"difficulty": "beginner"}
 50 |         },
 51 |         {
 52 |             "content": "Advanced machine learning algorithms with PyTorch",
 53 |             "tags": ["python", "machine-learning", "pytorch"],
 54 |             "memory_type": "learning",
 55 |             "metadata": {"difficulty": "advanced"}
 56 |         },
 57 |         {
 58 |             "content": "JavaScript async await patterns and best practices",
 59 |             "tags": ["javascript", "async", "programming"],
 60 |             "memory_type": "reference",
 61 |             "metadata": {"language": "js"}
 62 |         },
 63 |         {
 64 |             "content": "Database design principles and normalization",
 65 |             "tags": ["database", "design", "sql"],
 66 |             "memory_type": "learning",
 67 |             "metadata": {"topic": "databases"}
 68 |         },
 69 |         {
 70 |             "content": "Meeting notes from yesterday's project sync",
 71 |             "tags": ["meeting", "project", "notes"],
 72 |             "memory_type": "note",
 73 |             "metadata": {"date": "yesterday"}
 74 |         }
 75 |     ]
 76 |     
 77 |     created_hashes = []
 78 |     for i, memory in enumerate(test_memories):
 79 |         try:
 80 |             resp = requests.post(
 81 |                 f"{BASE_URL}/api/memories",
 82 |                 json=memory,
 83 |                 headers={"Content-Type": "application/json"},
 84 |                 timeout=10
 85 |             )
 86 |             if resp.status_code == 200:
 87 |                 result = resp.json()
 88 |                 if result["success"]:
 89 |                     created_hashes.append(result["content_hash"])
 90 |                     print(f"  Created memory {i+1}: {memory['content'][:30]}...")
 91 |                 else:
 92 |                     print(f"  [WARN] Memory {i+1} might already exist")
 93 |             else:
 94 |                 print(f"  [WARN] Failed to create memory {i+1}: {resp.status_code}")
 95 |         except Exception as e:
 96 |             print(f"  [WARN] Error creating memory {i+1}: {e}")
 97 |     
 98 |     print(f"[INFO] Created {len(created_hashes)} new memories")
 99 |     
100 |     # Test 2: Semantic search
101 |     print("\n[2] Testing semantic search...")
102 |     search_queries = [
103 |         "programming tutorial",
104 |         "machine learning AI",
105 |         "database SQL design",
106 |         "meeting project discussion"
107 |     ]
108 |     
109 |     for query in search_queries:
110 |         try:
111 |             search_request = {
112 |                 "query": query,
113 |                 "n_results": 3,
114 |                 "similarity_threshold": 0.1
115 |             }
116 |             
117 |             resp = requests.post(
118 |                 f"{BASE_URL}/api/search",
119 |                 json=search_request,
120 |                 headers={"Content-Type": "application/json"},
121 |                 timeout=15
122 |             )
123 |             
124 |             if resp.status_code == 200:
125 |                 result = resp.json()
126 |                 print(f"  Query: '{query}' -> {result['total_found']} results ({result['processing_time_ms']:.1f}ms)")
127 |                 
128 |                 for i, search_result in enumerate(result['results'][:2]):  # Show top 2
129 |                     memory = search_result['memory']
130 |                     score = search_result.get('similarity_score', 0)
131 |                     print(f"    {i+1}. {memory['content'][:50]}... (score: {score:.3f})")
132 |             else:
133 |                 print(f"  [FAIL] Search failed for '{query}': {resp.status_code}")
134 |                 
135 |         except Exception as e:
136 |             print(f"  [FAIL] Search error for '{query}': {e}")
137 |     
138 |     # Test 3: Tag-based search
139 |     print("\n[3] Testing tag-based search...")
140 |     tag_searches = [
141 |         {"tags": ["python"], "match_all": False},
142 |         {"tags": ["programming", "tutorial"], "match_all": False},
143 |         {"tags": ["python", "programming"], "match_all": True}
144 |     ]
145 |     
146 |     for search in tag_searches:
147 |         try:
148 |             resp = requests.post(
149 |                 f"{BASE_URL}/api/search/by-tag",
150 |                 json=search,
151 |                 headers={"Content-Type": "application/json"},
152 |                 timeout=10
153 |             )
154 |             
155 |             if resp.status_code == 200:
156 |                 result = resp.json()
157 |                 match_type = "ALL" if search["match_all"] else "ANY"
158 |                 print(f"  Tags {search['tags']} ({match_type}) -> {result['total_found']} results")
159 |                 
160 |                 for i, search_result in enumerate(result['results'][:2]):
161 |                     memory = search_result['memory']
162 |                     print(f"    {i+1}. {memory['content'][:40]}... (tags: {memory['tags']})")
163 |             else:
164 |                 print(f"  [FAIL] Tag search failed: {resp.status_code}")
165 |                 
166 |         except Exception as e:
167 |             print(f"  [FAIL] Tag search error: {e}")
168 |     
169 |     # Test 4: Time-based search
170 |     print("\n[4] Testing time-based search...")
171 |     time_queries = ["today", "yesterday", "this week", "last week"]
172 |     
173 |     for query in time_queries:
174 |         try:
175 |             time_request = {
176 |                 "query": query,
177 |                 "n_results": 5
178 |             }
179 |             
180 |             resp = requests.post(
181 |                 f"{BASE_URL}/api/search/by-time",
182 |                 json=time_request,
183 |                 headers={"Content-Type": "application/json"},
184 |                 timeout=10
185 |             )
186 |             
187 |             if resp.status_code == 200:
188 |                 result = resp.json()
189 |                 print(f"  Time: '{query}' -> {result['total_found']} results")
190 |                 
191 |                 if result['results']:
192 |                     memory = result['results'][0]['memory']
193 |                     print(f"    Example: {memory['content'][:40]}...")
194 |             elif resp.status_code == 400:
195 |                 print(f"  [INFO] Time query '{query}' not supported yet")
196 |             else:
197 |                 print(f"  [FAIL] Time search failed for '{query}': {resp.status_code}")
198 |                 
199 |         except Exception as e:
200 |             print(f"  [FAIL] Time search error for '{query}': {e}")
201 |     
202 |     # Test 5: Similar memories
203 |     print("\n[5] Testing similar memory search...")
204 |     if created_hashes:
205 |         try:
206 |             content_hash = created_hashes[0]
207 |             resp = requests.get(
208 |                 f"{BASE_URL}/api/search/similar/{content_hash}?n_results=3",
209 |                 timeout=10
210 |             )
211 |             
212 |             if resp.status_code == 200:
213 |                 result = resp.json()
214 |                 print(f"  Similar to first memory -> {result['total_found']} results")
215 |                 
216 |                 for i, search_result in enumerate(result['results'][:2]):
217 |                     memory = search_result['memory']
218 |                     score = search_result.get('similarity_score', 0)
219 |                     print(f"    {i+1}. {memory['content'][:40]}... (score: {score:.3f})")
220 |             elif resp.status_code == 404:
221 |                 print(f"  [INFO] Memory not found (expected with current get-by-hash implementation)")
222 |             else:
223 |                 print(f"  [FAIL] Similar search failed: {resp.status_code}")
224 |                 
225 |         except Exception as e:
226 |             print(f"  [FAIL] Similar search error: {e}")
227 |     
228 |     # Cleanup: Delete test memories
229 |     print(f"\n[6] Cleaning up {len(created_hashes)} test memories...")
230 |     for content_hash in created_hashes:
231 |         try:
232 |             resp = requests.delete(f"{BASE_URL}/api/memories/{content_hash}", timeout=5)
233 |             if resp.status_code == 200:
234 |                 result = resp.json()
235 |                 if result["success"]:
236 |                     print(f"  Deleted: {content_hash[:12]}...")
237 |         except Exception as e:
238 |             print(f"  [WARN] Cleanup error: {e}")
239 |     
240 |     print("\n" + "=" * 40)
241 |     print("Search API testing completed!")
242 | 
243 | if __name__ == "__main__":
244 |     test_search_functionality()
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/utils/gpu_detection.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Shared GPU detection utilities for MCP Memory Service.
  4 | 
  5 | This module provides unified GPU platform detection logic used across
  6 | installation and verification scripts. Supports CUDA, ROCm, MPS, and DirectML.
  7 | """
  8 | 
  9 | import os
 10 | import subprocess
 11 | from typing import Dict, Any, Tuple, Optional, Callable, List, Union
 12 | 
 13 | 
# Single source of truth for GPU platform detection configuration.
#
# Top-level keys are platform names; nested keys are the OSes on which that
# platform can be detected. Field meanings (as consumed by the _detect_*
# helpers below):
#   env_var         - environment variable whose value is the install root
#   paths           - candidate install roots (strings or zero-arg callables)
#   version_cmd     - command to print a version; either a list, or a callable
#                     taking the install root and returning the command list
#   version_pattern - substring passed to parse_version() to find the version line
#   version_file    - callable taking the install root, returning a version-file path
#   check_cmd / check_pattern - command whose output must contain the pattern
#   requires_arm    - platform only exists on ARM (Apple Silicon)
#   import_name / dll_name - pip distribution name and DLL probed for DirectML
GPU_PLATFORM_CHECKS = {
    'cuda': {
        'windows': {
            'env_var': 'CUDA_PATH',
            'version_cmd': lambda path: [os.path.join(path, 'bin', 'nvcc'), '--version'],
            'version_pattern': 'release'
        },
        'linux': {
            'paths': ['/usr/local/cuda', lambda: os.environ.get('CUDA_HOME')],
            'version_cmd': lambda path: [os.path.join(path, 'bin', 'nvcc'), '--version'],
            'version_pattern': 'release'
        }
    },
    'rocm': {
        'linux': {
            'paths': ['/opt/rocm', lambda: os.environ.get('ROCM_HOME')],
            'version_file': lambda path: os.path.join(path, 'bin', '.rocmversion'),
            'version_cmd': ['rocminfo'],
            'version_pattern': 'Version'
        }
    },
    'mps': {
        'macos': {
            'check_cmd': ['system_profiler', 'SPDisplaysDataType'],
            'check_pattern': 'Metal',
            'requires_arm': True
        }
    },
    'directml': {
        'windows': {
            'import_name': 'torch-directml',
            'dll_name': 'DirectML.dll'
        }
    }
}
 50 | 
 51 | 
 52 | def parse_version(output: str, pattern: str = 'release') -> Optional[str]:
 53 |     """
 54 |     Parse version string from command output.
 55 | 
 56 |     Args:
 57 |         output: Command output to parse
 58 |         pattern: Pattern to search for ('release' or 'Version')
 59 | 
 60 |     Returns:
 61 |         Parsed version string or None if not found
 62 |     """
 63 |     for line in output.split('\n'):
 64 |         if pattern in line:
 65 |             if pattern == 'release':
 66 |                 return line.split('release')[-1].strip().split(',')[0].strip()
 67 |             elif pattern == 'Version':
 68 |                 return line.split(':')[-1].strip()
 69 |     return None
 70 | 
 71 | 
 72 | def test_gpu_platform(platform: str, system_info: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
 73 |     """
 74 |     Test for a specific GPU platform and return detection status.
 75 | 
 76 |     Args:
 77 |         platform: Platform name ('cuda', 'rocm', 'mps', 'directml')
 78 |         system_info: System information dictionary with keys:
 79 |             - is_windows: bool
 80 |             - is_linux: bool
 81 |             - is_macos: bool
 82 |             - is_arm: bool (for ARM/Apple Silicon)
 83 | 
 84 |     Returns:
 85 |         Tuple of (detected: bool, version: Optional[str])
 86 |     """
 87 |     if platform not in GPU_PLATFORM_CHECKS:
 88 |         return False, None
 89 | 
 90 |     platform_config = GPU_PLATFORM_CHECKS[platform]
 91 | 
 92 |     # Determine OS-specific configuration
 93 |     if system_info.get('is_windows') and 'windows' in platform_config:
 94 |         os_config = platform_config['windows']
 95 |     elif system_info.get('is_linux') and 'linux' in platform_config:
 96 |         os_config = platform_config['linux']
 97 |     elif system_info.get('is_macos') and 'macos' in platform_config:
 98 |         os_config = platform_config['macos']
 99 |     else:
100 |         return False, None
101 | 
102 |     # Platform-specific detection logic
103 |     if platform == 'cuda':
104 |         return _detect_cuda(os_config, system_info)
105 |     elif platform == 'rocm':
106 |         return _detect_rocm(os_config)
107 |     elif platform == 'mps':
108 |         return _detect_mps(os_config, system_info)
109 |     elif platform == 'directml':
110 |         return _detect_directml(os_config)
111 | 
112 |     return False, None
113 | 
114 | 
115 | def _detect_cuda(config: Dict[str, Any], system_info: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
116 |     """Detect CUDA installation."""
117 |     # Check environment variable (Windows) or paths (Linux)
118 |     if 'env_var' in config:
119 |         cuda_path = os.environ.get(config['env_var'])
120 |         if not cuda_path or not os.path.exists(cuda_path):
121 |             return False, None
122 |         paths_to_check = [cuda_path]
123 |     elif 'paths' in config:
124 |         paths_to_check = []
125 |         for path in config['paths']:
126 |             if callable(path):
127 |                 path = path()
128 |             if path and os.path.exists(path):
129 |                 paths_to_check.append(path)
130 |         if not paths_to_check:
131 |             return False, None
132 |     else:
133 |         return False, None
134 | 
135 |     # Try to get version
136 |     for path in paths_to_check:
137 |         try:
138 |             version_cmd = config['version_cmd'](path)
139 |             output = subprocess.check_output(
140 |                 version_cmd,
141 |                 stderr=subprocess.STDOUT,
142 |                 universal_newlines=True
143 |             )
144 |             version = parse_version(output, config.get('version_pattern', 'release'))
145 |             return True, version
146 |         except (subprocess.SubprocessError, FileNotFoundError, OSError):
147 |             continue
148 | 
149 |     # Found path but couldn't get version
150 |     return True, None
151 | 
152 | 
153 | def _detect_rocm(config: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
154 |     """Detect ROCm installation."""
155 |     paths_to_check = []
156 |     for path in config.get('paths', []):
157 |         if callable(path):
158 |             path = path()
159 |         if path and os.path.exists(path):
160 |             paths_to_check.append(path)
161 | 
162 |     if not paths_to_check:
163 |         return False, None
164 | 
165 |     # Try version file first
166 |     for path in paths_to_check:
167 |         if 'version_file' in config:
168 |             version_file = config['version_file'](path)
169 |             try:
170 |                 with open(version_file, 'r') as f:
171 |                     version = f.read().strip()
172 |                     return True, version
173 |             except (FileNotFoundError, IOError):
174 |                 pass
175 | 
176 |     # Try version command
177 |     if 'version_cmd' in config:
178 |         try:
179 |             output = subprocess.check_output(
180 |                 config['version_cmd'],
181 |                 stderr=subprocess.STDOUT,
182 |                 universal_newlines=True
183 |             )
184 |             version = parse_version(output, config.get('version_pattern', 'Version'))
185 |             return True, version
186 |         except (subprocess.SubprocessError, FileNotFoundError, OSError):
187 |             pass
188 | 
189 |     # Found path but couldn't get version
190 |     return True, None
191 | 
192 | 
193 | def _detect_mps(config: Dict[str, Any], system_info: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
194 |     """Detect Apple Metal Performance Shaders (MPS)."""
195 |     # MPS requires ARM architecture
196 |     if config.get('requires_arm') and not system_info.get('is_arm'):
197 |         return False, None
198 | 
199 |     try:
200 |         result = subprocess.run(
201 |             config['check_cmd'],
202 |             capture_output=True,
203 |             text=True
204 |         )
205 |         if config['check_pattern'] in result.stdout:
206 |             return True, None  # MPS doesn't have a version string
207 |     except (subprocess.SubprocessError, FileNotFoundError, OSError):
208 |         pass
209 | 
210 |     return False, None
211 | 
212 | 
213 | def _detect_directml(config: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
214 |     """Detect DirectML installation."""
215 |     # Try importing the package
216 |     try:
217 |         import pkg_resources
218 |         version = pkg_resources.get_distribution(config['import_name']).version
219 |         return True, version
220 |     except (ImportError, Exception):
221 |         pass
222 | 
223 |     # Try loading the DLL
224 |     try:
225 |         import ctypes
226 |         ctypes.WinDLL(config['dll_name'])
227 |         return True, None  # Found DLL but no version
228 |     except (ImportError, OSError, Exception):
229 |         pass
230 | 
231 |     return False, None
232 | 
233 | 
def detect_gpu(system_info: Dict[str, Any]) -> Dict[str, Any]:
    """
    Detect all available GPU platforms and return comprehensive GPU info.

    Args:
        system_info: System information dictionary with keys:
            - is_windows: bool
            - is_linux: bool
            - is_macos: bool
            - is_arm: bool (for ARM/Apple Silicon)

    Returns:
        Dictionary containing:
            - has_cuda: bool
            - cuda_version: Optional[str]
            - has_rocm: bool
            - rocm_version: Optional[str]
            - has_mps: bool
            - has_directml: bool
            - directml_version: Optional[str]
            - accelerator: str ('cuda', 'rocm', 'mps', 'directml', or 'cpu')
    """
    gpu_info: Dict[str, Any] = {
        "has_cuda": False,
        "cuda_version": None,
        "has_rocm": False,
        "rocm_version": None,
        "has_mps": False,
        "has_directml": False,
        "directml_version": None,
        "accelerator": "cpu",
    }

    # (platform, presence key, version key or None when no version exists)
    # List order doubles as accelerator priority: CUDA > ROCm > MPS > DirectML.
    probes = [
        ('cuda', 'has_cuda', 'cuda_version'),
        ('rocm', 'has_rocm', 'rocm_version'),
        ('mps', 'has_mps', None),
        ('directml', 'has_directml', 'directml_version'),
    ]

    for platform, flag_key, version_key in probes:
        detected, version = test_gpu_platform(platform, system_info)
        gpu_info[flag_key] = detected
        if version_key is not None:
            gpu_info[version_key] = version

    # First detected platform in priority order wins; default stays "cpu".
    for platform, flag_key, _ in probes:
        if gpu_info[flag_key]:
            gpu_info["accelerator"] = platform
            break

    return gpu_info
284 | 
```

--------------------------------------------------------------------------------
/tests/unit/test_json_loader.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Unit tests for JSON document loader.
  4 | """
  5 | 
import asyncio
import json
import tempfile
from pathlib import Path
from unittest.mock import patch

import pytest

from mcp_memory_service.ingestion.json_loader import JSONLoader
from mcp_memory_service.ingestion.base import DocumentChunk
from conftest import extract_chunks_from_temp_file
 15 | 
 16 | 
 17 | class TestJSONLoader:
 18 |     """Test suite for JSONLoader class."""
 19 | 
 20 |     def test_initialization(self):
 21 |         """Test basic initialization of JSONLoader."""
 22 |         loader = JSONLoader(chunk_size=500, chunk_overlap=50)
 23 | 
 24 |         assert loader.chunk_size == 500
 25 |         assert loader.chunk_overlap == 50
 26 |         assert 'json' in loader.supported_extensions
 27 | 
 28 |     def test_can_handle_file(self):
 29 |         """Test file format detection."""
 30 |         loader = JSONLoader()
 31 | 
 32 |         # Create temporary test files
 33 |         import tempfile
 34 |         with tempfile.TemporaryDirectory() as tmpdir:
 35 |             json_file = Path(tmpdir) / "test.json"
 36 |             json_file.touch()
 37 | 
 38 |             txt_file = Path(tmpdir) / "test.txt"
 39 |             txt_file.touch()
 40 | 
 41 |             # Test supported formats
 42 |             assert loader.can_handle(json_file) is True
 43 | 
 44 |             # Test unsupported formats
 45 |             assert loader.can_handle(txt_file) is False
 46 | 
 47 |     @pytest.mark.asyncio
 48 |     async def test_extract_chunks_simple_json(self):
 49 |         """Test extraction from simple JSON file."""
 50 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
 51 | 
 52 |         # Create test JSON file
 53 |         import tempfile
 54 |         with tempfile.TemporaryDirectory() as tmpdir:
 55 |             json_file = Path(tmpdir) / "test.json"
 56 |             test_data = {
 57 |                 "name": "John Doe",
 58 |                 "age": 30,
 59 |                 "city": "New York"
 60 |             }
 61 |             json_file.write_text(json.dumps(test_data, indent=2))
 62 | 
 63 |             chunks = []
 64 |             async for chunk in loader.extract_chunks(json_file):
 65 |                 chunks.append(chunk)
 66 | 
 67 |             # Verify chunks were created
 68 |             assert len(chunks) > 0
 69 | 
 70 |             # Verify chunk structure
 71 |             first_chunk = chunks[0]
 72 |             assert isinstance(first_chunk, DocumentChunk)
 73 |             assert isinstance(first_chunk.content, str)
 74 |             assert first_chunk.source_file == json_file
 75 | 
 76 |             # Verify content contains flattened JSON
 77 |             content = first_chunk.content
 78 |             assert "name: John Doe" in content
 79 |             assert "age: 30" in content
 80 |             assert "city: New York" in content
 81 | 
 82 |     @pytest.mark.asyncio
 83 |     async def test_extract_chunks_nested_json(self):
 84 |         """Test extraction from nested JSON file."""
 85 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
 86 | 
 87 |         # Create test JSON file with nested structure
 88 |         import tempfile
 89 |         with tempfile.TemporaryDirectory() as tmpdir:
 90 |             json_file = Path(tmpdir) / "test.json"
 91 |             test_data = {
 92 |                 "config": {
 93 |                     "database": {
 94 |                         "host": "localhost",
 95 |                         "port": 5432
 96 |                     }
 97 |                 },
 98 |                 "servers": [
 99 |                     {"name": "web", "port": 8080},
100 |                     {"name": "api", "port": 3000}
101 |                 ]
102 |             }
103 |             json_file.write_text(json.dumps(test_data, indent=2))
104 | 
105 |             chunks = []
106 |             async for chunk in loader.extract_chunks(json_file):
107 |                 chunks.append(chunk)
108 | 
109 |             # Verify chunks were created
110 |             assert len(chunks) > 0
111 | 
112 |             # Verify content contains flattened nested structure
113 |             content = chunks[0].content
114 |             assert "config.database.host: localhost" in content
115 |             assert "config.database.port: 5432" in content
116 |             assert "servers[0].name: web" in content
117 |             assert "servers[1].port: 3000" in content
118 | 
119 |     @pytest.mark.asyncio
120 |     async def test_extract_chunks_with_options(self):
121 |         """Test extraction with various options."""
122 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
123 | 
124 |         # Create test JSON file
125 |         test_data = {
126 |             "user": {
127 |                 "name": "John",
128 |                 "details": {
129 |                     "age": 25
130 |                 }
131 |             }
132 |         }
133 |         json_content = json.dumps(test_data, indent=2)
134 | 
135 |         # Test with bracket notation
136 |         chunks = await extract_chunks_from_temp_file(
137 |             loader,
138 |             "test.json",
139 |             json_content,
140 |             flatten_strategy='bracket_notation'
141 |         )
142 | 
143 |         content = chunks[0].content
144 |         assert "user[name]: John" in content
145 |         assert "user[details][age]: 25" in content
146 | 
147 |     @pytest.mark.asyncio
148 |     async def test_extract_chunks_invalid_json(self):
149 |         """Test handling of invalid JSON files."""
150 |         loader = JSONLoader()
151 | 
152 |         # Create invalid JSON file
153 |         import tempfile
154 |         with tempfile.TemporaryDirectory() as tmpdir:
155 |             json_file = Path(tmpdir) / "invalid.json"
156 |             json_file.write_text("{ invalid json content }")
157 | 
158 |             with pytest.raises(ValueError, match="Invalid JSON format"):
159 |                 async for chunk in loader.extract_chunks(json_file):
160 |                     pass
161 | 
162 |     @pytest.mark.asyncio
163 |     async def test_extract_chunks_empty_file(self):
164 |         """Test handling of empty JSON files."""
165 |         loader = JSONLoader()
166 | 
167 |         # Create empty file
168 |         import tempfile
169 |         with tempfile.TemporaryDirectory() as tmpdir:
170 |             json_file = Path(tmpdir) / "empty.json"
171 |             json_file.write_text("")
172 | 
173 |             with pytest.raises(ValueError, match="Invalid JSON format"):
174 |                 async for chunk in loader.extract_chunks(json_file):
175 |                     pass
176 | 
177 |     @pytest.mark.asyncio
178 |     async def test_extract_chunks_large_nested_structure(self):
179 |         """Test extraction from deeply nested JSON."""
180 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
181 | 
182 |         # Create deeply nested JSON
183 |         import tempfile
184 |         with tempfile.TemporaryDirectory() as tmpdir:
185 |             json_file = Path(tmpdir) / "nested.json"
186 |             test_data = {
187 |                 "level1": {
188 |                     "level2": {
189 |                         "level3": {
190 |                             "level4": {
191 |                                 "value": "deep"
192 |                             }
193 |                         }
194 |                     }
195 |                 }
196 |             }
197 |             json_file.write_text(json.dumps(test_data, indent=2))
198 | 
199 |             chunks = []
200 |             async for chunk in loader.extract_chunks(json_file):
201 |                 chunks.append(chunk)
202 | 
203 |             content = chunks[0].content
204 |             assert "level1.level2.level3.level4.value: deep" in content
205 | 
206 |     @pytest.mark.asyncio
207 |     async def test_extract_chunks_with_arrays(self):
208 |         """Test extraction with different array handling strategies."""
209 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
210 | 
211 |         # Create JSON with arrays
212 |         test_data = {
213 |             "items": ["apple", "banana", "cherry"],
214 |             "numbers": [1, 2, 3]
215 |         }
216 |         json_content = json.dumps(test_data, indent=2)
217 | 
218 |         # Test expand strategy (default)
219 |         chunks = await extract_chunks_from_temp_file(
220 |             loader,
221 |             "arrays.json",
222 |             json_content,
223 |             array_handling='expand'
224 |         )
225 | 
226 |         content = chunks[0].content
227 |         assert "items[0]: apple" in content
228 |         assert "items[1]: banana" in content
229 |         assert "numbers[0]: 1" in content
230 | 
231 |     @pytest.mark.asyncio
232 |     async def test_extract_chunks_metadata(self):
233 |         """Test that metadata is properly included."""
234 |         loader = JSONLoader(chunk_size=1000, chunk_overlap=200)
235 | 
236 |         # Create test JSON file
237 |         import tempfile
238 |         with tempfile.TemporaryDirectory() as tmpdir:
239 |             json_file = Path(tmpdir) / "test.json"
240 |             test_data = {"key": "value"}
241 |             json_file.write_text(json.dumps(test_data))
242 | 
243 |             chunks = []
244 |             async for chunk in loader.extract_chunks(json_file):
245 |                 chunks.append(chunk)
246 | 
247 |             first_chunk = chunks[0]
248 |             assert first_chunk.metadata['content_type'] == 'json'
249 |             assert first_chunk.metadata['encoding'] in ['utf-8', 'utf-16', 'utf-32', 'latin-1', 'cp1252']
250 |             assert 'file_size' in first_chunk.metadata
251 |             assert first_chunk.metadata['loader_type'] == 'JSONLoader'
252 | 
253 | 
class TestJSONLoaderRegistry:
    """Test JSON loader registration."""

    def test_loader_registration(self):
        """Test that JSON loader is registered."""
        from mcp_memory_service.ingestion.registry import get_loader_for_file

        import tempfile
        with tempfile.TemporaryDirectory() as tmpdir:
            # A .json file on disk should resolve to the JSONLoader.
            json_file = Path(tmpdir) / "test.json"
            json_file.write_text('{"test": "data"}')

            resolved = get_loader_for_file(json_file)

            assert resolved is not None
            assert isinstance(resolved, JSONLoader)
272 | 
273 | 
if __name__ == '__main__':
    # Propagate pytest's exit status so CI and shells see test failures;
    # previously the return value of pytest.main() was discarded and the
    # script always exited 0.
    raise SystemExit(pytest.main([__file__, '-v']))
276 | 
```

--------------------------------------------------------------------------------
/tests/contracts/api-specification.yml:
--------------------------------------------------------------------------------

```yaml
  1 | # MCP Memory Service API Contract Specification
  2 | # This document defines the ACTUAL behavior of the MCP Memory Service API
  3 | # Used by the HTTP-MCP bridge and other clients
  4 | 
  5 | openapi: 3.0.3
  6 | info:
  7 |   title: MCP Memory Service API
  8 |   version: "6.6.1"
  9 |   description: |
 10 |     API contract for MCP Memory Service - defines actual response formats
 11 |     and status codes that clients can expect.
 12 |     
 13 |     CRITICAL NOTES:
 14 |     - Server returns HTTP 200 for both success and failure cases
 15 |     - Use the 'success' field in response body to determine actual result
 16 |     - All endpoints use /api prefix
 17 | 
 18 | servers:
 19 |   - url: https://memory.local:8443/api
 20 |     description: Default HTTPS server with self-signed certificate
 21 |   - url: http://localhost:8000/api
 22 |     description: Development HTTP server
 23 | 
 24 | security:
 25 |   - BearerAuth: []
 26 | 
 27 | paths:
 28 |   /health:
 29 |     get:
 30 |       summary: Service health check
 31 |       description: Returns current service status and statistics
 32 |       responses:
 33 |         '200':
 34 |           description: Service is healthy
 35 |           content:
 36 |             application/json:
 37 |               schema:
 38 |                 type: object
 39 |                 required:
 40 |                   - status
 41 |                   - version
 42 |                 properties:
 43 |                   status:
 44 |                     type: string
 45 |                     enum: [healthy]
 46 |                   version:
 47 |                     type: string
 48 |                     example: "6.6.1"
 49 |                   timestamp:
 50 |                     type: string
 51 |                     format: date-time
 52 |                   uptime_seconds:
 53 |                     type: number
 54 |                   storage_type:
 55 |                     type: string
 56 |                     enum: [sqlite_vec, cloudflare, hybrid]
 57 |                   statistics:
 58 |                     type: object
 59 |                     properties:
 60 |                       total_memories:
 61 |                         type: integer
 62 |                       total_tags:
 63 |                         type: integer
 64 |         '503':
 65 |           description: Service is unhealthy
 66 |           content:
 67 |             application/json:
 68 |               schema:
 69 |                 type: object
 70 |                 properties:
 71 |                   status:
 72 |                     type: string
 73 |                     enum: [unhealthy]
 74 |                   error:
 75 |                     type: string
 76 | 
 77 |   /memories:
 78 |     post:
 79 |       summary: Store a memory
 80 |       description: |
 81 |         Store a new memory in the service.
 82 |         
 83 |         CRITICAL: Always returns HTTP 200, regardless of success/failure!
 84 |         Check the 'success' field in response body to determine actual result.
 85 |       requestBody:
 86 |         required: true
 87 |         content:
 88 |           application/json:
 89 |             schema:
 90 |               type: object
 91 |               required:
 92 |                 - content
 93 |               properties:
 94 |                 content:
 95 |                   type: string
 96 |                   description: Memory content to store
 97 |                 tags:
 98 |                   type: array
 99 |                   items:
100 |                     type: string
101 |                   default: []
102 |                 memory_type:
103 |                   type: string
104 |                   default: "note"
105 |                 metadata:
106 |                   type: object
107 |                   default: {}
108 |       responses:
109 |         '200':
110 |           description: Request processed (check success field!)
111 |           content:
112 |             application/json:
113 |               schema:
114 |                 oneOf:
115 |                   - type: object
116 |                     title: Success
117 |                     required:
118 |                       - success
119 |                       - message
120 |                       - content_hash
121 |                       - memory
122 |                     properties:
123 |                       success:
124 |                         type: boolean
125 |                         enum: [true]
126 |                       message:
127 |                         type: string
128 |                         example: "Memory stored successfully"
129 |                       content_hash:
130 |                         type: string
131 |                       memory:
132 |                         $ref: '#/components/schemas/Memory'
133 |                   - type: object
134 |                     title: Duplicate
135 |                     required:
136 |                       - success
137 |                       - message
138 |                       - content_hash
139 |                     properties:
140 |                       success:
141 |                         type: boolean
142 |                         enum: [false]
143 |                       message:
144 |                         type: string
145 |                         example: "Duplicate content detected"
146 |                       content_hash:
147 |                         type: string
148 |                       memory:
149 |                         type: object
150 |                         nullable: true
150 |         '400':
151 |           description: Invalid request
152 |           content:
153 |             application/json:
154 |               schema:
155 |                 type: object
156 |                 properties:
157 |                   detail:
158 |                     type: string
159 |         '401':
160 |           description: Unauthorized
161 |           content:
162 |             application/json:
163 |               schema:
164 |                 type: object
165 |                 properties:
166 |                   detail:
167 |                     type: string
168 |                     example: "Invalid API key"
169 | 
170 |   /search:
171 |     get:
172 |       summary: Search memories by content
173 |       parameters:
174 |         - name: q
175 |           in: query
176 |           required: true
177 |           schema:
178 |             type: string
179 |         - name: n_results
180 |           in: query
181 |           schema:
182 |             type: integer
183 |             default: 5
184 |       responses:
185 |         '200':
186 |           description: Search results
187 |           content:
188 |             application/json:
189 |               schema:
190 |                 type: object
191 |                 properties:
192 |                   results:
193 |                     type: array
194 |                     items:
195 |                       type: object
196 |                       properties:
197 |                         memory:
198 |                           $ref: '#/components/schemas/Memory'
199 |                         relevance_score:
200 |                           type: number
201 |                           minimum: 0
202 |                           maximum: 1
203 | 
204 |   /memories/search/tags:
205 |     get:
206 |       summary: Search memories by tags
207 |       parameters:
208 |         - name: tags
209 |           in: query
210 |           required: true
211 |           schema:
212 |             type: string
213 |           description: Comma-separated list of tags
214 |       responses:
215 |         '200':
216 |           description: Tag search results
217 |           content:
218 |             application/json:
219 |               schema:
220 |                 type: object
221 |                 properties:
222 |                   memories:
223 |                     type: array
224 |                     items:
225 |                       $ref: '#/components/schemas/Memory'
226 | 
227 |   /memories/{content_hash}:
228 |     delete:
229 |       summary: Delete a memory by content hash
230 |       parameters:
231 |         - name: content_hash
232 |           in: path
233 |           required: true
234 |           schema:
235 |             type: string
236 |       responses:
237 |         '200':
238 |           description: Deletion result
239 |           content:
240 |             application/json:
241 |               schema:
242 |                 type: object
243 |                 properties:
244 |                   success:
245 |                     type: boolean
246 |                   message:
247 |                     type: string
248 |         '404':
249 |           description: Memory not found
250 |           content:
251 |             application/json:
252 |               schema:
253 |                 type: object
254 |                 properties:
255 |                   detail:
256 |                     type: string
257 | 
258 | components:
259 |   securitySchemes:
260 |     BearerAuth:
261 |       type: http
262 |       scheme: bearer
263 |       description: API key for authentication
264 | 
265 |   schemas:
266 |     Memory:
267 |       type: object
268 |       required:
269 |         - content
270 |         - content_hash
271 |         - tags
272 |         - memory_type
273 |         - created_at_iso
274 |       properties:
275 |         content:
276 |           type: string
277 |         content_hash:
278 |           type: string
279 |         tags:
280 |           type: array
281 |           items:
282 |             type: string
283 |         memory_type:
284 |           type: string
285 |         metadata:
286 |           type: object
287 |         created_at:
288 |           type: number
289 |         created_at_iso:
290 |           type: string
291 |           format: date-time
292 |         updated_at:
293 |           type: number
294 |         updated_at_iso:
295 |           type: string
296 |           format: date-time
297 | 
298 | # Contract Test Cases
299 | x-contract-tests:
300 |   critical-behaviors:
301 |     - name: "Memory storage returns 200 with success field"
302 |       description: "Server never returns 201 - always 200 with success boolean"
303 |       endpoint: "POST /memories"
304 |       expected:
305 |         status: 200
306 |         body_contains: ["success"]
307 |         
308 |     - name: "Health check uses /api/health path"
309 |       description: "Health endpoint is /api/health not /health"
310 |       endpoint: "GET /health"  
311 |       expected:
312 |         status: 200
313 |         
314 |     - name: "URL construction preserves /api base path"
315 |       description: "Bridge must not replace /api when constructing URLs"
316 |       test: "URL construction"
317 |       
318 |     - name: "Duplicate detection returns success=false"
319 |       description: "Duplicates return 200 with success=false, not error status"
320 |       endpoint: "POST /memories"
321 |       scenario: "duplicate_content"
322 |       expected:
323 |         status: 200
324 |         body:
325 |           success: false
```

--------------------------------------------------------------------------------
/tests/integration/test_oauth_basic_auth.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | OAuth 2.1 Basic Authentication Test
  4 | 
  5 | Tests both client_secret_basic (HTTP Basic auth) and client_secret_post (form data)
  6 | authentication methods for the OAuth token endpoint.
  7 | """
  8 | 
  9 | import asyncio
 10 | import base64
 11 | import sys
 12 | from typing import Optional
 13 | 
 14 | import httpx
 15 | 
 16 | 
 17 | async def test_oauth_basic_auth(base_url: str = "http://localhost:8000") -> bool:
 18 |     """
 19 |     Test OAuth 2.1 token endpoint with both Basic and form authentication.
 20 | 
 21 |     Returns:
 22 |         True if all tests pass, False otherwise
 23 |     """
 24 |     print(f"Testing OAuth Basic Authentication at {base_url}")
 25 |     print("=" * 60)
 26 | 
 27 |     async with httpx.AsyncClient() as client:
 28 |         try:
 29 |             # Step 1: Register a client first
 30 |             print("1. Registering OAuth client...")
 31 | 
 32 |             registration_data = {
 33 |                 "client_name": "Basic Auth Test Client",
 34 |                 "redirect_uris": ["https://example.com/callback"],
 35 |                 "grant_types": ["authorization_code"],
 36 |                 "response_types": ["code"]
 37 |             }
 38 | 
 39 |             response = await client.post(
 40 |                 f"{base_url}/oauth/register",
 41 |                 json=registration_data
 42 |             )
 43 | 
 44 |             if response.status_code != 201:
 45 |                 print(f"   ❌ Client registration failed: {response.status_code}")
 46 |                 print(f"   Response: {response.text}")
 47 |                 return False
 48 | 
 49 |             client_info = response.json()
 50 |             client_id = client_info.get("client_id")
 51 |             client_secret = client_info.get("client_secret")
 52 | 
 53 |             if not client_id or not client_secret:
 54 |                 print(f"   ❌ Missing client credentials in response")
 55 |                 return False
 56 | 
 57 |             print(f"   ✅ Client registered successfully")
 58 |             print(f"   📋 Client ID: {client_id}")
 59 | 
 60 |             # Step 2: Get authorization code
 61 |             print("\n2. Getting authorization code...")
 62 | 
 63 |             auth_params = {
 64 |                 "response_type": "code",
 65 |                 "client_id": client_id,
 66 |                 "redirect_uri": "https://example.com/callback",
 67 |                 "state": "test_state_basic_auth"
 68 |             }
 69 | 
 70 |             response = await client.get(
 71 |                 f"{base_url}/oauth/authorize",
 72 |                 params=auth_params,
 73 |                 follow_redirects=False
 74 |             )
 75 | 
 76 |             if response.status_code not in [302, 307]:
 77 |                 print(f"   ❌ Authorization failed: {response.status_code}")
 78 |                 return False
 79 | 
 80 |             location = response.headers.get("location", "")
 81 |             if "code=" not in location:
 82 |                 print(f"   ❌ No authorization code in redirect: {location}")
 83 |                 return False
 84 | 
 85 |             # Extract authorization code
 86 |             auth_code = None
 87 |             for param in location.split("?")[1].split("&"):
 88 |                 if param.startswith("code="):
 89 |                     auth_code = param.split("=")[1]
 90 |                     break
 91 | 
 92 |             if not auth_code:
 93 |                 print(f"   ❌ Could not extract authorization code")
 94 |                 return False
 95 | 
 96 |             print(f"   ✅ Authorization code obtained")
 97 | 
 98 |             # Step 3: Test token endpoint with HTTP Basic authentication
 99 |             print("\n3. Testing Token Endpoint with HTTP Basic Auth...")
100 | 
101 |             # Create Basic auth header
102 |             credentials = f"{client_id}:{client_secret}"
103 |             encoded_credentials = base64.b64encode(credentials.encode()).decode()
104 |             basic_auth_header = f"Basic {encoded_credentials}"
105 | 
106 |             token_data = {
107 |                 "grant_type": "authorization_code",
108 |                 "code": auth_code,
109 |                 "redirect_uri": "https://example.com/callback"
110 |                 # Note: client_id and client_secret NOT in form data for Basic auth
111 |             }
112 | 
113 |             response = await client.post(
114 |                 f"{base_url}/oauth/token",
115 |                 data=token_data,
116 |                 headers={
117 |                     "Content-Type": "application/x-www-form-urlencoded",
118 |                     "Authorization": basic_auth_header
119 |                 }
120 |             )
121 | 
122 |             if response.status_code != 200:
123 |                 print(f"   ❌ Basic auth token request failed: {response.status_code}")
124 |                 print(f"   Response: {response.text}")
125 |                 return False
126 | 
127 |             basic_token_response = response.json()
128 |             basic_access_token = basic_token_response.get("access_token")
129 | 
130 |             if not basic_access_token:
131 |                 print(f"   ❌ No access token in Basic auth response")
132 |                 return False
133 | 
134 |             print(f"   ✅ HTTP Basic authentication successful")
135 |             print(f"   📋 Token type: {basic_token_response.get('token_type')}")
136 | 
137 |             # Step 4: Test the access token works for API calls
138 |             print("\n4. Testing Basic auth access token...")
139 | 
140 |             headers = {"Authorization": f"Bearer {basic_access_token}"}
141 |             response = await client.get(f"{base_url}/api/memories", headers=headers)
142 | 
143 |             if response.status_code == 200:
144 |                 print(f"   ✅ Basic auth access token works for API calls")
145 |             else:
146 |                 print(f"   ❌ Basic auth access token failed API call: {response.status_code}")
147 |                 return False
148 | 
149 |             # Step 5: Get a new authorization code for form-based test
150 |             print("\n5. Getting new authorization code for form auth test...")
151 | 
152 |             auth_params["state"] = "test_state_form_auth"
153 |             response = await client.get(
154 |                 f"{base_url}/oauth/authorize",
155 |                 params=auth_params,
156 |                 follow_redirects=False
157 |             )
158 | 
159 |             location = response.headers.get("location", "")
160 |             form_auth_code = None
161 |             for param in location.split("?")[1].split("&"):
162 |                 if param.startswith("code="):
163 |                     form_auth_code = param.split("=")[1]
164 |                     break
165 | 
166 |             if not form_auth_code:
167 |                 print(f"   ❌ Could not get new authorization code")
168 |                 return False
169 | 
170 |             print(f"   ✅ New authorization code obtained")
171 | 
172 |             # Step 6: Test token endpoint with form-based authentication
173 |             print("\n6. Testing Token Endpoint with Form-based Auth...")
174 | 
175 |             token_data = {
176 |                 "grant_type": "authorization_code",
177 |                 "code": form_auth_code,
178 |                 "redirect_uri": "https://example.com/callback",
179 |                 "client_id": client_id,
180 |                 "client_secret": client_secret
181 |                 # Note: credentials in form data, NO Authorization header
182 |             }
183 | 
184 |             response = await client.post(
185 |                 f"{base_url}/oauth/token",
186 |                 data=token_data,
187 |                 headers={"Content-Type": "application/x-www-form-urlencoded"}
188 |                 # Note: NO Authorization header
189 |             )
190 | 
191 |             if response.status_code != 200:
192 |                 print(f"   ❌ Form auth token request failed: {response.status_code}")
193 |                 print(f"   Response: {response.text}")
194 |                 return False
195 | 
196 |             form_token_response = response.json()
197 |             form_access_token = form_token_response.get("access_token")
198 | 
199 |             if not form_access_token:
200 |                 print(f"   ❌ No access token in form auth response")
201 |                 return False
202 | 
203 |             print(f"   ✅ Form-based authentication successful")
204 |             print(f"   📋 Token type: {form_token_response.get('token_type')}")
205 | 
206 |             # Step 7: Test the form-based access token works for API calls
207 |             print("\n7. Testing form auth access token...")
208 | 
209 |             headers = {"Authorization": f"Bearer {form_access_token}"}
210 |             response = await client.get(f"{base_url}/api/memories", headers=headers)
211 | 
212 |             if response.status_code == 200:
213 |                 print(f"   ✅ Form auth access token works for API calls")
214 |             else:
215 |                 print(f"   ❌ Form auth access token failed API call: {response.status_code}")
216 |                 return False
217 | 
218 |             print("\n" + "=" * 60)
219 |             print("🎉 All OAuth authentication methods work correctly!")
220 |             print("✅ HTTP Basic authentication (client_secret_basic)")
221 |             print("✅ Form-based authentication (client_secret_post)")
222 |             print("✅ Both access tokens work for protected API endpoints")
223 |             return True
224 | 
225 |         except Exception as e:
226 |             print(f"\n❌ Test failed with exception: {e}")
227 |             return False
228 | 
229 | 
async def main():
    """Main test function."""
    # Optional first CLI argument overrides the default target URL.
    base_url = sys.argv[1] if len(sys.argv) > 1 else "http://localhost:8000"

    print("OAuth 2.1 Basic Authentication Test")
    print("===================================")
    print(f"Target: {base_url}")
    print()
    print("This test verifies both HTTP Basic and form-based authentication")
    print("methods work correctly with the OAuth token endpoint.")
    print()

    # Exit 0 on success, 1 on any failure.
    if await test_oauth_basic_auth(base_url):
        print("\n🚀 OAuth Basic authentication implementation is working perfectly!")
        sys.exit(0)

    print("\n💥 OAuth Basic authentication tests failed")
    sys.exit(1)
254 | 
if __name__ == "__main__":
    # Entry point: drive the async test flow; main() terminates the
    # process via sys.exit() with 0 on success, 1 on failure.
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/tests/timestamp/test_issue99_final_validation.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Final validation test for Issue #99 fix.
  4 | This test creates memories that SHOULD be in yesterday's range
  5 | and verifies they can be found by time-based searches.
  6 | """
  7 | 
  8 | import sys
  9 | import os
 10 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
 11 | 
 12 | import asyncio
 13 | import tempfile
 14 | import time
 15 | from datetime import datetime, timedelta
 16 | 
 17 | from mcp_memory_service.models.memory import Memory
 18 | from mcp_memory_service.utils.hashing import generate_content_hash
 19 | from mcp_memory_service.utils.time_parser import extract_time_expression
 20 | from mcp_memory_service.storage.sqlite_vec import SqliteVecMemoryStorage
 21 | 
 22 | class Issue99FinalValidationTest:
 23 |     """Final validation test for Issue #99 timezone fix."""
 24 | 
    def __init__(self):
        # Storage backend is created lazily in setup(); None until then.
        self.storage = None
 27 | 
 28 |     async def setup(self):
 29 |         """Set up test environment."""
 30 |         print("=== Final Issue #99 Validation Test ===")
 31 | 
 32 |         self.temp_db = tempfile.NamedTemporaryFile(suffix=".db", delete=False)
 33 |         self.temp_db.close()
 34 | 
 35 |         self.storage = SqliteVecMemoryStorage(
 36 |             db_path=self.temp_db.name,
 37 |             embedding_model="all-MiniLM-L6-v2"
 38 |         )
 39 |         await self.storage.initialize()
 40 |         print(f"✅ Storage initialized")
 41 | 
 42 |     async def cleanup(self):
 43 |         """Clean up test environment."""
 44 |         self.storage = None
 45 |         if hasattr(self, 'temp_db') and os.path.exists(self.temp_db.name):
 46 |             os.unlink(self.temp_db.name)
 47 | 
    async def test_timezone_fix_validation(self):
        """Validate that the timezone fix resolves Issue #99.

        Seeds three memories with explicit timestamps spread across
        yesterday (morning, afternoon, evening), then runs a
        natural-language "yesterday" query and checks that every seeded
        memory is retrieved.

        Returns:
            True if all seeded memories are found, False otherwise.
        """
        print("\n🧪 Testing Issue #99 Fix: Timezone Handling")
        print("-" * 50)

        # Calculate actual yesterday timestamps (local time, full-day span).
        now = datetime.now()
        yesterday = now - timedelta(days=1)
        yesterday_start = yesterday.replace(hour=0, minute=0, second=0, microsecond=0)
        yesterday_middle = yesterday.replace(hour=12, minute=0, second=0, microsecond=0)
        yesterday_end = yesterday.replace(hour=23, minute=59, second=59, microsecond=999999)

        print(f"📅 Yesterday date range: {yesterday_start.date()}")
        print(f"🕐 Yesterday timestamps: {yesterday_start.timestamp()} to {yesterday_end.timestamp()}")

        # Create memories that should be found in yesterday's range
        # (offsets place them at roughly 2 AM, 3 PM and 9 PM).
        memories = [
            {
                "content": "Hook-style memory created yesterday morning",
                "timestamp": yesterday_start.timestamp() + (2 * 60 * 60),  # 2 AM yesterday
                "tags": ["claude-code-session", "yesterday-morning"]
            },
            {
                "content": "Manual note from yesterday afternoon",
                "timestamp": yesterday_middle.timestamp() + (3 * 60 * 60),  # 3 PM yesterday
                "tags": ["manual-note", "yesterday-afternoon"]
            },
            {
                "content": "Another hook memory from yesterday evening",
                "timestamp": yesterday_end.timestamp() - (2 * 60 * 60),  # 9 PM yesterday
                "tags": ["claude-code-session", "yesterday-evening"]
            }
        ]

        # Store memories with specific yesterday timestamps
        for i, mem_data in enumerate(memories):
            # NOTE(review): created_at_iso appends "Z" (UTC marker) to a
            # local-time ISO string from fromtimestamp() — looks
            # inconsistent; confirm this is intentional for the repro.
            memory = Memory(
                content=mem_data["content"],
                content_hash=generate_content_hash(mem_data["content"]),
                tags=mem_data["tags"],
                memory_type="test-memory",
                created_at=mem_data["timestamp"],
                created_at_iso=datetime.fromtimestamp(mem_data["timestamp"]).isoformat() + "Z"
            )

            # store() is expected to return a (success, message) pair here.
            success, message = await self.storage.store(memory)
            if success:
                print(f"✅ Stored memory {i+1}: {datetime.fromtimestamp(mem_data['timestamp'])}")
            else:
                print(f"❌ Failed to store memory {i+1}: {message}")
                return False

        # Test yesterday search
        query = "yesterday"
        cleaned_query, (start_ts, end_ts) = extract_time_expression(query)

        print(f"\n🔍 Testing query: '{query}'")
        print(f"📅 Search range: {datetime.fromtimestamp(start_ts)} to {datetime.fromtimestamp(end_ts)}")

        # Perform search
        search_results = await self.storage.retrieve(query, n_results=10)
        print(f"🔍 Found {len(search_results)} memories")

        # Check if we found the expected memories; break after the first
        # match so each result is counted at most once.
        found_count = 0
        for result in search_results:
            for mem_data in memories:
                if mem_data["content"] in result.memory.content:
                    found_count += 1
                    print(f"  ✅ Found: {result.memory.content}")
                    break

        # Validation: every seeded memory must have been retrieved.
        expected_count = len(memories)
        success = found_count == expected_count

        print(f"\n📊 Results:")
        print(f"  Expected memories: {expected_count}")
        print(f"  Found memories: {found_count}")
        print(f"  Success: {success}")

        if success:
            print("🎉 Issue #99 FIXED: Time-based search now works correctly!")
        else:
            print("❌ Issue #99 NOT FIXED: Time-based search still has problems")

        return success
135 | 
    async def test_hook_vs_manual_consistency(self):
        """Test that hook and manual memories are equally discoverable.

        Stores one hook-style and one manually-created memory with
        near-identical timestamps, then checks that a "today morning"
        query retrieves both.

        Returns:
            True if both memories are found by the search, False otherwise.
        """
        print("\n🧪 Testing Hook vs Manual Memory Search Consistency")
        print("-" * 50)

        # Create one hook-style and one manual-style memory for today,
        # both timestamped 8 hours in the past.
        now = time.time()
        today_morning = now - (8 * 60 * 60)  # 8 hours ago

        hook_memory = Memory(
            content="Hook-generated session summary from this morning",
            content_hash=generate_content_hash("Hook-generated session summary from this morning"),
            tags=["claude-code-session", "session-consolidation", "morning-work"],
            memory_type="session-summary",
            metadata={
                "generated_by": "claude-code-session-end-hook",
                "generated_at": datetime.fromtimestamp(today_morning).isoformat() + "Z"
            },
            created_at=today_morning
        )

        manual_memory = Memory(
            content="Manual note added this morning about project status",
            content_hash=generate_content_hash("Manual note added this morning about project status"),
            tags=["manual-note", "project-status", "morning-work"],
            memory_type="note",
            metadata={
                "created_by": "manual-storage",
                "source": "user-input"
            },
            created_at=today_morning + 300  # 5 minutes later
        )

        # Store both memories (store() returns a tuple; index 0 is success).
        hook_result = await self.storage.store(hook_memory)
        manual_result = await self.storage.store(manual_memory)

        print(f"✅ Hook memory stored: {hook_result[0]}")
        print(f"✅ Manual memory stored: {manual_result[0]}")

        # Search for memories from today
        query = "today morning"
        search_results = await self.storage.retrieve(query, n_results=10)

        # Scan the results for each memory by its distinctive content prefix.
        hook_found = False
        manual_found = False

        for result in search_results:
            if "Hook-generated session summary" in result.memory.content:
                hook_found = True
            if "Manual note added this morning" in result.memory.content:
                manual_found = True

        print(f"\n📊 Search Results for '{query}':")
        print(f"  Hook memory found: {hook_found}")
        print(f"  Manual memory found: {manual_found}")
        print(f"  Both equally discoverable: {hook_found and manual_found}")

        return hook_found and manual_found
195 | 
196 |     async def run_validation(self):
197 |         """Run complete Issue #99 validation."""
198 |         try:
199 |             await self.setup()
200 | 
201 |             # Run validation tests
202 |             timezone_fix = await self.test_timezone_fix_validation()
203 |             consistency_fix = await self.test_hook_vs_manual_consistency()
204 | 
205 |             print("\n" + "=" * 60)
206 |             print("ISSUE #99 FINAL VALIDATION RESULTS")
207 |             print("=" * 60)
208 | 
209 |             if timezone_fix:
210 |                 print("✅ FIXED: Timezone handling in timestamp validation")
211 |             else:
212 |                 print("❌ NOT FIXED: Timezone handling still has issues")
213 | 
214 |             if consistency_fix:
215 |                 print("✅ FIXED: Hook vs Manual memory search consistency")
216 |             else:
217 |                 print("❌ NOT FIXED: Hook vs Manual memories still inconsistent")
218 | 
219 |             overall_success = timezone_fix and consistency_fix
220 | 
221 |             if overall_success:
222 |                 print("\n🎉 ISSUE #99 COMPLETELY RESOLVED!")
223 |                 print("✅ Time-based searches work correctly")
224 |                 print("✅ Hook and manual memories are equally discoverable")
225 |                 print("✅ Timezone inconsistencies have been fixed")
226 |             else:
227 |                 print("\n⚠️  ISSUE #99 PARTIALLY RESOLVED")
228 |                 print("Additional work may be needed")
229 | 
230 |             return overall_success
231 | 
232 |         finally:
233 |             await self.cleanup()
234 | 
async def main():
    """Main validation execution.

    Returns a process exit code: 0 when the full Issue #99 validation
    passes, 1 otherwise.
    """
    validator = Issue99FinalValidationTest()
    if await validator.run_validation():
        return 0
    return 1
240 | 
if __name__ == "__main__":
    # Run the async entry point and propagate its result as the exit status.
    sys.exit(asyncio.run(main()))
```
Page 13/47 · First · Prev · Next · Last