This is page 58 of 62. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── commands
│   │   ├── README.md
│   │   ├── refactor-function
│   │   ├── refactor-function-prod
│   │   └── refactor-function.md
│   ├── consolidation-fix-handoff.md
│   ├── consolidation-hang-fix-summary.md
│   ├── directives
│   │   ├── agents.md
│   │   ├── code-quality-workflow.md
│   │   ├── consolidation-details.md
│   │   ├── development-setup.md
│   │   ├── hooks-configuration.md
│   │   ├── memory-first.md
│   │   ├── memory-tagging.md
│   │   ├── pr-workflow.md
│   │   ├── quality-system-details.md
│   │   ├── README.md
│   │   ├── refactoring-checklist.md
│   │   ├── storage-backends.md
│   │   └── version-management.md
│   ├── prompts
│   │   └── hybrid-cleanup-integration.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .coveragerc
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-branch-automation.yml
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── dockerfile-lint.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── publish-dual.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .metrics
│   ├── baseline_cc_install_hooks.txt
│   ├── baseline_mi_install_hooks.txt
│   ├── baseline_nesting_install_hooks.txt
│   ├── BASELINE_REPORT.md
│   ├── COMPLEXITY_COMPARISON.txt
│   ├── QUICK_REFERENCE.txt
│   ├── README.md
│   ├── REFACTORED_BASELINE.md
│   ├── REFACTORING_COMPLETION_REPORT.md
│   └── TRACKING_TABLE.md
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── ai-optimized-tool-descriptions.py
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── auto-capture-hook.js
│   │   ├── auto-capture-hook.ps1
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── permission-request.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-AUTO-CAPTURE.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-PERMISSION-REQUEST.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-permission-request.js
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── auto-capture-patterns.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-cache.json
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   ├── user-override-detector.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── COMMIT_MESSAGE.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── graph-database-design.md
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── demo-recording-script.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-280-post-mortem.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   ├── quality-system-configs.md
│   │   └── tag-schema.json
│   ├── features
│   │   └── association-quality-boost.md
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── memory-quality-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   ├── dashboard-placeholder.md
│   │   └── update-restart-demo.png
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LIGHTWEIGHT_ONNX_SETUP.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   ├── code-execution-api-quick-start.md
│   │   └── graph-migration-guide.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quality-system-ui-implementation.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── refactoring
│   │   └── phase-3-3-analysis.md
│   ├── releases
│   │   └── v8.72.0-testing.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── database-transfer-migration.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── memory-management.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   ├── tutorials
│   │   ├── advanced-techniques.md
│   │   ├── data-analysis.md
│   │   └── demo-session-walkthrough.md
│   ├── wiki-documentation-plan.md
│   └── wiki-Graph-Database-Architecture.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── IMPLEMENTATION_SUMMARY.md
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── PR_DESCRIPTION.md
├── pyproject-lite.toml
├── pyproject.toml
├── pytest.ini
├── README.md
├── release-notes-v8.61.0.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── ci
│   │   ├── check_dockerfile_args.sh
│   │   └── validate_imports.sh
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── add_project_tags.py
│   │   ├── apply_quality_boost_retroactively.py
│   │   ├── assign_memory_types.py
│   │   ├── auto_retag_memory_merge.py
│   │   ├── auto_retag_memory.py
│   │   ├── backfill_graph_table.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_association_memories_hybrid.py
│   │   ├── cleanup_association_memories.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_low_quality.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── delete_test_memories.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   ├── retag_valuable_memories.py
│   │   ├── scan_todos.sh
│   │   ├── soft_delete_test_memories.py
│   │   └── sync_status.py
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── pre_pr_check.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks_on_files.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── bulk_evaluate_onnx.py
│   │   ├── check_test_scores.py
│   │   ├── debug_deberta_scoring.py
│   │   ├── export_deberta_onnx.py
│   │   ├── fix_dead_code_install.sh
│   │   ├── migrate_to_deberta.py
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── rescore_deberta.py
│   │   ├── rescore_fallback.py
│   │   ├── reset_onnx_scores.py
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── memory_wrapper_cleanup.ps1
│   │   ├── memory_wrapper_cleanup.py
│   │   ├── memory_wrapper_cleanup.sh
│   │   ├── README_CLEANUP_WRAPPER.md
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── http_server_manager.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   ├── update_service.sh
│   │   └── windows
│   │       ├── add_watchdog_trigger.ps1
│   │       ├── install_scheduled_task.ps1
│   │       ├── manage_service.ps1
│   │       ├── run_http_server_background.ps1
│   │       ├── uninstall_scheduled_task.ps1
│   │       └── update_and_restart.ps1
│   ├── setup-lightweight.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── update_and_restart.sh
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── detect_platform.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── README_detect_platform.md
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── check_handler_coverage.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_graph_tools.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── _version.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── quality
│       │   ├── __init__.py
│       │   ├── ai_evaluator.py
│       │   ├── async_scorer.py
│       │   ├── config.py
│       │   ├── implicit_signals.py
│       │   ├── metadata_codec.py
│       │   ├── onnx_ranker.py
│       │   └── scorer.py
│       ├── server
│       │   ├── __init__.py
│       │   ├── __main__.py
│       │   ├── cache_manager.py
│       │   ├── client_detection.py
│       │   ├── environment.py
│       │   ├── handlers
│       │   │   ├── __init__.py
│       │   │   ├── consolidation.py
│       │   │   ├── documents.py
│       │   │   ├── graph.py
│       │   │   ├── memory.py
│       │   │   ├── quality.py
│       │   │   └── utility.py
│       │   └── logging_config.py
│       ├── server_impl.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── graph.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   ├── migrations
│       │   │   └── 008_add_graph_table.sql
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── directory_ingestion.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── health_check.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── quality_analytics.py
│       │   ├── startup_orchestrator.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── quality.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── i18n
│               │   ├── de.json
│               │   ├── en.json
│               │   ├── es.json
│               │   ├── fr.json
│               │   ├── ja.json
│               │   ├── ko.json
│               │   └── zh.json
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── TESTING_NOTES.md
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   ├── test_forgetting.py
│   │   └── test_graph_modes.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── conftest.py
│   │   ├── HANDLER_COVERAGE_REPORT.md
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_all_memory_handlers.py
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── storage
│   │   ├── conftest.py
│   │   └── test_graph_storage.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_deberta_quality.py
│   ├── test_fallback_quality.py
│   ├── test_graph_traversal.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_lightweight_onnx.py
│   ├── test_memory_ops.py
│   ├── test_memory_wrapper_cleanup.py
│   ├── test_quality_integration.py
│   ├── test_quality_system.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_imports.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       ├── test_tag_time_filtering.py
│       └── test_uv_no_pip_installer_fallback.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
├── uv.lock
└── verify_compression.sh
```

# Files

--------------------------------------------------------------------------------
/src/mcp_memory_service/storage/sqlite_vec.py:
--------------------------------------------------------------------------------
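The listing below is the sqlite-vec storage backend. As a rough orientation before the full source, here is a minimal usage sketch (not taken from the repository; the database path and the asyncio wrapper are illustrative, and it assumes `sqlite-vec` and `sentence-transformers` are installed) showing how the class defined in this file is constructed and initialized:

```python
import asyncio

from mcp_memory_service.storage.sqlite_vec import (
    SqliteVecMemoryStorage,
    get_model_cache_stats,
)

async def main():
    # Construct the backend with a local database file and the default embedding model.
    storage = SqliteVecMemoryStorage(
        db_path="memory.db",                  # illustrative path
        embedding_model="all-MiniLM-L6-v2",   # default sentence-transformers model
    )

    # initialize() loads the sqlite-vec extension, applies connection pragmas,
    # runs pending schema migrations, and loads the embedding model.
    await storage.initialize()

    # Module-level helper defined in this file: reports cached models/embeddings.
    print(get_model_cache_stats())

asyncio.run(main())
```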

```python
   1 | # Copyright 2024 Heinrich Krupp
   2 | #
   3 | # Licensed under the Apache License, Version 2.0 (the "License");
   4 | # you may not use this file except in compliance with the License.
   5 | # You may obtain a copy of the License at
   6 | #
   7 | #     http://www.apache.org/licenses/LICENSE-2.0
   8 | #
   9 | # Unless required by applicable law or agreed to in writing, software
  10 | # distributed under the License is distributed on an "AS IS" BASIS,
  11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 | # See the License for the specific language governing permissions and
  13 | # limitations under the License.
  14 | 
  15 | """
  16 | SQLite-vec storage backend for MCP Memory Service.
  17 | Provides local vector similarity search using sqlite-vec extension.
  18 | """
  19 | 
  20 | import sqlite3
  21 | import json
  22 | import logging
  23 | import traceback
  24 | import time
  25 | import os
  26 | import sys
  27 | import platform
  28 | import hashlib
  29 | import struct
  30 | from collections import Counter
  31 | from typing import List, Dict, Any, Tuple, Optional, Set, Callable
  32 | from datetime import datetime, timezone, timedelta, date
  33 | import asyncio
  34 | import random
  35 | 
  36 | # Import sqlite-vec with fallback
  37 | try:
  38 |     import sqlite_vec
  39 |     from sqlite_vec import serialize_float32
  40 |     SQLITE_VEC_AVAILABLE = True
  41 | except ImportError:
  42 |     SQLITE_VEC_AVAILABLE = False
  43 |     logging.getLogger(__name__).warning("sqlite-vec not available. Install with: pip install sqlite-vec")
  44 | 
  45 | # Import sentence transformers with fallback
  46 | try:
  47 |     from sentence_transformers import SentenceTransformer
  48 |     SENTENCE_TRANSFORMERS_AVAILABLE = True
  49 | except ImportError:
  50 |     SENTENCE_TRANSFORMERS_AVAILABLE = False
  51 |     logging.getLogger(__name__).warning("sentence_transformers not available. Install for embedding support.")
  52 | 
  53 | from .base import MemoryStorage
  54 | from ..models.memory import Memory, MemoryQueryResult
  55 | from ..utils.hashing import generate_content_hash
  56 | from ..utils.system_detection import (
  57 |     get_system_info,
  58 |     get_optimal_embedding_settings,
  59 |     get_torch_device,
  60 |     AcceleratorType
  61 | )
  62 | from ..config import SQLITEVEC_MAX_CONTENT_LENGTH
  63 | 
  64 | logger = logging.getLogger(__name__)
  65 | 
  66 | # Global model cache for performance optimization
  67 | _MODEL_CACHE = {}
  68 | _EMBEDDING_CACHE = {}
  69 | 
  70 | 
  71 | def clear_model_caches() -> dict:
  72 |     """
  73 |     Clear embedding model caches to free memory.
  74 | 
  75 |     This function clears both the model cache (loaded embedding models)
  76 |     and the embedding cache (computed embeddings). It also triggers
  77 |     garbage collection to reclaim memory.
  78 | 
  79 |     Used during graceful shutdown or when memory pressure is detected.
  80 |     Note: After clearing, models will be reloaded on next use.
  81 | 
  82 |     Returns:
  83 |         Dict with counts of cleared items:
  84 |         - models_cleared: Number of model instances removed
  85 |         - embeddings_cleared: Number of cached embeddings removed
  86 |     """
  87 |     import gc
  88 | 
  89 |     global _MODEL_CACHE, _EMBEDDING_CACHE
  90 | 
  91 |     model_count = len(_MODEL_CACHE)
  92 |     embedding_count = len(_EMBEDDING_CACHE)
  93 | 
  94 |     _MODEL_CACHE.clear()
  95 |     _EMBEDDING_CACHE.clear()
  96 | 
  97 |     # Force garbage collection to reclaim memory
  98 |     collected = gc.collect()
  99 | 
 100 |     logger.info(
 101 |         f"Model caches cleared - "
 102 |         f"Models: {model_count}, Embeddings: {embedding_count}, "
 103 |         f"GC collected: {collected} objects"
 104 |     )
 105 | 
 106 |     return {
 107 |         "models_cleared": model_count,
 108 |         "embeddings_cleared": embedding_count,
 109 |         "gc_collected": collected
 110 |     }
 111 | 
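     | # Example: during a graceful shutdown, clear_model_caches() returns counts such as
     | # {"models_cleared": 1, "embeddings_cleared": 240, "gc_collected": 1532}; the exact
     | # numbers depend on what has been cached at that point.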
 112 | 
 113 | def get_model_cache_stats() -> dict:
 114 |     """
 115 |     Get statistics about the model cache.
 116 | 
 117 |     Returns:
 118 |         Dict with cache statistics:
 119 |         - model_count: Number of cached models
 120 |         - model_keys: List of cached model keys
 121 |         - embedding_count: Number of cached embeddings
 122 |     """
 123 |     return {
 124 |         "model_count": len(_MODEL_CACHE),
 125 |         "model_keys": list(_MODEL_CACHE.keys()),
 126 |         "embedding_count": len(_EMBEDDING_CACHE)
 127 |     }
 128 | 
 129 | 
 130 | class _HashEmbeddingModel:
 131 |     """Deterministic, pure-Python embedding fallback.
 132 | 
 133 |     This is a last-resort option intended for environments where native DLL-backed
 134 |     runtimes (onnxruntime/torch) cannot be imported (e.g., WinError 1114).
 135 |     It enables basic vector storage/search with reduced quality.
 136 |     """
 137 | 
 138 |     def __init__(self, embedding_dimension: int):
 139 |         self.embedding_dimension = int(embedding_dimension)
 140 | 
 141 |     def encode(self, texts: List[str], convert_to_numpy: bool = False):
 142 |         vectors = [self._embed_one(text) for text in texts]
 143 |         if convert_to_numpy:
 144 |             try:
 145 |                 import numpy as np
 146 | 
 147 |                 return np.asarray(vectors, dtype=np.float32)
 148 |             except Exception:
 149 |                 return vectors
 150 |         return vectors
 151 | 
 152 |     def _embed_one(self, text: str) -> List[float]:
 153 |         if not text:
 154 |             return [0.0] * self.embedding_dimension
 155 | 
 156 |         # Expand SHA-256 stream deterministically until we have enough bytes
 157 |         # for `embedding_dimension` float values.
 158 |         floats: List[float] = []
 159 |         counter = 0
 160 |         needed = self.embedding_dimension
 161 |         text_bytes = text.encode("utf-8", errors="ignore")
 162 | 
 163 |         while len(floats) < needed:
 164 |             digest = hashlib.sha256(text_bytes + b"\x1f" + struct.pack("<I", counter)).digest()
 165 |             counter += 1
 166 |             # Use 4 bytes -> signed int32 -> map to [-1, 1]
 167 |             for i in range(0, len(digest) - 3, 4):
 168 |                 (val,) = struct.unpack("<i", digest[i : i + 4])
 169 |                 floats.append(val / 2147483648.0)
 170 |                 if len(floats) >= needed:
 171 |                     break
 172 | 
 173 |         return floats
 174 | 
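     | # For example, with the storage default of 384 dimensions, each SHA-256 digest yields
     | # eight signed 32-bit values mapped into [-1, 1], so _HashEmbeddingModel._embed_one()
     | # consumes 48 digests (counter values 0-47) per vector; identical input text always
     | # produces the identical vector, which is what makes this fallback deterministic.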
 175 | 
 176 | def deserialize_embedding(blob: bytes) -> Optional[List[float]]:
 177 |     """
 178 |     Deserialize embedding blob from sqlite-vec format to list of floats.
 179 | 
 180 |     Args:
 181 |         blob: Binary blob containing serialized float32 array
 182 | 
 183 |     Returns:
 184 |         List of floats representing the embedding, or None if deserialization fails
 185 |     """
 186 |     if not blob:
 187 |         return None
 188 | 
 189 |     try:
 190 |         # Import numpy locally to avoid hard dependency
 191 |         import numpy as np
 192 |         # sqlite-vec stores embeddings as raw float32 arrays
 193 |         arr = np.frombuffer(blob, dtype=np.float32)
 194 |         return arr.tolist()
 195 |     except Exception as e:
 196 |         logger.warning(f"Failed to deserialize embedding: {e}")
 197 |         return None
 198 | 
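     | # Round-trip sketch (assuming numpy and sqlite-vec are installed):
     | #     deserialize_embedding(serialize_float32([0.25, -0.5]))  ->  [0.25, -0.5]
     | # serialize_float32() packs the values as raw float32 bytes and np.frombuffer()
     | # reads them back, so exactly representable float32 values survive unchanged.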
 199 | 
 200 | class SqliteVecMemoryStorage(MemoryStorage):
 201 |     """
 202 |     SQLite-vec based memory storage implementation.
 203 | 
 204 |     This backend provides local vector similarity search using sqlite-vec
 205 |     while maintaining the same interface as other storage backends.
 206 |     """
 207 | 
 208 |     @property
 209 |     def max_content_length(self) -> Optional[int]:
 210 |         """SQLite-vec content length limit from configuration (default: unlimited)."""
 211 |         return SQLITEVEC_MAX_CONTENT_LENGTH
 212 | 
 213 |     @property
 214 |     def supports_chunking(self) -> bool:
 215 |         """SQLite-vec backend supports content chunking with metadata linking."""
 216 |         return True
 217 | 
 218 |     def __init__(self, db_path: str, embedding_model: str = "all-MiniLM-L6-v2"):
 219 |         """
 220 |         Initialize SQLite-vec storage.
 221 | 
 222 |         Args:
 223 |             db_path: Path to SQLite database file
 224 |             embedding_model: Name of sentence transformer model to use
 225 |         """
 226 |         self.db_path = db_path
 227 |         self.embedding_model_name = embedding_model
 228 |         self.conn = None
 229 |         self.embedding_model = None
 230 |         self.embedding_dimension = 384  # Default for all-MiniLM-L6-v2
 231 |         self._initialized = False  # Track initialization state
 232 | 
 233 |         # Performance settings
 234 |         self.enable_cache = True
 235 |         self.batch_size = 32
 236 | 
 237 |         # Ensure directory exists
 238 |         os.makedirs(os.path.dirname(self.db_path) if os.path.dirname(self.db_path) else '.', exist_ok=True)
 239 | 
 240 |         logger.info(f"Initialized SQLite-vec storage at: {self.db_path}")
 241 | 
 242 |     def _safe_json_loads(self, json_str: str, context: str = "") -> dict:
 243 |         """Safely parse JSON with comprehensive error handling and logging."""
 244 |         if not json_str:
 245 |             return {}
 246 |         try:
 247 |             result = json.loads(json_str)
 248 |             if not isinstance(result, dict):
 249 |                 logger.warning(f"Non-dict JSON in {context}: {type(result)}")
 250 |                 return {}
 251 |             return result
 252 |         except json.JSONDecodeError as e:
 253 |             logger.error(f"JSON decode error in {context}: {e}, data: {json_str[:100]}...")
 254 |             return {}
 255 |         except TypeError as e:
 256 |             logger.error(f"JSON type error in {context}: {e}")
 257 |             return {}
 258 | 
 259 |     async def _execute_with_retry(self, operation: Callable, max_retries: int = 3, initial_delay: float = 0.1):
 260 |         """
 261 |         Execute a database operation with exponential backoff retry logic.
 262 |         
 263 |         Args:
 264 |             operation: The database operation to execute
 265 |             max_retries: Maximum number of retry attempts
 266 |             initial_delay: Initial delay in seconds before first retry
 267 |             
 268 |         Returns:
 269 |             The result of the operation
 270 |             
 271 |         Raises:
 272 |             The last exception if all retries fail
 273 |         """
 274 |         last_exception = None
 275 |         delay = initial_delay
 276 |         
 277 |         for attempt in range(max_retries + 1):
 278 |             try:
 279 |                 return operation()
 280 |             except sqlite3.OperationalError as e:
 281 |                 last_exception = e
 282 |                 error_msg = str(e).lower()
 283 |                 
 284 |                 # Check if error is related to database locking
 285 |                 if "locked" in error_msg or "busy" in error_msg:
 286 |                     if attempt < max_retries:
 287 |                         # Add jitter to prevent thundering herd
 288 |                         jittered_delay = delay * (1 + random.uniform(-0.1, 0.1))
 289 |                         logger.warning(f"Database locked, retrying in {jittered_delay:.2f}s (attempt {attempt + 1}/{max_retries})")
 290 |                         await asyncio.sleep(jittered_delay)
 291 |                         # Exponential backoff
 292 |                         delay *= 2
 293 |                         continue
 294 |                     else:
 295 |                         logger.error(f"Database locked after {max_retries} retries")
 296 |                 else:
 297 |                     # Non-retryable error
 298 |                     raise
 299 |             except Exception as e:
 300 |                 # Non-SQLite errors are not retried
 301 |                 raise
 302 |         
 303 |         # If we get here, all retries failed
 304 |         raise last_exception
 305 | 
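     |     # With the defaults (max_retries=3, initial_delay=0.1), a persistently locked
     |     # database produces sleeps of roughly 0.1 s, 0.2 s and 0.4 s (each with +/-10%
     |     # jitter) before the final attempt, after which the last
     |     # sqlite3.OperationalError is re-raised to the caller.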
 306 |     async def _persist_access_metadata(self, memory: Memory):
 307 |         """
 308 |         Persist access tracking metadata (access_count, last_accessed_at) to storage.
 309 | 
 310 |         Args:
 311 |             memory: Memory object with updated access metadata
 312 |         """
 313 |         def update_metadata():
 314 |             self.conn.execute('''
 315 |                 UPDATE memories
 316 |                 SET metadata = ?
 317 |                 WHERE content_hash = ?
 318 |             ''', (json.dumps(memory.metadata), memory.content_hash))
 319 |             self.conn.commit()
 320 | 
 321 |         await self._execute_with_retry(update_metadata)
 322 | 
 323 |     def _check_extension_support(self):
 324 |         """Check if Python's sqlite3 supports loading extensions."""
 325 |         test_conn = None
 326 |         try:
 327 |             test_conn = sqlite3.connect(":memory:")
 328 |             if not hasattr(test_conn, 'enable_load_extension'):
 329 |                 return False, "Python sqlite3 module not compiled with extension support"
 330 |             
 331 |             # Test if we can actually enable extension loading
 332 |             test_conn.enable_load_extension(True)
 333 |             test_conn.enable_load_extension(False)
 334 |             return True, "Extension loading supported"
 335 |             
 336 |         except AttributeError as e:
 337 |             return False, f"enable_load_extension not available: {e}"
 338 |         except sqlite3.OperationalError as e:
 339 |             return False, f"Extension loading disabled: {e}"
 340 |         except Exception as e:
 341 |             return False, f"Extension support check failed: {e}"
 342 |         finally:
 343 |             if test_conn:
 344 |                 test_conn.close()
 345 | 
 346 |     def _check_dependencies(self):
 347 |         """Check and validate all required dependencies for initialization."""
 348 |         if not SQLITE_VEC_AVAILABLE:
 349 |             raise ImportError("sqlite-vec is not available. Install with: pip install sqlite-vec")
 350 | 
 351 |         # Embeddings backend is selected/initialized later.
 352 |         # On some Windows setups, importing onnxruntime/torch can fail with DLL init errors
 353 |         # (e.g. WinError 1114). We support a pure-Python fallback to keep the service usable.
 354 | 
 355 |     def _handle_extension_loading_failure(self):
 356 |         """Provide detailed error guidance when extension loading is not supported."""
 357 |         error_msg = "SQLite extension loading not supported"
 358 |         logger.error(error_msg)
 359 |         
 360 |         platform_info = f"{platform.system()} {platform.release()}"
 361 |         solutions = []
 362 |         
 363 |         if platform.system().lower() == "darwin":  # macOS
 364 |             solutions.extend([
 365 |                 "Install Python via Homebrew: brew install python",
 366 |                 "Use pyenv with extension support: PYTHON_CONFIGURE_OPTS='--enable-loadable-sqlite-extensions' pyenv install 3.12.0",
 367 |                 "Consider using Cloudflare backend: export MCP_MEMORY_STORAGE_BACKEND=cloudflare"
 368 |             ])
 369 |         elif platform.system().lower() == "linux":
 370 |             solutions.extend([
 371 |                 "Install Python with extension support: apt install python3-dev sqlite3",
 372 |                 "Rebuild Python with: ./configure --enable-loadable-sqlite-extensions",
 373 |                 "Consider using Cloudflare backend: export MCP_MEMORY_STORAGE_BACKEND=cloudflare"
 374 |             ])
 375 |         else:  # Windows and others
 376 |             solutions.extend([
 377 |                 "Use official Python installer from python.org",
 378 |                 "Install Python with conda: conda install python",
 379 |                 "Consider using Cloudflare backend: export MCP_MEMORY_STORAGE_BACKEND=cloudflare"
 380 |             ])
 381 |         
 382 |         detailed_error = f"""
 383 | {error_msg}
 384 | 
 385 | Platform: {platform_info}
 386 | Python Version: {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}
 387 | 
 388 | SOLUTIONS:
 389 | {chr(10).join(f"  • {solution}" for solution in solutions)}
 390 | 
 391 | The sqlite-vec backend requires Python compiled with --enable-loadable-sqlite-extensions.
 392 | Consider using the Cloudflare backend as an alternative: it provides cloud-based vector
 393 | search without requiring local SQLite extensions.
 394 | 
 395 | To switch backends permanently, set: MCP_MEMORY_STORAGE_BACKEND=cloudflare
 396 | """
 397 |         raise RuntimeError(detailed_error.strip())
 398 | 
 399 |     def _get_connection_timeout(self) -> float:
 400 |         """Calculate database connection timeout from environment or use default."""
 401 |         timeout_seconds = 15.0  # Default: 15 seconds
 402 |         custom_pragmas_env = os.environ.get("MCP_MEMORY_SQLITE_PRAGMAS", "")
 403 |         
 404 |         if "busy_timeout" not in custom_pragmas_env:
 405 |             return timeout_seconds
 406 |         
 407 |         # Parse busy_timeout value (in milliseconds, convert to seconds)
 408 |         for pragma_pair in custom_pragmas_env.split(","):
 409 |             if "busy_timeout" in pragma_pair and "=" in pragma_pair:
 410 |                 try:
 411 |                     timeout_ms = int(pragma_pair.split("=")[1].strip())
 412 |                     timeout_seconds = timeout_ms / 1000.0
 413 |                     logger.info(f"Using custom timeout: {timeout_seconds}s from MCP_MEMORY_SQLITE_PRAGMAS")
 414 |                     return timeout_seconds
 415 |                 except (ValueError, IndexError) as e:
 416 |                     logger.warning(f"Failed to parse busy_timeout from env: {e}, using default {timeout_seconds}s")
 417 |                     return timeout_seconds
 418 |         
 419 |         return timeout_seconds
 420 | 
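     |     # For example, MCP_MEMORY_SQLITE_PRAGMAS="busy_timeout=30000,cache_size=20000"
     |     # makes _get_connection_timeout() return 30.0 seconds; the same variable is
     |     # parsed again below to override the default per-connection pragmas.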
 421 |     def _load_sqlite_vec_extension(self):
 422 |         """Load the sqlite-vec extension with proper error handling."""
 423 |         try:
 424 |             self.conn.enable_load_extension(True)
 425 |             sqlite_vec.load(self.conn)
 426 |             self.conn.enable_load_extension(False)
 427 |             logger.info("sqlite-vec extension loaded successfully")
 428 |         except Exception as e:
 429 |             error_msg = f"Failed to load sqlite-vec extension: {e}"
 430 |             logger.error(error_msg)
 431 |             if self.conn:
 432 |                 self.conn.close()
 433 |                 self.conn = None
 434 |             
 435 |             # Provide specific guidance based on the error
 436 |             if "enable_load_extension" in str(e):
 437 |                 detailed_error = f"""
 438 | {error_msg}
 439 | 
 440 | This error occurs when Python's sqlite3 module is not compiled with extension support.
 441 | This is common on macOS with the system Python installation.
 442 | 
 443 | RECOMMENDED SOLUTIONS:
 444 |   • Use Homebrew Python: brew install python && rehash
 445 |   • Use pyenv with extensions: PYTHON_CONFIGURE_OPTS='--enable-loadable-sqlite-extensions' pyenv install 3.12.0
 446 |   • Switch to Cloudflare backend: export MCP_MEMORY_STORAGE_BACKEND=cloudflare
 447 | 
 448 | The Cloudflare backend provides cloud-based vector search without requiring local SQLite extensions.
 449 | """
 450 |             else:
 451 |                 detailed_error = f"""
 452 | {error_msg}
 453 | 
 454 | Failed to load the sqlite-vec extension. This could be due to:
 455 |   • Incompatible sqlite-vec version
 456 |   • Missing system dependencies
 457 |   • SQLite version incompatibility
 458 | 
 459 | SOLUTIONS:
 460 |   • Reinstall sqlite-vec: pip install --force-reinstall sqlite-vec
 461 |   • Switch to Cloudflare backend: export MCP_MEMORY_STORAGE_BACKEND=cloudflare
 462 |   • Check SQLite version: python -c "import sqlite3; print(sqlite3.sqlite_version)"
 463 | """
 464 |             raise RuntimeError(detailed_error.strip())
 465 | 
 466 |     def _connect_and_load_extension(self):
 467 |         """Connect to database and load the sqlite-vec extension."""
 468 |         # Calculate timeout and connect
 469 |         timeout_seconds = self._get_connection_timeout()
 470 |         self.conn = sqlite3.connect(self.db_path, timeout=timeout_seconds, check_same_thread=False)
 471 | 
 472 |         # Load extension
 473 |         self._load_sqlite_vec_extension()
 474 | 
 475 |         # Apply pragmas for concurrent access (must be done per-connection)
 476 |         default_pragmas = {
 477 |             "journal_mode": "WAL",
 478 |             "busy_timeout": "5000",
 479 |             "synchronous": "NORMAL",
 480 |             "cache_size": "10000",
 481 |             "temp_store": "MEMORY"
 482 |         }
 483 | 
 484 |         # Override with custom pragmas from environment
 485 |         custom_pragmas = os.environ.get("MCP_MEMORY_SQLITE_PRAGMAS", "")
 486 |         if custom_pragmas:
 487 |             for pragma_pair in custom_pragmas.split(","):
 488 |                 pragma_pair = pragma_pair.strip()
 489 |                 if "=" in pragma_pair:
 490 |                     pragma_name, pragma_value = pragma_pair.split("=", 1)
 491 |                     default_pragmas[pragma_name.strip()] = pragma_value.strip()
 492 |                     logger.debug(f"Custom pragma: {pragma_name}={pragma_value}")
 493 | 
 494 |         # Apply all pragmas
 495 |         for pragma_name, pragma_value in default_pragmas.items():
 496 |             try:
 497 |                 self.conn.execute(f"PRAGMA {pragma_name}={pragma_value}")
 498 |                 logger.debug(f"Applied pragma: {pragma_name}={pragma_value}")
 499 |             except sqlite3.Error as e:
 500 |                 logger.warning(f"Failed to apply pragma {pragma_name}: {e}")
 501 | 
 502 |     async def initialize(self):
 503 |         """Initialize the SQLite database with vec0 extension."""
 504 |         # Return early if already initialized to prevent multiple initialization attempts
 505 |         if self._initialized:
 506 |             return
 507 | 
 508 |         try:
 509 |             self._check_dependencies()
 510 |             
 511 |             # Check if extension loading is supported
 512 |             extension_supported, support_message = self._check_extension_support()
 513 |             if not extension_supported:
 514 |                 self._handle_extension_loading_failure()
 515 | 
 516 |             # Connect to database and load extension
 517 |             self._connect_and_load_extension()
 518 | 
 519 |             # Check if database is already initialized by another process
 520 |             # This prevents DDL lock conflicts when multiple servers start concurrently
 521 |             try:
 522 |                 cursor = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memories'")
 523 |                 memories_table_exists = cursor.fetchone() is not None
 524 | 
 525 |                 cursor = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memory_embeddings'")
 526 |                 embeddings_table_exists = cursor.fetchone() is not None
 527 | 
 528 |                 if memories_table_exists and embeddings_table_exists:
 529 |                     # Database exists - run migrations for new columns, then skip full DDL
 530 |                     logger.info("Database already initialized, checking for schema migrations...")
 531 | 
 532 |                     # Migration v8.64.0: Add deleted_at column for soft-delete support
 533 |                     try:
 534 |                         cursor = self.conn.execute("PRAGMA table_info(memories)")
 535 |                         columns = [row[1] for row in cursor.fetchall()]
 536 |                         if 'deleted_at' not in columns:
 537 |                             logger.info("Migrating database: Adding deleted_at column for soft-delete support...")
 538 |                             self.conn.execute('ALTER TABLE memories ADD COLUMN deleted_at REAL DEFAULT NULL')
 539 |                             self.conn.execute('CREATE INDEX IF NOT EXISTS idx_deleted_at ON memories(deleted_at)')
 540 |                             self.conn.commit()
 541 |                             logger.info("Migration complete: deleted_at column added")
 542 |                         else:
 543 |                             logger.debug("Migration check: deleted_at column already exists")
 544 |                     except Exception as e:
 545 |                         logger.warning(f"Migration check for deleted_at (non-fatal): {e}")
 546 | 
 547 |                     await self._initialize_embedding_model()
 548 |                     self._initialized = True
 549 |                     logger.info(f"SQLite-vec storage initialized successfully (existing database) with embedding dimension: {self.embedding_dimension}")
 550 |                     return
 551 |             except sqlite3.Error as e:
 552 |                 # If we can't check tables (e.g., database locked), proceed with normal initialization
 553 |                 logger.debug(f"Could not check existing tables (will attempt full initialization): {e}")
 554 | 
 555 |             # Apply default pragmas for concurrent access
 556 |             default_pragmas = {
 557 |                 "journal_mode": "WAL",  # Enable WAL mode for concurrent access
 558 |                 "busy_timeout": "5000",  # 5 second timeout for locked database
 559 |                 "synchronous": "NORMAL",  # Balanced performance/safety
 560 |                 "cache_size": "10000",  # Increase cache size
 561 |                 "temp_store": "MEMORY"  # Use memory for temp tables
 562 |             }
 563 |             
 564 |             # Check for custom pragmas from environment variable
 565 |             custom_pragmas = os.environ.get("MCP_MEMORY_SQLITE_PRAGMAS", "")
 566 |             if custom_pragmas:
 567 |                 # Parse custom pragmas (format: "pragma1=value1,pragma2=value2")
 568 |                 for pragma_pair in custom_pragmas.split(","):
 569 |                     pragma_pair = pragma_pair.strip()
 570 |                     if "=" in pragma_pair:
 571 |                         pragma_name, pragma_value = pragma_pair.split("=", 1)
 572 |                         default_pragmas[pragma_name.strip()] = pragma_value.strip()
 573 |                         logger.info(f"Custom pragma from env: {pragma_name}={pragma_value}")
 574 |             
 575 |             # Apply all pragmas
 576 |             applied_pragmas = []
 577 |             for pragma_name, pragma_value in default_pragmas.items():
 578 |                 try:
 579 |                     self.conn.execute(f"PRAGMA {pragma_name}={pragma_value}")
 580 |                     applied_pragmas.append(f"{pragma_name}={pragma_value}")
 581 |                 except sqlite3.Error as e:
 582 |                     logger.warning(f"Failed to set pragma {pragma_name}={pragma_value}: {e}")
 583 |             
 584 |             logger.info(f"SQLite pragmas applied: {', '.join(applied_pragmas)}")
 585 |             
 586 |             # Create metadata table for storage configuration
 587 |             self.conn.execute('''
 588 |                 CREATE TABLE IF NOT EXISTS metadata (
 589 |                     key TEXT PRIMARY KEY,
 590 |                     value TEXT NOT NULL
 591 |                 )
 592 |             ''')
 593 | 
 594 |             # Create regular table for memory data
 595 |             self.conn.execute('''
 596 |                 CREATE TABLE IF NOT EXISTS memories (
 597 |                     id INTEGER PRIMARY KEY AUTOINCREMENT,
 598 |                     content_hash TEXT UNIQUE NOT NULL,
 599 |                     content TEXT NOT NULL,
 600 |                     tags TEXT,
 601 |                     memory_type TEXT,
 602 |                     metadata TEXT,
 603 |                     created_at REAL,
 604 |                     updated_at REAL,
 605 |                     created_at_iso TEXT,
 606 |                     updated_at_iso TEXT,
 607 |                     deleted_at REAL DEFAULT NULL
 608 |                 )
 609 |             ''')
 610 | 
 611 |             # Migration: Add deleted_at column if table exists but column doesn't (v8.64.0)
 612 |             try:
 613 |                 cursor = self.conn.execute("PRAGMA table_info(memories)")
 614 |                 columns = [row[1] for row in cursor.fetchall()]
 615 |                 if 'deleted_at' not in columns:
 616 |                     logger.info("Migrating database: Adding deleted_at column for soft-delete support...")
 617 |                     self.conn.execute('ALTER TABLE memories ADD COLUMN deleted_at REAL DEFAULT NULL')
 618 |                     self.conn.commit()
 619 |                     logger.info("Migration complete: deleted_at column added")
 620 |             except Exception as e:
 621 |                 logger.warning(f"Migration check for deleted_at (non-fatal): {e}")
 622 |             
 623 |             # Initialize embedding model BEFORE creating vector table
 624 |             await self._initialize_embedding_model()
 625 | 
 626 |             # Check if we need to migrate from L2 to cosine distance
 627 |             # This is a one-time migration - embeddings will be regenerated automatically
 628 |             try:
 629 |                 # First check if metadata table exists
 630 |                 cursor = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='metadata'")
 631 |                 metadata_exists = cursor.fetchone() is not None
 632 | 
 633 |                 if metadata_exists:
 634 |                     cursor = self.conn.execute("SELECT value FROM metadata WHERE key='distance_metric'")
 635 |                     current_metric = cursor.fetchone()
 636 | 
 637 |                     if not current_metric or current_metric[0] != 'cosine':
 638 |                         logger.info("Migrating embeddings table from L2 to cosine distance...")
 639 |                         logger.info("This is a one-time operation - embeddings will be regenerated automatically")
 640 | 
 641 |                         # Use a timeout and retry logic for DROP TABLE to handle concurrent access
 642 |                         max_retries = 3
 643 |                         retry_delay = 1.0  # seconds
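     |                         # With these defaults the backoff waits 1s after the first failed attempt and 2s after the second.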
 644 | 
 645 |                         for attempt in range(max_retries):
 646 |                             try:
 647 |                                 # Drop old embeddings table (memories table is preserved)
 648 |                                 # This may fail if another process has the database locked
 649 |                                 self.conn.execute("DROP TABLE IF EXISTS memory_embeddings")
 650 |                                 logger.info("Successfully dropped old embeddings table")
 651 |                                 break
 652 |                             except sqlite3.OperationalError as drop_error:
 653 |                                 if "database is locked" in str(drop_error):
 654 |                                     if attempt < max_retries - 1:
 655 |                                         logger.warning(f"Database locked during migration (attempt {attempt + 1}/{max_retries}), retrying in {retry_delay}s...")
 656 |                                         await asyncio.sleep(retry_delay)
 657 |                                         retry_delay *= 2  # Exponential backoff
 658 |                                     else:
 659 |                                         # Last attempt failed - check if table exists
 660 |                                         # If it doesn't exist, migration was done by another process
 661 |                                         cursor = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='memory_embeddings'")
 662 |                                         if not cursor.fetchone():
 663 |                                             logger.info("Embeddings table doesn't exist - migration likely completed by another process")
 664 |                                             break
 665 |                                         else:
 666 |                                             logger.error("Failed to drop embeddings table after retries - will attempt to continue")
 667 |                                             # Don't fail initialization, just log the issue
 668 |                                             break
 669 |                                 else:
 670 |                                     raise
 671 |                 else:
 672 |                     # No metadata table means fresh install, no migration needed
 673 |                     logger.debug("Fresh database detected, no migration needed")
 674 |             except Exception as e:
 675 |                 # If anything goes wrong, log but don't fail initialization
 676 |                 logger.warning(f"Migration check warning (non-fatal): {e}")
 677 | 
 678 |             # Now create virtual table with correct dimensions using cosine distance
 679 |             # Cosine similarity is better for text embeddings than L2 distance
 680 |             self.conn.execute(f'''
 681 |                 CREATE VIRTUAL TABLE IF NOT EXISTS memory_embeddings USING vec0(
 682 |                     content_embedding FLOAT[{self.embedding_dimension}] distance_metric=cosine
 683 |                 )
 684 |             ''')
 685 | 
 686 |             # Store metric in metadata for future migrations
 687 |             self.conn.execute("""
 688 |                 INSERT OR REPLACE INTO metadata (key, value) VALUES ('distance_metric', 'cosine')
 689 |             """)
 690 |             
 691 |             # Create indexes for better performance
 692 |             self.conn.execute('CREATE INDEX IF NOT EXISTS idx_content_hash ON memories(content_hash)')
 693 |             self.conn.execute('CREATE INDEX IF NOT EXISTS idx_created_at ON memories(created_at)')
 694 |             self.conn.execute('CREATE INDEX IF NOT EXISTS idx_memory_type ON memories(memory_type)')
 695 |             self.conn.execute('CREATE INDEX IF NOT EXISTS idx_deleted_at ON memories(deleted_at)')
 696 | 
 697 |             # Mark as initialized to prevent re-initialization
 698 |             self._initialized = True
 699 | 
 700 |             logger.info(f"SQLite-vec storage initialized successfully with embedding dimension: {self.embedding_dimension}")
 701 | 
 702 |         except Exception as e:
 703 |             error_msg = f"Failed to initialize SQLite-vec storage: {str(e)}"
 704 |             logger.error(error_msg)
 705 |             logger.error(traceback.format_exc())
 706 |             raise RuntimeError(error_msg) from e
 707 |     
 708 |     def _is_docker_environment(self) -> bool:
 709 |         """Detect if running inside a Docker container."""
 710 |         # Check for Docker-specific files/environment
 711 |         if os.path.exists('/.dockerenv'):
 712 |             return True
 713 |         if os.environ.get('DOCKER_CONTAINER'):
 714 |             return True
 715 |         # Check if running in common container environments
 716 |         if any(os.environ.get(var) for var in ['KUBERNETES_SERVICE_HOST', 'MESOS_SANDBOX']):
 717 |             return True
 718 |         # Check cgroup for docker/containerd/podman
 719 |         try:
 720 |             with open('/proc/self/cgroup', 'r') as f:
 721 |                 return any('docker' in line or 'containerd' in line for line in f)
 722 |         except (IOError, FileNotFoundError):
 723 |             pass
 724 |         return False
 725 | 
 726 |     async def _initialize_embedding_model(self):
 727 |         """Initialize the embedding model (ONNX or SentenceTransformer based on configuration)."""
 728 |         global _MODEL_CACHE
 729 | 
 730 |         # Detect if we're in Docker
 731 |         is_docker = self._is_docker_environment()
 732 |         if is_docker:
 733 |             logger.info("🐳 Docker environment detected - adjusting model loading strategy")
 734 | 
 735 |         try:
 736 |             # Check if we should use ONNX
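     |             # For example, setting MCP_MEMORY_USE_ONNX=1 (or "true"/"yes") in the environment selects the ONNX path below.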
 737 |             use_onnx = os.environ.get('MCP_MEMORY_USE_ONNX', '').lower() in ('1', 'true', 'yes')
 738 | 
 739 |             if use_onnx:
 740 |                 # Try to use ONNX embeddings
 741 |                 logger.info("Attempting to use ONNX embeddings (PyTorch-free)")
 742 |                 try:
 743 |                     from ..embeddings import get_onnx_embedding_model
 744 | 
 745 |                     # Check cache first
 746 |                     cache_key = f"onnx_{self.embedding_model_name}"
 747 |                     if cache_key in _MODEL_CACHE:
 748 |                         self.embedding_model = _MODEL_CACHE[cache_key]
 749 |                         logger.info(f"Using cached ONNX embedding model: {self.embedding_model_name}")
 750 |                         return
 751 | 
 752 |                     # Create ONNX model
 753 |                     onnx_model = get_onnx_embedding_model(self.embedding_model_name)
 754 |                     if onnx_model:
 755 |                         self.embedding_model = onnx_model
 756 |                         self.embedding_dimension = onnx_model.embedding_dimension
 757 |                         _MODEL_CACHE[cache_key] = onnx_model
 758 |                         logger.info(f"ONNX embedding model loaded successfully. Dimension: {self.embedding_dimension}")
 759 |                         return
 760 |                     else:
 761 |                         logger.warning("ONNX model creation failed, falling back to SentenceTransformer")
 762 |                 except ImportError as e:
 763 |                     logger.warning(f"ONNX dependencies not available: {e}")
 764 |                 except Exception as e:
 765 |                     logger.warning(f"Failed to initialize ONNX embeddings: {e}")
 766 | 
 767 |             # Fall back to SentenceTransformer
 768 |             if not SENTENCE_TRANSFORMERS_AVAILABLE:
 769 |                 logger.warning(
 770 |                     "Neither ONNX nor sentence-transformers available; using pure-Python hash embeddings (quality reduced)."
 771 |                 )
 772 |                 self.embedding_model = _HashEmbeddingModel(self.embedding_dimension)
 773 |                 return
 774 | 
 775 |             # Check cache first
 776 |             cache_key = self.embedding_model_name
 777 |             if cache_key in _MODEL_CACHE:
 778 |                 self.embedding_model = _MODEL_CACHE[cache_key]
 779 |                 logger.info(f"Using cached embedding model: {self.embedding_model_name}")
 780 |                 return
 781 | 
 782 |             # Get system info for optimal settings
 783 |             system_info = get_system_info()
 784 |             device = get_torch_device()
 785 | 
 786 |             logger.info(f"Loading embedding model: {self.embedding_model_name}")
 787 |             logger.info(f"Using device: {device}")
 788 | 
 789 |             # Configure for offline mode if models are cached
 790 |             # Only set offline mode if we detect cached models to prevent initial downloads
 791 |             hf_home = os.environ.get('HF_HOME', os.path.expanduser("~/.cache/huggingface"))
 792 |             model_cache_path = os.path.join(hf_home, "hub", f"models--sentence-transformers--{self.embedding_model_name.replace('/', '--')}")
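     |             # Path shape derived from the f-string above:
     |             #   <HF_HOME>/hub/models--sentence-transformers--<model name with "/" replaced by "--">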
 793 |             if os.path.exists(model_cache_path):
 794 |                 os.environ['HF_HUB_OFFLINE'] = '1'
 795 |                 os.environ['TRANSFORMERS_OFFLINE'] = '1'
 796 |                 logger.info("📦 Found cached model - enabling offline mode")
 797 | 
 798 |             # Try to load from cache first, fallback to direct model name
 799 |             try:
 800 |                 # First try loading from the Hugging Face cache
 801 |                 # (reuse the cache path already computed for the offline-mode check above)
 802 |                 cache_path = model_cache_path
 803 |                 if os.path.exists(cache_path):
 804 |                     # Find the snapshot directory
 805 |                     snapshots_path = os.path.join(cache_path, "snapshots")
 806 |                     if os.path.exists(snapshots_path):
 807 |                         snapshot_dirs = [d for d in os.listdir(snapshots_path) if os.path.isdir(os.path.join(snapshots_path, d))]
 808 |                         if snapshot_dirs:
 809 |                             model_path = os.path.join(snapshots_path, snapshot_dirs[0])
 810 |                             logger.info(f"Loading model from cache: {model_path}")
 811 |                             self.embedding_model = SentenceTransformer(model_path, device=device)
 812 |                         else:
 813 |                             raise FileNotFoundError("No snapshot found")
 814 |                     else:
 815 |                         raise FileNotFoundError("No snapshots directory")
 816 |                 else:
 817 |                     raise FileNotFoundError("No cache found")
 818 |             except FileNotFoundError as cache_error:
 819 |                 logger.warning(f"Model not in cache: {cache_error}")
 820 |                 # Try to download the model (may fail in Docker without network)
 821 |                 try:
 822 |                     logger.info("Attempting to download model from Hugging Face...")
 823 |                     self.embedding_model = SentenceTransformer(self.embedding_model_name, device=device)
 824 |                 except OSError as download_error:
 825 |                     # Check if this is a network connectivity issue
 826 |                     error_msg = str(download_error)
 827 |                     if any(phrase in error_msg.lower() for phrase in ['connection', 'network', 'couldn\'t connect', 'huggingface.co']):
 828 |                         # Provide Docker-specific help
 829 |                         docker_help = self._get_docker_network_help() if is_docker else ""
 830 |                         raise RuntimeError(
 831 |                             f"🔌 Model Download Error: Cannot connect to huggingface.co\n"
 832 |                             f"{'='*60}\n"
 833 |                             f"The model '{self.embedding_model_name}' needs to be downloaded but the connection failed.\n"
 834 |                             f"{docker_help}"
 835 |                             f"\n💡 Solutions:\n"
 836 |                             f"1. Mount pre-downloaded models as a volume:\n"
 837 |                             f"   # On host machine, download the model first:\n"
 838 |                             f"   python -c \"from sentence_transformers import SentenceTransformer; SentenceTransformer('{self.embedding_model_name}')\"\n"
 839 |                             f"   \n"
 840 |                             f"   # Then run container with cache mount:\n"
 841 |                             f"   docker run -v ~/.cache/huggingface:/root/.cache/huggingface ...\n"
 842 |                             f"\n"
 843 |                             f"2. Configure Docker network (if behind proxy):\n"
 844 |                             f"   docker run -e HTTPS_PROXY=your-proxy -e HTTP_PROXY=your-proxy ...\n"
 845 |                             f"\n"
 846 |                             f"3. Use offline mode with pre-cached models:\n"
 847 |                             f"   docker run -e HF_HUB_OFFLINE=1 -e TRANSFORMERS_OFFLINE=1 ...\n"
 848 |                             f"\n"
 849 |                             f"4. Use host network mode (if appropriate for your setup):\n"
 850 |                             f"   docker run --network host ...\n"
 851 |                             f"\n"
 852 |                             f"📚 See docs: https://github.com/doobidoo/mcp-memory-service/blob/main/docs/deployment/docker.md#model-download-issues\n"
 853 |                             f"{'='*60}"
 854 |                         ) from download_error
 855 |                     else:
 856 |                         # Re-raise if not a network issue
 857 |                         raise
 858 |             except Exception as cache_error:
 859 |                 logger.warning(f"Failed to load from cache: {cache_error}")
 860 |                 # Fallback to normal loading (may fail if offline)
 861 |                 logger.info("Attempting normal model loading...")
 862 |                 self.embedding_model = SentenceTransformer(self.embedding_model_name, device=device)
 863 | 
 864 |             # Update embedding dimension based on actual model
 865 |             test_embedding = self.embedding_model.encode(["test"], convert_to_numpy=True)
 866 |             self.embedding_dimension = test_embedding.shape[1]
 867 | 
 868 |             # Cache the model
 869 |             _MODEL_CACHE[cache_key] = self.embedding_model
 870 | 
 871 |             logger.info(f"✅ Embedding model loaded successfully. Dimension: {self.embedding_dimension}")
 872 | 
 873 |         except RuntimeError:
 874 |             # Re-raise our custom errors with helpful messages
 875 |             raise
 876 |         except Exception as e:
 877 |             logger.error(f"Failed to initialize embedding model: {str(e)}")
 878 |             logger.error(traceback.format_exc())
 879 |             logger.warning(
 880 |                 "Falling back to pure-Python hash embeddings due to embedding init failure (quality reduced)."
 881 |             )
 882 |             self.embedding_model = _HashEmbeddingModel(self.embedding_dimension)
 883 | 
 884 |     def _get_docker_network_help(self) -> str:
 885 |         """Get Docker-specific network troubleshooting help."""
 886 |         # Try to detect the Docker platform
 887 |         docker_platform = "Docker"
 888 |         if os.environ.get('DOCKER_DESKTOP_VERSION'):
 889 |             docker_platform = "Docker Desktop"
 890 |         elif os.path.exists('/proc/version'):
 891 |             try:
 892 |                 with open('/proc/version', 'r') as f:
 893 |                     version = f.read().lower()
 894 |                     if 'microsoft' in version:
 895 |                         docker_platform = "Docker Desktop for Windows"
 896 |             except (IOError, FileNotFoundError):
 897 |                 pass
 898 | 
 899 |         return (
 900 |             f"\n🐳 Docker Environment Detected ({docker_platform})\n"
 901 |             f"This appears to be a network connectivity issue common in Docker containers.\n"
 902 |         )
 903 |     
 904 |     def _generate_embedding(self, text: str) -> List[float]:
 905 |         """Generate embedding for text."""
 906 |         if not self.embedding_model:
 907 |             raise RuntimeError("No embedding model available. Ensure sentence-transformers is installed and model is loaded.")
 908 |         
 909 |         try:
 910 |             # Check cache first
 911 |             if self.enable_cache:
 912 |                 cache_key = hash(text)
 913 |                 if cache_key in _EMBEDDING_CACHE:
 914 |                     return _EMBEDDING_CACHE[cache_key]
 915 |             
 916 |             # Generate embedding
 917 |             embedding = self.embedding_model.encode([text], convert_to_numpy=True)[0]
 918 |             if hasattr(embedding, "tolist"):
 919 |                 embedding_list = embedding.tolist()
 920 |             else:
 921 |                 embedding_list = list(embedding)
 922 |             
 923 |             # Validate embedding
 924 |             if not embedding_list:
 925 |                 raise ValueError("Generated embedding is empty")
 926 |             
 927 |             if len(embedding_list) != self.embedding_dimension:
 928 |                 raise ValueError(f"Embedding dimension mismatch: expected {self.embedding_dimension}, got {len(embedding_list)}")
 929 |             
 930 |             # Validate values are finite (reject NaN, which compares unequal to itself, and +/- infinity)
 931 |             if not all(isinstance(x, (int, float)) and x == x and x not in (float('inf'), float('-inf')) for x in embedding_list):
 932 |                 raise ValueError("Embedding contains invalid values (NaN or infinity)")
 933 |             
 934 |             # Cache the result
 935 |             if self.enable_cache:
 936 |                 _EMBEDDING_CACHE[cache_key] = embedding_list
 937 |             
 938 |             return embedding_list
 939 |             
 940 |         except Exception as e:
 941 |             logger.error(f"Failed to generate embedding: {str(e)}")
 942 |             raise RuntimeError(f"Failed to generate embedding: {str(e)}") from e
 943 |     
 944 |     async def store(self, memory: Memory) -> Tuple[bool, str]:
 945 |         """Store a memory in the SQLite-vec database."""
 946 |         try:
 947 |             if not self.conn:
 948 |                 return False, "Database not initialized"
 949 |             
 950 |             # Check for duplicates (only active memories, not soft-deleted)
 951 |             cursor = self.conn.execute(
 952 |                 'SELECT content_hash FROM memories WHERE content_hash = ? AND deleted_at IS NULL',
 953 |                 (memory.content_hash,)
 954 |             )
 955 |             if cursor.fetchone():
 956 |                 return False, "Duplicate content detected"
 957 |             
 958 |             # Generate and validate embedding
 959 |             try:
 960 |                 embedding = self._generate_embedding(memory.content)
 961 |             except Exception as e:
 962 |                 logger.error(f"Failed to generate embedding for memory {memory.content_hash}: {str(e)}")
 963 |                 return False, f"Failed to generate embedding: {str(e)}"
 964 |             
 965 |             # Prepare metadata
 966 |             tags_str = ",".join(memory.tags) if memory.tags else ""
 967 |             metadata_str = json.dumps(memory.metadata) if memory.metadata else "{}"
 968 |             
 969 |             # Insert into memories table (metadata) with retry logic
 970 |             def insert_memory():
 971 |                 cursor = self.conn.execute('''
 972 |                     INSERT INTO memories (
 973 |                         content_hash, content, tags, memory_type,
 974 |                         metadata, created_at, updated_at, created_at_iso, updated_at_iso
 975 |                     ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
 976 |                 ''', (
 977 |                     memory.content_hash,
 978 |                     memory.content,
 979 |                     tags_str,
 980 |                     memory.memory_type,
 981 |                     metadata_str,
 982 |                     memory.created_at,
 983 |                     memory.updated_at,
 984 |                     memory.created_at_iso,
 985 |                     memory.updated_at_iso
 986 |                 ))
 987 |                 return cursor.lastrowid
 988 |             
 989 |             memory_rowid = await self._execute_with_retry(insert_memory)
 990 |             
 991 |             # Insert into embeddings table with retry logic
 992 |             def insert_embedding():
 993 |                 # Check if we can insert with specific rowid
 994 |                 try:
 995 |                     self.conn.execute('''
 996 |                         INSERT INTO memory_embeddings (rowid, content_embedding)
 997 |                         VALUES (?, ?)
 998 |                     ''', (
 999 |                         memory_rowid,
1000 |                         serialize_float32(embedding)
1001 |                     ))
1002 |                 except sqlite3.Error as e:
1003 |                     # If rowid insert fails, try without specifying rowid
1004 |                     logger.warning(f"Failed to insert with rowid {memory_rowid}: {e}. Trying without rowid.")
1005 |                     self.conn.execute('''
1006 |                         INSERT INTO memory_embeddings (content_embedding)
1007 |                         VALUES (?)
1008 |                     ''', (
1009 |                         serialize_float32(embedding),
1010 |                     ))
1011 |             
1012 |             await self._execute_with_retry(insert_embedding)
1013 |             
1014 |             # Commit with retry logic
1015 |             await self._execute_with_retry(self.conn.commit)
1016 |             
1017 |             logger.info(f"Successfully stored memory: {memory.content_hash}")
1018 |             return True, "Memory stored successfully"
1019 |             
1020 |         except Exception as e:
1021 |             error_msg = f"Failed to store memory: {str(e)}"
1022 |             logger.error(error_msg)
1023 |             logger.error(traceback.format_exc())
1024 |             return False, error_msg
1025 |     
1026 |     async def retrieve(self, query: str, n_results: int = 5) -> List[MemoryQueryResult]:
1027 |         """Retrieve memories using semantic search."""
1028 |         try:
1029 |             if not self.conn:
1030 |                 logger.error("Database not initialized")
1031 |                 return []
1032 |             
1033 |             if not self.embedding_model:
1034 |                 logger.warning("No embedding model available, cannot perform semantic search")
1035 |                 return []
1036 |             
1037 |             # Generate query embedding
1038 |             try:
1039 |                 query_embedding = self._generate_embedding(query)
1040 |             except Exception as e:
1041 |                 logger.error(f"Failed to generate query embedding: {str(e)}")
1042 |                 return []
1043 |             
1044 |             # First, check if embeddings table has data
1045 |             cursor = self.conn.execute('SELECT COUNT(*) FROM memory_embeddings')
1046 |             embedding_count = cursor.fetchone()[0]
1047 |             
1048 |             if embedding_count == 0:
1049 |                 logger.warning("No embeddings found in database. Memories may have been stored without embeddings.")
1050 |                 return []
1051 |             
1052 |             # Perform vector similarity search using JOIN with retry logic
1053 |             def search_memories():
1054 |                 # Try a direct rowid join - use the k=? syntax required by sqlite-vec
1055 |                 # Note: k=? already limits the subquery to the nearest neighbors, so an ORDER BY inside the subquery would be redundant
1056 |                 cursor = self.conn.execute('''
1057 |                     SELECT m.content_hash, m.content, m.tags, m.memory_type, m.metadata,
1058 |                            m.created_at, m.updated_at, m.created_at_iso, m.updated_at_iso,
1059 |                            e.distance
1060 |                     FROM memories m
1061 |                     INNER JOIN (
1062 |                         SELECT rowid, distance
1063 |                         FROM memory_embeddings
1064 |                         WHERE content_embedding MATCH ? AND k = ?
1065 |                     ) e ON m.id = e.rowid
1066 |                     WHERE m.deleted_at IS NULL
1067 |                     ORDER BY e.distance
1068 |                 ''', (serialize_float32(query_embedding), n_results))
1069 |                 
1070 |                 # Check if we got results
1071 |                 results = cursor.fetchall()
1072 |                 if not results:
1073 |                     # Log debug info
1074 |                     logger.debug("No results from vector search. Checking database state...")
1075 |                     mem_count = self.conn.execute('SELECT COUNT(*) FROM memories').fetchone()[0]
1076 |                     logger.debug(f"Memories table has {mem_count} rows, embeddings table has {embedding_count} rows")
1077 |                 
1078 |                 return results
1079 |             
1080 |             search_results = await self._execute_with_retry(search_memories)
1081 |             
1082 |             results = []
1083 |             for row in search_results:
1084 |                 try:
1085 |                     # Parse row data
1086 |                     content_hash, content, tags_str, memory_type, metadata_str = row[:5]
1087 |                     created_at, updated_at, created_at_iso, updated_at_iso, distance = row[5:]
1088 |                     
1089 |                     # Parse tags and metadata
1090 |                     tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
1091 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
1092 |                     
1093 |                     # Create Memory object
1094 |                     memory = Memory(
1095 |                         content=content,
1096 |                         content_hash=content_hash,
1097 |                         tags=tags,
1098 |                         memory_type=memory_type,
1099 |                         metadata=metadata,
1100 |                         created_at=created_at,
1101 |                         updated_at=updated_at,
1102 |                         created_at_iso=created_at_iso,
1103 |                         updated_at_iso=updated_at_iso
1104 |                     )
1105 |                     
1106 |                     # Calculate relevance score (lower distance = higher relevance)
1107 |                     # For cosine distance: distance ranges from 0 (identical) to 2 (opposite)
1108 |                     # Convert to similarity score: 1 - (distance/2) gives 0-1 range
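     |                     # Worked example: distance 0.0 -> 1.0, distance 1.0 -> 0.5, distance 2.0 -> 0.0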
1109 |                     relevance_score = max(0.0, 1.0 - (float(distance) / 2.0)) if distance is not None else 0.0
1110 | 
1111 |                     # Record access for quality scoring (implicit signals)
1112 |                     memory.record_access(query)
1113 | 
1114 |                     results.append(MemoryQueryResult(
1115 |                         memory=memory,
1116 |                         relevance_score=relevance_score,
1117 |                         debug_info={"distance": distance, "backend": "sqlite-vec"}
1118 |                     ))
1119 | 
1120 |                 except Exception as parse_error:
1121 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
1122 |                     continue
1123 | 
1124 |             # Persist updated metadata for accessed memories
1125 |             for result in results:
1126 |                 try:
1127 |                     await self._persist_access_metadata(result.memory)
1128 |                 except Exception as e:
1129 |                     logger.warning(f"Failed to persist access metadata: {e}")
1130 | 
1131 |             logger.info(f"Retrieved {len(results)} memories for query: {query}")
1132 |             return results
1133 |             
1134 |         except Exception as e:
1135 |             logger.error(f"Failed to retrieve memories: {str(e)}")
1136 |             logger.error(traceback.format_exc())
1137 |             return []
1138 |     
1139 |     async def search_by_tag(self, tags: List[str], time_start: Optional[float] = None) -> List[Memory]:
1140 |         """Search memories by tags with optional time filtering.
1141 | 
1142 |         Args:
1143 |             tags: List of tags to search for (OR logic)
1144 |             time_start: Optional Unix timestamp (in seconds) to filter memories created after this time
1145 | 
1146 |         Returns:
1147 |             List of Memory objects matching the tag criteria and time filter
1148 |         """
1149 |         try:
1150 |             if not self.conn:
1151 |                 logger.error("Database not initialized")
1152 |                 return []
1153 | 
1154 |             if not tags:
1155 |                 return []
1156 | 
1157 |             # Build query for tag search (OR logic) with EXACT tag matching
1158 |             # Uses GLOB for case-sensitive matching (LIKE is case-insensitive in SQLite)
1159 |             # Pattern: (',' || tags || ',') GLOB '*,tag,*' matches exact tag in comma-separated list
1160 |             # Strip whitespace from tags to match get_all_tags_with_counts behavior
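     |             # Example: stored tags "python, work" become "python,work" after REPLACE, wrapped as ",python,work,",
     |             # so the GLOB pattern "*,python,*" matches the exact tag "python" but not "pythonista".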
1161 |             stripped_tags = [tag.strip() for tag in tags]
1162 |             tag_conditions = " OR ".join(["(',' || REPLACE(tags, ' ', '') || ',') GLOB ?" for _ in stripped_tags])
1163 |             tag_params = [f"*,{tag},*" for tag in stripped_tags]
1164 | 
1165 |             # Add time filter to WHERE clause if provided
1166 |             # Also exclude soft-deleted memories
1167 |             where_clause = f"WHERE ({tag_conditions}) AND deleted_at IS NULL"
1168 |             if time_start is not None:
1169 |                 where_clause += " AND created_at >= ?"
1170 |                 tag_params.append(time_start)
1171 | 
1172 |             cursor = self.conn.execute(f'''
1173 |                 SELECT content_hash, content, tags, memory_type, metadata,
1174 |                        created_at, updated_at, created_at_iso, updated_at_iso
1175 |                 FROM memories
1176 |                 {where_clause}
1177 |                 ORDER BY created_at DESC
1178 |             ''', tag_params)
1179 |             
1180 |             results = []
1181 |             for row in cursor.fetchall():
1182 |                 try:
1183 |                     content_hash, content, tags_str, memory_type, metadata_str = row[:5]
1184 |                     created_at, updated_at, created_at_iso, updated_at_iso = row[5:]
1185 |                     
1186 |                     # Parse tags and metadata
1187 |                     memory_tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
1188 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
1189 |                     
1190 |                     memory = Memory(
1191 |                         content=content,
1192 |                         content_hash=content_hash,
1193 |                         tags=memory_tags,
1194 |                         memory_type=memory_type,
1195 |                         metadata=metadata,
1196 |                         created_at=created_at,
1197 |                         updated_at=updated_at,
1198 |                         created_at_iso=created_at_iso,
1199 |                         updated_at_iso=updated_at_iso
1200 |                     )
1201 |                     
1202 |                     results.append(memory)
1203 |                     
1204 |                 except Exception as parse_error:
1205 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
1206 |                     continue
1207 |             
1208 |             logger.info(f"Found {len(results)} memories with tags: {tags}")
1209 |             return results
1210 |             
1211 |         except Exception as e:
1212 |             logger.error(f"Failed to search by tags: {str(e)}")
1213 |             logger.error(traceback.format_exc())
1214 |             return []
1215 |     
1216 |     async def search_by_tags(
1217 |         self,
1218 |         tags: List[str],
1219 |         operation: str = "AND",
1220 |         time_start: Optional[float] = None,
1221 |         time_end: Optional[float] = None
1222 |     ) -> List[Memory]:
1223 |         """Search memories by tags with AND/OR operation and optional time filtering."""
1224 |         try:
1225 |             if not self.conn:
1226 |                 logger.error("Database not initialized")
1227 |                 return []
1228 |             
1229 |             if not tags:
1230 |                 return []
1231 |             
1232 |             normalized_operation = operation.strip().upper() if isinstance(operation, str) else "AND"
1233 |             if normalized_operation not in {"AND", "OR"}:
1234 |                 logger.warning("Unsupported tag operation %s; defaulting to AND", operation)
1235 |                 normalized_operation = "AND"
1236 | 
1237 |             # Use GLOB for case-sensitive exact tag matching
1238 |             # Pattern: (',' || tags || ',') GLOB '*,tag,*' matches exact tag in comma-separated list
1239 |             # Strip whitespace from tags to match get_all_tags_with_counts behavior
1240 |             stripped_tags = [tag.strip() for tag in tags]
1241 |             comparator = " AND " if normalized_operation == "AND" else " OR "
1242 |             tag_conditions = comparator.join(["(',' || REPLACE(tags, ' ', '') || ',') GLOB ?" for _ in stripped_tags])
1243 |             tag_params = [f"*,{tag},*" for tag in stripped_tags]
1244 | 
1245 |             where_conditions = [f"({tag_conditions})"] if tag_conditions else []
1246 |             # Always exclude soft-deleted memories
1247 |             where_conditions.append("deleted_at IS NULL")
1248 |             if time_start is not None:
1249 |                 where_conditions.append("created_at >= ?")
1250 |                 tag_params.append(time_start)
1251 |             if time_end is not None:
1252 |                 where_conditions.append("created_at <= ?")
1253 |                 tag_params.append(time_end)
1254 | 
1255 |             where_clause = f"WHERE {' AND '.join(where_conditions)}" if where_conditions else ""
1256 |             
1257 |             cursor = self.conn.execute(f'''
1258 |                 SELECT content_hash, content, tags, memory_type, metadata,
1259 |                        created_at, updated_at, created_at_iso, updated_at_iso
1260 |                 FROM memories 
1261 |                 {where_clause}
1262 |                 ORDER BY updated_at DESC
1263 |             ''', tag_params)
1264 |             
1265 |             results = []
1266 |             for row in cursor.fetchall():
1267 |                 try:
1268 |                     content_hash, content, tags_str, memory_type, metadata_str, created_at, updated_at, created_at_iso, updated_at_iso = row
1269 |                     
1270 |                     # Parse tags and metadata
1271 |                     memory_tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
1272 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
1273 |                     
1274 |                     memory = Memory(
1275 |                         content=content,
1276 |                         content_hash=content_hash,
1277 |                         tags=memory_tags,
1278 |                         memory_type=memory_type,
1279 |                         metadata=metadata,
1280 |                         created_at=created_at,
1281 |                         updated_at=updated_at,
1282 |                         created_at_iso=created_at_iso,
1283 |                         updated_at_iso=updated_at_iso
1284 |                     )
1285 |                     
1286 |                     results.append(memory)
1287 |                     
1288 |                 except Exception as parse_error:
1289 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
1290 |                     continue
1291 |             
1292 |             logger.info(f"Found {len(results)} memories with tags: {tags} (operation: {operation})")
1293 |             return results
1294 | 
1295 |         except Exception as e:
1296 |             logger.error(f"Failed to search by tags with operation {operation}: {str(e)}")
1297 |             logger.error(traceback.format_exc())
1298 |             return []
1299 | 
1300 |     async def search_by_tag_chronological(self, tags: List[str], limit: Optional[int] = None, offset: int = 0) -> List[Memory]:
1301 |         """
1302 |         Search memories by tags with chronological ordering and database-level pagination.
1303 | 
1304 |         This method addresses Gemini Code Assist's performance concern by pushing
1305 |         ordering and pagination to the database level instead of doing it in Python.
1306 | 
1307 |         Args:
1308 |             tags: List of tags to search for
1309 |             limit: Maximum number of memories to return (None for all)
1310 |             offset: Number of memories to skip (for pagination)
1311 | 
1312 |         Returns:
1313 |             List of Memory objects ordered by created_at DESC
1314 |         """
1315 |         try:
1316 |             if not self.conn:
1317 |                 logger.error("Database not initialized")
1318 |                 return []
1319 | 
1320 |             if not tags:
1321 |                 return []
1322 | 
1323 |             # Build query for tag search (OR logic) with database-level ordering and pagination
1324 |             # Use GLOB for case-sensitive exact tag matching
1325 |             # Strip whitespace from tags to match get_all_tags_with_counts behavior
1326 |             stripped_tags = [tag.strip() for tag in tags]
1327 |             tag_conditions = " OR ".join(["(',' || REPLACE(tags, ' ', '') || ',') GLOB ?" for _ in stripped_tags])
1328 |             tag_params = [f"*,{tag},*" for tag in stripped_tags]
1329 | 
1330 |             # Build pagination clauses
1331 |             limit_clause = f"LIMIT {limit}" if limit is not None else ""
1332 |             offset_clause = f"OFFSET {offset}" if offset > 0 else ""
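     |             # e.g. limit=20, offset=40 produces "LIMIT 20 OFFSET 40" (the third page of 20 results)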
1333 | 
1334 |             query = f'''
1335 |                 SELECT content_hash, content, tags, memory_type, metadata,
1336 |                        created_at, updated_at, created_at_iso, updated_at_iso
1337 |                 FROM memories
1338 |                 WHERE ({tag_conditions}) AND deleted_at IS NULL
1339 |                 ORDER BY created_at DESC
1340 |                 {limit_clause} {offset_clause}
1341 |             '''
1342 | 
1343 |             cursor = self.conn.execute(query, tag_params)
1344 |             results = []
1345 | 
1346 |             for row in cursor.fetchall():
1347 |                 try:
1348 |                     content_hash, content, tags_str, memory_type, metadata_str, created_at, updated_at, created_at_iso, updated_at_iso = row
1349 | 
1350 |                     # Parse tags and metadata
1351 |                     memory_tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
1352 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
1353 | 
1354 |                     memory = Memory(
1355 |                         content=content,
1356 |                         content_hash=content_hash,
1357 |                         tags=memory_tags,
1358 |                         memory_type=memory_type,
1359 |                         metadata=metadata,
1360 |                         created_at=created_at,
1361 |                         updated_at=updated_at,
1362 |                         created_at_iso=created_at_iso,
1363 |                         updated_at_iso=updated_at_iso
1364 |                     )
1365 | 
1366 |                     results.append(memory)
1367 | 
1368 |                 except Exception as parse_error:
1369 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
1370 |                     continue
1371 | 
1372 |             logger.info(f"Found {len(results)} memories with tags: {tags} using database-level pagination (limit={limit}, offset={offset})")
1373 |             return results
1374 | 
1375 |         except Exception as e:
1376 |             logger.error(f"Failed to search by tags chronologically: {str(e)}")
1377 |             logger.error(traceback.format_exc())
1378 |             return []
1379 | 
1380 |     async def delete(self, content_hash: str) -> Tuple[bool, str]:
1381 |         """
1382 |         Soft-delete a memory by setting deleted_at timestamp.
1383 | 
1384 |         The memory is marked as deleted but retained for sync conflict resolution.
1385 |         Use purge_deleted() to permanently remove old tombstones.
1386 |         """
1387 |         try:
1388 |             if not self.conn:
1389 |                 return False, "Database not initialized"
1390 | 
1391 |             # Get the id first to delete corresponding embedding
1392 |             cursor = self.conn.execute(
1393 |                 'SELECT id FROM memories WHERE content_hash = ? AND deleted_at IS NULL',
1394 |                 (content_hash,)
1395 |             )
1396 |             row = cursor.fetchone()
1397 | 
1398 |             if row:
1399 |                 memory_id = row[0]
1400 |                 # Delete embedding (won't be needed for search)
1401 |                 self.conn.execute('DELETE FROM memory_embeddings WHERE rowid = ?', (memory_id,))
1402 |                 # Soft-delete: set deleted_at timestamp instead of DELETE
1403 |                 cursor = self.conn.execute(
1404 |                     'UPDATE memories SET deleted_at = ? WHERE content_hash = ? AND deleted_at IS NULL',
1405 |                     (time.time(), content_hash)
1406 |                 )
1407 |                 self.conn.commit()
1408 |             else:
1409 |                 return False, f"Memory with hash {content_hash} not found"
1410 | 
1411 |             if cursor.rowcount > 0:
1412 |                 logger.info(f"Soft-deleted memory: {content_hash}")
1413 |                 return True, f"Successfully deleted memory {content_hash}"
1414 |             else:
1415 |                 return False, f"Memory with hash {content_hash} not found"
1416 | 
1417 |         except Exception as e:
1418 |             error_msg = f"Failed to delete memory: {str(e)}"
1419 |             logger.error(error_msg)
1420 |             return False, error_msg
1421 | 
1422 |     async def is_deleted(self, content_hash: str) -> bool:
1423 |         """
1424 |         Check if a memory has been soft-deleted (tombstone exists).
1425 | 
1426 |         Used by hybrid sync to prevent re-syncing deleted memories from cloud.
1427 |         """
1428 |         try:
1429 |             if not self.conn:
1430 |                 return False
1431 | 
1432 |             cursor = self.conn.execute(
1433 |                 'SELECT deleted_at FROM memories WHERE content_hash = ? AND deleted_at IS NOT NULL',
1434 |                 (content_hash,)
1435 |             )
1436 |             return cursor.fetchone() is not None
1437 | 
1438 |         except Exception as e:
1439 |             logger.error(f"Failed to check if memory is deleted: {str(e)}")
1440 |             return False
1441 | 
1442 |     async def purge_deleted(self, older_than_days: int = 30) -> int:
1443 |         """
1444 |         Permanently delete tombstones older than specified days.
1445 | 
1446 |         This should be called periodically to clean up old soft-deleted records.
1447 |         Default: 30 days retention to allow all devices to sync deletions.
1448 |         """
1449 |         try:
1450 |             if not self.conn:
1451 |                 return 0
1452 | 
1453 |             cutoff = time.time() - (older_than_days * 86400)
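     |             # e.g. older_than_days=30 -> cutoff = now - 2,592,000 seconds; only tombstones older than that are purged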
1454 |             cursor = self.conn.execute(
1455 |                 'DELETE FROM memories WHERE deleted_at IS NOT NULL AND deleted_at < ?',
1456 |                 (cutoff,)
1457 |             )
1458 |             self.conn.commit()
1459 | 
1460 |             count = cursor.rowcount
1461 |             if count > 0:
1462 |                 logger.info(f"Purged {count} tombstones older than {older_than_days} days")
1463 |             return count
1464 | 
1465 |         except Exception as e:
1466 |             logger.error(f"Failed to purge deleted memories: {str(e)}")
1467 |             return 0
1468 |     
1469 |     async def get_by_hash(self, content_hash: str) -> Optional[Memory]:
1470 |         """Get a memory by its content hash."""
1471 |         try:
1472 |             if not self.conn:
1473 |                 return None
1474 |             
1475 |             cursor = self.conn.execute('''
1476 |                 SELECT content_hash, content, tags, memory_type, metadata,
1477 |                        created_at, updated_at, created_at_iso, updated_at_iso
1478 |                 FROM memories WHERE content_hash = ? AND deleted_at IS NULL
1479 |             ''', (content_hash,))
1480 |             
1481 |             row = cursor.fetchone()
1482 |             if not row:
1483 |                 return None
1484 |             
1485 |             content_hash, content, tags_str, memory_type, metadata_str = row[:5]
1486 |             created_at, updated_at, created_at_iso, updated_at_iso = row[5:]
1487 |             
1488 |             # Parse tags and metadata
1489 |             tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
1490 |             metadata = self._safe_json_loads(metadata_str, "memory_retrieval")
1491 |             
1492 |             memory = Memory(
1493 |                 content=content,
1494 |                 content_hash=content_hash,
1495 |                 tags=tags,
1496 |                 memory_type=memory_type,
1497 |                 metadata=metadata,
1498 |                 created_at=created_at,
1499 |                 updated_at=updated_at,
1500 |                 created_at_iso=created_at_iso,
1501 |                 updated_at_iso=updated_at_iso
1502 |             )
1503 |             
1504 |             return memory
1505 | 
1506 |         except Exception as e:
1507 |             logger.error(f"Failed to get memory by hash {content_hash}: {str(e)}")
1508 |             return None
1509 | 
1510 |     async def get_all_content_hashes(self, include_deleted: bool = False) -> Set[str]:
1511 |         """
1512 |         Get all content hashes in database for bulk existence checking.
1513 | 
1514 |         This is optimized for sync operations to avoid individual existence checks.
1515 |         Returns a set for O(1) lookup performance.
1516 | 
1517 |         Args:
1518 |             include_deleted: If True, includes soft-deleted memories. Default False.
1519 | 
1520 |         Returns:
1521 |             Set of all content_hash values currently in the database
1522 |         """
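     |         # Typical sync-side usage (hypothetical caller code):
     |         #   existing = await storage.get_all_content_hashes()
     |         #   to_upload = [m for m in candidates if m.content_hash not in existing]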
1523 |         try:
1524 |             if not self.conn:
1525 |                 return set()
1526 | 
1527 |             if include_deleted:
1528 |                 cursor = self.conn.execute('SELECT content_hash FROM memories')
1529 |             else:
1530 |                 cursor = self.conn.execute('SELECT content_hash FROM memories WHERE deleted_at IS NULL')
1531 |             return {row[0] for row in cursor.fetchall()}
1532 | 
1533 |         except Exception as e:
1534 |             logger.error(f"Failed to get all content hashes: {str(e)}")
1535 |             return set()
1536 | 
1537 |     async def delete_by_tag(self, tag: str) -> Tuple[int, str]:
1538 |         """Soft-delete memories by tag (exact match only)."""
1539 |         try:
1540 |             if not self.conn:
1541 |                 return 0, "Database not initialized"
1542 | 
1543 |             # Use GLOB for case-sensitive exact tag matching
1544 |             # Pattern: (',' || tags || ',') GLOB '*,tag,*' matches exact tag in comma-separated list
1545 |             # Strip whitespace to match get_all_tags_with_counts behavior
1546 |             stripped_tag = tag.strip()
1547 |             exact_match_pattern = f"*,{stripped_tag},*"
1548 | 
1549 |             # Get the ids first to delete corresponding embeddings (only non-deleted)
1550 |             cursor = self.conn.execute(
1551 |                 "SELECT id FROM memories WHERE (',' || REPLACE(tags, ' ', '') || ',') GLOB ? AND deleted_at IS NULL",
1552 |                 (exact_match_pattern,)
1553 |             )
1554 |             memory_ids = [row[0] for row in cursor.fetchall()]
1555 | 
1556 |             # Delete embeddings (won't be needed for search)
1557 |             for memory_id in memory_ids:
1558 |                 self.conn.execute('DELETE FROM memory_embeddings WHERE rowid = ?', (memory_id,))
1559 | 
1560 |             # Soft-delete: set deleted_at timestamp instead of DELETE
1561 |             cursor = self.conn.execute(
1562 |                 "UPDATE memories SET deleted_at = ? WHERE (',' || REPLACE(tags, ' ', '') || ',') GLOB ? AND deleted_at IS NULL",
1563 |                 (time.time(), exact_match_pattern)
1564 |             )
1565 |             self.conn.commit()
1566 | 
1567 |             count = cursor.rowcount
1568 |             logger.info(f"Soft-deleted {count} memories with tag: {tag}")
1569 | 
1570 |             if count > 0:
1571 |                 return count, f"Successfully deleted {count} memories with tag '{tag}'"
1572 |             else:
1573 |                 return 0, f"No memories found with tag '{tag}'"
1574 | 
1575 |         except Exception as e:
1576 |             error_msg = f"Failed to delete by tag: {str(e)}"
1577 |             logger.error(error_msg)
1578 |             return 0, error_msg
1579 | 
1580 |     async def delete_by_tags(self, tags: List[str]) -> Tuple[int, str]:
1581 |         """
1582 |         Soft-delete memories matching ANY of the given tags (optimized single-query version).
1583 | 
1584 |         Overrides base class implementation for better performance using OR conditions.
1585 |         """
1586 |         try:
1587 |             if not self.conn:
1588 |                 return 0, "Database not initialized"
1589 | 
1590 |             if not tags:
1591 |                 return 0, "No tags provided"
1592 | 
1593 |             # Build OR condition with GLOB for case-sensitive exact tag matching
1594 |             # Pattern: (',' || tags || ',') GLOB '*,tag,*' matches exact tag in comma-separated list
1595 |             # Strip whitespace to match get_all_tags_with_counts behavior
1596 |             stripped_tags = [tag.strip() for tag in tags]
1597 |             conditions = " OR ".join(["(',' || REPLACE(tags, ' ', '') || ',') GLOB ?" for _ in stripped_tags])
1598 |             params = [f"*,{tag},*" for tag in stripped_tags]
1599 | 
1600 |             # Get the ids first to delete corresponding embeddings (only non-deleted)
1601 |             query = f'SELECT id FROM memories WHERE ({conditions}) AND deleted_at IS NULL'
1602 |             cursor = self.conn.execute(query, params)
1603 |             memory_ids = [row[0] for row in cursor.fetchall()]
1604 | 
1605 |             # Delete from embeddings table using single query with IN clause
1606 |             if memory_ids:
1607 |                 placeholders = ','.join('?' for _ in memory_ids)
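     |                 # e.g. three matching ids produce "DELETE FROM memory_embeddings WHERE rowid IN (?,?,?)"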
1608 |                 self.conn.execute(f'DELETE FROM memory_embeddings WHERE rowid IN ({placeholders})', memory_ids)
1609 | 
1610 |             # Soft-delete: set deleted_at timestamp instead of DELETE
1611 |             update_query = f'UPDATE memories SET deleted_at = ? WHERE ({conditions}) AND deleted_at IS NULL'
1612 |             cursor = self.conn.execute(update_query, [time.time()] + params)
1613 |             self.conn.commit()
1614 | 
1615 |             count = cursor.rowcount
1616 |             logger.info(f"Soft-deleted {count} memories matching tags: {tags}")
1617 | 
1618 |             if count > 0:
1619 |                 return count, f"Successfully deleted {count} memories matching {len(tags)} tag(s)"
1620 |             else:
1621 |                 return 0, f"No memories found matching any of the {len(tags)} tags"
1622 | 
1623 |         except Exception as e:
1624 |             error_msg = f"Failed to delete by tags: {str(e)}"
1625 |             logger.error(error_msg)
1626 |             return 0, error_msg
1627 | 
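    # Usage sketch: soft-deleting every memory that carries any of several tags.
    # Assumes `storage` is an already initialized instance of this sqlite-vec
    # backend; the variable name and tag values are illustrative only.
    #
    #     count, message = await storage.delete_by_tags(["obsolete", "scratch"])
    #     print(message)   # e.g. "Successfully deleted 3 memories matching 2 tag(s)"
    #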
1628 |     async def delete_by_timeframe(self, start_date: date, end_date: date, tag: Optional[str] = None) -> Tuple[int, str]:
1629 |         """Soft-delete memories within a specific date range, optionally filtered by tag."""
1630 |         try:
1631 |             if not self.conn:
1632 |                 return 0, "Database not initialized"
1633 | 
1634 |             # Convert dates to timestamps
1635 |             start_ts = datetime.combine(start_date, datetime.min.time()).timestamp()
1636 |             end_ts = datetime.combine(end_date, datetime.max.time()).timestamp()
1637 | 
1638 |             if tag:
1639 |                 # Delete with tag filter
1640 |                 cursor = self.conn.execute('''
1641 |                     SELECT content_hash FROM memories
1642 |                     WHERE created_at >= ? AND created_at <= ?
1643 |                     AND (tags LIKE ? OR tags LIKE ? OR tags LIKE ? OR tags = ?)
1644 |                     AND deleted_at IS NULL
1645 |                 ''', (start_ts, end_ts, f"{tag},%", f"%,{tag},%", f"%,{tag}", tag))
1646 |             else:
1647 |                 # Delete all in timeframe
1648 |                 cursor = self.conn.execute('''
1649 |                     SELECT content_hash FROM memories
1650 |                     WHERE created_at >= ? AND created_at <= ?
1651 |                     AND deleted_at IS NULL
1652 |                 ''', (start_ts, end_ts))
1653 | 
1654 |             hashes = [row[0] for row in cursor.fetchall()]
1655 | 
1656 |             # Use soft-delete for each hash
1657 |             deleted_count = 0
1658 |             for content_hash in hashes:
1659 |                 success, _ = await self.delete(content_hash)
1660 |                 if success:
1661 |                     deleted_count += 1
1662 | 
1663 |             return deleted_count, f"Deleted {deleted_count} memories from {start_date} to {end_date}" + (f" with tag '{tag}'" if tag else "")
1664 | 
1665 |         except Exception as e:
1666 |             logger.error(f"Error deleting by timeframe: {str(e)}")
1667 |             return 0, f"Error: {str(e)}"
1668 | 
1669 |     async def delete_before_date(self, before_date: date, tag: Optional[str] = None) -> Tuple[int, str]:
1670 |         """Soft-delete memories created before a specific date, optionally filtered by tag."""
1671 |         try:
1672 |             if not self.conn:
1673 |                 return 0, "Database not initialized"
1674 | 
1675 |             # Convert date to timestamp
1676 |             before_ts = datetime.combine(before_date, datetime.min.time()).timestamp()
1677 | 
1678 |             if tag:
1679 |                 # Delete with tag filter
1680 |                 cursor = self.conn.execute('''
1681 |                     SELECT content_hash FROM memories
1682 |                     WHERE created_at < ?
1683 |                     AND (tags LIKE ? OR tags LIKE ? OR tags LIKE ? OR tags = ?)
1684 |                     AND deleted_at IS NULL
1685 |                 ''', (before_ts, f"{tag},%", f"%,{tag},%", f"%,{tag}", tag))
1686 |             else:
1687 |                 # Delete all before date
1688 |                 cursor = self.conn.execute('''
1689 |                     SELECT content_hash FROM memories
1690 |                     WHERE created_at < ?
1691 |                     AND deleted_at IS NULL
1692 |                 ''', (before_ts,))
1693 | 
1694 |             hashes = [row[0] for row in cursor.fetchall()]
1695 | 
1696 |             # Use soft-delete for each hash
1697 |             deleted_count = 0
1698 |             for content_hash in hashes:
1699 |                 success, _ = await self.delete(content_hash)
1700 |                 if success:
1701 |                     deleted_count += 1
1702 | 
1703 |             return deleted_count, f"Deleted {deleted_count} memories before {before_date}" + (f" with tag '{tag}'" if tag else "")
1704 | 
1705 |         except Exception as e:
1706 |             logger.error(f"Error deleting before date: {str(e)}")
1707 |             return 0, f"Error: {str(e)}"
1708 | 
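    # Usage sketch for the two date-based helpers above. `storage` is an assumed
    # initialized instance of this backend; dates and tag values are illustrative.
    #
    #     from datetime import date
    #     n, msg = await storage.delete_by_timeframe(date(2024, 1, 1), date(2024, 1, 31), tag="scratch")
    #     n, msg = await storage.delete_before_date(date(2023, 1, 1))
    #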
1709 |     async def get_by_exact_content(self, content: str) -> List[Memory]:
1710 |         """Retrieve memories by exact content match."""
1711 |         try:
1712 |             if not self.conn:
1713 |                 return []
1714 | 
1715 |             cursor = self.conn.execute('''
1716 |                 SELECT content, tags, memory_type, metadata, content_hash,
1717 |                        created_at, created_at_iso, updated_at, updated_at_iso
1718 |                 FROM memories
1719 |                 WHERE content = ? AND deleted_at IS NULL
1720 |             ''', (content,))
1721 | 
1722 |             memories = []
1723 |             for row in cursor.fetchall():
1724 |                 content_str, tags_str, memory_type, metadata_str, content_hash, \
1725 |                     created_at, created_at_iso, updated_at, updated_at_iso = row
1726 | 
1727 |                 metadata = self._safe_json_loads(metadata_str, "get_by_exact_content")
1728 |                 tags = [tag.strip() for tag in tags_str.split(',')] if tags_str else []
1729 | 
1730 |                 memory = Memory(
1731 |                     content=content_str,
1732 |                     content_hash=content_hash,
1733 |                     tags=tags,
1734 |                     memory_type=memory_type,
1735 |                     metadata=metadata,
1736 |                     created_at=created_at,
1737 |                     created_at_iso=created_at_iso,
1738 |                     updated_at=updated_at,
1739 |                     updated_at_iso=updated_at_iso
1740 |                 )
1741 |                 memories.append(memory)
1742 | 
1743 |             return memories
1744 | 
1745 |         except Exception as e:
1746 |             logger.error(f"Error in exact content match: {str(e)}")
1747 |             return []
1748 | 
1749 |     async def cleanup_duplicates(self) -> Tuple[int, str]:
1750 |         """Soft-delete duplicate memories based on content hash."""
1751 |         try:
1752 |             if not self.conn:
1753 |                 return 0, "Database not initialized"
1754 | 
1755 |             # Soft delete duplicates (keep the first occurrence by rowid)
1756 |             cursor = self.conn.execute('''
1757 |                 UPDATE memories
1758 |                 SET deleted_at = ?
1759 |                 WHERE rowid NOT IN (
1760 |                     SELECT MIN(rowid)
1761 |                     FROM memories
1762 |                     WHERE deleted_at IS NULL
1763 |                     GROUP BY content_hash
1764 |                 )
1765 |                 AND deleted_at IS NULL
1766 |             ''', (time.time(),))
1767 |             self.conn.commit()
1768 | 
1769 |             count = cursor.rowcount
1770 |             logger.info(f"Soft-deleted {count} duplicate memories")
1771 | 
1772 |             if count > 0:
1773 |                 return count, f"Successfully soft-deleted {count} duplicate memories"
1774 |             else:
1775 |                 return 0, "No duplicate memories found"
1776 |                 
1777 |         except Exception as e:
1778 |             error_msg = f"Failed to cleanup duplicates: {str(e)}"
1779 |             logger.error(error_msg)
1780 |             return 0, error_msg
1781 |     
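    # Usage sketch: deduplication keeps the lowest rowid per content_hash and
    # soft-deletes the rest, so running it repeatedly is harmless. `storage` is an
    # assumed initialized instance of this backend.
    #
    #     removed, message = await storage.cleanup_duplicates()
    #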
1782 |     async def update_memory_metadata(self, content_hash: str, updates: Dict[str, Any], preserve_timestamps: bool = True) -> Tuple[bool, str]:
1783 |         """Update memory metadata without recreating the entire memory entry."""
1784 |         try:
1785 |             if not self.conn:
1786 |                 return False, "Database not initialized"
1787 |             
1788 |             # Get current memory
1789 |             cursor = self.conn.execute('''
1790 |                 SELECT content, tags, memory_type, metadata, created_at, created_at_iso
1791 |                 FROM memories WHERE content_hash = ?
1792 |             ''', (content_hash,))
1793 |             
1794 |             row = cursor.fetchone()
1795 |             if not row:
1796 |                 return False, f"Memory with hash {content_hash} not found"
1797 |             
1798 |             content, current_tags, current_type, current_metadata_str, created_at, created_at_iso = row
1799 |             
1800 |             # Parse current metadata
1801 |             current_metadata = self._safe_json_loads(current_metadata_str, "update_memory_metadata")
1802 |             
1803 |             # Apply updates
1804 |             new_tags = current_tags
1805 |             new_type = current_type
1806 |             new_metadata = current_metadata.copy()
1807 |             
1808 |             # Handle tag updates
1809 |             if "tags" in updates:
1810 |                 if isinstance(updates["tags"], list):
1811 |                     new_tags = ",".join(updates["tags"])
1812 |                 else:
1813 |                     return False, "Tags must be provided as a list of strings"
1814 |             
1815 |             # Handle memory type updates
1816 |             if "memory_type" in updates:
1817 |                 new_type = updates["memory_type"]
1818 |             
1819 |             # Handle metadata updates
1820 |             if "metadata" in updates:
1821 |                 if isinstance(updates["metadata"], dict):
1822 |                     new_metadata.update(updates["metadata"])
1823 |                 else:
1824 |                     return False, "Metadata must be provided as a dictionary"
1825 |             
1826 |             # Handle other custom fields
1827 |             protected_fields = {
1828 |                 "content", "content_hash", "tags", "memory_type", "metadata",
1829 |                 "embedding", "created_at", "created_at_iso", "updated_at", "updated_at_iso"
1830 |             }
1831 |             
1832 |             for key, value in updates.items():
1833 |                 if key not in protected_fields:
1834 |                     new_metadata[key] = value
1835 |             
1836 |             # Update timestamps
1837 |             now = time.time()
1838 |             now_iso = datetime.utcfromtimestamp(now).isoformat() + "Z"
1839 | 
1840 |             # Handle timestamp updates based on preserve_timestamps flag
1841 |             if not preserve_timestamps:
1842 |                 # When preserve_timestamps=False, use timestamps from updates dict if provided
1843 |                 # This allows syncing timestamps from source (e.g., Cloudflare → SQLite)
1844 |                 # Always preserve created_at (never reset to current time!)
1845 |                 created_at = updates.get('created_at', created_at)
1846 |                 created_at_iso = updates.get('created_at_iso', created_at_iso)
1847 |                 # Use updated_at from updates or current time
1848 |                 updated_at = updates.get('updated_at', now)
1849 |                 updated_at_iso = updates.get('updated_at_iso', now_iso)
1850 |             else:
1851 |                 # preserve_timestamps=True: only update updated_at to current time
1852 |                 updated_at = now
1853 |                 updated_at_iso = now_iso
1854 | 
1855 |             # Update the memory
1856 |             self.conn.execute('''
1857 |                 UPDATE memories SET
1858 |                     tags = ?, memory_type = ?, metadata = ?,
1859 |                     updated_at = ?, updated_at_iso = ?,
1860 |                     created_at = ?, created_at_iso = ?
1861 |                 WHERE content_hash = ?
1862 |             ''', (
1863 |                 new_tags, new_type, json.dumps(new_metadata),
1864 |                 updated_at, updated_at_iso, created_at, created_at_iso, content_hash
1865 |             ))
1866 |             
1867 |             self.conn.commit()
1868 |             
1869 |             # Create summary of updated fields
1870 |             updated_fields = []
1871 |             if "tags" in updates:
1872 |                 updated_fields.append("tags")
1873 |             if "memory_type" in updates:
1874 |                 updated_fields.append("memory_type")
1875 |             if "metadata" in updates:
1876 |                 updated_fields.append("custom_metadata")
1877 |             
1878 |             for key in updates.keys():
1879 |                 if key not in protected_fields and key not in ["tags", "memory_type", "metadata"]:
1880 |                     updated_fields.append(key)
1881 |             
1882 |             updated_fields.append("updated_at")
1883 | 
1884 |             summary = f"Updated fields: {', '.join(updated_fields)}"
1885 |             logger.info(f"Successfully updated metadata for memory {content_hash}")
1886 |             return True, summary
1887 | 
1888 |         except Exception as e:
1889 |             error_msg = f"Error updating memory metadata: {str(e)}"
1890 |             logger.error(error_msg)
1891 |             logger.error(traceback.format_exc())
1892 |             return False, error_msg
1893 | 
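    # Usage sketch: patching tags and custom metadata in place while keeping the
    # original created_at. `storage` and the hash value are illustrative.
    #
    #     ok, summary = await storage.update_memory_metadata(
    #         content_hash="abc123...",
    #         updates={"tags": ["project-x", "reviewed"], "metadata": {"priority": "high"}},
    #         preserve_timestamps=True,
    #     )
    #     # summary -> "Updated fields: tags, custom_metadata, updated_at"
    #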
1894 |     async def update_memories_batch(self, memories: List[Memory]) -> List[bool]:
1895 |         """
1896 |         Update multiple memories in a single database transaction for optimal performance.
1897 | 
1898 |         This method processes all updates in a single transaction, significantly improving
1899 |         performance compared to individual update_memory() calls.
1900 | 
1901 |         Args:
1902 |             memories: List of Memory objects with updated fields
1903 | 
1904 |         Returns:
1905 |             List of success booleans, one for each memory in the batch
1906 |         """
1907 |         if not memories:
1908 |             return []
1909 | 
1910 |         try:
1911 |             if not self.conn:
1912 |                 return [False] * len(memories)
1913 | 
1914 |             results = [False] * len(memories)
1915 |             now = time.time()
1916 |             now_iso = datetime.utcfromtimestamp(now).isoformat() + "Z"
1917 | 
1918 |             # All updates share one implicit transaction: python's sqlite3 opens it
1919 |             # on the first write statement and we commit once after the loop completes
1920 |             cursor = self.conn.cursor()
1921 | 
1922 |             for idx, memory in enumerate(memories):
1923 |                 try:
1924 |                     # Get current memory data
1925 |                     cursor.execute('''
1926 |                         SELECT content, tags, memory_type, metadata, created_at, created_at_iso
1927 |                         FROM memories WHERE content_hash = ?
1928 |                     ''', (memory.content_hash,))
1929 | 
1930 |                     row = cursor.fetchone()
1931 |                     if not row:
1932 |                         logger.warning(f"Memory {memory.content_hash} not found during batch update")
1933 |                         continue
1934 | 
1935 |                     content, current_tags, current_type, current_metadata_str, created_at, created_at_iso = row
1936 | 
1937 |                     # Parse current metadata
1938 |                     current_metadata = self._safe_json_loads(current_metadata_str, "update_memories_batch")
1939 | 
1940 |                     # Merge metadata (new metadata takes precedence)
1941 |                     if memory.metadata:
1942 |                         merged_metadata = current_metadata.copy()
1943 |                         merged_metadata.update(memory.metadata)
1944 |                     else:
1945 |                         merged_metadata = current_metadata
1946 | 
1947 |                     # Prepare new values
1948 |                     new_tags = ",".join(memory.tags) if memory.tags else current_tags
1949 |                     new_type = memory.memory_type if memory.memory_type else current_type
1950 | 
1951 |                     # Execute update
1952 |                     cursor.execute('''
1953 |                         UPDATE memories SET
1954 |                             tags = ?, memory_type = ?, metadata = ?,
1955 |                             updated_at = ?, updated_at_iso = ?
1956 |                         WHERE content_hash = ?
1957 |                     ''', (
1958 |                         new_tags, new_type, json.dumps(merged_metadata),
1959 |                         now, now_iso, memory.content_hash
1960 |                     ))
1961 | 
1962 |                     results[idx] = True
1963 | 
1964 |                 except Exception as e:
1965 |                     logger.warning(f"Failed to update memory {memory.content_hash} in batch: {e}")
1966 |                     continue
1967 | 
1968 |             # Commit all updates in a single transaction
1969 |             self.conn.commit()
1970 | 
1971 |             success_count = sum(results)
1972 |             logger.info(f"Batch update completed: {success_count}/{len(memories)} memories updated successfully")
1973 | 
1974 |             return results
1975 | 
1976 |         except Exception as e:
1977 |             # Rollback on error
1978 |             if self.conn:
1979 |                 self.conn.rollback()
1980 |             logger.error(f"Batch update failed: {e}")
1981 |             logger.error(traceback.format_exc())
1982 |             return [False] * len(memories)
1983 | 
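    # Usage sketch: batching tag/metadata updates into a single transaction instead
    # of N separate commits. `storage` and `modified_memories` are illustrative;
    # only tags, memory_type and metadata are rewritten, content is left untouched.
    #
    #     results = await storage.update_memories_batch(modified_memories)
    #     failed = [m.content_hash for m, ok in zip(modified_memories, results) if not ok]
    #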
1984 |     async def get_stats(self) -> Dict[str, Any]:
1985 |         """Get storage statistics."""
1986 |         try:
1987 |             if not self.conn:
1988 |                 return {"error": "Database not initialized"}
1989 | 
1990 |             # Exclude soft-deleted memories from all stats
1991 |             cursor = self.conn.execute('SELECT COUNT(*) FROM memories WHERE deleted_at IS NULL')
1992 |             total_memories = cursor.fetchone()[0]
1993 | 
1994 |             # Count unique individual tags (not tag sets)
1995 |             cursor = self.conn.execute('SELECT tags FROM memories WHERE tags IS NOT NULL AND tags != "" AND deleted_at IS NULL')
1996 |             unique_tags = len(set(
1997 |                 tag.strip()
1998 |                 for (tag_string,) in cursor
1999 |                 if tag_string
2000 |                 for tag in tag_string.split(",")
2001 |                 if tag.strip()
2002 |             ))
2003 | 
2004 |             # Count memories from this week (last 7 days)
2005 |             import time
2006 |             week_ago = time.time() - (7 * 24 * 60 * 60)
2007 |             cursor = self.conn.execute('SELECT COUNT(*) FROM memories WHERE created_at >= ? AND deleted_at IS NULL', (week_ago,))
2008 |             memories_this_week = cursor.fetchone()[0]
2009 | 
2010 |             # Get database file size
2011 |             file_size = os.path.getsize(self.db_path) if os.path.exists(self.db_path) else 0
2012 | 
2013 |             return {
2014 |                 "backend": "sqlite-vec",
2015 |                 "total_memories": total_memories,
2016 |                 "unique_tags": unique_tags,
2017 |                 "memories_this_week": memories_this_week,
2018 |                 "database_size_bytes": file_size,
2019 |                 "database_size_mb": round(file_size / (1024 * 1024), 2),
2020 |                 "embedding_model": self.embedding_model_name,
2021 |                 "embedding_dimension": self.embedding_dimension
2022 |             }
2023 | 
2024 |         except sqlite3.Error as e:
2025 |             logger.error(f"Database error getting stats: {str(e)}")
2026 |             return {"error": f"Database error: {str(e)}"}
2027 |         except OSError as e:
2028 |             logger.error(f"File system error getting stats: {str(e)}")
2029 |             return {"error": f"File system error: {str(e)}"}
2030 |         except Exception as e:
2031 |             logger.error(f"Unexpected error getting stats: {str(e)}")
2032 |             return {"error": f"Unexpected error: {str(e)}"}
2033 |     
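    # Usage sketch: the stats dict is plain JSON-serializable data, so it can be
    # surfaced directly by a health or diagnostics handler. `storage` is an
    # assumed initialized instance of this backend.
    #
    #     stats = await storage.get_stats()
    #     print(stats["total_memories"], stats["database_size_mb"])
    #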
2034 |     def sanitized(self, tags):
2035 |         """Sanitize and normalize tags to a JSON string.
2036 | 
2037 |         This method provides compatibility with the storage backend interface.
2038 |         """
2039 |         if tags is None:
2040 |             return json.dumps([])
2041 |         
2042 |         # If we get a string, split it into an array
2043 |         if isinstance(tags, str):
2044 |             tags = [tag.strip() for tag in tags.split(",") if tag.strip()]
2045 |         # If we get an array, use it directly
2046 |         elif isinstance(tags, list):
2047 |             tags = [str(tag).strip() for tag in tags if str(tag).strip()]
2048 |         else:
2049 |             return json.dumps([])
2050 |                 
2051 |         # Return JSON string representation of the array
2052 |         return json.dumps(tags)
2053 |     
2054 |     async def recall(self, query: Optional[str] = None, n_results: int = 5, start_timestamp: Optional[float] = None, end_timestamp: Optional[float] = None) -> List[MemoryQueryResult]:
2055 |         """
2056 |         Retrieve memories with combined time filtering and optional semantic search.
2057 |         
2058 |         Args:
2059 |             query: Optional semantic search query. If None, only time filtering is applied.
2060 |             n_results: Maximum number of results to return.
2061 |             start_timestamp: Optional start time for filtering.
2062 |             end_timestamp: Optional end time for filtering.
2063 |             
2064 |         Returns:
2065 |             List of MemoryQueryResult objects.
2066 |         """
2067 |         try:
2068 |             if not self.conn:
2069 |                 logger.error("Database not initialized, cannot retrieve memories")
2070 |                 return []
2071 |             
2072 |             # Build time filtering WHERE clause
2073 |             time_conditions = []
2074 |             params = []
2075 |             
2076 |             if start_timestamp is not None:
2077 |                 time_conditions.append("created_at >= ?")
2078 |                 params.append(float(start_timestamp))
2079 |             
2080 |             if end_timestamp is not None:
2081 |                 time_conditions.append("created_at <= ?")
2082 |                 params.append(float(end_timestamp))
2083 |             
2084 |             time_where = " AND ".join(time_conditions) if time_conditions else ""
2085 |             
2086 |             logger.info(f"Time filtering conditions: {time_where}, params: {params}")
2087 |             
2088 |             # Determine whether to use semantic search or just time-based filtering
2089 |             if query and self.embedding_model:
2090 |                 # Combined semantic search with time filtering
2091 |                 try:
2092 |                     # Generate query embedding
2093 |                     query_embedding = self._generate_embedding(query)
2094 |                     
2095 |                     # Build SQL query with time filtering
2096 |                     base_query = '''
2097 |                         SELECT m.content_hash, m.content, m.tags, m.memory_type, m.metadata,
2098 |                                m.created_at, m.updated_at, m.created_at_iso, m.updated_at_iso,
2099 |                                e.distance
2100 |                         FROM memories m
2101 |                         JOIN (
2102 |                             SELECT rowid, distance
2103 |                             FROM memory_embeddings
2104 |                             WHERE content_embedding MATCH ? AND k = ?
2105 |                         ) e ON m.id = e.rowid
2106 |                     '''
2107 |                     
2108 |                     if time_where:
2109 |                         base_query += f" WHERE {time_where}"
2110 |                     
2111 |                     base_query += " ORDER BY e.distance"
2112 |                     
2113 |                     # Prepare parameters: embedding, limit, then time filter params
2114 |                     query_params = [serialize_float32(query_embedding), n_results] + params
2115 |                     
2116 |                     cursor = self.conn.execute(base_query, query_params)
2117 |                     
2118 |                     results = []
2119 |                     for row in cursor.fetchall():
2120 |                         try:
2121 |                             # Parse row data
2122 |                             content_hash, content, tags_str, memory_type, metadata_str = row[:5]
2123 |                             created_at, updated_at, created_at_iso, updated_at_iso, distance = row[5:]
2124 |                             
2125 |                             # Parse tags and metadata
2126 |                             tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
2127 |                             metadata = self._safe_json_loads(metadata_str, "memory_metadata")
2128 |                             
2129 |                             # Create Memory object
2130 |                             memory = Memory(
2131 |                                 content=content,
2132 |                                 content_hash=content_hash,
2133 |                                 tags=tags,
2134 |                                 memory_type=memory_type,
2135 |                                 metadata=metadata,
2136 |                                 created_at=created_at,
2137 |                                 updated_at=updated_at,
2138 |                                 created_at_iso=created_at_iso,
2139 |                                 updated_at_iso=updated_at_iso
2140 |                             )
2141 |                             
2142 |                             # Calculate relevance score (lower distance = higher relevance)
2143 |                             relevance_score = max(0.0, 1.0 - distance)
2144 |                             
2145 |                             results.append(MemoryQueryResult(
2146 |                                 memory=memory,
2147 |                                 relevance_score=relevance_score,
2148 |                                 debug_info={"distance": distance, "backend": "sqlite-vec", "time_filtered": bool(time_where)}
2149 |                             ))
2150 |                             
2151 |                         except Exception as parse_error:
2152 |                             logger.warning(f"Failed to parse memory result: {parse_error}")
2153 |                             continue
2154 |                     
2155 |                     logger.info(f"Retrieved {len(results)} memories for semantic query with time filter")
2156 |                     return results
2157 |                     
2158 |                 except Exception as query_error:
2159 |                     logger.error(f"Error in semantic search with time filter: {str(query_error)}")
2160 |                     # Fall back to time-based retrieval on error
2161 |                     logger.info("Falling back to time-based retrieval")
2162 |             
2163 |             # Time-based filtering only (or fallback from failed semantic search)
2164 |             base_query = '''
2165 |                 SELECT content_hash, content, tags, memory_type, metadata,
2166 |                        created_at, updated_at, created_at_iso, updated_at_iso
2167 |                 FROM memories
2168 |             '''
2169 |             
2170 |             if time_where:
2171 |                 base_query += f" WHERE {time_where}"
2172 |             
2173 |             base_query += " ORDER BY created_at DESC LIMIT ?"
2174 |             
2175 |             # Add limit parameter
2176 |             params.append(n_results)
2177 |             
2178 |             cursor = self.conn.execute(base_query, params)
2179 |             
2180 |             results = []
2181 |             for row in cursor.fetchall():
2182 |                 try:
2183 |                     content_hash, content, tags_str, memory_type, metadata_str = row[:5]
2184 |                     created_at, updated_at, created_at_iso, updated_at_iso = row[5:]
2185 |                     
2186 |                     # Parse tags and metadata
2187 |                     tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
2188 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
2189 |                     
2190 |                     memory = Memory(
2191 |                         content=content,
2192 |                         content_hash=content_hash,
2193 |                         tags=tags,
2194 |                         memory_type=memory_type,
2195 |                         metadata=metadata,
2196 |                         created_at=created_at,
2197 |                         updated_at=updated_at,
2198 |                         created_at_iso=created_at_iso,
2199 |                         updated_at_iso=updated_at_iso
2200 |                     )
2201 |                     
2202 |                     # For time-based retrieval, we don't have a relevance score
2203 |                     results.append(MemoryQueryResult(
2204 |                         memory=memory,
2205 |                         relevance_score=None,
2206 |                         debug_info={"backend": "sqlite-vec", "time_filtered": bool(time_where), "query_type": "time_based"}
2207 |                     ))
2208 |                     
2209 |                 except Exception as parse_error:
2210 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
2211 |                     continue
2212 |             
2213 |             logger.info(f"Retrieved {len(results)} memories for time-based query")
2214 |             return results
2215 |             
2216 |         except Exception as e:
2217 |             logger.error(f"Error in recall: {str(e)}")
2218 |             logger.error(traceback.format_exc())
2219 |             return []
2220 |     
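    # Usage sketch: combining a semantic query with a "last 7 days" time window.
    # `storage` is an assumed initialized instance; timestamps are plain Unix
    # floats, matching the created_at column.
    #
    #     import time
    #     week_ago = time.time() - 7 * 24 * 60 * 60
    #     results = await storage.recall("deployment issues", n_results=5,
    #                                    start_timestamp=week_ago)
    #     for r in results:
    #         print(r.relevance_score, r.memory.content[:80])
    #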
2221 |     async def get_all_memories(self) -> List[Memory]:
2222 |         """
2223 |         Get all memories from the database.
2224 |         
2225 |         Returns:
2226 |             List of all Memory objects in the database.
2227 |         """
2228 |         try:
2229 |             if not self.conn:
2230 |                 logger.error("Database not initialized, cannot retrieve memories")
2231 |                 return []
2232 |             
2233 |             cursor = self.conn.execute('''
2234 |                 SELECT m.content_hash, m.content, m.tags, m.memory_type, m.metadata,
2235 |                        m.created_at, m.updated_at, m.created_at_iso, m.updated_at_iso,
2236 |                        e.content_embedding
2237 |                 FROM memories m
2238 |                 LEFT JOIN memory_embeddings e ON m.id = e.rowid
2239 |                 WHERE m.deleted_at IS NULL
2240 |                 ORDER BY m.created_at DESC
2241 |             ''')
2242 | 
2243 |             results = []
2244 |             for row in cursor.fetchall():
2245 |                 try:
2246 |                     content_hash, content, tags_str, memory_type, metadata_str = row[:5]
2247 |                     created_at, updated_at, created_at_iso, updated_at_iso = row[5:9]
2248 |                     embedding_blob = row[9] if len(row) > 9 else None
2249 | 
2250 |                     # Parse tags and metadata
2251 |                     tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
2252 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
2253 | 
2254 |                     # Deserialize embedding if present
2255 |                     embedding = None
2256 |                     if embedding_blob:
2257 |                         embedding = deserialize_embedding(embedding_blob)
2258 | 
2259 |                     memory = Memory(
2260 |                         content=content,
2261 |                         content_hash=content_hash,
2262 |                         tags=tags,
2263 |                         memory_type=memory_type,
2264 |                         metadata=metadata,
2265 |                         embedding=embedding,
2266 |                         created_at=created_at,
2267 |                         updated_at=updated_at,
2268 |                         created_at_iso=created_at_iso,
2269 |                         updated_at_iso=updated_at_iso
2270 |                     )
2271 |                     
2272 |                     results.append(memory)
2273 |                     
2274 |                 except Exception as parse_error:
2275 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
2276 |                     continue
2277 |             
2278 |             logger.info(f"Retrieved {len(results)} total memories")
2279 |             return results
2280 |             
2281 |         except Exception as e:
2282 |             logger.error(f"Error getting all memories: {str(e)}")
2283 |             return []
2284 | 
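    # NOTE: a second, parameterized get_all_memories(limit=..., offset=...,
    # memory_type=..., tags=...) is defined further down in this class. Because
    # Python keeps only the last definition of a name in a class body, that later
    # version is the one callers actually invoke; this zero-argument variant is
    # effectively shadowed.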
2285 |     async def get_memories_by_time_range(self, start_time: float, end_time: float) -> List[Memory]:
2286 |         """Get memories within a specific time range."""
2287 |         try:
2288 |             await self.initialize()
2289 |             cursor = self.conn.execute('''
2290 |                 SELECT content_hash, content, tags, memory_type, metadata,
2291 |                        created_at, updated_at, created_at_iso, updated_at_iso
2292 |                 FROM memories
2293 |                 WHERE created_at BETWEEN ? AND ?
2294 |                 ORDER BY created_at DESC
2295 |             ''', (start_time, end_time))
2296 |             
2297 |             results = []
2298 |             for row in cursor.fetchall():
2299 |                 try:
2300 |                     content_hash, content, tags_str, memory_type, metadata_str = row[:5]
2301 |                     created_at, updated_at, created_at_iso, updated_at_iso = row[5:]
2302 |                     
2303 |                     # Parse tags and metadata
2304 |                     tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
2305 |                     metadata = self._safe_json_loads(metadata_str, "memory_metadata")
2306 |                     
2307 |                     memory = Memory(
2308 |                         content=content,
2309 |                         content_hash=content_hash,
2310 |                         tags=tags,
2311 |                         memory_type=memory_type,
2312 |                         metadata=metadata,
2313 |                         created_at=created_at,
2314 |                         updated_at=updated_at,
2315 |                         created_at_iso=created_at_iso,
2316 |                         updated_at_iso=updated_at_iso
2317 |                     )
2318 |                     
2319 |                     results.append(memory)
2320 |                     
2321 |                 except Exception as parse_error:
2322 |                     logger.warning(f"Failed to parse memory result: {parse_error}")
2323 |                     continue
2324 |             
2325 |             logger.info(f"Retrieved {len(results)} memories in time range {start_time}-{end_time}")
2326 |             return results
2327 |             
2328 |         except Exception as e:
2329 |             logger.error(f"Error getting memories by time range: {str(e)}")
2330 |             return []
2331 | 
2332 |     async def get_memory_connections(self) -> Dict[str, int]:
2333 |         """Get memory connection statistics."""
2334 |         try:
2335 |             await self.initialize()
2336 |             # For now, return basic statistics based on tags and content similarity
2337 |             cursor = self.conn.execute('''
2338 |                 SELECT tags, COUNT(*) as count
2339 |                 FROM memories
2340 |                 WHERE tags IS NOT NULL AND tags != ''
2341 |                 GROUP BY tags
2342 |             ''')
2343 |             
2344 |             connections = {}
2345 |             for row in cursor.fetchall():
2346 |                 tags_str, count = row
2347 |                 if tags_str:
2348 |                     tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()]
2349 |                     for tag in tags:
2350 |                         connections[f"tag:{tag}"] = connections.get(f"tag:{tag}", 0) + count
2351 |             
2352 |             return connections
2353 |             
2354 |         except Exception as e:
2355 |             logger.error(f"Error getting memory connections: {str(e)}")
2356 |             return {}
2357 | 
2358 |     async def get_access_patterns(self) -> Dict[str, datetime]:
2359 |         """Get memory access pattern statistics."""
2360 |         try:
2361 |             await self.initialize()
2362 |             # Return recent access patterns based on updated_at timestamps
2363 |             cursor = self.conn.execute('''
2364 |                 SELECT content_hash, updated_at_iso
2365 |                 FROM memories
2366 |                 WHERE updated_at_iso IS NOT NULL
2367 |                 ORDER BY updated_at DESC
2368 |                 LIMIT 100
2369 |             ''')
2370 |             
2371 |             patterns = {}
2372 |             for row in cursor.fetchall():
2373 |                 content_hash, updated_at_iso = row
2374 |                 try:
2375 |                     patterns[content_hash] = datetime.fromisoformat(updated_at_iso.replace('Z', '+00:00'))
2376 |                 except Exception:
2377 |                     # Fallback for timestamp parsing issues
2378 |                     patterns[content_hash] = datetime.now()
2379 |             
2380 |             return patterns
2381 |             
2382 |         except Exception as e:
2383 |             logger.error(f"Error getting access patterns: {str(e)}")
2384 |             return {}
2385 | 
2386 |     def _row_to_memory(self, row) -> Optional[Memory]:
2387 |         """Convert database row to Memory object."""
2388 |         try:
2389 |             # Handle both 9-column (without embedding) and 10-column (with embedding) rows
2390 |             content_hash, content, tags_str, memory_type, metadata_str, created_at, updated_at, created_at_iso, updated_at_iso = row[:9]
2391 |             embedding_blob = row[9] if len(row) > 9 else None
2392 | 
2393 |             # Parse tags (comma-separated format)
2394 |             tags = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else []
2395 | 
2396 |             # Parse metadata
2397 |             metadata = self._safe_json_loads(metadata_str, "get_by_hash")
2398 | 
2399 |             # Deserialize embedding if present
2400 |             embedding = None
2401 |             if embedding_blob:
2402 |                 embedding = deserialize_embedding(embedding_blob)
2403 | 
2404 |             return Memory(
2405 |                 content=content,
2406 |                 content_hash=content_hash,
2407 |                 tags=tags,
2408 |                 memory_type=memory_type,
2409 |                 metadata=metadata,
2410 |                 embedding=embedding,
2411 |                 created_at=created_at,
2412 |                 updated_at=updated_at,
2413 |                 created_at_iso=created_at_iso,
2414 |                 updated_at_iso=updated_at_iso
2415 |             )
2416 |             
2417 |         except Exception as e:
2418 |             logger.error(f"Error converting row to memory: {str(e)}")
2419 |             return None
2420 | 
2421 |     async def get_all_memories(self, limit: int = None, offset: int = 0, memory_type: Optional[str] = None, tags: Optional[List[str]] = None) -> List[Memory]:
2422 |         """
2423 |         Get all memories in storage ordered by creation time (newest first).
2424 | 
2425 |         Args:
2426 |             limit: Maximum number of memories to return (None for all)
2427 |             offset: Number of memories to skip (for pagination)
2428 |             memory_type: Optional filter by memory type
2429 |             tags: Optional filter by tags (matches ANY of the provided tags)
2430 | 
2431 |         Returns:
2432 |             List of Memory objects ordered by created_at DESC, optionally filtered by type and tags
2433 |         """
2434 |         try:
2435 |             await self.initialize()
2436 | 
2437 |             # Build query with optional memory_type and tags filters
2438 |             query = '''
2439 |                 SELECT m.content_hash, m.content, m.tags, m.memory_type, m.metadata,
2440 |                        m.created_at, m.updated_at, m.created_at_iso, m.updated_at_iso,
2441 |                        e.content_embedding
2442 |                 FROM memories m
2443 |                 LEFT JOIN memory_embeddings e ON m.id = e.rowid
2444 |             '''
2445 | 
2446 |             params = []
2447 |             where_conditions = []
2448 | 
2449 |             # Always exclude soft-deleted memories
2450 |             where_conditions.append('m.deleted_at IS NULL')
2451 | 
2452 |             # Add memory_type filter if specified
2453 |             if memory_type is not None:
2454 |                 where_conditions.append('m.memory_type = ?')
2455 |                 params.append(memory_type)
2456 | 
2457 |             # Add tags filter if specified (using database-level filtering like search_by_tag_chronological)
2458 |             if tags and len(tags) > 0:
2459 |                 tag_conditions = " OR ".join(["m.tags LIKE ?" for _ in tags])
2460 |                 where_conditions.append(f"({tag_conditions})")
2461 |                 params.extend([f"%{tag}%" for tag in tags])
2462 | 
2463 |             # Apply WHERE clause
2464 |             query += ' WHERE ' + ' AND '.join(where_conditions)
2465 | 
2466 |             query += ' ORDER BY m.created_at DESC'
2467 | 
2468 |             if limit is not None:
2469 |                 query += ' LIMIT ?'
2470 |                 params.append(limit)
2471 | 
2472 |             if offset > 0:
2473 |                 query += ' OFFSET ?'
2474 |                 params.append(offset)
2475 |             
2476 |             cursor = self.conn.execute(query, params)
2477 |             memories = []
2478 |             
2479 |             for row in cursor.fetchall():
2480 |                 memory = self._row_to_memory(row)
2481 |                 if memory:
2482 |                     memories.append(memory)
2483 |             
2484 |             return memories
2485 |             
2486 |         except Exception as e:
2487 |             logger.error(f"Error getting all memories: {str(e)}")
2488 |             return []
2489 | 
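    # Usage sketch: paging through memories with limit/offset, pairing
    # get_all_memories() with count_all_memories() (defined later in this class).
    # `storage`, the page size, and the memory_type filter are illustrative.
    #
    #     page_size = 100
    #     total = await storage.count_all_memories(memory_type="note")
    #     for offset in range(0, total, page_size):
    #         page = await storage.get_all_memories(limit=page_size, offset=offset,
    #                                               memory_type="note")
    #         ...  # process page
    #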
2490 |     async def get_recent_memories(self, n: int = 10) -> List[Memory]:
2491 |         """
2492 |         Get n most recent memories.
2493 | 
2494 |         Args:
2495 |             n: Number of recent memories to return
2496 | 
2497 |         Returns:
2498 |             List of the n most recent Memory objects
2499 |         """
2500 |         return await self.get_all_memories(limit=n, offset=0)
2501 | 
2502 |     async def get_largest_memories(self, n: int = 10) -> List[Memory]:
2503 |         """
2504 |         Get n largest memories by content length.
2505 | 
2506 |         Args:
2507 |             n: Number of largest memories to return
2508 | 
2509 |         Returns:
2510 |             List of the n largest Memory objects ordered by content length descending
2511 |         """
2512 |         try:
2513 |             await self.initialize()
2514 | 
2515 |             # Query for largest memories by content length
2516 |             query = """
2517 |                 SELECT content_hash, content, tags, memory_type, metadata, created_at, updated_at
2518 |                 FROM memories
2519 |                 ORDER BY LENGTH(content) DESC
2520 |                 LIMIT ?
2521 |             """
2522 | 
2523 |             cursor = self.conn.execute(query, (n,))
2524 |             rows = cursor.fetchall()
2525 | 
2526 |             memories = []
2527 |             for row in rows:
2528 |                 try:
2529 |                     memory = Memory(
2530 |                         content_hash=row[0],
2531 |                         content=row[1],
2532 |                         tags=[t.strip() for t in row[2].split(",") if t.strip()] if row[2] else [],  # tags are stored comma-separated, not JSON
2533 |                         memory_type=row[3],
2534 |                         metadata=json.loads(row[4]) if row[4] else {},
2535 |                         created_at=row[5],
2536 |                         updated_at=row[6]
2537 |                     )
2538 |                     memories.append(memory)
2539 |                 except Exception as parse_error:
2540 |                     logger.warning(f"Failed to parse memory {row[0]}: {parse_error}")
2541 |                     continue
2542 | 
2543 |             return memories
2544 | 
2545 |         except Exception as e:
2546 |             logger.error(f"Error getting largest memories: {e}")
2547 |             return []
2548 | 
2549 |     async def get_memory_timestamps(self, days: Optional[int] = None) -> List[float]:
2550 |         """
2551 |         Get memory creation timestamps only, without loading full memory objects.
2552 | 
2553 |         This is an optimized method for analytics that only needs timestamps,
2554 |         avoiding the overhead of loading full memory content and embeddings.
2555 | 
2556 |         Args:
2557 |             days: Optional filter to only get memories from last N days
2558 | 
2559 |         Returns:
2560 |             List of Unix timestamps (float) in descending order (newest first)
2561 |         """
2562 |         try:
2563 |             await self.initialize()
2564 | 
2565 |             if days is not None:
2566 |                 cutoff = datetime.now(timezone.utc) - timedelta(days=days)
2567 |                 cutoff_timestamp = cutoff.timestamp()
2568 | 
2569 |                 query = """
2570 |                     SELECT created_at
2571 |                     FROM memories
2572 |                     WHERE created_at >= ?
2573 |                     ORDER BY created_at DESC
2574 |                 """
2575 |                 cursor = self.conn.execute(query, (cutoff_timestamp,))
2576 |             else:
2577 |                 query = """
2578 |                     SELECT created_at
2579 |                     FROM memories
2580 |                     ORDER BY created_at DESC
2581 |                 """
2582 |                 cursor = self.conn.execute(query)
2583 | 
2584 |             rows = cursor.fetchall()
2585 |             timestamps = [row[0] for row in rows if row[0] is not None]
2586 | 
2587 |             return timestamps
2588 | 
2589 |         except Exception as e:
2590 |             logger.error(f"Error getting memory timestamps: {e}")
2591 |             return []
2592 | 
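    # Usage sketch: building a per-day activity histogram from raw timestamps
    # without loading memory content. `storage` is an assumed initialized
    # instance; Counter comes from collections, as used elsewhere in this module.
    #
    #     from datetime import datetime, timezone
    #     ts = await storage.get_memory_timestamps(days=30)
    #     per_day = Counter(datetime.fromtimestamp(t, tz=timezone.utc).date() for t in ts)
    #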
2593 |     async def count_all_memories(self, memory_type: Optional[str] = None, tags: Optional[List[str]] = None) -> int:
2594 |         """
2595 |         Get total count of memories in storage.
2596 | 
2597 |         Args:
2598 |             memory_type: Optional filter by memory type
2599 |             tags: Optional filter by tags (memories matching ANY of the tags)
2600 | 
2601 |         Returns:
2602 |             Total number of memories, optionally filtered by type and/or tags
2603 |         """
2604 |         try:
2605 |             await self.initialize()
2606 | 
2607 |             # Build query with filters
2608 |             conditions = []
2609 |             params = []
2610 | 
2611 |             if memory_type is not None:
2612 |                 conditions.append('memory_type = ?')
2613 |                 params.append(memory_type)
2614 | 
2615 |             if tags:
2616 |                 # Filter by tags - match ANY tag (OR logic)
2617 |                 tag_conditions = ' OR '.join(['tags LIKE ?' for _ in tags])
2618 |                 conditions.append(f'({tag_conditions})')
2619 |                 # Add each tag with wildcards for LIKE matching
2620 |                 for tag in tags:
2621 |                     params.append(f'%{tag}%')
2622 | 
2623 |             # Build final query (always exclude soft-deleted)
2624 |             conditions.append('deleted_at IS NULL')
2625 |             query = 'SELECT COUNT(*) FROM memories WHERE ' + ' AND '.join(conditions)
2626 |             cursor = self.conn.execute(query, tuple(params))
2627 | 
2628 |             result = cursor.fetchone()
2629 |             return result[0] if result else 0
2630 | 
2631 |         except Exception as e:
2632 |             logger.error(f"Error counting memories: {str(e)}")
2633 |             return 0
2634 | 
2635 |     async def get_all_tags_with_counts(self) -> List[Dict[str, Any]]:
2636 |         """
2637 |         Get all tags with their usage counts.
2638 | 
2639 |         Returns:
2640 |             List of dictionaries with 'tag' and 'count' keys, sorted by count descending
2641 |         """
2642 |         try:
2643 |             await self.initialize()
2644 | 
2645 |             # Read-only query: no explicit transaction needed (WAL mode allows concurrent readers)
2646 |             # Get all tags from the database (exclude soft-deleted)
2647 |             cursor = self.conn.execute('''
2648 |                 SELECT tags
2649 |                 FROM memories
2650 |                 WHERE tags IS NOT NULL AND tags != '' AND deleted_at IS NULL
2651 |             ''')
2652 | 
2653 |             # Fetch all rows first to avoid holding cursor during processing
2654 |             rows = cursor.fetchall()
2655 | 
2656 |             # Yield control to event loop before processing
2657 |             await asyncio.sleep(0)
2658 | 
2659 |             # Use Counter with generator expression for memory efficiency
2660 |             tag_counter = Counter(
2661 |                 tag.strip()
2662 |                 for (tag_string,) in rows
2663 |                 if tag_string
2664 |                 for tag in tag_string.split(",")
2665 |                 if tag.strip()
2666 |             )
2667 | 
2668 |             # Return as list of dicts sorted by count descending
2669 |             return [{"tag": tag, "count": count} for tag, count in tag_counter.most_common()]
2670 | 
2671 |         except sqlite3.Error as e:
2672 |             logger.error(f"Database error getting tags with counts: {str(e)}")
2673 |             return []
2674 |         except Exception as e:
2675 |             logger.error(f"Unexpected error getting tags with counts: {str(e)}")
2676 |             raise
2677 | 
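    # Usage sketch: the returned list is already sorted by usage count, so a
    # simple slice gives a "top tags" view. `storage` is an assumed initialized
    # instance of this backend.
    #
    #     tag_stats = await storage.get_all_tags_with_counts()
    #     top10 = tag_stats[:10]          # [{"tag": "python", "count": 42}, ...]
    #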
2678 |     def close(self):
2679 |         """Close the database connection."""
2680 |         if self.conn:
2681 |             self.conn.close()
2682 |             self.conn = None
2683 |             logger.info("SQLite-vec storage connection closed")
2684 | 
```
Page 58/62