This is page 30 of 47. Use http://codebase.md/doobidoo/mcp-memory-service?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── amp-bridge.md
│   │   ├── amp-pr-automator.md
│   │   ├── code-quality-guard.md
│   │   ├── gemini-pr-automator.md
│   │   └── github-release-manager.md
│   ├── settings.local.json.backup
│   └── settings.local.json.local
├── .commit-message
├── .dockerignore
├── .env.example
├── .env.sqlite.backup
├── .envnn#
├── .gitattributes
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── performance_issue.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── bridge-tests.yml
│       ├── CACHE_FIX.md
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── cleanup-images.yml.disabled
│       ├── dev-setup-validation.yml
│       ├── docker-publish.yml
│       ├── LATEST_FIXES.md
│       ├── main-optimized.yml.disabled
│       ├── main.yml
│       ├── publish-and-test.yml
│       ├── README_OPTIMIZATION.md
│       ├── release-tag.yml.disabled
│       ├── release.yml
│       ├── roadmap-review-reminder.yml
│       ├── SECRET_CONDITIONAL_FIX.md
│       └── WORKFLOW_FIXES.md
├── .gitignore
├── .mcp.json.backup
├── .mcp.json.template
├── .pyscn
│   ├── .gitignore
│   └── reports
│       └── analyze_20251123_214224.html
├── AGENTS.md
├── archive
│   ├── deployment
│   │   ├── deploy_fastmcp_fixed.sh
│   │   ├── deploy_http_with_mcp.sh
│   │   └── deploy_mcp_v4.sh
│   ├── deployment-configs
│   │   ├── empty_config.yml
│   │   └── smithery.yaml
│   ├── development
│   │   └── test_fastmcp.py
│   ├── docs-removed-2025-08-23
│   │   ├── authentication.md
│   │   ├── claude_integration.md
│   │   ├── claude-code-compatibility.md
│   │   ├── claude-code-integration.md
│   │   ├── claude-code-quickstart.md
│   │   ├── claude-desktop-setup.md
│   │   ├── complete-setup-guide.md
│   │   ├── database-synchronization.md
│   │   ├── development
│   │   │   ├── autonomous-memory-consolidation.md
│   │   │   ├── CLEANUP_PLAN.md
│   │   │   ├── CLEANUP_README.md
│   │   │   ├── CLEANUP_SUMMARY.md
│   │   │   ├── dream-inspired-memory-consolidation.md
│   │   │   ├── hybrid-slm-memory-consolidation.md
│   │   │   ├── mcp-milestone.md
│   │   │   ├── multi-client-architecture.md
│   │   │   ├── test-results.md
│   │   │   └── TIMESTAMP_FIX_SUMMARY.md
│   │   ├── distributed-sync.md
│   │   ├── invocation_guide.md
│   │   ├── macos-intel.md
│   │   ├── master-guide.md
│   │   ├── mcp-client-configuration.md
│   │   ├── multi-client-server.md
│   │   ├── service-installation.md
│   │   ├── sessions
│   │   │   └── MCP_ENHANCEMENT_SESSION_MEMORY_v4.1.0.md
│   │   ├── UBUNTU_SETUP.md
│   │   ├── ubuntu.md
│   │   ├── windows-setup.md
│   │   └── windows.md
│   ├── docs-root-cleanup-2025-08-23
│   │   ├── AWESOME_LIST_SUBMISSION.md
│   │   ├── CLOUDFLARE_IMPLEMENTATION.md
│   │   ├── DOCUMENTATION_ANALYSIS.md
│   │   ├── DOCUMENTATION_CLEANUP_PLAN.md
│   │   ├── DOCUMENTATION_CONSOLIDATION_COMPLETE.md
│   │   ├── LITESTREAM_SETUP_GUIDE.md
│   │   ├── lm_studio_system_prompt.md
│   │   ├── PYTORCH_DOWNLOAD_FIX.md
│   │   └── README-ORIGINAL-BACKUP.md
│   ├── investigations
│   │   └── MACOS_HOOKS_INVESTIGATION.md
│   ├── litestream-configs-v6.3.0
│   │   ├── install_service.sh
│   │   ├── litestream_master_config_fixed.yml
│   │   ├── litestream_master_config.yml
│   │   ├── litestream_replica_config_fixed.yml
│   │   ├── litestream_replica_config.yml
│   │   ├── litestream_replica_simple.yml
│   │   ├── litestream-http.service
│   │   ├── litestream.service
│   │   └── requirements-cloudflare.txt
│   ├── release-notes
│   │   └── release-notes-v7.1.4.md
│   └── setup-development
│       ├── README.md
│       ├── setup_consolidation_mdns.sh
│       ├── STARTUP_SETUP_GUIDE.md
│       └── test_service.sh
├── CHANGELOG-HISTORIC.md
├── CHANGELOG.md
├── claude_commands
│   ├── memory-context.md
│   ├── memory-health.md
│   ├── memory-ingest-dir.md
│   ├── memory-ingest.md
│   ├── memory-recall.md
│   ├── memory-search.md
│   ├── memory-store.md
│   ├── README.md
│   └── session-start.md
├── claude-hooks
│   ├── config.json
│   ├── config.template.json
│   ├── CONFIGURATION.md
│   ├── core
│   │   ├── memory-retrieval.js
│   │   ├── mid-conversation.js
│   │   ├── session-end.js
│   │   ├── session-start.js
│   │   └── topic-change.js
│   ├── debug-pattern-test.js
│   ├── install_claude_hooks_windows.ps1
│   ├── install_hooks.py
│   ├── memory-mode-controller.js
│   ├── MIGRATION.md
│   ├── README-NATURAL-TRIGGERS.md
│   ├── README-phase2.md
│   ├── README.md
│   ├── simple-test.js
│   ├── statusline.sh
│   ├── test-adaptive-weights.js
│   ├── test-dual-protocol-hook.js
│   ├── test-mcp-hook.js
│   ├── test-natural-triggers.js
│   ├── test-recency-scoring.js
│   ├── tests
│   │   ├── integration-test.js
│   │   ├── phase2-integration-test.js
│   │   ├── test-code-execution.js
│   │   ├── test-cross-session.json
│   │   ├── test-session-tracking.json
│   │   └── test-threading.json
│   ├── utilities
│   │   ├── adaptive-pattern-detector.js
│   │   ├── context-formatter.js
│   │   ├── context-shift-detector.js
│   │   ├── conversation-analyzer.js
│   │   ├── dynamic-context-updater.js
│   │   ├── git-analyzer.js
│   │   ├── mcp-client.js
│   │   ├── memory-client.js
│   │   ├── memory-scorer.js
│   │   ├── performance-manager.js
│   │   ├── project-detector.js
│   │   ├── session-tracker.js
│   │   ├── tiered-conversation-monitor.js
│   │   └── version-checker.js
│   └── WINDOWS-SESSIONSTART-BUG.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Development-Sprint-November-2025.md
├── docs
│   ├── amp-cli-bridge.md
│   ├── api
│   │   ├── code-execution-interface.md
│   │   ├── memory-metadata-api.md
│   │   ├── PHASE1_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_IMPLEMENTATION_SUMMARY.md
│   │   ├── PHASE2_REPORT.md
│   │   └── tag-standardization.md
│   ├── architecture
│   │   ├── search-enhancement-spec.md
│   │   └── search-examples.md
│   ├── architecture.md
│   ├── archive
│   │   └── obsolete-workflows
│   │       ├── load_memory_context.md
│   │       └── README.md
│   ├── assets
│   │   └── images
│   │       ├── dashboard-v3.3.0-preview.png
│   │       ├── memory-awareness-hooks-example.png
│   │       ├── project-infographic.svg
│   │       └── README.md
│   ├── CLAUDE_CODE_QUICK_REFERENCE.md
│   ├── cloudflare-setup.md
│   ├── deployment
│   │   ├── docker.md
│   │   ├── dual-service.md
│   │   ├── production-guide.md
│   │   └── systemd-service.md
│   ├── development
│   │   ├── ai-agent-instructions.md
│   │   ├── code-quality
│   │   │   ├── phase-2a-completion.md
│   │   │   ├── phase-2a-handle-get-prompt.md
│   │   │   ├── phase-2a-index.md
│   │   │   ├── phase-2a-install-package.md
│   │   │   └── phase-2b-session-summary.md
│   │   ├── code-quality-workflow.md
│   │   ├── dashboard-workflow.md
│   │   ├── issue-management.md
│   │   ├── pr-review-guide.md
│   │   ├── refactoring-notes.md
│   │   ├── release-checklist.md
│   │   └── todo-tracker.md
│   ├── docker-optimized-build.md
│   ├── document-ingestion.md
│   ├── DOCUMENTATION_AUDIT.md
│   ├── enhancement-roadmap-issue-14.md
│   ├── examples
│   │   ├── analysis-scripts.js
│   │   ├── maintenance-session-example.md
│   │   ├── memory-distribution-chart.jsx
│   │   └── tag-schema.json
│   ├── first-time-setup.md
│   ├── glama-deployment.md
│   ├── guides
│   │   ├── advanced-command-examples.md
│   │   ├── chromadb-migration.md
│   │   ├── commands-vs-mcp-server.md
│   │   ├── mcp-enhancements.md
│   │   ├── mdns-service-discovery.md
│   │   ├── memory-consolidation-guide.md
│   │   ├── migration.md
│   │   ├── scripts.md
│   │   └── STORAGE_BACKENDS.md
│   ├── HOOK_IMPROVEMENTS.md
│   ├── hooks
│   │   └── phase2-code-execution-migration.md
│   ├── http-server-management.md
│   ├── ide-compatability.md
│   ├── IMAGE_RETENTION_POLICY.md
│   ├── images
│   │   └── dashboard-placeholder.md
│   ├── implementation
│   │   ├── health_checks.md
│   │   └── performance.md
│   ├── IMPLEMENTATION_PLAN_HTTP_SSE.md
│   ├── integration
│   │   ├── homebrew.md
│   │   └── multi-client.md
│   ├── integrations
│   │   ├── gemini.md
│   │   ├── groq-bridge.md
│   │   ├── groq-integration-summary.md
│   │   └── groq-model-comparison.md
│   ├── integrations.md
│   ├── legacy
│   │   └── dual-protocol-hooks.md
│   ├── LM_STUDIO_COMPATIBILITY.md
│   ├── maintenance
│   │   └── memory-maintenance.md
│   ├── mastery
│   │   ├── api-reference.md
│   │   ├── architecture-overview.md
│   │   ├── configuration-guide.md
│   │   ├── local-setup-and-run.md
│   │   ├── testing-guide.md
│   │   └── troubleshooting.md
│   ├── migration
│   │   └── code-execution-api-quick-start.md
│   ├── natural-memory-triggers
│   │   ├── cli-reference.md
│   │   ├── installation-guide.md
│   │   └── performance-optimization.md
│   ├── oauth-setup.md
│   ├── pr-graphql-integration.md
│   ├── quick-setup-cloudflare-dual-environment.md
│   ├── README.md
│   ├── remote-configuration-wiki-section.md
│   ├── research
│   │   ├── code-execution-interface-implementation.md
│   │   └── code-execution-interface-summary.md
│   ├── ROADMAP.md
│   ├── sqlite-vec-backend.md
│   ├── statistics
│   │   ├── charts
│   │   │   ├── activity_patterns.png
│   │   │   ├── contributors.png
│   │   │   ├── growth_trajectory.png
│   │   │   ├── monthly_activity.png
│   │   │   └── october_sprint.png
│   │   ├── data
│   │   │   ├── activity_by_day.csv
│   │   │   ├── activity_by_hour.csv
│   │   │   ├── contributors.csv
│   │   │   └── monthly_activity.csv
│   │   ├── generate_charts.py
│   │   └── REPOSITORY_STATISTICS.md
│   ├── technical
│   │   ├── development.md
│   │   ├── memory-migration.md
│   │   ├── migration-log.md
│   │   ├── sqlite-vec-embedding-fixes.md
│   │   └── tag-storage.md
│   ├── testing
│   │   └── regression-tests.md
│   ├── testing-cloudflare-backend.md
│   ├── troubleshooting
│   │   ├── cloudflare-api-token-setup.md
│   │   ├── cloudflare-authentication.md
│   │   ├── general.md
│   │   ├── hooks-quick-reference.md
│   │   ├── pr162-schema-caching-issue.md
│   │   ├── session-end-hooks.md
│   │   └── sync-issues.md
│   └── tutorials
│       ├── advanced-techniques.md
│       ├── data-analysis.md
│       └── demo-session-walkthrough.md
├── examples
│   ├── claude_desktop_config_template.json
│   ├── claude_desktop_config_windows.json
│   ├── claude-desktop-http-config.json
│   ├── config
│   │   └── claude_desktop_config.json
│   ├── http-mcp-bridge.js
│   ├── memory_export_template.json
│   ├── README.md
│   ├── setup
│   │   └── setup_multi_client_complete.py
│   └── start_https_example.sh
├── install_service.py
├── install.py
├── LICENSE
├── NOTICE
├── pyproject.toml
├── pytest.ini
├── README.md
├── run_server.py
├── scripts
│   ├── .claude
│   │   └── settings.local.json
│   ├── archive
│   │   └── check_missing_timestamps.py
│   ├── backup
│   │   ├── backup_memories.py
│   │   ├── backup_sqlite_vec.sh
│   │   ├── export_distributable_memories.sh
│   │   └── restore_memories.py
│   ├── benchmarks
│   │   ├── benchmark_code_execution_api.py
│   │   ├── benchmark_hybrid_sync.py
│   │   └── benchmark_server_caching.py
│   ├── database
│   │   ├── analyze_sqlite_vec_db.py
│   │   ├── check_sqlite_vec_status.py
│   │   ├── db_health_check.py
│   │   └── simple_timestamp_check.py
│   ├── development
│   │   ├── debug_server_initialization.py
│   │   ├── find_orphaned_files.py
│   │   ├── fix_mdns.sh
│   │   ├── fix_sitecustomize.py
│   │   ├── remote_ingest.sh
│   │   ├── setup-git-merge-drivers.sh
│   │   ├── uv-lock-merge.sh
│   │   └── verify_hybrid_sync.py
│   ├── hooks
│   │   └── pre-commit
│   ├── installation
│   │   ├── install_linux_service.py
│   │   ├── install_macos_service.py
│   │   ├── install_uv.py
│   │   ├── install_windows_service.py
│   │   ├── install.py
│   │   ├── setup_backup_cron.sh
│   │   ├── setup_claude_mcp.sh
│   │   └── setup_cloudflare_resources.py
│   ├── linux
│   │   ├── service_status.sh
│   │   ├── start_service.sh
│   │   ├── stop_service.sh
│   │   ├── uninstall_service.sh
│   │   └── view_logs.sh
│   ├── maintenance
│   │   ├── assign_memory_types.py
│   │   ├── check_memory_types.py
│   │   ├── cleanup_corrupted_encoding.py
│   │   ├── cleanup_memories.py
│   │   ├── cleanup_organize.py
│   │   ├── consolidate_memory_types.py
│   │   ├── consolidation_mappings.json
│   │   ├── delete_orphaned_vectors_fixed.py
│   │   ├── fast_cleanup_duplicates_with_tracking.sh
│   │   ├── find_all_duplicates.py
│   │   ├── find_cloudflare_duplicates.py
│   │   ├── find_duplicates.py
│   │   ├── memory-types.md
│   │   ├── README.md
│   │   ├── recover_timestamps_from_cloudflare.py
│   │   ├── regenerate_embeddings.py
│   │   ├── repair_malformed_tags.py
│   │   ├── repair_memories.py
│   │   ├── repair_sqlite_vec_embeddings.py
│   │   ├── repair_zero_embeddings.py
│   │   ├── restore_from_json_export.py
│   │   └── scan_todos.sh
│   ├── migration
│   │   ├── cleanup_mcp_timestamps.py
│   │   ├── legacy
│   │   │   └── migrate_chroma_to_sqlite.py
│   │   ├── mcp-migration.py
│   │   ├── migrate_sqlite_vec_embeddings.py
│   │   ├── migrate_storage.py
│   │   ├── migrate_tags.py
│   │   ├── migrate_timestamps.py
│   │   ├── migrate_to_cloudflare.py
│   │   ├── migrate_to_sqlite_vec.py
│   │   ├── migrate_v5_enhanced.py
│   │   ├── TIMESTAMP_CLEANUP_README.md
│   │   └── verify_mcp_timestamps.py
│   ├── pr
│   │   ├── amp_collect_results.sh
│   │   ├── amp_detect_breaking_changes.sh
│   │   ├── amp_generate_tests.sh
│   │   ├── amp_pr_review.sh
│   │   ├── amp_quality_gate.sh
│   │   ├── amp_suggest_fixes.sh
│   │   ├── auto_review.sh
│   │   ├── detect_breaking_changes.sh
│   │   ├── generate_tests.sh
│   │   ├── lib
│   │   │   └── graphql_helpers.sh
│   │   ├── quality_gate.sh
│   │   ├── resolve_threads.sh
│   │   ├── run_pyscn_analysis.sh
│   │   ├── run_quality_checks.sh
│   │   ├── thread_status.sh
│   │   └── watch_reviews.sh
│   ├── quality
│   │   ├── fix_dead_code_install.sh
│   │   ├── phase1_dead_code_analysis.md
│   │   ├── phase2_complexity_analysis.md
│   │   ├── README_PHASE1.md
│   │   ├── README_PHASE2.md
│   │   ├── track_pyscn_metrics.sh
│   │   └── weekly_quality_review.sh
│   ├── README.md
│   ├── run
│   │   ├── run_mcp_memory.sh
│   │   ├── run-with-uv.sh
│   │   └── start_sqlite_vec.sh
│   ├── run_memory_server.py
│   ├── server
│   │   ├── check_http_server.py
│   │   ├── check_server_health.py
│   │   ├── memory_offline.py
│   │   ├── preload_models.py
│   │   ├── run_http_server.py
│   │   ├── run_memory_server.py
│   │   ├── start_http_server.bat
│   │   └── start_http_server.sh
│   ├── service
│   │   ├── deploy_dual_services.sh
│   │   ├── install_http_service.sh
│   │   ├── mcp-memory-http.service
│   │   ├── mcp-memory.service
│   │   ├── memory_service_manager.sh
│   │   ├── service_control.sh
│   │   ├── service_utils.py
│   │   └── update_service.sh
│   ├── sync
│   │   ├── check_drift.py
│   │   ├── claude_sync_commands.py
│   │   ├── export_memories.py
│   │   ├── import_memories.py
│   │   ├── litestream
│   │   │   ├── apply_local_changes.sh
│   │   │   ├── enhanced_memory_store.sh
│   │   │   ├── init_staging_db.sh
│   │   │   ├── io.litestream.replication.plist
│   │   │   ├── manual_sync.sh
│   │   │   ├── memory_sync.sh
│   │   │   ├── pull_remote_changes.sh
│   │   │   ├── push_to_remote.sh
│   │   │   ├── README.md
│   │   │   ├── resolve_conflicts.sh
│   │   │   ├── setup_local_litestream.sh
│   │   │   ├── setup_remote_litestream.sh
│   │   │   ├── staging_db_init.sql
│   │   │   ├── stash_local_changes.sh
│   │   │   ├── sync_from_remote_noconfig.sh
│   │   │   └── sync_from_remote.sh
│   │   ├── README.md
│   │   ├── safe_cloudflare_update.sh
│   │   ├── sync_memory_backends.py
│   │   └── sync_now.py
│   ├── testing
│   │   ├── run_complete_test.py
│   │   ├── run_memory_test.sh
│   │   ├── simple_test.py
│   │   ├── test_cleanup_logic.py
│   │   ├── test_cloudflare_backend.py
│   │   ├── test_docker_functionality.py
│   │   ├── test_installation.py
│   │   ├── test_mdns.py
│   │   ├── test_memory_api.py
│   │   ├── test_memory_simple.py
│   │   ├── test_migration.py
│   │   ├── test_search_api.py
│   │   ├── test_sqlite_vec_embeddings.py
│   │   ├── test_sse_events.py
│   │   ├── test-connection.py
│   │   └── test-hook.js
│   ├── utils
│   │   ├── claude_commands_utils.py
│   │   ├── generate_personalized_claude_md.sh
│   │   ├── groq
│   │   ├── groq_agent_bridge.py
│   │   ├── list-collections.py
│   │   ├── memory_wrapper_uv.py
│   │   ├── query_memories.py
│   │   ├── smithery_wrapper.py
│   │   ├── test_groq_bridge.sh
│   │   └── uv_wrapper.py
│   └── validation
│       ├── check_dev_setup.py
│       ├── check_documentation_links.py
│       ├── diagnose_backend_config.py
│       ├── validate_configuration_complete.py
│       ├── validate_memories.py
│       ├── validate_migration.py
│       ├── validate_timestamp_integrity.py
│       ├── verify_environment.py
│       ├── verify_pytorch_windows.py
│       └── verify_torch.py
├── SECURITY.md
├── selective_timestamp_recovery.py
├── SPONSORS.md
├── src
│   └── mcp_memory_service
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── client.py
│       │   ├── operations.py
│       │   ├── sync_wrapper.py
│       │   └── types.py
│       ├── backup
│       │   ├── __init__.py
│       │   └── scheduler.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── ingestion.py
│       │   ├── main.py
│       │   └── utils.py
│       ├── config.py
│       ├── consolidation
│       │   ├── __init__.py
│       │   ├── associations.py
│       │   ├── base.py
│       │   ├── clustering.py
│       │   ├── compression.py
│       │   ├── consolidator.py
│       │   ├── decay.py
│       │   ├── forgetting.py
│       │   ├── health.py
│       │   └── scheduler.py
│       ├── dependency_check.py
│       ├── discovery
│       │   ├── __init__.py
│       │   ├── client.py
│       │   └── mdns_service.py
│       ├── embeddings
│       │   ├── __init__.py
│       │   └── onnx_embeddings.py
│       ├── ingestion
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chunker.py
│       │   ├── csv_loader.py
│       │   ├── json_loader.py
│       │   ├── pdf_loader.py
│       │   ├── registry.py
│       │   ├── semtools_loader.py
│       │   └── text_loader.py
│       ├── lm_studio_compat.py
│       ├── mcp_server.py
│       ├── models
│       │   ├── __init__.py
│       │   └── memory.py
│       ├── server.py
│       ├── services
│       │   ├── __init__.py
│       │   └── memory_service.py
│       ├── storage
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloudflare.py
│       │   ├── factory.py
│       │   ├── http_client.py
│       │   ├── hybrid.py
│       │   └── sqlite_vec.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── exporter.py
│       │   ├── importer.py
│       │   └── litestream_config.py
│       ├── utils
│       │   ├── __init__.py
│       │   ├── cache_manager.py
│       │   ├── content_splitter.py
│       │   ├── db_utils.py
│       │   ├── debug.py
│       │   ├── document_processing.py
│       │   ├── gpu_detection.py
│       │   ├── hashing.py
│       │   ├── http_server_manager.py
│       │   ├── port_detection.py
│       │   ├── system_detection.py
│       │   └── time_parser.py
│       └── web
│           ├── __init__.py
│           ├── api
│           │   ├── __init__.py
│           │   ├── analytics.py
│           │   ├── backup.py
│           │   ├── consolidation.py
│           │   ├── documents.py
│           │   ├── events.py
│           │   ├── health.py
│           │   ├── manage.py
│           │   ├── mcp.py
│           │   ├── memories.py
│           │   ├── search.py
│           │   └── sync.py
│           ├── app.py
│           ├── dependencies.py
│           ├── oauth
│           │   ├── __init__.py
│           │   ├── authorization.py
│           │   ├── discovery.py
│           │   ├── middleware.py
│           │   ├── models.py
│           │   ├── registration.py
│           │   └── storage.py
│           ├── sse.py
│           └── static
│               ├── app.js
│               ├── index.html
│               ├── README.md
│               ├── sse_test.html
│               └── style.css
├── start_http_debug.bat
├── start_http_server.sh
├── test_document.txt
├── test_version_checker.js
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── test_compact_types.py
│   │   └── test_operations.py
│   ├── bridge
│   │   ├── mock_responses.js
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   └── test_http_mcp_bridge.js
│   ├── conftest.py
│   ├── consolidation
│   │   ├── __init__.py
│   │   ├── conftest.py
│   │   ├── test_associations.py
│   │   ├── test_clustering.py
│   │   ├── test_compression.py
│   │   ├── test_consolidator.py
│   │   ├── test_decay.py
│   │   └── test_forgetting.py
│   ├── contracts
│   │   └── api-specification.yml
│   ├── integration
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── test_api_key_fallback.py
│   │   ├── test_api_memories_chronological.py
│   │   ├── test_api_tag_time_search.py
│   │   ├── test_api_with_memory_service.py
│   │   ├── test_bridge_integration.js
│   │   ├── test_cli_interfaces.py
│   │   ├── test_cloudflare_connection.py
│   │   ├── test_concurrent_clients.py
│   │   ├── test_data_serialization_consistency.py
│   │   ├── test_http_server_startup.py
│   │   ├── test_mcp_memory.py
│   │   ├── test_mdns_integration.py
│   │   ├── test_oauth_basic_auth.py
│   │   ├── test_oauth_flow.py
│   │   ├── test_server_handlers.py
│   │   └── test_store_memory.py
│   ├── performance
│   │   ├── test_background_sync.py
│   │   └── test_hybrid_live.py
│   ├── README.md
│   ├── smithery
│   │   └── test_smithery.py
│   ├── sqlite
│   │   └── simple_sqlite_vec_test.py
│   ├── test_client.py
│   ├── test_content_splitting.py
│   ├── test_database.py
│   ├── test_hybrid_cloudflare_limits.py
│   ├── test_hybrid_storage.py
│   ├── test_memory_ops.py
│   ├── test_semantic_search.py
│   ├── test_sqlite_vec_storage.py
│   ├── test_time_parser.py
│   ├── test_timestamp_preservation.py
│   ├── timestamp
│   │   ├── test_hook_vs_manual_storage.py
│   │   ├── test_issue99_final_validation.py
│   │   ├── test_search_retrieval_inconsistency.py
│   │   ├── test_timestamp_issue.py
│   │   └── test_timestamp_simple.py
│   └── unit
│       ├── conftest.py
│       ├── test_cloudflare_storage.py
│       ├── test_csv_loader.py
│       ├── test_fastapi_dependencies.py
│       ├── test_import.py
│       ├── test_json_loader.py
│       ├── test_mdns_simple.py
│       ├── test_mdns.py
│       ├── test_memory_service.py
│       ├── test_memory.py
│       ├── test_semtools_loader.py
│       ├── test_storage_interface_compatibility.py
│       └── test_tag_time_filtering.py
├── tools
│   ├── docker
│   │   ├── DEPRECATED.md
│   │   ├── docker-compose.http.yml
│   │   ├── docker-compose.pythonpath.yml
│   │   ├── docker-compose.standalone.yml
│   │   ├── docker-compose.uv.yml
│   │   ├── docker-compose.yml
│   │   ├── docker-entrypoint-persistent.sh
│   │   ├── docker-entrypoint-unified.sh
│   │   ├── docker-entrypoint.sh
│   │   ├── Dockerfile
│   │   ├── Dockerfile.glama
│   │   ├── Dockerfile.slim
│   │   ├── README.md
│   │   └── test-docker-modes.sh
│   └── README.md
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/docs/tutorials/data-analysis.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Data Analysis Examples
  2 | 
  3 | This guide demonstrates how to extract insights, patterns, and visualizations from your MCP Memory Service data, transforming stored knowledge into actionable intelligence.
  4 | 
  5 | ## 🎯 Overview
  6 | 
  7 | The MCP Memory Service can be used not just for storage and retrieval, but as a powerful analytics platform for understanding knowledge patterns, usage trends, and information relationships. This guide shows practical examples of data analysis techniques that reveal valuable insights about your knowledge base.
  8 | 
  9 | ## 📊 Types of Analysis
 10 | 
 11 | ### 1. Temporal Analysis
 12 | Understanding when and how your knowledge base grows over time.
 13 | 
 14 | ### 2. Content Analysis  
 15 | Analyzing what types of information are stored and how they're organized.
 16 | 
 17 | ### 3. Usage Pattern Analysis
 18 | Identifying how information is accessed and utilized.
 19 | 
 20 | ### 4. Quality Analysis
 21 | Measuring the health and organization of your knowledge base.
 22 | 
 23 | ### 5. Relationship Analysis
 24 | Discovering connections and patterns between different pieces of information.
 25 | 
 26 | ## 📈 Temporal Distribution Analysis
 27 | 
 28 | ### Basic Time-Based Queries
 29 | 
 30 | **Monthly Distribution:**
 31 | ```javascript
 32 | // Retrieve memories by time period
 33 | const januaryMemories = await recall_memory({
 34 |   "query": "memories from january 2025",
 35 |   "n_results": 50
 36 | });
 37 | 
 38 | const juneMemories = await recall_memory({
 39 |   "query": "memories from june 2025", 
 40 |   "n_results": 50
 41 | });
 42 | 
 43 | // Analyze patterns
 44 | console.log(`January: ${januaryMemories.length} memories`);
 45 | console.log(`June: ${juneMemories.length} memories`);
 46 | ```
 47 | 
 48 | **Weekly Activity Patterns:**
 49 | ```javascript
 50 | // Get recent activity
 51 | const lastWeek = await recall_memory({
 52 |   "query": "memories from last week",
 53 |   "n_results": 25
 54 | });
 55 | 
 56 | const thisWeek = await recall_memory({
 57 |   "query": "memories from this week",
 58 |   "n_results": 25
 59 | });
 60 | 
 61 | // Compare activity levels (guard against a division by zero when last week is empty)
 62 | const weeklyGrowth = lastWeek.length ? ((thisWeek.length - lastWeek.length) / lastWeek.length) * 100 : 0;
 63 | console.log(`Weekly growth rate: ${weeklyGrowth.toFixed(1)}%`);
 64 | ```
 65 | 
 66 | ### Advanced Temporal Analysis
 67 | 
 68 | **Memory Creation Frequency:**
 69 | ```javascript
 70 | // Process temporal data for visualization
 71 | function analyzeMemoryDistribution(memories) {
 72 |   const monthlyDistribution = {};
 73 |   
 74 |   memories.forEach(memory => {
 75 |     // Extract date from timestamp
 76 |     const date = new Date(memory.timestamp);
 77 |     const monthKey = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}`;
 78 |     
 79 |     if (!monthlyDistribution[monthKey]) {
 80 |       monthlyDistribution[monthKey] = {
 81 |         count: 0,
 82 |         memories: []
 83 |       };
 84 |     }
 85 |     
 86 |     monthlyDistribution[monthKey].count++;
 87 |     monthlyDistribution[monthKey].memories.push(memory);
 88 |   });
 89 |   
 90 |   return monthlyDistribution;
 91 | }
 92 | 
 93 | // Convert to chart data
 94 | function prepareChartData(distribution) {
 95 |   return Object.entries(distribution)
 96 |     .sort(([a], [b]) => a.localeCompare(b))
 97 |     .map(([month, data]) => {
 98 |       const [year, monthNum] = month.split('-');
 99 |       const monthNames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 
100 |                          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
101 |       const monthName = monthNames[parseInt(monthNum) - 1];
102 |       
103 |       return {
104 |         month: `${monthName} ${year}`,
105 |         count: data.count,
106 |         monthKey: month,
107 |         memories: data.memories
108 |       };
109 |     });
110 | }
111 | ```
112 | 
113 | **Project Lifecycle Analysis:**
114 | ```javascript
115 | // Analyze project phases through memory patterns
116 | async function analyzeProjectLifecycle(projectTag) {
117 |   const projectMemories = await search_by_tag({
118 |     "tags": [projectTag]
119 |   });
120 |   
121 |   // Group by status tags
122 |   const phases = {
123 |     planning: [],
124 |     development: [],
125 |     testing: [],
126 |     deployment: [],
127 |     maintenance: []
128 |   };
129 |   
130 |   projectMemories.forEach(memory => {
131 |     const tags = memory.tags || [];
132 |     
133 |     if (tags.includes('planning') || tags.includes('design')) {
134 |       phases.planning.push(memory);
135 |     } else if (tags.includes('development') || tags.includes('implementation')) {
136 |       phases.development.push(memory);
137 |     } else if (tags.includes('testing') || tags.includes('debugging')) {
138 |       phases.testing.push(memory);
139 |     } else if (tags.includes('deployment') || tags.includes('production')) {
140 |       phases.deployment.push(memory);
141 |     } else if (tags.includes('maintenance') || tags.includes('optimization')) {
142 |       phases.maintenance.push(memory);
143 |     }
144 |   });
145 |   
146 |   return phases;
147 | }
148 | 
149 | // Usage example
150 | const mcpLifecycle = await analyzeProjectLifecycle('mcp-memory-service');
151 | console.log('Project phases:', {
152 |   planning: mcpLifecycle.planning.length,
153 |   development: mcpLifecycle.development.length,
154 |   testing: mcpLifecycle.testing.length,
155 |   deployment: mcpLifecycle.deployment.length,
156 |   maintenance: mcpLifecycle.maintenance.length
157 | });
158 | ```
159 | 
160 | ## 🏷️ Tag Analysis
161 | 
162 | ### Tag Frequency Analysis
163 | 
164 | **Most Used Tags:**
165 | ```javascript
166 | async function analyzeTagFrequency(memories) {
167 |   // Use the provided memories, or fetch a broad sample (you may need to paginate for large datasets)
168 |   const allMemories = memories || await retrieve_memory({
169 |     "query": "all memories",
170 |     "n_results": 500
171 |   });
172 |   
173 |   const tagFrequency = {};
174 |   
175 |   allMemories.forEach(memory => {
176 |     const tags = memory.tags || [];
177 |     tags.forEach(tag => {
178 |       tagFrequency[tag] = (tagFrequency[tag] || 0) + 1;
179 |     });
180 |   });
181 |   
182 |   // Sort by frequency
183 |   const sortedTags = Object.entries(tagFrequency)
184 |     .sort(([,a], [,b]) => b - a)
185 |     .slice(0, 20); // Top 20 tags
186 |   
187 |   return sortedTags;
188 | }
189 | 
190 | // Generate insights
191 | const topTags = await analyzeTagFrequency();
192 | console.log('Most used tags:');
193 | topTags.forEach(([tag, count]) => {
194 |   console.log(`${tag}: ${count} memories`);
195 | });
196 | ```
197 | 
198 | **Tag Co-occurrence Analysis:**
199 | ```javascript
200 | function analyzeTagRelationships(memories) {
201 |   const cooccurrence = {};
202 |   
203 |   memories.forEach(memory => {
204 |     const tags = memory.tags || [];
205 |     
206 |     // For each pair of tags in the memory
207 |     for (let i = 0; i < tags.length; i++) {
208 |       for (let j = i + 1; j < tags.length; j++) {
209 |         const pair = [tags[i], tags[j]].sort().join(' + ');
210 |         cooccurrence[pair] = (cooccurrence[pair] || 0) + 1;
211 |       }
212 |     }
213 |   });
214 |   
215 |   // Find most common tag combinations
216 |   return Object.entries(cooccurrence)
217 |     .sort(([,a], [,b]) => b - a)
218 |     .slice(0, 10);
219 | }
220 | 
221 | // Usage
222 | const tagRelationships = analyzeTagRelationships(allMemories);
223 | console.log('Common tag combinations:');
224 | tagRelationships.forEach(([pair, count]) => {
225 |   console.log(`${pair}: ${count} times`);
226 | });
227 | ```
228 | 
229 | ### Tag Category Analysis
230 | 
231 | **Category Distribution:**
232 | ```javascript
233 | function categorizeTagsByType(tags) {
234 |   const categories = {
235 |     projects: [],
236 |     technologies: [],
237 |     activities: [],
238 |     status: [],
239 |     content: [],
240 |     temporal: [],
241 |     other: []
242 |   };
243 |   
244 |   // Define patterns for each category
245 |   const patterns = {
246 |     projects: /^(mcp-memory-service|memory-dashboard|github-integration)/,
247 |     technologies: /^(python|react|typescript|chromadb|git|docker)/,
248 |     activities: /^(testing|debugging|development|documentation|deployment)/,
249 |     status: /^(resolved|in-progress|blocked|verified|completed)/,
250 |     content: /^(concept|architecture|tutorial|reference|example)/,
251 |     temporal: /^(january|february|march|april|may|june|q1|q2|2025)/
252 |   };
253 |   
254 |   tags.forEach(([tag, count]) => {
255 |     let categorized = false;
256 |     
257 |     for (const [category, pattern] of Object.entries(patterns)) {
258 |       if (pattern.test(tag)) {
259 |         categories[category].push([tag, count]);
260 |         categorized = true;
261 |         break;
262 |       }
263 |     }
264 |     
265 |     if (!categorized) {
266 |       categories.other.push([tag, count]);
267 |     }
268 |   });
269 |   
270 |   return categories;
271 | }
272 | 
273 | // Analyze tag distribution by category
274 | const tagCategories = categorizeTagsByType(topTags);
275 | console.log('Tags by category:');
276 | Object.entries(tagCategories).forEach(([category, tags]) => {
277 |   console.log(`${category}: ${tags.length} unique tags`);
278 | });
279 | ```
280 | 
281 | ## 📋 Content Quality Analysis
282 | 
283 | ### Tagging Quality Assessment
284 | 
285 | **Untagged Memory Detection:**
286 | ```javascript
287 | async function findUntaggedMemories() {
288 |   // Search for potentially untagged content
289 |   const candidates = await retrieve_memory({
290 |     "query": "test simple basic example memory",
291 |     "n_results": 50
292 |   });
293 |   
294 |   const untagged = candidates.filter(memory => {
295 |     const tags = memory.tags || [];
296 |     return tags.length === 0 || 
297 |            (tags.length === 1 && ['test', 'memory', 'note'].includes(tags[0]));
298 |   });
299 |   
300 |   return {
301 |     total: candidates.length,
302 |     untagged: untagged.length,
303 |     percentage: (untagged.length / candidates.length) * 100,
304 |     examples: untagged.slice(0, 5)
305 |   };
306 | }
307 | 
308 | // Quality assessment
309 | const qualityReport = await findUntaggedMemories();
310 | console.log(`Tagging quality: ${(100 - qualityReport.percentage).toFixed(1)}% properly tagged`);
311 | ```
312 | 
313 | **Tag Consistency Analysis:**
314 | ```javascript
315 | function analyzeTagConsistency(memories) {
316 |   const patterns = {};
317 |   const inconsistencies = [];
318 |   
319 |   memories.forEach(memory => {
320 |     const content = memory.content;
321 |     const tags = memory.tags || [];
322 |     
323 |     // Look for common content patterns
324 |     if (content.includes('issue') || content.includes('bug')) {
325 |       const hasIssueTag = tags.some(tag => tag.includes('issue') || tag.includes('bug'));
326 |       if (!hasIssueTag) {
327 |         inconsistencies.push({
328 |           type: 'missing-issue-tag',
329 |           memory: memory.content.substring(0, 100),
330 |           tags: tags
331 |         });
332 |       }
333 |     }
334 |     
335 |     if (content.includes('test') || content.includes('TEST')) {
336 |       const hasTestTag = tags.includes('test') || tags.includes('testing');
337 |       if (!hasTestTag) {
338 |         inconsistencies.push({
339 |           type: 'missing-test-tag',
340 |           memory: memory.content.substring(0, 100),
341 |           tags: tags
342 |         });
343 |       }
344 |     }
345 |   });
346 |   
347 |   return {
348 |     totalMemories: memories.length,
349 |     inconsistencies: inconsistencies.length,
350 |     consistencyScore: ((memories.length - inconsistencies.length) / memories.length) * 100,
351 |     examples: inconsistencies.slice(0, 5)
352 |   };
353 | }
354 | ```
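    | 
    | **Usage sketch** (assumes `allMemories` was loaded earlier; only the two content patterns above are checked):
    | ```javascript
    | const consistency = analyzeTagConsistency(allMemories);
    | console.log(`Consistency score: ${consistency.consistencyScore.toFixed(1)}%`);
    | 
    | // Show a few example inconsistencies for manual review
    | consistency.examples.forEach(issue => {
    |   console.log(`[${issue.type}] ${issue.memory}`);
    | });
    | ```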
355 | 
356 | ## 📊 Visualization Examples
357 | 
358 | ### Memory Distribution Chart Data
359 | 
360 | **Prepare data for visualization:**
361 | ```javascript
362 | function prepareDistributionData(memories) {
363 |   const distribution = analyzeMemoryDistribution(memories);
364 |   const chartData = prepareChartData(distribution);
365 |   
366 |   // Add additional metrics
367 |   const total = chartData.reduce((sum, item) => sum + item.count, 0);
368 |   const average = total / chartData.length;
369 |   
370 |   // Identify peaks and valleys
371 |   const peak = chartData.reduce((max, item) => 
372 |     item.count > max.count ? item : max, chartData[0]);
373 |   const valley = chartData.reduce((min, item) => 
374 |     item.count < min.count ? item : min, chartData[0]);
375 |   
376 |   return {
377 |     chartData,
378 |     metrics: {
379 |       total,
380 |       average: Math.round(average * 10) / 10,
381 |       peak: { month: peak.month, count: peak.count },
382 |       valley: { month: valley.month, count: valley.count },
383 |       growth: calculateGrowthRate(chartData)
384 |     }
385 |   };
386 | }
387 | 
388 | function calculateGrowthRate(chartData) {
389 |   if (chartData.length < 2) return 0;
390 |   
391 |   const first = chartData[0].count;
392 |   const last = chartData[chartData.length - 1].count;
393 |   
394 |   return ((last - first) / first) * 100;
395 | }
396 | ```
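    | 
    | **Usage sketch** (ties the two helpers above together; `allMemories` is assumed from earlier):
    | ```javascript
    | const { chartData, metrics } = prepareDistributionData(allMemories);
    | 
    | console.log(`Total memories: ${metrics.total}`);
    | console.log(`Peak month: ${metrics.peak.month} (${metrics.peak.count})`);
    | console.log(`Growth, first to last month: ${metrics.growth.toFixed(1)}%`);
    | ```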
397 | 
398 | ### Activity Heatmap Data
399 | 
400 | **Generate activity patterns:**
401 | ```javascript
402 | function generateActivityHeatmap(memories) {
403 |   const heatmapData = {};
404 |   
405 |   memories.forEach(memory => {
406 |     const date = new Date(memory.timestamp);
407 |     const dayOfWeek = date.getDay(); // 0 = Sunday
408 |     const hour = date.getHours();
409 |     
410 |     const key = `${dayOfWeek}-${hour}`;
411 |     heatmapData[key] = (heatmapData[key] || 0) + 1;
412 |   });
413 |   
414 |   // Convert to matrix format for visualization
415 |   const matrix = [];
416 |   const days = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
417 |   
418 |   for (let day = 0; day < 7; day++) {
419 |     const dayData = [];
420 |     for (let hour = 0; hour < 24; hour++) {
421 |       const key = `${day}-${hour}`;
422 |       dayData.push({
423 |         day: days[day],
424 |         hour: hour,
425 |         value: heatmapData[key] || 0
426 |       });
427 |     }
428 |     matrix.push(dayData);
429 |   }
430 |   
431 |   return matrix;
432 | }
433 | ```
434 | 
435 | ## 🔍 Advanced Analytics
436 | 
437 | ### Semantic Similarity Analysis
438 | 
439 | **Find related memories:**
440 | ```javascript
441 | async function findRelatedMemories(targetMemory, threshold = 0.7) {
442 |   // Use semantic search to find similar content
443 |   const related = await retrieve_memory({
444 |     "query": targetMemory.content.substring(0, 200),
445 |     "n_results": 20
446 |   });
447 |   
448 |   // Filter by relevance score (if available)
449 |   const highlyRelated = related.filter(memory => 
450 |     memory.relevanceScore > threshold &&
451 |     memory.content_hash !== targetMemory.content_hash
452 |   );
453 |   
454 |   return highlyRelated;
455 | }
456 | 
457 | // Build knowledge graph data
458 | async function buildKnowledgeGraph(memories) {
459 |   const nodes = [];
460 |   const edges = [];
461 |   
462 |   for (const memory of memories.slice(0, 50)) { // Limit for performance
463 |     nodes.push({
464 |       id: memory.content_hash,
465 |       label: memory.content.substring(0, 50) + '...',
466 |       tags: memory.tags || [],
467 |       group: memory.tags?.[0] || 'untagged'
468 |     });
469 |     
470 |     const related = await findRelatedMemories(memory, 0.8);
471 |     
472 |     related.forEach(relatedMemory => {
473 |       edges.push({
474 |         from: memory.content_hash,
475 |         to: relatedMemory.content_hash,
476 |         weight: relatedMemory.relevanceScore || 0.5
477 |       });
478 |     });
479 |   }
480 |   
481 |   return { nodes, edges };
482 | }
483 | ```
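    | 
    | **Usage sketch** (note: this issues one semantic search per memory, so keep the input small; `relevanceScore` availability depends on your client):
    | ```javascript
    | const graph = await buildKnowledgeGraph(allMemories);
    | console.log(`Knowledge graph: ${graph.nodes.length} nodes, ${graph.edges.length} edges`);
    | ```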
484 | 
485 | ### Trend Analysis
486 | 
487 | **Identify emerging patterns:**
488 | ```javascript
489 | function analyzeTrends(memories, timeWindow = 30) {
490 |   const now = new Date();
491 |   const cutoff = new Date(now - timeWindow * 24 * 60 * 60 * 1000);
492 |   
493 |   const recentMemories = memories.filter(memory => 
494 |     new Date(memory.timestamp) > cutoff
495 |   );
496 |   
497 |   const historicalMemories = memories.filter(memory => 
498 |     new Date(memory.timestamp) <= cutoff
499 |   );
500 |   
501 |   // Analyze tag frequency changes
502 |   const recentTags = getTagFrequency(recentMemories);
503 |   const historicalTags = getTagFrequency(historicalMemories);
504 |   
505 |   const trends = [];
506 |   
507 |   Object.entries(recentTags).forEach(([tag, recentCount]) => {
508 |     const historicalCount = historicalTags[tag] || 0;
509 |     const change = recentCount - historicalCount;
510 |     const changePercent = historicalCount > 0 ? 
511 |       (change / historicalCount) * 100 : 100;
512 |     
513 |     if (Math.abs(changePercent) > 50) { // Significant change
514 |       trends.push({
515 |         tag,
516 |         trend: changePercent > 0 ? 'increasing' : 'decreasing',
517 |         change: changePercent,
518 |         recentCount,
519 |         historicalCount
520 |       });
521 |     }
522 |   });
523 |   
524 |   return trends.sort((a, b) => Math.abs(b.change) - Math.abs(a.change));
525 | }
526 | 
527 | function getTagFrequency(memories) {
528 |   const frequency = {};
529 |   memories.forEach(memory => {
530 |     (memory.tags || []).forEach(tag => {
531 |       frequency[tag] = (frequency[tag] || 0) + 1;
532 |     });
533 |   });
534 |   return frequency;
535 | }
536 | ```
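    | 
    | **Usage sketch** (30-day window; only tags whose frequency changed by more than 50% are reported):
    | ```javascript
    | const trends = analyzeTrends(allMemories, 30);
    | 
    | trends.slice(0, 5).forEach(({ tag, trend, change }) => {
    |   console.log(`${tag}: ${trend} (${change.toFixed(0)}%)`);
    | });
    | ```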
537 | 
538 | ## 📋 Analysis Workflows
539 | 
540 | ### Daily Analytics Routine
541 | 
542 | ```javascript
543 | async function runDailyAnalytics() {
544 |   console.log('🔍 Daily Memory Analytics Report');
545 |   console.log('================================');
546 |   
547 |   // 1. Recent activity
548 |   const todayMemories = await recall_memory({
549 |     "query": "memories from today",
550 |     "n_results": 50
551 |   });
552 |   console.log(`📊 Memories added today: ${todayMemories.length}`);
553 |   
554 |   // 2. Tag quality check
555 |   const qualityReport = await findUntaggedMemories();
556 |   console.log(`🏷️  Tagging quality: ${(100 - qualityReport.percentage).toFixed(1)}%`);
557 |   
558 |   // 3. Most active projects
559 |   const topTags = await analyzeTagFrequency();
560 |   const topProjects = topTags.filter(([tag]) => 
561 |     tag.includes('project') || tag.includes('service')
562 |   ).slice(0, 3);
563 |   console.log('🚀 Most active projects:', topProjects);
564 |   
565 |   // 4. Database health
566 |   const health = await check_database_health();
567 |   console.log(`💾 Database health: ${health.status}`);
568 |   
569 |   console.log('\n✅ Daily analytics complete');
570 | }
571 | ```
572 | 
573 | ### Weekly Analysis Report
574 | 
575 | ```javascript
576 | async function generateWeeklyReport() {
577 |   const weekMemories = await recall_memory({
578 |     "query": "memories from last week",
579 |     "n_results": 100
580 |   });
581 |   
582 |   const report = {
583 |     summary: {
584 |       totalMemories: weekMemories.length,
585 |       date: new Date().toISOString().split('T')[0]
586 |     },
587 |     
588 |     topCategories: await analyzeTagFrequency(weekMemories),
589 |     
590 |     qualityMetrics: await findUntaggedMemories(),
591 |     
592 |     trends: analyzeTrends(weekMemories, 7),
593 |     
594 |     recommendations: generateRecommendations(weekMemories)
595 |   };
596 |   
597 |   // Store report as memory
598 |   await store_memory({
599 |     "content": `Weekly Analytics Report - ${report.summary.date}: ${JSON.stringify(report, null, 2)}`,
600 |     "metadata": {
601 |       "tags": ["analytics", "weekly-report", "metrics", "summary"],
602 |       "type": "analytics-report"
603 |     }
604 |   });
605 |   
606 |   return report;
607 | }
608 | 
609 | function generateRecommendations(memories) {
610 |   const recommendations = [];
611 |   
612 |   // Tag consistency recommendations
613 |   const untagged = memories.filter(m => (m.tags || []).length === 0);
614 |   if (untagged.length > 0) {
615 |     recommendations.push({
616 |       type: 'tagging',
617 |       priority: 'high',
618 |       message: `${untagged.length} memories need tagging`
619 |     });
620 |   }
621 |   
622 |   // Content organization recommendations
623 |   const testMemories = memories.filter(m => 
624 |     m.content.toLowerCase().includes('test') && 
625 |     !(m.tags || []).includes('test')
626 |   );
627 |   if (testMemories.length > 0) {
628 |     recommendations.push({
629 |       type: 'organization',
630 |       priority: 'medium',
631 |       message: `${testMemories.length} test memories need proper categorization`
632 |     });
633 |   }
634 |   
635 |   return recommendations;
636 | }
637 | ```
638 | 
639 | ## 🎯 Practical Implementation
640 | 
641 | ### Setting Up Analytics Pipeline
642 | 
643 | **1. Create analysis script:**
644 | ```javascript
645 | // analytics.js
646 | const MemoryAnalytics = {
647 |   async runFullAnalysis() {
648 |     const results = {
649 |       temporal: await this.analyzeTemporalDistribution(),
650 |       tags: await this.analyzeTagUsage(),
651 |       quality: await this.assessQuality(),
652 |       trends: await this.identifyTrends()
653 |     };
654 |     
655 |     return results;
656 |   },
657 |   
658 |   async generateVisualizationData() {
659 |     const memories = await this.getAllMemories();
660 |     return prepareDistributionData(memories);
661 |   }
662 | };
663 | ```
664 | 
665 | **2. Schedule regular analysis:**
666 | ```javascript
667 | // Run analytics and store results
668 | async function scheduledAnalysis() {
669 |   const results = await MemoryAnalytics.runFullAnalysis();
670 |   
671 |   await store_memory({
672 |     "content": `Automated Analytics Report: ${JSON.stringify(results, null, 2)}`,
673 |     "metadata": {
674 |       "tags": ["automated-analytics", "system-analysis", "metrics"],
675 |       "type": "analytics-report"
676 |     }
677 |   });
678 | }
679 | 
680 | // Run weekly
681 | setInterval(scheduledAnalysis, 7 * 24 * 60 * 60 * 1000);
682 | ```
683 | 
684 | ## 📊 Export and Integration
685 | 
686 | ### Data Export for External Tools
687 | 
688 | **CSV Export:**
689 | ```javascript
690 | function exportToCSV(memories) {
691 |   const headers = ['Timestamp', 'Content_Preview', 'Tags', 'Type'];
692 |   const rows = memories.map(memory => [
693 |     memory.timestamp,
694 |     memory.content.substring(0, 100).replace(/,/g, ';'),
695 |     (memory.tags || []).join(';'),
696 |     memory.type || 'unknown'
697 |   ]);
698 |   
699 |   const csv = [headers, ...rows]
700 |     .map(row => row.map(field => `"${String(field).replace(/"/g, '""')}"`).join(','))
701 |     .join('\n');
702 |   
703 |   return csv;
704 | }
705 | ```
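    | 
    | **Usage sketch** (assumes a Node.js environment for file output; adapt as needed for your runtime):
    | ```javascript
    | const fs = require('fs');
    | 
    | fs.writeFileSync('memories_export.csv', exportToCSV(allMemories));
    | ```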
706 | 
707 | **JSON Export for Visualization Tools:**
708 | ```javascript
709 | async function exportForVisualization(memories) {
710 |   return {
711 |     metadata: {
712 |       total: memories.length,
713 |       exported: new Date().toISOString(),
714 |       schema_version: '1.0'
715 |     },
716 |     
717 |     temporal_data: prepareDistributionData(memories),
718 |     
719 |     tag_analysis: await analyzeTagFrequency(memories),
720 | 
721 |     relationships: await buildKnowledgeGraph(memories),
722 | 
723 |     quality_metrics: await findUntaggedMemories()
724 |   };
725 | }
726 | ```
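    | 
    | **Usage sketch** (reuses the Node.js `fs` module from the CSV example; `exportForVisualization` is async, so await it):
    | ```javascript
    | const payload = await exportForVisualization(allMemories);
    | fs.writeFileSync('memories_visualization.json', JSON.stringify(payload, null, 2));
    | ```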
727 | 
728 | ---
729 | 
730 | *These analysis examples demonstrate the power of treating your MCP Memory Service not just as storage, but as a comprehensive analytics platform for understanding and optimizing your knowledge management workflows.*
```

--------------------------------------------------------------------------------
/src/mcp_memory_service/services/memory_service.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Memory Service - Shared business logic for memory operations.
  3 | 
  4 | This service contains the shared business logic that was previously duplicated
  5 | between mcp_server.py and server.py. It provides a single source of truth for
  6 | all memory operations, eliminating the DRY violation and ensuring consistent behavior.
  7 | """
  8 | 
  9 | import logging
 10 | from typing import Dict, List, Optional, Any, Union, Tuple, TypedDict
 11 | from datetime import datetime
 12 | 
 13 | from ..config import (
 14 |     INCLUDE_HOSTNAME,
 15 |     CONTENT_PRESERVE_BOUNDARIES,
 16 |     CONTENT_SPLIT_OVERLAP,
 17 |     ENABLE_AUTO_SPLIT
 18 | )
 19 | from ..storage.base import MemoryStorage
 20 | from ..models.memory import Memory
 21 | from ..utils.content_splitter import split_content
 22 | from ..utils.hashing import generate_content_hash
 23 | 
 24 | logger = logging.getLogger(__name__)
 25 | 
 26 | 
 27 | def normalize_tags(tags: Union[str, List[str], None]) -> List[str]:
 28 |     """
 29 |     Normalize tags to a consistent list format.
 30 | 
 31 |     Handles all input formats:
 32 |     - None → []
 33 |     - "tag1,tag2,tag3" → ["tag1", "tag2", "tag3"]
 34 |     - "single-tag" → ["single-tag"]
 35 |     - ["tag1", "tag2"] → ["tag1", "tag2"]
 36 | 
 37 |     Args:
 38 |         tags: Tags in any supported format (None, string, comma-separated string, or list)
 39 | 
 40 |     Returns:
 41 |         List of tag strings, empty list if None or empty string
 42 |     """
 43 |     if tags is None:
 44 |         return []
 45 | 
 46 |     if isinstance(tags, str):
 47 |         # Empty string returns empty list
 48 |         if not tags.strip():
 49 |             return []
 50 |         # Split by comma if present, otherwise single tag
 51 |         if ',' in tags:
 52 |             return [tag.strip() for tag in tags.split(',') if tag.strip()]
 53 |         return [tags.strip()]
 54 | 
 55 |     # Already a list - return as-is
 56 |     return tags
 57 | 
 58 | 
 59 | class MemoryResult(TypedDict):
 60 |     """Type definition for memory operation results."""
 61 |     content: str
 62 |     content_hash: str
 63 |     tags: List[str]
 64 |     memory_type: Optional[str]
 65 |     metadata: Optional[Dict[str, Any]]
 66 |     created_at: str
 67 |     updated_at: str
 68 |     created_at_iso: str
 69 |     updated_at_iso: str
 70 | 
 71 | 
 72 | # Store Memory Return Types
 73 | class StoreMemorySingleSuccess(TypedDict):
 74 |     """Return type for successful single memory storage."""
 75 |     success: bool
 76 |     memory: MemoryResult
 77 | 
 78 | 
 79 | class StoreMemoryChunkedSuccess(TypedDict):
 80 |     """Return type for successful chunked memory storage."""
 81 |     success: bool
 82 |     memories: List[MemoryResult]
 83 |     total_chunks: int
 84 |     original_hash: str
 85 | 
 86 | 
 87 | class StoreMemoryFailure(TypedDict):
 88 |     """Return type for failed memory storage."""
 89 |     success: bool
 90 |     error: str
 91 | 
 92 | 
 93 | # List Memories Return Types
 94 | class ListMemoriesSuccess(TypedDict):
 95 |     """Return type for successful memory listing."""
 96 |     memories: List[MemoryResult]
 97 |     page: int
 98 |     page_size: int
 99 |     total: int
100 |     has_more: bool
101 | 
102 | 
103 | class ListMemoriesError(TypedDict):
104 |     """Return type for failed memory listing."""
105 |     success: bool
106 |     error: str
107 |     memories: List[MemoryResult]
108 |     page: int
109 |     page_size: int
110 | 
111 | 
112 | # Retrieve Memories Return Types
113 | class RetrieveMemoriesSuccess(TypedDict):
114 |     """Return type for successful memory retrieval."""
115 |     memories: List[MemoryResult]
116 |     query: str
117 |     count: int
118 | 
119 | 
120 | class RetrieveMemoriesError(TypedDict):
121 |     """Return type for failed memory retrieval."""
122 |     memories: List[MemoryResult]
123 |     query: str
124 |     error: str
125 | 
126 | 
127 | # Search by Tag Return Types
128 | class SearchByTagSuccess(TypedDict):
129 |     """Return type for successful tag search."""
130 |     memories: List[MemoryResult]
131 |     tags: List[str]
132 |     match_type: str
133 |     count: int
134 | 
135 | 
136 | class SearchByTagError(TypedDict):
137 |     """Return type for failed tag search."""
138 |     memories: List[MemoryResult]
139 |     tags: List[str]
140 |     error: str
141 | 
142 | 
143 | # Delete Memory Return Types
144 | class DeleteMemorySuccess(TypedDict):
145 |     """Return type for successful memory deletion."""
146 |     success: bool
147 |     content_hash: str
148 | 
149 | 
150 | class DeleteMemoryFailure(TypedDict):
151 |     """Return type for failed memory deletion."""
152 |     success: bool
153 |     content_hash: str
154 |     error: str
155 | 
156 | 
157 | # Health Check Return Types
158 | class HealthCheckSuccess(TypedDict, total=False):
159 |     """Return type for successful health check."""
160 |     healthy: bool
161 |     storage_type: str
162 |     total_memories: int
163 |     last_updated: str
164 |     # Additional fields from storage stats (marked as not required via total=False)
165 | 
166 | 
167 | class HealthCheckFailure(TypedDict):
168 |     """Return type for failed health check."""
169 |     healthy: bool
170 |     error: str
171 | 
172 | 
173 | class MemoryService:
174 |     """
175 |     Shared service for memory operations with consistent business logic.
176 | 
177 |     This service centralizes all memory-related business logic to ensure
178 |     consistent behavior across API endpoints and MCP tools, eliminating
179 |     code duplication and potential inconsistencies.
180 |     """
181 | 
182 |     def __init__(self, storage: MemoryStorage):
183 |         self.storage = storage
184 | 
185 |     async def list_memories(
186 |         self,
187 |         page: int = 1,
188 |         page_size: int = 10,
189 |         tag: Optional[str] = None,
190 |         memory_type: Optional[str] = None
191 |     ) -> Union[ListMemoriesSuccess, ListMemoriesError]:
192 |         """
193 |         List memories with pagination and optional filtering.
194 | 
195 |         This method provides database-level filtering for optimal performance,
196 |         avoiding the common anti-pattern of loading all records into memory.
197 | 
198 |         Args:
199 |             page: Page number (1-based)
200 |             page_size: Number of memories per page
201 |             tag: Filter by specific tag
202 |             memory_type: Filter by memory type
203 | 
204 |         Returns:
205 |             Dictionary with memories and pagination info
206 |         """
207 |         try:
208 |             # Calculate offset for pagination
209 |             offset = (page - 1) * page_size
210 | 
211 |             # Use database-level filtering for optimal performance
212 |             tags_list = [tag] if tag else None
213 |             memories = await self.storage.get_all_memories(
214 |                 limit=page_size,
215 |                 offset=offset,
216 |                 memory_type=memory_type,
217 |                 tags=tags_list
218 |             )
219 | 
220 |             # Get accurate total count for pagination
221 |             total = await self.storage.count_all_memories(
222 |                 memory_type=memory_type,
223 |                 tags=tags_list
224 |             )
225 | 
226 |             # Format results for API response
227 |             results = []
228 |             for memory in memories:
229 |                 results.append(self._format_memory_response(memory))
230 | 
231 |             return {
232 |                 "memories": results,
233 |                 "page": page,
234 |                 "page_size": page_size,
235 |                 "total": total,
236 |                 "has_more": offset + page_size < total
237 |             }
238 | 
239 |         except Exception as e:
240 |             logger.exception(f"Unexpected error listing memories: {e}")
241 |             return {
242 |                 "success": False,
243 |                 "error": f"Failed to list memories: {str(e)}",
244 |                 "memories": [],
245 |                 "page": page,
246 |                 "page_size": page_size
247 |             }
248 | 
249 |     async def store_memory(
250 |         self,
251 |         content: str,
252 |         tags: Union[str, List[str], None] = None,
253 |         memory_type: Optional[str] = None,
254 |         metadata: Optional[Dict[str, Any]] = None,
255 |         client_hostname: Optional[str] = None
256 |     ) -> Union[StoreMemorySingleSuccess, StoreMemoryChunkedSuccess, StoreMemoryFailure]:
257 |         """
258 |         Store a new memory with validation and content processing.
259 | 
260 |         Accepts tags in multiple formats for maximum flexibility:
261 |         - None → []
262 |         - "tag1,tag2,tag3" → ["tag1", "tag2", "tag3"]
263 |         - "single-tag" → ["single-tag"]
264 |         - ["tag1", "tag2"] → ["tag1", "tag2"]
265 | 
266 |         Args:
267 |             content: The memory content
268 |             tags: Optional tags for the memory (string, comma-separated string, or list)
269 |             memory_type: Optional memory type classification
270 |             metadata: Optional additional metadata (can also contain tags)
271 |             client_hostname: Optional client hostname for source tagging
272 | 
273 |         Returns:
274 |             Dictionary with operation result
275 |         """
276 |         try:
277 |             # Normalize tags from parameter (handles all formats)
278 |             final_tags = normalize_tags(tags)
279 | 
280 |             # Extract and normalize metadata.tags if present (copy so the caller's dict is not mutated)
281 |             final_metadata = dict(metadata) if metadata else {}
282 |             if metadata and "tags" in metadata:
283 |                 metadata_tags = normalize_tags(metadata.get("tags"))
284 |                 # Merge with parameter tags and remove duplicates
285 |                 final_tags = list(set(final_tags + metadata_tags))
286 | 
287 |             # Apply hostname tagging if provided (for consistent source tracking)
288 |             if client_hostname:
289 |                 source_tag = f"source:{client_hostname}"
290 |                 if source_tag not in final_tags:
291 |                     final_tags.append(source_tag)
292 |                 final_metadata["hostname"] = client_hostname
293 | 
294 |             # Generate content hash for deduplication
295 |             content_hash = generate_content_hash(content)
296 | 
297 |             # Process content if auto-splitting is enabled and content exceeds max length
298 |             max_length = self.storage.max_content_length
299 |             if ENABLE_AUTO_SPLIT and max_length and len(content) > max_length:
300 |                 # Split content into chunks
301 |                 chunks = split_content(
302 |                     content,
303 |                     max_length=max_length,
304 |                     preserve_boundaries=CONTENT_PRESERVE_BOUNDARIES,
305 |                     overlap=CONTENT_SPLIT_OVERLAP
306 |                 )
307 |                 stored_memories = []
308 | 
309 |                 for i, chunk in enumerate(chunks):
310 |                     chunk_hash = generate_content_hash(chunk)
311 |                     chunk_metadata = final_metadata.copy()
312 |                     chunk_metadata["chunk_index"] = i
313 |                     chunk_metadata["total_chunks"] = len(chunks)
314 |                     chunk_metadata["original_hash"] = content_hash
315 | 
316 |                     memory = Memory(
317 |                         content=chunk,
318 |                         content_hash=chunk_hash,
319 |                         tags=final_tags,
320 |                         memory_type=memory_type,
321 |                         metadata=chunk_metadata
322 |                     )
323 | 
324 |                     success, message = await self.storage.store(memory)
325 |                     if success:
326 |                         stored_memories.append(self._format_memory_response(memory))
327 | 
328 |                 return {
329 |                     "success": True,
330 |                     "memories": stored_memories,
331 |                     "total_chunks": len(chunks),
332 |                     "original_hash": content_hash
333 |                 }
334 |             else:
335 |                 # Store as single memory
336 |                 memory = Memory(
337 |                     content=content,
338 |                     content_hash=content_hash,
339 |                     tags=final_tags,
340 |                     memory_type=memory_type,
341 |                     metadata=final_metadata
342 |                 )
343 | 
344 |                 success, message = await self.storage.store(memory)
345 | 
346 |                 if success:
347 |                     return {
348 |                         "success": True,
349 |                         "memory": self._format_memory_response(memory)
350 |                     }
351 |                 else:
352 |                     return {
353 |                         "success": False,
354 |                         "error": message
355 |                     }
356 | 
357 |         except ValueError as e:
358 |             # Handle validation errors specifically
359 |             logger.warning(f"Validation error storing memory: {e}")
360 |             return {
361 |                 "success": False,
362 |                 "error": f"Invalid memory data: {str(e)}"
363 |             }
364 |         except ConnectionError as e:
365 |             # Handle storage connectivity issues
366 |             logger.error(f"Storage connection error: {e}")
367 |             return {
368 |                 "success": False,
369 |                 "error": f"Storage connection failed: {str(e)}"
370 |             }
371 |         except Exception as e:
372 |             # Handle unexpected errors
373 |             logger.exception(f"Unexpected error storing memory: {e}")
374 |             return {
375 |                 "success": False,
376 |                 "error": f"Failed to store memory: {str(e)}"
377 |             }
378 | 
379 |     async def retrieve_memories(
380 |         self,
381 |         query: str,
382 |         n_results: int = 10,
383 |         tags: Optional[List[str]] = None,
384 |         memory_type: Optional[str] = None
385 |     ) -> Union[RetrieveMemoriesSuccess, RetrieveMemoriesError]:
386 |         """
387 |         Retrieve memories by semantic search with optional filtering.
388 | 
389 |         Args:
390 |             query: Search query string
391 |             n_results: Maximum number of results
392 |             tags: Optional tag filtering
393 |             memory_type: Optional memory type filtering
394 | 
395 |         Returns:
396 |             Dictionary with search results
397 |         """
398 |         try:
399 |             # Retrieve memories using semantic search
400 |             # Note: storage.retrieve() only supports query and n_results
401 |             # We'll filter by tags/type after retrieval if needed
402 |             memories = await self.storage.retrieve(
403 |                 query=query,
404 |                 n_results=n_results
405 |             )
406 | 
407 |             # Apply optional post-filtering
408 |             filtered_memories = memories
409 |             if tags or memory_type:
410 |                 filtered_memories = []
411 |                 for result in memories:
412 |                     # Filter by tags if specified (results are MemoryQueryResult objects)
413 |                     if tags:
414 |                         memory_tags = result.memory.tags or []
415 |                         if not any(tag in memory_tags for tag in tags):
416 |                             continue
417 | 
418 |                     # Filter by memory_type if specified
419 |                     if memory_type:
420 |                         mem_type = result.memory.memory_type or ''
421 |                         if mem_type != memory_type:
422 |                             continue
423 | 
424 |                     filtered_memories.append(result)
425 | 
426 |             results = []
427 |             for result in filtered_memories:
428 |                 # Extract Memory object from MemoryQueryResult and add similarity score
429 |                 memory_dict = self._format_memory_response(result.memory)
430 |                 memory_dict['similarity_score'] = result.relevance_score
431 |                 results.append(memory_dict)
432 | 
433 |             return {
434 |                 "memories": results,
435 |                 "query": query,
436 |                 "count": len(results)
437 |             }
438 | 
439 |         except Exception as e:
440 |             logger.error(f"Error retrieving memories: {e}")
441 |             return {
442 |                 "memories": [],
443 |                 "query": query,
444 |                 "error": f"Failed to retrieve memories: {str(e)}"
445 |             }
446 | 
447 |     async def search_by_tag(
448 |         self,
449 |         tags: Union[str, List[str]],
450 |         match_all: bool = False
451 |     ) -> Union[SearchByTagSuccess, SearchByTagError]:
452 |         """
453 |         Search memories by tags with flexible matching options.
454 | 
455 |         Args:
456 |             tags: Tag or list of tags to search for
457 |             match_all: If True, memory must have ALL tags; if False, ANY tag
458 | 
459 |         Returns:
460 |             Dictionary with matching memories
461 |         """
462 |         try:
463 |             # Normalize tags to list (handles all formats including comma-separated)
464 |             tags = normalize_tags(tags)
465 | 
466 |             # Search using database-level filtering
467 |             # Note: match_all is not passed to storage.search_by_tag, so it only annotates the response
468 |             memories = await self.storage.search_by_tag(tags=tags)
469 | 
470 |             # Format results
471 |             results = []
472 |             for memory in memories:
473 |                 results.append(self._format_memory_response(memory))
474 | 
475 |             # Determine match type description
476 |             match_type = "ALL" if match_all else "ANY"
477 | 
478 |             return {
479 |                 "memories": results,
480 |                 "tags": tags,
481 |                 "match_type": match_type,
482 |                 "count": len(results)
483 |             }
484 | 
485 |         except Exception as e:
486 |             logger.error(f"Error searching by tags: {e}")
487 |             return {
488 |                 "memories": [],
489 |                 "tags": tags if isinstance(tags, list) else [tags],
490 |                 "error": f"Failed to search by tags: {str(e)}"
491 |             }
492 | 
493 |     async def get_memory_by_hash(self, content_hash: str) -> Dict[str, Any]:
494 |         """
495 |         Retrieve a specific memory by its content hash using O(1) direct lookup.
496 | 
497 |         Args:
498 |             content_hash: The content hash of the memory
499 | 
500 |         Returns:
501 |             Dictionary with memory data or error
502 |         """
503 |         try:
504 |             # Use direct O(1) lookup via storage.get_by_hash()
505 |             memory = await self.storage.get_by_hash(content_hash)
506 | 
507 |             if memory:
508 |                 return {
509 |                     "memory": self._format_memory_response(memory),
510 |                     "found": True
511 |                 }
512 |             else:
513 |                 return {
514 |                     "found": False,
515 |                     "content_hash": content_hash
516 |                 }
517 | 
518 |         except Exception as e:
519 |             logger.error(f"Error getting memory by hash: {e}")
520 |             return {
521 |                 "found": False,
522 |                 "content_hash": content_hash,
523 |                 "error": f"Failed to get memory: {str(e)}"
524 |             }
525 | 
526 |     async def delete_memory(self, content_hash: str) -> Union[DeleteMemorySuccess, DeleteMemoryFailure]:
527 |         """
528 |         Delete a memory by its content hash.
529 | 
530 |         Args:
531 |             content_hash: The content hash of the memory to delete
532 | 
533 |         Returns:
534 |             Dictionary with operation result
535 |         """
536 |         try:
537 |             success, message = await self.storage.delete(content_hash)
538 |             if success:
539 |                 return {
540 |                     "success": True,
541 |                     "content_hash": content_hash
542 |                 }
543 |             else:
544 |                 return {
545 |                     "success": False,
546 |                     "content_hash": content_hash,
547 |                     "error": message
548 |                 }
549 | 
550 |         except Exception as e:
551 |             logger.error(f"Error deleting memory: {e}")
552 |             return {
553 |                 "success": False,
554 |                 "content_hash": content_hash,
555 |                 "error": f"Failed to delete memory: {str(e)}"
556 |             }
557 | 
558 |     async def health_check(self) -> Union[HealthCheckSuccess, HealthCheckFailure]:
559 |         """
560 |         Perform a health check on the memory storage system.
561 | 
562 |         Returns:
563 |             Dictionary with health status and statistics
564 |         """
565 |         try:
566 |             stats = await self.storage.get_stats()
567 |             return {
568 |                 "healthy": True,
569 |                 "storage_type": stats.get("backend", "unknown"),
570 |                 "total_memories": stats.get("total_memories", 0),
571 |                 "last_updated": datetime.now().isoformat(),
572 |                 **stats
573 |             }
574 | 
575 |         except Exception as e:
576 |             logger.error(f"Health check failed: {e}")
577 |             return {
578 |                 "healthy": False,
579 |                 "error": f"Health check failed: {str(e)}"
580 |             }
581 | 
582 |     def _format_memory_response(self, memory: Memory) -> MemoryResult:
583 |         """
584 |         Format a memory object for API response.
585 | 
586 |         Args:
587 |             memory: The memory object to format
588 | 
589 |         Returns:
590 |             Formatted memory dictionary
591 |         """
592 |         return {
593 |             "content": memory.content,
594 |             "content_hash": memory.content_hash,
595 |             "tags": memory.tags,
596 |             "memory_type": memory.memory_type,
597 |             "metadata": memory.metadata,
598 |             "created_at": memory.created_at,
599 |             "updated_at": memory.updated_at,
600 |             "created_at_iso": memory.created_at_iso,
601 |             "updated_at_iso": memory.updated_at_iso
602 |         }
603 | 
```

--------------------------------------------------------------------------------
/claude-hooks/CONFIGURATION.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Memory Hooks Configuration Guide
  2 | 
  3 | ## Overview
  4 | 
  5 | This guide documents all configuration properties for the Claude Code memory awareness hooks, with detailed explanations of their behavior and impact on memory retrieval.
  6 | 
  7 | ## Configuration Structure
  8 | 
  9 | The hooks are configured via `config.json` in the hooks directory. Configuration follows this hierarchy:
 10 | 
 11 | 1. **Memory Service** - Connection and protocol settings
 12 | 2. **Project Detection** - How projects are identified
 13 | 3. **Memory Scoring** - How memories are ranked for relevance
 14 | 4. **Git Analysis** - Repository context integration
 15 | 5. **Time Windows** - Temporal scoping for queries
 16 | 6. **Output** - Display and logging options
 17 | 
 18 | ---
 19 | 
 20 | ## Memory Service Connection Configuration
 21 | 
 22 | ### `memoryService` Object
 23 | 
 24 | Controls how the hooks connect to the MCP Memory Service.
 25 | 
 26 | ```json
 27 | "memoryService": {
 28 |   "protocol": "auto",
 29 |   "preferredProtocol": "http",
 30 |   "fallbackEnabled": true,
 31 |   "http": {
 32 |     "endpoint": "http://127.0.0.1:8889",
 33 |     "apiKey": "YOUR_API_KEY_HERE",
 34 |     "healthCheckTimeout": 3000,
 35 |     "useDetailedHealthCheck": true
 36 |   },
 37 |   "mcp": {
 38 |     "serverCommand": ["uv", "run", "memory", "server", "-s", "hybrid"],
 39 |     "serverWorkingDir": "../",
 40 |     "connectionTimeout": 2000,
 41 |     "toolCallTimeout": 3000
 42 |   }
 43 | }
 44 | ```
 45 | 
 46 | #### HTTP Configuration
 47 | 
 48 | **`endpoint`** (String): URL of the HTTP memory service.
 49 | 
 50 | **Security Considerations:**
 51 | - **HTTP (`http://`)**: Default for local development. Traffic is **unencrypted** - only use for localhost connections.
 52 | - **HTTPS (`https://`)**: Recommended if connecting to remote servers or when encryption-in-transit is required.
 53 |   - For self-signed certificates, your system must trust the certificate authority.
 54 |   - The hooks enforce certificate validation - `rejectUnauthorized` is always enabled for security.
 55 | 
 56 | **`apiKey`** (String): API key for authenticating with the memory service.
 57 | - **Default**: Empty string `""` - the application will validate and prompt for a valid key on startup
 58 | - **Best practice**: Set via environment variable or secure configuration file
 59 | - **Security**: Never commit actual API keys to version control
 60 | 
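    | A minimal sketch of the environment-variable approach (the variable name `MCP_MEMORY_API_KEY` and the merge step are illustrative, not something the hooks do by default):
    | 
    | ```javascript
    | // Overlay a secret from the environment onto the checked-in config.json
    | const fs = require('fs');
    | 
    | const config = JSON.parse(fs.readFileSync('config.json', 'utf8'));
    | config.memoryService.http.apiKey =
    |   process.env.MCP_MEMORY_API_KEY || config.memoryService.http.apiKey;
    | ```
    | 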
 61 | #### MCP Configuration
 62 | 
 63 | **`serverCommand`** (Array): Command to launch the MCP memory service locally.
 64 | - Example: `["uv", "run", "memory", "server", "-s", "hybrid"]`
 65 | - Adjust storage backend flag (`-s`) as needed: `hybrid`, `cloudflare`, `sqlite_vec`, `chromadb`
 66 | 
 67 | **`serverWorkingDir`** (String): Working directory for the MCP server process.
 68 | - **Relative paths**: `"../"` assumes hooks are in a subdirectory (e.g., `project/claude-hooks/`)
 69 | - **Absolute paths**: Use full path for explicit configuration
 70 | - **Environment variables**: Consider using `process.env.MCP_MEMORY_PROJECT_ROOT` for flexibility
 71 | 
 72 | **Directory Structure Assumption (for `../` relative path):**
 73 | ```
 74 | project-root/
 75 | ├── src/                    # MCP Memory Service code
 76 | ├── claude-hooks/           # This hooks directory
 77 | │   ├── config.json
 78 | │   └── utilities/
 79 | └── pyproject.toml
 80 | ```
 81 | 
 82 | If your structure differs, update `serverWorkingDir` accordingly or use an absolute path.
 83 | 
 84 | **`connectionTimeout`** (Number): Milliseconds to wait for MCP server connection (default: 2000).
 85 | 
 86 | **`toolCallTimeout`** (Number): Milliseconds to wait for MCP tool call responses (default: 3000).
 87 | 
 88 | ---
 89 | 
 90 | ## Memory Scoring Configuration
 91 | 
 92 | ### `memoryScoring` Object
 93 | 
 94 | Controls how memories are scored and ranked for relevance to the current session.
 95 | 
 96 | #### `weights` (Object)
 97 | 
 98 | Relative importance of different scoring factors. These weights are applied to individual component scores (0.0-1.0 each), then summed together with additive bonuses (typeBonus, recencyBonus). The final score is clamped to [0, 1].
 99 | 
100 | **Note**: Weights don't need to sum to exactly 1.0 since additional bonuses are added separately and the final score is normalized by clamping. The weights shown below sum to 1.00 for the base scoring (without conversation context) or 1.25 when conversation context is enabled.
101 | 
102 | ```json
103 | "weights": {
104 |   "timeDecay": 0.40,           // Recency weight (default: 0.40)
105 |   "tagRelevance": 0.25,        // Tag matching weight (default: 0.25)
106 |   "contentRelevance": 0.15,    // Content keyword weight (default: 0.15)
107 |   "contentQuality": 0.20,      // Quality assessment weight (default: 0.20)
108 |   "conversationRelevance": 0.25 // Conversation context weight (default: 0.25, only when enabled)
109 | }
110 | ```
111 | 
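    | To make the additive model concrete, here is a worked example under the default base weights (the component scores are invented for illustration):
    | 
    | ```javascript
    | // Base score = weighted sum of components, plus additive bonuses, clamped to [0, 1]
    | const weights    = { timeDecay: 0.40, tagRelevance: 0.25, contentRelevance: 0.15, contentQuality: 0.20 };
    | const components = { timeDecay: 0.70, tagRelevance: 0.80, contentRelevance: 0.30, contentQuality: 0.50 };
    | 
    | let score = Object.keys(weights)
    |     .reduce((sum, key) => sum + weights[key] * components[key], 0); // 0.28 + 0.20 + 0.045 + 0.10 = 0.625
    | score += 0.15; // recency bonus for a memory under 7 days old
    | console.log(Math.min(1, Math.max(0, score))); // 0.775
    | ```
    | 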
112 | **Property Details:**
113 | 
114 | - **`timeDecay`** (0.0-1.0, recommended: 0.35-0.45)
115 |   - Weight given to memory age in scoring
116 |   - Higher values prioritize recent memories
117 |   - Lower values allow older, high-quality memories to rank higher
118 |   - **Impact**: At 0.40, a 7-day-old memory with perfect tags can outscore a 60-day-old memory with perfect tags and high quality
119 |   - **Recommendation**: Set to 0.40-0.45 for active development, 0.25-0.35 for research/reference work
120 | 
121 | - **`tagRelevance`** (0.0-1.0, recommended: 0.20-0.30)
122 |   - Weight given to tag matching with project context
123 |   - Higher values favor well-tagged memories
124 |   - **Impact**: Tags like `projectName`, `language`, `framework` significantly boost scores
125 |   - **Trade-off**: High tag weight can cause old, well-documented memories to dominate over recent work
126 |   - **Recommendation**: Set to 0.25 for balanced tag importance, 0.20 if recency is critical
127 | 
128 | - **`contentRelevance`** (0.0-1.0, recommended: 0.10-0.20)
129 |   - Weight for keyword matching in memory content
130 |   - Matches against project name, language, frameworks, technical terms
131 |   - **Impact**: Memories mentioning project-specific terms rank higher
132 |   - **Recommendation**: Keep at 0.15 unless doing very keyword-focused work
133 | 
134 | - **`contentQuality`** (0.0-1.0, recommended: 0.15-0.25)
135 |   - Weight for assessed content quality (length, diversity, meaningful indicators)
136 |   - Penalizes generic session summaries
137 |   - **Impact**: Filters out low-quality auto-generated content
138 |   - **Quality Indicators**: "decided", "implemented", "fixed", "because", "approach", "solution"
139 |   - **Recommendation**: Set to 0.20 to balance quality with other factors
140 | 
141 | - **`conversationRelevance`** (0.0-1.0, recommended: 0.20-0.30)
142 |   - Weight for matching current conversation topics and intent
143 |   - Only active when conversation context is available
144 |   - **Impact**: Dynamically adjusts based on what user is discussing
145 |   - **Recommendation**: Keep at 0.25 for adaptive context awareness
146 | 
147 | #### `minRelevanceScore` (Number)
148 | 
149 | Minimum score threshold for a memory to be included in context.
150 | 
151 | ```json
152 | "minRelevanceScore": 0.4  // Default: 0.4
153 | ```
154 | 
155 | **Details:**
156 | - Range: 0.0 to 1.0
157 | - Memories below this threshold are filtered out entirely
158 | - **Impact on Quality**:
159 |   - `0.3`: Permissive, may include generic old content
160 |   - `0.4`: Balanced, filters most low-quality memories (recommended)
161 |   - `0.5`: Strict, only high-relevance memories
162 | - **Trade-off**: Higher threshold = fewer but higher quality memories
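    | 
    | The threshold is a hard post-scoring filter; a minimal sketch of its effect:
    | 
    | ```javascript
    | // Memories below minRelevanceScore never reach the session context
    | const minRelevanceScore = 0.4;
    | const scored = [{ name: 'recent fix', score: 0.65 }, { name: 'old note', score: 0.31 }];
    | const included = scored.filter(m => m.score >= minRelevanceScore); // only 'recent fix' survives
    | ```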
163 | 
164 | #### `timeDecayRate` (Number)
165 | 
166 | Rate of exponential decay for time-based scoring.
167 | 
168 | ```json
169 | "timeDecayRate": 0.05  // Default: 0.05
170 | ```
171 | 
172 | **Formula**: `score = e^(-rate * days)`
173 | 
174 | **Details:**
175 | - Range: 0.01 to 0.2 (practical range)
176 | - Lower rate = gentler decay (memories age slower)
177 | - Higher rate = aggressive decay (memories age faster)
178 | 
179 | **Decay Examples**:
180 | 
181 | | Days Old | Rate 0.05 | Rate 0.10 | Rate 0.15 |
182 | |----------|-----------|-----------|-----------|
183 | | 7 days   | 0.70      | 0.50      | 0.35      |
184 | | 14 days  | 0.50      | 0.25      | 0.12      |
185 | | 30 days  | 0.22      | 0.05      | 0.01      |
186 | | 60 days  | 0.05      | 0.002     | ~0        |
187 | 
188 | **Recommendation**:
189 | - `0.05`: Balanced, keeps 2-4 week memories relevant (recommended)
190 | - `0.10`: Aggressive, prioritizes last 1-2 weeks only
191 | - `0.03`: Gentle, treats 1-2 month memories as still valuable
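    | 
    | The table can be reproduced directly from the formula:
    | 
    | ```javascript
    | // score = e^(-rate * days) — regenerates the decay table above
    | for (const rate of [0.05, 0.10, 0.15]) {
    |     for (const days of [7, 14, 30, 60]) {
    |         console.log(`rate ${rate}, ${days} days:`, Math.exp(-rate * days).toFixed(2));
    |     }
    | }
    | // e.g. rate 0.05 at 14 days -> 0.50; rate 0.10 at 30 days -> 0.05
    | ```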
192 | 
193 | #### `enableConversationContext` (Boolean)
194 | 
195 | Whether to use conversation analysis for dynamic memory scoring.
196 | 
197 | ```json
198 | "enableConversationContext": true  // Default: true
199 | ```
200 | 
201 | ---
202 | 
203 | ## Git Analysis Configuration
204 | 
205 | ### `gitAnalysis` Object
206 | 
207 | Controls how git repository context influences memory retrieval.
208 | 
209 | ```json
210 | "gitAnalysis": {
211 |   "enabled": true,
212 |   "commitLookback": 14,
213 |   "maxCommits": 20,
214 |   "includeChangelog": true,
215 |   "maxGitMemories": 3,
216 |   "gitContextWeight": 1.8
217 | }
218 | ```
219 | 
220 | #### `gitContextWeight` (Number)
221 | 
222 | Multiplier applied to memories derived from git context queries.
223 | 
224 | **Details:**
225 | - Range: 1.0 to 2.5 (practical range)
226 | - Applied multiplicatively to base memory score
227 | - **Impact Examples**:
228 |   - Base score 0.5 × weight 1.2 = final 0.6
229 |   - Base score 0.5 × weight 1.8 = final 0.9
230 | 
231 | **Behavior by Value**:
232 | - `1.0`: No boost (git context treated equally)
233 | - `1.2`: Small boost (git-aware memories slightly favored)
234 | - `1.8`: Strong boost (git-aware memories highly prioritized) ✅ **Recommended**
235 | - `2.0+`: Very strong boost (git context dominates)
236 | 
237 | **Use Cases**:
238 | - **Active development** (`1.8`): Prioritize memories matching recent commits/keywords
239 | - **Maintenance work** (`1.2-1.5`): Balance git context with other signals
240 | - **Research/planning** (`1.0`): Disable git preference
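    | 
    | Because the boost is multiplicative, its effect is easy to sanity-check (a sketch; the real boost is applied in `session-start.js`):
    | 
    | ```javascript
    | const gitContextWeight = 1.8;
    | const baseScore = 0.5;
    | const boosted = Math.min(1, baseScore * gitContextWeight); // 0.9; final scores are clamped to 1.0
    | ```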
241 | 
242 | #### Other Git Properties
243 | 
244 | - **`commitLookback`** (Number, default: 14): Days of git history to analyze
245 | - **`maxCommits`** (Number, default: 20): Maximum commits to process
246 | - **`includeChangelog`** (Boolean, default: true): Parse CHANGELOG.md for context
247 | - **`maxGitMemories`** (Number, default: 3): Max memories from git-context phase
248 | 
249 | ---
250 | 
251 | ## Time Windows Configuration
252 | 
253 | ### Memory Service Time Windows
254 | 
255 | Controls temporal scoping for memory queries.
256 | 
257 | ```json
258 | "memoryService": {
259 |   "recentTimeWindow": "last-month",      // Default: "last-month"
260 |   "fallbackTimeWindow": "last-3-months"  // Default: "last-3-months"
261 | }
262 | ```
263 | 
264 | #### `recentTimeWindow` (String)
265 | 
266 | Time window for Phase 1 recent memory queries.
267 | 
268 | **Supported Values:**
269 | - `"last-day"`: Last 24 hours
270 | - `"last-week"`: Last 7 days
271 | - `"last-2-weeks"`: Last 14 days
272 | - `"last-month"`: Last 30 days ✅ **Recommended**
273 | - `"last-3-months"`: Last 90 days
274 | 
275 | **Impact:**
276 | - **Narrow window** (`last-week`): Only very recent memories, may miss context during development gaps
277 | - **Balanced window** (`last-month`): Captures recent sprint/iteration cycle
278 | - **Wide window** (`last-3-months`): Includes seasonal patterns, may dilute recency focus
279 | 
280 | **Recommendation**:
281 | - Active development: `"last-month"`
282 | - Periodic/seasonal work: `"last-3-months"`
283 | 
284 | #### `fallbackTimeWindow` (String)
285 | 
286 | Time window for fallback queries when recent memories are insufficient.
287 | 
288 | **Supported Values:** Same as `recentTimeWindow`
289 | 
290 | **Purpose:** Ensures minimum context when recent work is sparse.
291 | 
292 | **Recommendation**: Set 2-3× wider than recent window (e.g., `last-month` → `last-3-months`)
293 | 
294 | ---
295 | 
296 | ## Recency Bonus System
297 | 
298 | ### Automatic Recency Bonuses
299 | 
300 | The memory scorer applies explicit additive bonuses based on memory age (implemented in `memory-scorer.js`):
301 | 
302 | ```javascript
303 | // Automatic bonuses (no configuration needed)
304 | < 7 days:  +0.15 bonus  // Strong boost for last week
305 | < 14 days: +0.10 bonus  // Moderate boost for last 2 weeks
306 | < 30 days: +0.05 bonus  // Small boost for last month
307 | > 30 days: 0 bonus      // No bonus for older memories
308 | ```
309 | 
310 | **How It Works:**
311 | - Applied **additively** (not multiplicatively) to final score
312 | - Ensures very recent memories get absolute advantage
313 | - Creates clear tier separation (weekly/biweekly/monthly)
314 | 
315 | **Example Impact:**
316 | ```
317 | Memory A (5 days old):
318 |   Base score: 0.50
319 |   Recency bonus: +0.15
320 |   Final score: 0.65
321 | 
322 | Memory B (60 days old):
323 |   Base score: 0.60 (higher quality/tags)
324 |   Recency bonus: 0
325 |   Final score: 0.60
326 | 
327 | Result: Recent memory wins despite lower base score
328 | ```
329 | 
330 | **Design Rationale:**
331 | - Compensates for aggressive time decay
332 | - Prevents old, well-tagged memories from dominating
333 | - Aligns with user expectation that recent work is most relevant
334 | 
335 | ---
336 | 
337 | ## Complete Configuration Example
338 | 
339 | ### Optimized for Active Development (Recommended)
340 | 
341 | ```json
342 | {
343 |   "memoryService": {
344 |     "maxMemoriesPerSession": 8,
345 |     "recentFirstMode": true,
346 |     "recentMemoryRatio": 0.6,
347 |     "recentTimeWindow": "last-month",
348 |     "fallbackTimeWindow": "last-3-months"
349 |   },
350 |   "memoryScoring": {
351 |     "weights": {
352 |       "timeDecay": 0.40,
353 |       "tagRelevance": 0.25,
354 |       "contentRelevance": 0.15,
355 |       "contentQuality": 0.20,
356 |       "conversationRelevance": 0.25
357 |     },
358 |     "minRelevanceScore": 0.4,
359 |     "timeDecayRate": 0.05,
360 |     "enableConversationContext": true
361 |   },
362 |   "gitAnalysis": {
363 |     "enabled": true,
364 |     "commitLookback": 14,
365 |     "maxCommits": 20,
366 |     "includeChangelog": true,
367 |     "maxGitMemories": 3,
368 |     "gitContextWeight": 1.8
369 |   }
370 | }
371 | ```
372 | 
373 | ### Optimized for Research/Reference Work
374 | 
375 | ```json
376 | {
377 |   "memoryService": {
378 |     "recentTimeWindow": "last-month",
379 |     "fallbackTimeWindow": "last-3-months"
380 |   },
381 |   "memoryScoring": {
382 |     "weights": {
383 |       "timeDecay": 0.25,
384 |       "tagRelevance": 0.35,
385 |       "contentRelevance": 0.20,
386 |       "contentQuality": 0.30,
387 |       "conversationRelevance": 0.20
388 |     },
389 |     "minRelevanceScore": 0.3,
390 |     "timeDecayRate": 0.03
391 |   },
392 |   "gitAnalysis": {
393 |     "gitContextWeight": 1.0
394 |   }
395 | }
396 | ```
397 | 
398 | ---
399 | 
400 | ## Tuning Guide
401 | 
402 | ### Problem: Recent work not appearing in context
403 | 
404 | **Symptoms:**
405 | - Old documentation/decisions dominate
406 | - Recent bug fixes/features missing
407 | - Context feels outdated
408 | 
409 | **Solutions:**
410 | 1. Increase `timeDecay` weight: `0.40` → `0.45`
411 | 2. Increase `gitContextWeight`: `1.8` → `2.0`
412 | 3. Widen `recentTimeWindow`: `"last-week"` → `"last-month"`
413 | 4. Reduce `tagRelevance` weight: `0.25` → `0.20`
414 | 
415 | ### Problem: Too many low-quality memories
416 | 
417 | **Symptoms:**
418 | - Generic session summaries in context
419 | - Duplicate or trivial information
420 | - Context feels noisy
421 | 
422 | **Solutions:**
423 | 1. Increase `minRelevanceScore`: `0.4` → `0.5`
424 | 2. Increase `contentQuality` weight: `0.20` → `0.25`
425 | 3. Reduce `maxMemoriesPerSession`: `8` → `5`
426 | 
427 | ### Problem: Missing important old architectural decisions
428 | 
429 | **Symptoms:**
430 | - Lose context of foundational decisions
431 | - Architectural rationale missing
432 | - Only seeing recent tactical work
433 | 
434 | **Solutions:**
435 | 1. Reduce `timeDecay` weight: `0.40` → `0.30`
436 | 2. Increase `tagRelevance` weight: `0.25` → `0.30`
437 | 3. Gentler `timeDecayRate`: `0.05` → `0.03`
438 | 4. Tag important decisions with `"architecture"`, `"decision"` tags
439 | 
440 | ### Problem: Git context overwhelming other signals
441 | 
442 | **Symptoms:**
443 | - Only git-keyword memories showing up
444 | - Missing memories that don't match commit messages
445 | - Over-focused on recent commits
446 | 
447 | **Solutions:**
448 | 1. Reduce `gitContextWeight`: `1.8` → `1.4`
449 | 2. Reduce `maxGitMemories`: `3` → `2`
450 | 3. Disable git analysis temporarily: `"enabled": false`
451 | 
452 | ### Problem: "No relevant memories found" despite healthy database
453 | 
454 | **Symptoms:**
455 | - Hooks show "No relevant memories found" or "No active connection available"
456 | - HTTP server is running and healthy
457 | - Database contains many memories
458 | - Hook logs show connection failures or wrong endpoint
459 | 
460 | **Root Causes:**
461 | 
462 | 1. **Port Mismatch**: Config endpoint doesn't match actual HTTP server port
463 |    ```json
464 |    // WRONG - Server runs on 8000, config shows 8889
465 |    "endpoint": "http://127.0.0.1:8889"
466 |    ```
467 | 
468 | 2. **Stale Configuration**: Config not updated after reinstalling hooks or changing server port
469 | 
470 | **Solutions:**
471 | 
472 | 1. **Verify HTTP server port**:
473 |    ```bash
474 |    # Check what port the server is actually running on
475 |    lsof -i :8000    # Linux/macOS
476 |    netstat -ano | findstr "8000"  # Windows
477 | 
478 |    # Or check server logs
479 |    systemctl --user status mcp-memory-http.service  # Linux systemd
480 |    ```
481 | 
482 | 2. **Fix endpoint in config** (`~/.claude/hooks/config.json`):
483 |    ```json
484 |    {
485 |      "memoryService": {
486 |        "http": {
487 |          "endpoint": "http://127.0.0.1:8000",  // Match actual server port!
488 |          "apiKey": "your-api-key"
489 |        }
490 |      }
491 |    }
492 |    ```
493 | 
494 | 3. **Test connection manually**:
495 |    ```bash
496 |    curl http://127.0.0.1:8000/api/health
497 |    # Should return: {"status":"healthy","version":"..."}
498 |    ```
499 | 
500 | 4. **Verify configuration is loaded**:
501 |    ```bash
502 |    # Run a hook manually and check the output
503 |    node ~/.claude/hooks/core/session-start.js
504 |    # Look for connection protocol and storage info in output
505 |    ```
506 | 
507 | **Related Configuration Issues:**
508 | 
509 | After updating from repository, verify these settings match your preferences:
510 | - `recentTimeWindow`: Repository default is `"last week"` (not `"last 3 days"`)
511 | - `fallbackTimeWindow`: Repository default is `"last 2 weeks"` (not `"last week"`)
512 | - `timeDecay` weight: Repository default is `0.50` (not `0.60`)
513 | - `minRelevanceScore`: Repository default is `0.4` (not `0.25`)
514 | - `commitLookback`: Repository default is `14` days (not `7`)
515 | 
516 | **See also:** [CLAUDE.md § Configuration Management](../../CLAUDE.md#configuration-management) for complete troubleshooting guide.
517 | 
518 | ---
519 | 
520 | ## Migration from Previous Versions
521 | 
522 | ### v1.0 → v2.0 (Recency Optimization)
523 | 
524 | **Breaking Changes:**
525 | - `timeDecay` weight increased from `0.25` to `0.40`
526 | - `tagRelevance` weight decreased from `0.35` to `0.25`
527 | - `timeDecayRate` decreased from `0.10` to `0.05`
528 | - `minRelevanceScore` increased from `0.3` to `0.4`
529 | - `gitContextWeight` increased from `1.2` to `1.8`
530 | 
531 | **Impact:** Recent memories (< 30 days) will rank significantly higher. Adjust weights if you need more historical context.
532 | 
533 | **Migration Steps:**
534 | 1. Backup current `config.json`
535 | 2. Update weights to new defaults
536 | 3. Test with `test-recency-scoring.js`
537 | 4. Fine-tune based on your workflow
538 | 
539 | ---
540 | 
541 | ## Advanced: Scoring Algorithm Details
542 | 
543 | ### Final Score Calculation
544 | 
545 | ```javascript
546 | // Step 1: Calculate base score (weighted sum of components + bonuses)
547 | let baseScore =
548 |   (timeDecayScore * timeDecayWeight) +
549 |   (tagRelevanceScore * tagRelevanceWeight) +
550 |   (contentRelevanceScore * contentRelevanceWeight) +
551 |   (contentQualityScore * contentQualityWeight) +
552 |   typeBonus +
553 |   recencyBonus
554 | 
555 | // Step 2: Add conversation context if enabled (additive)
556 | if (conversationContextEnabled) {
557 |   baseScore += (conversationRelevanceScore * conversationRelevanceWeight)
558 | }
559 | 
560 | // Step 3: Apply git context boost (multiplicative - boosts ALL components)
561 | // Note: This multiplies the entire score including conversation relevance
562 | // Implementation: Applied in session-start.js after scoring, not in memory-scorer.js
563 | if (isGitContextMemory) {
564 |   baseScore *= gitContextWeight
565 | }
566 | 
567 | // Step 4: Apply quality penalty for very low quality (multiplicative)
568 | if (contentQualityScore < 0.2) {
569 |   baseScore *= 0.5
570 | }
571 | 
572 | // Step 5: Normalize to [0, 1]
573 | finalScore = clamp(baseScore, 0, 1)
574 | ```
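    | 
    | A runnable condensation of the steps above (the function shape and `clamp` helper are stand-ins; the actual logic lives in `memory-scorer.js` and `session-start.js`):
    | 
    | ```javascript
    | function finalScore(c, w, opts = {}) {
    |     const clamp = (x, lo, hi) => Math.min(hi, Math.max(lo, x));
    |     let score =
    |         c.timeDecay * w.timeDecay +
    |         c.tagRelevance * w.tagRelevance +
    |         c.contentRelevance * w.contentRelevance +
    |         c.contentQuality * w.contentQuality +
    |         (c.typeBonus || 0) +
    |         (c.recencyBonus || 0);
    |     if (opts.conversationContextEnabled) score += c.conversationRelevance * w.conversationRelevance;
    |     if (opts.isGitContextMemory) score *= opts.gitContextWeight; // applied after scoring
    |     if (c.contentQuality < 0.2) score *= 0.5; // penalty for very low quality
    |     return clamp(score, 0, 1);
    | }
    | ```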
575 | 
576 | ### Score Component Ranges
577 | 
578 | - **Time Decay**: 0.01 - 1.0 (exponential decay based on age)
579 | - **Tag Relevance**: 0.1 - 1.0 (0.3 default if no tags)
580 | - **Content Relevance**: 0.1 - 1.0 (0.3 default if no keywords)
581 | - **Content Quality**: 0.05 - 1.0 (0.3 default for normal content)
582 | - **Type Bonus**: -0.1 - 0.3 (based on memory type)
583 | - **Recency Bonus**: 0 - 0.15 (tiered based on age)
584 | 
585 | ### Type Bonuses
586 | 
587 | ```javascript
588 | {
589 |   'decision': 0.3,      // Architectural decisions
590 |   'architecture': 0.3,  // Architecture docs
591 |   'reference': 0.2,     // Reference materials
592 |   'session': 0.15,      // Session summaries
593 |   'insight': 0.2,       // Insights
594 |   'bug-fix': 0.15,      // Bug fixes
595 |   'feature': 0.1,       // Feature descriptions
596 |   'note': 0.05,         // General notes
597 |   'temporary': -0.1     // Temporary notes (penalized)
598 | }
599 | ```
600 | 
601 | ---
602 | 
603 | ## Testing Configuration Changes
604 | 
605 | Use the included test script to validate your configuration:
606 | 
607 | ```bash
608 | cd /path/to/claude-hooks
609 | node test-recency-scoring.js
610 | ```
611 | 
612 | This will show:
613 | - Time decay calculations for different ages
614 | - Recency bonus application
615 | - Final scoring with your config weights
616 | - Ranking of test memories
617 | 
618 | Expected output should show recent memories (< 7 days) in top 3 positions.
619 | 
620 | ---
621 | 
622 | ## See Also
623 | 
624 | - [README.md](./README.md) - General hooks documentation
625 | - [MIGRATION.md](./MIGRATION.md) - Migration guides
626 | - [README-NATURAL-TRIGGERS.md](./README-NATURAL-TRIGGERS.md) - Natural triggers documentation
627 | 
```

--------------------------------------------------------------------------------
/claude-hooks/test-natural-triggers.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | 
  3 | /**
  4 |  * Comprehensive Test Suite for Natural Memory Triggers
  5 |  * Tests performance-aware conversation monitoring and pattern detection
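    |  *
    |  * Usage (assumed invocation, from the claude-hooks directory):
    |  *   node test-natural-triggers.js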
  6 |  */
  7 | 
  8 | const { TieredConversationMonitor } = require('./utilities/tiered-conversation-monitor');
  9 | const { AdaptivePatternDetector } = require('./utilities/adaptive-pattern-detector');
 10 | const { PerformanceManager } = require('./utilities/performance-manager');
 11 | const { MidConversationHook } = require('./core/mid-conversation');
 12 | 
 13 | class NaturalTriggersTestSuite {
 14 |     constructor() {
 15 |         this.testResults = [];
 16 |         this.performanceMetrics = [];
 17 |     }
 18 | 
 19 |     /**
 20 |      * Run all tests
 21 |      */
 22 |     async runAllTests() {
 23 |         console.log('🧪 Natural Memory Triggers - Comprehensive Test Suite');
 24 |         console.log('═'.repeat(60));
 25 | 
 26 |         // Test categories
 27 |         const testCategories = [
 28 |             { name: 'Performance Management', tests: this.performanceTests },
 29 |             { name: 'Pattern Detection', tests: this.patternDetectionTests },
 30 |             { name: 'Conversation Monitoring', tests: this.conversationMonitorTests },
 31 |             { name: 'Integration Tests', tests: this.integrationTests },
 32 |             { name: 'Performance Profiles', tests: this.performanceProfileTests }
 33 |         ];
 34 | 
 35 |         for (const category of testCategories) {
 36 |             console.log(`\n📂 ${category.name}`);
 37 |             console.log('─'.repeat(40));
 38 | 
 39 |             await category.tests.call(this);
 40 |         }
 41 | 
 42 |         // Summary
 43 |         this.printTestSummary();
 44 |         return this.testResults;
 45 |     }
 46 | 
 47 |     /**
 48 |      * Performance Management Tests
 49 |      */
 50 |     async performanceTests() {
 51 |         // Test 1: Performance Manager Initialization
 52 |         await this.runTest('Performance Manager Initialization', async () => {
 53 |             const perfManager = new PerformanceManager({
 54 |                 defaultProfile: 'balanced'
 55 |             });
 56 | 
 57 |             this.assert(perfManager.activeProfile === 'balanced', 'Should initialize with correct profile');
 58 |             this.assert(perfManager.performanceBudget.maxLatency === 200, 'Should have correct latency budget');
 59 |             return { perfManager };
 60 |         });
 61 | 
 62 |         // Test 2: Timing Operations
 63 |         await this.runTest('Timing Operations', async () => {
 64 |             const perfManager = new PerformanceManager();
 65 | 
 66 |             const timing = perfManager.startTiming('test_operation', 'fast');
 67 |             await this.sleep(50);
 68 |             const result = perfManager.endTiming(timing);
 69 | 
 70 |             // Test performance tracking functionality without relying on exact timing
 71 |             this.assert(typeof result.latency === 'number' && result.latency >= 0, 'Should record numeric latency');
 72 |             this.assert(result.latency > 10, 'Should record reasonable latency for 50ms operation');
 73 |             this.assert(result.tier === 'fast', 'Should record correct tier');
 74 |             this.assert(typeof result.withinBudget === 'boolean', 'Should determine budget compliance');
 75 |         });
 76 | 
 77 |         // Test 3: Profile Switching
 78 |         await this.runTest('Profile Switching', async () => {
 79 |             const perfManager = new PerformanceManager();
 80 | 
 81 |             const originalProfile = perfManager.activeProfile;
 82 |             perfManager.switchProfile('speed_focused');
 83 | 
 84 |             this.assert(perfManager.activeProfile === 'speed_focused', 'Should switch to speed focused profile');
 85 |             this.assert(perfManager.performanceBudget.maxLatency === 100, 'Should update latency budget');
 86 | 
 87 |             perfManager.switchProfile(originalProfile); // Reset
 88 |         });
 89 | 
 90 |         // Test 4: Adaptive Learning
 91 |         await this.runTest('Adaptive Learning', async () => {
 92 |             const perfManager = new PerformanceManager();
 93 | 
 94 |             // Simulate positive feedback
 95 |             perfManager.recordUserFeedback(true, { latency: 300 });
 96 |             perfManager.recordUserFeedback(true, { latency: 350 });
 97 | 
 98 |             // User tolerance should increase
 99 |             const toleranceBefore = perfManager.userPreferences.toleranceLevel;
100 |             perfManager.recordUserFeedback(true, { latency: 400 });
101 |             const toleranceAfter = perfManager.userPreferences.toleranceLevel;
102 | 
103 |             this.assert(toleranceAfter >= toleranceBefore, 'User tolerance should increase with positive feedback');
104 |         });
105 |     }
106 | 
107 |     /**
108 |      * Pattern Detection Tests
109 |      */
110 |     async patternDetectionTests() {
111 |         // Test 1: Explicit Memory Requests
112 |         await this.runTest('Explicit Memory Request Detection', async () => {
113 |             const detector = new AdaptivePatternDetector({
114 |                 sensitivity: 0.7,
115 |                 adaptiveLearning: false // Disable learning for consistent tests
116 |             });
117 | 
118 |             const testCases = [
119 |                 { message: "What did we decide about the authentication approach?", shouldTrigger: true },
120 |                 { message: "Remind me how we handled user sessions", shouldTrigger: true },
121 |                 { message: "Remember when we discussed the database schema?", shouldTrigger: true },
122 |                 { message: "Just implementing a new feature", shouldTrigger: false }
123 |             ];
124 | 
125 |             for (const testCase of testCases) {
126 |                 const result = await detector.detectPatterns(testCase.message);
127 |                 const actualTrigger = result.triggerRecommendation;
128 | 
129 |                 // Debug output for failing tests
130 |                 if (actualTrigger !== testCase.shouldTrigger) {
131 |                     console.log(`\nDEBUG: "${testCase.message}"`);
132 |                     console.log(`Expected: ${testCase.shouldTrigger}, Got: ${actualTrigger}`);
133 |                     console.log(`Confidence: ${result.confidence}, Matches: ${result.matches.length}`);
134 |                     result.matches.forEach(m => console.log(`  - ${m.category}: ${m.confidence}`));
135 |                 }
136 | 
137 |                 this.assert(actualTrigger === testCase.shouldTrigger,
138 |                     `"${testCase.message}" should ${testCase.shouldTrigger ? '' : 'not '}trigger (got ${actualTrigger})`);
139 |             }
140 |         });
141 | 
142 |         // Test 2: Technical Discussion Patterns
143 |         await this.runTest('Technical Discussion Detection', async () => {
144 |             const detector = new AdaptivePatternDetector({ sensitivity: 0.6 });
145 | 
146 |             const technicalMessages = [
147 |                 "Let's discuss the authentication architecture",
148 |                 "What's our approach to database migrations?",
149 |                 "How should we implement the security layer?"
150 |             ];
151 | 
152 |             for (const message of technicalMessages) {
153 |                 const result = await detector.detectPatterns(message, {
154 |                     projectContext: { name: 'test-project', language: 'JavaScript' }
155 |                 });
156 | 
157 |                 this.assert(result.matches.length > 0, `Technical message should have pattern matches: "${message}"`);
158 |                 this.assert(result.confidence > 0.2, `Technical message should have reasonable confidence: ${result.confidence} for "${message}"`);
159 |             }
160 |         });
161 | 
162 |         // Test 3: Sensitivity Adjustment
163 |         await this.runTest('Sensitivity Adjustment', async () => {
164 |             const lowSensitivity = new AdaptivePatternDetector({ sensitivity: 0.3 });
165 |             const highSensitivity = new AdaptivePatternDetector({ sensitivity: 0.9 });
166 | 
167 |             const ambiguousMessage = "How do we handle this?";
168 | 
169 |             const lowResult = await lowSensitivity.detectPatterns(ambiguousMessage);
170 |             const highResult = await highSensitivity.detectPatterns(ambiguousMessage);
171 | 
172 |             this.assert(highResult.confidence >= lowResult.confidence,
173 |                 'Higher sensitivity should yield higher confidence for ambiguous messages');
174 |         });
175 | 
176 |         // Test 4: Learning from Feedback
177 |         await this.runTest('Learning from Feedback', async () => {
178 |             const detector = new AdaptivePatternDetector({ sensitivity: 0.7, adaptiveLearning: true });
179 | 
180 |             const message = "What's our standard approach?";
181 |             const initialResult = await detector.detectPatterns(message);
182 |             const initialConfidence = initialResult.confidence;
183 | 
184 |             // Provide positive feedback multiple times
185 |             for (let i = 0; i < 5; i++) {
186 |                 detector.recordUserFeedback(true, initialResult);
187 |             }
188 | 
189 |             const learnedResult = await detector.detectPatterns(message);
190 | 
191 |             // Note: In a real implementation, this might increase confidence for similar patterns
192 |             // For now, we just verify the feedback was recorded
193 |             const stats = detector.getStatistics();
194 |             this.assert(stats.positiveRate > 0, 'Should record positive feedback');
195 |         });
196 |     }
197 | 
198 |     /**
199 |      * Conversation Monitoring Tests
200 |      */
201 |     async conversationMonitorTests() {
202 |         // Test 1: Topic Extraction
203 |         await this.runTest('Topic Extraction', async () => {
204 |             const monitor = new TieredConversationMonitor({
205 |                 contextWindow: 5
206 |             });
207 | 
208 |             const technicalMessage = "Let's implement authentication using OAuth and JWT tokens for our React application";
209 |             const analysis = await monitor.analyzeMessage(technicalMessage);
210 | 
211 |             this.assert(analysis.topics.length > 0, 'Should extract topics from technical message');
212 |             this.assert(analysis.confidence > 0.4, `Should have reasonable confidence: ${analysis.confidence}`);
213 |             this.assert(analysis.processingTier !== 'none', 'Should process with some tier');
214 |         });
215 | 
216 |         // Test 2: Semantic Shift Detection
217 |         await this.runTest('Semantic Shift Detection', async () => {
218 |             const monitor = new TieredConversationMonitor();
219 | 
220 |             // First message establishes context
221 |             await monitor.analyzeMessage("Working on React components and state management");
222 | 
223 |             // Second message on same topic
224 |             const sameTopicResult = await monitor.analyzeMessage("Adding more React hooks to the component");
225 | 
226 |             // Third message on different topic
227 |             const differentTopicResult = await monitor.analyzeMessage("Let's switch to database schema design");
228 | 
229 |             this.assert(differentTopicResult.semanticShift > sameTopicResult.semanticShift,
230 |                 'Topic change should register higher semantic shift');
231 |         });
232 | 
233 |         // Test 3: Performance Tier Selection
234 |         await this.runTest('Performance Tier Selection', async () => {
235 |             const perfManager = new PerformanceManager({ defaultProfile: 'speed_focused' });
236 |             const monitor = new TieredConversationMonitor({}, perfManager);
237 | 
238 |             const message = "Simple question about React";
239 |             const analysis = await monitor.analyzeMessage(message);
240 | 
241 |             // In speed_focused mode, should prefer instant tier
242 |             this.assert(analysis.processingTier === 'instant' || analysis.processingTier === 'fast',
243 |                 `Speed focused mode should use fast tiers, got: ${analysis.processingTier}`);
244 |         });
245 | 
246 |         // Test 4: Caching Behavior
247 |         await this.runTest('Caching Behavior', async () => {
248 |             const monitor = new TieredConversationMonitor({
249 |                 enableCaching: true
250 |             });
251 | 
252 |             const message = "What is React?";
253 | 
254 |             // First analysis
255 |             const start1 = Date.now();
256 |             const result1 = await monitor.analyzeMessage(message);
257 |             const time1 = Date.now() - start1;
258 | 
259 |             // Second analysis (should use cache)
260 |             const start2 = Date.now();
261 |             const result2 = await monitor.analyzeMessage(message);
262 |             const time2 = Date.now() - start2;
263 | 
264 |             // Check that both results have reasonable confidence values
265 |             this.assert(typeof result1.confidence === 'number', 'First result should have confidence');
266 |             this.assert(typeof result2.confidence === 'number', 'Second result should have confidence');
267 |             // Note: Processing tiers may vary due to performance-based decisions, which is expected behavior
268 |             this.assert(result1.processingTier && result2.processingTier, 'Both results should have processing tiers');
269 |             // Note: Due to timestamps and context changes, exact confidence equality might vary
270 |         });
271 |     }
272 | 
273 |     /**
274 |      * Integration Tests
275 |      */
276 |     async integrationTests() {
277 |         // Test 1: Full Mid-Conversation Hook
278 |         await this.runTest('Full Mid-Conversation Hook Analysis', async () => {
279 |             const hook = new MidConversationHook({
280 |                 enabled: true,
281 |                 triggerThreshold: 0.6,
282 |                 maxMemoriesPerTrigger: 3,
283 |                 performance: { defaultProfile: 'balanced' }
284 |             });
285 | 
286 |             const context = {
287 |                 userMessage: "What did we decide about the authentication strategy?",
288 |                 projectContext: {
289 |                     name: 'test-project',
290 |                     language: 'JavaScript',
291 |                     frameworks: ['React']
292 |                 }
293 |             };
294 | 
295 |             const result = await hook.analyzeMessage(context.userMessage, context);
296 | 
297 |             this.assert(result !== null, 'Should return analysis result');
298 |             this.assert(typeof result.confidence === 'number', 'Should include confidence score');
299 |             this.assert(typeof result.shouldTrigger === 'boolean', 'Should include trigger decision');
300 |             this.assert(result.reasoning, 'Should include reasoning for decision');
301 | 
302 |             await hook.cleanup();
303 |         });
304 | 
305 |         // Test 2: Performance Budget Compliance
306 |         await this.runTest('Performance Budget Compliance', async () => {
307 |             const hook = new MidConversationHook({
308 |                 performance: { defaultProfile: 'speed_focused' }
309 |             });
310 | 
311 |             const start = Date.now();
312 |             const result = await hook.analyzeMessage("Quick question about React hooks");
313 |             const elapsed = Date.now() - start;
314 | 
315 |             // Speed focused should complete and return results
316 |             this.assert(result !== null, `Speed focused mode should return analysis result`);
317 |             console.log(`[Test] Speed focused analysis completed in ${elapsed}ms`);
318 | 
319 |             await hook.cleanup();
320 |         });
321 | 
322 |         // Test 3: Cooldown Period
323 |         await this.runTest('Cooldown Period Enforcement', async () => {
324 |             const hook = new MidConversationHook({
325 |                 cooldownPeriod: 1000, // 1 second
326 |                 triggerThreshold: 0.5
327 |             });
328 | 
329 |             const message = "What did we decide about authentication?";
330 | 
331 |             // First trigger
332 |             const result1 = await hook.analyzeMessage(message);
333 | 
334 |             // Immediate second attempt (should be in cooldown)
335 |             const result2 = await hook.analyzeMessage(message);
336 | 
337 |             if (result1.shouldTrigger) {
338 |                 this.assert(result2.reasoning?.includes('cooldown') || !result2.shouldTrigger,
339 |                     'Should respect cooldown period');
340 |             }
341 | 
342 |             await hook.cleanup();
343 |         });
344 |     }
345 | 
346 |     /**
347 |      * Performance Profile Tests
348 |      */
349 |     async performanceProfileTests() {
350 |         // Test 1: Profile Configuration Loading
351 |         await this.runTest('Performance Profile Loading', async () => {
352 |             const profiles = ['speed_focused', 'balanced', 'memory_aware', 'adaptive'];
353 | 
354 |             for (const profileName of profiles) {
355 |                 const perfManager = new PerformanceManager({ defaultProfile: profileName });
356 | 
357 |                 this.assert(perfManager.activeProfile === profileName,
358 |                     `Should load ${profileName} profile correctly`);
359 | 
360 |                 const budget = perfManager.performanceBudget;
361 |                 this.assert(budget !== null, `${profileName} should have performance budget`);
362 | 
363 |                 if (profileName !== 'adaptive') {
364 |                     this.assert(typeof budget.maxLatency === 'number',
365 |                         `${profileName} should have numeric maxLatency`);
366 |                 }
367 |             }
368 |         });
369 | 
370 |         // Test 2: Tier Enabling/Disabling
371 |         await this.runTest('Tier Configuration', async () => {
372 |             const speedFocused = new PerformanceManager({ defaultProfile: 'speed_focused' });
373 |             const memoryAware = new PerformanceManager({ defaultProfile: 'memory_aware' });
374 | 
375 |             // Speed focused should have fewer enabled tiers
376 |             const speedTiers = speedFocused.performanceBudget.enabledTiers || [];
377 |             const memoryTiers = memoryAware.performanceBudget.enabledTiers || [];
378 | 
379 |             this.assert(speedTiers.length <= memoryTiers.length,
380 |                 'Speed focused should have fewer or equal enabled tiers');
381 | 
382 |             this.assert(speedTiers.includes('instant'),
383 |                 'Speed focused should at least include instant tier');
384 |         });
385 | 
386 |         // Test 3: Adaptive Profile Behavior
387 |         await this.runTest('Adaptive Profile Behavior', async () => {
388 |             const adaptive = new PerformanceManager({ defaultProfile: 'adaptive' });
389 | 
390 |             // Simulate performance history
391 |             for (let i = 0; i < 20; i++) {
392 |                 adaptive.recordTotalLatency(150); // Consistent good performance
393 |             }
394 | 
395 |             // Check if adaptive calculation makes sense
396 |             const budget = adaptive.getProfileBudget('adaptive');
397 |             this.assert(budget.autoAdjust === true, 'Adaptive profile should have autoAdjust enabled');
398 |         });
399 |     }
400 | 
401 |     /**
402 |      * Utility Methods
403 |      */
404 | 
405 |     async runTest(testName, testFunction) {
406 |         try {
407 |             console.log(`  🧪 ${testName}...`);
408 |             const start = Date.now();
409 | 
410 |             const result = await testFunction();
411 | 
412 |             const duration = Date.now() - start;
413 |             this.performanceMetrics.push({ testName, duration });
414 | 
415 |             console.log(`  ✅ ${testName} (${duration}ms)`);
416 |             this.testResults.push({ name: testName, status: 'passed', duration });
417 | 
418 |             return result;
419 | 
420 |         } catch (error) {
421 |             console.log(`  ❌ ${testName}: ${error.message}`);
422 |             this.testResults.push({ name: testName, status: 'failed', error: error.message });
423 |             throw error; // Re-throw so one failing test aborts its test group
424 |         }
425 |     }
426 | 
427 |     assert(condition, message) {
428 |         if (!condition) {
429 |             throw new Error(`Assertion failed: ${message}`);
430 |         }
431 |     }
432 | 
433 |     async sleep(ms) {
434 |         return new Promise(resolve => setTimeout(resolve, ms));
435 |     }
436 | 
437 |     printTestSummary() {
438 |         console.log('\n📊 Test Summary');
439 |         console.log('═'.repeat(50));
440 | 
441 |         const passed = this.testResults.filter(r => r.status === 'passed').length;
442 |         const failed = this.testResults.filter(r => r.status === 'failed').length;
443 |         const total = this.testResults.length;
444 | 
445 |         console.log(`Total Tests: ${total}`);
446 |         console.log(`Passed: ${passed} ✅`);
447 |         console.log(`Failed: ${failed} ${failed > 0 ? '❌' : ''}`);
448 |         console.log(`Success Rate: ${((passed / total) * 100).toFixed(1)}%`);
449 | 
450 |         // Performance summary
451 |         const totalTime = this.performanceMetrics.reduce((sum, m) => sum + m.duration, 0);
452 |         const avgTime = this.performanceMetrics.length > 0 ? totalTime / this.performanceMetrics.length : 0;
453 | 
454 |         console.log(`\n⚡ Performance`);
455 |         console.log(`Total Time: ${totalTime}ms`);
456 |         console.log(`Average per Test: ${avgTime.toFixed(1)}ms`);
457 | 
458 |         if (failed > 0) {
459 |             console.log('\n❌ Failed Tests:');
460 |             this.testResults
461 |                 .filter(r => r.status === 'failed')
462 |                 .forEach(r => console.log(`  • ${r.name}: ${r.error}`));
463 |         }
464 |     }
465 | }
466 | 
467 | /**
468 |  * Run tests if called directly
469 |  */
470 | if (require.main === module) {
471 |     const suite = new NaturalTriggersTestSuite();
472 | 
473 |     suite.runAllTests()
474 |         .then(results => {
475 |             const failed = results.filter(r => r.status === 'failed').length;
476 |             process.exit(failed > 0 ? 1 : 0);
477 |         })
478 |         .catch(error => {
479 |             console.error('❌ Test suite failed:', error.message);
480 |             process.exit(1);
481 |         });
482 | }
483 | 
484 | module.exports = { NaturalTriggersTestSuite };
```
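
A quick aside on the exported class above: besides the CLI entry point, it can be driven programmatically, which is handy when debugging a single test group. A minimal sketch — the require path is hypothetical and depends on where the suite file actually lives:

```javascript
// Hypothetical require path — adjust to wherever the suite file lives.
const { NaturalTriggersTestSuite } = require('./test-natural-triggers');

async function runProfileTestsOnly() {
    const suite = new NaturalTriggersTestSuite();
    try {
        // Run a single test group instead of the whole suite.
        await suite.performanceProfileTests();
    } finally {
        // runTest() re-throws on failure, so print the summary either way.
        suite.printTestSummary();
    }
}

runProfileTestsOnly().catch(() => process.exit(1));
```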

--------------------------------------------------------------------------------
/docs/guides/advanced-command-examples.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Advanced Claude Code Commands - Real-World Examples
  2 | 
  3 | This guide showcases advanced usage patterns and real-world workflows using MCP Memory Service Claude Code commands.
  4 | 
  5 | ## Table of Contents
  6 | - [Development Workflows](#development-workflows)
  7 | - [Project Management](#project-management)
  8 | - [Learning & Knowledge Management](#learning--knowledge-management)
  9 | - [Team Collaboration](#team-collaboration)
 10 | - [Debugging & Troubleshooting](#debugging--troubleshooting)
 11 | - [Advanced Search Techniques](#advanced-search-techniques)
 12 | - [Automation & Scripting](#automation--scripting)
 13 | 
 14 | ---
 15 | 
 16 | ## Development Workflows
 17 | 
 18 | ### Full-Stack Development Session
 19 | 
 20 | **Scenario**: Working on a web application with authentication
 21 | 
 22 | ```bash
 23 | # Start development session with context capture
 24 | claude /memory-context --summary "Starting OAuth 2.0 integration for user authentication"
 25 | 
 26 | # Store architecture decisions as you make them
 27 | claude /memory-store --tags "architecture,oauth,security" \
 28 |   "Using Authorization Code flow with PKCE for mobile app security"
 29 | 
 30 | claude /memory-store --tags "database,schema" --type "reference" \
 31 |   "User table schema: id, email, oauth_provider, oauth_id, created_at, last_login"
 32 | 
 33 | # Store implementation details
 34 | claude /memory-store --tags "implementation,react" \
 35 |   "React auth context uses useReducer for state management with actions: LOGIN, LOGOUT, REFRESH_TOKEN"
 36 | 
 37 | # Store configuration details (marked as private)
 38 | claude /memory-store --tags "config,oauth" --type "reference" --private \
 39 |   "Auth0 configuration: domain=dev-xyz.auth0.com, audience=https://api.myapp.com"
 40 | 
 41 | # Later, recall decisions when working on related features
 42 | claude /memory-recall "what did we decide about OAuth implementation yesterday?"
 43 | 
 44 | # Search for specific implementation patterns
 45 | claude /memory-search --tags "react,auth" "state management patterns"
 46 | 
 47 | # End session with comprehensive context capture
 48 | claude /memory-context --summary "Completed OAuth integration - ready for testing" \
 49 |   --include-files --include-commits
 50 | ```
 51 | 
 52 | ### Bug Fixing Workflow
 53 | 
 54 | **Scenario**: Tracking and resolving a complex bug
 55 | 
 56 | ```bash
 57 | # Store bug discovery
 58 | claude /memory-store --tags "bug,critical,payment" --type "task" \
 59 |   "Payment processing fails for amounts over $1000 - investigation needed"
 60 | 
 61 | # Store investigation findings
 62 | claude /memory-store --tags "bug,payment,stripe" \
 63 |   "Issue traced to Stripe API rate limiting on high-value transactions"
 64 | 
 65 | # Store attempted solutions
 66 | claude /memory-store --tags "bug,payment,attempted-fix" \
 67 |   "Tried increasing timeout from 5s to 30s - did not resolve issue"
 68 | 
 69 | # Store working solution
 70 | claude /memory-store --tags "bug,payment,solution" --type "decision" \
 71 |   "Fixed by implementing exponential backoff retry mechanism with max 3 attempts"
 72 | 
 73 | # Create searchable reference for future
 74 | claude /memory-store --tags "reference,stripe,best-practice" \
 75 |   "Stripe high-value transactions require retry logic - see payment-service.js line 245"
 76 | 
 77 | # Later, search for similar issues
 78 | claude /memory-search --tags "bug,stripe" "rate limiting payment"
 79 | ```
 80 | 
 81 | ### Code Review & Refactoring
 82 | 
 83 | **Scenario**: Systematic code improvement process
 84 | 
 85 | ```bash
 86 | # Store code review insights
 87 | claude /memory-store --tags "code-review,performance,database" \
 88 |   "N+1 query problem in user dashboard - fetching posts individually instead of batch"
 89 | 
 90 | # Store refactoring decisions
 91 | claude /memory-store --tags "refactoring,database,optimization" --type "decision" \
 92 |   "Replaced individual queries with single JOIN query - reduced DB calls from 50+ to 1"
 93 | 
 94 | # Store before/after metrics
 95 | claude /memory-store --tags "performance,metrics,improvement" \
 96 |   "Dashboard load time: Before=2.3s, After=0.4s (83% improvement)"
 97 | 
 98 | # Track technical debt
 99 | claude /memory-store --tags "technical-debt,todo" --type "task" \
100 |   "TODO: Extract user dashboard logic into dedicated service class"
101 | 
102 | # Review improvements over time
103 | claude /memory-recall "what performance improvements did we make this month?"
104 | ```
105 | 
106 | ---
107 | 
108 | ## Project Management
109 | 
110 | ### Sprint Planning & Tracking
111 | 
112 | **Scenario**: Agile development with memory-enhanced tracking
113 | 
114 | ```bash
115 | # Start of sprint
116 | claude /memory-context --summary "Sprint 15 planning - Focus on user onboarding improvements"
117 | 
118 | # Store sprint goals
119 | claude /memory-store --tags "sprint-15,goals,onboarding" --type "planning" \
120 |   "Sprint 15 goals: Simplify signup flow, add email verification, implement welcome tour"
121 | 
122 | # Track daily progress
123 | claude /memory-store --tags "sprint-15,progress,day-1" \
124 |   "Completed signup form validation and error handling - 2 story points"
125 | 
126 | # Store blockers and risks
127 | claude /memory-store --tags "sprint-15,blocker,email" --type "task" \
128 |   "Email service integration blocked - waiting for IT to configure SendGrid account"
129 | 
130 | # Mid-sprint review
131 | claude /memory-recall "what blockers did we identify this sprint?"
132 | claude /memory-search --tags "sprint-15,progress"
133 | 
134 | # Sprint retrospective
135 | claude /memory-store --tags "sprint-15,retrospective" --type "meeting" \
136 |   "Sprint 15 retro: Delivered 18/20 points, email blocker resolved, team velocity improving"
137 | 
138 | # Cross-sprint analysis
139 | claude /memory-search --tags "retrospective" --limit 5
140 | claude /memory-recall "what patterns do we see in our sprint blockers?"
141 | ```
142 | 
143 | ### Feature Development Lifecycle
144 | 
145 | **Scenario**: End-to-end feature development tracking
146 | 
147 | ```bash
148 | # Feature inception
149 | claude /memory-store --tags "feature,user-profiles,inception" --type "planning" \
150 |   "User profiles feature: Allow users to customize avatar, bio, social links, privacy settings"
151 | 
152 | # Requirements gathering
153 | claude /memory-store --tags "feature,user-profiles,requirements" \
154 |   "Requirements: Image upload (max 2MB), bio text (max 500 chars), 5 social links, public/private toggle"
155 | 
156 | # Technical design
157 | claude /memory-store --tags "feature,user-profiles,design" --type "architecture" \
158 |   "Design: New profiles table, S3 for image storage, React profile editor component"
159 | 
160 | # Implementation milestones
161 | claude /memory-store --tags "feature,user-profiles,milestone" \
162 |   "Milestone 1 complete: Database schema created and migrated to production"
163 | 
164 | # Testing notes
165 | claude /memory-store --tags "feature,user-profiles,testing" \
166 |   "Testing discovered: Large images cause timeout - need client-side compression"
167 | 
168 | # Launch preparation
169 | claude /memory-store --tags "feature,user-profiles,launch" \
170 |   "Launch checklist: DB migration ✓, S3 bucket ✓, feature flag ready ✓, docs updated ✓"
171 | 
172 | # Post-launch analysis
173 | claude /memory-store --tags "feature,user-profiles,metrics" \
174 |   "Week 1 metrics: 45% adoption rate, avg 3.2 social links per profile, 12% privacy toggle usage"
175 | 
176 | # Feature evolution tracking
177 | claude /memory-search --tags "feature,user-profiles" --limit 20
178 | ```
179 | 
180 | ---
181 | 
182 | ## Learning & Knowledge Management
183 | 
184 | ### Technology Research & Evaluation
185 | 
186 | **Scenario**: Evaluating new technologies for adoption
187 | 
188 | ```bash
189 | # Research session start
190 | claude /memory-context --summary "Researching GraphQL vs REST API for mobile app backend"
191 | 
192 | # Store research findings
193 | claude /memory-store --tags "research,graphql,pros" \
194 |   "GraphQL benefits: Single endpoint, client-defined queries, strong typing, introspection"
195 | 
196 | claude /memory-store --tags "research,graphql,cons" \
197 |   "GraphQL challenges: Learning curve, caching complexity, N+1 query risk, server complexity"
198 | 
199 | # Store comparison data
200 | claude /memory-store --tags "research,performance,comparison" \
201 |   "Performance test: GraphQL 340ms avg, REST 280ms avg for mobile app typical queries"
202 | 
203 | # Store team feedback
204 | claude /memory-store --tags "research,team-feedback,graphql" \
205 |   "Team survey: 60% excited about GraphQL, 30% prefer REST familiarity, 10% neutral"
206 | 
207 | # Store decision and rationale
208 | claude /memory-store --tags "decision,architecture,graphql" --type "decision" \
209 |   "Decision: Adopt GraphQL for new features, maintain REST for existing APIs during 6-month transition"
210 | 
211 | # Create reference documentation
212 | claude /memory-store --tags "reference,graphql,implementation" \
213 |   "GraphQL implementation guide: Use Apollo Server, implement DataLoader for N+1 prevention"
214 | 
215 | # Later research sessions
216 | claude /memory-recall "what did we learn about GraphQL performance last month?"
217 | claude /memory-search --tags "research,comparison" "technology evaluation"
218 | ```
219 | 
220 | ### Personal Learning Journal
221 | 
222 | **Scenario**: Building a personal knowledge base
223 | 
224 | ```bash
225 | # Daily learning capture
226 | claude /memory-store --tags "learning,javascript,async" \
227 |   "Learned: Promise.allSettled() waits for all promises unlike Promise.all() which fails fast"
228 | 
229 | claude /memory-store --tags "learning,css,flexbox" \
230 |   "CSS trick: flex-grow: 1 on middle item makes it expand to fill available space"
231 | 
232 | # Code snippets and examples
233 | claude /memory-store --tags "snippet,react,custom-hook" --type "reference" \
234 |   "Custom hook pattern: useLocalStorage - encapsulates localStorage with React state sync"
235 | 
236 | # Book and article insights
237 | claude /memory-store --tags "book,clean-code,insight" \
238 |   "Clean Code principle: Functions should do one thing well - if function has 'and' in description, split it"
239 | 
240 | # Conference and talk notes
241 | claude /memory-store --tags "conference,react-conf,2024" \
242 |   "React Conf 2024: New concurrent features in React 18.3, Server Components adoption patterns"
243 | 
244 | # Weekly knowledge review
245 | claude /memory-recall "what did I learn about React this week?"
246 | claude /memory-search --tags "learning,javascript" --limit 10
247 | 
248 | # Monthly learning patterns
249 | claude /memory-search --tags "learning" --since "last month"
250 | ```
251 | 
252 | ---
253 | 
254 | ## Team Collaboration
255 | 
256 | ### Cross-Team Communication
257 | 
258 | **Scenario**: Working with multiple teams on shared systems
259 | 
260 | ```bash
261 | # Store cross-team decisions
262 | claude /memory-store --tags "team,frontend,backend,api-contract" --type "decision" \
263 |   "API contract agreed: User service will return ISO 8601 timestamps, frontend will handle timezone conversion"
264 | 
265 | # Store meeting outcomes
266 | claude /memory-store --tags "meeting,security-team,compliance" \
267 |   "Security review outcome: Authentication service approved for production with rate limiting requirement"
268 | 
269 | # Store shared resource information
270 | claude /memory-store --tags "shared,database,access" --type "reference" \
271 |   "Shared analytics DB access: Use service account [email protected], read-only access"
272 | 
273 | # Track dependencies and blockers
274 | claude /memory-store --tags "dependency,infrastructure,blocker" --type "task" \
275 |   "Blocked on infrastructure team: Need production K8s namespace for user-service deployment"
276 | 
277 | # Store team contact information
278 | claude /memory-store --tags "team,contacts,infrastructure" --type "reference" --private \
279 |   "Infrastructure team: Primary contact Alex Chen ([email protected]), escalation Sarah Kim ([email protected])"
280 | 
281 | # Regular cross-team syncs
282 | claude /memory-recall "what dependencies do we have on the infrastructure team?"
283 | claude /memory-search --tags "team,backend" "shared decisions"
284 | ```
285 | 
286 | ### Code Handoff & Documentation
287 | 
288 | **Scenario**: Preparing code handoff to another developer
289 | 
290 | ```bash
291 | # Store system overview for handoff
292 | claude /memory-store --tags "handoff,system-overview,payment-service" \
293 |   "Payment service architecture: Express.js API, PostgreSQL DB, Redis cache, Stripe integration"
294 | 
295 | # Document key implementation details
296 | claude /memory-store --tags "handoff,implementation,payment-service" \
297 |   "Key files: server.js (main app), routes/payments.js (API endpoints), services/stripe.js (integration logic)"
298 | 
299 | # Store operational knowledge
300 | claude /memory-store --tags "handoff,operations,payment-service" \
301 |   "Monitoring: Grafana dashboard 'Payment Service', alerts on Slack #payments-alerts channel"
302 | 
303 | # Document gotchas and edge cases
304 | claude /memory-store --tags "handoff,gotchas,payment-service" \
305 |   "Known issues: Webhook retries can cause duplicate processing - check payment_id before processing"
306 | 
307 | # Store testing information
308 | claude /memory-store --tags "handoff,testing,payment-service" \
309 |   "Testing: npm test (unit), npm run test:integration, Stripe test cards in test-data.md"
310 | 
311 | # Create comprehensive handoff package
312 | claude /memory-search --tags "handoff,payment-service" --export
313 | ```
314 | 
315 | ---
316 | 
317 | ## Debugging & Troubleshooting
318 | 
319 | ### Production Issue Investigation
320 | 
321 | **Scenario**: Investigating and resolving production incidents
322 | 
323 | ```bash
324 | # Store incident details
325 | claude /memory-store --tags "incident,p1,database,performance" --type "task" \
326 |   "P1 Incident: Database connection timeouts causing 504 errors, affecting 15% of users"
327 | 
328 | # Store investigation timeline
329 | claude /memory-store --tags "incident,investigation,timeline" \
330 |   "10:15 AM - First reports of timeouts, 10:22 AM - Confirmed DB connection pool exhaustion"
331 | 
332 | # Store root cause analysis
333 | claude /memory-store --tags "incident,root-cause,connection-pool" \
334 |   "Root cause: Connection pool size (10) insufficient for increased traffic, no connection recycling"
335 | 
336 | # Store immediate fixes applied
337 | claude /memory-store --tags "incident,fix,immediate" \
338 |   "Immediate fix: Increased connection pool to 50, enabled connection recycling, deployed at 11:30 AM"
339 | 
340 | # Store monitoring improvements
341 | claude /memory-store --tags "incident,monitoring,improvement" \
342 |   "Added monitoring: DB connection pool utilization alerts at 80% threshold"
343 | 
344 | # Store prevention measures
345 | claude /memory-store --tags "incident,prevention,long-term" --type "task" \
346 |   "Long-term prevention: Implement connection pool auto-scaling, add load testing to CI/CD"
347 | 
348 | # Post-incident review
349 | claude /memory-store --tags "incident,postmortem,lessons" \
350 |   "Lessons learned: Need proactive monitoring of resource utilization, not just error rates"
351 | 
352 | # Search for similar incidents
353 | claude /memory-search --tags "incident,database" "connection timeout"
354 | ```
355 | 
356 | ### Performance Optimization Tracking
357 | 
358 | **Scenario**: Systematic performance improvement initiative
359 | 
360 | ```bash
361 | # Baseline measurements
362 | claude /memory-store --tags "performance,baseline,api-response" \
363 |   "Baseline metrics: API avg response time 850ms, p99 2.1s, DB query avg 340ms"
364 | 
365 | # Store optimization experiments
366 | claude /memory-store --tags "performance,experiment,caching" \
367 |   "Experiment 1: Added Redis caching for user profiles - 30% response time improvement"
368 | 
369 | claude /memory-store --tags "performance,experiment,database" \
370 |   "Experiment 2: Optimized N+1 queries in posts endpoint - 45% DB query time reduction"
371 | 
372 | # Track measurement methodology
373 | claude /memory-store --tags "performance,methodology,testing" \
374 |   "Load testing setup: k6 script, 100 VU, 5min ramp-up, 10min steady state, production-like data"
375 | 
376 | # Store optimization results
377 | claude /memory-store --tags "performance,results,final" \
378 |   "Final metrics: API avg response time 420ms (51% improvement), p99 980ms (53% improvement)"
379 | 
380 | # Document optimization techniques
381 | claude /memory-store --tags "performance,techniques,reference" --type "reference" \
382 |   "Optimization techniques applied: Redis caching, query optimization, connection pooling, response compression"
383 | 
384 | # Performance trend analysis  
385 | claude /memory-recall "what performance improvements did we achieve this quarter?"
386 | ```
387 | 
388 | ---
389 | 
390 | ## Advanced Search Techniques
391 | 
392 | ### Complex Query Patterns
393 | 
394 | **Scenario**: Advanced search strategies for large knowledge bases
395 | 
396 | ```bash
397 | # Multi-tag searches with boolean logic
398 | claude /memory-search --tags "architecture,database" --content "performance"
399 | # Finds memories tagged with both architecture AND database, containing performance-related content
400 | 
401 | # Time-constrained searches
402 | claude /memory-search --tags "bug,critical" --since "last week" --limit 20
403 | # Recent critical bugs only
404 | 
405 | # Project-specific technical searches
406 | claude /memory-search --project "user-service" --type "decision" --content "authentication"
407 | # Architecture decisions about authentication in specific project
408 | 
409 | # Minimum relevance threshold searches
410 | claude /memory-search --min-score 0.8 "microservices communication patterns"
411 | # Only highly relevant results about microservices communication
412 | 
413 | # Comprehensive metadata searches
414 | claude /memory-search --include-metadata --tags "api,design" --export
415 | # Full metadata export for API design memories
416 | ```
417 | 
418 | ### Research and Analysis Queries
419 | 
420 | **Scenario**: Analyzing patterns and trends in stored knowledge
421 | 
422 | ```bash
423 | # Trend analysis across time periods
424 | claude /memory-recall "what architectural decisions did we make in Q1?"
425 | claude /memory-recall "what architectural decisions did we make in Q2?"
426 | # Compare decision patterns across quarters
427 | 
428 | # Technology adoption tracking
429 | claude /memory-search --tags "adoption" --content "react"
430 | claude /memory-search --tags "adoption" --content "vue"
431 | # Compare technology adoption discussions
432 | 
433 | # Problem pattern identification
434 | claude /memory-search --tags "bug,database" --limit 50
435 | # Identify common database-related issues
436 | 
437 | # Team learning velocity analysis
438 | claude /memory-search --tags "learning" --since "last month"
439 | # Recent learning activities
440 | 
441 | # Decision outcome tracking
442 | claude /memory-search --tags "decision" --content "outcome"
443 | # Find decisions with documented outcomes
444 | ```
445 | 
446 | ---
447 | 
448 | ## Automation & Scripting
449 | 
450 | ### Automated Memory Capture
451 | 
452 | **Scenario**: Scripting common memory operations
453 | 
454 | ```bash
455 | # Daily standup automation
456 | #!/bin/bash
457 | # daily-standup.sh
458 | DATE=$(date +"%Y-%m-%d")
459 | read -p "What did you accomplish yesterday? " YESTERDAY
460 | read -p "What are you working on today? " TODAY
461 | read -p "Any blockers? " BLOCKERS
462 | 
463 | claude /memory-store --tags "standup,daily,$DATE" \
464 |   "Yesterday: $YESTERDAY. Today: $TODAY. Blockers: $BLOCKERS"
465 | 
466 | # Git commit message enhancement
467 | #!/bin/bash
468 | # enhanced-commit.sh
469 | COMMIT_MSG="$1"
470 | BRANCH=$(git branch --show-current)
471 | FILES=$(git diff --name-only --cached)
472 | 
473 | claude /memory-store --tags "commit,$BRANCH,development" \
474 |   "Commit: $COMMIT_MSG. Files: $FILES. Branch: $BRANCH"
475 | 
476 | git commit -m "$COMMIT_MSG"
477 | 
478 | # End-of-day summary
479 | #!/bin/bash
480 | # eod-summary.sh
481 | claude /memory-context --summary "End of day summary - $(date +%Y-%m-%d)" \
482 |   --include-files --include-commits
483 | ```
484 | 
485 | ### Batch Operations and Analysis
486 | 
487 | **Scenario**: Processing multiple memories for analysis
488 | 
489 | ```bash
490 | # Export all architectural decisions for documentation
491 | claude /memory-search --tags "architecture,decision" --limit 100 --export
492 | # Creates exportable report of all architectural decisions
493 | 
494 | # Weekly learning summary
495 | claude /memory-search --tags "learning" --since "last week" --export
496 | # Export week's learning for review
497 | 
498 | # Project retrospective data gathering
499 | claude /memory-search --project "user-service" --tags "bug,issue" --export
500 | claude /memory-search --project "user-service" --tags "success,milestone" --export
501 | # Gather both problems and successes for retrospective
502 | 
503 | # Technical debt analysis
504 | claude /memory-search --tags "technical-debt,todo" --include-metadata --export
505 | # Comprehensive technical debt report
506 | 
507 | # Performance trend analysis
508 | claude /memory-search --tags "performance,metrics" --limit 50 --export
509 | # Historical performance data for trend analysis
510 | ```
511 | 
512 | ---
513 | 
514 | ## Best Practices Summary
515 | 
516 | ### Effective Tagging Strategies
517 | - **Hierarchical tags**: Use `team`, `team-frontend`, `team-frontend-react`
518 | - **Temporal tags**: Include sprint numbers, quarters, years
519 | - **Context tags**: Project names, feature names, team names
520 | - **Type tags**: `decision`, `bug`, `learning`, `reference`, `todo`
521 | - **Priority tags**: `critical`, `important`, `nice-to-have`
522 | 
523 | ### Content Organization
524 | - **Specific titles**: Instead of "Fixed bug", use "Fixed payment timeout bug in Stripe integration"
525 | - **Context inclusion**: Always include relevant project and time context
526 | - **Outcome documentation**: Store not just decisions but their outcomes
527 | - **Link related memories**: Reference related decisions and implementations
528 | 
529 | ### Search Optimization
530 | - **Use multiple search strategies**: Combine tags, content, and time-based searches
531 | - **Iterate searches**: Start broad, then narrow down with additional filters
532 | - **Export important results**: Save comprehensive analyses for documentation
533 | - **Regular reviews**: Weekly/monthly searches to identify patterns
534 | 
535 | ### Workflow Integration
536 | - **Start sessions with context**: Use `/memory-context` to set session scope
537 | - **Real-time capture**: Store decisions and insights as they happen
538 | - **End sessions with summary**: Capture session outcomes and next steps
539 | - **Regular retrospectives**: Use search to analyze patterns and improvements
540 | 
541 | ---
542 | 
543 | **These advanced patterns will transform your development workflow with persistent, searchable memory that grows with your expertise!** 🚀
```
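
To make the "Effective Tagging Strategies" guidance in the guide above concrete, a single store command can combine hierarchical, temporal, type, and priority tags in one call — the tag values here are illustrative:

```bash
# Hierarchical (team-frontend-react), temporal (sprint-16, 2025-q3),
# type (decision) and priority (important) tags on one memory,
# with the outcome documented alongside the decision
claude /memory-store \
  --tags "team-frontend-react,sprint-16,2025-q3,decision,important" \
  --type "decision" \
  "Adopted React Query for dashboard server state - outcome: removed 400 lines of custom fetch logic"
```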

--------------------------------------------------------------------------------
/archive/docs-removed-2025-08-23/development/hybrid-slm-memory-consolidation.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Hybrid Memory Consolidation with On-Device SLMs
  2 | 
  3 | ## Overview
  4 | 
  5 | This document extends the [Autonomous Memory Consolidation](./autonomous-memory-consolidation.md) system by selectively incorporating on-device Small Language Models (SLMs) to enhance natural language capabilities while maintaining privacy, efficiency, and autonomous operation.
  6 | 
  7 | > **Note**: This is an optional enhancement. The memory consolidation system works fully autonomously without SLMs, but can provide richer insights when enhanced with local AI models.
  8 | 
  9 | ## Why Hybrid?
 10 | 
 11 | The autonomous system excels at:
 12 | - ✅ Mathematical operations (similarity, clustering)
 13 | - ✅ Deterministic behavior
 14 | - ✅ Zero dependencies
 15 | - ❌ Natural language summaries
 16 | - ❌ Complex reasoning about connections
 17 | 
 18 | On-device SLMs add:
 19 | - ✅ Eloquent prose summaries
 20 | - ✅ Nuanced understanding
 21 | - ✅ Creative insights
 22 | - ✅ Still completely private (local processing)
 23 | 
 24 | ## Recommended On-Device SLMs
 25 | 
 26 | ### Tier 1: Ultra-Lightweight (< 2GB RAM)
 27 | 
 28 | #### **Llama 3.2 1B-Instruct**
 29 | - **Size**: ~1.2GB quantized (Q4_K_M)
 30 | - **Performance**: 50-100 tokens/sec on CPU
 31 | - **Best for**: Basic summarization, keyword expansion
 32 | - **Install**: `ollama pull llama3.2:1b`
 33 | 
 34 | ```python
 35 | import ollama
 36 | 
 37 | def generate_summary_with_llama(cluster_data):
 38 |     """Generate natural language summary for memory cluster."""
 39 |     prompt = f"""Summarize these key themes from related memories:
 40 |     Keywords: {', '.join(cluster_data['keywords'])}
 41 |     Time span: {cluster_data['time_span']}
 42 |     Number of memories: {cluster_data['count']}
 43 |     
 44 |     Provide a concise, insightful summary:"""
 45 |     
 46 |     response = ollama.generate(model='llama3.2:1b', prompt=prompt)
 47 |     return response['response']
 48 | ```
 49 | 
 50 | #### **Phi-3-mini (3.8B)**
 51 | - **Size**: ~2.3GB quantized
 52 | - **Strengths**: Exceptional reasoning for size
 53 | - **Best for**: Analyzing creative connections
 54 | - **Install**: `ollama pull phi3:mini`
 55 | 
 56 | ### Tier 2: Balanced Performance (4-8GB RAM)
 57 | 
 58 | #### **Mistral 7B-Instruct v0.3**
 59 | - **Size**: ~4GB quantized (Q4_K_M)
 60 | - **Performance**: 20-40 tokens/sec on modern CPU
 61 | - **Best for**: Full consolidation narratives
 62 | - **Install**: `ollama pull mistral:7b-instruct-q4_K_M`
 63 | 
 64 | ```python
 65 | class MistralEnhancedConsolidator:
 66 |     def __init__(self):
 67 |         self.model = "mistral:7b-instruct-q4_K_M"
 68 |     
 69 |     async def create_consolidation_narrative(self, clusters, associations):
 70 |         """Create a narrative summary of the consolidation results."""
 71 |         prompt = f"""Based on memory consolidation analysis:
 72 |         
 73 |         Found {len(clusters)} memory clusters and {len(associations)} creative connections.
 74 |         
 75 |         Key themes: {self.extract_themes(clusters)}
 76 |         Surprising connections: {self.format_associations(associations[:3])}
 77 |         
 78 |         Write a brief narrative summary highlighting the most important insights 
 79 |         and patterns discovered during this consolidation cycle."""
 80 |         
 81 |         response = ollama.generate(  # module-level ollama.generate is synchronous
 82 |             model=self.model,
 83 |             prompt=prompt,
 84 |             options={"temperature": 0.7, "num_predict": 200}  # num_predict is Ollama's token-limit option
 85 |         )
 86 |         return response['response']
 87 | ```
 88 | 
 89 | #### **Gemma 2B**
 90 | - **Size**: ~1.5GB quantized
 91 | - **Strengths**: Google's training quality
 92 | - **Best for**: Classification and scoring
 93 | - **Install**: `ollama pull gemma:2b`
 94 | 
 95 | ### Tier 3: High-Performance (8-16GB RAM)
 96 | 
 97 | #### **Qwen 2.5 7B-Instruct**
 98 | - **Size**: ~4-5GB quantized
 99 | - **Strengths**: Multilingual, complex reasoning
100 | - **Best for**: International users, detailed analysis
101 | - **Install**: `ollama pull qwen2.5:7b-instruct`
102 | 
103 | ## Hybrid Implementation Architecture
104 | 
105 | ```python
106 | from enum import Enum
107 | from typing import List, Dict, Optional
108 | import numpy as np
109 | from datetime import datetime
110 | 
111 | class ProcessingMode(Enum):
112 |     AUTONOMOUS_ONLY = "autonomous"
113 |     HYBRID_SELECTIVE = "hybrid_selective"
114 |     HYBRID_FULL = "hybrid_full"
115 | 
116 | class HybridMemoryConsolidator:
117 |     """
118 |     Combines autonomous processing with selective SLM enhancement.
119 |     
120 |     The system always runs autonomous processing first, then selectively
121 |     enhances results with SLM-generated insights where valuable.
122 |     """
123 |     
124 |     def __init__(self, storage, config):
125 |         # Core autonomous system (always available)
126 |         self.autonomous = AutonomousMemoryConsolidator(storage, config)
127 |         
128 |         # SLM configuration (optional enhancement)
129 |         self.mode = ProcessingMode(config.get('processing_mode', 'autonomous'))
130 |         self.slm_model = config.get('slm_model', 'llama3.2:1b')
131 |         self.slm_available = self._check_slm_availability()
132 |         
133 |         # Enhancement thresholds
134 |         self.min_cluster_size = config.get('slm_min_cluster_size', 5)
135 |         self.min_importance = config.get('slm_min_importance', 0.7)
136 |         self.enhancement_horizons = config.get(
137 |             'slm_time_horizons', 
138 |             ['weekly', 'monthly', 'quarterly', 'yearly']
139 |         )
140 |     
141 |     def _check_slm_availability(self) -> bool:
142 |         """Check if SLM is available for enhancement."""
143 |         if self.mode == ProcessingMode.AUTONOMOUS_ONLY:
144 |             return False
145 |             
146 |         try:
147 |             import ollama
148 |             # Check if model is available
149 |             models = ollama.list()
150 |             return any(m['name'].startswith(self.slm_model) for m in models['models'])
151 |         except Exception:
152 |             return False
153 |     
154 |     async def consolidate(self, time_horizon: str) -> Dict:
155 |         """
156 |         Run consolidation with optional SLM enhancement.
157 |         
158 |         Always performs autonomous processing first, then selectively
159 |         enhances based on configuration and context.
160 |         """
161 |         # Step 1: Always run autonomous processing
162 |         auto_results = await self.autonomous.consolidate(time_horizon)
163 |         
164 |         # Step 2: Determine if SLM enhancement should be applied
165 |         if not self._should_enhance(time_horizon, auto_results):
166 |             return auto_results
167 |         
168 |         # Step 3: Selective SLM enhancement
169 |         enhanced_results = await self._enhance_with_slm(
170 |             auto_results, 
171 |             time_horizon
172 |         )
173 |         
174 |         return enhanced_results
175 |     
176 |     def _should_enhance(self, time_horizon: str, results: Dict) -> bool:
177 |         """Determine if SLM enhancement would add value."""
178 |         # Check if SLM is available
179 |         if not self.slm_available:
180 |             return False
181 |         
182 |         # Check if time horizon warrants enhancement
183 |         if time_horizon not in self.enhancement_horizons:
184 |             return False
185 |         
186 |         # Check if results are significant enough
187 |         significant_clusters = sum(
188 |             1 for cluster in results.get('clusters', [])
189 |             if len(cluster) >= self.min_cluster_size
190 |         )
191 |         
192 |         return significant_clusters > 0
193 |     
194 |     async def _enhance_with_slm(self, auto_results: Dict, time_horizon: str) -> Dict:
195 |         """Selectively enhance autonomous results with SLM insights."""
196 |         enhanced = auto_results.copy()
197 |         
198 |         # Enhance cluster summaries
199 |         if 'clusters' in enhanced:
200 |             enhanced['narrative_summaries'] = []
201 |             for i, cluster in enumerate(enhanced['clusters']):
202 |                 if len(cluster) >= self.min_cluster_size:
203 |                     narrative = await self._generate_cluster_narrative(
204 |                         cluster, 
205 |                         auto_results.get('compressed_summaries', [])[i]
206 |                     )
207 |                     enhanced['narrative_summaries'].append({
208 |                         'cluster_id': i,
209 |                         'narrative': narrative,
210 |                         'memory_count': len(cluster)
211 |                     })
212 |         
213 |         # Enhance creative associations
214 |         if 'associations' in enhanced and len(enhanced['associations']) > 0:
215 |             insights = await self._generate_association_insights(
216 |                 enhanced['associations'][:5]  # Top 5 associations
217 |             )
218 |             enhanced['association_insights'] = insights
219 |         
220 |         # Generate consolidation overview
221 |         enhanced['consolidation_narrative'] = await self._generate_overview(
222 |             enhanced, 
223 |             time_horizon
224 |         )
225 |         
226 |         enhanced['processing_mode'] = 'hybrid'
227 |         enhanced['slm_model'] = self.slm_model
228 |         
229 |         return enhanced
230 |     
231 |     async def _generate_cluster_narrative(
232 |         self, 
233 |         cluster: List, 
234 |         compressed_summary: Dict
235 |     ) -> str:
236 |         """Generate natural language narrative for a memory cluster."""
237 |         prompt = f"""Based on this memory cluster analysis:
238 |         
239 |         Keywords: {', '.join(compressed_summary['keywords'][:10])}
240 |         Time span: {compressed_summary['temporal_range']['start']} to {compressed_summary['temporal_range']['end']}
241 |         Common tags: {', '.join(compressed_summary['common_tags'][:5])}
242 |         Number of memories: {len(cluster)}
243 |         
244 |         Create a brief, insightful summary that captures the essence of these 
245 |         related memories and any patterns or themes you notice:"""
246 |         
247 |         response = await self._call_slm(prompt, max_tokens=150)
248 |         return response
249 |     
250 |     async def _generate_association_insights(
251 |         self, 
252 |         associations: List[Dict]
253 |     ) -> List[Dict]:
254 |         """Generate insights about creative associations discovered."""
255 |         insights = []
256 |         
257 |         for assoc in associations:
258 |             prompt = f"""Two memories were found to have an interesting connection 
259 |             (similarity: {assoc['similarity']:.2f}).
260 |             
261 |             Memory 1: {assoc['memory_1_preview'][:100]}...
262 |             Memory 2: {assoc['memory_2_preview'][:100]}...
263 |             
264 |             What insight or pattern might this connection reveal?
265 |             Be concise and focus on the non-obvious relationship:"""
266 |             
267 |             insight = await self._call_slm(prompt, max_tokens=80)
268 |             insights.append({
269 |                 'association_id': assoc['id'],
270 |                 'insight': insight,
271 |                 'similarity': assoc['similarity']
272 |             })
273 |         
274 |         return insights
275 |     
276 |     async def _generate_overview(
277 |         self, 
278 |         results: Dict, 
279 |         time_horizon: str
280 |     ) -> str:
281 |         """Generate a narrative overview of the consolidation cycle."""
282 |         prompt = f"""Memory consolidation {time_horizon} summary:
283 |         
284 |         - Processed {results.get('total_memories', 0)} memories
285 |         - Found {len(results.get('clusters', []))} memory clusters
286 |         - Discovered {len(results.get('associations', []))} creative connections
287 |         - Archived {results.get('archived_count', 0)} low-relevance memories
288 |         
289 |         Key themes: {self._extract_top_themes(results)}
290 |         
291 |         Write a brief executive summary of this consolidation cycle, 
292 |         highlighting the most important patterns and any surprising discoveries:"""
293 |         
294 |         response = await self._call_slm(prompt, max_tokens=200)
295 |         return response
296 |     
297 |     async def _call_slm(self, prompt: str, max_tokens: int = 100) -> str:
298 |         """Call the SLM with error handling."""
299 |         try:
300 |             import ollama
301 |             response = ollama.generate(
302 |                 model=self.slm_model,
303 |                 prompt=prompt,
304 |                 options={
305 |                     "temperature": 0.7,
306 |                     "num_predict": max_tokens,
307 |                     "stop": ["\n\n", "###"]
308 |                 }
309 |             )
310 |             return response['response'].strip()
311 |         except Exception as e:
312 |             # Fallback to autonomous summary
313 |             return f"[SLM unavailable: {str(e)}]"
314 |     
315 |     def _extract_top_themes(self, results: Dict) -> str:
316 |         """Extract top themes from results."""
317 |         all_keywords = []
318 |         for summary in results.get('compressed_summaries', []):
319 |             all_keywords.extend(summary.get('keywords', []))
320 |         
321 |         # Count frequency
322 |         from collections import Counter
323 |         theme_counts = Counter(all_keywords)
324 |         top_themes = [theme for theme, _ in theme_counts.most_common(5)]
325 |         
326 |         return ', '.join(top_themes) if top_themes else 'various topics'
327 | ```
328 | 
329 | ## Smart Enhancement Strategy
330 | 
331 | ```python
332 | class SmartEnhancementStrategy:
333 |     """
334 |     Intelligently decide when and how to use SLM enhancement.
335 |     
336 |     Principles:
337 |     1. Autonomous processing is always the foundation
338 |     2. SLM enhancement only when it adds significant value
339 |     3. Resource usage scales with importance
340 |     """
341 |     
342 |     def __init__(self, config):
343 |         self.config = config
344 |         
345 |         # Enhancement criteria
346 |         self.criteria = {
347 |             'min_cluster_size': 5,
348 |             'min_importance_score': 0.7,
349 |             'min_association_similarity': 0.4,
350 |             'max_association_similarity': 0.7,
351 |             'enhancement_time_horizons': ['weekly', 'monthly', 'quarterly', 'yearly'],
352 |             'daily_enhancement': False,  # Too frequent
353 |             'require_user_request': False
354 |         }
355 |     
356 |     def should_enhance_cluster(self, cluster_info: Dict) -> bool:
357 |         """Decide if a cluster warrants SLM enhancement."""
358 |         # Size check
359 |         if cluster_info['size'] < self.criteria['min_cluster_size']:
360 |             return False
361 |         
362 |         # Importance check
363 |         avg_importance = np.mean([m.importance_score for m in cluster_info['memories']])
364 |         if avg_importance < self.criteria['min_importance_score']:
365 |             return False
366 |         
367 |         # Complexity check (high variance suggests interesting cluster)
368 |         embedding_variance = np.var([m.embedding for m in cluster_info['memories']], axis=0).mean()
369 |         if embedding_variance < 0.1:  # Too homogeneous
370 |             return False
371 |         
372 |         return True
373 |     
374 |     def select_model_for_task(self, task_type: str, resource_limit: str) -> str:
375 |         """Select appropriate model based on task and resources."""
376 |         model_selection = {
377 |             'basic_summary': {
378 |                 'low': 'llama3.2:1b',
379 |                 'medium': 'phi3:mini',
380 |                 'high': 'mistral:7b-instruct'
381 |             },
382 |             'creative_insights': {
383 |                 'low': 'phi3:mini',  # Good reasoning even when small
384 |                 'medium': 'mistral:7b-instruct',
385 |                 'high': 'qwen2.5:7b-instruct'
386 |             },
387 |             'technical_analysis': {
388 |                 'low': 'gemma:2b',
389 |                 'medium': 'mistral:7b-instruct',
390 |                 'high': 'qwen2.5:7b-instruct'
391 |             }
392 |         }
393 |         
394 |         return model_selection.get(task_type, {}).get(resource_limit, 'llama3.2:1b')
395 | ```
396 | 
397 | ## Configuration Examples
398 | 
399 | ### Minimal Enhancement (Low Resources)
400 | ```yaml
401 | hybrid_consolidation:
402 |   processing_mode: "hybrid_selective"
403 |   slm_model: "llama3.2:1b"
404 |   slm_min_cluster_size: 10  # Only largest clusters
405 |   slm_min_importance: 0.8   # Only most important
406 |   slm_time_horizons: ["monthly", "quarterly"]  # Less frequent
407 |   max_tokens_per_summary: 100
408 | ```
409 | 
410 | ### Balanced Enhancement (Recommended)
411 | ```yaml
412 | hybrid_consolidation:
413 |   processing_mode: "hybrid_selective"
414 |   slm_model: "mistral:7b-instruct-q4_K_M"
415 |   slm_min_cluster_size: 5
416 |   slm_min_importance: 0.7
417 |   slm_time_horizons: ["weekly", "monthly", "quarterly", "yearly"]
418 |   max_tokens_per_summary: 150
419 |   enable_creative_insights: true
420 |   enable_narrative_summaries: true
421 | ```
422 | 
423 | ### Full Enhancement (High Resources)
424 | ```yaml
425 | hybrid_consolidation:
426 |   processing_mode: "hybrid_full"
427 |   slm_model: "qwen2.5:7b-instruct"
428 |   slm_min_cluster_size: 3
429 |   slm_min_importance: 0.5
430 |   slm_time_horizons: ["daily", "weekly", "monthly", "quarterly", "yearly"]
431 |   max_tokens_per_summary: 200
432 |   enable_creative_insights: true
433 |   enable_narrative_summaries: true
434 |   enable_predictive_insights: true
435 |   parallel_processing: true
436 | ```
437 | 
438 | ## Installation Guide
439 | 
440 | ### Using Ollama (Recommended)
441 | ```bash
442 | # Install Ollama
443 | curl -fsSL https://ollama.ai/install.sh | sh
444 | 
445 | # Pull models based on your resources
446 | # Minimal (2GB)
447 | ollama pull llama3.2:1b
448 | 
449 | # Balanced (8GB)
450 | ollama pull mistral:7b-instruct-q4_K_M
451 | 
452 | # High-performance (16GB)
453 | ollama pull qwen2.5:7b-instruct
454 | 
455 | # Test the model
456 | ollama run llama3.2:1b "Summarize: AI helps organize memories"
457 | ```
458 | 
459 | ### Using llama.cpp
460 | ```python
461 | from llama_cpp import Llama
462 | 
463 | # Initialize with specific model
464 | llm = Llama(
465 |     model_path="./models/llama-3.2-1b-instruct.Q4_K_M.gguf",
466 |     n_ctx=2048,
467 |     n_threads=4,
468 |     n_gpu_layers=-1  # Use GPU if available
469 | )
470 | 
471 | # Generate summary
472 | response = llm(
473 |     prompt="Summarize these themes: productivity, learning, coding",
474 |     max_tokens=100,
475 |     temperature=0.7
476 | )
477 | ```
478 | 
479 | ## Performance Considerations
480 | 
481 | ### Resource Usage by Model
482 | 
483 | | Model | RAM Usage | CPU Tokens/sec | GPU Tokens/sec | Quality |
484 | |-------|-----------|----------------|----------------|---------|
485 | | Llama 3.2 1B | 1.2GB | 50-100 | 200-400 | Good |
486 | | Phi-3 mini | 2.3GB | 30-60 | 150-300 | Excellent |
487 | | Mistral 7B Q4 | 4GB | 20-40 | 100-200 | Excellent |
488 | | Gemma 2B | 1.5GB | 40-80 | 180-350 | Good |
489 | | Qwen 2.5 7B | 5GB | 15-30 | 80-150 | Best |
490 | 
491 | ### Optimization Strategies
492 | 
493 | 1. **Batch Processing**: Process multiple summaries in one call
494 | 2. **Caching**: Cache SLM responses for similar inputs
495 | 3. **Progressive Enhancement**: Start with fast model, upgrade if needed
496 | 4. **Time-based Scheduling**: Run SLM enhancement during off-hours
497 | 
498 | ## Benefits of Hybrid Approach
499 | 
500 | ### ✅ **Advantages**
501 | 1. **Best of Both Worlds**: Mathematical precision + natural language eloquence
502 | 2. **Flexible Deployment**: Can disable SLM without breaking system
503 | 3. **Privacy Preserved**: Everything runs locally
504 | 4. **Resource Efficient**: SLM only when valuable
505 | 5. **Progressive Enhancement**: Better with SLM, functional without
506 | 
507 | ### 📊 **Comparison**
508 | 
509 | | Feature | Autonomous Only | Hybrid with SLM |
510 | |---------|----------------|-----------------|
511 | | Natural summaries | ❌ Structured data | ✅ Eloquent prose |
512 | | Creative insights | ❌ Statistical only | ✅ Nuanced understanding |
513 | | Resource usage | ✅ Minimal | 🔶 Moderate |
514 | | Speed | ✅ Very fast | 🔶 Task-dependent |
515 | | Deterministic | ✅ Always | 🔶 Core operations only |
516 | | Privacy | ✅ Complete | ✅ Complete |
517 | 
518 | ## Example Output Comparison
519 | 
520 | ### Autonomous Only
521 | ```json
522 | {
523 |   "cluster_summary": {
524 |     "keywords": ["python", "debugging", "memory", "optimization"],
525 |     "memory_count": 8,
526 |     "time_span": "2025-07-21 to 2025-07-28",
527 |     "representative_memory": "Fixed memory leak in consolidation engine"
528 |   }
529 | }
530 | ```
531 | 
532 | ### Hybrid with SLM
533 | ```json
534 | {
535 |   "cluster_summary": {
536 |     "keywords": ["python", "debugging", "memory", "optimization"],
537 |     "memory_count": 8,
538 |     "time_span": "2025-07-21 to 2025-07-28",
539 |     "representative_memory": "Fixed memory leak in consolidation engine",
540 |     "narrative": "This week focused on resolving critical performance issues in the memory consolidation system. The memory leak in the clustering algorithm was traced to improper cleanup of embedding vectors, resulting in a 40% performance improvement after the fix. These debugging sessions revealed important patterns about resource management in long-running consolidation processes.",
541 |     "key_insight": "Proper lifecycle management of vector embeddings is crucial for maintaining performance in continuous consolidation systems."
542 |   }
543 | }
544 | ```
545 | 
546 | ## Future Enhancements
547 | 
548 | 1. **Fine-tuned Models**: Train small models specifically for memory consolidation
549 | 2. **Multi-Model Ensemble**: Use different models for different tasks
550 | 3. **Adaptive Model Selection**: Automatically choose model based on task complexity
551 | 4. **Streaming Generation**: Process summaries as they generate
552 | 4. **Streaming Generation**: Process summaries as they are generated
553 | 
554 | ## Conclusion
555 | 
556 | The hybrid approach with on-device SLMs provides the perfect balance between the reliability of autonomous processing and the expressiveness of natural language AI. By running everything locally and using SLMs selectively, we maintain privacy, control costs, and ensure the system remains functional even without AI enhancement.
557 | 
558 | This transforms the dream-inspired memory consolidation from a purely algorithmic system into an intelligent assistant that can provide genuine insights while respecting user privacy and system resources.
559 | 
560 | ---
561 | 
562 | **Related Documents:**
563 | - 🔧 [Autonomous Memory Consolidation Guide](./autonomous-memory-consolidation.md)
564 | - 💭 [Dream-Inspired Memory Consolidation System](./dream-inspired-memory-consolidation.md)
565 | - 📋 [Issue #11: Multi-Layered Memory Consolidation](https://github.com/doobidoo/mcp-memory-service/issues/11)
566 | 
567 | *Created: July 28, 2025*
```
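
Of the "Optimization Strategies" listed in the document above, caching is the most code-shaped. A minimal exact-match sketch, assuming the same `ollama` client used throughout — the wrapper class and key scheme are illustrative, not part of the consolidator:

```python
import hashlib

import ollama


class CachedSLMClient:
    """Memoize SLM responses so repeated prompts skip generation entirely."""

    def __init__(self, model: str = "llama3.2:1b"):
        self.model = model
        self._cache = {}  # sha256 key -> generated text

    def _key(self, prompt: str) -> str:
        # Key on model + prompt so identical requests hit the cache.
        return hashlib.sha256(f"{self.model}\x00{prompt}".encode()).hexdigest()

    def generate(self, prompt: str, max_tokens: int = 100) -> str:
        key = self._key(prompt)
        if key not in self._cache:
            response = ollama.generate(
                model=self.model,
                prompt=prompt,
                options={"temperature": 0.7, "num_predict": max_tokens},
            )
            self._cache[key] = response["response"].strip()
        return self._cache[key]
```

Note that exact-match keys only pay off when consolidation re-issues identical prompts; catching merely *similar* inputs would require an embedding-based lookup rather than a hash.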

--------------------------------------------------------------------------------
/claude-hooks/utilities/adaptive-pattern-detector.js:
--------------------------------------------------------------------------------

```javascript
  1 | /**
  2 |  * Adaptive Pattern Detector
  3 |  * Detects natural language patterns that suggest a memory trigger is needed
  4 |  */
  5 | 
  6 | class AdaptivePatternDetector {
  7 |     constructor(config = {}, performanceManager = null) {
  8 |         this.config = config;
  9 |         this.performanceManager = performanceManager;
 10 | 
 11 |         // Pattern sensitivity (0-1, where 1 is most sensitive)
 12 |         this.sensitivity = config.sensitivity || 0.7;
 13 | 
 14 |         // Pattern categories with performance tiers
 15 |         this.patterns = {
 16 |             // Tier 1: Instant patterns (regex-based, < 50ms)
 17 |             instant: {
 18 |                 explicitMemoryRequests: [
 19 |                     {
 20 |                         pattern: /what (did|do) we (decide|choose|do|discuss) (about|regarding|for|with)/i,
 21 |                         confidence: 0.9,
 22 |                         description: 'Direct memory request'
 23 |                     },
 24 |                     {
 25 |                         pattern: /remind me (about|how|what|of|regarding)/i,
 26 |                         confidence: 0.9,
 27 |                         description: 'Explicit reminder request'
 28 |                     },
 29 |                     {
 30 |                         pattern: /remember (when|how|what|that) we/i,
 31 |                         confidence: 0.8,
 32 |                         description: 'Memory recall request'
 33 |                     },
 34 |                     {
 35 |                         pattern: /according to (our|the) (previous|earlier|last)/i,
 36 |                         confidence: 0.8,
 37 |                         description: 'Reference to past decisions'
 38 |                     }
 39 |                 ],
 40 | 
 41 |                 pastWorkReferences: [
 42 |                     {
 43 |                         pattern: /similar to (what|how) we (did|used|implemented)/i,
 44 |                         confidence: 0.7,
 45 |                         description: 'Comparison to past work'
 46 |                     },
 47 |                     {
 48 |                         pattern: /like (we|the) (discussed|decided|implemented|chose) (before|earlier|previously)/i,
 49 |                         confidence: 0.7,
 50 |                         description: 'Reference to past implementation'
 51 |                     },
 52 |                     {
 53 |                         pattern: /the (same|approach|solution|pattern) (we|that) (used|implemented|chose)/i,
 54 |                         confidence: 0.6,
 55 |                         description: 'Reuse of past solutions'
 56 |                     }
 57 |                 ],
 58 | 
 59 |                 questionPatterns: [
 60 |                     {
 61 |                         pattern: /^(how do|how did|how should|how can) we/i,
 62 |                         confidence: 0.5,
 63 |                         description: 'Implementation question'
 64 |                     },
 65 |                     {
 66 |                         pattern: /^(what is|what was|what should be) (our|the) (approach|strategy|pattern)/i,
 67 |                         confidence: 0.6,
 68 |                         description: 'Strategy question'
 69 |                     },
 70 |                     {
 71 |                         pattern: /^(why did|why do|why should) we (choose|use|implement)/i,
 72 |                         confidence: 0.5,
 73 |                         description: 'Rationale question'
 74 |                     }
 75 |                 ]
 76 |             },
 77 | 
 78 |             // Tier 2: Fast patterns (contextual analysis, < 150ms)
 79 |             fast: {
 80 |                 technicalDiscussions: [
 81 |                     {
 82 |                         pattern: /\b(architecture|design|pattern|approach|strategy|implementation)\b/i,
 83 |                         context: ['technical', 'decision'],
 84 |                         confidence: 0.4,
 85 |                         description: 'Technical architecture discussion'
 86 |                     },
 87 |                     {
 88 |                         pattern: /\b(authentication|authorization|security|oauth|jwt)\b/i,
 89 |                         context: ['security', 'implementation'],
 90 |                         confidence: 0.5,
 91 |                         description: 'Security implementation discussion'
 92 |                     },
 93 |                     {
 94 |                         pattern: /\b(database|storage|persistence|schema|migration)\b/i,
 95 |                         context: ['data', 'implementation'],
 96 |                         confidence: 0.5,
 97 |                         description: 'Data layer discussion'
 98 |                     }
 99 |                 ],
100 | 
101 |                 projectContinuity: [
102 |                     {
103 |                         pattern: /\b(continue|continuing|resume|pick up where)\b/i,
104 |                         context: ['continuation'],
105 |                         confidence: 0.6,
106 |                         description: 'Project continuation'
107 |                     },
108 |                     {
109 |                         pattern: /\b(next step|next phase|moving forward|proceed with)\b/i,
110 |                         context: ['progression'],
111 |                         confidence: 0.4,
112 |                         description: 'Project progression'
113 |                     }
114 |                 ],
115 | 
116 |                 problemSolving: [
117 |                     {
118 |                         pattern: /\b(issue|problem|bug|error|failure) (with|in|regarding)/i,
119 |                         context: ['troubleshooting'],
120 |                         confidence: 0.6,
121 |                         description: 'Problem solving discussion'
122 |                     },
123 |                     {
124 |                         pattern: /\b(fix|resolve|solve|debug|troubleshoot)\b/i,
125 |                         context: ['troubleshooting'],
126 |                         confidence: 0.4,
127 |                         description: 'Problem resolution'
128 |                     }
129 |                 ]
130 |             },
131 | 
132 |             // Tier 3: Intensive patterns (semantic analysis, < 500ms)
133 |             intensive: {
134 |                 contextualReferences: [
135 |                     {
136 |                         semantic: ['previous discussion', 'earlier conversation', 'past decision'],
137 |                         confidence: 0.7,
138 |                         description: 'Contextual reference to past'
139 |                     },
140 |                     {
141 |                         semantic: ['established pattern', 'agreed approach', 'standard practice'],
142 |                         confidence: 0.6,
143 |                         description: 'Reference to established practices'
144 |                     }
145 |                 ],
146 | 
147 |                 complexQuestions: [
148 |                     {
149 |                         semantic: ['best practice', 'recommended approach', 'optimal solution'],
150 |                         confidence: 0.5,
151 |                         description: 'Best practice inquiry'
152 |                     }
153 |                 ]
154 |             }
155 |         };
156 | 
157 |         // Pattern matching statistics
158 |         this.stats = {
159 |             totalMatches: 0,
160 |             patternHits: new Map(),
161 |             falsePositives: 0,
162 |             userFeedback: []
163 |         };
164 | 
165 |         // Adaptive learning
166 |         this.adaptiveSettings = {
167 |             learningEnabled: config.adaptiveLearning !== false,
168 |             confidenceAdjustments: new Map(),
169 |             userPreferences: new Map()
170 |         };
171 |     }
172 | 
173 |     /**
174 |      * Detect patterns in user message with tiered approach
175 |      */
176 |     async detectPatterns(message, context = {}) {
177 |         const results = {
178 |             matches: [],
179 |             confidence: 0,
180 |             processingTier: 'none',
181 |             triggerRecommendation: false
182 |         };
183 | 
184 |         // Tier 1: Instant pattern detection
185 |         if (this.shouldRunTier('instant')) {
186 |             const timing = this.performanceManager?.startTiming('pattern_detection_instant', 'instant');
187 | 
188 |             const instantMatches = this.detectInstantPatterns(message);
189 |             results.matches.push(...instantMatches);
190 |             results.processingTier = 'instant';
191 | 
192 |             if (timing) this.performanceManager.endTiming(timing);
193 | 
194 |             // Early return if high confidence instant match
195 |             const maxConfidence = Math.max(...instantMatches.map(m => m.confidence), 0);
196 |             if (maxConfidence > 0.8) {
197 |                 results.confidence = maxConfidence;
198 |                 results.triggerRecommendation = true;
199 |                 return results;
200 |             }
201 |         }
202 | 
203 |         // Tier 2: Fast contextual analysis
204 |         if (this.shouldRunTier('fast')) {
205 |             const timing = this.performanceManager?.startTiming('pattern_detection_fast', 'fast');
206 | 
207 |             const fastMatches = this.detectFastPatterns(message, context);
208 |             results.matches.push(...fastMatches);
209 |             results.processingTier = 'fast';
210 | 
211 |             if (timing) this.performanceManager.endTiming(timing);
212 |         }
213 | 
214 |         // Tier 3: Intensive semantic analysis
215 |         if (this.shouldRunTier('intensive') && this.shouldRunIntensiveAnalysis(results.matches)) {
216 |             const timing = this.performanceManager?.startTiming('pattern_detection_intensive', 'intensive');
217 | 
218 |             const intensiveMatches = await this.detectIntensivePatterns(message, context);
219 |             results.matches.push(...intensiveMatches);
220 |             results.processingTier = 'intensive';
221 | 
222 |             if (timing) this.performanceManager.endTiming(timing);
223 |         }
224 | 
225 |         // Calculate overall confidence and recommendation
226 |         results.confidence = this.calculateOverallConfidence(results.matches);
227 |         results.triggerRecommendation = this.shouldRecommendTrigger(results);
228 | 
229 |         // Record statistics
230 |         this.recordPatternMatch(results);
231 | 
232 |         return results;
233 |     }
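
    // Illustrative sketch (hypothetical caller, not from this file): how the
    // tiered detection above might be consumed. The constructor arguments shown
    // are assumptions; the real signature is defined earlier in this file.
    //
    //   const detector = new AdaptivePatternDetector({ adaptiveLearning: true });
    //   const result = await detector.detectPatterns('what did we decide about auth?');
    //   if (result.triggerRecommendation) {
    //       // e.g. fetch relevant memories and surface them in the session
    //   }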
234 | 
235 |     /**
236 |      * Detect instant patterns using regex
237 |      */
238 |     detectInstantPatterns(message) {
239 |         const matches = [];
240 | 
241 |         for (const [category, patterns] of Object.entries(this.patterns.instant)) {
242 |             for (const patternDef of patterns) {
243 |                 if (patternDef.pattern.test(message)) {
244 |                     const adjustedConfidence = this.adjustConfidence(patternDef.confidence, category);
245 | 
246 |                     matches.push({
247 |                         type: 'instant',
248 |                         category,
249 |                         pattern: patternDef.description,
250 |                         confidence: adjustedConfidence,
251 |                         message: patternDef.description
252 |                     });
 253 | 
 254 |                     // Note: pattern hits are recorded centrally in recordPatternMatch();
 255 |                     // incrementing this.stats.patternHits here as well would double-count
 256 |                     // every instant match.
257 |                 }
258 |             }
259 |         }
260 | 
261 |         return matches;
262 |     }
263 | 
264 |     /**
265 |      * Detect fast patterns with context analysis
266 |      */
267 |     detectFastPatterns(message, context) {
268 |         const matches = [];
269 | 
270 |         for (const [category, patterns] of Object.entries(this.patterns.fast)) {
271 |             for (const patternDef of patterns) {
272 |                 if (patternDef.pattern.test(message)) {
273 |                     // Check if context matches
274 |                     const contextMatch = this.checkContextMatch(patternDef.context, context);
275 |                     const contextBoost = contextMatch ? 0.2 : 0;
276 | 
277 |                     const adjustedConfidence = this.adjustConfidence(
278 |                         patternDef.confidence + contextBoost,
279 |                         category
280 |                     );
281 | 
282 |                     matches.push({
283 |                         type: 'fast',
284 |                         category,
285 |                         pattern: patternDef.description,
286 |                         confidence: adjustedConfidence,
287 |                         contextMatch,
288 |                         message: patternDef.description
289 |                     });
290 |                 }
291 |             }
292 |         }
293 | 
294 |         return matches;
295 |     }
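
    // Worked example: "what's our authentication approach?" matches the security
    // pattern (base 0.5). Given context { topic: 'security review' }, the
    // 'security' requirement matches and the boost applies: 0.5 + 0.2 = 0.7,
    // before sensitivity scaling in adjustConfidence().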
296 | 
297 |     /**
298 |      * Detect intensive patterns with semantic analysis
299 |      */
300 |     async detectIntensivePatterns(message, context) {
301 |         const matches = [];
302 | 
 303 |         // Placeholder for true semantic analysis: an embedding or NLP library could
 304 |         // plug in here. For now we approximate it with multi-word keyword matching.
305 | 
306 |         for (const [category, patterns] of Object.entries(this.patterns.intensive)) {
307 |             for (const patternDef of patterns) {
308 |                 if (patternDef.semantic) {
309 |                     const semanticMatch = this.checkSemanticMatch(message, patternDef.semantic);
310 |                     if (semanticMatch.isMatch) {
311 |                         const adjustedConfidence = this.adjustConfidence(
312 |                             patternDef.confidence * semanticMatch.similarity,
313 |                             category
314 |                         );
315 | 
316 |                         matches.push({
317 |                             type: 'intensive',
318 |                             category,
319 |                             pattern: patternDef.description,
320 |                             confidence: adjustedConfidence,
321 |                             similarity: semanticMatch.similarity,
322 |                             message: patternDef.description
323 |                         });
324 |                     }
325 |                 }
326 |             }
327 |         }
328 | 
329 |         return matches;
330 |     }
331 | 
332 |     /**
333 |      * Check if semantic patterns match (simplified implementation)
334 |      */
335 |     checkSemanticMatch(message, semanticTerms) {
336 |         const messageLower = message.toLowerCase();
337 |         let matchCount = 0;
338 |         let totalTerms = 0;
339 | 
340 |         for (const term of semanticTerms) {
341 |             totalTerms++;
342 |             // Simple keyword matching - in practice, this would use semantic similarity
343 |             const words = term.toLowerCase().split(' ');
344 |             const termMatches = words.every(word => messageLower.includes(word));
345 | 
346 |             if (termMatches) {
347 |                 matchCount++;
348 |             }
349 |         }
350 | 
351 |         // Prevent division by zero
352 |         if (totalTerms === 0) {
353 |             return {
354 |                 isMatch: false,
355 |                 similarity: 0
356 |             };
357 |         }
358 | 
359 |         const similarity = matchCount / totalTerms;
360 |         return {
361 |             isMatch: similarity > 0.3, // Threshold for semantic match
362 |             similarity
363 |         };
364 |     }
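
    // Worked example: against the semantic terms ['previous discussion',
    // 'earlier conversation', 'past decision'], the message "summarize our
    // previous discussion on caching" matches only the first term, giving
    // similarity 1/3 ≈ 0.33, just above the 0.3 threshold, so it counts as a match.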
365 | 
366 |     /**
367 |      * Check if context matches pattern requirements
368 |      */
369 |     checkContextMatch(requiredContext, actualContext) {
370 |         if (!requiredContext || !actualContext) return false;
371 | 
372 |         return requiredContext.some(reqCtx =>
373 |             Object.keys(actualContext).some(key =>
374 |                 key.toLowerCase().includes(reqCtx.toLowerCase()) ||
375 |                 (typeof actualContext[key] === 'string' &&
376 |                  actualContext[key].toLowerCase().includes(reqCtx.toLowerCase()))
377 |             )
378 |         );
379 |     }
380 | 
381 |     /**
 382 |      * Adjust confidence using the sensitivity setting and learned feedback adjustments
383 |      */
384 |     adjustConfidence(baseConfidence, category) {
385 |         let adjusted = baseConfidence;
386 | 
387 |         // Apply sensitivity adjustment
388 |         adjusted = adjusted * this.sensitivity;
389 | 
390 |         // Apply learned adjustments
391 |         if (this.adaptiveSettings.confidenceAdjustments.has(category)) {
392 |             const adjustment = this.adaptiveSettings.confidenceAdjustments.get(category);
393 |             adjusted = Math.max(0, Math.min(1, adjusted + adjustment));
394 |         }
395 | 
396 |         return adjusted;
397 |     }
398 | 
399 |     /**
400 |      * Calculate overall confidence from all matches
401 |      */
402 |     calculateOverallConfidence(matches) {
403 |         if (matches.length === 0) return 0;
404 | 
405 |         // Weight by match type (instant > fast > intensive for reliability)
406 |         const weights = { instant: 1.0, fast: 0.8, intensive: 0.6 };
407 |         let weightedSum = 0;
408 |         let totalWeight = 0;
409 | 
410 |         for (const match of matches) {
411 |             const weight = weights[match.type] || 0.5;
412 |             weightedSum += match.confidence * weight;
413 |             totalWeight += weight;
414 |         }
415 | 
416 |         return totalWeight > 0 ? weightedSum / totalWeight : 0;
417 |     }
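
    // Worked example: an instant match at 0.9 (weight 1.0) plus a fast match at
    // 0.6 (weight 0.8) gives (0.9*1.0 + 0.6*0.8) / (1.0 + 0.8) = 1.38 / 1.8 ≈ 0.77,
    // so strong instant evidence dominates weaker contextual signals.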
418 | 
419 |     /**
420 |      * Determine if we should recommend triggering memory hooks
421 |      */
422 |     shouldRecommendTrigger(results) {
423 |         const confidence = results.confidence;
424 |         const matchCount = results.matches.length;
425 | 
426 |         // High confidence single match
427 |         if (confidence > 0.8) return true;
428 | 
429 |         // Medium confidence with multiple matches
430 |         if (confidence > 0.6 && matchCount > 1) return true;
431 | 
432 |         // Lower threshold for explicit memory requests
433 |         const hasExplicitRequest = results.matches.some(m =>
434 |             m.category === 'explicitMemoryRequests' && m.confidence > 0.5
435 |         );
436 |         if (hasExplicitRequest) return true;
437 | 
438 |         // Any match with reasonable confidence should trigger
439 |         if (matchCount > 0 && confidence > 0.4) return true;
440 | 
441 |         return false;
442 |     }
443 | 
444 |     /**
445 |      * Determine if we should run intensive analysis
446 |      */
447 |     shouldRunIntensiveAnalysis(currentMatches) {
448 |         // Only run intensive if we have some matches but low confidence
449 |         const hasMatches = currentMatches.length > 0;
450 |         const lowConfidence = Math.max(...currentMatches.map(m => m.confidence), 0) < 0.7;
451 | 
452 |         return hasMatches && lowConfidence;
453 |     }
454 | 
455 |     /**
456 |      * Check if a processing tier should run
457 |      */
458 |     shouldRunTier(tier) {
459 |         if (!this.performanceManager) return true;
460 | 
461 |         const tierMap = {
462 |             instant: 'instant',
463 |             fast: 'fast',
464 |             intensive: 'intensive'
465 |         };
466 | 
467 |         try {
468 |             return this.performanceManager.shouldRunHook(`pattern_detection_${tier}`, tierMap[tier]);
469 |         } catch (error) {
470 |             // If performance manager fails, allow the tier to run
471 |             console.warn(`[Pattern Detector] Performance check failed for ${tier}: ${error.message}`);
472 |             return true;
473 |         }
474 |     }
475 | 
476 |     /**
477 |      * Record pattern match for statistics and learning
478 |      */
479 |     recordPatternMatch(results) {
480 |         this.stats.totalMatches++;
481 | 
482 |         // Update pattern hit statistics
483 |         for (const match of results.matches) {
484 |             const key = `${match.category}:${match.pattern}`;
485 |             this.stats.patternHits.set(key, (this.stats.patternHits.get(key) || 0) + 1);
486 |         }
487 |     }
488 | 
489 |     /**
490 |      * Learn from user feedback
491 |      */
492 |     recordUserFeedback(isPositive, patternResults, context = {}) {
493 |         if (!this.adaptiveSettings.learningEnabled) return;
494 | 
495 |         const feedback = {
496 |             positive: isPositive,
497 |             patterns: patternResults.matches.map(m => ({
498 |                 category: m.category,
499 |                 pattern: m.pattern,
500 |                 confidence: m.confidence
501 |             })),
502 |             overallConfidence: patternResults.confidence,
503 |             triggerRecommendation: patternResults.triggerRecommendation,
504 |             timestamp: Date.now(),
505 |             context
506 |         };
507 | 
508 |         this.stats.userFeedback.push(feedback);
509 | 
510 |         // Adjust confidence for patterns based on feedback
511 |         this.adjustPatternsFromFeedback(feedback);
512 | 
513 |         // Keep feedback history manageable
514 |         if (this.stats.userFeedback.length > 100) {
515 |             this.stats.userFeedback.splice(0, 20);
516 |         }
517 |     }
518 | 
519 |     /**
520 |      * Adjust pattern confidence based on user feedback
521 |      */
522 |     adjustPatternsFromFeedback(feedback) {
523 |         const adjustmentFactor = feedback.positive ? 0.05 : -0.05;
524 | 
525 |         for (const patternInfo of feedback.patterns) {
526 |             const currentAdjustment = this.adaptiveSettings.confidenceAdjustments.get(patternInfo.category) || 0;
527 |             const newAdjustment = Math.max(-0.3, Math.min(0.3, currentAdjustment + adjustmentFactor));
528 |             this.adaptiveSettings.confidenceAdjustments.set(patternInfo.category, newAdjustment);
529 |         }
530 |     }
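
    // Worked example: three negative feedback events on 'questionPatterns' move
    // its adjustment to 3 * -0.05 = -0.15; the ±0.3 clamp keeps a run of
    // one-sided feedback from silencing or over-amplifying a whole category.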
531 | 
532 |     /**
533 |      * Get pattern detection statistics
534 |      */
535 |     getStatistics() {
536 |         const recentFeedback = this.stats.userFeedback.slice(-20);
537 |         const positiveRate = recentFeedback.length > 0 ?
538 |             recentFeedback.filter(f => f.positive).length / recentFeedback.length : 0;
539 | 
540 |         return {
541 |             totalMatches: this.stats.totalMatches,
542 |             patternHitCounts: Object.fromEntries(this.stats.patternHits),
543 |             positiveRate: Math.round(positiveRate * 100),
544 |             confidenceAdjustments: Object.fromEntries(this.adaptiveSettings.confidenceAdjustments),
545 |             sensitivity: this.sensitivity,
546 |             learningEnabled: this.adaptiveSettings.learningEnabled
547 |         };
548 |     }
549 | 
550 |     /**
551 |      * Update sensitivity setting
552 |      */
553 |     updateSensitivity(newSensitivity) {
554 |         this.sensitivity = Math.max(0, Math.min(1, newSensitivity));
555 |     }
556 | 
557 |     /**
558 |      * Reset learning data (useful for testing)
559 |      */
560 |     resetLearning() {
561 |         this.adaptiveSettings.confidenceAdjustments.clear();
562 |         this.stats.userFeedback = [];
563 |         this.stats.patternHits.clear();
564 |         this.stats.totalMatches = 0;
565 |     }
566 | }
567 | 
568 | module.exports = { AdaptivePatternDetector };
```
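
A minimal end-to-end sketch of how this module might be wired up. The require path and config fields are assumptions for illustration; only `detectPatterns`, `recordUserFeedback`, and `getStatistics` come from the class as written above.

```javascript
// Minimal usage sketch; the module path and config shape are assumptions.
const { AdaptivePatternDetector } = require('./adaptive-pattern-detector');

async function demo() {
    // The class reads config.adaptiveLearning; other constructor details
    // (sensitivity default, performance manager) live earlier in the file.
    const detector = new AdaptivePatternDetector({ adaptiveLearning: true });

    const result = await detector.detectPatterns(
        'remind me how we implemented oauth',
        { topic: 'security' }
    );

    if (result.triggerRecommendation) {
        // ...retrieve memories here, then report whether they were useful
        // so the adaptive confidence adjustments can learn from it:
        detector.recordUserFeedback(true, result);
    }

    console.log(detector.getStatistics());
}

demo().catch(console.error);
```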