This is page 1 of 27. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── commands
│ │ ├── release
│ │ │ ├── beta.md
│ │ │ ├── changelog.md
│ │ │ ├── release-check.md
│ │ │ └── release.md
│ │ ├── spec.md
│ │ └── test-live.md
│ └── settings.json
├── .dockerignore
├── .env.example
├── .github
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── documentation.md
│ │ └── feature_request.md
│ └── workflows
│ ├── claude-code-review.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── dev-release.yml
│ ├── docker.yml
│ ├── pr-title.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose-postgres.yml
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── ai-assistant-guide-extended.md
│ ├── ARCHITECTURE.md
│ ├── character-handling.md
│ ├── cloud-cli.md
│ ├── Docker.md
│ └── testing-coverage.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│ ├── SPEC-1 Specification-Driven Development Process.md
│ ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│ ├── SPEC-11 Basic Memory API Performance Optimization.md
│ ├── SPEC-12 OpenTelemetry Observability.md
│ ├── SPEC-13 CLI Authentication with Subscription Validation.md
│ ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│ ├── SPEC-16 MCP Cloud Service Consolidation.md
│ ├── SPEC-17 Semantic Search with ChromaDB.md
│ ├── SPEC-18 AI Memory Management Tool.md
│ ├── SPEC-19 Sync Performance and Memory Optimization.md
│ ├── SPEC-2 Slash Commands Reference.md
│ ├── SPEC-20 Simplified Project-Scoped Rclone Sync.md
│ ├── SPEC-3 Agent Definitions.md
│ ├── SPEC-4 Notes Web UI Component Architecture.md
│ ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│ ├── SPEC-6 Explicit Project Parameter Architecture.md
│ ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│ ├── SPEC-8 TigrisFS Integration.md
│ ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│ ├── SPEC-9 Signed Header Tenant Information.md
│ └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│ └── basic_memory
│ ├── __init__.py
│ ├── alembic
│ │ ├── alembic.ini
│ │ ├── env.py
│ │ ├── migrations.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ ├── 314f1ea54dc4_add_postgres_full_text_search_support_.py
│ │ ├── 3dae7c7b1564_initial_schema.py
│ │ ├── 502b60eaa905_remove_required_from_entity_permalink.py
│ │ ├── 5fe1ab1ccebe_add_projects_table.py
│ │ ├── 647e7a75e2cd_project_constraint_fix.py
│ │ ├── 6830751f5fb6_merge_multiple_heads.py
│ │ ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│ │ ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│ │ ├── a2b3c4d5e6f7_add_search_index_entity_cascade.py
│ │ ├── b3c3938bacdb_relation_to_name_unique_index.py
│ │ ├── cc7172b46608_update_search_index_schema.py
│ │ ├── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│ │ ├── f8a9b2c3d4e5_add_pg_trgm_for_fuzzy_link_resolution.py
│ │ └── g9a0b3c4d5e6_add_external_id_to_project_and_entity.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── container.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── directory_router.py
│ │ │ ├── importer_router.py
│ │ │ ├── knowledge_router.py
│ │ │ ├── management_router.py
│ │ │ ├── memory_router.py
│ │ │ ├── project_router.py
│ │ │ ├── prompt_router.py
│ │ │ ├── resource_router.py
│ │ │ ├── search_router.py
│ │ │ └── utils.py
│ │ ├── template_loader.py
│ │ └── v2
│ │ ├── __init__.py
│ │ └── routers
│ │ ├── __init__.py
│ │ ├── directory_router.py
│ │ ├── importer_router.py
│ │ ├── knowledge_router.py
│ │ ├── memory_router.py
│ │ ├── project_router.py
│ │ ├── prompt_router.py
│ │ ├── resource_router.py
│ │ └── search_router.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── auth.py
│ │ ├── commands
│ │ │ ├── __init__.py
│ │ │ ├── cloud
│ │ │ │ ├── __init__.py
│ │ │ │ ├── api_client.py
│ │ │ │ ├── bisync_commands.py
│ │ │ │ ├── cloud_utils.py
│ │ │ │ ├── core_commands.py
│ │ │ │ ├── rclone_commands.py
│ │ │ │ ├── rclone_config.py
│ │ │ │ ├── rclone_installer.py
│ │ │ │ ├── upload_command.py
│ │ │ │ └── upload.py
│ │ │ ├── command_utils.py
│ │ │ ├── db.py
│ │ │ ├── format.py
│ │ │ ├── import_chatgpt.py
│ │ │ ├── import_claude_conversations.py
│ │ │ ├── import_claude_projects.py
│ │ │ ├── import_memory_json.py
│ │ │ ├── mcp.py
│ │ │ ├── project.py
│ │ │ ├── status.py
│ │ │ ├── telemetry.py
│ │ │ └── tool.py
│ │ ├── container.py
│ │ └── main.py
│ ├── config.py
│ ├── db.py
│ ├── deps
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── db.py
│ │ ├── importers.py
│ │ ├── projects.py
│ │ ├── repositories.py
│ │ └── services.py
│ ├── deps.py
│ ├── file_utils.py
│ ├── ignore_utils.py
│ ├── importers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chatgpt_importer.py
│ │ ├── claude_conversations_importer.py
│ │ ├── claude_projects_importer.py
│ │ ├── memory_json_importer.py
│ │ └── utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── entity_parser.py
│ │ ├── markdown_processor.py
│ │ ├── plugins.py
│ │ ├── schemas.py
│ │ └── utils.py
│ ├── mcp
│ │ ├── __init__.py
│ │ ├── async_client.py
│ │ ├── clients
│ │ │ ├── __init__.py
│ │ │ ├── directory.py
│ │ │ ├── knowledge.py
│ │ │ ├── memory.py
│ │ │ ├── project.py
│ │ │ ├── resource.py
│ │ │ └── search.py
│ │ ├── container.py
│ │ ├── project_context.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── ai_assistant_guide.py
│ │ │ ├── continue_conversation.py
│ │ │ ├── recent_activity.py
│ │ │ ├── search.py
│ │ │ └── utils.py
│ │ ├── resources
│ │ │ ├── ai_assistant_guide.md
│ │ │ └── project_info.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── build_context.py
│ │ ├── canvas.py
│ │ ├── chatgpt_tools.py
│ │ ├── delete_note.py
│ │ ├── edit_note.py
│ │ ├── list_directory.py
│ │ ├── move_note.py
│ │ ├── project_management.py
│ │ ├── read_content.py
│ │ ├── read_note.py
│ │ ├── recent_activity.py
│ │ ├── search.py
│ │ ├── utils.py
│ │ ├── view_note.py
│ │ └── write_note.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── knowledge.py
│ │ ├── project.py
│ │ └── search.py
│ ├── project_resolver.py
│ ├── repository
│ │ ├── __init__.py
│ │ ├── entity_repository.py
│ │ ├── observation_repository.py
│ │ ├── postgres_search_repository.py
│ │ ├── project_info_repository.py
│ │ ├── project_repository.py
│ │ ├── relation_repository.py
│ │ ├── repository.py
│ │ ├── search_index_row.py
│ │ ├── search_repository_base.py
│ │ ├── search_repository.py
│ │ └── sqlite_search_repository.py
│ ├── runtime.py
│ ├── schemas
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloud.py
│ │ ├── delete.py
│ │ ├── directory.py
│ │ ├── importer.py
│ │ ├── memory.py
│ │ ├── project_info.py
│ │ ├── prompt.py
│ │ ├── request.py
│ │ ├── response.py
│ │ ├── search.py
│ │ ├── sync_report.py
│ │ └── v2
│ │ ├── __init__.py
│ │ ├── entity.py
│ │ └── resource.py
│ ├── services
│ │ ├── __init__.py
│ │ ├── context_service.py
│ │ ├── directory_service.py
│ │ ├── entity_service.py
│ │ ├── exceptions.py
│ │ ├── file_service.py
│ │ ├── initialization.py
│ │ ├── link_resolver.py
│ │ ├── project_service.py
│ │ ├── search_service.py
│ │ └── service.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── background_sync.py
│ │ ├── coordinator.py
│ │ ├── sync_service.py
│ │ └── watch_service.py
│ ├── telemetry.py
│ ├── templates
│ │ └── prompts
│ │ ├── continue_conversation.hbs
│ │ └── search.hbs
│ └── utils.py
├── test-int
│ ├── BENCHMARKS.md
│ ├── cli
│ │ ├── test_project_commands_integration.py
│ │ └── test_version_integration.py
│ ├── conftest.py
│ ├── mcp
│ │ ├── test_build_context_underscore.py
│ │ ├── test_build_context_validation.py
│ │ ├── test_chatgpt_tools_integration.py
│ │ ├── test_default_project_mode_integration.py
│ │ ├── test_delete_note_integration.py
│ │ ├── test_edit_note_integration.py
│ │ ├── test_lifespan_shutdown_sync_task_cancellation_integration.py
│ │ ├── test_list_directory_integration.py
│ │ ├── test_move_note_integration.py
│ │ ├── test_project_management_integration.py
│ │ ├── test_project_state_sync_integration.py
│ │ ├── test_read_content_integration.py
│ │ ├── test_read_note_integration.py
│ │ ├── test_search_integration.py
│ │ ├── test_single_project_mcp_integration.py
│ │ └── test_write_note_integration.py
│ ├── test_db_wal_mode.py
│ └── test_disable_permalinks_integration.py
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── conftest.py
│ │ ├── test_api_container.py
│ │ ├── test_async_client.py
│ │ ├── test_continue_conversation_template.py
│ │ ├── test_directory_router.py
│ │ ├── test_importer_router.py
│ │ ├── test_knowledge_router.py
│ │ ├── test_management_router.py
│ │ ├── test_memory_router.py
│ │ ├── test_project_router_operations.py
│ │ ├── test_project_router.py
│ │ ├── test_prompt_router.py
│ │ ├── test_relation_background_resolution.py
│ │ ├── test_resource_router.py
│ │ ├── test_search_router.py
│ │ ├── test_search_template.py
│ │ ├── test_template_loader_helpers.py
│ │ ├── test_template_loader.py
│ │ └── v2
│ │ ├── __init__.py
│ │ ├── conftest.py
│ │ ├── test_directory_router.py
│ │ ├── test_importer_router.py
│ │ ├── test_knowledge_router.py
│ │ ├── test_memory_router.py
│ │ ├── test_project_router.py
│ │ ├── test_prompt_router.py
│ │ ├── test_resource_router.py
│ │ └── test_search_router.py
│ ├── cli
│ │ ├── cloud
│ │ │ ├── test_cloud_api_client_and_utils.py
│ │ │ ├── test_rclone_config_and_bmignore_filters.py
│ │ │ └── test_upload_path.py
│ │ ├── conftest.py
│ │ ├── test_auth_cli_auth.py
│ │ ├── test_cli_container.py
│ │ ├── test_cli_exit.py
│ │ ├── test_cli_tool_exit.py
│ │ ├── test_cli_tools.py
│ │ ├── test_cloud_authentication.py
│ │ ├── test_ignore_utils.py
│ │ ├── test_import_chatgpt.py
│ │ ├── test_import_claude_conversations.py
│ │ ├── test_import_claude_projects.py
│ │ ├── test_import_memory_json.py
│ │ ├── test_project_add_with_local_path.py
│ │ └── test_upload.py
│ ├── conftest.py
│ ├── db
│ │ └── test_issue_254_foreign_key_constraints.py
│ ├── importers
│ │ ├── test_conversation_indexing.py
│ │ ├── test_importer_base.py
│ │ └── test_importer_utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── test_date_frontmatter_parsing.py
│ │ ├── test_entity_parser_error_handling.py
│ │ ├── test_entity_parser.py
│ │ ├── test_markdown_plugins.py
│ │ ├── test_markdown_processor.py
│ │ ├── test_observation_edge_cases.py
│ │ ├── test_parser_edge_cases.py
│ │ ├── test_relation_edge_cases.py
│ │ └── test_task_detection.py
│ ├── mcp
│ │ ├── clients
│ │ │ ├── __init__.py
│ │ │ └── test_clients.py
│ │ ├── conftest.py
│ │ ├── test_async_client_modes.py
│ │ ├── test_mcp_container.py
│ │ ├── test_obsidian_yaml_formatting.py
│ │ ├── test_permalink_collision_file_overwrite.py
│ │ ├── test_project_context.py
│ │ ├── test_prompts.py
│ │ ├── test_recent_activity_prompt_modes.py
│ │ ├── test_resources.py
│ │ ├── test_server_lifespan_branches.py
│ │ ├── test_tool_build_context.py
│ │ ├── test_tool_canvas.py
│ │ ├── test_tool_delete_note.py
│ │ ├── test_tool_edit_note.py
│ │ ├── test_tool_list_directory.py
│ │ ├── test_tool_move_note.py
│ │ ├── test_tool_project_management.py
│ │ ├── test_tool_read_content.py
│ │ ├── test_tool_read_note.py
│ │ ├── test_tool_recent_activity.py
│ │ ├── test_tool_resource.py
│ │ ├── test_tool_search.py
│ │ ├── test_tool_utils.py
│ │ ├── test_tool_view_note.py
│ │ ├── test_tool_write_note_kebab_filenames.py
│ │ ├── test_tool_write_note.py
│ │ └── tools
│ │ └── test_chatgpt_tools.py
│ ├── Non-MarkdownFileSupport.pdf
│ ├── README.md
│ ├── repository
│ │ ├── test_entity_repository_upsert.py
│ │ ├── test_entity_repository.py
│ │ ├── test_entity_upsert_issue_187.py
│ │ ├── test_observation_repository.py
│ │ ├── test_postgres_search_repository.py
│ │ ├── test_project_info_repository.py
│ │ ├── test_project_repository.py
│ │ ├── test_relation_repository.py
│ │ ├── test_repository.py
│ │ ├── test_search_repository_edit_bug_fix.py
│ │ └── test_search_repository.py
│ ├── schemas
│ │ ├── test_base_timeframe_minimum.py
│ │ ├── test_memory_serialization.py
│ │ ├── test_memory_url_validation.py
│ │ ├── test_memory_url.py
│ │ ├── test_relation_response_reference_resolution.py
│ │ ├── test_schemas.py
│ │ └── test_search.py
│ ├── Screenshot.png
│ ├── services
│ │ ├── test_context_service.py
│ │ ├── test_directory_service.py
│ │ ├── test_entity_service_disable_permalinks.py
│ │ ├── test_entity_service.py
│ │ ├── test_file_service.py
│ │ ├── test_initialization_cloud_mode_branches.py
│ │ ├── test_initialization.py
│ │ ├── test_link_resolver.py
│ │ ├── test_project_removal_bug.py
│ │ ├── test_project_service_operations.py
│ │ ├── test_project_service.py
│ │ └── test_search_service.py
│ ├── sync
│ │ ├── test_character_conflicts.py
│ │ ├── test_coordinator.py
│ │ ├── test_sync_service_incremental.py
│ │ ├── test_sync_service.py
│ │ ├── test_sync_wikilink_issue.py
│ │ ├── test_tmp_files.py
│ │ ├── test_watch_service_atomic_adds.py
│ │ ├── test_watch_service_edge_cases.py
│ │ ├── test_watch_service_reload.py
│ │ └── test_watch_service.py
│ ├── test_config.py
│ ├── test_deps.py
│ ├── test_production_cascade_delete.py
│ ├── test_project_resolver.py
│ ├── test_rclone_commands.py
│ ├── test_runtime.py
│ ├── test_telemetry.py
│ └── utils
│ ├── test_file_utils.py
│ ├── test_frontmatter_obsidian_compatible.py
│ ├── test_parse_tags.py
│ ├── test_permalink_formatting.py
│ ├── test_timezone_utils.py
│ ├── test_utf8_handling.py
│ └── test_validate_project_path.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.14
2 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | *.py[cod]
2 | __pycache__/
3 | .pytest_cache/
4 | .coverage
5 | htmlcov/
6 |
7 | # Distribution / packaging
8 | .Python
9 | build/
10 | develop-eggs/
11 | dist/
12 | downloads/
13 | eggs/
14 | .eggs/
15 | lib/
16 | lib64/
17 | parts/
18 | sdist/
19 | var/
20 | wheels/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
24 |
25 | # Installer artifacts
26 | installer/build/
27 | installer/dist/
28 | rw.*.dmg
29 |
30 | # Virtual environments
31 | .env
32 | .venv
33 | env/
34 | venv/
35 | ENV/
36 |
37 | # IDE
38 | .idea/
39 | .vscode/
40 | *.swp
41 | *.swo
42 |
43 | # macOS
44 | .DS_Store
45 | .coverage.*
46 |
47 | # obsidian docs:
48 | /docs/.obsidian/
49 | /examples/.obsidian/
50 | /examples/.basic-memory/
51 |
52 |
53 | # claude action
54 | claude-output
55 | **/.claude/settings.local.json
56 | .mcp.json
57 |
```
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
```
1 | # Git files
2 | .git/
3 | .gitignore
4 | .gitattributes
5 |
6 | # Development files
7 | .vscode/
8 | .idea/
9 | *.swp
10 | *.swo
11 | *~
12 |
13 | # Testing files
14 | tests/
15 | test-int/
16 | .pytest_cache/
17 | .coverage
18 | htmlcov/
19 |
20 | # Build artifacts
21 | build/
22 | dist/
23 | *.egg-info/
24 | __pycache__/
25 | *.pyc
26 | *.pyo
27 | *.pyd
28 | .Python
29 |
30 | # Virtual environments (uv creates these during build)
31 | .venv/
32 | venv/
33 | .env
34 |
35 | # CI/CD files
36 | .github/
37 |
38 | # Documentation (keep README.md and pyproject.toml)
39 | docs/
40 | CHANGELOG.md
41 | CLAUDE.md
42 | CONTRIBUTING.md
43 |
44 | # Example files not needed for runtime
45 | examples/
46 |
47 | # Local development files
48 | .basic-memory/
49 | *.db
50 | *.sqlite3
51 |
52 | # OS files
53 | .DS_Store
54 | Thumbs.db
55 |
56 | # Temporary files
57 | tmp/
58 | temp/
59 | *.tmp
60 | *.log
```
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
1 | # Basic Memory Environment Variables Example
2 | # Copy this file to .env and customize as needed
3 | # Note: .env files are gitignored and should never be committed
4 |
5 | # ============================================================================
6 | # PostgreSQL Test Database Configuration
7 | # ============================================================================
8 | # These variables allow you to override the default test database credentials
9 | # Default values match docker-compose-postgres.yml for local development
10 | #
11 | # Only needed if you want to use different credentials or a remote test database
12 | # By default, tests use: postgresql://basic_memory_user:dev_password@localhost:5433/basic_memory_test
13 |
14 | # Full PostgreSQL test database URL (used by tests and migrations)
15 | # POSTGRES_TEST_URL=postgresql+asyncpg://basic_memory_user:dev_password@localhost:5433/basic_memory_test
16 |
17 | # Individual components (used by justfile postgres-reset command)
18 | # POSTGRES_USER=basic_memory_user
19 | # POSTGRES_TEST_DB=basic_memory_test
20 |
21 | # ============================================================================
22 | # Production Database Configuration
23 | # ============================================================================
24 | # For production use, set these in your deployment environment
25 | # DO NOT use the test credentials above in production!
26 |
27 | # BASIC_MEMORY_DATABASE_BACKEND=postgres # or "sqlite"
28 | # BASIC_MEMORY_DATABASE_URL=postgresql+asyncpg://user:password@host:port/database
29 |
```
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Dual-Backend Testing
2 |
3 | Basic Memory tests run against both SQLite and Postgres backends to ensure compatibility.
4 |
5 | ## Quick Start
6 |
7 | ```bash
8 | # Run tests against SQLite only (default, no setup needed)
9 | pytest
10 |
11 | # Run tests against Postgres only (requires docker-compose)
12 | docker-compose -f docker-compose-postgres.yml up -d
13 | pytest -m postgres
14 |
15 | # Run tests against BOTH backends
16 | docker-compose -f docker-compose-postgres.yml up -d
17 | pytest --run-all-backends # Not yet implemented - run both commands above
18 | ```
19 |
20 | ## How It Works
21 |
22 | ### Parametrized Backend Fixture
23 |
24 | The `db_backend` fixture is parametrized to run tests against both `sqlite` and `postgres`:
25 |
26 | ```python
27 | @pytest.fixture(
28 | params=[
29 | pytest.param("sqlite", id="sqlite"),
30 | pytest.param("postgres", id="postgres", marks=pytest.mark.postgres),
31 | ]
32 | )
33 | def db_backend(request) -> Literal["sqlite", "postgres"]:
34 | return request.param
35 | ```
36 |
37 | ### Backend-Specific Engine Factories
38 |
39 | Each backend has its own engine factory implementation:
40 |
41 | - **`sqlite_engine_factory`** - Uses in-memory SQLite (fast, isolated)
42 | - **`postgres_engine_factory`** - Uses Postgres test database (realistic, requires Docker)
43 |
44 | The main `engine_factory` fixture delegates to the appropriate implementation based on `db_backend`.
45 |
46 | ### Configuration
47 |
48 | The `app_config` fixture automatically configures the correct backend:
49 |
50 | ```python
51 | # SQLite config
52 | database_backend = DatabaseBackend.SQLITE
53 | database_url = None # Uses default SQLite path
54 |
55 | # Postgres config
56 | database_backend = DatabaseBackend.POSTGRES
57 | database_url = "postgresql+asyncpg://basic_memory_user:dev_password@localhost:5433/basic_memory_test"
58 | ```
59 |
60 | ## Running Postgres Tests
61 |
62 | ### 1. Start Postgres Docker Container
63 |
64 | ```bash
65 | docker-compose -f docker-compose-postgres.yml up -d
66 | ```
67 |
68 | This starts:
69 | - Postgres 17 on port **5433** (not 5432 to avoid conflicts)
70 | - Test database: `basic_memory_test`
71 | - Credentials: `basic_memory_user` / `dev_password`
72 |
73 | ### 2. Run Postgres Tests
74 |
75 | ```bash
76 | # Run only Postgres tests
77 | pytest -m postgres
78 |
79 | # Run specific test with Postgres
80 | pytest tests/test_entity_repository.py::test_create -m postgres
81 |
82 | # Skip Postgres tests (default behavior)
83 | pytest -m "not postgres"
84 | ```
85 |
86 | ### 3. Stop Docker Container
87 |
88 | ```bash
89 | docker-compose -f docker-compose-postgres.yml down
90 | ```
91 |
92 | ## Test Isolation
93 |
94 | ### SQLite Tests
95 | - Each test gets a fresh in-memory database
96 | - Automatic cleanup (database destroyed after test)
97 | - No setup required
98 |
99 | ### Postgres Tests
100 | - Database is **cleaned before each test** (drop all tables, recreate)
101 | - Tests share the same Postgres instance but get isolated schemas
102 | - Requires Docker Compose to be running
103 |
104 | ## Markers
105 |
106 | - `postgres` - Marks tests that run against Postgres backend
107 | - Use `-m postgres` to run only Postgres tests
108 | - Use `-m "not postgres"` to skip Postgres tests (default)
109 |
110 | ## CI Integration
111 |
112 | ### GitHub Actions
113 |
114 | Use service containers for Postgres (no Docker Compose needed):
115 |
116 | ```yaml
117 | jobs:
118 | test:
119 | runs-on: ubuntu-latest
120 |
121 | # Postgres service container
122 | services:
123 | postgres:
124 | image: postgres:17
125 | env:
126 | POSTGRES_DB: basic_memory_test
127 | POSTGRES_USER: basic_memory_user
128 | POSTGRES_PASSWORD: dev_password
129 | ports:
130 | - 5433:5432
131 | options: >-
132 | --health-cmd pg_isready
133 | --health-interval 10s
134 | --health-timeout 5s
135 | --health-retries 5
136 |
137 | steps:
138 | - name: Run SQLite tests
139 | run: pytest -m "not postgres"
140 |
141 | - name: Run Postgres tests
142 | run: pytest -m postgres
143 | ```
144 |
145 | ## Troubleshooting
146 |
147 | ### Postgres tests fail with "connection refused"
148 |
149 | Make sure Docker Compose is running:
150 | ```bash
151 | docker-compose -f docker-compose-postgres.yml ps
152 | docker-compose -f docker-compose-postgres.yml logs postgres
153 | ```
154 |
155 | ### Port 5433 already in use
156 |
157 | Either:
158 | - Stop the conflicting service
159 | - Change the port in `docker-compose-postgres.yml` and `tests/conftest.py`
160 |
161 | ### Tests hang or timeout
162 |
163 | Check Postgres health:
164 | ```bash
165 | docker-compose -f docker-compose-postgres.yml exec postgres pg_isready -U basic_memory_user
166 | ```
167 |
168 | ## Future Enhancements
169 |
170 | - [ ] Add `--run-all-backends` CLI flag to run both backends in sequence
171 | - [ ] Implement test fixtures for backend-specific features (e.g., Postgres full-text search vs SQLite FTS5)
172 | - [ ] Add performance comparison benchmarks between backends
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | [](https://www.gnu.org/licenses/agpl-3.0)
2 | [](https://badge.fury.io/py/basic-memory)
3 | [](https://www.python.org/downloads/)
4 | [](https://github.com/basicmachines-co/basic-memory/actions)
5 | [](https://github.com/astral-sh/ruff)
6 | 
7 | 
8 | [](https://smithery.ai/server/@basicmachines-co/basic-memory)
9 |
10 | ## 🚀 Basic Memory Cloud is Live!
11 |
12 | - **Cross-device and multi-platform support is here.** Your knowledge graph now works on desktop, web, and mobile - seamlessly synced across all your AI tools (Claude, ChatGPT, Gemini, Claude Code, and Codex)
13 | - **Early Supporter Pricing:** Early users get 25% off forever.
14 | The open source project continues as always. Cloud just makes it work everywhere.
15 |
16 | [Sign up now →](https://basicmemory.com/beta)
17 |
18 | with a 7-day free trial
19 |
20 | # Basic Memory
21 |
22 | Basic Memory lets you build persistent knowledge through natural conversations with Large Language Models (LLMs) like
23 | Claude, while keeping everything in simple Markdown files on your computer. It uses the Model Context Protocol (MCP) to
24 | enable any compatible LLM to read and write to your local knowledge base.
25 |
26 | - Website: https://basicmachines.co
27 | - Documentation: https://memory.basicmachines.co
28 |
29 | ## Pick up your conversation right where you left off
30 |
31 | - AI assistants can load context from local files in a new conversation
32 | - Notes are saved locally as Markdown files in real time
33 | - No project knowledge or special prompting required
34 |
35 | https://github.com/user-attachments/assets/a55d8238-8dd0-454a-be4c-8860dbbd0ddc
36 |
37 | ## Quick Start
38 |
39 | ```bash
40 | # Install with uv (recommended)
41 | uv tool install basic-memory
42 |
43 | # Configure Claude Desktop (edit ~/Library/Application Support/Claude/claude_desktop_config.json)
44 | # Add this to your config:
45 | {
46 | "mcpServers": {
47 | "basic-memory": {
48 | "command": "uvx",
49 | "args": [
50 | "basic-memory",
51 | "mcp"
52 | ]
53 | }
54 | }
55 | }
56 | # Now in Claude Desktop, you can:
57 | # - Write notes with "Create a note about coffee brewing methods"
58 | # - Read notes with "What do I know about pour over coffee?"
59 | # - Search with "Find information about Ethiopian beans"
60 |
61 | ```
62 |
63 | You can view shared context via files in `~/basic-memory` (default directory location).
64 |
65 | ### Alternative Installation via Smithery
66 |
67 | You can use [Smithery](https://smithery.ai/server/@basicmachines-co/basic-memory) to automatically configure Basic
68 | Memory for Claude Desktop:
69 |
70 | ```bash
71 | npx -y @smithery/cli install @basicmachines-co/basic-memory --client claude
72 | ```
73 |
74 | This installs and configures Basic Memory without requiring manual edits to the Claude Desktop configuration file. The
75 | Smithery server hosts the MCP server component, while your data remains stored locally as Markdown files.
76 |
77 | ### Glama.ai
78 |
79 | <a href="https://glama.ai/mcp/servers/o90kttu9ym">
80 | <img width="380" height="200" src="https://glama.ai/mcp/servers/o90kttu9ym/badge" alt="basic-memory MCP server" />
81 | </a>
82 |
83 | ## Why Basic Memory?
84 |
85 | Most LLM interactions are ephemeral - you ask a question, get an answer, and everything is forgotten. Each conversation
86 | starts fresh, without the context or knowledge from previous ones. Current workarounds have limitations:
87 |
88 | - Chat histories capture conversations but aren't structured knowledge
89 | - RAG systems can query documents but don't let LLMs write back
90 | - Vector databases require complex setups and often live in the cloud
91 | - Knowledge graphs typically need specialized tools to maintain
92 |
93 | Basic Memory addresses these problems with a simple approach: structured Markdown files that both humans and LLMs can
94 | read
95 | and write to. The key advantages:
96 |
97 | - **Local-first:** All knowledge stays in files you control
98 | - **Bi-directional:** Both you and the LLM read and write to the same files
99 | - **Structured yet simple:** Uses familiar Markdown with semantic patterns
100 | - **Traversable knowledge graph:** LLMs can follow links between topics
101 | - **Standard formats:** Works with existing editors like Obsidian
102 | - **Lightweight infrastructure:** Just local files indexed in a local SQLite database
103 |
104 | With Basic Memory, you can:
105 |
106 | - Have conversations that build on previous knowledge
107 | - Create structured notes during natural conversations
108 | - Have conversations with LLMs that remember what you've discussed before
109 | - Navigate your knowledge graph semantically
110 | - Keep everything local and under your control
111 | - Use familiar tools like Obsidian to view and edit notes
112 | - Build a personal knowledge base that grows over time
113 | - Sync your knowledge to the cloud with bidirectional synchronization
114 | - Authenticate and manage cloud projects with subscription validation
115 | - Mount cloud storage for direct file access
116 |
117 | ## How It Works in Practice
118 |
119 | Let's say you're exploring coffee brewing methods and want to capture your knowledge. Here's how it works:
120 |
121 | 1. Start by chatting normally:
122 |
123 | ```
124 | I've been experimenting with different coffee brewing methods. Key things I've learned:
125 |
126 | - Pour over gives more clarity in flavor than French press
127 | - Water temperature is critical - around 205°F seems best
128 | - Freshly ground beans make a huge difference
129 | ```
130 |
131 | ... continue conversation.
132 |
133 | 2. Ask the LLM to help structure this knowledge:
134 |
135 | ```
136 | "Let's write a note about coffee brewing methods."
137 | ```
138 |
139 | LLM creates a new Markdown file on your system (which you can see instantly in Obsidian or your editor):
140 |
141 | ```markdown
142 | ---
143 | title: Coffee Brewing Methods
144 | permalink: coffee-brewing-methods
145 | tags:
146 | - coffee
147 | - brewing
148 | ---
149 |
150 | # Coffee Brewing Methods
151 |
152 | ## Observations
153 |
154 | - [method] Pour over provides more clarity and highlights subtle flavors
155 | - [technique] Water temperature at 205°F (96°C) extracts optimal compounds
156 | - [principle] Freshly ground beans preserve aromatics and flavor
157 |
158 | ## Relations
159 |
160 | - relates_to [[Coffee Bean Origins]]
161 | - requires [[Proper Grinding Technique]]
162 | - affects [[Flavor Extraction]]
163 | ```
164 |
165 | The note embeds semantic content and links to other topics via simple Markdown formatting.
166 |
167 | 3. You see this file on your computer in real time in the current project directory (default `~/basic-memory`).
168 |
169 | - Realtime sync can be enabled via running `basic-memory sync --watch`
170 |
171 | 4. In a chat with the LLM, you can reference a topic:
172 |
173 | ```
174 | Look at `coffee-brewing-methods` for context about pour over coffee
175 | ```
176 |
177 | The LLM can now build rich context from the knowledge graph. For example:
178 |
179 | ```
180 | Following relation 'relates_to [[Coffee Bean Origins]]':
181 | - Found information about Ethiopian Yirgacheffe
182 | - Notes on Colombian beans' nutty profile
183 | - Altitude effects on bean characteristics
184 |
185 | Following relation 'requires [[Proper Grinding Technique]]':
186 | - Burr vs. blade grinder comparisons
187 | - Grind size recommendations for different methods
188 | - Impact of consistent particle size on extraction
189 | ```
190 |
191 | Each related document can lead to more context, building a rich semantic understanding of your knowledge base.
192 |
193 | This creates a two-way flow where:
194 |
195 | - Humans write and edit Markdown files
196 | - LLMs read and write through the MCP protocol
197 | - Sync keeps everything consistent
198 | - All knowledge stays in local files.
199 |
200 | ## Technical Implementation
201 |
202 | Under the hood, Basic Memory:
203 |
204 | 1. Stores everything in Markdown files
205 | 2. Uses a SQLite database for searching and indexing
206 | 3. Extracts semantic meaning from simple Markdown patterns
207 | - Files become `Entity` objects
208 | - Each `Entity` can have `Observations`, or facts associated with it
209 | - `Relations` connect entities together to form the knowledge graph
210 | 4. Maintains the local knowledge graph derived from the files
211 | 5. Provides bidirectional synchronization between files and the knowledge graph
212 | 6. Implements the Model Context Protocol (MCP) for AI integration
213 | 7. Exposes tools that let AI assistants traverse and manipulate the knowledge graph
214 | 8. Uses memory:// URLs to reference entities across tools and conversations
215 |
216 | The file format is just Markdown with some simple markup:
217 |
218 | Each Markdown file has:
219 |
220 | ### Frontmatter
221 |
222 | ```markdown
223 | title: <Entity title>
224 | type: <The type of Entity> (e.g. note)
225 | permalink: <a uri slug>
226 |
227 | - <optional metadata> (such as tags)
228 | ```
229 |
230 | ### Observations
231 |
232 | Observations are facts about a topic.
233 | They can be added by creating a Markdown list with a special format that can reference a `category`, `tags` using a
234 | "#" character, and an optional `context`.
235 |
236 | Observation Markdown format:
237 |
238 | ```markdown
239 | - [category] content #tag (optional context)
240 | ```
241 |
242 | Examples of observations:
243 |
244 | ```markdown
245 | - [method] Pour over extracts more floral notes than French press
246 | - [tip] Grind size should be medium-fine for pour over #brewing
247 | - [preference] Ethiopian beans have bright, fruity flavors (especially from Yirgacheffe)
248 | - [fact] Lighter roasts generally contain more caffeine than dark roasts
249 | - [experiment] Tried 1:15 coffee-to-water ratio with good results
250 | - [resource] James Hoffman's V60 technique on YouTube is excellent
251 | - [question] Does water temperature affect extraction of different compounds differently?
252 | - [note] My favorite local shop uses a 30-second bloom time
253 | ```
254 |
255 | ### Relations
256 |
257 | Relations are links to other topics. They define how entities connect in the knowledge graph.
258 |
259 | Markdown format:
260 |
261 | ```markdown
262 | - relation_type [[WikiLink]] (optional context)
263 | ```
264 |
265 | Examples of relations:
266 |
267 | ```markdown
268 | - pairs_well_with [[Chocolate Desserts]]
269 | - grown_in [[Ethiopia]]
270 | - contrasts_with [[Tea Brewing Methods]]
271 | - requires [[Burr Grinder]]
272 | - improves_with [[Fresh Beans]]
273 | - relates_to [[Morning Routine]]
274 | - inspired_by [[Japanese Coffee Culture]]
275 | - documented_in [[Coffee Journal]]
276 | ```
277 |
278 | ## Using with VS Code
279 |
280 | Add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.
281 |
282 | ```json
283 | {
284 | "mcp": {
285 | "servers": {
286 | "basic-memory": {
287 | "command": "uvx",
288 | "args": ["basic-memory", "mcp"]
289 | }
290 | }
291 | }
292 | }
293 | ```
294 |
295 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
296 |
297 | ```json
298 | {
299 | "servers": {
300 | "basic-memory": {
301 | "command": "uvx",
302 | "args": ["basic-memory", "mcp"]
303 | }
304 | }
305 | }
306 | ```
307 |
308 | You can use Basic Memory with VS Code to easily retrieve and store information while coding.
309 |
310 | ## Using with Claude Desktop
311 |
312 | Basic Memory is built using the MCP (Model Context Protocol) and works with the Claude desktop app (https://claude.ai/):
313 |
314 | 1. Configure Claude Desktop to use Basic Memory:
315 |
316 | Edit your MCP configuration file (usually located at `~/Library/Application Support/Claude/claude_desktop_config.json`
317 | on macOS):
318 |
319 | ```json
320 | {
321 | "mcpServers": {
322 | "basic-memory": {
323 | "command": "uvx",
324 | "args": [
325 | "basic-memory",
326 | "mcp"
327 | ]
328 | }
329 | }
330 | }
331 | ```
332 |
333 | If you want to use a specific project (see [Multiple Projects](#multiple-projects) below), update your Claude Desktop
334 | config:
335 |
336 | ```json
337 | {
338 | "mcpServers": {
339 | "basic-memory": {
340 | "command": "uvx",
341 | "args": [
342 | "basic-memory",
343 | "mcp",
344 | "--project",
345 | "your-project-name"
346 | ]
347 | }
348 | }
349 | }
350 | ```
351 |
352 | 2. Sync your knowledge:
353 |
354 | ```bash
355 | # One-time sync of local knowledge updates
356 | basic-memory sync
357 |
358 | # Run realtime sync process (recommended)
359 | basic-memory sync --watch
360 | ```
361 |
362 | 3. Cloud features (optional, requires subscription):
363 |
364 | ```bash
365 | # Authenticate with cloud
366 | basic-memory cloud login
367 |
368 | # Bidirectional sync with cloud
369 | basic-memory cloud sync
370 |
371 | # Verify cloud integrity
372 | basic-memory cloud check
373 |
374 | # Mount cloud storage
375 | basic-memory cloud mount
376 | ```
377 |
378 | 4. In Claude Desktop, the LLM can now use these tools:
379 |
380 | **Content Management:**
381 | ```
382 | write_note(title, content, folder, tags) - Create or update notes
383 | read_note(identifier, page, page_size) - Read notes by title or permalink
384 | read_content(path) - Read raw file content (text, images, binaries)
385 | view_note(identifier) - View notes as formatted artifacts
386 | edit_note(identifier, operation, content) - Edit notes incrementally
387 | move_note(identifier, destination_path) - Move notes with database consistency
388 | delete_note(identifier) - Delete notes from knowledge base
389 | ```
390 |
391 | **Knowledge Graph Navigation:**
392 | ```
393 | build_context(url, depth, timeframe) - Navigate knowledge graph via memory:// URLs
394 | recent_activity(type, depth, timeframe) - Find recently updated information
395 | list_directory(dir_name, depth) - Browse directory contents with filtering
396 | ```
397 |
398 | **Search & Discovery:**
399 | ```
400 | search(query, page, page_size) - Search across your knowledge base
401 | ```
402 |
403 | **Project Management:**
404 | ```
405 | list_memory_projects() - List all available projects
406 | create_memory_project(project_name, project_path) - Create new projects
407 | get_current_project() - Show current project stats
408 | sync_status() - Check synchronization status
409 | ```
410 |
411 | **Visualization:**
412 | ```
413 | canvas(nodes, edges, title, folder) - Generate knowledge visualizations
414 | ```
415 |
416 | 5. Example prompts to try:
417 |
418 | ```
419 | "Create a note about our project architecture decisions"
420 | "Find information about JWT authentication in my notes"
421 | "Create a canvas visualization of my project components"
422 | "Read my notes on the authentication system"
423 | "What have I been working on in the past week?"
424 | ```
425 |
426 | ## Further info
427 |
428 | See the [Documentation](https://memory.basicmachines.co/) for more info, including:
429 |
430 | - [Complete User Guide](https://docs.basicmemory.com/user-guide/)
431 | - [CLI tools](https://docs.basicmemory.com/guides/cli-reference/)
432 | - [Cloud CLI and Sync](https://docs.basicmemory.com/guides/cloud-cli/)
433 | - [Managing multiple Projects](https://docs.basicmemory.com/guides/cli-reference/#project)
434 | - [Importing data from OpenAI/Claude Projects](https://docs.basicmemory.com/guides/cli-reference/#import)
435 |
436 | ## Logging
437 |
438 | Basic Memory uses [Loguru](https://github.com/Delgan/loguru) for logging. The logging behavior varies by entry point:
439 |
440 | | Entry Point | Default Behavior | Use Case |
441 | |-------------|------------------|----------|
442 | | CLI commands | File only | Prevents log output from interfering with command output |
443 | | MCP server | File only | Stdout would corrupt the JSON-RPC protocol |
444 | | API server | File (local) or stdout (cloud) | Docker/cloud deployments use stdout |
445 |
446 | **Log file location:** `~/.basic-memory/basic-memory.log` (10MB rotation, 10 days retention)
447 |
448 | ### Environment Variables
449 |
450 | | Variable | Default | Description |
451 | |----------|---------|-------------|
452 | | `BASIC_MEMORY_LOG_LEVEL` | `INFO` | Log level: DEBUG, INFO, WARNING, ERROR |
453 | | `BASIC_MEMORY_CLOUD_MODE` | `false` | When `true`, API logs to stdout with structured context |
454 | | `BASIC_MEMORY_ENV` | `dev` | Set to `test` for test mode (stderr only) |
455 |
456 | ### Examples
457 |
458 | ```bash
459 | # Enable debug logging
460 | BASIC_MEMORY_LOG_LEVEL=DEBUG basic-memory sync
461 |
462 | # View logs
463 | tail -f ~/.basic-memory/basic-memory.log
464 |
465 | # Cloud/Docker mode (stdout logging with structured context)
466 | BASIC_MEMORY_CLOUD_MODE=true uvicorn basic_memory.api.app:app
467 | ```
468 |
469 | ## Telemetry
470 |
471 | Basic Memory collects anonymous usage statistics to help improve the software. This follows the [Homebrew model](https://docs.brew.sh/Analytics) - telemetry is on by default with easy opt-out.
472 |
473 | **What we collect:**
474 | - App version, Python version, OS, architecture
475 | - Feature usage (which MCP tools and CLI commands are used)
476 | - Error types (sanitized - no file paths or personal data)
477 |
478 | **What we NEVER collect:**
479 | - Note content, file names, or paths
480 | - Personal information
481 | - IP addresses
482 |
483 | **Opting out:**
484 | ```bash
485 | # Disable telemetry
486 | basic-memory telemetry disable
487 |
488 | # Check status
489 | basic-memory telemetry status
490 |
491 | # Re-enable
492 | basic-memory telemetry enable
493 | ```
494 |
495 | Or set the environment variable:
496 | ```bash
497 | export BASIC_MEMORY_TELEMETRY_ENABLED=false
498 | ```
499 |
500 | For more details, see the [Telemetry documentation](https://basicmemory.com/telemetry).
501 |
502 | ## Development
503 |
504 | ### Running Tests
505 |
506 | Basic Memory supports dual database backends (SQLite and Postgres). By default, tests run against SQLite. Set `BASIC_MEMORY_TEST_POSTGRES=1` to run against Postgres (uses testcontainers - Docker required).
507 |
508 | **Quick Start:**
509 | ```bash
510 | # Run all tests against SQLite (default, fast)
511 | just test-sqlite
512 |
513 | # Run all tests against Postgres (uses testcontainers)
514 | just test-postgres
515 |
516 | # Run both SQLite and Postgres tests
517 | just test
518 | ```
519 |
520 | **Available Test Commands:**
521 |
522 | - `just test` - Run all tests against both SQLite and Postgres
523 | - `just test-sqlite` - Run all tests against SQLite (fast, no Docker needed)
524 | - `just test-postgres` - Run all tests against Postgres (uses testcontainers)
525 | - `just test-unit-sqlite` - Run unit tests against SQLite
526 | - `just test-unit-postgres` - Run unit tests against Postgres
527 | - `just test-int-sqlite` - Run integration tests against SQLite
528 | - `just test-int-postgres` - Run integration tests against Postgres
529 | - `just test-windows` - Run Windows-specific tests (auto-skips on other platforms)
530 | - `just test-benchmark` - Run performance benchmark tests
531 |
532 | **Postgres Testing:**
533 |
534 | Postgres tests use [testcontainers](https://testcontainers-python.readthedocs.io/) which automatically spins up a Postgres instance in Docker. No manual database setup required - just have Docker running.
535 |
536 | **Test Markers:**
537 |
538 | Tests use pytest markers for selective execution:
539 | - `windows` - Windows-specific database optimizations
540 | - `benchmark` - Performance tests (excluded from default runs)
541 |
542 | **Other Development Commands:**
543 | ```bash
544 | just install # Install with dev dependencies
545 | just lint # Run linting checks
546 | just typecheck # Run type checking
547 | just format # Format code with ruff
548 | just check # Run all quality checks
549 | just migration "msg" # Create database migration
550 | ```
551 |
552 | See the [justfile](justfile) for the complete list of development commands.
553 |
554 | ## License
555 |
556 | AGPL-3.0
557 |
558 | Contributions are welcome. See the [Contributing](CONTRIBUTING.md) guide for info about setting up the project locally
559 | and submitting PRs.
560 |
561 | ## Star History
562 |
563 | <a href="https://www.star-history.com/#basicmachines-co/basic-memory&Date">
564 | <picture>
565 | <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date&theme=dark" />
566 | <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
567 | <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
568 | </picture>
569 | </a>
570 |
571 | Built with ♥️ by Basic Machines
572 |
```
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
```markdown
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | ------- | ------------------ |
7 | | 0.x.x | :white_check_mark: |
8 |
9 | ## Reporting a Vulnerability
10 |
11 | Please report security vulnerabilities privately rather than through public GitHub issues.
12 |
13 | If you find a vulnerability, please contact [email protected]
14 |
```
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
```markdown
1 | # Code of Conduct
2 |
3 | ## Purpose
4 |
5 | Maintain a respectful and professional environment where contributions can be made without harassment or
6 | negativity.
7 |
8 | ## Standards
9 |
10 | Respectful communication and collaboration are expected. Offensive behavior, harassment, or personal attacks will not be
11 | tolerated.
12 |
13 | ## Reporting Issues
14 |
15 | To report inappropriate behavior, contact [email protected].
16 |
17 | ## Consequences
18 |
19 | Violations of this code may lead to consequences, including being banned from contributing to the project.
20 |
```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
1 | # Contributing to Basic Memory
2 |
3 | Thank you for considering contributing to Basic Memory! This document outlines the process for contributing to the
4 | project and how to get started as a developer.
5 |
6 | ## Getting Started
7 |
8 | ### Development Environment
9 |
10 | 1. **Clone the Repository**:
11 | ```bash
12 | git clone https://github.com/basicmachines-co/basic-memory.git
13 | cd basic-memory
14 | ```
15 |
16 | 2. **Install Dependencies**:
17 | ```bash
18 | # Using just (recommended)
19 | just install
20 |
21 | # Or using uv
22 |    uv pip install -e ".[dev]"
23 |
24 | # Or using pip
25 | pip install -e ".[dev]"
26 | ```
27 |
28 | > **Note**: Basic Memory uses [just](https://just.systems) as a modern command runner. Install with `brew install just` or `cargo install just`.
29 |
30 | 3. **Activate the Virtual Environment**
31 | ```bash
32 | source .venv/bin/activate
33 | ```
34 |
35 | 4. **Run the Tests**:
36 | ```bash
37 | # Run all tests with unified coverage (unit + integration)
38 | just test
39 |
40 | # Run unit tests only (fast, no coverage)
41 | just test-unit
42 |
43 | # Run integration tests only (fast, no coverage)
44 | just test-int
45 |
46 | # Generate HTML coverage report
47 | just coverage
48 |
49 | # Run a specific test
50 | pytest tests/path/to/test_file.py::test_function_name
51 | ```
52 |
53 | ### Development Workflow
54 |
55 | 1. **Fork the Repo**: Fork the repository on GitHub and clone your copy.
56 | 2. **Create a Branch**: Create a new branch for your feature or fix.
57 | ```bash
58 | git checkout -b feature/your-feature-name
59 | # or
60 | git checkout -b fix/issue-you-are-fixing
61 | ```
62 | 3. **Make Your Changes**: Implement your changes with appropriate test coverage.
63 | 4. **Check Code Quality**:
64 | ```bash
65 | # Run all checks at once
66 | just check
67 |
68 | # Or run individual checks
69 | just lint # Run linting
70 | just format # Format code
71 |    just typecheck # Type checking
72 | ```
73 | 5. **Test Your Changes**: Ensure all tests pass locally and maintain 100% test coverage.
74 | ```bash
75 | just test
76 | ```
77 | 6. **Submit a PR**: Submit a pull request with a detailed description of your changes.
78 |
79 | ## LLM-Assisted Development
80 |
81 | This project is designed for collaborative development between humans and LLMs (Large Language Models):
82 |
83 | 1. **CLAUDE.md**: The repository includes a `CLAUDE.md` file that serves as a project guide for both humans and LLMs.
84 | This file contains:
85 | - Key project information and architectural overview
86 | - Development commands and workflows
87 | - Code style guidelines
88 | - Documentation standards
89 |
90 | 2. **AI-Human Collaborative Workflow**:
91 | - We encourage using LLMs like Claude for code generation, reviews, and documentation
92 | - When possible, save context in markdown files that can be referenced later
93 | - This enables seamless knowledge transfer between different development sessions
94 | - Claude can help with implementation details while you focus on architecture and design
95 |
96 | 3. **Adding to CLAUDE.md**:
97 | - If you discover useful project information or common commands, consider adding them to CLAUDE.md
98 | - This helps all contributors (human and AI) maintain consistent knowledge of the project
99 |
100 | ## Pull Request Process
101 |
102 | 1. **Create a Pull Request**: Open a PR against the `main` branch with a clear title and description.
103 | 2. **Sign the Developer Certificate of Origin (DCO)**: All contributions require signing our DCO, which certifies that
104 | you have the right to submit your contributions. This will be automatically checked by our CLA assistant when you
105 | create a PR.
106 | 3. **PR Description**: Include:
107 | - What the PR changes
108 | - Why the change is needed
109 | - How you tested the changes
110 | - Any related issues (use "Fixes #123" to automatically close issues)
111 | 4. **Code Review**: Wait for code review and address any feedback.
112 | 5. **CI Checks**: Ensure all CI checks pass.
113 | 6. **Merge**: Once approved, a maintainer will merge your PR.
114 |
115 | ## Developer Certificate of Origin
116 |
117 | By contributing to this project, you agree to the [Developer Certificate of Origin (DCO)](CLA.md). This means you
118 | certify that:
119 |
120 | - You have the right to submit your contributions
121 | - You're not knowingly submitting code with patent or copyright issues
122 | - Your contributions are provided under the project's license (AGPL-3.0)
123 |
124 | This is a lightweight alternative to a Contributor License Agreement and helps ensure that all contributions can be
125 | properly incorporated into the project and potentially used in commercial applications.
126 |
127 | ### Signing Your Commits
128 |
129 | Sign your commit:
130 |
131 | **Using the `-s` or `--signoff` flag**:
132 |
133 | ```bash
134 | git commit -s -m "Your commit message"
135 | ```
136 |
137 | This adds a `Signed-off-by` line to your commit message, certifying that you adhere to the DCO.
138 |
139 | The sign-off certifies that you have the right to submit your contribution under the project's license and verifies your
140 | agreement to the DCO.
141 |
142 | ## Code Style Guidelines
143 |
144 | - **Python Version**: Python 3.12+ with full type annotations (3.12+ required for type parameter syntax)
145 | - **Line Length**: 100 characters maximum
146 | - **Formatting**: Use ruff for consistent styling
147 | - **Import Order**: Standard lib, third-party, local imports
148 | - **Naming**: Use snake_case for functions/variables, PascalCase for classes
149 | - **Documentation**: Add docstrings to public functions, classes, and methods
150 | - **Type Annotations**: Use type hints for all functions and methods
151 |
152 | ## Testing Guidelines
153 |
154 | ### Test Structure
155 |
156 | Basic Memory uses two test directories with unified coverage reporting:
157 |
158 | - **`tests/`**: Unit tests that test individual components in isolation
159 | - Fast execution with extensive mocking
160 | - Test individual functions, classes, and modules
161 | - Run with: `just test-unit` (no coverage, fast)
162 |
163 | - **`test-int/`**: Integration tests that test real-world scenarios
164 | - Test full workflows with real database and file operations
165 | - Include performance benchmarks
166 | - More realistic but slower than unit tests
167 | - Run with: `just test-int` (no coverage, fast)
168 |
169 | ### Running Tests
170 |
171 | ```bash
172 | # Run all tests with unified coverage report
173 | just test
174 |
175 | # Run only unit tests (fast iteration)
176 | just test-unit
177 |
178 | # Run only integration tests
179 | just test-int
180 |
181 | # Generate HTML coverage report
182 | just coverage
183 |
184 | # Run specific test
185 | pytest tests/path/to/test_file.py::test_function_name
186 |
187 | # Run tests excluding benchmarks
188 | pytest -m "not benchmark"
189 |
190 | # Run only benchmark tests
191 | pytest -m benchmark test-int/test_sync_performance_benchmark.py
192 | ```
193 |
194 | ### Performance Benchmarks
195 |
196 | The `test-int/test_sync_performance_benchmark.py` file contains performance benchmarks that measure sync and indexing speed:
197 |
198 | - `test_benchmark_sync_100_files` - Small repository performance
199 | - `test_benchmark_sync_500_files` - Medium repository performance
200 | - `test_benchmark_sync_1000_files` - Large repository performance (marked slow)
201 | - `test_benchmark_resync_no_changes` - Re-sync performance baseline
202 |
203 | Run benchmarks with:
204 | ```bash
205 | # Run all benchmarks (excluding slow ones)
206 | pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"
207 |
208 | # Run all benchmarks including slow ones
209 | pytest test-int/test_sync_performance_benchmark.py -v -m benchmark
210 |
211 | # Run specific benchmark
212 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_100_files -v
213 | ```
214 |
215 | See `test-int/BENCHMARKS.md` for detailed benchmark documentation.
216 |
217 | ### Testing Best Practices
218 |
219 | - **Coverage Target**: We aim for high test coverage for all code
220 | - **Test Framework**: Use pytest for unit and integration tests
221 | - **Mocking**: Avoid mocking in integration tests; use sparingly in unit tests
222 | - **Edge Cases**: Test both normal operation and edge cases
223 | - **Database Testing**: Use in-memory SQLite for testing database operations
224 | - **Fixtures**: Use async pytest fixtures for setup and teardown
225 | - **Markers**: Use `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
226 |
227 | ## Release Process
228 |
229 | Basic Memory uses automatic versioning based on git tags with `uv-dynamic-versioning`. Here's how releases work:
230 |
231 | ### Version Management
232 | - **Development versions**: Automatically generated from git commits (e.g., `0.12.4.dev26+468a22f`)
233 | - **Beta releases**: Created by tagging with beta suffixes (e.g., `git tag v0.13.0b1`)
234 | - **Stable releases**: Created by tagging with version numbers (e.g., `git tag v0.13.0`)
235 |
236 | ### Release Workflows
237 |
238 | #### Development Builds
239 | - Automatically published to PyPI on every commit to `main`
240 | - Version format: `0.12.4.dev26+468a22f` (base version + dev + commit count + hash)
241 | - Users install with: `pip install basic-memory --pre --force-reinstall`
242 |
243 | #### Beta Releases
244 | 1. Create and push a beta tag: `git tag v0.13.0b1 && git push origin v0.13.0b1`
245 | 2. GitHub Actions automatically builds and publishes to PyPI
246 | 3. Users install with: `pip install basic-memory --pre`
247 |
248 | #### Stable Releases
249 | 1. Create and push a version tag: `git tag v0.13.0 && git push origin v0.13.0`
250 | 2. GitHub Actions automatically:
251 | - Builds the package with version `0.13.0`
252 | - Creates GitHub release with auto-generated notes
253 | - Publishes to PyPI
254 | 3. Users install with: `pip install basic-memory`
255 |
256 | ### For Contributors
257 | - No manual version bumping required
258 | - Versions are automatically derived from git tags
259 | - Focus on code changes, not version management
260 |
261 | ## Creating Issues
262 |
263 | If you're planning to work on something, please create an issue first to discuss the approach. Include:
264 |
265 | - A clear title and description
266 | - Steps to reproduce if reporting a bug
267 | - Expected behavior vs. actual behavior
268 | - Any relevant logs or screenshots
269 | - Your proposed solution, if you have one
270 |
271 | ## Code of Conduct
272 |
273 | All contributors must follow the [Code of Conduct](CODE_OF_CONDUCT.md).
274 |
275 | ## Thank You!
276 |
277 | Your contributions help make Basic Memory better. We appreciate your time and effort!
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
1 | # CLAUDE.md - Basic Memory Project Guide
2 |
3 | ## Project Overview
4 |
5 | Basic Memory is a local-first knowledge management system built on the Model Context Protocol (MCP). It enables
6 | bidirectional communication between LLMs (like Claude) and markdown files, creating a personal knowledge graph that can
7 | be traversed using links between documents.
8 |
9 | ## CODEBASE DEVELOPMENT
10 |
11 | ### Project information
12 |
13 | See the [README.md](README.md) file for a project overview.
14 |
15 | ### Build and Test Commands
16 |
17 | - Install: `just install` or `pip install -e ".[dev]"`
18 | - Run all tests (SQLite + Postgres): `just test`
19 | - Run all tests against SQLite: `just test-sqlite`
20 | - Run all tests against Postgres: `just test-postgres` (uses testcontainers)
21 | - Run unit tests (SQLite): `just test-unit-sqlite`
22 | - Run unit tests (Postgres): `just test-unit-postgres`
23 | - Run integration tests (SQLite): `just test-int-sqlite`
24 | - Run integration tests (Postgres): `just test-int-postgres`
25 | - Generate HTML coverage: `just coverage`
26 | - Single test: `pytest tests/path/to/test_file.py::test_function_name`
27 | - Run benchmarks: `pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"`
28 | - Lint: `just lint` or `ruff check . --fix`
29 | - Type check: `just typecheck` or `uv run pyright`
30 | - Format: `just format` or `uv run ruff format .`
31 | - Run all code checks: `just check` (runs lint, format, typecheck, test)
32 | - Create db migration: `just migration "Your migration message"`
33 | - Run development MCP Inspector: `just run-inspector`
34 |
35 | **Note:** Project requires Python 3.12+ (uses type parameter syntax and `type` aliases introduced in 3.12)
36 |
37 | **Postgres Testing:** Uses [testcontainers](https://testcontainers-python.readthedocs.io/) which automatically spins up a Postgres instance in Docker. No manual database setup required - just have Docker running.
38 |
39 | ### Test Structure
40 |
41 | - `tests/` - Unit tests for individual components (mocked, fast)
42 | - `test-int/` - Integration tests for real-world scenarios (no mocks, realistic)
43 | - Both directories are covered by unified coverage reporting
44 | - Benchmark tests in `test-int/` are marked with `@pytest.mark.benchmark`
45 | - Slow tests are marked with `@pytest.mark.slow`
46 |
47 | ### Code Style Guidelines
48 |
49 | - Line length: 100 characters max
50 | - Python 3.12+ with full type annotations (uses type parameters and type aliases)
51 | - Format with ruff (consistent styling)
52 | - Import order: standard lib, third-party, local imports
53 | - Naming: snake_case for functions/variables, PascalCase for classes
54 | - Prefer async patterns with SQLAlchemy 2.0
55 | - Use Pydantic v2 for data validation and schemas
56 | - CLI uses Typer for command structure
57 | - API uses FastAPI for endpoints
58 | - Follow the repository pattern for data access
59 | - Tools communicate to api routers via the httpx ASGI client (in process)
60 |
61 | ### Code Change Guidelines
62 |
63 | - **Full file read before edits**: Before editing any file, read it in full first to ensure complete context; partial reads lead to corrupted edits
64 | - **Minimize diffs**: Prefer the smallest change that satisfies the request. Avoid unrelated refactors or style rewrites unless necessary for correctness
65 | - **No speculative getattr**: Never use `getattr(obj, "attr", default)` when unsure about attribute names. Check the class definition or source code first
66 | - **Fail fast**: Write code with fail-fast logic by default. Do not swallow exceptions with errors or warnings
67 | - **No fallback logic**: Do not add fallback logic unless explicitly told to and agreed with the user
68 | - **No guessing**: Do not say "The issue is..." before you actually know what the issue is. Investigate first.
69 |
70 | ### Literate Programming Style
71 |
72 | Code should tell a story. Comments must explain the "why" and narrative flow, not just the "what".
73 |
74 | **Section Headers:**
75 | For files with multiple phases of logic, add section headers so the control flow reads like chapters:
76 | ```python
77 | # --- Authentication ---
78 | # ... auth logic ...
79 |
80 | # --- Data Validation ---
81 | # ... validation logic ...
82 |
83 | # --- Business Logic ---
84 | # ... core logic ...
85 | ```
86 |
87 | **Decision Point Comments:**
88 | For conditionals that materially change behavior (gates, fallbacks, retries, feature flags), add comments with:
89 | - **Trigger**: what condition causes this branch
90 | - **Why**: the rationale (cost, correctness, UX, determinism)
91 | - **Outcome**: what changes downstream
92 |
93 | ```python
94 | # Trigger: project has no active sync watcher
95 | # Why: avoid duplicate file system watchers consuming resources
96 | # Outcome: starts new watcher, registers in active_watchers dict
97 | if project_id not in active_watchers:
98 | start_watcher(project_id)
99 | ```
100 |
101 | **Constraint Comments:**
102 | If code exists because of a constraint (async requirements, rate limits, schema compatibility), explain the constraint near the code:
103 | ```python
104 | # SQLite requires WAL mode for concurrent read/write access
105 | connection.execute("PRAGMA journal_mode=WAL")
106 | ```
107 |
108 | **What NOT to Comment:**
109 | Avoid comments that restate obvious code:
110 | ```python
111 | # Bad - restates code
112 | counter += 1 # increment counter
113 |
114 | # Good - explains why
115 | counter += 1 # track retries for backoff calculation
116 | ```
117 |
118 | ### Codebase Architecture
119 |
120 | See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) for detailed architecture documentation.
121 |
122 | **Directory Structure:**
123 | - `/alembic` - Alembic db migrations
124 | - `/api` - FastAPI REST endpoints + `container.py` composition root
125 | - `/cli` - Typer CLI + `container.py` composition root
126 | - `/deps` - Feature-scoped FastAPI dependencies (config, db, projects, repositories, services, importers)
127 | - `/importers` - Import functionality for Claude, ChatGPT, and other sources
128 | - `/markdown` - Markdown parsing and processing
129 | - `/mcp` - MCP server + `container.py` composition root + `clients/` typed API clients
130 | - `/models` - SQLAlchemy ORM models
131 | - `/repository` - Data access layer
132 | - `/schemas` - Pydantic models for validation
133 | - `/services` - Business logic layer
134 | - `/sync` - File synchronization services + `coordinator.py` for lifecycle management
135 |
136 | **Composition Roots:**
137 | Each entrypoint (API, MCP, CLI) has a composition root that:
138 | - Reads `ConfigManager` (the only place that reads global config)
139 | - Resolves runtime mode via `RuntimeMode` enum (TEST > CLOUD > LOCAL)
140 | - Provides dependencies to downstream code explicitly
141 |
142 | **Typed API Clients (MCP):**
143 | MCP tools use typed clients in `mcp/clients/` to communicate with the API:
144 | - `KnowledgeClient` - Entity CRUD operations
145 | - `SearchClient` - Search operations
146 | - `MemoryClient` - Context building
147 | - `DirectoryClient` - Directory listing
148 | - `ResourceClient` - Resource reading
149 | - `ProjectClient` - Project management
150 |
151 | Flow: MCP Tool → Typed Client → HTTP API → Router → Service → Repository
152 |
153 | ### Development Notes
154 |
155 | - MCP tools are defined in src/basic_memory/mcp/tools/
156 | - MCP prompts are defined in src/basic_memory/mcp/prompts/
157 | - MCP tools should be atomic, composable operations
158 | - Use `textwrap.dedent()` for multi-line string formatting in prompts and tools
159 | - MCP Prompts are used to invoke tools and format content with instructions for an LLM
160 | - Schema changes require Alembic migrations
161 | - SQLite is used for indexing and full text search, files are source of truth
162 | - Testing uses pytest with asyncio support (strict mode)
163 | - Unit tests (`tests/`) use mocks when necessary; integration tests (`test-int/`) use real implementations
164 | - By default, tests run against SQLite (fast, no Docker needed)
165 | - Set `BASIC_MEMORY_TEST_POSTGRES=1` to run against Postgres (uses testcontainers - Docker required)
166 | - Each test runs in a standalone environment with isolated database and tmp_path directory
167 | - CI runs SQLite and Postgres tests in parallel for faster feedback
168 | - Performance benchmarks are in `test-int/test_sync_performance_benchmark.py`
169 | - Use pytest markers: `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
170 | - **Coverage must stay at 100%**: Write tests for new code. Only use `# pragma: no cover` when tests would require excessive mocking (e.g., TYPE_CHECKING blocks, error handlers that need failure injection, runtime-mode-dependent code paths)
171 |
172 | ### Async Client Pattern (Important!)
173 |
174 | **All MCP tools and CLI commands use the context manager pattern for HTTP clients:**
175 |
176 | ```python
177 | from basic_memory.mcp.async_client import get_client
178 |
179 | async def my_mcp_tool():
180 | async with get_client() as client:
181 | # Use client for API calls
182 | response = await call_get(client, "/path")
183 | return response
184 | ```
185 |
186 | **Do NOT use:**
187 | - ❌ `from basic_memory.mcp.async_client import client` (deprecated module-level client)
188 | - ❌ Manual auth header management
189 | - ❌ `inject_auth_header()` (deleted)
190 |
191 | **Key principles:**
192 | - Auth happens at client creation, not per-request
193 | - Proper resource management via context managers
194 | - Supports three modes: Local (ASGI), CLI cloud (HTTP + auth), Cloud app (factory injection)
195 | - Factory pattern enables dependency injection for cloud consolidation
196 |
197 | **For cloud app integration:**
198 | ```python
199 | from basic_memory.mcp import async_client
200 |
201 | # Set custom factory before importing tools
202 | async_client.set_client_factory(your_custom_factory)
203 | ```
204 |
205 | See SPEC-16 for full context manager refactor details.
206 |
207 | ## BASIC MEMORY PRODUCT USAGE
208 |
209 | ### Knowledge Structure
210 |
211 | - Entity: Any concept, document, or idea represented as a markdown file
212 | - Observation: A categorized fact about an entity (`- [category] content`)
213 | - Relation: A directional link between entities (`- relation_type [[Target]]`)
214 | - Frontmatter: YAML metadata at the top of markdown files
215 | - Knowledge representation follows precise markdown format:
216 | - Observations with [category] prefixes
217 | - Relations with WikiLinks [[Entity]]
218 | - Frontmatter with metadata
219 |
220 | ### Basic Memory Commands
221 |
222 | **Local Commands:**
223 | - Check sync status: `basic-memory status`
224 | - Import from Claude: `basic-memory import claude conversations`
225 | - Import from ChatGPT: `basic-memory import chatgpt`
226 | - Import from Memory JSON: `basic-memory import memory-json`
227 | - Tool access: `basic-memory tool` (provides CLI access to MCP tools)
228 | - Continue: `basic-memory tool continue-conversation --topic="search"`
229 |
230 | **Project Management:**
231 | - List projects: `basic-memory project list`
232 | - Add project: `basic-memory project add "name" ~/path`
233 | - Project info: `basic-memory project info`
234 | - One-way sync (local -> cloud): `basic-memory project sync`
235 | - Bidirectional sync: `basic-memory project bisync`
236 | - Integrity check: `basic-memory project check`
237 |
238 | **Cloud Commands (requires subscription):**
239 | - Authenticate: `basic-memory cloud login`
240 | - Logout: `basic-memory cloud logout`
241 | - Check cloud status: `basic-memory cloud status`
242 | - Setup cloud sync: `basic-memory cloud setup`
243 |
244 | ### MCP Capabilities
245 |
246 | - Basic Memory exposes these MCP tools to LLMs:
247 |
248 | **Content Management:**
249 | - `write_note(title, content, folder, tags)` - Create/update markdown notes with semantic observations and relations
250 | - `read_note(identifier, page, page_size)` - Read notes by title, permalink, or memory:// URL with knowledge graph awareness
251 | - `read_content(path)` - Read raw file content (text, images, binaries) without knowledge graph processing
252 | - `view_note(identifier, page, page_size)` - View notes as formatted artifacts for better readability
253 | - `edit_note(identifier, operation, content)` - Edit notes incrementally (append, prepend, find/replace, replace_section)
254 | - `move_note(identifier, destination_path)` - Move notes to new locations, updating database and maintaining links
255 | - `delete_note(identifier)` - Delete notes from the knowledge base
256 |
257 | **Knowledge Graph Navigation:**
258 | - `build_context(url, depth, timeframe)` - Navigate the knowledge graph via memory:// URLs for conversation continuity
259 | - `recent_activity(type, depth, timeframe)` - Get recently updated information with specified timeframe (e.g., "1d", "1 week")
260 | - `list_directory(dir_name, depth, file_name_glob)` - Browse directory contents with filtering and depth control
261 |
262 | **Search & Discovery:**
263 | - `search_notes(query, page, page_size, search_type, types, entity_types, after_date)` - Full-text search across all content with advanced filtering options
264 |
265 | **Project Management:**
266 | - `list_memory_projects()` - List all available projects with their status
267 | - `create_memory_project(project_name, project_path, set_default)` - Create new Basic Memory projects
268 | - `delete_project(project_name)` - Delete a project from configuration
269 |
270 | **Visualization:**
271 | - `canvas(nodes, edges, title, folder)` - Generate Obsidian canvas files for knowledge graph visualization
272 |
273 | **ChatGPT-Compatible Tools:**
274 | - `search(query)` - Search across knowledge base (OpenAI actions compatible)
275 | - `fetch(id)` - Fetch full content of a search result document
276 |
277 | - MCP Prompts for better AI interaction:
278 | - `ai_assistant_guide()` - Guidance on effectively using Basic Memory tools for AI assistants
279 | - `continue_conversation(topic, timeframe)` - Continue previous conversations with relevant historical context
280 | - `search(query, after_date)` - Search with detailed, formatted results for better context understanding
281 | - `recent_activity(timeframe)` - View recently changed items with formatted output
282 |
283 | ### Cloud Features (v0.15.0+)
284 |
285 | Basic Memory now supports cloud synchronization and storage (requires active subscription):
286 |
287 | **Authentication:**
288 | - JWT-based authentication with subscription validation
289 | - Secure session management with token refresh
290 | - Support for multiple cloud projects
291 |
292 | **Bidirectional Sync:**
293 | - rclone bisync integration for two-way synchronization
294 | - Conflict resolution and integrity verification
295 | - Real-time sync with change detection
296 | - Mount/unmount cloud storage for direct file access
297 |
298 | **Cloud Project Management:**
299 | - Create and manage projects in the cloud
300 | - Toggle between local and cloud modes
301 | - Per-project sync configuration
302 | - Subscription-based access control
303 |
304 | **Security & Performance:**
305 | - Removed .env file loading for improved security
306 | - .gitignore integration (respects gitignored files)
307 | - WAL mode for SQLite performance
308 | - Background relation resolution (non-blocking startup)
309 | - API performance optimizations (SPEC-11)
310 |
311 | ## AI-Human Collaborative Development
312 |
313 | Basic Memory emerged from and enables a new kind of development process that combines human and AI capabilities. Instead
314 | of using AI just for code generation, we've developed a true collaborative workflow:
315 |
316 | 1. AI (LLM) writes initial implementation based on specifications and context
317 | 2. Human reviews, runs tests, and commits code with any necessary adjustments
318 | 3. Knowledge persists across conversations using Basic Memory's knowledge graph
319 | 4. Development continues seamlessly across different AI sessions with consistent context
320 | 5. Results improve through iterative collaboration and shared understanding
321 |
322 | This approach has allowed us to tackle more complex challenges and build a more robust system than either humans or AI
323 | could achieve independently.
324 |
325 | **Problem-Solving Guidance:**
326 | - If a solution isn't working after reasonable effort, suggest alternative approaches
327 | - Don't persist with a problematic library or pattern when better alternatives exist
328 | - Example: When py-pglite caused cascading test failures, switching to testcontainers-postgres was the right call
329 |
330 | ## GitHub Integration
331 |
332 | Basic Memory has taken AI-Human collaboration to the next level by integrating Claude directly into the development workflow through GitHub:
333 |
334 | ### GitHub MCP Tools
335 |
336 | Using the GitHub Model Context Protocol server, Claude can now:
337 |
338 | - **Repository Management**:
339 | - View repository files and structure
340 | - Read file contents
341 | - Create new branches
342 | - Create and update files
343 |
344 | - **Issue Management**:
345 | - Create new issues
346 | - Comment on existing issues
347 | - Close and update issues
348 | - Search across issues
349 |
350 | - **Pull Request Workflow**:
351 | - Create pull requests
352 | - Review code changes
353 | - Add comments to PRs
354 |
355 | This integration enables Claude to participate as a full team member in the development process, not just as a code generation tool. Claude's GitHub account ([bm-claudeai](https://github.com/bm-claudeai)) is a member of the Basic Machines organization with direct contributor access to the codebase.
356 |
357 | ### Collaborative Development Process
358 |
359 | With GitHub integration, the development workflow includes:
360 |
361 | 1. **Direct code review** - Claude can analyze PRs and provide detailed feedback
362 | 2. **Contribution tracking** - All of Claude's contributions are properly attributed in the Git history
363 | 3. **Branch management** - Claude can create feature branches for implementations
364 | 4. **Documentation maintenance** - Claude can keep documentation updated as the code evolves
365 | 5. **Code Commits**: ALWAYS sign off commits with `git commit -s`
366 |
367 | This level of integration represents a new paradigm in AI-human collaboration, where the AI assistant becomes a full-fledged team member rather than just a tool for generating code snippets.
368 |
```
--------------------------------------------------------------------------------
/tests/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/mcp/clients/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/tests/api/v2/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """V2 API tests."""
2 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """CLI tools for basic-memory"""
2 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP server for basic-memory."""
2 |
```
--------------------------------------------------------------------------------
/.claude/settings.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "enabledPlugins": {
3 | "basic-memory@basicmachines": true
4 | }
5 | }
6 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory API module."""
2 |
3 | from .app import app
4 |
5 | __all__ = ["app"]
6 |
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
1 | import os
2 |
3 | # set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
4 | os.environ["BASIC_MEMORY_ENV"] = "test"
5 |
```
--------------------------------------------------------------------------------
/src/basic_memory/models/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base model class for SQLAlchemy models."""
2 |
3 | from sqlalchemy.ext.asyncio import AsyncAttrs
4 | from sqlalchemy.orm import DeclarativeBase
5 |
6 |
class Base(AsyncAttrs, DeclarativeBase):
    """Declarative base class for all ORM models.

    Mixes in ``AsyncAttrs`` so lazy-loaded attributes can be awaited
    (``await obj.awaitable_attrs.x``) when used with async sessions.
    """

    pass
11 |
```
--------------------------------------------------------------------------------
/src/basic_memory/sync/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory sync services."""
2 |
3 | from .coordinator import SyncCoordinator, SyncStatus
4 | from .sync_service import SyncService
5 | from .watch_service import WatchService
6 |
7 | __all__ = ["SyncService", "WatchService", "SyncCoordinator", "SyncStatus"]
8 |
```
--------------------------------------------------------------------------------
/src/basic_memory/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
2 |
3 | # Package version - updated by release automation
4 | __version__ = "0.17.5"
5 |
6 | # API version for FastAPI - independent of package version
7 | __api_version__ = "v0"
8 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Services package."""
2 |
3 | from .service import BaseService
4 | from .file_service import FileService
5 | from .entity_service import EntityService
6 | from .project_service import ProjectService
7 |
8 | __all__ = ["BaseService", "FileService", "EntityService", "ProjectService"]
9 |
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from .entity_repository import EntityRepository
2 | from .observation_repository import ObservationRepository
3 | from .project_repository import ProjectRepository
4 | from .relation_repository import RelationRepository
5 |
6 | __all__ = [
7 | "EntityRepository",
8 | "ObservationRepository",
9 | "ProjectRepository",
10 | "RelationRepository",
11 | ]
12 |
```
--------------------------------------------------------------------------------
/src/basic_memory/models/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Models package for basic-memory."""
2 |
3 | import basic_memory
4 | from basic_memory.models.base import Base
5 | from basic_memory.models.knowledge import Entity, Observation, Relation
6 | from basic_memory.models.project import Project
7 |
8 | __all__ = [
9 | "Base",
10 | "Entity",
11 | "Observation",
12 | "Relation",
13 | "Project",
14 | "basic_memory",
15 | ]
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/service.py:
--------------------------------------------------------------------------------
```python
1 | """Base service class."""
2 |
3 | from typing import TypeVar, Generic
4 |
5 | from basic_memory.models import Base
6 |
7 | T = TypeVar("T", bound=Base)
8 |
9 |
class BaseService(Generic[T]):
    """Base service that takes a repository.

    ``T`` is the model type (bound to ``Base``) that concrete services
    operate on; subclasses are expected to supply a matching repository.
    """

    def __init__(self, repository):
        """Initialize service with repository.

        Args:
            repository: Data-access object used by the service
                (type is not constrained here; presumably a
                ``Repository`` subclass — confirm against callers).
        """
        # Stored for use by subclass methods; this base class adds no behavior.
        self.repository = repository
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/project_info_repository.py:
--------------------------------------------------------------------------------
```python
1 | from basic_memory.repository.repository import Repository
2 | from basic_memory.models.project import Project
3 |
4 |
class ProjectInfoRepository(Repository):
    """Repository for statistics queries."""

    def __init__(self, session_maker):
        """Initialize with the session maker used for database access.

        Statistics queries are not tied to one table, so ``Project`` is
        passed only to satisfy the base ``Repository`` model requirement.
        """
        # Initialize with Project model as a reference
        super().__init__(session_maker, Project)
11 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Cloud commands package."""
2 |
3 | # Import all commands to register them with typer
4 | from basic_memory.cli.commands.cloud.core_commands import * # noqa: F401,F403
5 | from basic_memory.cli.commands.cloud.api_client import get_authenticated_headers, get_cloud_config # noqa: F401
6 | from basic_memory.cli.commands.cloud.upload_command import * # noqa: F401,F403
7 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
```yaml
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Basic Memory Discussions
4 | url: https://github.com/basicmachines-co/basic-memory/discussions
5 | about: For questions, ideas, or more open-ended discussions
6 | - name: Documentation
7 | url: https://github.com/basicmachines-co/basic-memory#readme
8 | about: Please check the documentation first before reporting an issue
```
--------------------------------------------------------------------------------
/test-int/cli/test_version_integration.py:
--------------------------------------------------------------------------------
```python
1 | """Integration tests for version command."""
2 |
3 | from typer.testing import CliRunner
4 |
5 | from basic_memory.cli.main import app
6 | import basic_memory
7 |
8 |
def test_version_command():
    """'bm --version' should exit cleanly and print the package version."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(app, ["--version"])

    assert outcome.exit_code == 0
    assert basic_memory.__version__ in outcome.stdout
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """API routers."""
2 |
3 | from . import knowledge_router as knowledge
4 | from . import management_router as management
5 | from . import memory_router as memory
6 | from . import project_router as project
7 | from . import resource_router as resource
8 | from . import search_router as search
9 | from . import prompt_router as prompt
10 |
11 | __all__ = ["knowledge", "management", "memory", "project", "resource", "search", "prompt"]
12 |
```
--------------------------------------------------------------------------------
/tests/markdown/test_task_detection.py:
--------------------------------------------------------------------------------
```python
1 | """Test how markdown-it handles task lists."""
2 |
3 | from markdown_it import MarkdownIt
4 |
5 |
def test_task_token_type():
    """Verify how markdown-it parses task list items."""
    parser = MarkdownIt()
    content = """
- [ ] Unchecked task
- [x] Completed task
- [-] In progress task
"""

    # Dump each parsed token so the task-list token types are visible.
    for tok in parser.parse(content):
        print(f"{tok.type}: {tok.content}")
18 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """CLI commands for basic-memory."""
2 |
3 | from . import status, db, import_memory_json, mcp, import_claude_conversations
4 | from . import import_claude_projects, import_chatgpt, tool, project, format, telemetry
5 |
6 | __all__ = [
7 | "status",
8 | "db",
9 | "import_memory_json",
10 | "mcp",
11 | "import_claude_conversations",
12 | "import_claude_projects",
13 | "import_chatgpt",
14 | "tool",
15 | "project",
16 | "format",
17 | "telemetry",
18 | ]
19 |
```
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
```yaml
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | properties: {}
9 | description: No configuration required. This MCP server runs using the default command.
10 | commandFunction: |-
11 | (config) => ({
12 | command: 'basic-memory',
13 | args: ['mcp']
14 | })
15 | exampleConfig: {}
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/6830751f5fb6_merge_multiple_heads.py:
--------------------------------------------------------------------------------
```python
1 | """Merge multiple heads
2 |
3 | Revision ID: 6830751f5fb6
4 | Revises: a2b3c4d5e6f7, g9a0b3c4d5e6
5 | Create Date: 2025-12-29 12:46:46.476268
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 |
12 | # revision identifiers, used by Alembic.
13 | revision: str = "6830751f5fb6"
14 | down_revision: Union[str, Sequence[str], None] = ("a2b3c4d5e6f7", "g9a0b3c4d5e6")
15 | branch_labels: Union[str, Sequence[str], None] = None
16 | depends_on: Union[str, Sequence[str], None] = None
17 |
18 |
def upgrade() -> None:
    """No-op: this revision only merges multiple migration heads."""
    pass
21 |
22 |
def downgrade() -> None:
    """No-op: merge revisions have no schema changes to revert."""
    pass
25 |
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Base package for markdown parsing."""
2 |
3 | from basic_memory.file_utils import ParseError
4 | from basic_memory.markdown.entity_parser import EntityParser
5 | from basic_memory.markdown.markdown_processor import MarkdownProcessor
6 | from basic_memory.markdown.schemas import (
7 | EntityMarkdown,
8 | EntityFrontmatter,
9 | Observation,
10 | Relation,
11 | )
12 |
13 | __all__ = [
14 | "EntityMarkdown",
15 | "EntityFrontmatter",
16 | "EntityParser",
17 | "MarkdownProcessor",
18 | "Observation",
19 | "Relation",
20 | "ParseError",
21 | ]
22 |
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
  - package-ecosystem: "pip" # Python project (pyproject.toml); empty value is invalid
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
13 |
```
--------------------------------------------------------------------------------
/tests/mcp/test_resources.py:
--------------------------------------------------------------------------------
```python
1 | from basic_memory.mcp.prompts.ai_assistant_guide import ai_assistant_guide
2 |
3 |
4 | import pytest
5 |
6 |
@pytest.mark.asyncio
async def test_ai_assistant_guide_exists(app):
    """Test that the AI assistant guide prompt exists and returns content."""
    # Call the underlying function directly (.fn bypasses the MCP wrapper)
    guide = ai_assistant_guide.fn()

    # Verify basic characteristics of the content
    assert guide is not None
    assert isinstance(guide, str)
    assert len(guide) > 0

    # Verify it contains the guide's expected top-level heading
    assert "# AI Assistant Guide" in guide
20 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Documentation improvement
3 | about: Suggest improvements or report issues with documentation
4 | title: '[DOCS] '
5 | labels: documentation
6 | assignees: ''
7 | ---
8 |
9 | ## Documentation Issue
10 | Describe what's missing, unclear, or incorrect in the current documentation.
11 |
12 | ## Location
13 | Where is the problematic documentation? (URL, file path, or section)
14 |
15 | ## Suggested Improvement
16 | How would you improve this documentation? Please be as specific as possible.
17 |
18 | ## Additional Context
19 | Any additional information or screenshots that might help explain the issue or improvement.
```
--------------------------------------------------------------------------------
/tests/api/v2/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """Fixtures for V2 API tests."""
2 |
3 | import pytest
4 |
5 | from basic_memory.models import Project
6 |
7 |
@pytest.fixture
def v2_project_url(test_project: Project) -> str:
    """Create a URL prefix for v2 project-scoped routes using project external_id.

    This helps tests generate the correct URL for v2 project-scoped routes
    which use external_id UUIDs instead of permalinks or integer IDs.

    Returns:
        URL path of the form ``/v2/projects/{external_id}``.
    """
    return f"/v2/projects/{test_project.external_id}"
16 |
17 |
@pytest.fixture
def v2_projects_url() -> str:
    """Base URL for v2 project management endpoints (no project scope)."""
    return "/v2/projects"
22 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory MCP prompts.
2 |
3 | Prompts are a special type of tool that returns a string response
4 | formatted for a user to read, typically invoking one or more tools
5 | and transforming their results into user-friendly text.
6 | """
7 |
8 | # Import individual prompt modules to register them with the MCP server
9 | from basic_memory.mcp.prompts import continue_conversation
10 | from basic_memory.mcp.prompts import recent_activity
11 | from basic_memory.mcp.prompts import search
12 | from basic_memory.mcp.prompts import ai_assistant_guide
13 |
14 | __all__ = [
15 | "ai_assistant_guide",
16 | "continue_conversation",
17 | "recent_activity",
18 | "search",
19 | ]
20 |
```
--------------------------------------------------------------------------------
/tests/services/test_initialization_cloud_mode_branches.py:
--------------------------------------------------------------------------------
```python
1 | import pytest
2 |
3 | from basic_memory.services.initialization import (
4 | ensure_initialization,
5 | initialize_app,
6 | initialize_file_sync,
7 | )
8 |
9 |
@pytest.mark.asyncio
async def test_initialize_app_noop_in_cloud_mode(app_config):
    """initialize_app completes without error when cloud_mode is enabled."""
    app_config.cloud_mode = True
    await initialize_app(app_config)
14 |
15 |
def test_ensure_initialization_noop_in_cloud_mode(app_config):
    """ensure_initialization completes without error when cloud_mode is enabled."""
    app_config.cloud_mode = True
    ensure_initialization(app_config)
19 |
20 |
@pytest.mark.asyncio
async def test_initialize_file_sync_skips_in_test_env(app_config):
    """initialize_file_sync returns cleanly under the test environment."""
    # app_config fixture uses env="test"
    assert app_config.is_test_env is True
    await initialize_file_sync(app_config)
26 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/v2/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """V2 API schemas - ID-based entity and project references."""
2 |
3 | from basic_memory.schemas.v2.entity import (
4 | EntityResolveRequest,
5 | EntityResolveResponse,
6 | EntityResponseV2,
7 | MoveEntityRequestV2,
8 | ProjectResolveRequest,
9 | ProjectResolveResponse,
10 | )
11 | from basic_memory.schemas.v2.resource import (
12 | CreateResourceRequest,
13 | UpdateResourceRequest,
14 | ResourceResponse,
15 | )
16 |
17 | __all__ = [
18 | "EntityResolveRequest",
19 | "EntityResolveResponse",
20 | "EntityResponseV2",
21 | "MoveEntityRequestV2",
22 | "ProjectResolveRequest",
23 | "ProjectResolveResponse",
24 | "CreateResourceRequest",
25 | "UpdateResourceRequest",
26 | "ResourceResponse",
27 | ]
28 |
```
--------------------------------------------------------------------------------
/src/basic_memory/deps.py:
--------------------------------------------------------------------------------
```python
1 | """Dependency injection functions for basic-memory services.
2 |
3 | DEPRECATED: This module is a backwards-compatibility shim.
4 | Import from basic_memory.deps package submodules instead:
5 | - basic_memory.deps.config for configuration
6 | - basic_memory.deps.db for database/session
7 | - basic_memory.deps.projects for project resolution
8 | - basic_memory.deps.repositories for data access
9 | - basic_memory.deps.services for business logic
10 | - basic_memory.deps.importers for import functionality
11 |
12 | This file will be removed once all callers are migrated.
13 | """
14 |
15 | # Re-export everything from the deps package for backwards compatibility
16 | from basic_memory.deps import * # noqa: F401, F403 # pragma: no cover
17 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/main.py:
--------------------------------------------------------------------------------
```python
1 | """Main CLI entry point for basic-memory.""" # pragma: no cover
2 |
3 | from basic_memory.cli.app import app # pragma: no cover
4 |
5 | # Register commands
6 | from basic_memory.cli.commands import ( # noqa: F401 # pragma: no cover
7 | cloud,
8 | db,
9 | import_chatgpt,
10 | import_claude_conversations,
11 | import_claude_projects,
12 | import_memory_json,
13 | mcp,
14 | project,
15 | status,
16 | telemetry,
17 | tool,
18 | )
19 |
20 | # Re-apply warning filter AFTER all imports
21 | # (authlib adds a DeprecationWarning filter that overrides ours)
22 | import warnings # pragma: no cover
23 |
24 | warnings.filterwarnings("ignore") # pragma: no cover
25 |
26 | if __name__ == "__main__": # pragma: no cover
27 | # start the app
28 | app()
29 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/importer.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for import services."""
2 |
3 | from typing import Dict, Optional
4 |
5 | from pydantic import BaseModel
6 |
7 |
class ImportResult(BaseModel):
    """Common import result schema."""

    # Counts of imported items keyed by category name.
    import_count: Dict[str, int]
    # True when the import completed successfully.
    success: bool
    # Human-readable error description when the import failed, if any.
    error_message: Optional[str] = None
14 |
15 |
class ChatImportResult(ImportResult):
    """Result schema for chat imports."""

    conversations: int = 0  # number of conversations imported
    messages: int = 0  # number of messages imported
21 |
22 |
class ProjectImportResult(ImportResult):
    """Result schema for project imports."""

    documents: int = 0  # number of documents imported
    prompts: int = 0  # number of prompts imported
28 |
29 |
class EntityImportResult(ImportResult):
    """Result schema for entity imports."""

    entities: int = 0  # number of entities imported
    relations: int = 0  # number of relations imported
    skipped_entities: int = 0  # entities not imported (presumably invalid/duplicates — confirm in importer)
36 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/migrations.py:
--------------------------------------------------------------------------------
```python
1 | """Functions for managing database migrations."""
2 |
3 | from pathlib import Path
4 | from loguru import logger
5 | from alembic.config import Config
6 | from alembic import command
7 |
8 |
def get_alembic_config() -> Config:  # pragma: no cover
    """Build an alembic Config pointing at this package's migration scripts."""
    # Both alembic.ini and the migration scripts live alongside this module.
    here = Path(__file__).parent
    config = Config(here / "alembic.ini")
    config.set_main_option("script_location", str(here))
    return config
17 |
18 |
def reset_database():  # pragma: no cover
    """Drop and recreate all tables.

    Downgrades to the base revision (removing all schema), then upgrades
    back to head, leaving a freshly migrated, empty database.
    """
    logger.info("Resetting database...")
    config = get_alembic_config()
    command.downgrade(config, "base")
    command.upgrade(config, "head")
25 |
```
--------------------------------------------------------------------------------
/src/basic_memory/sync/background_sync.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 |
3 | from loguru import logger
4 |
5 | from basic_memory.config import get_project_config
6 | from basic_memory.sync import SyncService, WatchService
7 |
8 |
async def sync_and_watch(
    sync_service: SyncService, watch_service: WatchService
):  # pragma: no cover
    """Run a full sync of the project directory, then watch it for changes.

    The full sync runs to completion first; the watch service then handles
    incremental file changes for as long as it runs.
    """

    config = get_project_config()
    logger.info(f"Starting watch service to sync file changes in dir: {config.home}")
    # full sync
    await sync_service.sync(config.home)

    # watch changes
    await watch_service.run()
21 |
22 |
async def create_background_sync_task(
    sync_service: SyncService, watch_service: WatchService
):  # pragma: no cover
    """Schedule sync_and_watch to run as a background asyncio task."""
    task = asyncio.create_task(sync_and_watch(sync_service, watch_service))
    return task
27 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for Basic Memory
4 | title: '[FEATURE] '
5 | labels: enhancement
6 | assignees: ''
7 | ---
8 |
9 | ## Feature Description
10 | A clear and concise description of the feature you'd like to see implemented.
11 |
12 | ## Problem This Feature Solves
13 | Describe the problem or limitation you're experiencing that this feature would address.
14 |
15 | ## Proposed Solution
16 | Describe how you envision this feature working. Include:
17 | - User workflow
18 | - Interface design (if applicable)
19 | - Technical approach (if you have ideas)
20 |
21 | ## Alternative Solutions
22 | Have you considered any alternative solutions or workarounds?
23 |
24 | ## Additional Context
25 | Add any other context, screenshots, or examples about the feature request here.
26 |
27 | ## Impact
28 | How would this feature benefit you and other users of Basic Memory?
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Import services for Basic Memory."""
2 |
3 | from basic_memory.importers.base import Importer
4 | from basic_memory.importers.chatgpt_importer import ChatGPTImporter
5 | from basic_memory.importers.claude_conversations_importer import (
6 | ClaudeConversationsImporter,
7 | )
8 | from basic_memory.importers.claude_projects_importer import ClaudeProjectsImporter
9 | from basic_memory.importers.memory_json_importer import MemoryJsonImporter
10 | from basic_memory.schemas.importer import (
11 | ChatImportResult,
12 | EntityImportResult,
13 | ImportResult,
14 | ProjectImportResult,
15 | )
16 |
17 | __all__ = [
18 | "Importer",
19 | "ChatGPTImporter",
20 | "ClaudeConversationsImporter",
21 | "ClaudeProjectsImporter",
22 | "MemoryJsonImporter",
23 | "ImportResult",
24 | "ChatImportResult",
25 | "EntityImportResult",
26 | "ProjectImportResult",
27 | ]
28 |
```
--------------------------------------------------------------------------------
/tests/mcp/test_server_lifespan_branches.py:
--------------------------------------------------------------------------------
```python
1 | import pytest
2 |
3 | from basic_memory import db
4 | from basic_memory.mcp.server import lifespan, mcp
5 |
6 |
@pytest.mark.asyncio
async def test_mcp_lifespan_sync_disabled_branch(config_manager, monkeypatch):
    """Lifespan should start and stop cleanly when file sync is disabled."""
    config = config_manager.load_config()
    config.cloud_mode = False
    config.sync_changes = False
    config_manager.save_config(config)

    # Entering and exiting the lifespan must not raise.
    async with lifespan(mcp):
        pass
16 |
17 |
@pytest.mark.asyncio
async def test_mcp_lifespan_cloud_mode_branch(config_manager):
    """Lifespan should start and stop cleanly when cloud mode is enabled."""
    config = config_manager.load_config()
    config.cloud_mode = True
    config.sync_changes = True
    config_manager.save_config(config)

    # Entering and exiting the lifespan must not raise.
    async with lifespan(mcp):
        pass
27 |
28 |
@pytest.mark.asyncio
async def test_mcp_lifespan_shuts_down_db_when_engine_was_none(config_manager):
    """Lifespan shutdown must not fail when no DB engine was ever created."""
    # Simulate the state before any database initialization.
    db._engine = None
    async with lifespan(mcp):
        pass
34 |
```
--------------------------------------------------------------------------------
/src/basic_memory/deps/config.py:
--------------------------------------------------------------------------------
```python
1 | """Configuration dependency injection for basic-memory.
2 |
3 | This module provides configuration-related dependencies.
4 | Note: Long-term goal is to minimize direct ConfigManager access
5 | and inject config from composition roots instead.
6 | """
7 |
8 | from typing import Annotated
9 |
10 | from fastapi import Depends
11 |
12 | from basic_memory.config import BasicMemoryConfig, ConfigManager
13 |
14 |
def get_app_config() -> BasicMemoryConfig:  # pragma: no cover
    """Return the current application configuration.

    Transitional dependency: the long-term plan is for composition roots to
    read ConfigManager themselves and inject the config explicitly. Until
    then, this preserves the historical behavior.
    """
    return ConfigManager().config


AppConfigDep = Annotated[BasicMemoryConfig, Depends(get_app_config)]
27 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/exceptions.py:
--------------------------------------------------------------------------------
```python
class FileOperationError(Exception):
    """Raised when a file operation fails."""
5 |
6 |
class EntityNotFoundError(Exception):
    """Raised when an entity cannot be located."""
11 |
12 |
class EntityCreationError(Exception):
    """Raised when creating an entity fails."""
17 |
18 |
class DirectoryOperationError(Exception):
    """Raised when a directory operation fails."""
23 |
24 |
class SyncFatalError(Exception):
    """Raised when sync hits an error that makes continuing impossible.

    Examples of fatal conditions:
    - The project was deleted during sync (FOREIGN KEY constraint)
    - Database corruption
    - Critical system failures

    On this exception the whole sync run should be aborted at once rather
    than attempting to process the remaining files.
    """
38 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/v2/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """API v2 module - ID-based entity references.
2 |
3 | Version 2 of the Basic Memory API uses integer entity IDs as the primary
4 | identifier for improved performance and stability.
5 |
6 | Key changes from v1:
7 | - Entity lookups use integer IDs instead of paths/permalinks
8 | - Direct database queries instead of cascading resolution
9 | - Stable references that don't change with file moves
10 | - Better caching support
11 |
12 | All v2 routers are registered with the /v2 prefix.
13 | """
14 |
15 | from basic_memory.api.v2.routers import (
16 | knowledge_router,
17 | memory_router,
18 | project_router,
19 | resource_router,
20 | search_router,
21 | directory_router,
22 | prompt_router,
23 | importer_router,
24 | )
25 |
26 | __all__ = [
27 | "knowledge_router",
28 | "memory_router",
29 | "project_router",
30 | "resource_router",
31 | "search_router",
32 | "directory_router",
33 | "prompt_router",
34 | "importer_router",
35 | ]
36 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/v2/routers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """V2 API routers."""
2 |
3 | from basic_memory.api.v2.routers.knowledge_router import router as knowledge_router
4 | from basic_memory.api.v2.routers.project_router import router as project_router
5 | from basic_memory.api.v2.routers.memory_router import router as memory_router
6 | from basic_memory.api.v2.routers.search_router import router as search_router
7 | from basic_memory.api.v2.routers.resource_router import router as resource_router
8 | from basic_memory.api.v2.routers.directory_router import router as directory_router
9 | from basic_memory.api.v2.routers.prompt_router import router as prompt_router
10 | from basic_memory.api.v2.routers.importer_router import router as importer_router
11 |
12 | __all__ = [
13 | "knowledge_router",
14 | "project_router",
15 | "memory_router",
16 | "search_router",
17 | "resource_router",
18 | "directory_router",
19 | "prompt_router",
20 | "importer_router",
21 | ]
22 |
```
--------------------------------------------------------------------------------
/.github/workflows/pr-title.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: "Pull Request Title"
2 |
3 | on:
4 | pull_request:
5 | types:
6 | - opened
7 | - edited
8 | - synchronize
9 |
10 | jobs:
11 | main:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: amannn/action-semantic-pull-request@v5
15 | env:
16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
17 | with:
18 | # Configure allowed types based on what we want in our changelog
19 | types: |
20 | feat
21 | fix
22 | chore
23 | docs
24 | style
25 | refactor
26 | perf
27 | test
28 | build
29 | ci
30 |       # Allowed scopes for PR titles (providing a scope is itself optional)
31 | scopes: |
32 | core
33 | cli
34 | api
35 | mcp
36 | sync
37 | ui
38 | deps
39 | installer
40 | # Allow breaking changes (needs "!" after type/scope)
41 | requireScopeForBreakingChange: true
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/clients/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Typed internal API clients for MCP tools.
2 |
3 | These clients encapsulate API paths, error handling, and response validation.
4 | MCP tools become thin adapters that call these clients and format results.
5 |
6 | Usage:
7 | from basic_memory.mcp.clients import KnowledgeClient, SearchClient
8 |
9 | async with get_client() as http_client:
10 | knowledge = KnowledgeClient(http_client, project_id)
11 | entity = await knowledge.create_entity(entity_data)
12 | """
13 |
14 | from basic_memory.mcp.clients.knowledge import KnowledgeClient
15 | from basic_memory.mcp.clients.search import SearchClient
16 | from basic_memory.mcp.clients.memory import MemoryClient
17 | from basic_memory.mcp.clients.directory import DirectoryClient
18 | from basic_memory.mcp.clients.resource import ResourceClient
19 | from basic_memory.mcp.clients.project import ProjectClient
20 |
21 | __all__ = [
22 | "KnowledgeClient",
23 | "SearchClient",
24 | "MemoryClient",
25 | "DirectoryClient",
26 | "ResourceClient",
27 | "ProjectClient",
28 | ]
29 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve Basic Memory
4 | title: '[BUG] '
5 | labels: bug
6 | assignees: ''
7 | ---
8 |
9 | ## Bug Description
10 | A clear and concise description of what the bug is.
11 |
12 | ## Steps To Reproduce
13 | Steps to reproduce the behavior:
14 | 1. Install version '...'
15 | 2. Run command '...'
16 | 3. Use tool/feature '...'
17 | 4. See error
18 |
19 | ## Expected Behavior
20 | A clear and concise description of what you expected to happen.
21 |
22 | ## Actual Behavior
23 | What actually happened, including error messages and output.
24 |
25 | ## Environment
26 | - OS: [e.g. macOS 14.2, Ubuntu 22.04]
27 | - Python version: [e.g. 3.12.1]
28 | - Basic Memory version: [e.g. 0.1.0]
29 | - Installation method: [e.g. pip, uv, source]
30 | - Claude Desktop version (if applicable):
31 |
32 | ## Additional Context
33 | - Configuration files (if relevant)
34 | - Logs or screenshots
35 | - Any special configuration or environment variables
36 |
37 | ## Possible Solution
38 | If you have any ideas on what might be causing the issue or how to fix it, please share them here.
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/directory.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for directory tree operations."""
2 |
3 | from datetime import datetime
4 | from typing import List, Optional, Literal
5 |
6 | from pydantic import BaseModel
7 |
8 |
class DirectoryNode(BaseModel):
    """Directory node in file system.

    Represents either a directory or a file in the content tree; a
    directory carries its contents in ``children``.
    """

    name: str
    file_path: Optional[str] = None  # Original path without leading slash (matches DB)
    directory_path: str  # Path with leading slash for directory navigation
    type: Literal["directory", "file"]
    children: List["DirectoryNode"] = []  # Default to empty list (Pydantic copies defaults per instance)
    title: Optional[str] = None
    permalink: Optional[str] = None
    external_id: Optional[str] = None  # UUID (primary API identifier for v2)
    entity_id: Optional[int] = None  # Internal numeric ID
    entity_type: Optional[str] = None
    content_type: Optional[str] = None
    updated_at: Optional[datetime] = None

    @property
    def has_children(self) -> bool:
        """Return True when this node has at least one child."""
        return bool(self.children)


# Support for recursive model (children reference DirectoryNode itself)
DirectoryNode.model_rebuild()
32 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/e7e1f4367280_add_scan_watermark_tracking_to_project.py:
--------------------------------------------------------------------------------
```python
1 | """Add scan watermark tracking to Project
2 |
3 | Revision ID: e7e1f4367280
4 | Revises: 9d9c1cb7d8f5
5 | Create Date: 2025-10-20 16:42:46.625075
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "e7e1f4367280"
17 | down_revision: Union[str, None] = "9d9c1cb7d8f5"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Adds scan-watermark tracking columns to project (per the revision title).
    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.add_column(sa.Column("last_scan_timestamp", sa.Float(), nullable=True))
        batch_op.add_column(sa.Column("last_file_count", sa.Integer(), nullable=True))

    # ### end Alembic commands ###
29 |
30 |
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the watermark columns.
    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.drop_column("last_file_count")
        batch_op.drop_column("last_scan_timestamp")

    # ### end Alembic commands ###
38 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/delete.py:
--------------------------------------------------------------------------------
```python
1 | """Delete operation schemas for the knowledge graph.
2 |
3 | This module defines the request schemas for removing entities, relations,
4 | and observations from the knowledge graph. Each operation has specific
5 | implications and safety considerations.
6 |
7 | Deletion Hierarchy:
8 | 1. Entity deletion removes the entity and all its relations
9 | 2. Relation deletion only removes the connection between entities
10 | 3. Observation deletion preserves entity and relations
11 |
12 | Key Considerations:
13 | - All deletions are permanent
14 | - Entity deletions cascade to relations
15 | - Files are removed along with entities
16 | - Operations are atomic - they fully succeed or fail
17 | """
18 |
19 | from typing import List, Annotated
20 |
21 | from annotated_types import MinLen
22 | from pydantic import BaseModel
23 |
24 | from basic_memory.schemas.base import Permalink
25 |
26 |
class DeleteEntitiesRequest(BaseModel):
    """Delete one or more entities from the knowledge graph.

    This operation:
    1. Removes the entity from the database
    2. Deletes all observations attached to the entity
    3. Removes all relations where the entity is source or target
    4. Deletes the corresponding markdown file
    """

    # At least one permalink is required; each identifies an entity to delete.
    permalinks: Annotated[List[Permalink], MinLen(1)]
38 |
```
--------------------------------------------------------------------------------
/tests/repository/test_project_info_repository.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the ProjectInfoRepository."""
2 |
3 | import pytest
4 | from sqlalchemy import text
5 |
6 | from basic_memory.repository.project_info_repository import ProjectInfoRepository
7 | from basic_memory.models.project import Project # Add a model reference
8 |
9 |
@pytest.mark.asyncio
async def test_project_info_repository_init(session_maker):
    """Test ProjectInfoRepository initialization."""
    # Create a ProjectInfoRepository
    repository = ProjectInfoRepository(session_maker)

    # Verify it was initialized properly
    assert repository is not None
    assert repository.session_maker == session_maker
    # Model is set to a dummy value (Project is used as a reference here),
    # presumably because this repository issues raw SQL rather than ORM
    # queries for one model — verify against ProjectInfoRepository.
    assert repository.Model is Project
21 |
22 |
@pytest.mark.asyncio
async def test_project_info_repository_execute_query(session_maker):
    """Test ProjectInfoRepository execute_query method."""
    # Create a ProjectInfoRepository
    repository = ProjectInfoRepository(session_maker)

    # Execute a simple query (needs no tables, so it works on an empty DB)
    result = await repository.execute_query(text("SELECT 1 as test"))

    # Verify the result
    assert result is not None
    row = result.fetchone()
    assert row is not None
    assert row[0] == 1
37 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/search_router.py:
--------------------------------------------------------------------------------
```python
1 | """Router for search operations."""
2 |
3 | from fastapi import APIRouter, BackgroundTasks
4 |
5 | from basic_memory.api.routers.utils import to_search_results
6 | from basic_memory.schemas.search import SearchQuery, SearchResponse
7 | from basic_memory.deps import SearchServiceDep, EntityServiceDep
8 |
9 | router = APIRouter(prefix="/search", tags=["search"])
10 |
11 |
@router.post("/", response_model=SearchResponse)
async def search(
    query: SearchQuery,
    search_service: SearchServiceDep,
    entity_service: EntityServiceDep,
    page: int = 1,
    page_size: int = 10,
):
    """Search across all knowledge and documents."""
    # Translate 1-based pagination into limit/offset for the service layer.
    offset = (page - 1) * page_size
    raw_results = await search_service.search(query, limit=page_size, offset=offset)
    return SearchResponse(
        results=await to_search_results(entity_service, raw_results),
        current_page=page,
        page_size=page_size,
    )
30 |
31 |
@router.post("/reindex")
async def reindex(background_tasks: BackgroundTasks, search_service: SearchServiceDep):
    """Recreate and populate the search index.

    The service is handed the request's BackgroundTasks, so indexing work
    can continue after the acknowledgement response is returned.
    """
    await search_service.reindex_all(background_tasks=background_tasks)
    return {"status": "ok", "message": "Reindex initiated"}
37 |
```
--------------------------------------------------------------------------------
/docker-compose-postgres.yml:
--------------------------------------------------------------------------------
```yaml
1 | # Docker Compose configuration for Basic Memory with PostgreSQL
2 | # Use this for local development and testing with Postgres backend
3 | #
4 | # Usage:
5 | # docker-compose -f docker-compose-postgres.yml up -d
6 | # docker-compose -f docker-compose-postgres.yml down
7 |
8 | services:
9 | postgres:
10 | image: postgres:17
11 | container_name: basic-memory-postgres
12 | environment:
13 | # Local development/test credentials - NOT for production
14 | # These values are referenced by tests and justfile commands
15 | POSTGRES_DB: basic_memory
16 | POSTGRES_USER: basic_memory_user
17 | POSTGRES_PASSWORD: dev_password # Simple password for local testing only
18 | ports:
19 | - "5433:5432"
20 | volumes:
21 | - postgres_data:/var/lib/postgresql/data
22 | healthcheck:
23 | test: ["CMD-SHELL", "pg_isready -U basic_memory_user -d basic_memory"]
24 | interval: 10s
25 | timeout: 5s
26 | retries: 5
27 | restart: unless-stopped
28 |
29 | volumes:
30 | # Named volume for Postgres data
31 | postgres_data:
32 | driver: local
33 |
34 |   # Named volume for persistent basic-memory configuration; note that no
35 |   # service in this file mounts it, and the database itself lives in Postgres
36 | basic-memory-config:
37 | driver: local
38 |
39 | # Network configuration (optional)
40 | # networks:
41 | # basic-memory-net:
42 | # driver: bridge
43 |
```
--------------------------------------------------------------------------------
/tests/cli/test_cli_tools.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the Basic Memory CLI tools.
2 |
3 | These tests verify CLI tool functionality. Some tests that previously used
4 | subprocess have been removed due to a pre-existing CLI architecture issue
5 | where ASGI transport doesn't trigger FastAPI lifespan initialization.
6 |
7 | The subprocess-based integration tests are kept in test_cli_integration.py
8 | for future use when the CLI initialization issue is fixed.
9 | """
10 |
11 | import pytest
12 |
13 |
def test_ensure_migrations_functionality(app_config, monkeypatch):
    """Test the database initialization functionality.

    Patches initialize_database with a counting fake and verifies that
    ensure_initialization invokes it exactly once.
    """
    import basic_memory.services.initialization as init_mod

    calls = {"count": 0}

    async def fake_initialize_database(*args, **kwargs):
        calls["count"] += 1

    monkeypatch.setattr(init_mod, "initialize_database", fake_initialize_database)
    init_mod.ensure_initialization(app_config)
    assert calls["count"] == 1
26 |
27 |
def test_ensure_migrations_propagates_errors(app_config, monkeypatch):
    """Test that initialization errors propagate to caller."""
    import basic_memory.services.initialization as init_mod

    # The fake raises immediately; ensure_initialization must not swallow it.
    async def fake_initialize_database(*args, **kwargs):
        raise Exception("Test error")

    monkeypatch.setattr(init_mod, "initialize_database", fake_initialize_database)

    with pytest.raises(Exception, match="Test error"):
        init_mod.ensure_initialization(app_config)
39 |
```
--------------------------------------------------------------------------------
/tests/schemas/test_relation_response_reference_resolution.py:
--------------------------------------------------------------------------------
```python
1 | from basic_memory.schemas.response import RelationResponse
2 |
3 |
def test_relation_response_resolves_from_to_from_dict_fallbacks():
    """Dict input: null permalinks fall back to file_path, to_name to title."""
    data = {
        "permalink": "rel/1",
        "relation_type": "relates_to",
        "context": "ctx",
        "to_name": None,
        "from_entity": {"permalink": None, "file_path": "From.md"},
        "to_entity": {"permalink": None, "file_path": "To.md", "title": "To Title"},
    }

    rel = RelationResponse.model_validate(data)
    assert rel.from_id == "From.md"
    assert rel.to_id == "To.md"
    assert rel.to_name == "To Title"
18 |
19 |
def test_relation_response_resolves_from_to_from_orm_like_object_fallbacks():
    """ORM-style input: same fallback resolution as for dicts."""

    # Stand-in for an ORM entity row.
    class EntityLike:
        def __init__(self, permalink, file_path, title=None):
            self.permalink = permalink
            self.file_path = file_path
            self.title = title

    # Stand-in for an ORM relation row with entity relationships attached.
    class RelationLike:
        def __init__(self):
            self.permalink = "rel/2"
            self.relation_type = "relates_to"
            self.context = "ctx"
            self.to_name = None
            self.from_entity = EntityLike(permalink=None, file_path="From2.md")
            self.to_entity = EntityLike(permalink=None, file_path="To2.md", title="To2 Title")

    rel = RelationResponse.model_validate(RelationLike())
    assert rel.from_id == "From2.md"
    assert rel.to_id == "To2.md"
    assert rel.to_name == "To2 Title"
40 |
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
1 | FROM python:3.12-slim-bookworm
2 |
3 | # Build arguments for user ID and group ID (defaults to 1000)
4 | ARG UID=1000
5 | ARG GID=1000
6 |
7 | # Copy uv from official image
8 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
9 |
10 | # Set environment variables
11 | ENV PYTHONUNBUFFERED=1 \
12 | PYTHONDONTWRITEBYTECODE=1
13 |
14 | # Create a group and user with the provided UID/GID
15 | # Check if the GID already exists, if not create appgroup
16 | RUN (getent group ${GID} || groupadd --gid ${GID} appgroup) && \
17 | useradd --uid ${UID} --gid ${GID} --create-home --shell /bin/bash appuser
18 |
19 | # Copy the project into the image
20 | ADD . /app
21 |
22 | # Sync the project into a new environment, asserting the lockfile is up to date
23 | WORKDIR /app
24 | RUN uv sync --locked
25 |
26 | # Create necessary directories and set ownership
27 | RUN mkdir -p /app/data/basic-memory /app/.basic-memory && \
28 | chown -R appuser:${GID} /app
29 |
30 | # Set default data directory and add venv to PATH
31 | ENV BASIC_MEMORY_HOME=/app/data/basic-memory \
32 | BASIC_MEMORY_PROJECT_ROOT=/app/data \
33 | PATH="/app/.venv/bin:$PATH"
34 |
35 | # Switch to the non-root user
36 | USER appuser
37 |
38 | # Expose port
39 | EXPOSE 8000
40 |
41 | # Health check
42 | HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
43 | CMD basic-memory --version || exit 1
44 |
45 | # Use the basic-memory entrypoint to run the MCP server with default SSE transport
46 | CMD ["basic-memory", "mcp", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
```
--------------------------------------------------------------------------------
/tests/api/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for knowledge graph API routes."""
2 |
3 | from typing import AsyncGenerator
4 |
5 | import pytest
6 | import pytest_asyncio
7 | from fastapi import FastAPI
8 | from httpx import AsyncClient, ASGITransport
9 |
10 | from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
11 | from basic_memory.models import Project
12 |
13 |
@pytest_asyncio.fixture
async def app(test_config, engine_factory, app_config) -> FastAPI:
    """Create FastAPI test application.

    Overrides the config and engine dependencies so routes run against the
    test fixtures instead of real on-disk configuration.
    """
    from basic_memory.api.app import app

    app.dependency_overrides[get_app_config] = lambda: app_config
    app.dependency_overrides[get_project_config] = lambda: test_config.project_config
    app.dependency_overrides[get_engine_factory] = lambda: engine_factory
    return app
23 |
24 |
@pytest_asyncio.fixture
async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
    """Create client using ASGI transport - same as CLI will use."""
    # In-process transport: requests hit the app directly, no network socket.
    async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
        yield client
30 |
31 |
@pytest.fixture
def project_url(test_project: Project) -> str:
    """Create a URL prefix for the project routes.

    This helps tests generate the correct URL for project-scoped routes.
    Returns a path of the form "/<project-permalink>".
    """
    # Make sure this matches what's in tests/conftest.py for test_project creation
    # The permalink should be generated from "Test Project Context"
    return f"/{test_project.permalink}"
41 |
```
--------------------------------------------------------------------------------
/.github/workflows/dev-release.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Dev Release
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | workflow_dispatch: # Allow manual triggering
7 |
8 | jobs:
9 | dev-release:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | id-token: write
13 | contents: write
14 |
15 | steps:
16 | - uses: actions/checkout@v4
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Set up Python
21 | uses: actions/setup-python@v5
22 | with:
23 | python-version: "3.12"
24 |
25 | - name: Install uv
26 | run: |
27 | pip install uv
28 |
29 | - name: Install dependencies and build
30 | run: |
31 | uv venv
32 | uv sync
33 | uv build
34 |
35 | - name: Check if this is a dev version
36 | id: check_version
37 | run: |
38 | VERSION=$(uv run python -c "import basic_memory; print(basic_memory.__version__)")
39 | echo "version=$VERSION" >> $GITHUB_OUTPUT
40 | if [[ "$VERSION" == *"dev"* ]]; then
41 | echo "is_dev=true" >> $GITHUB_OUTPUT
42 | echo "Dev version detected: $VERSION"
43 | else
44 | echo "is_dev=false" >> $GITHUB_OUTPUT
45 | echo "Release version detected: $VERSION, skipping dev release"
46 | fi
47 |
48 | - name: Publish dev version to PyPI
49 | if: steps.check_version.outputs.is_dev == 'true'
50 | uses: pypa/gh-action-pypi-publish@release/v1
51 | with:
52 | password: ${{ secrets.PYPI_TOKEN }}
53 | skip-existing: true # Don't fail if version already exists
```
--------------------------------------------------------------------------------
/tests/cli/test_cli_exit.py:
--------------------------------------------------------------------------------
```python
1 | """Regression tests for CLI command exit behavior.
2 |
3 | These tests verify that CLI commands exit cleanly without hanging,
4 | which was a bug fixed in the database initialization refactor.
5 | """
6 |
7 | import subprocess
8 | from pathlib import Path
9 |
10 |
def test_bm_version_exits_cleanly():
    """Test that 'bm --version' exits cleanly within timeout."""
    project_root = Path(__file__).parent.parent.parent
    # uv run ensures the project's own environment is used.
    proc = subprocess.run(
        ["uv", "run", "bm", "--version"],
        cwd=project_root,
        capture_output=True,
        text=True,
        timeout=10,
    )
    assert "Basic Memory version:" in proc.stdout
    assert proc.returncode == 0
23 |
24 |
def test_bm_help_exits_cleanly():
    """Regression: 'bm --help' must terminate within the timeout."""
    project_root = Path(__file__).parent.parent.parent
    completed = subprocess.run(
        ["uv", "run", "bm", "--help"],
        capture_output=True,
        text=True,
        timeout=10,
        cwd=project_root,
    )
    assert completed.returncode == 0
    assert "Basic Memory" in completed.stdout
36 |
37 |
def test_bm_tool_help_exits_cleanly():
    """Regression: 'bm tool --help' must terminate within the timeout."""
    project_root = Path(__file__).parent.parent.parent
    completed = subprocess.run(
        ["uv", "run", "bm", "tool", "--help"],
        capture_output=True,
        text=True,
        timeout=10,
        cwd=project_root,
    )
    assert completed.returncode == 0
    assert "tool" in completed.stdout.lower()
49 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py:
--------------------------------------------------------------------------------
```python
1 | """relation to_name unique index
2 |
3 | Revision ID: b3c3938bacdb
4 | Revises: 3dae7c7b1564
5 | Create Date: 2025-02-22 14:59:30.668466
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = "b3c3938bacdb"
16 | down_revision: Union[str, None] = "3dae7c7b1564"
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
def upgrade() -> None:
    """Replace the single `uix_relation` unique constraint with two.

    After this migration a relation row must be unique both by resolved
    target (`from_id`, `to_id`, `relation_type`) and by unresolved target
    name (`from_id`, `to_name`, `relation_type`).
    """
    # SQLite doesn't support constraint changes through ALTER
    # Need to recreate table with desired constraints
    with op.batch_alter_table("relation") as batch_op:
        # Drop existing unique constraint
        batch_op.drop_constraint("uix_relation", type_="unique")

        # Add new constraints
        batch_op.create_unique_constraint(
            "uix_relation_from_id_to_id", ["from_id", "to_id", "relation_type"]
        )
        batch_op.create_unique_constraint(
            "uix_relation_from_id_to_name", ["from_id", "to_name", "relation_type"]
        )
35 |
36 |
def downgrade() -> None:
    """Collapse the two unique constraints back into the original `uix_relation`."""
    with op.batch_alter_table("relation") as batch_op:
        # Drop new constraints
        batch_op.drop_constraint("uix_relation_from_id_to_name", type_="unique")
        batch_op.drop_constraint("uix_relation_from_id_to_id", type_="unique")

        # Restore original constraint
        batch_op.create_unique_constraint("uix_relation", ["from_id", "to_id", "relation_type"])
45 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP tools for Basic Memory.
2 |
3 | This package provides the complete set of tools for interacting with
4 | Basic Memory through the MCP protocol. Importing this module registers
5 | all tools with the MCP server.
6 | """
7 |
8 | # Import tools to register them with MCP
9 | from basic_memory.mcp.tools.delete_note import delete_note
10 | from basic_memory.mcp.tools.read_content import read_content
11 | from basic_memory.mcp.tools.build_context import build_context
12 | from basic_memory.mcp.tools.recent_activity import recent_activity
13 | from basic_memory.mcp.tools.read_note import read_note
14 | from basic_memory.mcp.tools.view_note import view_note
15 | from basic_memory.mcp.tools.write_note import write_note
16 | from basic_memory.mcp.tools.search import search_notes
17 | from basic_memory.mcp.tools.canvas import canvas
18 | from basic_memory.mcp.tools.list_directory import list_directory
19 | from basic_memory.mcp.tools.edit_note import edit_note
20 | from basic_memory.mcp.tools.move_note import move_note
21 | from basic_memory.mcp.tools.project_management import (
22 | list_memory_projects,
23 | create_memory_project,
24 | delete_project,
25 | )
26 |
27 | # ChatGPT-compatible tools
28 | from basic_memory.mcp.tools.chatgpt_tools import search, fetch
29 |
30 | __all__ = [
31 | "build_context",
32 | "canvas",
33 | "create_memory_project",
34 | "delete_note",
35 | "delete_project",
36 | "edit_note",
37 | "fetch",
38 | "list_directory",
39 | "list_memory_projects",
40 | "move_note",
41 | "read_content",
42 | "read_note",
43 | "recent_activity",
44 | "search",
45 | "search_notes",
46 | "view_note",
47 | "write_note",
48 | ]
49 |
```
--------------------------------------------------------------------------------
/docs/testing-coverage.md:
--------------------------------------------------------------------------------
```markdown
1 | ## Coverage policy (practical 100%)
2 |
3 | Basic Memory’s test suite intentionally mixes:
4 | - unit tests (fast, deterministic)
5 | - integration tests (real filesystem + real DB via `test-int/`)
6 |
7 | To keep the default CI signal **stable and meaningful**, the default `pytest` coverage report targets **core library logic** and **excludes** a small set of modules that are either:
8 | - highly environment-dependent (OS/DB tuning)
9 | - inherently interactive (CLI)
10 | - background-task orchestration (watchers/sync runners)
11 | - external analytics
12 |
13 | ### What’s excluded (and why)
14 |
15 | Coverage excludes are configured in `pyproject.toml` under `[tool.coverage.report].omit`.
16 |
17 | Current exclusions include:
18 | - `src/basic_memory/cli/**`: interactive wrappers; behavior is validated via higher-level tests and smoke tests.
19 | - `src/basic_memory/db.py`: platform/backend tuning paths (SQLite/Postgres/Windows), covered by integration tests and targeted runs.
20 | - `src/basic_memory/services/initialization.py`: startup orchestration/background tasks; covered indirectly by app/MCP entrypoints.
21 | - `src/basic_memory/sync/sync_service.py`: heavy filesystem↔DB integration; validated in integration suite (not enforced in unit coverage).
22 | - `src/basic_memory/telemetry.py`: external analytics; exercised lightly but excluded from strict coverage gate.
23 |
24 | ### Recommended additional runs
25 |
26 | If you want extra confidence locally/CI:
27 | - **Postgres backend**: run tests with `BASIC_MEMORY_TEST_POSTGRES=1`.
28 | - **Strict backend-complete coverage**: run coverage on SQLite + Postgres and combine the results (recommended).
29 |
30 |
31 |
```
--------------------------------------------------------------------------------
/tests/api/test_relation_background_resolution.py:
--------------------------------------------------------------------------------
```python
1 | """Test that relation resolution happens in the background."""
2 |
3 | import pytest
4 |
5 | from basic_memory.api.routers.knowledge_router import resolve_relations_background
6 |
7 |
@pytest.mark.asyncio
async def test_resolve_relations_background_success():
    """Background relation resolution calls the sync service with the entity id."""

    class RecordingSyncService:
        """Stub that records every entity_id it is asked to resolve."""

        def __init__(self) -> None:
            self.calls: list[int] = []

        async def resolve_relations(self, *, entity_id: int) -> None:
            self.calls.append(entity_id)

    service = RecordingSyncService()

    # Invoke the background function with a fixed id and permalink.
    await resolve_relations_background(service, 123, "test/entity")

    # Exactly one resolution call, keyed on the id we passed in.
    assert service.calls == [123]
29 |
30 |
@pytest.mark.asyncio
async def test_resolve_relations_background_handles_errors():
    """Errors raised by the sync service must not propagate out of the background task."""

    class FailingSyncService:
        """Stub that records the call, then always raises."""

        def __init__(self) -> None:
            self.calls: list[int] = []

        async def resolve_relations(self, *, entity_id: int) -> None:
            self.calls.append(entity_id)
            raise Exception("Test error")

    service = FailingSyncService()

    # Call should not raise - errors are logged.
    await resolve_relations_background(service, 123, "test/entity")

    # The resolution attempt still happened before the error was handled.
    assert service.calls == [123]
53 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py:
--------------------------------------------------------------------------------
```python
1 | """Add mtime and size columns to Entity for sync optimization
2 |
3 | Revision ID: 9d9c1cb7d8f5
4 | Revises: a1b2c3d4e5f6
5 | Create Date: 2025-10-20 05:07:55.173849
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "9d9c1cb7d8f5"
17 | down_revision: Union[str, None] = "a1b2c3d4e5f6"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
def upgrade() -> None:
    """Add nullable `mtime`/`size` columns to `entity` and recreate its project FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("entity", schema=None) as batch_op:
        # Nullable so existing rows need no backfill at migration time.
        batch_op.add_column(sa.Column("mtime", sa.Float(), nullable=True))
        batch_op.add_column(sa.Column("size", sa.Integer(), nullable=True))
        # NOTE(review): the FK is recreated WITHOUT ondelete="CASCADE" while
        # downgrade() restores CASCADE — looks deliberate, but confirm.
        batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
        batch_op.create_foreign_key(
            batch_op.f("fk_entity_project_id"), "project", ["project_id"], ["id"]
        )

    # ### end Alembic commands ###
33 |
34 |
def downgrade() -> None:
    """Drop `mtime`/`size` and restore the CASCADE-deleting project FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
        # Restore the pre-migration FK shape, including ON DELETE CASCADE.
        batch_op.create_foreign_key(
            batch_op.f("fk_entity_project_id"),
            "project",
            ["project_id"],
            ["id"],
            ondelete="CASCADE",
        )
        batch_op.drop_column("size")
        batch_op.drop_column("mtime")

    # ### end Alembic commands ###
50 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/v2/resource.py:
--------------------------------------------------------------------------------
```python
1 | """V2 resource schemas for file content operations."""
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
class CreateResourceRequest(BaseModel):
    """Request to create a new resource file.

    File path is required for new resources since we need to know where
    to create the file.
    """

    # Relative path (1-500 chars) where the new file will be created.
    file_path: str = Field(
        ...,
        description="Path to create the file, relative to project root",
        min_length=1,
        max_length=500,
    )
    # Full text content written to the new file.
    content: str = Field(..., description="File content to write")
20 |
21 |
class UpdateResourceRequest(BaseModel):
    """Request to update an existing resource by entity ID.

    Only content is required - the file path is already known from the entity.
    Optionally can update the file_path to move the file.
    """

    # Replacement content for the existing file.
    content: str = Field(..., description="File content to write")
    # When provided (1-500 chars), the resource is moved to this new path.
    file_path: str | None = Field(
        None,
        description="Optional new file path to move the resource",
        min_length=1,
        max_length=500,
    )
36 |
37 |
class ResourceResponse(BaseModel):
    """Response from resource operations."""

    # Internal DB id; external callers should prefer external_id.
    entity_id: int = Field(..., description="Internal entity ID of the resource")
    external_id: str = Field(..., description="External UUID of the resource for API references")
    file_path: str = Field(..., description="File path of the resource")
    checksum: str = Field(..., description="File content checksum")
    size: int = Field(..., description="File size in bytes")
    # Float timestamps — presumably Unix epoch seconds; confirm at the producer.
    created_at: float = Field(..., description="Creation timestamp")
    modified_at: float = Field(..., description="Modification timestamp")
```
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Docker Image CI
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*' # Trigger on version tags like v1.0.0, v0.13.0, etc.
7 | workflow_dispatch: # Allow manual triggering for testing
8 |
9 | env:
10 | REGISTRY: ghcr.io
11 | IMAGE_NAME: basicmachines-co/basic-memory
12 |
13 | jobs:
14 | docker:
15 | runs-on: ubuntu-latest
16 | permissions:
17 | contents: read
18 | packages: write
19 |
20 | steps:
21 | - name: Checkout repository
22 | uses: actions/checkout@v4
23 | with:
24 | fetch-depth: 0
25 |
26 | - name: Set up Docker Buildx
27 | uses: docker/setup-buildx-action@v3
28 | with:
29 | platforms: linux/amd64,linux/arm64
30 |
31 | - name: Log in to GitHub Container Registry
32 | uses: docker/login-action@v3
33 | with:
34 | registry: ${{ env.REGISTRY }}
35 | username: ${{ github.actor }}
36 | password: ${{ secrets.GITHUB_TOKEN }}
37 |
38 | - name: Extract metadata
39 | id: meta
40 | uses: docker/metadata-action@v5
41 | with:
42 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
43 | tags: |
44 | type=ref,event=branch
45 | type=ref,event=pr
46 | type=semver,pattern={{version}}
47 | type=semver,pattern={{major}}.{{minor}}
48 | type=raw,value=latest,enable={{is_default_branch}}
49 |
50 | - name: Build and push Docker image
51 | uses: docker/build-push-action@v5
52 | with:
53 | context: .
54 | file: ./Dockerfile
55 | platforms: linux/amd64,linux/arm64
56 | push: true
57 | tags: ${{ steps.meta.outputs.tags }}
58 | labels: ${{ steps.meta.outputs.labels }}
59 | cache-from: type=gha
60 | cache-to: type=gha,mode=max
61 |
62 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/a2b3c4d5e6f7_add_search_index_entity_cascade.py:
--------------------------------------------------------------------------------
```python
1 | """Add cascade delete FK from search_index to entity
2 |
3 | Revision ID: a2b3c4d5e6f7
4 | Revises: f8a9b2c3d4e5
5 | Create Date: 2025-12-02 07:00:00.000000
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = "a2b3c4d5e6f7"
16 | down_revision: Union[str, None] = "f8a9b2c3d4e5"
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
def upgrade() -> None:
    """Add FK with CASCADE delete from search_index.entity_id to entity.id.

    This migration is Postgres-only because:
    - SQLite uses FTS5 virtual tables which don't support foreign keys
    - The FK enables automatic cleanup of search_index entries when entities are deleted
    """
    connection = op.get_bind()
    dialect = connection.dialect.name

    # No-op on every dialect except Postgres (see docstring).
    if dialect == "postgresql":
        # First, clean up any orphaned search_index entries where entity no longer exists
        # (adding the FK would otherwise fail validation on orphan rows).
        op.execute("""
            DELETE FROM search_index
            WHERE entity_id IS NOT NULL
            AND entity_id NOT IN (SELECT id FROM entity)
        """)

        # Add FK with CASCADE - nullable FK allows search_index entries without entity_id
        op.create_foreign_key(
            "fk_search_index_entity_id",
            "search_index",
            "entity",
            ["entity_id"],
            ["id"],
            ondelete="CASCADE",
        )
48 |
49 |
def downgrade() -> None:
    """Remove the FK constraint."""
    connection = op.get_bind()
    dialect = connection.dialect.name

    # Postgres-only, mirroring upgrade(); other dialects never had the FK.
    if dialect == "postgresql":
        op.drop_constraint("fk_search_index_entity_id", "search_index", type_="foreignkey")
57 |
```
--------------------------------------------------------------------------------
/tests/sync/test_watch_service_atomic_adds.py:
--------------------------------------------------------------------------------
```python
1 | import pytest
2 | from watchfiles.main import Change
3 |
4 | from basic_memory.sync.watch_service import WatchService
5 |
6 |
@pytest.mark.asyncio
async def test_handle_changes_reclassifies_added_existing_files_as_modified(
    app_config,
    project_repository,
    sync_service,
    test_project,
    project_config,
):
    """Regression: don't mutate `adds` while iterating.

    Some editors perform atomic writes that can show up as "added" events for files
    that already exist and have entities in the DB. We should process these as
    modifications for *all* affected files (not skip half the batch).
    """

    # Factory ignores its project argument and always returns the shared fixture service.
    async def sync_service_factory(_project):
        return sync_service

    watch_service = WatchService(
        app_config=app_config,
        project_repository=project_repository,
        quiet=True,
        sync_service_factory=sync_service_factory,
    )

    # Create two files and sync them so they exist in the DB.
    file_a = project_config.home / "atomic-a.md"
    file_b = project_config.home / "atomic-b.md"
    file_a.write_text("# A\n\n- links_to [[B]]\n", encoding="utf-8")
    file_b.write_text("# B\n", encoding="utf-8")

    await sync_service.sync(project_config.home, project_name=test_project.name)

    # Simulate a watcher batch where both existing files show up as "added".
    changes = {
        (Change.added, str(file_a)),
        (Change.added, str(file_b)),
    }

    await watch_service.handle_changes(test_project, changes)

    # Both should have been processed as "modified" (reclassified), not "new".
    # NOTE(review): assumes state.recent_events holds one event per processed
    # file with an `action` label — confirm against WatchService.
    actions = [e.action for e in watch_service.state.recent_events]
    assert "new" not in actions
    assert actions.count("modified") >= 2
52 |
```
--------------------------------------------------------------------------------
/src/basic_memory/deps/db.py:
--------------------------------------------------------------------------------
```python
1 | """Database dependency injection for basic-memory.
2 |
3 | This module provides database-related dependencies:
4 | - Engine and session maker factories
5 | - Session dependencies for request handling
6 | """
7 |
8 | from typing import Annotated
9 |
10 | from fastapi import Depends, Request
11 | from loguru import logger
12 | from sqlalchemy.ext.asyncio import (
13 | AsyncEngine,
14 | AsyncSession,
15 | async_sessionmaker,
16 | )
17 |
18 | from basic_memory import db
19 | from basic_memory.deps.config import get_app_config
20 |
21 |
async def get_engine_factory(
    request: Request,
) -> tuple[AsyncEngine, async_sessionmaker[AsyncSession]]:  # pragma: no cover
    """Get cached engine and session maker from app state.

    For API requests, returns cached connections from app.state for optimal performance.
    For non-API contexts (CLI), falls back to direct database connection.

    Args:
        request: Incoming request; its ``app.state`` may carry cached connections.

    Returns:
        An ``(engine, session_maker)`` pair.
    """
    # Try to get cached connections from app state (API context).
    # hasattr-probing keeps this safe for Request-like objects outside FastAPI.
    if (
        hasattr(request, "app")
        and hasattr(request.app.state, "engine")
        and hasattr(request.app.state, "session_maker")
    ):
        return request.app.state.engine, request.app.state.session_maker

    # Fallback for non-API contexts (CLI)
    logger.debug("Using fallback database connection for non-API context")
    app_config = get_app_config()
    # NOTE(review): presumed from the helper's name that get_or_create_db caches
    # the engine across calls — confirm in basic_memory.db.
    engine, session_maker = await db.get_or_create_db(app_config.database_path)
    return engine, session_maker
43 |
44 |
45 | EngineFactoryDep = Annotated[
46 | tuple[AsyncEngine, async_sessionmaker[AsyncSession]], Depends(get_engine_factory)
47 | ]
48 |
49 |
async def get_session_maker(engine_factory: EngineFactoryDep) -> async_sessionmaker[AsyncSession]:
    """Return the session-maker half of the (engine, session_maker) pair."""
    return engine_factory[1]
54 |
55 |
56 | SessionMakerDep = Annotated[async_sessionmaker, Depends(get_session_maker)]
57 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/search.py:
--------------------------------------------------------------------------------
```python
1 | """Search prompts for Basic Memory MCP server.
2 |
3 | These prompts help users search and explore their knowledge base.
4 | """
5 |
6 | from typing import Annotated, Optional
7 |
8 | from loguru import logger
9 | from pydantic import Field
10 |
11 | from basic_memory.config import get_project_config
12 | from basic_memory.mcp.async_client import get_client
13 | from basic_memory.mcp.server import mcp
14 | from basic_memory.mcp.tools.utils import call_post
15 | from basic_memory.schemas.base import TimeFrame
16 | from basic_memory.schemas.prompt import SearchPromptRequest
17 |
18 |
@mcp.prompt(
    name="search_knowledge_base",
    description="Search across all content in basic-memory",
)
async def search_prompt(
    query: str,
    timeframe: Annotated[
        Optional[TimeFrame],
        Field(description="How far back to search (e.g. '1d', '1 week')"),
    ] = None,
) -> str:
    """Search across all content in basic-memory.

    This prompt helps search for content in the knowledge base and
    provides helpful context about the results.

    Args:
        query: The search text to look for
        timeframe: Optional timeframe to limit results (e.g. '1d', '1 week')

    Returns:
        Formatted search results with context
    """
    logger.info(f"Searching knowledge base, query: {query}, timeframe: {timeframe}")

    async with get_client() as client:
        # Create request model
        request = SearchPromptRequest(query=query, timeframe=timeframe)

        project_url = get_project_config().project_url

        # Call the prompt API endpoint
        # (exclude_none keeps an omitted timeframe out of the JSON payload)
        response = await call_post(
            client, f"{project_url}/prompt/search", json=request.model_dump(exclude_none=True)
        )

        # Extract the rendered prompt from the response.
        # NOTE(review): assumes the endpoint always returns a "prompt" key;
        # a missing key would surface here as KeyError.
        result = response.json()
        return result["prompt"]
58 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Knowledge graph schema exports.
2 |
3 | This module exports all schema classes to simplify imports.
4 | Rather than importing from individual schema files, you can
5 | import everything from basic_memory.schemas.
6 | """
7 |
8 | # Base types and models
9 | from basic_memory.schemas.base import (
10 | Observation,
11 | EntityType,
12 | RelationType,
13 | Relation,
14 | Entity,
15 | )
16 |
17 | # Delete operation models
18 | from basic_memory.schemas.delete import (
19 | DeleteEntitiesRequest,
20 | )
21 |
22 | # Request models
23 | from basic_memory.schemas.request import (
24 | SearchNodesRequest,
25 | GetEntitiesRequest,
26 | CreateRelationsRequest,
27 | )
28 |
29 | # Response models
30 | from basic_memory.schemas.response import (
31 | SQLAlchemyModel,
32 | ObservationResponse,
33 | RelationResponse,
34 | EntityResponse,
35 | EntityListResponse,
36 | SearchNodesResponse,
37 | DeleteEntitiesResponse,
38 | )
39 |
40 | from basic_memory.schemas.project_info import (
41 | ProjectStatistics,
42 | ActivityMetrics,
43 | SystemStatus,
44 | ProjectInfoResponse,
45 | )
46 |
47 | from basic_memory.schemas.directory import (
48 | DirectoryNode,
49 | )
50 |
51 | from basic_memory.schemas.sync_report import (
52 | SyncReportResponse,
53 | )
54 |
55 | # For convenient imports, export all models
56 | __all__ = [
57 | # Base
58 | "Observation",
59 | "EntityType",
60 | "RelationType",
61 | "Relation",
62 | "Entity",
63 | # Requests
64 | "SearchNodesRequest",
65 | "GetEntitiesRequest",
66 | "CreateRelationsRequest",
67 | # Responses
68 | "SQLAlchemyModel",
69 | "ObservationResponse",
70 | "RelationResponse",
71 | "EntityResponse",
72 | "EntityListResponse",
73 | "SearchNodesResponse",
74 | "DeleteEntitiesResponse",
75 | # Delete Operations
76 | "DeleteEntitiesRequest",
77 | # Project Info
78 | "ProjectStatistics",
79 | "ActivityMetrics",
80 | "SystemStatus",
81 | "ProjectInfoResponse",
82 | # Directory
83 | "DirectoryNode",
84 | # Sync
85 | "SyncReportResponse",
86 | ]
87 |
```
--------------------------------------------------------------------------------
/src/basic_memory/runtime.py:
--------------------------------------------------------------------------------
```python
1 | """Runtime mode resolution for Basic Memory.
2 |
3 | This module centralizes runtime mode detection, ensuring cloud/local/test
4 | determination happens in one place rather than scattered across modules.
5 |
6 | Composition roots (containers) read ConfigManager and use this module
7 | to resolve the runtime mode, then pass the result downstream.
8 | """
9 |
10 | from enum import Enum, auto
11 |
12 |
class RuntimeMode(Enum):
    """Runtime modes for Basic Memory."""

    LOCAL = auto()  # Local standalone mode (default)
    CLOUD = auto()  # Cloud mode with remote sync
    TEST = auto()  # Test environment

    @property
    def is_cloud(self) -> bool:
        """True when running in cloud mode."""
        return self is RuntimeMode.CLOUD

    @property
    def is_local(self) -> bool:
        """True when running in local standalone mode."""
        return self is RuntimeMode.LOCAL

    @property
    def is_test(self) -> bool:
        """True when running in the test environment."""
        return self is RuntimeMode.TEST
31 |
32 |
def resolve_runtime_mode(
    cloud_mode_enabled: bool,
    is_test_env: bool,
) -> RuntimeMode:
    """Resolve the runtime mode from configuration flags.

    This is the single source of truth for mode resolution.
    Composition roots call this with config values they've read.

    Args:
        cloud_mode_enabled: Whether cloud mode is enabled in config
        is_test_env: Whether running in test environment

    Returns:
        The resolved RuntimeMode
    """
    # Test environment wins outright: tests need special handling
    # (no file sync, isolated DB) and must never act as cloud deployments.
    if is_test_env:
        return RuntimeMode.TEST

    # Cloud mode (remote-first auth/sync/API) takes precedence over the
    # default local standalone mode.
    return RuntimeMode.CLOUD if cloud_mode_enabled else RuntimeMode.LOCAL
62 |
```
--------------------------------------------------------------------------------
/tests/mcp/test_tool_project_management.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for MCP project management tools."""
2 |
3 | import pytest
4 | from sqlalchemy import select
5 |
6 | from basic_memory import db
7 | from basic_memory.mcp.tools import list_memory_projects, create_memory_project, delete_project
8 | from basic_memory.models.project import Project
9 |
10 |
@pytest.mark.asyncio
async def test_list_memory_projects_unconstrained(app, test_project):
    """Listing projects without constraints shows every configured project."""
    result = await list_memory_projects.fn()
    assert "Available projects:" in result
    assert f"• {test_project.name}" in result
16 |
17 |
@pytest.mark.asyncio
async def test_list_memory_projects_constrained_env(monkeypatch, app, test_project):
    """BASIC_MEMORY_MCP_PROJECT constrains listing to that single project."""
    # Constrain the MCP server to one project via the environment variable.
    monkeypatch.setenv("BASIC_MEMORY_MCP_PROJECT", test_project.name)
    result = await list_memory_projects.fn()
    assert f"Project: {test_project.name}" in result
    assert "constrained to a single project" in result
24 |
25 |
@pytest.mark.asyncio
async def test_create_and_delete_project_and_name_match_branch(
    app, tmp_path_factory, session_maker
):
    """Create a project via the tool, then delete it by name with a mismatched permalink."""
    # Create a project through the tool (exercises POST + response formatting).
    project_root = tmp_path_factory.mktemp("extra-project-home")
    result = await create_memory_project.fn(
        project_name="My Project",
        project_path=str(project_root),
        set_default=False,
    )
    assert result.startswith("✓")
    assert "My Project" in result

    # Make permalink intentionally not derived from name so delete_project hits the name-match branch.
    async with db.scoped_session(session_maker) as session:
        project = (
            await session.execute(select(Project).where(Project.name == "My Project"))
        ).scalar_one()
        project.permalink = "custom-permalink"
        await session.commit()

    delete_result = await delete_project.fn("My Project")
    assert delete_result.startswith("✓")
50 |
```
--------------------------------------------------------------------------------
/tests/api/test_project_router_operations.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for project router operation endpoints."""
2 |
3 | import pytest
4 |
5 |
@pytest.mark.asyncio
async def test_get_project_info_additional(client, test_graph, project_url):
    """Test additional fields in the project info endpoint."""
    response = await client.get(f"{project_url}/project/info")

    assert response.status_code == 200
    payload = response.json()

    # The endpoint must expose the mapping of all configured projects.
    assert "available_projects" in payload
    projects = payload["available_projects"]
    assert isinstance(projects, dict)

    # Spot-check the structure of the first project entry, if any exist.
    if projects:
        info = next(iter(projects.values()))
        assert "path" in info
        assert "active" in info
        assert "is_default" in info
27 |
28 |
@pytest.mark.asyncio
async def test_project_list_additional(client, project_url):
    """Test additional fields in the project list endpoint."""
    response = await client.get("/projects/projects")

    assert response.status_code == 200
    payload = response.json()

    # The list endpoint must return a non-empty set of projects.
    assert "projects" in payload
    assert len(payload["projects"]) > 0

    # A default project must be identified.
    default_project = payload["default_project"]
    assert default_project

    # It must also appear in the projects list, flagged as the default.
    matches = [p for p in payload["projects"] if p["name"] == default_project]
    assert matches, "Default project should appear in the projects list"
    assert matches[0]["is_default"] is True
56 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py:
--------------------------------------------------------------------------------
```python
1 | """fix project foreign keys
2 |
3 | Revision ID: a1b2c3d4e5f6
4 | Revises: 647e7a75e2cd
5 | Create Date: 2025-08-19 22:06:00.000000
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = "a1b2c3d4e5f6"
16 | down_revision: Union[str, None] = "647e7a75e2cd"
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
def upgrade() -> None:
    """Re-establish foreign key constraints that were lost during project table recreation.

    The migration 647e7a75e2cd recreated the project table but did not re-establish
    the foreign key constraint from entity.project_id to project.id, causing
    foreign key constraint failures when trying to delete projects with related entities.
    """
    # SQLite doesn't allow adding foreign key constraints to existing tables easily
    # We need to be careful and handle the case where the constraint might already exist

    with op.batch_alter_table("entity", schema=None) as batch_op:
        # Try to drop existing foreign key constraint (may not exist)
        try:
            batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
        except Exception:
            # Constraint may not exist, which is fine - we'll create it next.
            # Deliberate best-effort swallow: keeps the migration re-runnable.
            pass

        # Add the foreign key constraint with CASCADE DELETE
        # This ensures that when a project is deleted, all related entities are also deleted
        batch_op.create_foreign_key(
            "fk_entity_project_id", "project", ["project_id"], ["id"], ondelete="CASCADE"
        )
44 |
45 |
def downgrade() -> None:
    """Remove the fk_entity_project_id foreign key constraint added in upgrade()."""
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
50 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py:
--------------------------------------------------------------------------------
```python
1 | """remove required from entity.permalink
2 |
3 | Revision ID: 502b60eaa905
4 | Revises: b3c3938bacdb
5 | Create Date: 2025-02-24 13:33:09.790951
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "502b60eaa905"
17 | down_revision: Union[str, None] = "b3c3938bacdb"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
def upgrade() -> None:
    """Make entity.permalink optional.

    Drops NOT NULL and replaces the plain unique constraint with a partial
    unique index that applies only to markdown entities with a permalink.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=True)
        # Recreate the lookup index as non-unique; uniqueness moves to the partial index below
        batch_op.drop_index("ix_entity_permalink")
        batch_op.create_index(batch_op.f("ix_entity_permalink"), ["permalink"], unique=False)
        batch_op.drop_constraint("uix_entity_permalink", type_="unique")
        # Partial unique index: NULL permalinks and non-markdown content are exempt
        batch_op.create_index(
            "uix_entity_permalink",
            ["permalink"],
            unique=True,
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )

    # ### end Alembic commands ###
37 |
38 |
def downgrade() -> None:
    """Restore the NOT NULL requirement and plain unique constraint on entity.permalink.

    Reverses upgrade(): drops the partial unique index, re-creates the plain
    unique constraint and lookup index, and makes the column required again.
    """
    with op.batch_alter_table("entity", schema=None) as batch_op:
        # The partial index must be dropped with the same WHERE clause it was created with
        batch_op.drop_index(
            "uix_entity_permalink",
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.create_unique_constraint("uix_entity_permalink", ["permalink"])
        batch_op.drop_index(batch_op.f("ix_entity_permalink"))
        # unique=True (was unique=1): same behavior, idiomatic boolean flag
        batch_op.create_index("ix_entity_permalink", ["permalink"], unique=True)
        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=False)
52 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/cloud.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for cloud-related API responses."""
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
class TenantMountInfo(BaseModel):
    """Response from /tenant/mount/info endpoint."""

    # Identity of the tenant plus the bucket its data is mounted from.
    tenant_id: str = Field(..., description="Unique identifier for the tenant")
    bucket_name: str = Field(..., description="S3 bucket name for the tenant")
11 |
12 |
class MountCredentials(BaseModel):
    """Response from /tenant/mount/credentials endpoint."""

    # S3 credential pair used when mounting the tenant bucket.
    access_key: str = Field(..., description="S3 access key for mount")
    secret_key: str = Field(..., description="S3 secret key for mount")
18 |
19 |
class CloudProject(BaseModel):
    """Representation of a cloud project."""

    name: str = Field(..., description="Project name")
    path: str = Field(..., description="Project path on cloud")
25 |
26 |
class CloudProjectList(BaseModel):
    """Response from /proxy/projects/projects endpoint."""

    # Defaults to an empty list so a missing "projects" key deserializes cleanly.
    projects: list[CloudProject] = Field(default_factory=list, description="List of cloud projects")
31 |
32 |
class CloudProjectCreateRequest(BaseModel):
    """Request to create a new cloud project."""

    name: str = Field(..., description="Project name")
    path: str = Field(..., description="Project path (permalink)")
    # Opt-in flag; creation does not change the default project unless requested.
    set_default: bool = Field(default=False, description="Set as default project")
39 |
40 |
class CloudProjectCreateResponse(BaseModel):
    """Response from creating a cloud project."""

    message: str = Field(..., description="Status message about the project creation")
    status: str = Field(..., description="Status of the creation (success or error)")
    default: bool = Field(..., description="True if the project was set as the default")
    # Free-form dicts; exact shape is defined by the server side of this endpoint.
    old_project: dict | None = Field(None, description="Information about the previous project")
    new_project: dict | None = Field(
        None, description="Information about the newly created project"
    )
51 |
```
--------------------------------------------------------------------------------
/tests/mcp/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the MCP server implementation using FastAPI TestClient."""
2 |
3 | from typing import AsyncGenerator
4 |
5 | import pytest
6 | import pytest_asyncio
7 | from fastapi import FastAPI
8 | from httpx import AsyncClient, ASGITransport
9 | from mcp.server import FastMCP
10 |
11 | from basic_memory.api.app import app as fastapi_app
12 | from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
13 | from basic_memory.services.search_service import SearchService
14 | from basic_memory.mcp.server import mcp as mcp_server
15 |
16 |
@pytest.fixture(scope="function")
def mcp() -> FastMCP:
    """Return the module-level MCP server instance for tests."""
    return mcp_server  # pyright: ignore [reportReturnType]
20 |
21 |
@pytest.fixture(scope="function")
def app(app_config, project_config, engine_factory, config_manager) -> FastAPI:
    """Create the FastAPI application wired to test-scoped dependencies."""
    test_app = fastapi_app
    test_app.dependency_overrides.update(
        {
            get_app_config: lambda: app_config,
            get_project_config: lambda: project_config,
            get_engine_factory: lambda: engine_factory,
        }
    )
    return test_app
30 |
31 |
@pytest_asyncio.fixture(scope="function")
async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
    """Yield an AsyncClient that talks to the test app in-process via ASGI."""
    transport = ASGITransport(app=app)
    async with AsyncClient(transport=transport, base_url="http://test") as http_client:
        yield http_client
37 |
38 |
@pytest.fixture
def test_entity_data():
    """Sample payload for creating a single test entity."""
    entity = {
        "title": "Test Entity",
        "entity_type": "test",
        # Empty string instead of None
        "summary": "",
    }
    return {"entities": [entity]}
51 |
52 |
@pytest_asyncio.fixture
async def init_search_index(search_service: SearchService) -> None:
    """Initialize search index. Request this fixture explicitly in tests that need it."""
    await search_service.init_search_index()
57 |
```
--------------------------------------------------------------------------------
/tests/test_runtime.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for runtime mode resolution."""
2 |
3 | from basic_memory.runtime import RuntimeMode, resolve_runtime_mode
4 |
5 |
class TestRuntimeMode:
    """Property checks for the RuntimeMode enum."""

    def test_local_mode_properties(self):
        """LOCAL reports only is_local as True."""
        local = RuntimeMode.LOCAL
        assert local.is_local is True
        assert local.is_cloud is False
        assert local.is_test is False

    def test_cloud_mode_properties(self):
        """CLOUD reports only is_cloud as True."""
        cloud = RuntimeMode.CLOUD
        assert cloud.is_cloud is True
        assert cloud.is_local is False
        assert cloud.is_test is False

    def test_test_mode_properties(self):
        """TEST reports only is_test as True."""
        test = RuntimeMode.TEST
        assert test.is_test is True
        assert test.is_local is False
        assert test.is_cloud is False
26 |
27 |
class TestResolveRuntimeMode:
    """Precedence checks for resolve_runtime_mode."""

    def test_resolves_to_test_when_test_env(self):
        """Test environment takes precedence over cloud mode."""
        resolved = resolve_runtime_mode(cloud_mode_enabled=True, is_test_env=True)
        assert resolved == RuntimeMode.TEST

    def test_resolves_to_cloud_when_enabled(self):
        """Cloud mode wins when enabled outside a test environment."""
        resolved = resolve_runtime_mode(cloud_mode_enabled=True, is_test_env=False)
        assert resolved == RuntimeMode.CLOUD

    def test_resolves_to_local_by_default(self):
        """Local mode is the fallback when nothing else applies."""
        resolved = resolve_runtime_mode(cloud_mode_enabled=False, is_test_env=False)
        assert resolved == RuntimeMode.LOCAL

    def test_test_env_overrides_cloud_mode(self):
        """With both flags set, the test environment wins."""
        resolved = resolve_runtime_mode(cloud_mode_enabled=True, is_test_env=True)
        assert resolved == RuntimeMode.TEST
        assert resolved.is_test is True
        assert resolved.is_cloud is False
53 |
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/schemas.py:
--------------------------------------------------------------------------------
```python
1 | """Schema models for entity markdown files."""
2 |
3 | from datetime import datetime
4 | from typing import List, Optional
5 |
6 | from pydantic import BaseModel
7 |
8 |
class Observation(BaseModel):
    """An observation about an entity."""

    category: Optional[str] = "Note"
    content: str
    tags: Optional[List[str]] = None
    context: Optional[str] = None

    def __str__(self) -> str:
        """Render as a markdown list item: "- [category] content (context)"."""
        suffix = f" ({self.context})" if self.context else ""
        return f"- [{self.category}] {self.content}{suffix}"
22 |
23 |
class Relation(BaseModel):
    """A relation between entities."""

    type: str
    target: str
    context: Optional[str] = None

    def __str__(self) -> str:
        """Render as a markdown list item: "- type [[target]] (context)"."""
        suffix = f" ({self.context})" if self.context else ""
        return f"- {self.type} [[{self.target}]]{suffix}"
36 |
37 |
class EntityFrontmatter(BaseModel):
    """Required frontmatter fields for an entity.

    All values live in the raw ``metadata`` dict; the properties below are
    typed convenience accessors that return None when a key is absent.
    """

    # Raw frontmatter key/value pairs parsed from the markdown file.
    metadata: dict = {}

    @property
    def tags(self) -> Optional[List[str]]:
        """Tags from frontmatter, or None if not set."""
        return self.metadata.get("tags") if self.metadata else None

    @property
    def title(self) -> Optional[str]:
        """Entity title from frontmatter, or None if not set."""
        return self.metadata.get("title") if self.metadata else None

    @property
    def type(self) -> str:
        """Entity type from frontmatter, defaulting to "note"."""
        return self.metadata.get("type", "note") if self.metadata else "note"

    @property
    def permalink(self) -> Optional[str]:
        """Permalink from frontmatter, or None if not set."""
        return self.metadata.get("permalink") if self.metadata else None
58 |
59 |
class EntityMarkdown(BaseModel):
    """Complete entity combining frontmatter, content, and metadata."""

    frontmatter: EntityFrontmatter
    # Markdown body text, if any.
    content: Optional[str] = None
    observations: List[Observation] = []
    relations: List[Relation] = []

    # created, modified will have values after a read; None before.
    created: Optional[datetime] = None
    modified: Optional[datetime] = None
71 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/clients/search.py:
--------------------------------------------------------------------------------
```python
1 | """Typed client for search API operations.
2 |
3 | Encapsulates all /v2/projects/{project_id}/search/* endpoints.
4 | """
5 |
6 | from typing import Any
7 |
8 | from httpx import AsyncClient
9 |
10 | from basic_memory.mcp.tools.utils import call_post
11 | from basic_memory.schemas.search import SearchResponse
12 |
13 |
class SearchClient:
    """Typed client for the /v2/projects/{project_id}/search endpoints.

    Owns API path construction, Pydantic response validation, and
    consistent error handling via the shared call_* helpers.

    Usage:
        async with get_client() as http_client:
            client = SearchClient(http_client, project_id)
            results = await client.search(search_query.model_dump())
    """

    def __init__(self, http_client: AsyncClient, project_id: str):
        """Store the HTTP client and precompute the search base path.

        Args:
            http_client: HTTPX AsyncClient for making requests
            project_id: Project external_id (UUID) for API calls
        """
        self.http_client = http_client
        self.project_id = project_id
        self._base_path = f"/v2/projects/{project_id}/search"

    async def search(
        self,
        query: dict[str, Any],
        *,
        page: int = 1,
        page_size: int = 10,
    ) -> SearchResponse:
        """Run a search query across all content in the knowledge base.

        Args:
            query: Search query dict (from SearchQuery.model_dump())
            page: 1-indexed page number
            page_size: Number of results per page

        Returns:
            SearchResponse with results and pagination

        Raises:
            ToolError: If the request fails
        """
        pagination = {"page": page, "page_size": page_size}
        response = await call_post(
            self.http_client,
            f"{self._base_path}/",
            json=query,
            params=pagination,
        )
        return SearchResponse.model_validate(response.json())
66 |
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/utils.py:
--------------------------------------------------------------------------------
```python
1 | """Utility functions for import services."""
2 |
3 | import re
4 | from datetime import datetime
5 | from typing import Any
6 |
7 |
8 | def clean_filename(name: str | None) -> str: # pragma: no cover
9 | """Clean a string to be used as a filename.
10 |
11 | Args:
12 | name: The string to clean (can be None).
13 |
14 | Returns:
15 | A cleaned string suitable for use as a filename.
16 | """
17 | # Handle None or empty input
18 | if not name:
19 | return "untitled"
20 | # Replace common punctuation and whitespace with underscores
21 | name = re.sub(r"[\s\-,.:/\\\[\]\(\)]+", "_", name)
22 | # Remove any non-alphanumeric or underscore characters
23 | name = re.sub(r"[^\w]+", "", name)
24 | # Ensure the name isn't too long
25 | if len(name) > 100: # pragma: no cover
26 | name = name[:100]
27 | # Ensure the name isn't empty
28 | if not name: # pragma: no cover
29 | name = "untitled"
30 | return name
31 |
32 |
def format_timestamp(timestamp: Any) -> str:  # pragma: no cover
    """Format a timestamp for use in a filename or title.

    Args:
        timestamp: A timestamp in various formats (datetime, ISO string,
            unix epoch as int/float or numeric string).

    Returns:
        "%Y-%m-%d %H:%M:%S" for anything parseable; otherwise the input
        converted to str unchanged.
    """
    if isinstance(timestamp, str):
        try:
            # Try ISO format (accepting a trailing "Z" for UTC)
            timestamp = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
        except ValueError:
            try:
                # Try unix timestamp as string
                timestamp = datetime.fromtimestamp(float(timestamp)).astimezone()
            except (ValueError, OverflowError, OSError):
                # fromtimestamp raises OverflowError/OSError (not just ValueError)
                # for out-of-range values like "1e20"; in every unparseable case
                # the documented fallback is to return the string as-is.
                return timestamp
    elif isinstance(timestamp, (int, float)):
        # Unix timestamp
        timestamp = datetime.fromtimestamp(timestamp).astimezone()

    if isinstance(timestamp, datetime):
        return timestamp.strftime("%Y-%m-%d %H:%M:%S")

    # Return as is if we can't format it
    return str(timestamp)
62 |
```
--------------------------------------------------------------------------------
/tests/importers/test_importer_utils.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for importer utility functions."""
2 |
3 | from datetime import datetime
4 |
5 | from basic_memory.importers.utils import clean_filename, format_timestamp
6 |
7 |
def test_clean_filename():
    """Exercise clean_filename across normal, messy, and degenerate inputs."""
    # Whitespace and punctuation collapse to underscores
    assert clean_filename("Hello World") == "Hello_World"
    assert clean_filename("Hello, World!") == "Hello_World"
    assert clean_filename("File[1]/with\\special:chars") == "File_1_with_special_chars"

    # Output is capped at 100 characters
    assert len(clean_filename("a" * 120)) == 100

    # Empty and None inputs fall back to "untitled" (None fixes #451 - ChatGPT null titles)
    assert clean_filename("") == "untitled"
    assert clean_filename(None) == "untitled"

    # Inputs with only special characters may collapse to almost nothing;
    # accept the implementation-dependent results
    assert clean_filename("!@#$%^&*()") in ["untitled", "_", ""]
33 |
34 |
def test_format_timestamp():
    """Exercise format_timestamp across the supported input types."""
    # datetime objects format directly
    assert format_timestamp(datetime(2023, 1, 1, 12, 30, 45)) == "2023-01-01 12:30:45"

    # ISO-8601 strings (with Z suffix) are parsed
    assert format_timestamp("2023-01-01T12:30:45Z") == "2023-01-01 12:30:45"

    # Unix timestamps as int or numeric string are converted; the exact output
    # depends on the local timezone, so only the year is checked
    assert "2023" in format_timestamp(1672577445)  # 2023-01-01 12:30:45 UTC
    assert "2023" in format_timestamp("1672577445")

    # Unparseable strings are returned unchanged
    assert format_timestamp("not a timestamp") == "not a timestamp"

    # Anything else is stringified
    assert format_timestamp(None) == "None"
61 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/clients/directory.py:
--------------------------------------------------------------------------------
```python
1 | """Typed client for directory API operations.
2 |
3 | Encapsulates all /v2/projects/{project_id}/directory/* endpoints.
4 | """
5 |
6 | from typing import Optional, Any
7 |
8 | from httpx import AsyncClient
9 |
10 | from basic_memory.mcp.tools.utils import call_get
11 |
12 |
class DirectoryClient:
    """Typed client for the /v2/projects/{project_id}/directory endpoints.

    Owns API path construction and consistent error handling via the
    shared call_* helpers.

    Usage:
        async with get_client() as http_client:
            client = DirectoryClient(http_client, project_id)
            nodes = await client.list("/", depth=2)
    """

    def __init__(self, http_client: AsyncClient, project_id: str):
        """Store the HTTP client and precompute the directory base path.

        Args:
            http_client: HTTPX AsyncClient for making requests
            project_id: Project external_id (UUID) for API calls
        """
        self.http_client = http_client
        self.project_id = project_id
        self._base_path = f"/v2/projects/{project_id}/directory"

    async def list(
        self,
        dir_name: str = "/",
        *,
        depth: int = 1,
        file_name_glob: Optional[str] = None,
    ) -> list[dict[str, Any]]:
        """List the contents of a directory.

        Args:
            dir_name: Directory path to list (default: root)
            depth: How many levels to traverse (default: 1)
            file_name_glob: Optional glob pattern to filter files

        Returns:
            List of directory nodes with their contents

        Raises:
            ToolError: If the request fails
        """
        query: dict = {"dir_name": dir_name, "depth": depth}
        if file_name_glob:
            query["file_name_glob"] = file_name_glob

        response = await call_get(
            self.http_client,
            f"{self._base_path}/list",
            params=query,
        )
        return response.json()
71 |
```
--------------------------------------------------------------------------------
/tests/api/test_async_client.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for async_client configuration."""
2 |
3 | from httpx import AsyncClient, ASGITransport, Timeout
4 |
5 | from basic_memory.mcp.async_client import create_client
6 |
7 |
def test_create_client_uses_asgi_when_no_remote_env(config_manager, monkeypatch):
    """create_client falls back to in-process ASGI transport when cloud mode is off."""
    monkeypatch.delenv("BASIC_MEMORY_CLOUD_MODE", raising=False)
    monkeypatch.delenv("BASIC_MEMORY_USE_REMOTE_API", raising=False)

    config = config_manager.load_config()
    config.cloud_mode = False
    config_manager.save_config(config)

    result = create_client()

    assert isinstance(result, AsyncClient)
    assert isinstance(result._transport, ASGITransport)
    assert str(result.base_url) == "http://test"
22 |
23 |
def test_create_client_uses_http_when_cloud_mode_env_set(config_manager, monkeypatch):
    """create_client uses a real HTTP transport when BASIC_MEMORY_CLOUD_MODE is set."""
    monkeypatch.setenv("BASIC_MEMORY_CLOUD_MODE", "True")

    cfg = config_manager.load_config()
    result = create_client()

    assert isinstance(result, AsyncClient)
    assert not isinstance(result._transport, ASGITransport)
    # Cloud mode routes requests through the cloud host's /proxy endpoint
    assert str(result.base_url) == f"{cfg.cloud_host}/proxy/"
35 |
36 |
def test_create_client_configures_extended_timeouts(config_manager, monkeypatch):
    """create_client sets generous timeouts for long-running operations."""
    monkeypatch.delenv("BASIC_MEMORY_CLOUD_MODE", raising=False)
    monkeypatch.delenv("BASIC_MEMORY_USE_REMOTE_API", raising=False)

    config = config_manager.load_config()
    config.cloud_mode = False
    config_manager.save_config(config)

    result = create_client()

    timeout = result.timeout
    assert isinstance(timeout, Timeout)
    # 10 seconds to connect; 30 seconds each for read/write/pool
    assert timeout.connect == 10.0
    assert timeout.read == 30.0
    assert timeout.write == 30.0
    assert timeout.pool == 30.0
54 |
```
--------------------------------------------------------------------------------
/tests/schemas/test_memory_url.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for MemoryUrl parsing."""
2 |
3 | import pytest
4 |
5 | from basic_memory.schemas.memory import memory_url, memory_url_path, normalize_memory_url
6 |
7 |
def test_basic_permalink():
    """A plain permalink parses and round-trips."""
    parsed = memory_url.validate_strings("memory://specs/search")
    assert memory_url_path(parsed) == "specs/search"
    assert str(parsed) == "memory://specs/search"


def test_glob_pattern():
    """A trailing glob is preserved in the path."""
    parsed = memory_url.validate_python("memory://specs/search/*")
    assert memory_url_path(parsed) == "specs/search/*"


def test_related_prefix():
    """The related/ prefix is kept as part of the path."""
    parsed = memory_url.validate_python("memory://related/specs/search")
    assert memory_url_path(parsed) == "related/specs/search"


def test_context_prefix():
    """The context/ prefix is kept as part of the path."""
    parsed = memory_url.validate_python("memory://context/current")
    assert memory_url_path(parsed) == "context/current"


def test_complex_pattern():
    """Multiple glob segments in one URL are preserved."""
    parsed = memory_url.validate_python("memory://specs/*/search/*")
    assert memory_url_path(parsed) == "specs/*/search/*"


def test_path_with_dashes():
    """Dashes and long slugs survive parsing."""
    parsed = memory_url.validate_python("memory://file-sync-and-note-updates-implementation")
    assert memory_url_path(parsed) == "file-sync-and-note-updates-implementation"


def test_str_representation():
    """A parsed URL compares equal to its original string."""
    parsed = memory_url.validate_python("memory://specs/search")
    assert parsed == "memory://specs/search"
49 |
50 |
def test_normalize_memory_url():
    """An already-prefixed URL is returned unchanged."""
    assert normalize_memory_url("memory://specs/search") == "memory://specs/search"


def test_normalize_memory_url_no_prefix():
    """A bare path gains the memory:// prefix."""
    assert normalize_memory_url("specs/search") == "memory://specs/search"


def test_normalize_memory_url_empty():
    """An empty string raises ValueError."""
    with pytest.raises(ValueError, match="cannot be empty"):
        normalize_memory_url("")
67 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/continue_conversation.py:
--------------------------------------------------------------------------------
```python
1 | """Session continuation prompts for Basic Memory MCP server.
2 |
3 | These prompts help users continue conversations and work across sessions,
4 | providing context from previous interactions to maintain continuity.
5 | """
6 |
7 | from typing import Annotated, Optional
8 |
9 | from loguru import logger
10 | from pydantic import Field
11 |
12 | from basic_memory.config import get_project_config
13 | from basic_memory.mcp.async_client import get_client
14 | from basic_memory.mcp.server import mcp
15 | from basic_memory.mcp.tools.utils import call_post
16 | from basic_memory.schemas.base import TimeFrame
17 | from basic_memory.schemas.prompt import ContinueConversationRequest
18 |
19 |
@mcp.prompt(
    name="continue_conversation",
    description="Continue a previous conversation",
)
async def continue_conversation(
    topic: Annotated[Optional[str], Field(description="Topic or keyword to search for")] = None,
    timeframe: Annotated[
        Optional[TimeFrame],
        Field(description="How far back to look for activity (e.g. '1d', '1 week')"),
    ] = None,
) -> str:
    """Continue a previous conversation or work session.

    Helps you pick up where you left off by finding recent context about a
    specific topic, or by showing general recent activity.

    Args:
        topic: Topic or keyword to search for (optional)
        timeframe: How far back to look for activity

    Returns:
        Context from previous sessions on this topic
    """
    logger.info(f"Continuing session, topic: {topic}, timeframe: {timeframe}")

    async with get_client() as client:
        # Build the request payload, dropping unset fields
        payload = ContinueConversationRequest(  # pyright: ignore [reportCallIssue]
            topic=topic, timeframe=timeframe
        ).model_dump(exclude_none=True)

        project_url = get_project_config().project_url

        # The API renders the prompt server-side; we just return it
        response = await call_post(
            client,
            f"{project_url}/prompt/continue-conversation",
            json=payload,
        )
        return response.json()["prompt"]
63 |
```