This is page 1 of 23. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── python-developer.md
│ │ └── system-architect.md
│ └── commands
│ ├── release
│ │ ├── beta.md
│ │ ├── changelog.md
│ │ ├── release-check.md
│ │ └── release.md
│ ├── spec.md
│ └── test-live.md
├── .dockerignore
├── .github
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── documentation.md
│ │ └── feature_request.md
│ └── workflows
│ ├── claude-code-review.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── dev-release.yml
│ ├── docker.yml
│ ├── pr-title.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── ai-assistant-guide-extended.md
│ ├── character-handling.md
│ ├── cloud-cli.md
│ └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│ ├── SPEC-1 Specification-Driven Development Process.md
│ ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│ ├── SPEC-11 Basic Memory API Performance Optimization.md
│ ├── SPEC-12 OpenTelemetry Observability.md
│ ├── SPEC-13 CLI Authentication with Subscription Validation.md
│ ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│ ├── SPEC-16 MCP Cloud Service Consolidation.md
│ ├── SPEC-17 Semantic Search with ChromaDB.md
│ ├── SPEC-18 AI Memory Management Tool.md
│ ├── SPEC-19 Sync Performance and Memory Optimization.md
│ ├── SPEC-2 Slash Commands Reference.md
│ ├── SPEC-3 Agent Definitions.md
│ ├── SPEC-4 Notes Web UI Component Architecture.md
│ ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│ ├── SPEC-6 Explicit Project Parameter Architecture.md
│ ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│ ├── SPEC-8 TigrisFS Integration.md
│ ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│ ├── SPEC-9 Signed Header Tenant Information.md
│ └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│ └── basic_memory
│ ├── __init__.py
│ ├── alembic
│ │ ├── alembic.ini
│ │ ├── env.py
│ │ ├── migrations.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ ├── 3dae7c7b1564_initial_schema.py
│ │ ├── 502b60eaa905_remove_required_from_entity_permalink.py
│ │ ├── 5fe1ab1ccebe_add_projects_table.py
│ │ ├── 647e7a75e2cd_project_constraint_fix.py
│ │ ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│ │ ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│ │ ├── b3c3938bacdb_relation_to_name_unique_index.py
│ │ ├── cc7172b46608_update_search_index_schema.py
│ │ └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── directory_router.py
│ │ │ ├── importer_router.py
│ │ │ ├── knowledge_router.py
│ │ │ ├── management_router.py
│ │ │ ├── memory_router.py
│ │ │ ├── project_router.py
│ │ │ ├── prompt_router.py
│ │ │ ├── resource_router.py
│ │ │ ├── search_router.py
│ │ │ └── utils.py
│ │ └── template_loader.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── auth.py
│ │ ├── commands
│ │ │ ├── __init__.py
│ │ │ ├── cloud
│ │ │ │ ├── __init__.py
│ │ │ │ ├── api_client.py
│ │ │ │ ├── bisync_commands.py
│ │ │ │ ├── cloud_utils.py
│ │ │ │ ├── core_commands.py
│ │ │ │ ├── mount_commands.py
│ │ │ │ ├── rclone_config.py
│ │ │ │ ├── rclone_installer.py
│ │ │ │ ├── upload_command.py
│ │ │ │ └── upload.py
│ │ │ ├── command_utils.py
│ │ │ ├── db.py
│ │ │ ├── import_chatgpt.py
│ │ │ ├── import_claude_conversations.py
│ │ │ ├── import_claude_projects.py
│ │ │ ├── import_memory_json.py
│ │ │ ├── mcp.py
│ │ │ ├── project.py
│ │ │ ├── status.py
│ │ │ ├── sync.py
│ │ │ └── tool.py
│ │ └── main.py
│ ├── config.py
│ ├── db.py
│ ├── deps.py
│ ├── file_utils.py
│ ├── ignore_utils.py
│ ├── importers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chatgpt_importer.py
│ │ ├── claude_conversations_importer.py
│ │ ├── claude_projects_importer.py
│ │ ├── memory_json_importer.py
│ │ └── utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── entity_parser.py
│ │ ├── markdown_processor.py
│ │ ├── plugins.py
│ │ ├── schemas.py
│ │ └── utils.py
│ ├── mcp
│ │ ├── __init__.py
│ │ ├── async_client.py
│ │ ├── project_context.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── ai_assistant_guide.py
│ │ │ ├── continue_conversation.py
│ │ │ ├── recent_activity.py
│ │ │ ├── search.py
│ │ │ └── utils.py
│ │ ├── resources
│ │ │ ├── ai_assistant_guide.md
│ │ │ └── project_info.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── build_context.py
│ │ ├── canvas.py
│ │ ├── chatgpt_tools.py
│ │ ├── delete_note.py
│ │ ├── edit_note.py
│ │ ├── list_directory.py
│ │ ├── move_note.py
│ │ ├── project_management.py
│ │ ├── read_content.py
│ │ ├── read_note.py
│ │ ├── recent_activity.py
│ │ ├── search.py
│ │ ├── utils.py
│ │ ├── view_note.py
│ │ └── write_note.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── knowledge.py
│ │ ├── project.py
│ │ └── search.py
│ ├── repository
│ │ ├── __init__.py
│ │ ├── entity_repository.py
│ │ ├── observation_repository.py
│ │ ├── project_info_repository.py
│ │ ├── project_repository.py
│ │ ├── relation_repository.py
│ │ ├── repository.py
│ │ └── search_repository.py
│ ├── schemas
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloud.py
│ │ ├── delete.py
│ │ ├── directory.py
│ │ ├── importer.py
│ │ ├── memory.py
│ │ ├── project_info.py
│ │ ├── prompt.py
│ │ ├── request.py
│ │ ├── response.py
│ │ ├── search.py
│ │ └── sync_report.py
│ ├── services
│ │ ├── __init__.py
│ │ ├── context_service.py
│ │ ├── directory_service.py
│ │ ├── entity_service.py
│ │ ├── exceptions.py
│ │ ├── file_service.py
│ │ ├── initialization.py
│ │ ├── link_resolver.py
│ │ ├── project_service.py
│ │ ├── search_service.py
│ │ └── service.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── background_sync.py
│ │ ├── sync_service.py
│ │ └── watch_service.py
│ ├── templates
│ │ └── prompts
│ │ ├── continue_conversation.hbs
│ │ └── search.hbs
│ └── utils.py
├── test-int
│ ├── BENCHMARKS.md
│ ├── cli
│ │ ├── test_project_commands_integration.py
│ │ ├── test_sync_commands_integration.py
│ │ └── test_version_integration.py
│ ├── conftest.py
│ ├── mcp
│ │ ├── test_build_context_underscore.py
│ │ ├── test_build_context_validation.py
│ │ ├── test_chatgpt_tools_integration.py
│ │ ├── test_default_project_mode_integration.py
│ │ ├── test_delete_note_integration.py
│ │ ├── test_edit_note_integration.py
│ │ ├── test_list_directory_integration.py
│ │ ├── test_move_note_integration.py
│ │ ├── test_project_management_integration.py
│ │ ├── test_project_state_sync_integration.py
│ │ ├── test_read_content_integration.py
│ │ ├── test_read_note_integration.py
│ │ ├── test_search_integration.py
│ │ ├── test_single_project_mcp_integration.py
│ │ └── test_write_note_integration.py
│ ├── test_db_wal_mode.py
│ ├── test_disable_permalinks_integration.py
│ └── test_sync_performance_benchmark.py
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── conftest.py
│ │ ├── test_async_client.py
│ │ ├── test_continue_conversation_template.py
│ │ ├── test_directory_router.py
│ │ ├── test_importer_router.py
│ │ ├── test_knowledge_router.py
│ │ ├── test_management_router.py
│ │ ├── test_memory_router.py
│ │ ├── test_project_router_operations.py
│ │ ├── test_project_router.py
│ │ ├── test_prompt_router.py
│ │ ├── test_relation_background_resolution.py
│ │ ├── test_resource_router.py
│ │ ├── test_search_router.py
│ │ ├── test_search_template.py
│ │ ├── test_template_loader_helpers.py
│ │ └── test_template_loader.py
│ ├── cli
│ │ ├── conftest.py
│ │ ├── test_bisync_commands.py
│ │ ├── test_cli_tools.py
│ │ ├── test_cloud_authentication.py
│ │ ├── test_cloud_utils.py
│ │ ├── test_ignore_utils.py
│ │ ├── test_import_chatgpt.py
│ │ ├── test_import_claude_conversations.py
│ │ ├── test_import_claude_projects.py
│ │ ├── test_import_memory_json.py
│ │ └── test_upload.py
│ ├── conftest.py
│ ├── db
│ │ └── test_issue_254_foreign_key_constraints.py
│ ├── importers
│ │ ├── test_importer_base.py
│ │ └── test_importer_utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── test_date_frontmatter_parsing.py
│ │ ├── test_entity_parser_error_handling.py
│ │ ├── test_entity_parser.py
│ │ ├── test_markdown_plugins.py
│ │ ├── test_markdown_processor.py
│ │ ├── test_observation_edge_cases.py
│ │ ├── test_parser_edge_cases.py
│ │ ├── test_relation_edge_cases.py
│ │ └── test_task_detection.py
│ ├── mcp
│ │ ├── conftest.py
│ │ ├── test_obsidian_yaml_formatting.py
│ │ ├── test_permalink_collision_file_overwrite.py
│ │ ├── test_prompts.py
│ │ ├── test_resources.py
│ │ ├── test_tool_build_context.py
│ │ ├── test_tool_canvas.py
│ │ ├── test_tool_delete_note.py
│ │ ├── test_tool_edit_note.py
│ │ ├── test_tool_list_directory.py
│ │ ├── test_tool_move_note.py
│ │ ├── test_tool_read_content.py
│ │ ├── test_tool_read_note.py
│ │ ├── test_tool_recent_activity.py
│ │ ├── test_tool_resource.py
│ │ ├── test_tool_search.py
│ │ ├── test_tool_utils.py
│ │ ├── test_tool_view_note.py
│ │ ├── test_tool_write_note.py
│ │ └── tools
│ │ └── test_chatgpt_tools.py
│ ├── Non-MarkdownFileSupport.pdf
│ ├── repository
│ │ ├── test_entity_repository_upsert.py
│ │ ├── test_entity_repository.py
│ │ ├── test_entity_upsert_issue_187.py
│ │ ├── test_observation_repository.py
│ │ ├── test_project_info_repository.py
│ │ ├── test_project_repository.py
│ │ ├── test_relation_repository.py
│ │ ├── test_repository.py
│ │ ├── test_search_repository_edit_bug_fix.py
│ │ └── test_search_repository.py
│ ├── schemas
│ │ ├── test_base_timeframe_minimum.py
│ │ ├── test_memory_serialization.py
│ │ ├── test_memory_url_validation.py
│ │ ├── test_memory_url.py
│ │ ├── test_schemas.py
│ │ └── test_search.py
│ ├── Screenshot.png
│ ├── services
│ │ ├── test_context_service.py
│ │ ├── test_directory_service.py
│ │ ├── test_entity_service_disable_permalinks.py
│ │ ├── test_entity_service.py
│ │ ├── test_file_service.py
│ │ ├── test_initialization.py
│ │ ├── test_link_resolver.py
│ │ ├── test_project_removal_bug.py
│ │ ├── test_project_service_operations.py
│ │ ├── test_project_service.py
│ │ └── test_search_service.py
│ ├── sync
│ │ ├── test_character_conflicts.py
│ │ ├── test_sync_service_incremental.py
│ │ ├── test_sync_service.py
│ │ ├── test_sync_wikilink_issue.py
│ │ ├── test_tmp_files.py
│ │ ├── test_watch_service_edge_cases.py
│ │ ├── test_watch_service_reload.py
│ │ └── test_watch_service.py
│ ├── test_config.py
│ ├── test_db_migration_deduplication.py
│ ├── test_deps.py
│ ├── test_production_cascade_delete.py
│ └── utils
│ ├── test_file_utils.py
│ ├── test_frontmatter_obsidian_compatible.py
│ ├── test_parse_tags.py
│ ├── test_permalink_formatting.py
│ ├── test_utf8_handling.py
│ └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
├── api-performance.md
├── background-relations.md
├── basic-memory-home.md
├── bug-fixes.md
├── chatgpt-integration.md
├── cloud-authentication.md
├── cloud-bisync.md
├── cloud-mode-usage.md
├── cloud-mount.md
├── default-project-mode.md
├── env-file-removal.md
├── env-var-overrides.md
├── explicit-project-parameter.md
├── gitignore-integration.md
├── project-root-env-var.md
├── README.md
└── sqlite-performance.md
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
1 | 3.12
2 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | *.py[cod]
2 | __pycache__/
3 | .pytest_cache/
4 | .coverage
5 | htmlcov/
6 |
7 | # Distribution / packaging
8 | .Python
9 | build/
10 | develop-eggs/
11 | dist/
12 | downloads/
13 | eggs/
14 | .eggs/
15 | lib/
16 | lib64/
17 | parts/
18 | sdist/
19 | var/
20 | wheels/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
24 |
25 | # Installer artifacts
26 | installer/build/
27 | installer/dist/
28 | rw.*.dmg # Temporary disk images
29 |
30 | # Virtual environments
31 | .env
32 | .venv
33 | env/
34 | venv/
35 | ENV/
36 |
37 | # IDE
38 | .idea/
39 | .vscode/
40 | *.swp
41 | *.swo
42 |
43 | # macOS
44 | .DS_Store
45 | .coverage.*
46 |
47 | # obsidian docs:
48 | /docs/.obsidian/
49 | /examples/.obsidian/
50 | /examples/.basic-memory/
51 |
52 |
53 | # claude action
54 | claude-output
55 | **/.claude/settings.local.json
```
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
```
1 | # Git files
2 | .git/
3 | .gitignore
4 | .gitattributes
5 |
6 | # Development files
7 | .vscode/
8 | .idea/
9 | *.swp
10 | *.swo
11 | *~
12 |
13 | # Testing files
14 | tests/
15 | test-int/
16 | .pytest_cache/
17 | .coverage
18 | htmlcov/
19 |
20 | # Build artifacts
21 | build/
22 | dist/
23 | *.egg-info/
24 | __pycache__/
25 | *.pyc
26 | *.pyo
27 | *.pyd
28 | .Python
29 |
30 | # Virtual environments (uv creates these during build)
31 | .venv/
32 | venv/
33 | .env
34 |
35 | # CI/CD files
36 | .github/
37 |
38 | # Documentation (keep README.md and pyproject.toml)
39 | docs/
40 | CHANGELOG.md
41 | CLAUDE.md
42 | CONTRIBUTING.md
43 |
44 | # Example files not needed for runtime
45 | examples/
46 |
47 | # Local development files
48 | .basic-memory/
49 | *.db
50 | *.sqlite3
51 |
52 | # OS files
53 | .DS_Store
54 | Thumbs.db
55 |
56 | # Temporary files
57 | tmp/
58 | temp/
59 | *.tmp
60 | *.log
```
--------------------------------------------------------------------------------
/v15-docs/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # v0.15.0 Documentation Notes
2 |
3 | This directory contains user-focused documentation notes for v0.15.0 changes. These notes are written from the user's perspective and will be used to update the main documentation site (docs.basicmemory.com).
4 |
5 | ## Purpose
6 |
7 | - Capture complete user-facing details of code changes
8 | - Provide examples and migration guidance
9 | - Serve as source material for final documentation
10 | - **Temporary workspace** - will be removed after release docs are complete
11 |
12 | ## Notes Structure
13 |
14 | Each note covers a specific change or feature:
15 | - **What changed** - User-visible behavior changes
16 | - **Why it matters** - Impact and benefits
17 | - **How to use** - Examples and usage patterns
18 | - **Migration** - Steps to adapt (if breaking change)
19 |
20 | ## Coverage
21 |
22 | Based on v0.15.0-RELEASE-DOCS.md:
23 |
24 | ### Breaking Changes
25 | - [x] explicit-project-parameter.md (SPEC-6: #298)
26 | - [x] default-project-mode.md
27 |
28 | ### Configuration
29 | - [x] project-root-env-var.md (#334)
30 | - [x] basic-memory-home.md (clarify relationship with PROJECT_ROOT)
31 | - [x] env-var-overrides.md
32 |
33 | ### Cloud Features
34 | - [x] cloud-authentication.md (SPEC-13: #327)
35 | - [x] cloud-bisync.md (SPEC-9: #322)
36 | - [x] cloud-mount.md (#306)
37 | - [x] cloud-mode-usage.md
38 |
39 | ### Security & Performance
40 | - [x] env-file-removal.md (#330)
41 | - [x] gitignore-integration.md (#314)
42 | - [x] sqlite-performance.md (#316)
43 | - [x] background-relations.md (#319)
44 | - [x] api-performance.md (SPEC-11: #315)
45 |
46 | ### Bug Fixes & Platform
47 | - [x] bug-fixes.md (13+ fixes including #328, #329, #287, #281, #330, Python 3.13)
48 |
49 | ### Integrations
50 | - [x] chatgpt-integration.md (ChatGPT MCP tools, remote only, Pro subscription required)
51 |
52 | ### AI Assistant Guides
53 | - [x] ai-assistant-guide-extended.md (Extended guide for docs site with comprehensive examples)
54 |
55 | ## Usage
56 |
57 | From docs.basicmemory.com repo, reference these notes to create/update:
58 | - Migration guides
59 | - Feature documentation
60 | - Release notes
61 | - Getting started guides
62 |
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | [](https://www.gnu.org/licenses/agpl-3.0)
2 | [](https://badge.fury.io/py/basic-memory)
3 | [](https://www.python.org/downloads/)
4 | [](https://github.com/basicmachines-co/basic-memory/actions)
5 | [](https://github.com/astral-sh/ruff)
6 | 
7 | 
8 | [](https://smithery.ai/server/@basicmachines-co/basic-memory)
9 |
10 | ## 🚀 Basic Memory Cloud is Live!
11 |
12 | - **Cross-device and multi-platform support is here.** Your knowledge graph now works on desktop, web, and mobile - seamlessly synced across all your AI tools (Claude, ChatGPT, Gemini, Claude Code, and Codex)
13 | - **Early Supporter Pricing:** Early users get 25% off forever.
14 | The open source project continues as always. Cloud just makes it work everywhere.
15 |
16 | [Sign up now →](https://basicmemory.com/beta)
17 |
18 | with a 7-day free trial
19 |
20 | # Basic Memory
21 |
22 | Basic Memory lets you build persistent knowledge through natural conversations with Large Language Models (LLMs) like
23 | Claude, while keeping everything in simple Markdown files on your computer. It uses the Model Context Protocol (MCP) to
24 | enable any compatible LLM to read and write to your local knowledge base.
25 |
26 | - Website: https://basicmachines.co
27 | - Documentation: https://memory.basicmachines.co
28 |
29 | ## Pick up your conversation right where you left off
30 |
31 | - AI assistants can load context from local files in a new conversation
32 | - Notes are saved locally as Markdown files in real time
33 | - No project knowledge or special prompting required
34 |
35 | https://github.com/user-attachments/assets/a55d8238-8dd0-454a-be4c-8860dbbd0ddc
36 |
37 | ## Quick Start
38 |
39 | ```bash
40 | # Install with uv (recommended)
41 | uv tool install basic-memory
42 |
43 | # Configure Claude Desktop (edit ~/Library/Application Support/Claude/claude_desktop_config.json)
44 | # Add this to your config:
45 | {
46 | "mcpServers": {
47 | "basic-memory": {
48 | "command": "uvx",
49 | "args": [
50 | "basic-memory",
51 | "mcp"
52 | ]
53 | }
54 | }
55 | }
56 | # Now in Claude Desktop, you can:
57 | # - Write notes with "Create a note about coffee brewing methods"
58 | # - Read notes with "What do I know about pour over coffee?"
59 | # - Search with "Find information about Ethiopian beans"
60 |
61 | ```
62 |
63 | You can view shared context via files in `~/basic-memory` (default directory location).
64 |
65 | ### Alternative Installation via Smithery
66 |
67 | You can use [Smithery](https://smithery.ai/server/@basicmachines-co/basic-memory) to automatically configure Basic
68 | Memory for Claude Desktop:
69 |
70 | ```bash
71 | npx -y @smithery/cli install @basicmachines-co/basic-memory --client claude
72 | ```
73 |
74 | This installs and configures Basic Memory without requiring manual edits to the Claude Desktop configuration file. The
75 | Smithery server hosts the MCP server component, while your data remains stored locally as Markdown files.
76 |
77 | ### Glama.ai
78 |
79 | <a href="https://glama.ai/mcp/servers/o90kttu9ym">
80 | <img width="380" height="200" src="https://glama.ai/mcp/servers/o90kttu9ym/badge" alt="basic-memory MCP server" />
81 | </a>
82 |
83 | ## Why Basic Memory?
84 |
85 | Most LLM interactions are ephemeral - you ask a question, get an answer, and everything is forgotten. Each conversation
86 | starts fresh, without the context or knowledge from previous ones. Current workarounds have limitations:
87 |
88 | - Chat histories capture conversations but aren't structured knowledge
89 | - RAG systems can query documents but don't let LLMs write back
90 | - Vector databases require complex setups and often live in the cloud
91 | - Knowledge graphs typically need specialized tools to maintain
92 |
93 | Basic Memory addresses these problems with a simple approach: structured Markdown files that both humans and LLMs can
94 | read
95 | and write to. The key advantages:
96 |
97 | - **Local-first:** All knowledge stays in files you control
98 | - **Bi-directional:** Both you and the LLM read and write to the same files
99 | - **Structured yet simple:** Uses familiar Markdown with semantic patterns
100 | - **Traversable knowledge graph:** LLMs can follow links between topics
101 | - **Standard formats:** Works with existing editors like Obsidian
102 | - **Lightweight infrastructure:** Just local files indexed in a local SQLite database
103 |
104 | With Basic Memory, you can:
105 |
106 | - Have conversations that build on previous knowledge
107 | - Create structured notes during natural conversations
108 | - Have conversations with LLMs that remember what you've discussed before
109 | - Navigate your knowledge graph semantically
110 | - Keep everything local and under your control
111 | - Use familiar tools like Obsidian to view and edit notes
112 | - Build a personal knowledge base that grows over time
113 | - Sync your knowledge to the cloud with bidirectional synchronization
114 | - Authenticate and manage cloud projects with subscription validation
115 | - Mount cloud storage for direct file access
116 |
117 | ## How It Works in Practice
118 |
119 | Let's say you're exploring coffee brewing methods and want to capture your knowledge. Here's how it works:
120 |
121 | 1. Start by chatting normally:
122 |
123 | ```
124 | I've been experimenting with different coffee brewing methods. Key things I've learned:
125 |
126 | - Pour over gives more clarity in flavor than French press
127 | - Water temperature is critical - around 205°F seems best
128 | - Freshly ground beans make a huge difference
129 | ```
130 |
131 | ... continue conversation.
132 |
133 | 2. Ask the LLM to help structure this knowledge:
134 |
135 | ```
136 | "Let's write a note about coffee brewing methods."
137 | ```
138 |
139 | LLM creates a new Markdown file on your system (which you can see instantly in Obsidian or your editor):
140 |
141 | ```markdown
142 | ---
143 | title: Coffee Brewing Methods
144 | permalink: coffee-brewing-methods
145 | tags:
146 | - coffee
147 | - brewing
148 | ---
149 |
150 | # Coffee Brewing Methods
151 |
152 | ## Observations
153 |
154 | - [method] Pour over provides more clarity and highlights subtle flavors
155 | - [technique] Water temperature at 205°F (96°C) extracts optimal compounds
156 | - [principle] Freshly ground beans preserve aromatics and flavor
157 |
158 | ## Relations
159 |
160 | - relates_to [[Coffee Bean Origins]]
161 | - requires [[Proper Grinding Technique]]
162 | - affects [[Flavor Extraction]]
163 | ```
164 |
165 | The note embeds semantic content and links to other topics via simple Markdown formatting.
166 |
167 | 3. You see this file on your computer in real time in the current project directory (default `~/basic-memory`).
168 |
169 | - Realtime sync can be enabled by running `basic-memory sync --watch`
170 |
171 | 4. In a chat with the LLM, you can reference a topic:
172 |
173 | ```
174 | Look at `coffee-brewing-methods` for context about pour over coffee
175 | ```
176 |
177 | The LLM can now build rich context from the knowledge graph. For example:
178 |
179 | ```
180 | Following relation 'relates_to [[Coffee Bean Origins]]':
181 | - Found information about Ethiopian Yirgacheffe
182 | - Notes on Colombian beans' nutty profile
183 | - Altitude effects on bean characteristics
184 |
185 | Following relation 'requires [[Proper Grinding Technique]]':
186 | - Burr vs. blade grinder comparisons
187 | - Grind size recommendations for different methods
188 | - Impact of consistent particle size on extraction
189 | ```
190 |
191 | Each related document can lead to more context, building a rich semantic understanding of your knowledge base.
192 |
193 | This creates a two-way flow where:
194 |
195 | - Humans write and edit Markdown files
196 | - LLMs read and write through the MCP protocol
197 | - Sync keeps everything consistent
198 | - All knowledge stays in local files.
199 |
200 | ## Technical Implementation
201 |
202 | Under the hood, Basic Memory:
203 |
204 | 1. Stores everything in Markdown files
205 | 2. Uses a SQLite database for searching and indexing
206 | 3. Extracts semantic meaning from simple Markdown patterns
207 | - Files become `Entity` objects
208 | - Each `Entity` can have `Observations`, or facts associated with it
209 | - `Relations` connect entities together to form the knowledge graph
210 | 4. Maintains the local knowledge graph derived from the files
211 | 5. Provides bidirectional synchronization between files and the knowledge graph
212 | 6. Implements the Model Context Protocol (MCP) for AI integration
213 | 7. Exposes tools that let AI assistants traverse and manipulate the knowledge graph
214 | 8. Uses memory:// URLs to reference entities across tools and conversations
215 |
216 | The file format is just Markdown with some simple markup:
217 |
218 | Each Markdown file has:
219 |
220 | ### Frontmatter
221 |
222 | ```markdown
223 | title: <Entity title>
224 | type: <The type of Entity> (e.g. note)
225 | permalink: <a uri slug>
226 |
227 | - <optional metadata> (such as tags)
228 | ```
229 |
230 | ### Observations
231 |
232 | Observations are facts about a topic.
233 | They can be added by creating a Markdown list with a special format that can reference a `category`, `tags` using a
234 | "#" character, and an optional `context`.
235 |
236 | Observation Markdown format:
237 |
238 | ```markdown
239 | - [category] content #tag (optional context)
240 | ```
241 |
242 | Examples of observations:
243 |
244 | ```markdown
245 | - [method] Pour over extracts more floral notes than French press
246 | - [tip] Grind size should be medium-fine for pour over #brewing
247 | - [preference] Ethiopian beans have bright, fruity flavors (especially from Yirgacheffe)
248 | - [fact] Lighter roasts generally contain more caffeine than dark roasts
249 | - [experiment] Tried 1:15 coffee-to-water ratio with good results
250 | - [resource] James Hoffman's V60 technique on YouTube is excellent
251 | - [question] Does water temperature affect extraction of different compounds differently?
252 | - [note] My favorite local shop uses a 30-second bloom time
253 | ```
254 |
255 | ### Relations
256 |
257 | Relations are links to other topics. They define how entities connect in the knowledge graph.
258 |
259 | Markdown format:
260 |
261 | ```markdown
262 | - relation_type [[WikiLink]] (optional context)
263 | ```
264 |
265 | Examples of relations:
266 |
267 | ```markdown
268 | - pairs_well_with [[Chocolate Desserts]]
269 | - grown_in [[Ethiopia]]
270 | - contrasts_with [[Tea Brewing Methods]]
271 | - requires [[Burr Grinder]]
272 | - improves_with [[Fresh Beans]]
273 | - relates_to [[Morning Routine]]
274 | - inspired_by [[Japanese Coffee Culture]]
275 | - documented_in [[Coffee Journal]]
276 | ```
277 |
278 | ## Using with VS Code
279 |
280 | Add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.
281 |
282 | ```json
283 | {
284 | "mcp": {
285 | "servers": {
286 | "basic-memory": {
287 | "command": "uvx",
288 | "args": ["basic-memory", "mcp"]
289 | }
290 | }
291 | }
292 | }
293 | ```
294 |
295 | Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
296 |
297 | ```json
298 | {
299 | "servers": {
300 | "basic-memory": {
301 | "command": "uvx",
302 | "args": ["basic-memory", "mcp"]
303 | }
304 | }
305 | }
306 | ```
307 |
308 | You can use Basic Memory with VS Code to easily retrieve and store information while coding.
309 |
310 | ## Using with Claude Desktop
311 |
312 | Basic Memory is built using the MCP (Model Context Protocol) and works with the Claude desktop app (https://claude.ai/):
313 |
314 | 1. Configure Claude Desktop to use Basic Memory:
315 |
316 | Edit your MCP configuration file (usually located at `~/Library/Application Support/Claude/claude_desktop_config.json`
317 | for OS X):
318 |
319 | ```json
320 | {
321 | "mcpServers": {
322 | "basic-memory": {
323 | "command": "uvx",
324 | "args": [
325 | "basic-memory",
326 | "mcp"
327 | ]
328 | }
329 | }
330 | }
331 | ```
332 |
333 | If you want to use a specific project (see [Multiple Projects](#multiple-projects) below), update your Claude Desktop
334 | config:
335 |
336 | ```json
337 | {
338 | "mcpServers": {
339 | "basic-memory": {
340 | "command": "uvx",
341 | "args": [
342 | "basic-memory",
343 | "mcp",
344 | "--project",
345 | "your-project-name"
346 | ]
347 | }
348 | }
349 | }
350 | ```
351 |
352 | 2. Sync your knowledge:
353 |
354 | ```bash
355 | # One-time sync of local knowledge updates
356 | basic-memory sync
357 |
358 | # Run realtime sync process (recommended)
359 | basic-memory sync --watch
360 | ```
361 |
362 | 3. Cloud features (optional, requires subscription):
363 |
364 | ```bash
365 | # Authenticate with cloud
366 | basic-memory cloud login
367 |
368 | # Bidirectional sync with cloud
369 | basic-memory cloud sync
370 |
371 | # Verify cloud integrity
372 | basic-memory cloud check
373 |
374 | # Mount cloud storage
375 | basic-memory cloud mount
376 | ```
377 |
378 | 4. In Claude Desktop, the LLM can now use these tools:
379 |
380 | **Content Management:**
381 | ```
382 | write_note(title, content, folder, tags) - Create or update notes
383 | read_note(identifier, page, page_size) - Read notes by title or permalink
384 | read_content(path) - Read raw file content (text, images, binaries)
385 | view_note(identifier) - View notes as formatted artifacts
386 | edit_note(identifier, operation, content) - Edit notes incrementally
387 | move_note(identifier, destination_path) - Move notes with database consistency
388 | delete_note(identifier) - Delete notes from knowledge base
389 | ```
390 |
391 | **Knowledge Graph Navigation:**
392 | ```
393 | build_context(url, depth, timeframe) - Navigate knowledge graph via memory:// URLs
394 | recent_activity(type, depth, timeframe) - Find recently updated information
395 | list_directory(dir_name, depth) - Browse directory contents with filtering
396 | ```
397 |
398 | **Search & Discovery:**
399 | ```
400 | search(query, page, page_size) - Search across your knowledge base
401 | ```
402 |
403 | **Project Management:**
404 | ```
405 | list_memory_projects() - List all available projects
406 | create_memory_project(project_name, project_path) - Create new projects
407 | get_current_project() - Show current project stats
408 | sync_status() - Check synchronization status
409 | ```
410 |
411 | **Visualization:**
412 | ```
413 | canvas(nodes, edges, title, folder) - Generate knowledge visualizations
414 | ```
415 |
416 | 5. Example prompts to try:
417 |
418 | ```
419 | "Create a note about our project architecture decisions"
420 | "Find information about JWT authentication in my notes"
421 | "Create a canvas visualization of my project components"
422 | "Read my notes on the authentication system"
423 | "What have I been working on in the past week?"
424 | ```
425 |
426 | ## Further info
427 |
428 | See the [Documentation](https://memory.basicmachines.co/) for more info, including:
429 |
430 | - [Complete User Guide](https://docs.basicmemory.com/user-guide/)
431 | - [CLI tools](https://docs.basicmemory.com/guides/cli-reference/)
432 | - [Cloud CLI and Sync](https://docs.basicmemory.com/guides/cloud-cli/)
433 | - [Managing multiple Projects](https://docs.basicmemory.com/guides/cli-reference/#project)
434 | - [Importing data from OpenAI/Claude Projects](https://docs.basicmemory.com/guides/cli-reference/#import)
435 |
436 | ## License
437 |
438 | AGPL-3.0
439 |
440 | Contributions are welcome. See the [Contributing](CONTRIBUTING.md) guide for info about setting up the project locally
441 | and submitting PRs.
442 |
443 | ## Star History
444 |
445 | <a href="https://www.star-history.com/#basicmachines-co/basic-memory&Date">
446 | <picture>
447 | <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date&theme=dark" />
448 | <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
449 | <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
450 | </picture>
451 | </a>
452 |
453 | Built with ♥️ by Basic Machines
454 |
```
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
```markdown
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | ------- | ------------------ |
7 | | 0.x.x | :white_check_mark: |
8 |
9 | ## Reporting a Vulnerability
10 |
11 | Please report security vulnerabilities privately rather than opening a public issue.
12 |
13 | If you find a vulnerability, please contact [email protected]
14 |
```
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
```markdown
1 | # Code of Conduct
2 |
3 | ## Purpose
4 |
5 | Maintain a respectful and professional environment where contributions can be made without harassment or
6 | negativity.
7 |
8 | ## Standards
9 |
10 | Respectful communication and collaboration are expected. Offensive behavior, harassment, or personal attacks will not be
11 | tolerated.
12 |
13 | ## Reporting Issues
14 |
15 | To report inappropriate behavior, contact [email protected].
16 |
17 | ## Consequences
18 |
19 | Violations of this code may lead to consequences, including being banned from contributing to the project.
20 |
```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
1 | # Contributing to Basic Memory
2 |
3 | Thank you for considering contributing to Basic Memory! This document outlines the process for contributing to the
4 | project and how to get started as a developer.
5 |
6 | ## Getting Started
7 |
8 | ### Development Environment
9 |
10 | 1. **Clone the Repository**:
11 | ```bash
12 | git clone https://github.com/basicmachines-co/basic-memory.git
13 | cd basic-memory
14 | ```
15 |
16 | 2. **Install Dependencies**:
17 | ```bash
18 | # Using just (recommended)
19 | just install
20 |
21 | # Or using uv
22 |    uv pip install -e ".[dev]"
23 |
24 | # Or using pip
25 | pip install -e ".[dev]"
26 | ```
27 |
28 | > **Note**: Basic Memory uses [just](https://just.systems) as a modern command runner. Install with `brew install just` or `cargo install just`.
29 |
30 | 3. **Activate the Virtual Environment**
31 | ```bash
32 | source .venv/bin/activate
33 | ```
34 |
35 | 4. **Run the Tests**:
36 | ```bash
37 | # Run all tests with unified coverage (unit + integration)
38 | just test
39 |
40 | # Run unit tests only (fast, no coverage)
41 | just test-unit
42 |
43 | # Run integration tests only (fast, no coverage)
44 | just test-int
45 |
46 | # Generate HTML coverage report
47 | just coverage
48 |
49 | # Run a specific test
50 | pytest tests/path/to/test_file.py::test_function_name
51 | ```
52 |
53 | ### Development Workflow
54 |
55 | 1. **Fork the Repo**: Fork the repository on GitHub and clone your copy.
56 | 2. **Create a Branch**: Create a new branch for your feature or fix.
57 | ```bash
58 | git checkout -b feature/your-feature-name
59 | # or
60 | git checkout -b fix/issue-you-are-fixing
61 | ```
62 | 3. **Make Your Changes**: Implement your changes with appropriate test coverage.
63 | 4. **Check Code Quality**:
64 | ```bash
65 | # Run all checks at once
66 | just check
67 |
68 | # Or run individual checks
69 | just lint # Run linting
70 | just format # Format code
71 | just type-check # Type checking
72 | ```
73 | 5. **Test Your Changes**: Ensure all tests pass locally and maintain 100% test coverage.
74 | ```bash
75 | just test
76 | ```
77 | 6. **Submit a PR**: Submit a pull request with a detailed description of your changes.
78 |
79 | ## LLM-Assisted Development
80 |
81 | This project is designed for collaborative development between humans and LLMs (Large Language Models):
82 |
83 | 1. **CLAUDE.md**: The repository includes a `CLAUDE.md` file that serves as a project guide for both humans and LLMs.
84 | This file contains:
85 | - Key project information and architectural overview
86 | - Development commands and workflows
87 | - Code style guidelines
88 | - Documentation standards
89 |
90 | 2. **AI-Human Collaborative Workflow**:
91 | - We encourage using LLMs like Claude for code generation, reviews, and documentation
92 | - When possible, save context in markdown files that can be referenced later
93 | - This enables seamless knowledge transfer between different development sessions
94 | - Claude can help with implementation details while you focus on architecture and design
95 |
96 | 3. **Adding to CLAUDE.md**:
97 | - If you discover useful project information or common commands, consider adding them to CLAUDE.md
98 | - This helps all contributors (human and AI) maintain consistent knowledge of the project
99 |
100 | ## Pull Request Process
101 |
102 | 1. **Create a Pull Request**: Open a PR against the `main` branch with a clear title and description.
103 | 2. **Sign the Developer Certificate of Origin (DCO)**: All contributions require signing our DCO, which certifies that
104 | you have the right to submit your contributions. This will be automatically checked by our CLA assistant when you
105 | create a PR.
106 | 3. **PR Description**: Include:
107 | - What the PR changes
108 | - Why the change is needed
109 | - How you tested the changes
110 | - Any related issues (use "Fixes #123" to automatically close issues)
111 | 4. **Code Review**: Wait for code review and address any feedback.
112 | 5. **CI Checks**: Ensure all CI checks pass.
113 | 6. **Merge**: Once approved, a maintainer will merge your PR.
114 |
115 | ## Developer Certificate of Origin
116 |
117 | By contributing to this project, you agree to the [Developer Certificate of Origin (DCO)](CLA.md). This means you
118 | certify that:
119 |
120 | - You have the right to submit your contributions
121 | - You're not knowingly submitting code with patent or copyright issues
122 | - Your contributions are provided under the project's license (AGPL-3.0)
123 |
124 | This is a lightweight alternative to a Contributor License Agreement and helps ensure that all contributions can be
125 | properly incorporated into the project and potentially used in commercial applications.
126 |
127 | ### Signing Your Commits
128 |
129 | Sign your commit:
130 |
131 | **Using the `-s` or `--signoff` flag**:
132 |
133 | ```bash
134 | git commit -s -m "Your commit message"
135 | ```
136 |
137 | This adds a `Signed-off-by` line to your commit message, certifying that you adhere to the DCO.
138 |
139 | The sign-off certifies that you have the right to submit your contribution under the project's license and verifies your
140 | agreement to the DCO.
141 |
142 | ## Code Style Guidelines
143 |
144 | - **Python Version**: Python 3.12+ with full type annotations (3.12+ required for type parameter syntax)
145 | - **Line Length**: 100 characters maximum
146 | - **Formatting**: Use ruff for consistent styling
147 | - **Import Order**: Standard lib, third-party, local imports
148 | - **Naming**: Use snake_case for functions/variables, PascalCase for classes
149 | - **Documentation**: Add docstrings to public functions, classes, and methods
150 | - **Type Annotations**: Use type hints for all functions and methods
151 |
152 | ## Testing Guidelines
153 |
154 | ### Test Structure
155 |
156 | Basic Memory uses two test directories with unified coverage reporting:
157 |
158 | - **`tests/`**: Unit tests that test individual components in isolation
159 | - Fast execution with extensive mocking
160 | - Test individual functions, classes, and modules
161 | - Run with: `just test-unit` (no coverage, fast)
162 |
163 | - **`test-int/`**: Integration tests that test real-world scenarios
164 | - Test full workflows with real database and file operations
165 | - Include performance benchmarks
166 | - More realistic but slower than unit tests
167 | - Run with: `just test-int` (no coverage, fast)
168 |
169 | ### Running Tests
170 |
171 | ```bash
172 | # Run all tests with unified coverage report
173 | just test
174 |
175 | # Run only unit tests (fast iteration)
176 | just test-unit
177 |
178 | # Run only integration tests
179 | just test-int
180 |
181 | # Generate HTML coverage report
182 | just coverage
183 |
184 | # Run specific test
185 | pytest tests/path/to/test_file.py::test_function_name
186 |
187 | # Run tests excluding benchmarks
188 | pytest -m "not benchmark"
189 |
190 | # Run only benchmark tests
191 | pytest -m benchmark test-int/test_sync_performance_benchmark.py
192 | ```
193 |
194 | ### Performance Benchmarks
195 |
196 | The `test-int/test_sync_performance_benchmark.py` file contains performance benchmarks that measure sync and indexing speed:
197 |
198 | - `test_benchmark_sync_100_files` - Small repository performance
199 | - `test_benchmark_sync_500_files` - Medium repository performance
200 | - `test_benchmark_sync_1000_files` - Large repository performance (marked slow)
201 | - `test_benchmark_resync_no_changes` - Re-sync performance baseline
202 |
203 | Run benchmarks with:
204 | ```bash
205 | # Run all benchmarks (excluding slow ones)
206 | pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"
207 |
208 | # Run all benchmarks including slow ones
209 | pytest test-int/test_sync_performance_benchmark.py -v -m benchmark
210 |
211 | # Run specific benchmark
212 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_100_files -v
213 | ```
214 |
215 | See `test-int/BENCHMARKS.md` for detailed benchmark documentation.
216 |
217 | ### Testing Best Practices
218 |
219 | - **Coverage Target**: We aim for high test coverage for all code
220 | - **Test Framework**: Use pytest for unit and integration tests
221 | - **Mocking**: Avoid mocking in integration tests; use sparingly in unit tests
222 | - **Edge Cases**: Test both normal operation and edge cases
223 | - **Database Testing**: Use in-memory SQLite for testing database operations
224 | - **Fixtures**: Use async pytest fixtures for setup and teardown
225 | - **Markers**: Use `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
226 |
227 | ## Release Process
228 |
229 | Basic Memory uses automatic versioning based on git tags with `uv-dynamic-versioning`. Here's how releases work:
230 |
231 | ### Version Management
232 | - **Development versions**: Automatically generated from git commits (e.g., `0.12.4.dev26+468a22f`)
233 | - **Beta releases**: Created by tagging with beta suffixes (e.g., `git tag v0.13.0b1`)
234 | - **Stable releases**: Created by tagging with version numbers (e.g., `git tag v0.13.0`)
235 |
236 | ### Release Workflows
237 |
238 | #### Development Builds
239 | - Automatically published to PyPI on every commit to `main`
240 | - Version format: `0.12.4.dev26+468a22f` (base version + dev + commit count + hash)
241 | - Users install with: `pip install basic-memory --pre --force-reinstall`
242 |
243 | #### Beta Releases
244 | 1. Create and push a beta tag: `git tag v0.13.0b1 && git push origin v0.13.0b1`
245 | 2. GitHub Actions automatically builds and publishes to PyPI
246 | 3. Users install with: `pip install basic-memory --pre`
247 |
248 | #### Stable Releases
249 | 1. Create and push a version tag: `git tag v0.13.0 && git push origin v0.13.0`
250 | 2. GitHub Actions automatically:
251 | - Builds the package with version `0.13.0`
252 | - Creates GitHub release with auto-generated notes
253 | - Publishes to PyPI
254 | 3. Users install with: `pip install basic-memory`
255 |
256 | ### For Contributors
257 | - No manual version bumping required
258 | - Versions are automatically derived from git tags
259 | - Focus on code changes, not version management
260 |
261 | ## Creating Issues
262 |
263 | If you're planning to work on something, please create an issue first to discuss the approach. Include:
264 |
265 | - A clear title and description
266 | - Steps to reproduce if reporting a bug
267 | - Expected behavior vs. actual behavior
268 | - Any relevant logs or screenshots
269 | - Your proposed solution, if you have one
270 |
271 | ## Code of Conduct
272 |
273 | All contributors must follow the [Code of Conduct](CODE_OF_CONDUCT.md).
274 |
275 | ## Thank You!
276 |
277 | Your contributions help make Basic Memory better. We appreciate your time and effort!
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
1 | # CLAUDE.md - Basic Memory Project Guide
2 |
3 | ## Project Overview
4 |
5 | Basic Memory is a local-first knowledge management system built on the Model Context Protocol (MCP). It enables
6 | bidirectional communication between LLMs (like Claude) and markdown files, creating a personal knowledge graph that can
7 | be traversed using links between documents.
8 |
9 | ## CODEBASE DEVELOPMENT
10 |
11 | ### Project information
12 |
13 | See the [README.md](README.md) file for a project overview.
14 |
15 | ### Build and Test Commands
16 |
17 | - Install: `just install` or `pip install -e ".[dev]"`
18 | - Run all tests (with coverage): `just test` - Runs both unit and integration tests with unified coverage
19 | - Run unit tests only: `just test-unit` - Fast, no coverage
20 | - Run integration tests only: `just test-int` - Fast, no coverage
21 | - Generate HTML coverage: `just coverage` - Opens in browser
22 | - Single test: `pytest tests/path/to/test_file.py::test_function_name`
23 | - Run benchmarks: `pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"`
24 | - Lint: `just lint` or `ruff check . --fix`
25 | - Type check: `just typecheck` or `uv run pyright`
26 | - Format: `just format` or `uv run ruff format .`
27 | - Run all code checks: `just check` (runs lint, format, typecheck, test)
28 | - Create db migration: `just migration "Your migration message"`
29 | - Run development MCP Inspector: `just run-inspector`
30 |
31 | **Note:** Project requires Python 3.12+ (uses type parameter syntax and `type` aliases introduced in 3.12)
32 |
33 | ### Test Structure
34 |
35 | - `tests/` - Unit tests for individual components (mocked, fast)
36 | - `test-int/` - Integration tests for real-world scenarios (no mocks, realistic)
37 | - Both directories are covered by unified coverage reporting
38 | - Benchmark tests in `test-int/` are marked with `@pytest.mark.benchmark`
39 | - Slow tests are marked with `@pytest.mark.slow`
40 |
41 | ### Code Style Guidelines
42 |
43 | - Line length: 100 characters max
44 | - Python 3.12+ with full type annotations (uses type parameters and type aliases)
45 | - Format with ruff (consistent styling)
46 | - Import order: standard lib, third-party, local imports
47 | - Naming: snake_case for functions/variables, PascalCase for classes
48 | - Prefer async patterns with SQLAlchemy 2.0
49 | - Use Pydantic v2 for data validation and schemas
50 | - CLI uses Typer for command structure
51 | - API uses FastAPI for endpoints
52 | - Follow the repository pattern for data access
53 | - Tools communicate to api routers via the httpx ASGI client (in process)
54 |
55 | ### Codebase Architecture
56 |
57 | - `/alembic` - Alembic db migrations
58 | - `/api` - FastAPI implementation of REST endpoints
59 | - `/cli` - Typer command-line interface
60 | - `/markdown` - Markdown parsing and processing
61 | - `/mcp` - Model Context Protocol server implementation
62 | - `/models` - SQLAlchemy ORM models
63 | - `/repository` - Data access layer
64 | - `/schemas` - Pydantic models for validation
65 | - `/services` - Business logic layer
66 | - `/sync` - File synchronization services
67 |
68 | ### Development Notes
69 |
70 | - MCP tools are defined in src/basic_memory/mcp/tools/
71 | - MCP prompts are defined in src/basic_memory/mcp/prompts/
72 | - MCP tools should be atomic, composable operations
73 | - Use `textwrap.dedent()` for multi-line string formatting in prompts and tools
74 | - MCP Prompts are used to invoke tools and format content with instructions for an LLM
75 | - Schema changes require Alembic migrations
76 | - SQLite is used for indexing and full text search, files are source of truth
77 | - Testing uses pytest with asyncio support (strict mode)
78 | - Unit tests (`tests/`) use mocks when necessary; integration tests (`test-int/`) use real implementations
79 | - Test database uses in-memory SQLite
80 | - Each test runs in a standalone environment with in-memory SQLite and tmp_file directory
81 | - Performance benchmarks are in `test-int/test_sync_performance_benchmark.py`
82 | - Use pytest markers: `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
83 |
84 | ### Async Client Pattern (Important!)
85 |
86 | **All MCP tools and CLI commands use the context manager pattern for HTTP clients:**
87 |
88 | ```python
89 | from basic_memory.mcp.async_client import get_client
90 |
91 | async def my_mcp_tool():
92 | async with get_client() as client:
93 | # Use client for API calls
94 | response = await call_get(client, "/path")
95 | return response
96 | ```
97 |
98 | **Do NOT use:**
99 | - ❌ `from basic_memory.mcp.async_client import client` (deprecated module-level client)
100 | - ❌ Manual auth header management
101 | - ❌ `inject_auth_header()` (deleted)
102 |
103 | **Key principles:**
104 | - Auth happens at client creation, not per-request
105 | - Proper resource management via context managers
106 | - Supports three modes: Local (ASGI), CLI cloud (HTTP + auth), Cloud app (factory injection)
107 | - Factory pattern enables dependency injection for cloud consolidation
108 |
109 | **For cloud app integration:**
110 | ```python
111 | from basic_memory.mcp import async_client
112 |
113 | # Set custom factory before importing tools
114 | async_client.set_client_factory(your_custom_factory)
115 | ```
116 |
117 | See SPEC-16 for full context manager refactor details.
118 |
119 | ## BASIC MEMORY PRODUCT USAGE
120 |
121 | ### Knowledge Structure
122 |
123 | - Entity: Any concept, document, or idea represented as a markdown file
124 | - Observation: A categorized fact about an entity (`- [category] content`)
125 | - Relation: A directional link between entities (`- relation_type [[Target]]`)
126 | - Frontmatter: YAML metadata at the top of markdown files
127 | - Knowledge representation follows precise markdown format:
128 | - Observations with [category] prefixes
129 | - Relations with WikiLinks [[Entity]]
130 | - Frontmatter with metadata
131 |
132 | ### Basic Memory Commands
133 |
134 | **Local Commands:**
135 | - Sync knowledge: `basic-memory sync` or `basic-memory sync --watch`
136 | - Import from Claude: `basic-memory import claude conversations`
137 | - Import from ChatGPT: `basic-memory import chatgpt`
138 | - Import from Memory JSON: `basic-memory import memory-json`
139 | - Check sync status: `basic-memory status`
140 | - Tool access: `basic-memory tools` (provides CLI access to MCP tools)
141 | - Guide: `basic-memory tools basic-memory-guide`
142 | - Continue: `basic-memory tools continue-conversation --topic="search"`
143 |
144 | **Cloud Commands (requires subscription):**
145 | - Authenticate: `basic-memory cloud login`
146 | - Logout: `basic-memory cloud logout`
147 | - Bidirectional sync: `basic-memory cloud sync`
148 | - Integrity check: `basic-memory cloud check`
149 | - Mount cloud storage: `basic-memory cloud mount`
150 | - Unmount cloud storage: `basic-memory cloud unmount`
151 |
152 | ### MCP Capabilities
153 |
154 | - Basic Memory exposes these MCP tools to LLMs:
155 |
156 | **Content Management:**
157 | - `write_note(title, content, folder, tags)` - Create/update markdown notes with semantic observations and relations
158 | - `read_note(identifier, page, page_size)` - Read notes by title, permalink, or memory:// URL with knowledge graph awareness
159 | - `read_content(path)` - Read raw file content (text, images, binaries) without knowledge graph processing
160 | - `view_note(identifier, page, page_size)` - View notes as formatted artifacts for better readability
161 | - `edit_note(identifier, operation, content)` - Edit notes incrementally (append, prepend, find/replace, replace_section)
162 | - `move_note(identifier, destination_path)` - Move notes to new locations, updating database and maintaining links
163 | - `delete_note(identifier)` - Delete notes from the knowledge base
164 |
165 | **Knowledge Graph Navigation:**
166 | - `build_context(url, depth, timeframe)` - Navigate the knowledge graph via memory:// URLs for conversation continuity
167 | - `recent_activity(type, depth, timeframe)` - Get recently updated information with specified timeframe (e.g., "1d", "1 week")
168 | - `list_directory(dir_name, depth, file_name_glob)` - Browse directory contents with filtering and depth control
169 |
170 | **Search & Discovery:**
171 | - `search_notes(query, page, page_size, search_type, types, entity_types, after_date)` - Full-text search across all content with advanced filtering options
172 |
173 | **Project Management:**
174 | - `list_memory_projects()` - List all available projects with their status
175 | - `create_memory_project(project_name, project_path, set_default)` - Create new Basic Memory projects
176 | - `delete_project(project_name)` - Delete a project from configuration
177 | - `get_current_project()` - Get current project information and stats
178 | - `sync_status()` - Check file synchronization and background operation status
179 |
180 | **Visualization:**
181 | - `canvas(nodes, edges, title, folder)` - Generate Obsidian canvas files for knowledge graph visualization
182 |
183 | - MCP Prompts for better AI interaction:
184 | - `ai_assistant_guide()` - Guidance on effectively using Basic Memory tools for AI assistants
185 | - `continue_conversation(topic, timeframe)` - Continue previous conversations with relevant historical context
186 | - `search(query, after_date)` - Search with detailed, formatted results for better context understanding
187 | - `recent_activity(timeframe)` - View recently changed items with formatted output
188 | - `json_canvas_spec()` - Full JSON Canvas specification for Obsidian visualization
189 |
190 | ### Cloud Features (v0.15.0+)
191 |
192 | Basic Memory now supports cloud synchronization and storage (requires active subscription):
193 |
194 | **Authentication:**
195 | - JWT-based authentication with subscription validation
196 | - Secure session management with token refresh
197 | - Support for multiple cloud projects
198 |
199 | **Bidirectional Sync:**
200 | - rclone bisync integration for two-way synchronization
201 | - Conflict resolution and integrity verification
202 | - Real-time sync with change detection
203 | - Mount/unmount cloud storage for direct file access
204 |
205 | **Cloud Project Management:**
206 | - Create and manage projects in the cloud
207 | - Toggle between local and cloud modes
208 | - Per-project sync configuration
209 | - Subscription-based access control
210 |
211 | **Security & Performance:**
212 | - Removed .env file loading for improved security
213 | - .gitignore integration (respects gitignored files)
214 | - WAL mode for SQLite performance
215 | - Background relation resolution (non-blocking startup)
216 | - API performance optimizations (SPEC-11)
217 |
218 | ## AI-Human Collaborative Development
219 |
220 | Basic Memory emerged from and enables a new kind of development process that combines human and AI capabilities. Instead
221 | of using AI just for code generation, we've developed a true collaborative workflow:
222 |
223 | 1. AI (LLM) writes initial implementation based on specifications and context
224 | 2. Human reviews, runs tests, and commits code with any necessary adjustments
225 | 3. Knowledge persists across conversations using Basic Memory's knowledge graph
226 | 4. Development continues seamlessly across different AI sessions with consistent context
227 | 5. Results improve through iterative collaboration and shared understanding
228 |
229 | This approach has allowed us to tackle more complex challenges and build a more robust system than either humans or AI
230 | could achieve independently.
231 |
232 | ## GitHub Integration
233 |
234 | Basic Memory has taken AI-Human collaboration to the next level by integrating Claude directly into the development workflow through GitHub:
235 |
236 | ### GitHub MCP Tools
237 |
238 | Using the GitHub Model Context Protocol server, Claude can now:
239 |
240 | - **Repository Management**:
241 | - View repository files and structure
242 | - Read file contents
243 | - Create new branches
244 | - Create and update files
245 |
246 | - **Issue Management**:
247 | - Create new issues
248 | - Comment on existing issues
249 | - Close and update issues
250 | - Search across issues
251 |
252 | - **Pull Request Workflow**:
253 | - Create pull requests
254 | - Review code changes
255 | - Add comments to PRs
256 |
257 | This integration enables Claude to participate as a full team member in the development process, not just as a code generation tool. Claude's GitHub account ([bm-claudeai](https://github.com/bm-claudeai)) is a member of the Basic Machines organization with direct contributor access to the codebase.
258 |
259 | ### Collaborative Development Process
260 |
261 | With GitHub integration, the development workflow includes:
262 |
263 | 1. **Direct code review** - Claude can analyze PRs and provide detailed feedback
264 | 2. **Contribution tracking** - All of Claude's contributions are properly attributed in the Git history
265 | 3. **Branch management** - Claude can create feature branches for implementations
266 | 4. **Documentation maintenance** - Claude can keep documentation updated as the code evolves
267 |
268 | This level of integration represents a new paradigm in AI-human collaboration, where the AI assistant becomes a full-fledged team member rather than just a tool for generating code snippets.
269 |
```
--------------------------------------------------------------------------------
/tests/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
1 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """CLI tools for basic-memory"""
2 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP server for basic-memory."""
2 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory API module."""
2 |
3 | from .app import app
4 |
5 | __all__ = ["app"]
6 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/server.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Basic Memory FastMCP server.
3 | """
4 |
5 | from fastmcp import FastMCP
6 |
7 | mcp = FastMCP(
8 | name="Basic Memory",
9 | )
10 |
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
1 | import os
2 |
3 | # set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
4 | os.environ["BASIC_MEMORY_ENV"] = "test"
5 |
```
--------------------------------------------------------------------------------
/src/basic_memory/sync/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory sync services."""
2 |
3 | from .sync_service import SyncService
4 | from .watch_service import WatchService
5 |
6 | __all__ = ["SyncService", "WatchService"]
7 |
```
--------------------------------------------------------------------------------
/src/basic_memory/models/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base model class for SQLAlchemy models."""
2 |
3 | from sqlalchemy.ext.asyncio import AsyncAttrs
4 | from sqlalchemy.orm import DeclarativeBase
5 |
6 |
7 | class Base(AsyncAttrs, DeclarativeBase):
8 | """Base class for all models"""
9 |
10 | pass
11 |
```
--------------------------------------------------------------------------------
/src/basic_memory/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
2 |
3 | # Package version - updated by release automation
4 | __version__ = "0.15.2"
5 |
6 | # API version for FastAPI - independent of package version
7 | __api_version__ = "v0"
8 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Services package."""
2 |
3 | from .service import BaseService
4 | from .file_service import FileService
5 | from .entity_service import EntityService
6 | from .project_service import ProjectService
7 |
8 | __all__ = ["BaseService", "FileService", "EntityService", "ProjectService"]
9 |
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from .entity_repository import EntityRepository
2 | from .observation_repository import ObservationRepository
3 | from .project_repository import ProjectRepository
4 | from .relation_repository import RelationRepository
5 |
6 | __all__ = [
7 | "EntityRepository",
8 | "ObservationRepository",
9 | "ProjectRepository",
10 | "RelationRepository",
11 | ]
12 |
```
--------------------------------------------------------------------------------
/src/basic_memory/models/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Models package for basic-memory."""
2 |
3 | import basic_memory
4 | from basic_memory.models.base import Base
5 | from basic_memory.models.knowledge import Entity, Observation, Relation
6 | from basic_memory.models.project import Project
7 |
8 | __all__ = [
9 | "Base",
10 | "Entity",
11 | "Observation",
12 | "Relation",
13 | "Project",
14 | "basic_memory",
15 | ]
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/service.py:
--------------------------------------------------------------------------------
```python
1 | """Base service class."""
2 |
3 | from typing import TypeVar, Generic
4 |
5 | from basic_memory.models import Base
6 |
7 | T = TypeVar("T", bound=Base)
8 |
9 |
10 | class BaseService(Generic[T]):
11 | """Base service that takes a repository."""
12 |
13 | def __init__(self, repository):
14 | """Initialize service with repository."""
15 | self.repository = repository
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/project_info_repository.py:
--------------------------------------------------------------------------------
```python
1 | from basic_memory.repository.repository import Repository
2 | from basic_memory.models.project import Project
3 |
4 |
class ProjectInfoRepository(Repository):
    """Repository for statistics queries.

    Not bound to a particular aggregate; exists so statistics/info queries
    can reuse the base Repository machinery (session handling, execute_query).
    """

    def __init__(self, session_maker):
        # Project is only a placeholder model so the base Repository can
        # initialize; this repository is used for ad-hoc queries.
        super().__init__(session_maker, Project)
11 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Cloud commands package."""
2 |
3 | # Import all commands to register them with typer
4 | from basic_memory.cli.commands.cloud.core_commands import * # noqa: F401,F403
5 | from basic_memory.cli.commands.cloud.api_client import get_authenticated_headers, get_cloud_config # noqa: F401
6 | from basic_memory.cli.commands.cloud.upload_command import * # noqa: F401,F403
7 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
```yaml
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Basic Memory Discussions
4 | url: https://github.com/basicmachines-co/basic-memory/discussions
5 | about: For questions, ideas, or more open-ended discussions
6 | - name: Documentation
7 | url: https://github.com/basicmachines-co/basic-memory#readme
8 | about: Please check the documentation first before reporting an issue
```
--------------------------------------------------------------------------------
/test-int/cli/test_version_integration.py:
--------------------------------------------------------------------------------
```python
1 | """Integration tests for version command."""
2 |
3 | from typer.testing import CliRunner
4 |
5 | from basic_memory.cli.main import app
6 | import basic_memory
7 |
8 |
def test_version_command():
    """The '--version' flag exits cleanly and prints the package version."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(app, ["--version"])
    assert outcome.exit_code == 0
    assert basic_memory.__version__ in outcome.stdout
16 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """CLI commands for basic-memory."""
2 |
3 | from . import status, sync, db, import_memory_json, mcp, import_claude_conversations
4 | from . import import_claude_projects, import_chatgpt, tool, project
5 |
6 | __all__ = [
7 | "status",
8 | "sync",
9 | "db",
10 | "import_memory_json",
11 | "mcp",
12 | "import_claude_conversations",
13 | "import_claude_projects",
14 | "import_chatgpt",
15 | "tool",
16 | "project",
17 | ]
18 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """API routers."""
2 |
3 | from . import knowledge_router as knowledge
4 | from . import management_router as management
5 | from . import memory_router as memory
6 | from . import project_router as project
7 | from . import resource_router as resource
8 | from . import search_router as search
9 | from . import prompt_router as prompt
10 |
11 | __all__ = ["knowledge", "management", "memory", "project", "resource", "search", "prompt"]
12 |
```
--------------------------------------------------------------------------------
/tests/markdown/test_task_detection.py:
--------------------------------------------------------------------------------
```python
1 | """Test how markdown-it handles task lists."""
2 |
3 | from markdown_it import MarkdownIt
4 |
5 |
def test_task_token_type():
    """Verify how markdown-it parses task list items.

    The original version only printed the tokens and asserted nothing,
    so it could never fail. Without a tasklist plugin, markdown-it parses
    ``- [ ]`` entries as ordinary bullet-list items whose inline content
    keeps the raw checkbox marker text.
    """
    md = MarkdownIt()
    content = """
- [ ] Unchecked task
- [x] Completed task
- [-] In progress task
"""

    tokens = md.parse(content)
    # One list_item_open per task line confirms they parse as list items.
    assert tokens, "expected tokens from task-list markdown"
    assert sum(1 for t in tokens if t.type == "list_item_open") == 3
    # The checkbox markers survive as plain inline text (no tasklist plugin).
    inline_text = " ".join(t.content for t in tokens if t.type == "inline")
    assert "[x] Completed task" in inline_text
18 |
```
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
```yaml
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | properties: {}
9 | description: No configuration required. This MCP server runs using the default command.
10 | commandFunction: |-
11 | (config) => ({
12 | command: 'basic-memory',
13 | args: ['mcp']
14 | })
15 | exampleConfig: {}
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/main.py:
--------------------------------------------------------------------------------
```python
1 | """Main CLI entry point for basic-memory.""" # pragma: no cover
2 |
3 | from basic_memory.cli.app import app # pragma: no cover
4 |
5 | # Register commands
6 | from basic_memory.cli.commands import ( # noqa: F401 # pragma: no cover
7 | cloud,
8 | db,
9 | import_chatgpt,
10 | import_claude_conversations,
11 | import_claude_projects,
12 | import_memory_json,
13 | mcp,
14 | project,
15 | status,
16 | sync,
17 | tool,
18 | )
19 |
20 | if __name__ == "__main__": # pragma: no cover
21 | # start the app
22 | app()
23 |
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Base package for markdown parsing."""
2 |
3 | from basic_memory.file_utils import ParseError
4 | from basic_memory.markdown.entity_parser import EntityParser
5 | from basic_memory.markdown.markdown_processor import MarkdownProcessor
6 | from basic_memory.markdown.schemas import (
7 | EntityMarkdown,
8 | EntityFrontmatter,
9 | Observation,
10 | Relation,
11 | )
12 |
13 | __all__ = [
14 | "EntityMarkdown",
15 | "EntityFrontmatter",
16 | "EntityParser",
17 | "MarkdownProcessor",
18 | "Observation",
19 | "Relation",
20 | "ParseError",
21 | ]
22 |
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 |   - package-ecosystem: "pip" # Python dependencies (empty string is not a valid ecosystem)
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
13 |
```
--------------------------------------------------------------------------------
/tests/mcp/test_resources.py:
--------------------------------------------------------------------------------
```python
1 | from basic_memory.mcp.prompts.ai_assistant_guide import ai_assistant_guide
2 |
3 |
4 | import pytest
5 |
6 |
@pytest.mark.asyncio
async def test_ai_assistant_guide_exists(app):
    """Test that the AI assistant guide resource exists and returns content."""
    # Call the underlying resource function directly
    guide = ai_assistant_guide.fn()

    # Verify basic characteristics of the content
    assert guide is not None
    assert isinstance(guide, str)
    assert len(guide) > 0

    # Verify it contains the guide's top-level heading
    assert "# AI Assistant Guide" in guide
20 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Documentation improvement
3 | about: Suggest improvements or report issues with documentation
4 | title: '[DOCS] '
5 | labels: documentation
6 | assignees: ''
7 | ---
8 |
9 | ## Documentation Issue
10 | Describe what's missing, unclear, or incorrect in the current documentation.
11 |
12 | ## Location
13 | Where is the problematic documentation? (URL, file path, or section)
14 |
15 | ## Suggested Improvement
16 | How would you improve this documentation? Please be as specific as possible.
17 |
18 | ## Additional Context
19 | Any additional information or screenshots that might help explain the issue or improvement.
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Basic Memory MCP prompts.
2 |
3 | Prompts are a special type of tool that returns a string response
4 | formatted for a user to read, typically invoking one or more tools
5 | and transforming their results into user-friendly text.
6 | """
7 |
8 | # Import individual prompt modules to register them with the MCP server
9 | from basic_memory.mcp.prompts import continue_conversation
10 | from basic_memory.mcp.prompts import recent_activity
11 | from basic_memory.mcp.prompts import search
12 | from basic_memory.mcp.prompts import ai_assistant_guide
13 |
14 | __all__ = [
15 | "ai_assistant_guide",
16 | "continue_conversation",
17 | "recent_activity",
18 | "search",
19 | ]
20 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/importer.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for import services."""
2 |
3 | from typing import Dict, Optional
4 |
5 | from pydantic import BaseModel
6 |
7 |
class ImportResult(BaseModel):
    """Common import result schema."""

    # Per-category counters; the keys depend on the concrete importer.
    import_count: Dict[str, int]
    # True when the import completed without a fatal error.
    success: bool
    # Human-readable failure reason; None on success.
    error_message: Optional[str] = None
14 |
15 |
class ChatImportResult(ImportResult):
    """Result schema for chat imports."""

    # Number of conversations imported.
    conversations: int = 0
    # Number of messages imported.
    messages: int = 0
21 |
22 |
class ProjectImportResult(ImportResult):
    """Result schema for project imports."""

    # Number of documents imported.
    documents: int = 0
    # Number of prompts imported.
    prompts: int = 0
28 |
29 |
class EntityImportResult(ImportResult):
    """Result schema for entity imports."""

    # Number of entities imported.
    entities: int = 0
    # Number of relations imported.
    relations: int = 0
    # Entities skipped (e.g. not importable); not counted in `entities`.
    skipped_entities: int = 0
36 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/migrations.py:
--------------------------------------------------------------------------------
```python
1 | """Functions for managing database migrations."""
2 |
3 | from pathlib import Path
4 | from loguru import logger
5 | from alembic.config import Config
6 | from alembic import command
7 |
8 |
def get_alembic_config() -> Config:  # pragma: no cover
    """Build an Alembic Config pointing at this package's migration scripts."""
    package_dir = Path(__file__).parent
    cfg = Config(package_dir / "alembic.ini")
    # Scripts live alongside alembic.ini in this package directory.
    cfg.set_main_option("script_location", str(package_dir))
    return cfg
17 |
18 |
def reset_database():  # pragma: no cover
    """Drop and recreate all tables.

    Downgrades to the base revision (removing everything Alembic manages),
    then upgrades back to head, leaving a fresh schema.
    """
    logger.info("Resetting database...")
    config = get_alembic_config()
    command.downgrade(config, "base")
    command.upgrade(config, "head")
25 |
```
--------------------------------------------------------------------------------
/src/basic_memory/sync/background_sync.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 |
3 | from loguru import logger
4 |
5 | from basic_memory.config import get_project_config
6 | from basic_memory.sync import SyncService, WatchService
7 |
8 |
async def sync_and_watch(
    sync_service: SyncService, watch_service: WatchService
):  # pragma: no cover
    """Run a one-shot full sync of the project dir, then start the watcher.

    Args:
        sync_service: Performs the initial full sync of ``config.home``.
        watch_service: Watches for subsequent file changes; awaited until
            its ``run()`` finishes.
    """

    config = get_project_config()
    logger.info(f"Starting watch service to sync file changes in dir: {config.home}")
    # One-time full sync before switching to incremental watching
    await sync_service.sync(config.home)

    # Hand control to the watch service for ongoing changes
    await watch_service.run()
21 |
22 |
async def create_background_sync_task(
    sync_service: SyncService, watch_service: WatchService
):  # pragma: no cover
    """Schedule sync_and_watch on the running event loop; return the Task."""
    return asyncio.create_task(sync_and_watch(sync_service, watch_service))
27 |
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for Basic Memory
4 | title: '[FEATURE] '
5 | labels: enhancement
6 | assignees: ''
7 | ---
8 |
9 | ## Feature Description
10 | A clear and concise description of the feature you'd like to see implemented.
11 |
12 | ## Problem This Feature Solves
13 | Describe the problem or limitation you're experiencing that this feature would address.
14 |
15 | ## Proposed Solution
16 | Describe how you envision this feature working. Include:
17 | - User workflow
18 | - Interface design (if applicable)
19 | - Technical approach (if you have ideas)
20 |
21 | ## Alternative Solutions
22 | Have you considered any alternative solutions or workarounds?
23 |
24 | ## Additional Context
25 | Add any other context, screenshots, or examples about the feature request here.
26 |
27 | ## Impact
28 | How would this feature benefit you and other users of Basic Memory?
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Import services for Basic Memory."""
2 |
3 | from basic_memory.importers.base import Importer
4 | from basic_memory.importers.chatgpt_importer import ChatGPTImporter
5 | from basic_memory.importers.claude_conversations_importer import (
6 | ClaudeConversationsImporter,
7 | )
8 | from basic_memory.importers.claude_projects_importer import ClaudeProjectsImporter
9 | from basic_memory.importers.memory_json_importer import MemoryJsonImporter
10 | from basic_memory.schemas.importer import (
11 | ChatImportResult,
12 | EntityImportResult,
13 | ImportResult,
14 | ProjectImportResult,
15 | )
16 |
17 | __all__ = [
18 | "Importer",
19 | "ChatGPTImporter",
20 | "ClaudeConversationsImporter",
21 | "ClaudeProjectsImporter",
22 | "MemoryJsonImporter",
23 | "ImportResult",
24 | "ChatImportResult",
25 | "EntityImportResult",
26 | "ProjectImportResult",
27 | ]
28 |
```
--------------------------------------------------------------------------------
/src/basic_memory/services/exceptions.py:
--------------------------------------------------------------------------------
```python
class FileOperationError(Exception):
    """Raised when a file operation fails."""
5 |
6 |
class EntityNotFoundError(Exception):
    """Raised when an entity cannot be found."""
11 |
12 |
class EntityCreationError(Exception):
    """Raised when an entity cannot be created."""
17 |
18 |
class DirectoryOperationError(Exception):
    """Raised when a directory operation fails."""
23 |
24 |
class SyncFatalError(Exception):
    """Raised when sync hits an error that makes continuing impossible.

    Fatal conditions include:
    - the project being deleted mid-sync (FOREIGN KEY constraint)
    - database corruption
    - critical system failures

    When raised, the whole sync run should stop immediately instead of
    moving on to the remaining files.
    """
38 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/directory.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for directory tree operations."""
2 |
3 | from datetime import datetime
4 | from typing import List, Optional, Literal
5 |
6 | from pydantic import BaseModel
7 |
8 |
class DirectoryNode(BaseModel):
    """Directory node in file system."""

    name: str
    file_path: Optional[str] = None  # Original path without leading slash (matches DB)
    directory_path: str  # Path with leading slash for directory navigation
    type: Literal["directory", "file"]
    # NOTE: pydantic copies field defaults per-instance, so the shared []
    # literal here is safe (unlike a plain class attribute).
    children: List["DirectoryNode"] = []  # Default to empty list
    title: Optional[str] = None
    permalink: Optional[str] = None
    entity_id: Optional[int] = None
    entity_type: Optional[str] = None
    content_type: Optional[str] = None
    updated_at: Optional[datetime] = None

    @property
    def has_children(self) -> bool:
        """True when this node has at least one child."""
        return bool(self.children)


# Resolve the self-referential "DirectoryNode" forward reference
DirectoryNode.model_rebuild()
31 |
```
--------------------------------------------------------------------------------
/.github/workflows/pr-title.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: "Pull Request Title"
2 |
3 | on:
4 | pull_request:
5 | types:
6 | - opened
7 | - edited
8 | - synchronize
9 |
10 | jobs:
11 | main:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: amannn/action-semantic-pull-request@v5
15 | env:
16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
17 | with:
18 | # Configure allowed types based on what we want in our changelog
19 | types: |
20 | feat
21 | fix
22 | chore
23 | docs
24 | style
25 | refactor
26 | perf
27 | test
28 | build
29 | ci
30 |       # Allowed scopes for PR titles (providing a scope remains optional)
31 | scopes: |
32 | core
33 | cli
34 | api
35 | mcp
36 | sync
37 | ui
38 | deps
39 | installer
40 | # Allow breaking changes (needs "!" after type/scope)
41 | requireScopeForBreakingChange: true
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve Basic Memory
4 | title: '[BUG] '
5 | labels: bug
6 | assignees: ''
7 | ---
8 |
9 | ## Bug Description
10 | A clear and concise description of what the bug is.
11 |
12 | ## Steps To Reproduce
13 | Steps to reproduce the behavior:
14 | 1. Install version '...'
15 | 2. Run command '...'
16 | 3. Use tool/feature '...'
17 | 4. See error
18 |
19 | ## Expected Behavior
20 | A clear and concise description of what you expected to happen.
21 |
22 | ## Actual Behavior
23 | What actually happened, including error messages and output.
24 |
25 | ## Environment
26 | - OS: [e.g. macOS 14.2, Ubuntu 22.04]
27 | - Python version: [e.g. 3.12.1]
28 | - Basic Memory version: [e.g. 0.1.0]
29 | - Installation method: [e.g. pip, uv, source]
30 | - Claude Desktop version (if applicable):
31 |
32 | ## Additional Context
33 | - Configuration files (if relevant)
34 | - Logs or screenshots
35 | - Any special configuration or environment variables
36 |
37 | ## Possible Solution
38 | If you have any ideas on what might be causing the issue or how to fix it, please share them here.
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/e7e1f4367280_add_scan_watermark_tracking_to_project.py:
--------------------------------------------------------------------------------
```python
1 | """Add scan watermark tracking to Project
2 |
3 | Revision ID: e7e1f4367280
4 | Revises: 9d9c1cb7d8f5
5 | Create Date: 2025-10-20 16:42:46.625075
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "e7e1f4367280"
17 | down_revision: Union[str, None] = "9d9c1cb7d8f5"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Add nullable scan-watermark columns to `project` (see revision message).
    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.add_column(sa.Column("last_scan_timestamp", sa.Float(), nullable=True))
        batch_op.add_column(sa.Column("last_file_count", sa.Integer(), nullable=True))

    # ### end Alembic commands ###
29 |
30 |
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the watermark columns (reverse order of the upgrade).
    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.drop_column("last_file_count")
        batch_op.drop_column("last_scan_timestamp")

    # ### end Alembic commands ###
38 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/delete.py:
--------------------------------------------------------------------------------
```python
1 | """Delete operation schemas for the knowledge graph.
2 |
3 | This module defines the request schemas for removing entities, relations,
4 | and observations from the knowledge graph. Each operation has specific
5 | implications and safety considerations.
6 |
7 | Deletion Hierarchy:
8 | 1. Entity deletion removes the entity and all its relations
9 | 2. Relation deletion only removes the connection between entities
10 | 3. Observation deletion preserves entity and relations
11 |
12 | Key Considerations:
13 | - All deletions are permanent
14 | - Entity deletions cascade to relations
15 | - Files are removed along with entities
16 | - Operations are atomic - they fully succeed or fail
17 | """
18 |
19 | from typing import List, Annotated
20 |
21 | from annotated_types import MinLen
22 | from pydantic import BaseModel
23 |
24 | from basic_memory.schemas.base import Permalink
25 |
26 |
class DeleteEntitiesRequest(BaseModel):
    """Delete one or more entities from the knowledge graph.

    This operation:
    1. Removes the entity from the database
    2. Deletes all observations attached to the entity
    3. Removes all relations where the entity is source or target
    4. Deletes the corresponding markdown file
    """

    # At least one permalink is required; an empty list fails validation.
    permalinks: Annotated[List[Permalink], MinLen(1)]
38 |
```
--------------------------------------------------------------------------------
/tests/cli/conftest.py:
--------------------------------------------------------------------------------
```python
1 | from typing import AsyncGenerator
2 |
3 | import pytest
4 | import pytest_asyncio
5 | from fastapi import FastAPI
6 | from httpx import AsyncClient, ASGITransport
7 |
8 | from basic_memory.api.app import app as fastapi_app
9 | from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
10 |
11 |
@pytest_asyncio.fixture(autouse=True)
async def app(app_config, project_config, engine_factory, test_config, aiolib) -> FastAPI:
    """Create test FastAPI application.

    Overrides the config/engine dependencies so requests resolve against
    the test fixtures instead of real on-disk configuration.
    """
    app = fastapi_app
    app.dependency_overrides[get_app_config] = lambda: app_config
    app.dependency_overrides[get_project_config] = lambda: project_config
    app.dependency_overrides[get_engine_factory] = lambda: engine_factory
    return app
20 |
21 |
@pytest_asyncio.fixture
async def client(app: FastAPI, aiolib) -> AsyncGenerator[AsyncClient, None]:
    """Create test client that both MCP and tests will use.

    ASGITransport sends requests to the app in-process (no real sockets).
    """
    async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
        yield client
27 |
28 |
@pytest.fixture
def cli_env(project_config, client, test_config):
    """Set up CLI environment with correct project session.

    Depending on `client` ensures the app fixture's dependency overrides
    are installed before any CLI code runs.
    """
    return {"project_config": project_config, "client": client}
33 |
```
--------------------------------------------------------------------------------
/tests/repository/test_project_info_repository.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the ProjectInfoRepository."""
2 |
3 | import pytest
4 | from sqlalchemy import text
5 |
6 | from basic_memory.repository.project_info_repository import ProjectInfoRepository
7 | from basic_memory.models.project import Project # Add a model reference
8 |
9 |
@pytest.mark.asyncio
async def test_project_info_repository_init(session_maker):
    """Test ProjectInfoRepository initialization."""
    # Create a ProjectInfoRepository
    repository = ProjectInfoRepository(session_maker)

    # Verify it was initialized properly
    assert repository is not None
    assert repository.session_maker == session_maker
    # Project is only a placeholder model for the base Repository
    assert repository.Model is Project
21 |
22 |
@pytest.mark.asyncio
async def test_project_info_repository_execute_query(session_maker):
    """Test ProjectInfoRepository execute_query method."""
    # Create a ProjectInfoRepository
    repository = ProjectInfoRepository(session_maker)

    # Execute a trivial raw-SQL statement through the repository
    result = await repository.execute_query(text("SELECT 1 as test"))

    # Verify a single row with the expected value comes back
    assert result is not None
    row = result.fetchone()
    assert row is not None
    assert row[0] == 1
37 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/search_router.py:
--------------------------------------------------------------------------------
```python
1 | """Router for search operations."""
2 |
3 | from fastapi import APIRouter, BackgroundTasks
4 |
5 | from basic_memory.api.routers.utils import to_search_results
6 | from basic_memory.schemas.search import SearchQuery, SearchResponse
7 | from basic_memory.deps import SearchServiceDep, EntityServiceDep
8 |
9 | router = APIRouter(prefix="/search", tags=["search"])
10 |
11 |
@router.post("/", response_model=SearchResponse)
async def search(
    query: SearchQuery,
    search_service: SearchServiceDep,
    entity_service: EntityServiceDep,
    page: int = 1,
    page_size: int = 10,
):
    """Search across all knowledge and documents.

    Args:
        query: Parsed search criteria.
        search_service: Executes the index search.
        entity_service: Hydrates raw hits into response results.
        page: 1-based page number; values below 1 are treated as page 1.
        page_size: Results per page; values below 1 are treated as 1.
    """
    # Clamp paging params: page=0 or negative values previously produced a
    # negative OFFSET / non-positive LIMIT in the underlying query.
    current_page = max(page, 1)
    limit = max(page_size, 1)
    offset = (current_page - 1) * limit
    results = await search_service.search(query, limit=limit, offset=offset)
    search_results = await to_search_results(entity_service, results)
    return SearchResponse(
        results=search_results,
        current_page=current_page,
        page_size=page_size,
    )
30 |
31 |
@router.post("/reindex")
async def reindex(background_tasks: BackgroundTasks, search_service: SearchServiceDep):
    """Recreate and populate the search index.

    Heavy work is handed to `reindex_all` together with FastAPI's
    background-task queue, so it can run after the response is sent.
    """
    await search_service.reindex_all(background_tasks=background_tasks)
    return {"status": "ok", "message": "Reindex initiated"}
37 |
```
--------------------------------------------------------------------------------
/src/basic_memory/models/search.py:
--------------------------------------------------------------------------------
```python
1 | """Search models and tables."""
2 |
3 | from sqlalchemy import DDL
4 |
# Define FTS5 virtual table creation. Uses IF NOT EXISTS, so it is safe to
# re-run; UNINDEXED columns are stored for retrieval but never tokenized.
CREATE_SEARCH_INDEX = DDL("""
CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
    -- Core entity fields
    id UNINDEXED,              -- Row ID
    title,                     -- Title for searching
    content_stems,             -- Main searchable content split into stems
    content_snippet,           -- File content snippet for display
    permalink,                 -- Stable identifier (now indexed for path search)
    file_path UNINDEXED,       -- Physical location
    type UNINDEXED,            -- entity/relation/observation

    -- Project context
    project_id UNINDEXED,      -- Project identifier

    -- Relation fields
    from_id UNINDEXED,         -- Source entity
    to_id UNINDEXED,           -- Target entity
    relation_type UNINDEXED,   -- Type of relation

    -- Observation fields
    entity_id UNINDEXED,       -- Parent entity
    category UNINDEXED,        -- Observation category

    -- Common fields
    metadata UNINDEXED,        -- JSON metadata
    created_at UNINDEXED,      -- Creation timestamp
    updated_at UNINDEXED,      -- Last update

    -- Configuration
    tokenize='unicode61 tokenchars 0x2F',  -- Hex code for /
    prefix='1,2,3,4'           -- Support longer prefixes for paths
);
""")
39 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/app.py:
--------------------------------------------------------------------------------
```python
1 | from typing import Optional
2 |
3 | import typer
4 |
5 | from basic_memory.config import ConfigManager
6 |
7 |
def version_callback(value: bool) -> None:
    """Print the Basic Memory version and exit when --version is passed."""
    if not value:  # pragma: no cover
        return
    import basic_memory

    typer.echo(f"Basic Memory version: {basic_memory.__version__}")
    raise typer.Exit()
15 |
16 |
# Root Typer application for the `basic-memory` CLI.
app = typer.Typer(name="basic-memory")
18 |
19 |
@app.callback()
def app_callback(
    ctx: typer.Context,
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        "-v",
        help="Show version and exit.",
        callback=version_callback,
        is_eager=True,  # evaluated before any subcommand
    ),
) -> None:
    """Basic Memory - Local-first personal knowledge management."""

    # Run initialization for every command unless --version was specified
    if not version and ctx.invoked_subcommand is not None:
        # Local import: only needed when we actually initialize
        from basic_memory.services.initialization import ensure_initialization

        app_config = ConfigManager().config
        ensure_initialization(app_config)
40 |
41 |
## import
# Sub-command group: `basic-memory import ...`
import_app = typer.Typer(help="Import data from various sources")
app.add_typer(import_app, name="import")

# Nested group: `basic-memory import claude ...`
claude_app = typer.Typer(help="Import Conversations from Claude JSON export.")
import_app.add_typer(claude_app, name="claude")


## cloud
# Sub-command group: `basic-memory cloud ...`
cloud_app = typer.Typer(help="Access Basic Memory Cloud")
app.add_typer(cloud_app, name="cloud")
55 |
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
FROM python:3.12-slim-bookworm

# Build arguments for user ID and group ID (defaults to 1000)
ARG UID=1000
ARG GID=1000

# Copy uv from official image
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# Create a group and user with the provided UID/GID
# Check if the GID already exists, if not create appgroup
RUN (getent group ${GID} || groupadd --gid ${GID} appgroup) && \
    useradd --uid ${UID} --gid ${GID} --create-home --shell /bin/bash appuser

# Copy the project into the image
# (COPY preferred over ADD for local paths per Dockerfile best practices)
COPY . /app

# Sync the project into a new environment, asserting the lockfile is up to date
WORKDIR /app
RUN uv sync --locked

# Create necessary directories and set ownership
RUN mkdir -p /app/data/basic-memory /app/.basic-memory && \
    chown -R appuser:${GID} /app

# Set default data directory and add venv to PATH
ENV BASIC_MEMORY_HOME=/app/data/basic-memory \
    BASIC_MEMORY_PROJECT_ROOT=/app/data \
    PATH="/app/.venv/bin:$PATH"

# Switch to the non-root user
USER appuser

# Expose port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD basic-memory --version || exit 1

# Use the basic-memory entrypoint to run the MCP server with default SSE transport
CMD ["basic-memory", "mcp", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
```
--------------------------------------------------------------------------------
/tests/api/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for knowledge graph API routes."""
2 |
3 | from typing import AsyncGenerator
4 |
5 | import pytest
6 | import pytest_asyncio
7 | from fastapi import FastAPI
8 | from httpx import AsyncClient, ASGITransport
9 |
10 | from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
11 | from basic_memory.models import Project
12 |
13 |
14 | @pytest_asyncio.fixture
15 | async def app(test_config, engine_factory, app_config) -> FastAPI:
16 | """Create FastAPI test application."""
17 | from basic_memory.api.app import app
18 |
19 | app.dependency_overrides[get_app_config] = lambda: app_config
20 | app.dependency_overrides[get_project_config] = lambda: test_config.project_config
21 | app.dependency_overrides[get_engine_factory] = lambda: engine_factory
22 | return app
23 |
24 |
25 | @pytest_asyncio.fixture
26 | async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
27 | """Create client using ASGI transport - same as CLI will use."""
28 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
29 | yield client
30 |
31 |
32 | @pytest.fixture
33 | def project_url(test_project: Project) -> str:
34 | """Create a URL prefix for the project routes.
35 |
36 | This helps tests generate the correct URL for project-scoped routes.
37 | """
38 | # Make sure this matches what's in tests/conftest.py for test_project creation
39 | # The permalink should be generated from "Test Project Context"
40 | return f"/{test_project.permalink}"
41 |
```
--------------------------------------------------------------------------------
/.github/workflows/dev-release.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Dev Release
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | workflow_dispatch: # Allow manual triggering
7 |
8 | jobs:
9 | dev-release:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | id-token: write
13 | contents: write
14 |
15 | steps:
16 | - uses: actions/checkout@v4
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Set up Python
21 | uses: actions/setup-python@v5
22 | with:
23 | python-version: "3.12"
24 |
25 | - name: Install uv
26 | run: |
27 | pip install uv
28 |
29 | - name: Install dependencies and build
30 | run: |
31 | uv venv
32 | uv sync
33 | uv build
34 |
35 | - name: Check if this is a dev version
36 | id: check_version
37 | run: |
38 | VERSION=$(uv run python -c "import basic_memory; print(basic_memory.__version__)")
39 | echo "version=$VERSION" >> $GITHUB_OUTPUT
40 | if [[ "$VERSION" == *"dev"* ]]; then
41 | echo "is_dev=true" >> $GITHUB_OUTPUT
42 | echo "Dev version detected: $VERSION"
43 | else
44 | echo "is_dev=false" >> $GITHUB_OUTPUT
45 | echo "Release version detected: $VERSION, skipping dev release"
46 | fi
47 |
48 | - name: Publish dev version to PyPI
49 | if: steps.check_version.outputs.is_dev == 'true'
50 | uses: pypa/gh-action-pypi-publish@release/v1
51 | with:
52 | password: ${{ secrets.PYPI_TOKEN }}
53 | skip-existing: true # Don't fail if version already exists
```
--------------------------------------------------------------------------------
/tests/api/test_relation_background_resolution.py:
--------------------------------------------------------------------------------
```python
1 | """Test that relation resolution happens in the background."""
2 |
3 | import pytest
4 | from unittest.mock import AsyncMock
5 |
6 | from basic_memory.api.routers.knowledge_router import resolve_relations_background
7 |
8 |
9 | @pytest.mark.asyncio
10 | async def test_resolve_relations_background_success():
11 | """Test that background relation resolution calls sync service correctly."""
12 | # Create mocks
13 | sync_service = AsyncMock()
14 | sync_service.resolve_relations = AsyncMock(return_value=None)
15 |
16 | entity_id = 123
17 | entity_permalink = "test/entity"
18 |
19 | # Call the background function
20 | await resolve_relations_background(sync_service, entity_id, entity_permalink)
21 |
22 | # Verify sync service was called with the entity_id
23 | sync_service.resolve_relations.assert_called_once_with(entity_id=entity_id)
24 |
25 |
26 | @pytest.mark.asyncio
27 | async def test_resolve_relations_background_handles_errors():
28 | """Test that background relation resolution handles errors gracefully."""
29 | # Create mock that raises an exception
30 | sync_service = AsyncMock()
31 | sync_service.resolve_relations = AsyncMock(side_effect=Exception("Test error"))
32 |
33 | entity_id = 123
34 | entity_permalink = "test/entity"
35 |
36 | # Call should not raise - errors are logged
37 | await resolve_relations_background(sync_service, entity_id, entity_permalink)
38 |
39 | # Verify sync service was called
40 | sync_service.resolve_relations.assert_called_once_with(entity_id=entity_id)
41 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py:
--------------------------------------------------------------------------------
```python
1 | """relation to_name unique index
2 |
3 | Revision ID: b3c3938bacdb
4 | Revises: 3dae7c7b1564
5 | Create Date: 2025-02-22 14:59:30.668466
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = "b3c3938bacdb"
16 | down_revision: Union[str, None] = "3dae7c7b1564"
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
21 | def upgrade() -> None:
22 | # SQLite doesn't support constraint changes through ALTER
23 | # Need to recreate table with desired constraints
24 | with op.batch_alter_table("relation") as batch_op:
25 | # Drop existing unique constraint
26 | batch_op.drop_constraint("uix_relation", type_="unique")
27 |
28 | # Add new constraints
29 | batch_op.create_unique_constraint(
30 | "uix_relation_from_id_to_id", ["from_id", "to_id", "relation_type"]
31 | )
32 | batch_op.create_unique_constraint(
33 | "uix_relation_from_id_to_name", ["from_id", "to_name", "relation_type"]
34 | )
35 |
36 |
37 | def downgrade() -> None:
38 | with op.batch_alter_table("relation") as batch_op:
39 | # Drop new constraints
40 | batch_op.drop_constraint("uix_relation_from_id_to_name", type_="unique")
41 | batch_op.drop_constraint("uix_relation_from_id_to_id", type_="unique")
42 |
43 | # Restore original constraint
44 | batch_op.create_unique_constraint("uix_relation", ["from_id", "to_id", "relation_type"])
45 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/command_utils.py:
--------------------------------------------------------------------------------
```python
1 | """utility functions for commands"""
2 |
3 | from typing import Optional
4 |
5 | from mcp.server.fastmcp.exceptions import ToolError
6 | import typer
7 |
8 | from rich.console import Console
9 |
10 | from basic_memory.mcp.async_client import get_client
11 |
12 | from basic_memory.mcp.tools.utils import call_post, call_get
13 | from basic_memory.mcp.project_context import get_active_project
14 | from basic_memory.schemas import ProjectInfoResponse
15 |
16 | console = Console()
17 |
18 |
19 | async def run_sync(project: Optional[str] = None):
20 | """Run sync operation via API endpoint."""
21 |
22 | try:
23 | async with get_client() as client:
24 | project_item = await get_active_project(client, project, None)
25 | response = await call_post(client, f"{project_item.project_url}/project/sync")
26 | data = response.json()
27 | console.print(f"[green]✓ {data['message']}[/green]")
28 | except (ToolError, ValueError) as e:
29 | console.print(f"[red]✗ Sync failed: {e}[/red]")
30 | raise typer.Exit(1)
31 |
32 |
33 | async def get_project_info(project: str):
34 | """Get project information via API endpoint."""
35 |
36 | try:
37 | async with get_client() as client:
38 | project_item = await get_active_project(client, project, None)
39 | response = await call_get(client, f"{project_item.project_url}/project/info")
40 | return ProjectInfoResponse.model_validate(response.json())
41 | except (ToolError, ValueError) as e:
42 | console.print(f"[red]✗ Sync failed: {e}[/red]")
43 | raise typer.Exit(1)
44 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/db.py:
--------------------------------------------------------------------------------
```python
1 | """Database management commands."""
2 |
3 | import asyncio
4 |
5 | import typer
6 | from loguru import logger
7 |
8 | from basic_memory import db
9 | from basic_memory.cli.app import app
10 | from basic_memory.config import ConfigManager, BasicMemoryConfig, save_basic_memory_config
11 |
12 |
13 | @app.command()
14 | def reset(
15 | reindex: bool = typer.Option(False, "--reindex", help="Rebuild db index from filesystem"),
16 | ): # pragma: no cover
17 | """Reset database (drop all tables and recreate)."""
18 | if typer.confirm("This will delete all data in your db. Are you sure?"):
19 | logger.info("Resetting database...")
20 | config_manager = ConfigManager()
21 | app_config = config_manager.config
22 | # Get database path
23 | db_path = app_config.app_database_path
24 |
25 | # Delete the database file if it exists
26 | if db_path.exists():
27 | db_path.unlink()
28 | logger.info(f"Database file deleted: {db_path}")
29 |
30 | # Reset project configuration
31 | config = BasicMemoryConfig()
32 | save_basic_memory_config(config_manager.config_file, config)
33 | logger.info("Project configuration reset to default")
34 |
35 | # Create a new empty database
36 | asyncio.run(db.run_migrations(app_config))
37 | logger.info("Database reset complete")
38 |
39 | if reindex:
40 | # Import and run sync
41 | from basic_memory.cli.commands.sync import sync
42 |
43 | logger.info("Rebuilding search index from filesystem...")
44 | sync(watch=False) # pyright: ignore
45 |
```
--------------------------------------------------------------------------------
/test-int/mcp/test_read_note_integration.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Integration tests for read_note MCP tool.
3 |
4 | Tests the full flow: MCP client -> MCP server -> FastAPI -> database
5 | """
6 |
7 | import pytest
8 | from fastmcp import Client
9 |
10 |
11 | @pytest.mark.asyncio
12 | async def test_read_note_after_write(mcp_server, app, test_project):
13 | """Test read_note after write_note using real database."""
14 |
15 | async with Client(mcp_server) as client:
16 | # First write a note
17 | write_result = await client.call_tool(
18 | "write_note",
19 | {
20 | "project": test_project.name,
21 | "title": "Test Note",
22 | "folder": "test",
23 | "content": "# Test Note\n\nThis is test content.",
24 | "tags": "test,integration",
25 | },
26 | )
27 |
28 | assert len(write_result.content) == 1
29 | assert write_result.content[0].type == "text"
30 | assert "Test Note.md" in write_result.content[0].text
31 |
32 | # Then read it back
33 | read_result = await client.call_tool(
34 | "read_note",
35 | {
36 | "project": test_project.name,
37 | "identifier": "Test Note",
38 | },
39 | )
40 |
41 | assert len(read_result.content) == 1
42 | assert read_result.content[0].type == "text"
43 | result_text = read_result.content[0].text
44 |
45 | # Should contain the note content and metadata
46 | assert "# Test Note" in result_text
47 | assert "This is test content." in result_text
48 | assert "test/test-note" in result_text # permalink
49 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP tools for Basic Memory.
2 |
3 | This package provides the complete set of tools for interacting with
4 | Basic Memory through the MCP protocol. Importing this module registers
5 | all tools with the MCP server.
6 | """
7 |
8 | # Import tools to register them with MCP
9 | from basic_memory.mcp.tools.delete_note import delete_note
10 | from basic_memory.mcp.tools.read_content import read_content
11 | from basic_memory.mcp.tools.build_context import build_context
12 | from basic_memory.mcp.tools.recent_activity import recent_activity
13 | from basic_memory.mcp.tools.read_note import read_note
14 | from basic_memory.mcp.tools.view_note import view_note
15 | from basic_memory.mcp.tools.write_note import write_note
16 | from basic_memory.mcp.tools.search import search_notes
17 | from basic_memory.mcp.tools.canvas import canvas
18 | from basic_memory.mcp.tools.list_directory import list_directory
19 | from basic_memory.mcp.tools.edit_note import edit_note
20 | from basic_memory.mcp.tools.move_note import move_note
21 | from basic_memory.mcp.tools.project_management import (
22 | list_memory_projects,
23 | create_memory_project,
24 | delete_project,
25 | )
26 |
27 | # ChatGPT-compatible tools
28 | from basic_memory.mcp.tools.chatgpt_tools import search, fetch
29 |
30 | __all__ = [
31 | "build_context",
32 | "canvas",
33 | "create_memory_project",
34 | "delete_note",
35 | "delete_project",
36 | "edit_note",
37 | "fetch",
38 | "list_directory",
39 | "list_memory_projects",
40 | "move_note",
41 | "read_content",
42 | "read_note",
43 | "recent_activity",
44 | "search",
45 | "search_notes",
46 | "view_note",
47 | "write_note",
48 | ]
49 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py:
--------------------------------------------------------------------------------
```python
1 | """Add mtime and size columns to Entity for sync optimization
2 |
3 | Revision ID: 9d9c1cb7d8f5
4 | Revises: a1b2c3d4e5f6
5 | Create Date: 2025-10-20 05:07:55.173849
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "9d9c1cb7d8f5"
17 | down_revision: Union[str, None] = "a1b2c3d4e5f6"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
22 | def upgrade() -> None:
23 | # ### commands auto generated by Alembic - please adjust! ###
24 | with op.batch_alter_table("entity", schema=None) as batch_op:
25 | batch_op.add_column(sa.Column("mtime", sa.Float(), nullable=True))
26 | batch_op.add_column(sa.Column("size", sa.Integer(), nullable=True))
27 | batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
28 | batch_op.create_foreign_key(
29 | batch_op.f("fk_entity_project_id"), "project", ["project_id"], ["id"]
30 | )
31 |
32 | # ### end Alembic commands ###
33 |
34 |
35 | def downgrade() -> None:
36 | # ### commands auto generated by Alembic - please adjust! ###
37 | with op.batch_alter_table("entity", schema=None) as batch_op:
38 | batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
39 | batch_op.create_foreign_key(
40 | batch_op.f("fk_entity_project_id"),
41 | "project",
42 | ["project_id"],
43 | ["id"],
44 | ondelete="CASCADE",
45 | )
46 | batch_op.drop_column("size")
47 | batch_op.drop_column("mtime")
48 |
49 | # ### end Alembic commands ###
50 |
```
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Docker Image CI
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*' # Trigger on version tags like v1.0.0, v0.13.0, etc.
7 | workflow_dispatch: # Allow manual triggering for testing
8 |
9 | env:
10 | REGISTRY: ghcr.io
11 | IMAGE_NAME: basicmachines-co/basic-memory
12 |
13 | jobs:
14 | docker:
15 | runs-on: ubuntu-latest
16 | permissions:
17 | contents: read
18 | packages: write
19 |
20 | steps:
21 | - name: Checkout repository
22 | uses: actions/checkout@v4
23 | with:
24 | fetch-depth: 0
25 |
26 | - name: Set up Docker Buildx
27 | uses: docker/setup-buildx-action@v3
28 | with:
29 | platforms: linux/amd64,linux/arm64
30 |
31 | - name: Log in to GitHub Container Registry
32 | uses: docker/login-action@v3
33 | with:
34 | registry: ${{ env.REGISTRY }}
35 | username: ${{ github.actor }}
36 | password: ${{ secrets.GITHUB_TOKEN }}
37 |
38 | - name: Extract metadata
39 | id: meta
40 | uses: docker/metadata-action@v5
41 | with:
42 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
43 | tags: |
44 | type=ref,event=branch
45 | type=ref,event=pr
46 | type=semver,pattern={{version}}
47 | type=semver,pattern={{major}}.{{minor}}
48 | type=raw,value=latest,enable={{is_default_branch}}
49 |
50 | - name: Build and push Docker image
51 | uses: docker/build-push-action@v5
52 | with:
53 | context: .
54 | file: ./Dockerfile
55 | platforms: linux/amd64,linux/arm64
56 | push: true
57 | tags: ${{ steps.meta.outputs.tags }}
58 | labels: ${{ steps.meta.outputs.labels }}
59 | cache-from: type=gha
60 | cache-to: type=gha,mode=max
61 |
62 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/search.py:
--------------------------------------------------------------------------------
```python
1 | """Search prompts for Basic Memory MCP server.
2 |
3 | These prompts help users search and explore their knowledge base.
4 | """
5 |
6 | from typing import Annotated, Optional
7 |
8 | from loguru import logger
9 | from pydantic import Field
10 |
11 | from basic_memory.config import get_project_config
12 | from basic_memory.mcp.async_client import get_client
13 | from basic_memory.mcp.server import mcp
14 | from basic_memory.mcp.tools.utils import call_post
15 | from basic_memory.schemas.base import TimeFrame
16 | from basic_memory.schemas.prompt import SearchPromptRequest
17 |
18 |
19 | @mcp.prompt(
20 | name="search_knowledge_base",
21 | description="Search across all content in basic-memory",
22 | )
23 | async def search_prompt(
24 | query: str,
25 | timeframe: Annotated[
26 | Optional[TimeFrame],
27 | Field(description="How far back to search (e.g. '1d', '1 week')"),
28 | ] = None,
29 | ) -> str:
30 | """Search across all content in basic-memory.
31 |
32 | This prompt helps search for content in the knowledge base and
33 | provides helpful context about the results.
34 |
35 | Args:
36 | query: The search text to look for
37 | timeframe: Optional timeframe to limit results (e.g. '1d', '1 week')
38 |
39 | Returns:
40 | Formatted search results with context
41 | """
42 | logger.info(f"Searching knowledge base, query: {query}, timeframe: {timeframe}")
43 |
44 | async with get_client() as client:
45 | # Create request model
46 | request = SearchPromptRequest(query=query, timeframe=timeframe)
47 |
48 | project_url = get_project_config().project_url
49 |
50 | # Call the prompt API endpoint
51 | response = await call_post(
52 | client, f"{project_url}/prompt/search", json=request.model_dump(exclude_none=True)
53 | )
54 |
55 | # Extract the rendered prompt from the response
56 | result = response.json()
57 | return result["prompt"]
58 |
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/sync.py:
--------------------------------------------------------------------------------
```python
1 | """Command module for basic-memory sync operations."""
2 |
3 | import asyncio
4 | from typing import Annotated, Optional
5 |
6 | import typer
7 |
8 | from basic_memory.cli.app import app
9 | from basic_memory.cli.commands.command_utils import run_sync
10 | from basic_memory.config import ConfigManager
11 |
12 |
13 | @app.command()
14 | def sync(
15 | project: Annotated[
16 | Optional[str],
17 | typer.Option(help="The project name."),
18 | ] = None,
19 | watch: Annotated[
20 | bool,
21 | typer.Option("--watch", help="Run continuous sync (cloud mode only)"),
22 | ] = False,
23 | interval: Annotated[
24 | int,
25 | typer.Option("--interval", help="Sync interval in seconds for watch mode (default: 60)"),
26 | ] = 60,
27 | ) -> None:
28 | """Sync knowledge files with the database.
29 |
30 | In local mode: Scans filesystem and updates database.
31 | In cloud mode: Runs bidirectional file sync (bisync) then updates database.
32 |
33 | Examples:
34 | bm sync # One-time sync
35 | bm sync --watch # Continuous sync every 60s
36 | bm sync --watch --interval 30 # Continuous sync every 30s
37 | """
38 | config = ConfigManager().config
39 |
40 | if config.cloud_mode_enabled:
41 | # Cloud mode: run bisync which includes database sync
42 | from basic_memory.cli.commands.cloud.bisync_commands import run_bisync, run_bisync_watch
43 |
44 | try:
45 | if watch:
46 | run_bisync_watch(interval_seconds=interval)
47 | else:
48 | run_bisync()
49 | except Exception:
50 | raise typer.Exit(1)
51 | else:
52 | # Local mode: just database sync
53 | if watch:
54 | typer.echo(
55 | "Error: --watch is only available in cloud mode. Run 'bm cloud login' first."
56 | )
57 | raise typer.Exit(1)
58 |
59 | asyncio.run(run_sync(project))
60 |
```
--------------------------------------------------------------------------------
/tests/mcp/conftest.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the MCP server implementation using FastAPI TestClient."""
2 |
3 | from typing import AsyncGenerator
4 |
5 | import pytest
6 | import pytest_asyncio
7 | from fastapi import FastAPI
8 | from httpx import AsyncClient, ASGITransport
9 | from mcp.server import FastMCP
10 |
11 | from basic_memory.api.app import app as fastapi_app
12 | from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
13 | from basic_memory.services.search_service import SearchService
14 | from basic_memory.mcp.server import mcp as mcp_server
15 |
16 |
17 | @pytest.fixture(scope="function")
18 | def mcp() -> FastMCP:
19 | return mcp_server # pyright: ignore [reportReturnType]
20 |
21 |
22 | @pytest.fixture(scope="function")
23 | def app(app_config, project_config, engine_factory, config_manager) -> FastAPI:
24 | """Create test FastAPI application."""
25 | app = fastapi_app
26 | app.dependency_overrides[get_app_config] = lambda: app_config
27 | app.dependency_overrides[get_project_config] = lambda: project_config
28 | app.dependency_overrides[get_engine_factory] = lambda: engine_factory
29 | return app
30 |
31 |
32 | @pytest_asyncio.fixture(scope="function")
33 | async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
34 | """Create test client that both MCP and tests will use."""
35 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
36 | yield client
37 |
38 |
39 | @pytest.fixture
40 | def test_entity_data():
41 | """Sample data for creating a test entity."""
42 | return {
43 | "entities": [
44 | {
45 | "title": "Test Entity",
46 | "entity_type": "test",
47 | "summary": "", # Empty string instead of None
48 | }
49 | ]
50 | }
51 |
52 |
53 | @pytest_asyncio.fixture(autouse=True)
54 | async def init_search_index(search_service: SearchService):
55 | await search_service.init_search_index()
56 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Knowledge graph schema exports.
2 |
3 | This module exports all schema classes to simplify imports.
4 | Rather than importing from individual schema files, you can
5 | import everything from basic_memory.schemas.
6 | """
7 |
8 | # Base types and models
9 | from basic_memory.schemas.base import (
10 | Observation,
11 | EntityType,
12 | RelationType,
13 | Relation,
14 | Entity,
15 | )
16 |
17 | # Delete operation models
18 | from basic_memory.schemas.delete import (
19 | DeleteEntitiesRequest,
20 | )
21 |
22 | # Request models
23 | from basic_memory.schemas.request import (
24 | SearchNodesRequest,
25 | GetEntitiesRequest,
26 | CreateRelationsRequest,
27 | )
28 |
29 | # Response models
30 | from basic_memory.schemas.response import (
31 | SQLAlchemyModel,
32 | ObservationResponse,
33 | RelationResponse,
34 | EntityResponse,
35 | EntityListResponse,
36 | SearchNodesResponse,
37 | DeleteEntitiesResponse,
38 | )
39 |
40 | from basic_memory.schemas.project_info import (
41 | ProjectStatistics,
42 | ActivityMetrics,
43 | SystemStatus,
44 | ProjectInfoResponse,
45 | )
46 |
47 | from basic_memory.schemas.directory import (
48 | DirectoryNode,
49 | )
50 |
51 | from basic_memory.schemas.sync_report import (
52 | SyncReportResponse,
53 | )
54 |
55 | # For convenient imports, export all models
56 | __all__ = [
57 | # Base
58 | "Observation",
59 | "EntityType",
60 | "RelationType",
61 | "Relation",
62 | "Entity",
63 | # Requests
64 | "SearchNodesRequest",
65 | "GetEntitiesRequest",
66 | "CreateRelationsRequest",
67 | # Responses
68 | "SQLAlchemyModel",
69 | "ObservationResponse",
70 | "RelationResponse",
71 | "EntityResponse",
72 | "EntityListResponse",
73 | "SearchNodesResponse",
74 | "DeleteEntitiesResponse",
75 | # Delete Operations
76 | "DeleteEntitiesRequest",
77 | # Project Info
78 | "ProjectStatistics",
79 | "ActivityMetrics",
80 | "SystemStatus",
81 | "ProjectInfoResponse",
82 | # Directory
83 | "DirectoryNode",
84 | # Sync
85 | "SyncReportResponse",
86 | ]
87 |
```
--------------------------------------------------------------------------------
/tests/api/test_project_router_operations.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for project router operation endpoints."""
2 |
3 | import pytest
4 |
5 |
6 | @pytest.mark.asyncio
7 | async def test_get_project_info_additional(client, test_graph, project_url):
8 | """Test additional fields in the project info endpoint."""
9 | # Call the endpoint
10 | response = await client.get(f"{project_url}/project/info")
11 |
12 | # Verify response
13 | assert response.status_code == 200
14 | data = response.json()
15 |
16 | # Check specific fields we're interested in
17 | assert "available_projects" in data
18 | assert isinstance(data["available_projects"], dict)
19 |
20 | # Get a project from the list
21 | for project_name, project_info in data["available_projects"].items():
22 | # Verify project structure
23 | assert "path" in project_info
24 | assert "active" in project_info
25 | assert "is_default" in project_info
26 | break # Just check the first one for structure
27 |
28 |
29 | @pytest.mark.asyncio
30 | async def test_project_list_additional(client, project_url):
31 | """Test additional fields in the project list endpoint."""
32 | # Call the endpoint
33 | response = await client.get("/projects/projects")
34 |
35 | # Verify response
36 | assert response.status_code == 200
37 | data = response.json()
38 |
39 | # Verify projects list structure in more detail
40 | assert "projects" in data
41 | assert len(data["projects"]) > 0
42 |
43 | # Verify the default project is identified
44 | default_project = data["default_project"]
45 | assert default_project
46 |
47 | # Verify the default_project appears in the projects list and is marked as default
48 | default_in_list = False
49 | for project in data["projects"]:
50 | if project["name"] == default_project:
51 | assert project["is_default"] is True
52 | default_in_list = True
53 | break
54 |
55 | assert default_in_list, "Default project should appear in the projects list"
56 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py:
--------------------------------------------------------------------------------
```python
1 | """fix project foreign keys
2 |
3 | Revision ID: a1b2c3d4e5f6
4 | Revises: 647e7a75e2cd
5 | Create Date: 2025-08-19 22:06:00.000000
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 |
13 |
14 | # revision identifiers, used by Alembic.
15 | revision: str = "a1b2c3d4e5f6"
16 | down_revision: Union[str, None] = "647e7a75e2cd"
17 | branch_labels: Union[str, Sequence[str], None] = None
18 | depends_on: Union[str, Sequence[str], None] = None
19 |
20 |
21 | def upgrade() -> None:
22 | """Re-establish foreign key constraints that were lost during project table recreation.
23 |
24 | The migration 647e7a75e2cd recreated the project table but did not re-establish
25 | the foreign key constraint from entity.project_id to project.id, causing
26 | foreign key constraint failures when trying to delete projects with related entities.
27 | """
28 | # SQLite doesn't allow adding foreign key constraints to existing tables easily
29 | # We need to be careful and handle the case where the constraint might already exist
30 |
31 | with op.batch_alter_table("entity", schema=None) as batch_op:
32 | # Try to drop existing foreign key constraint (may not exist)
33 | try:
34 | batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
35 | except Exception:
36 | # Constraint may not exist, which is fine - we'll create it next
37 | pass
38 |
39 | # Add the foreign key constraint with CASCADE DELETE
40 | # This ensures that when a project is deleted, all related entities are also deleted
41 | batch_op.create_foreign_key(
42 | "fk_entity_project_id", "project", ["project_id"], ["id"], ondelete="CASCADE"
43 | )
44 |
45 |
46 | def downgrade() -> None:
47 | """Remove the foreign key constraint."""
48 | with op.batch_alter_table("entity", schema=None) as batch_op:
49 | batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
50 |
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/utils.py:
--------------------------------------------------------------------------------
```python
1 | """Utility functions for import services."""
2 |
3 | import re
4 | from datetime import datetime
5 | from typing import Any
6 |
7 |
8 | def clean_filename(name: str) -> str: # pragma: no cover
9 | """Clean a string to be used as a filename.
10 |
11 | Args:
12 | name: The string to clean.
13 |
14 | Returns:
15 | A cleaned string suitable for use as a filename.
16 | """
17 | # Replace common punctuation and whitespace with underscores
18 | name = re.sub(r"[\s\-,.:/\\\[\]\(\)]+", "_", name)
19 | # Remove any non-alphanumeric or underscore characters
20 | name = re.sub(r"[^\w]+", "", name)
21 | # Ensure the name isn't too long
22 | if len(name) > 100: # pragma: no cover
23 | name = name[:100]
24 | # Ensure the name isn't empty
25 | if not name: # pragma: no cover
26 | name = "untitled"
27 | return name
28 |
29 |
def format_timestamp(timestamp: Any) -> str:  # pragma: no cover
    """Format a timestamp for use in a filename or title.

    Accepts datetime objects, unix timestamps (int/float or numeric string),
    and ISO-8601 strings. Values that cannot be parsed are returned as
    strings rather than raising.

    Args:
        timestamp: A timestamp in various formats.

    Returns:
        A formatted string representation of the timestamp.
    """
    if isinstance(timestamp, str):
        try:
            # Try ISO format; a trailing "Z" is normalized to an explicit UTC offset
            timestamp = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
        except ValueError:
            try:
                # Try unix timestamp as string. fromtimestamp() raises
                # OverflowError/OSError (not only ValueError) for values
                # outside the platform's range, e.g. "inf" or "1e20".
                timestamp = datetime.fromtimestamp(float(timestamp)).astimezone()
            except (ValueError, OverflowError, OSError):
                # Return as is if we can't parse it
                return timestamp
    elif isinstance(timestamp, (int, float)):
        try:
            # Unix timestamp; guard against out-of-range numeric values too
            timestamp = datetime.fromtimestamp(timestamp).astimezone()
        except (ValueError, OverflowError, OSError):
            return str(timestamp)

    if isinstance(timestamp, datetime):
        return timestamp.strftime("%Y-%m-%d %H:%M:%S")

    # Return as is if we can't format it
    return str(timestamp)  # pragma: no cover
59 |
```
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 | # pull_request_target runs on the BASE of the PR, not the merge result.
9 | # It has write permissions and access to secrets.
10 | # It's useful for PRs from forks or automated PRs but requires careful use for security reasons.
11 | # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
12 | pull_request_target:
13 | branches: [ "main" ]
14 |
15 | jobs:
16 | test:
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | os: [ubuntu-latest, windows-latest]
21 | python-version: [ "3.12", "3.13" ]
22 | runs-on: ${{ matrix.os }}
23 |
24 | steps:
25 | - uses: actions/checkout@v4
26 | with:
27 | submodules: true
28 |
29 | - name: Set up Python ${{ matrix.python-version }}
30 | uses: actions/setup-python@v4
31 | with:
32 | python-version: ${{ matrix.python-version }}
33 | cache: 'pip'
34 |
35 | - name: Install uv
36 | run: |
37 | pip install uv
38 |
39 | - name: Install just (Linux/macOS)
40 | if: runner.os != 'Windows'
41 | run: |
42 | curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin
43 |
44 | - name: Install just (Windows)
45 | if: runner.os == 'Windows'
46 | run: |
47 | # Install just using Chocolatey (pre-installed on GitHub Actions Windows runners)
48 | choco install just --yes
49 | shell: pwsh
50 |
51 | - name: Create virtual env
52 | run: |
53 | uv venv
54 |
55 | - name: Install dependencies
56 | run: |
57 | uv pip install -e .[dev]
58 |
59 | - name: Run type checks
60 | run: |
61 | just typecheck
62 |
63 | - name: Run linting
64 | run: |
65 | just lint
66 |
67 | - name: Run tests
68 | run: |
69 | uv pip install pytest pytest-cov
70 | just test
71 |
```
--------------------------------------------------------------------------------
/tests/importers/test_importer_utils.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for importer utility functions."""
2 |
3 | from datetime import datetime
4 |
5 | from basic_memory.importers.utils import clean_filename, format_timestamp
6 |
7 |
def test_clean_filename():
    """Test clean_filename utility function."""
    # Test with normal string: spaces become underscores
    assert clean_filename("Hello World") == "Hello_World"

    # Test with punctuation: separators collapse, stray symbols are dropped
    assert clean_filename("Hello, World!") == "Hello_World"

    # Test with special characters (path separators, brackets, colons)
    assert clean_filename("File[1]/with\\special:chars") == "File_1_with_special_chars"

    # Test with long string (over 100 chars): result is truncated to 100
    long_str = "a" * 120
    assert len(clean_filename(long_str)) == 100

    # Test with empty string: falls back to the default name
    assert clean_filename("") == "untitled"

    # Test with only special characters
    # Some implementations may return empty string or underscore
    result = clean_filename("!@#$%^&*()")
    assert result in ["untitled", "_", ""]
30 |
31 |
def test_format_timestamp():
    """Test format_timestamp utility function."""
    # Test with datetime object
    dt = datetime(2023, 1, 1, 12, 30, 45)
    assert format_timestamp(dt) == "2023-01-01 12:30:45"

    # Test with ISO format string (trailing "Z" is treated as UTC)
    iso_str = "2023-01-01T12:30:45Z"
    assert format_timestamp(iso_str) == "2023-01-01 12:30:45"

    # Test with Unix timestamp as int
    unix_ts = 1672577445  # 2023-01-01 12:50:45 UTC
    formatted = format_timestamp(unix_ts)
    # The exact format may vary by timezone, so we just check for the year
    assert "2023" in formatted

    # Test with Unix timestamp as string
    unix_str = "1672577445"
    formatted = format_timestamp(unix_str)
    assert "2023" in formatted

    # Test with unparseable string: returned unchanged
    assert format_timestamp("not a timestamp") == "not a timestamp"

    # Test with non-timestamp object: falls back to str()
    assert format_timestamp(None) == "None"
58 |
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py:
--------------------------------------------------------------------------------
```python
1 | """remove required from entity.permalink
2 |
3 | Revision ID: 502b60eaa905
4 | Revises: b3c3938bacdb
5 | Create Date: 2025-02-24 13:33:09.790951
6 |
7 | """
8 |
9 | from typing import Sequence, Union
10 |
11 | from alembic import op
12 | import sqlalchemy as sa
13 |
14 |
15 | # revision identifiers, used by Alembic.
16 | revision: str = "502b60eaa905"
17 | down_revision: Union[str, None] = "b3c3938bacdb"
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 |
21 |
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Make entity.permalink optional and replace the global unique constraint
    # with a partial unique index: uniqueness is only enforced for markdown
    # entities that actually have a permalink.
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=True)
        # Recreate the plain lookup index as non-unique
        batch_op.drop_index("ix_entity_permalink")
        batch_op.create_index(batch_op.f("ix_entity_permalink"), ["permalink"], unique=False)
        batch_op.drop_constraint("uix_entity_permalink", type_="unique")
        # SQLite partial index: NULL permalinks and non-markdown rows may repeat
        batch_op.create_index(
            "uix_entity_permalink",
            ["permalink"],
            unique=True,
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )

    # ### end Alembic commands ###
37 |
38 |
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the partial unique index, restore the global
    # unique constraint, and make entity.permalink required again.
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.drop_index(
            "uix_entity_permalink",
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.create_unique_constraint("uix_entity_permalink", ["permalink"])
        batch_op.drop_index(batch_op.f("ix_entity_permalink"))
        # unique=True (was auto-generated `unique=1`) for consistency with upgrade()
        batch_op.create_index("ix_entity_permalink", ["permalink"], unique=True)
        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=False)

    # ### end Alembic commands ###
52 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/cloud.py:
--------------------------------------------------------------------------------
```python
1 | """Schemas for cloud-related API responses."""
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
class TenantMountInfo(BaseModel):
    """Response from /tenant/mount/info endpoint.

    Identifies the tenant and the S3 bucket backing its storage.
    """

    tenant_id: str = Field(..., description="Unique identifier for the tenant")
    bucket_name: str = Field(..., description="S3 bucket name for the tenant")
11 |
12 |
class MountCredentials(BaseModel):
    """Response from /tenant/mount/credentials endpoint.

    Carries the S3 credential pair used to mount the tenant bucket.
    """

    access_key: str = Field(..., description="S3 access key for mount")
    secret_key: str = Field(..., description="S3 secret key for mount")
18 |
19 |
class CloudProject(BaseModel):
    """Representation of a cloud project.

    Minimal name/location pair for a project hosted in the cloud.
    """

    name: str = Field(..., description="Project name")
    path: str = Field(..., description="Project path on cloud")
25 |
26 |
class CloudProjectList(BaseModel):
    """Response from /proxy/projects/projects endpoint.

    Defaults to an empty list when the tenant has no projects.
    """

    projects: list[CloudProject] = Field(default_factory=list, description="List of cloud projects")
31 |
32 |
class CloudProjectCreateRequest(BaseModel):
    """Request to create a new cloud project.

    `set_default` optionally promotes the new project to the default one.
    """

    name: str = Field(..., description="Project name")
    path: str = Field(..., description="Project path (permalink)")
    set_default: bool = Field(default=False, description="Set as default project")
39 |
40 |
class CloudProjectCreateResponse(BaseModel):
    """Response from creating a cloud project.

    `old_project`/`new_project` are loosely-typed dicts whose exact shape is
    defined by the cloud API (not validated here).
    """

    message: str = Field(..., description="Status message about the project creation")
    status: str = Field(..., description="Status of the creation (success or error)")
    default: bool = Field(..., description="True if the project was set as the default")
    old_project: dict | None = Field(None, description="Information about the previous project")
    new_project: dict | None = Field(
        None, description="Information about the newly created project"
    )
51 |
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/schemas.py:
--------------------------------------------------------------------------------
```python
1 | """Schema models for entity markdown files."""
2 |
3 | from datetime import datetime
4 | from typing import List, Optional
5 |
6 | from pydantic import BaseModel
7 |
8 |
class Observation(BaseModel):
    """An observation about an entity."""

    category: Optional[str] = "Note"
    content: str
    tags: Optional[List[str]] = None
    context: Optional[str] = None

    def __str__(self) -> str:
        # Render as a markdown list item; context is appended in parentheses.
        parts = [f"- [{self.category}] {self.content}"]
        if self.context:
            parts.append(f" ({self.context})")
        return "".join(parts)
22 |
23 |
class Relation(BaseModel):
    """A relation between entities."""

    type: str
    target: str
    context: Optional[str] = None

    def __str__(self) -> str:
        # Render as a markdown list item with a wiki-link target;
        # context is appended in parentheses when present.
        parts = [f"- {self.type} [[{self.target}]]"]
        if self.context:
            parts.append(f" ({self.context})")
        return "".join(parts)
36 |
37 |
class EntityFrontmatter(BaseModel):
    """Required frontmatter fields for an entity."""

    metadata: dict = {}

    @property
    def tags(self) -> List[str]:
        # Returns None when metadata is unset or has no "tags" key.
        return (self.metadata or {}).get("tags")  # pyright: ignore

    @property
    def title(self) -> str:
        return (self.metadata or {}).get("title")  # pyright: ignore

    @property
    def type(self) -> str:
        # Entities default to the "note" type.
        return (self.metadata or {}).get("type", "note")  # pyright: ignore

    @property
    def permalink(self) -> str:
        return (self.metadata or {}).get("permalink")  # pyright: ignore
58 |
59 |
class EntityMarkdown(BaseModel):
    """Complete entity combining frontmatter, content, and metadata."""

    frontmatter: EntityFrontmatter
    content: Optional[str] = None
    # Structured knowledge parsed out of the markdown body
    observations: List[Observation] = []
    relations: List[Relation] = []

    # created, updated will have values after a read
    created: Optional[datetime] = None
    modified: Optional[datetime] = None
71 |
```
--------------------------------------------------------------------------------
/test-int/cli/test_sync_commands_integration.py:
--------------------------------------------------------------------------------
```python
1 | """Integration tests for sync CLI commands."""
2 |
3 | from pathlib import Path
4 | from typer.testing import CliRunner
5 |
6 | from basic_memory.cli.main import app
7 |
8 |
def test_sync_command(app_config, test_project, config_manager, config_home):
    """Test 'bm sync' command successfully syncs files."""
    runner = CliRunner()

    # Create a test file
    test_file = Path(config_home) / "test-note.md"
    test_file.write_text("# Test Note\n\nThis is a test.")

    # Run sync
    result = runner.invoke(app, ["sync", "--project", "test-project"])

    # Surface CLI output before the assertion so failures are debuggable
    if result.exit_code != 0:
        print(f"STDOUT: {result.stdout}")
        print(f"STDERR: {result.stderr}")
    assert result.exit_code == 0
    assert "sync" in result.stdout.lower() or "initiated" in result.stdout.lower()
25 |
26 |
def test_status_command(app_config, test_project, config_manager, config_home):
    """Test 'bm status' command shows sync status."""
    runner = CliRunner()

    # Create a test file
    test_file = Path(config_home) / "unsynced.md"
    test_file.write_text("# Unsynced Note\n\nThis file hasn't been synced yet.")

    # Run status
    result = runner.invoke(app, ["status", "--project", "test-project"])

    # Surface CLI output before the assertion so failures are debuggable
    if result.exit_code != 0:
        print(f"STDOUT: {result.stdout}")
        print(f"STDERR: {result.stderr}")
    assert result.exit_code == 0
    # Should show some status output
    assert len(result.stdout) > 0
44 |
45 |
def test_status_verbose(app_config, test_project, config_manager, config_home):
    """Test 'bm status --verbose' shows detailed status."""
    runner = CliRunner()

    # Create a test file
    test_file = Path(config_home) / "test.md"
    test_file.write_text("# Test\n\nContent.")

    # Run status with verbose
    result = runner.invoke(app, ["status", "--project", "test-project", "--verbose"])

    # Surface CLI output before the assertion so failures are debuggable
    if result.exit_code != 0:
        print(f"STDOUT: {result.stdout}")
        print(f"STDERR: {result.stderr}")
    assert result.exit_code == 0
    assert len(result.stdout) > 0
62 |
```
--------------------------------------------------------------------------------
/tests/schemas/test_memory_url.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for MemoryUrl parsing."""
2 |
3 | import pytest
4 |
5 | from basic_memory.schemas.memory import memory_url, memory_url_path, normalize_memory_url
6 |
7 |
def test_basic_permalink():
    """Test basic permalink parsing."""
    # Round-trip: string form and extracted path both preserved.
    parsed = memory_url.validate_strings("memory://specs/search")
    assert str(parsed) == "memory://specs/search"
    assert memory_url_path(parsed) == "specs/search"
13 |
14 |
def test_glob_pattern():
    """Test pattern matching."""
    # A trailing wildcard must survive parsing untouched.
    parsed = memory_url.validate_python("memory://specs/search/*")
    assert memory_url_path(parsed) == "specs/search/*"
19 |
20 |
def test_related_prefix():
    """Test related content prefix."""
    # The "related/" prefix is part of the path, not stripped.
    parsed = memory_url.validate_python("memory://related/specs/search")
    assert memory_url_path(parsed) == "related/specs/search"
25 |
26 |
def test_context_prefix():
    """Test context prefix."""
    # The "context/" prefix is part of the path, not stripped.
    parsed = memory_url.validate_python("memory://context/current")
    assert memory_url_path(parsed) == "context/current"
31 |
32 |
def test_complex_pattern():
    """Test multiple glob patterns."""
    # Wildcards in the middle and at the end are both preserved.
    parsed = memory_url.validate_python("memory://specs/*/search/*")
    assert memory_url_path(parsed) == "specs/*/search/*"
37 |
38 |
def test_path_with_dashes():
    """Test path with dashes and other chars."""
    parsed = memory_url.validate_python("memory://file-sync-and-note-updates-implementation")
    assert memory_url_path(parsed) == "file-sync-and-note-updates-implementation"
43 |
44 |
def test_str_representation():
    """Test converting back to string."""
    # Validated URL compares equal to its original string form.
    parsed = memory_url.validate_python("memory://specs/search")
    assert parsed == "memory://specs/search"
49 |
50 |
def test_normalize_memory_url():
    """Test that an already-prefixed URL is returned unchanged."""
    url = normalize_memory_url("memory://specs/search")
    assert url == "memory://specs/search"
55 |
56 |
def test_normalize_memory_url_no_prefix():
    """Test that a bare path gets the memory:// prefix added."""
    url = normalize_memory_url("specs/search")
    assert url == "memory://specs/search"
61 |
62 |
def test_normalize_memory_url_empty():
    """Test that empty string raises ValueError."""
    # Empty input is rejected rather than silently prefixed.
    with pytest.raises(ValueError, match="cannot be empty"):
        normalize_memory_url("")
67 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/continue_conversation.py:
--------------------------------------------------------------------------------
```python
1 | """Session continuation prompts for Basic Memory MCP server.
2 |
3 | These prompts help users continue conversations and work across sessions,
4 | providing context from previous interactions to maintain continuity.
5 | """
6 |
7 | from typing import Annotated, Optional
8 |
9 | from loguru import logger
10 | from pydantic import Field
11 |
12 | from basic_memory.config import get_project_config
13 | from basic_memory.mcp.async_client import get_client
14 | from basic_memory.mcp.server import mcp
15 | from basic_memory.mcp.tools.utils import call_post
16 | from basic_memory.schemas.base import TimeFrame
17 | from basic_memory.schemas.prompt import ContinueConversationRequest
18 |
19 |
@mcp.prompt(
    name="continue_conversation",
    description="Continue a previous conversation",
)
async def continue_conversation(
    topic: Annotated[Optional[str], Field(description="Topic or keyword to search for")] = None,
    timeframe: Annotated[
        Optional[TimeFrame],
        Field(description="How far back to look for activity (e.g. '1d', '1 week')"),
    ] = None,
) -> str:
    """Continue a previous conversation or work session.

    This prompt helps you pick up where you left off by finding recent context
    about a specific topic or showing general recent activity.

    Args:
        topic: Topic or keyword to search for (optional)
        timeframe: How far back to look for activity

    Returns:
        Context from previous sessions on this topic
    """
    logger.info(f"Continuing session, topic: {topic}, timeframe: {timeframe}")

    async with get_client() as client:
        # Create request model
        request = ContinueConversationRequest(  # pyright: ignore [reportCallIssue]
            topic=topic, timeframe=timeframe
        )

        project_url = get_project_config().project_url

        # Call the prompt API endpoint; exclude_none omits unset fields
        # from the JSON payload.
        response = await call_post(
            client,
            f"{project_url}/prompt/continue-conversation",
            json=request.model_dump(exclude_none=True),
        )

        # Extract the rendered prompt from the response body
        result = response.json()
        return result["prompt"]
63 |
```
--------------------------------------------------------------------------------
/tests/api/test_async_client.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for async_client configuration."""
2 |
3 | import os
4 | from unittest.mock import patch
5 | from httpx import AsyncClient, ASGITransport, Timeout
6 |
7 | from basic_memory.config import ConfigManager
8 | from basic_memory.mcp.async_client import create_client
9 |
10 |
def test_create_client_uses_asgi_when_no_remote_env():
    """Test that create_client uses ASGI transport when BASIC_MEMORY_USE_REMOTE_API is not set."""
    # Ensure env vars are not set (pop if they exist); patch.dict restores
    # the original environment when the context exits.
    with patch.dict("os.environ", clear=False):
        os.environ.pop("BASIC_MEMORY_USE_REMOTE_API", None)
        os.environ.pop("BASIC_MEMORY_CLOUD_MODE", None)

        client = create_client()

        # In-process ASGI transport with a placeholder base URL
        assert isinstance(client, AsyncClient)
        assert isinstance(client._transport, ASGITransport)
        assert str(client.base_url) == "http://test"
23 |
24 |
def test_create_client_uses_http_when_cloud_mode_env_set():
    """Test that create_client uses HTTP transport when BASIC_MEMORY_CLOUD_MODE is set."""

    # Config is read before patching env; only cloud_host is used below.
    config = ConfigManager().config
    with patch.dict("os.environ", {"BASIC_MEMORY_CLOUD_MODE": "True"}):
        client = create_client()

        assert isinstance(client, AsyncClient)
        assert not isinstance(client._transport, ASGITransport)
        # Cloud mode uses cloud_host/proxy as base_url
        assert str(client.base_url) == f"{config.cloud_host}/proxy/"
36 |
37 |
def test_create_client_configures_extended_timeouts():
    """Test that create_client configures 30-second timeouts for long operations."""
    # Ensure env vars are not set (pop if they exist) so the local ASGI
    # client is created.
    with patch.dict("os.environ", clear=False):
        os.environ.pop("BASIC_MEMORY_USE_REMOTE_API", None)
        os.environ.pop("BASIC_MEMORY_CLOUD_MODE", None)

        client = create_client()

        # Verify timeout configuration
        assert isinstance(client.timeout, Timeout)
        assert client.timeout.connect == 10.0  # 10 seconds for connection
        assert client.timeout.read == 30.0  # 30 seconds for reading
        assert client.timeout.write == 30.0  # 30 seconds for writing
        assert client.timeout.pool == 30.0  # 30 seconds for pool
53 |
```
--------------------------------------------------------------------------------
/llms-install.md:
--------------------------------------------------------------------------------
```markdown
1 | # Basic Memory Installation Guide for LLMs
2 |
3 | This guide is specifically designed to help AI assistants like Cline install and configure Basic Memory. Follow these
4 | steps in order.
5 |
6 | ## Installation Steps
7 |
8 | ### 1. Install Basic Memory Package
9 |
10 | Use one of the following package managers to install:
11 |
12 | ```bash
13 | # Install with uv (recommended)
14 | uv tool install basic-memory
15 |
16 | # Or with pip
17 | pip install basic-memory
18 | ```
19 |
20 | ### 2. Configure MCP Server
21 |
22 | Add the following to your config:
23 |
24 | ```json
25 | {
26 | "mcpServers": {
27 | "basic-memory": {
28 | "command": "uvx",
29 | "args": [
30 | "basic-memory",
31 | "mcp"
32 | ]
33 | }
34 | }
35 | }
36 | ```
37 |
38 | For Claude Desktop, this file is located at:
39 |
40 | macOS: ~/Library/Application Support/Claude/claude_desktop_config.json
41 | Windows: %APPDATA%\Claude\claude_desktop_config.json
42 |
43 | ### 3. Start Synchronization (optional)
44 |
45 | To synchronize files in real-time, run:
46 |
47 | ```bash
48 | basic-memory sync --watch
49 | ```
50 |
51 | Or for a one-time sync:
52 |
53 | ```bash
54 | basic-memory sync
55 | ```
56 |
57 | ## Configuration Options
58 |
59 | ### Custom Directory
60 |
61 | To use a directory other than the default `~/basic-memory`:
62 |
63 | ```bash
64 | basic-memory project add custom-project /path/to/your/directory
65 | basic-memory project default custom-project
66 | ```
67 |
68 | ### Multiple Projects
69 |
70 | To manage multiple knowledge bases:
71 |
72 | ```bash
73 | # List all projects
74 | basic-memory project list
75 |
76 | # Add a new project
77 | basic-memory project add work ~/work-basic-memory
78 |
79 | # Set default project
80 | basic-memory project default work
81 | ```
82 |
83 | ## Importing Existing Data
84 |
85 | ### From Claude.ai
86 |
87 | ```bash
88 | basic-memory import claude conversations path/to/conversations.json
89 | basic-memory import claude projects path/to/projects.json
90 | ```
91 |
92 | ### From ChatGPT
93 |
94 | ```bash
95 | basic-memory import chatgpt path/to/conversations.json
96 | ```
97 |
98 | ### From MCP Memory Server
99 |
100 | ```bash
101 | basic-memory import memory-json path/to/memory.json
102 | ```
103 |
104 | ## Troubleshooting
105 |
106 | If you encounter issues:
107 |
108 | 1. Check that Basic Memory is properly installed:
109 | ```bash
110 | basic-memory --version
111 | ```
112 |
113 | 2. Verify the sync process is running:
114 | ```bash
115 | ps aux | grep basic-memory
116 | ```
117 |
118 | 3. Check sync output for errors:
119 | ```bash
120 | basic-memory sync --verbose
121 | ```
122 |
123 | 4. Check log output:
124 | ```bash
125 | cat ~/.basic-memory/basic-memory.log
126 | ```
127 |
128 | For more detailed information, refer to the [full documentation](https://memory.basicmachines.co/).
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/ai_assistant_guide.py:
--------------------------------------------------------------------------------
```python
1 | from pathlib import Path
2 |
3 | from basic_memory.config import ConfigManager
4 | from basic_memory.mcp.server import mcp
5 | from loguru import logger
6 |
7 |
@mcp.resource(
    uri="memory://ai_assistant_guide",
    name="ai assistant guide",
    description="Give an AI assistant guidance on how to use Basic Memory tools effectively",
)
def ai_assistant_guide() -> str:
    """Return a concise guide on Basic Memory tools and how to use them.

    Dynamically adapts instructions based on configuration:
    - Default project mode: Simplified instructions with automatic project
    - Regular mode: Project discovery and selection guidance
    - CLI constraint mode: Single project constraint information

    Returns:
        A focused guide on Basic Memory usage.
    """
    logger.info("Loading AI assistant guide resource")

    # Load base guide content from the packaged markdown resource
    guide_doc = Path(__file__).parent.parent / "resources" / "ai_assistant_guide.md"
    content = guide_doc.read_text(encoding="utf-8")

    # Check configuration for mode-specific instructions
    config = ConfigManager().config

    # Add mode-specific header describing how the project parameter behaves
    mode_info = ""
    if config.default_project_mode:
        mode_info = f"""
# 🎯 Default Project Mode Active

**Current Configuration**: All operations automatically use project '{config.default_project}'

**Simplified Usage**: You don't need to specify the project parameter in tool calls.
- `write_note(title="Note", content="...", folder="docs")` ✅
- Project parameter is optional and will default to '{config.default_project}'
- To use a different project, explicitly specify: `project="other-project"`

────────────────────────────────────────

"""
    else:
        mode_info = """
# 🔧 Multi-Project Mode Active

**Current Configuration**: Project parameter required for all operations

**Project Discovery Required**: Use these tools to select a project:
- `list_memory_projects()` - See all available projects
- `recent_activity()` - Get project activity and recommendations
- Remember the user's project choice throughout the conversation

────────────────────────────────────────

"""

    # Prepend mode info to the guide
    enhanced_content = mode_info + content

    logger.info(
        f"Loaded AI assistant guide ({len(enhanced_content)} chars) with mode: {'default_project' if config.default_project_mode else 'multi_project'}"
    )
    return enhanced_content
71 |
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/view_note.py:
--------------------------------------------------------------------------------
```python
1 | """View note tool for Basic Memory MCP server."""
2 |
3 | from textwrap import dedent
4 | from typing import Optional
5 |
6 | from loguru import logger
7 | from fastmcp import Context
8 |
9 | from basic_memory.mcp.server import mcp
10 | from basic_memory.mcp.tools.read_note import read_note
11 |
12 |
@mcp.tool(
    description="View a note as a formatted artifact for better readability.",
)
async def view_note(
    identifier: str,
    project: Optional[str] = None,
    page: int = 1,
    page_size: int = 10,
    context: Context | None = None,
) -> str:
    """View a markdown note as a formatted artifact.

    This tool reads a note using the same logic as read_note but instructs Claude
    to display the content as a markdown artifact in the Claude Desktop app.
    Project parameter optional with server resolution.

    Args:
        identifier: The title or permalink of the note to view
        project: Project name to read from. Optional - server will resolve using hierarchy.
            If unknown, use list_memory_projects() to discover available projects.
        page: Page number for paginated results (default: 1)
        page_size: Number of items per page (default: 10)
        context: Optional FastMCP context for performance caching.

    Returns:
        Instructions for Claude to create a markdown artifact with the note content.

    Examples:
        # View a note by title
        view_note("Meeting Notes")

        # View a note by permalink
        view_note("meetings/weekly-standup")

        # View with pagination
        view_note("large-document", page=2, page_size=5)

        # Explicit project specification
        view_note("Meeting Notes", project="my-project")

    Raises:
        HTTPError: If project doesn't exist or is inaccessible
        SecurityError: If identifier attempts path traversal
    """

    logger.info(f"Viewing note: {identifier} in project: {project}")

    # Call the existing read_note logic (.fn bypasses the MCP tool wrapper
    # and invokes the underlying function directly)
    content = await read_note.fn(identifier, project, page, page_size, context)

    # Check if this is an error message (note not found)
    if "# Note Not Found" in content:
        return content  # Return error message directly

    # Return instructions for Claude to create an artifact
    return dedent(f"""
        Note retrieved: "{identifier}"

        Display this note as a markdown artifact for the user.

        Content:
        ---
        {content}
        ---
        """).strip()
78 |
```
--------------------------------------------------------------------------------
/.claude/commands/spec.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | allowed-tools: mcp__basic-memory__write_note, mcp__basic-memory__read_note, mcp__basic-memory__search_notes, mcp__basic-memory__edit_note, Task
3 | argument-hint: [create|status|implement|review] [spec-name]
4 | description: Manage specifications in our development process
5 | ---
6 |
7 | ## Context
8 |
9 | You are managing specifications using our specification-driven development process defined in @docs/specs/SPEC-001.md.
10 |
11 | Available commands:
12 | - `create [name]` - Create new specification
13 | - `status` - Show all spec statuses
14 | - `implement [spec-name]` - Hand spec to appropriate agent
15 | - `review [spec-name]` - Review implementation against spec
16 |
17 | ## Your task
18 |
19 | Execute the spec command: `/spec $ARGUMENTS`
20 |
21 | ### If command is "create":
22 | 1. Get next SPEC number by searching existing specs
23 | 2. Create new spec using template from @docs/specs/Slash\ Commands\ Reference.md
24 | 3. Place in `/specs` folder with title "SPEC-XXX: [name]"
25 | 4. Include standard sections: Why, What, How, How to Evaluate
26 |
27 | ### If command is "status":
28 | 1. Search all notes in `/specs` folder
29 | 2. Display table with spec number, title, and status
30 | 3. Show any dependencies or assigned agents
31 |
32 | ### If command is "implement":
33 | 1. Read the specified spec
34 | 2. Determine appropriate agent based on content:
35 | - Frontend/UI → vue-developer
36 | - Architecture/system → system-architect
37 | - Backend/API → python-developer
38 | 3. Launch Task tool with appropriate agent and spec context
39 |
40 | ### If command is "review":
41 | 1. Read the specified spec and its "How to Evaluate" section
42 | 2. Review current implementation against success criteria with careful evaluation of:
43 | - **Functional completeness** - All specified features working
44 | - **Test coverage analysis** - Actual test files and coverage percentage
45 | - Count existing test files vs required components/APIs/composables
46 | - Verify unit tests, integration tests, and end-to-end tests
47 | - Check for missing test categories (component, API, workflow)
48 | - **Code quality metrics** - TypeScript compilation, linting, performance
49 | - **Architecture compliance** - Component isolation, state management patterns
50 | - **Documentation completeness** - Implementation matches specification
51 | 3. Provide honest, accurate assessment - do not overstate completeness
52 | 4. Document findings and update spec with review results
53 | 5. If gaps found, clearly identify what still needs to be implemented/tested
54 |
55 | Use the agent definitions from @docs/specs/Agent\ Definitions.md for implementation handoffs.
56 |
```
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*' # Trigger on version tags like v1.0.0, v0.13.0, etc.
7 |
8 | jobs:
9 | release:
10 | runs-on: ubuntu-latest
11 | permissions:
12 | id-token: write
13 | contents: write
14 |
15 | steps:
16 | - uses: actions/checkout@v4
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Set up Python
21 | uses: actions/setup-python@v5
22 | with:
23 | python-version: "3.12"
24 |
25 | - name: Install uv
26 | run: |
27 | pip install uv
28 |
29 | - name: Install dependencies and build
30 | run: |
31 | uv venv
32 | uv sync
33 | uv build
34 |
35 | - name: Verify build succeeded
36 | run: |
37 | # Verify that build artifacts exist
38 | ls -la dist/
39 | echo "Build completed successfully"
40 |
41 | - name: Create GitHub Release
42 | uses: softprops/action-gh-release@v2
43 | with:
44 | files: |
45 | dist/*.whl
46 | dist/*.tar.gz
47 | generate_release_notes: true
48 | tag_name: ${{ github.ref_name }}
49 | token: ${{ secrets.GITHUB_TOKEN }}
50 |
51 | - name: Publish to PyPI
52 | uses: pypa/gh-action-pypi-publish@release/v1
53 | with:
54 | password: ${{ secrets.PYPI_TOKEN }}
55 |
56 | homebrew:
57 | name: Update Homebrew Formula
58 | needs: release
59 | runs-on: ubuntu-latest
60 |     # Only run for stable releases (not dev, beta, or rc versions).
61 |     # Note: contains(github.ref_name, 'b') matches ANY 'b' in the tag, not just beta suffixes.
61 | if: ${{ !contains(github.ref_name, 'dev') && !contains(github.ref_name, 'b') && !contains(github.ref_name, 'rc') }}
62 | permissions:
63 | contents: write
64 | actions: read
65 | steps:
66 | - name: Update Homebrew formula
67 | uses: mislav/bump-homebrew-formula-action@v3
68 | with:
69 | # Formula name in homebrew-basic-memory repo
70 | formula-name: basic-memory
71 | # The tap repository
72 | homebrew-tap: basicmachines-co/homebrew-basic-memory
73 | # Base branch of the tap repository
74 | base-branch: main
75 | # Download URL will be automatically constructed from the tag
76 | download-url: https://github.com/basicmachines-co/basic-memory/archive/refs/tags/${{ github.ref_name }}.tar.gz
77 | # Commit message for the formula update
78 | commit-message: |
79 | {{formulaName}} {{version}}
80 |
81 | Created by https://github.com/basicmachines-co/basic-memory/actions/runs/${{ github.run_id }}
82 | env:
83 | # Personal Access Token with repo scope for homebrew-basic-memory repo
84 | COMMITTER_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
85 |
86 |
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base import service for Basic Memory."""
2 |
3 | import logging
4 | from abc import abstractmethod
5 | from pathlib import Path
6 | from typing import Any, Optional, TypeVar
7 |
8 | from basic_memory.markdown.markdown_processor import MarkdownProcessor
9 | from basic_memory.markdown.schemas import EntityMarkdown
10 | from basic_memory.schemas.importer import ImportResult
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 | T = TypeVar("T", bound=ImportResult)
15 |
16 |
17 | class Importer[T: ImportResult]:
18 |     """Base class for all import services.
19 | 
20 |     NOTE(review): methods below use ``@abstractmethod`` but the class does not
21 |     inherit ``abc.ABC``, so abstract methods are not enforced at instantiation
22 |     time — confirm this is intended. The module-level ``TypeVar("T")`` is also
23 |     shadowed by the PEP 695 type parameter declared here.
24 |     """
25 | 
26 |     def __init__(self, base_path: Path, markdown_processor: MarkdownProcessor):
27 |         """Initialize the import service.
28 | 
29 |         Args:
30 |             base_path: Base path of the project; resolved to an absolute path.
31 |             markdown_processor: MarkdownProcessor instance for writing markdown files.
32 |         """
33 |         self.base_path = base_path.resolve()  # Get absolute path
34 |         self.markdown_processor = markdown_processor
35 | 
36 |     @abstractmethod
37 |     async def import_data(self, source_data: Any, destination_folder: str, **kwargs: Any) -> T:
38 |         """Import data from source file to destination folder.
39 | 
40 |         Args:
41 |             source_data: Source data to import (exact type is defined by each subclass).
42 |             destination_folder: Destination folder within the project.
43 |             **kwargs: Additional keyword arguments for specific import types.
44 | 
45 |         Returns:
46 |             ImportResult containing statistics and status of the import.
47 |         """
48 |         pass  # pragma: no cover
49 | 
50 |     async def write_entity(self, entity: EntityMarkdown, file_path: Path) -> None:
51 |         """Write entity to file using markdown processor.
52 | 
53 |         Args:
54 |             entity: EntityMarkdown instance to write.
55 |             file_path: Path to write the entity to.
56 |         """
57 |         await self.markdown_processor.write_file(file_path, entity)
58 | 
59 |     def ensure_folder_exists(self, folder: str) -> Path:
60 |         """Ensure folder exists under the project base path, creating it if needed.
61 | 
62 |         Args:
63 |             folder: Folder name or path within the project.
64 | 
65 |         Returns:
66 |             Path to the folder.
67 |         """
68 |         folder_path = self.base_path / folder
69 |         folder_path.mkdir(parents=True, exist_ok=True)
70 |         return folder_path
71 | 
72 |     @abstractmethod
73 |     def handle_error(
74 |         self, message: str, error: Optional[Exception] = None
75 |     ) -> T:  # pragma: no cover
76 |         """Handle errors during import.
77 | 
78 |         Args:
79 |             message: Error message.
80 |             error: Optional exception that caused the error.
81 | 
82 |         Returns:
83 |             ImportResult with error information.
84 |         """
85 |         pass
80 |
```
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
1 | # Docker Compose configuration for Basic Memory
2 | # See docs/Docker.md for detailed setup instructions
3 |
4 | version: '3.8'
5 |
6 | services:
7 | basic-memory:
8 | # Use pre-built image (recommended for most users)
9 | image: ghcr.io/basicmachines-co/basic-memory:latest
10 |
11 | # Uncomment to build locally instead:
12 | # build: .
13 |
14 | container_name: basic-memory-server
15 |
16 | # Volume mounts for knowledge directories and persistent data
17 | volumes:
18 |
19 | # Persistent storage for configuration and database
20 | - basic-memory-config:/root/.basic-memory:rw
21 |
22 | # Mount your knowledge directory (required)
23 | # Change './knowledge' to your actual Obsidian vault or knowledge directory
24 | - ./knowledge:/app/data:rw
25 |
26 | # OPTIONAL: Mount additional knowledge directories for multiple projects
27 | # - ./work-notes:/app/data/work:rw
28 | # - ./personal-notes:/app/data/personal:rw
29 |
30 | # You can edit the project config manually in the mounted config volume
31 | # The default project will be configured to use /app/data
32 | environment:
33 | # Project configuration
34 | - BASIC_MEMORY_DEFAULT_PROJECT=main
35 |
36 | # Enable real-time file synchronization (recommended for Docker)
37 | - BASIC_MEMORY_SYNC_CHANGES=true
38 |
39 | # Logging configuration
40 | - BASIC_MEMORY_LOG_LEVEL=INFO
41 |
42 | # Sync delay in milliseconds (adjust for performance vs responsiveness)
43 | - BASIC_MEMORY_SYNC_DELAY=1000
44 |
45 | # Port exposure for HTTP transport (only needed if not using STDIO)
46 | ports:
47 | - "8000:8000"
48 |
49 | # Command with SSE transport (configurable via environment variables above)
50 | # IMPORTANT: The SSE and streamable-http endpoints are not secured
51 | command: ["basic-memory", "mcp", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
52 |
53 | # Container management
54 | restart: unless-stopped
55 |
56 | # Health monitoring
57 | healthcheck:
58 | test: ["CMD", "basic-memory", "--version"]
59 | interval: 30s
60 | timeout: 10s
61 | retries: 3
62 | start_period: 30s
63 |
64 | # Optional: Resource limits
65 | # deploy:
66 | # resources:
67 | # limits:
68 | # memory: 512M
69 | # cpus: '0.5'
70 | # reservations:
71 | # memory: 256M
72 | # cpus: '0.25'
73 |
74 | volumes:
75 | # Named volume for persistent configuration and database
76 | # This ensures your configuration and knowledge graph persist across container restarts
77 | basic-memory-config:
78 | driver: local
79 |
80 | # Network configuration (optional)
81 | # networks:
82 | # basic-memory-net:
83 | # driver: bridge
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/resources/project_info.py:
--------------------------------------------------------------------------------
```python
1 | """Project info tool for Basic Memory MCP server."""
2 |
3 | from typing import Optional
4 |
5 | from loguru import logger
6 | from fastmcp import Context
7 |
8 | from basic_memory.mcp.async_client import get_client
9 | from basic_memory.mcp.project_context import get_active_project
10 | from basic_memory.mcp.server import mcp
11 | from basic_memory.mcp.tools.utils import call_get
12 | from basic_memory.schemas import ProjectInfoResponse
13 |
14 |
15 | @mcp.resource(
16 |     uri="memory://{project}/info",
17 |     description="Get information and statistics about the current Basic Memory project.",
18 | )
19 | async def project_info(
20 |     project: Optional[str] = None, context: Context | None = None
21 | ) -> ProjectInfoResponse:
22 |     """Get comprehensive information about the current Basic Memory project.
23 | 
24 |     This MCP resource provides detailed statistics and status information about your
25 |     Basic Memory project, including:
26 | 
27 |     - Project configuration
28 |     - Entity, observation, and relation counts
29 |     - Graph metrics (most connected entities, isolated entities)
30 |     - Recent activity and growth over time
31 |     - System status (database, watch service, version)
32 | 
33 |     Use this resource to:
34 |     - Verify your Basic Memory installation is working correctly
35 |     - Get insights into your knowledge base structure
36 |     - Monitor growth and activity over time
37 |     - Identify potential issues like unresolved relations
38 | 
39 |     Args:
40 |         project: Optional project name. If not provided, uses default_project
41 |             (if default_project_mode=true) or CLI constraint. If unknown,
42 |             use list_memory_projects() to discover available projects.
43 |         context: Optional FastMCP context for performance caching.
44 | 
45 |     Returns:
46 |         Detailed project information and statistics
47 | 
48 |     Examples:
49 |         # Get information about the current/default project
50 |         info = await project_info()
51 | 
52 |         # Get information about a specific project
53 |         info = await project_info(project="my-project")
54 | 
55 |         # Check entity counts
56 |         print(f"Total entities: {info.statistics.total_entities}")
57 | 
58 |         # Check system status
59 |         print(f"Basic Memory version: {info.system.version}")
60 |     """
61 |     logger.info("Getting project info")
62 | 
63 |     async with get_client() as client:
64 |         # Resolve the active project (explicit arg, default, or CLI constraint).
65 |         project_config = await get_active_project(client, project, context)
66 |         project_url = project_config.permalink
67 | 
68 |         # Call the API endpoint
69 |         response = await call_get(client, f"{project_url}/project/info")
70 | 
71 |         # Convert response to ProjectInfoResponse
72 |         return ProjectInfoResponse.model_validate(response.json())
72 |
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/sync_report.py:
--------------------------------------------------------------------------------
```python
1 | """Pydantic schemas for sync report responses."""
2 |
3 | from datetime import datetime
4 | from typing import TYPE_CHECKING, Dict, List, Set
5 |
6 | from pydantic import BaseModel, Field
7 |
8 | # Avoid circular imports: SyncReport is needed only for type checking.
9 | if TYPE_CHECKING:
10 | from basic_memory.sync.sync_service import SyncReport
11 |
12 |
13 | class SkippedFileResponse(BaseModel):
14 |     """Information about a file that was skipped due to repeated failures."""
15 | 
16 |     path: str = Field(description="File path relative to project root")
17 |     reason: str = Field(description="Error message from last failure")
18 |     failure_count: int = Field(description="Number of consecutive failures")
19 |     first_failed: datetime = Field(description="Timestamp of first failure")
20 | 
21 |     # from_attributes lets this model be built directly from attribute-bearing
22 |     # objects (e.g. the sync service's dataclass entries), not just dicts.
23 |     model_config = {"from_attributes": True}
22 |
23 |
24 | class SyncReportResponse(BaseModel):
25 |     """Report of file changes found compared to database state.
26 | 
27 |     Used for API responses when scanning or syncing files.
28 |     """
29 | 
30 |     new: Set[str] = Field(default_factory=set, description="Files on disk but not in database")
31 |     modified: Set[str] = Field(default_factory=set, description="Files with different checksums")
32 |     deleted: Set[str] = Field(default_factory=set, description="Files in database but not on disk")
33 |     moves: Dict[str, str] = Field(
34 |         default_factory=dict, description="Files moved (old_path -> new_path)"
35 |     )
36 |     checksums: Dict[str, str] = Field(
37 |         default_factory=dict, description="Current file checksums (path -> checksum)"
38 |     )
39 |     skipped_files: List[SkippedFileResponse] = Field(
40 |         default_factory=list, description="Files skipped due to repeated failures"
41 |     )
42 |     total: int = Field(description="Total number of changes")
43 | 
44 |     @classmethod
45 |     def from_sync_report(cls, report: "SyncReport") -> "SyncReportResponse":
46 |         """Convert SyncReport dataclass to Pydantic model.
47 | 
48 |         Fields are copied explicitly because SyncReport is imported only under
49 |         TYPE_CHECKING (see module top), so no runtime import is needed here.
50 | 
51 |         Args:
52 |             report: SyncReport dataclass from sync service
53 | 
54 |         Returns:
55 |             SyncReportResponse with same data
56 |         """
57 |         return cls(
58 |             new=report.new,
59 |             modified=report.modified,
60 |             deleted=report.deleted,
61 |             moves=report.moves,
62 |             checksums=report.checksums,
63 |             skipped_files=[
64 |                 SkippedFileResponse(
65 |                     path=skipped.path,
66 |                     reason=skipped.reason,
67 |                     failure_count=skipped.failure_count,
68 |                     first_failed=skipped.first_failed,
69 |                 )
70 |                 for skipped in report.skipped_files
71 |             ],
72 |             # total is taken from the report, not recomputed from the sets above.
73 |             total=report.total,
74 |         )
75 | 
76 |     model_config = {"from_attributes": True}
73 |
```
--------------------------------------------------------------------------------
/tests/utils/test_parse_tags.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for parse_tags utility function."""
2 |
3 | from typing import List, Union
4 |
5 | import pytest
6 |
7 | from basic_memory.utils import parse_tags
8 |
9 |
10 | @pytest.mark.parametrize(
11 | "input_tags,expected",
12 | [
13 | # None input
14 | (None, []),
15 | # List inputs
16 | ([], []),
17 | (["tag1", "tag2"], ["tag1", "tag2"]),
18 | (["tag1", "", "tag2"], ["tag1", "tag2"]), # Empty tags are filtered
19 | ([" tag1 ", " tag2 "], ["tag1", "tag2"]), # Whitespace is stripped
20 | # String inputs
21 | ("", []),
22 | ("tag1", ["tag1"]),
23 | ("tag1,tag2", ["tag1", "tag2"]),
24 | ("tag1, tag2", ["tag1", "tag2"]), # Whitespace after comma is stripped
25 | ("tag1,,tag2", ["tag1", "tag2"]), # Empty tags are filtered
26 | # Tags with leading '#' characters - these should be stripped
27 | (["#tag1", "##tag2"], ["tag1", "tag2"]),
28 | ("#tag1,##tag2", ["tag1", "tag2"]),
29 | (["tag1", "#tag2", "##tag3"], ["tag1", "tag2", "tag3"]),
30 | # Mixed whitespace and '#' characters
31 | ([" #tag1 ", " ##tag2 "], ["tag1", "tag2"]),
32 | (" #tag1 , ##tag2 ", ["tag1", "tag2"]),
33 | # JSON stringified arrays (common AI assistant issue)
34 | ('["tag1", "tag2", "tag3"]', ["tag1", "tag2", "tag3"]),
35 | ('["system", "overview", "reference"]', ["system", "overview", "reference"]),
36 | ('["#tag1", "##tag2"]', ["tag1", "tag2"]), # JSON array with hash prefixes
37 | ('[ "tag1" , "tag2" ]', ["tag1", "tag2"]), # JSON array with extra spaces
38 | ],
39 | )
40 | def test_parse_tags(input_tags: Union[List[str], str, None], expected: List[str]) -> None:
41 | """Test tag parsing with various input formats."""
42 | result = parse_tags(input_tags)
43 | assert result == expected
44 |
45 |
46 | def test_parse_tags_special_case() -> None:
47 | """Test parsing from non-string, non-list types."""
48 |
49 | # Test with custom object that has __str__ method
50 | class TagObject:
51 | def __str__(self) -> str:
52 | return "tag1,tag2"
53 |
54 | result = parse_tags(TagObject()) # pyright: ignore [reportArgumentType]
55 | assert result == ["tag1", "tag2"]
56 |
57 |
58 | def test_parse_tags_invalid_json() -> None:
59 | """Test that invalid JSON strings fall back to comma-separated parsing."""
60 | # Invalid JSON should fall back to comma-separated parsing
61 | result = parse_tags("[invalid json")
62 | assert result == ["[invalid json"] # Treated as single tag
63 |
64 | result = parse_tags("[tag1, tag2]") # Valid bracket format but not JSON
65 | assert result == ["[tag1", "tag2]"] # Split by comma
66 |
67 | result = parse_tags('["tag1", "tag2"') # Incomplete JSON
68 | assert result == ['["tag1"', '"tag2"'] # Fall back to comma separation
69 |
```
--------------------------------------------------------------------------------
/.github/workflows/claude-issue-triage.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Claude Issue Triage
2 |
3 | on:
4 | issues:
5 | types: [opened]
6 |
7 | jobs:
8 | triage:
9 | runs-on: ubuntu-latest
10 | permissions:
11 | issues: write
12 | id-token: write
13 | steps:
14 | - name: Checkout repository
15 | uses: actions/checkout@v4
16 | with:
17 | fetch-depth: 1
18 |
19 | - name: Run Claude Issue Triage
20 | uses: anthropics/claude-code-action@v1
21 | with:
22 | claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
23 | track_progress: true # Show triage progress
24 | prompt: |
25 | Analyze this new Basic Memory issue and perform triage:
26 |
27 | **Issue Analysis:**
28 | 1. **Type Classification:**
29 | - Bug report (code defect)
30 | - Feature request (new functionality)
31 | - Enhancement (improvement to existing feature)
32 | - Documentation (docs improvement)
33 | - Question/Support (user help)
34 | - MCP tool issue (specific to MCP functionality)
35 |
36 | 2. **Priority Assessment:**
37 | - Critical: Security issues, data loss, complete breakage
38 | - High: Major functionality broken, affects many users
39 | - Medium: Minor bugs, usability issues
40 | - Low: Nice-to-have improvements, cosmetic issues
41 |
42 | 3. **Component Classification:**
43 | - CLI commands
44 | - MCP tools
45 | - Database/sync
46 | - Cloud functionality
47 | - Documentation
48 | - Testing
49 |
50 | 4. **Complexity Estimate:**
51 | - Simple: Quick fix, documentation update
52 | - Medium: Requires some investigation/testing
53 | - Complex: Major feature work, architectural changes
54 |
55 | **Actions to Take:**
56 | 1. Add appropriate labels using: `gh issue edit ${{ github.event.issue.number }} --add-label "label1,label2"`
57 | 2. Check for duplicates using: `gh search issues`
58 | 3. If duplicate found, comment mentioning the original issue
59 | 4. For feature requests, ask clarifying questions if needed
60 | 5. For bugs, request reproduction steps if missing
61 |
62 | **Available Labels:**
63 | - Type: bug, enhancement, feature, documentation, question, mcp-tool
64 | - Priority: critical, high, medium, low
65 | - Component: cli, mcp, database, cloud, docs, testing
66 | - Complexity: simple, medium, complex
67 | - Status: needs-reproduction, needs-clarification, duplicate
68 |
69 | Read the issue carefully and provide helpful triage with appropriate labels.
70 |
71 | claude_args: '--allowed-tools "Bash(gh issue:*),Bash(gh search:*),Read"'
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/management_router.py:
--------------------------------------------------------------------------------
```python
1 | """Management router for basic-memory API."""
2 |
3 | import asyncio
4 |
5 | from fastapi import APIRouter, Request
6 | from loguru import logger
7 | from pydantic import BaseModel
8 |
9 | from basic_memory.config import ConfigManager
10 | from basic_memory.deps import SyncServiceDep, ProjectRepositoryDep
11 |
12 | router = APIRouter(prefix="/management", tags=["management"])
13 |
14 |
15 | class WatchStatusResponse(BaseModel):
16 | """Response model for watch status."""
17 |
18 | running: bool
19 | """Whether the watch service is currently running."""
20 |
21 |
22 | @router.get("/watch/status", response_model=WatchStatusResponse)
23 | async def get_watch_status(request: Request) -> WatchStatusResponse:
24 |     """Get the current status of the watch service."""
25 |     # Running means a watch task exists and has not finished
26 |     # (task.done() is true for completed, cancelled, or failed tasks).
27 |     return WatchStatusResponse(
28 |         running=request.app.state.watch_task is not None and not request.app.state.watch_task.done()
29 |     )
28 |
29 |
30 | @router.post("/watch/start", response_model=WatchStatusResponse)
31 | async def start_watch_service(
32 |     request: Request, project_repository: ProjectRepositoryDep, sync_service: SyncServiceDep
33 | ) -> WatchStatusResponse:
34 |     """Start the watch service if it's not already running.
35 | 
36 |     Idempotent: if a live watch task already exists, this returns
37 |     running=True without starting a second task.
38 |     """
39 | 
40 |     # needed because of circular imports from sync -> app
41 |     from basic_memory.sync import WatchService
42 |     from basic_memory.sync.background_sync import create_background_sync_task
43 | 
44 |     if request.app.state.watch_task is not None and not request.app.state.watch_task.done():
45 |         # Watch service is already running
46 |         return WatchStatusResponse(running=True)
47 | 
48 |     app_config = ConfigManager().config
49 | 
50 |     # Create and start a new watch service
51 |     logger.info("Starting watch service via management API")
52 | 
53 |     # Get services needed for the watch task
54 |     watch_service = WatchService(
55 |         app_config=app_config,
56 |         project_repository=project_repository,
57 |     )
58 | 
59 |     # Create and store the task on app state so status/stop endpoints can see it
60 |     watch_task = create_background_sync_task(sync_service, watch_service)
61 |     request.app.state.watch_task = watch_task
62 | 
63 |     return WatchStatusResponse(running=True)
60 |
61 |
62 | @router.post("/watch/stop", response_model=WatchStatusResponse)
63 | async def stop_watch_service(request: Request) -> WatchStatusResponse:  # pragma: no cover
64 |     """Stop the watch service if it's running."""
65 |     if request.app.state.watch_task is None or request.app.state.watch_task.done():
66 |         # Watch service is not running
67 |         return WatchStatusResponse(running=False)
68 | 
69 |     # Cancel the running task
70 |     logger.info("Stopping watch service via management API")
71 |     request.app.state.watch_task.cancel()
72 | 
73 |     # Wait for it to be properly cancelled; awaiting a cancelled task
74 |     # raises CancelledError, which is the expected outcome here.
75 |     try:
76 |         await request.app.state.watch_task
77 |     except asyncio.CancelledError:
78 |         pass
79 | 
80 |     # Clear the reference so status/start endpoints see no live task.
81 |     request.app.state.watch_task = None
82 |     return WatchStatusResponse(running=False)
81 |
```
--------------------------------------------------------------------------------
/tests/sync/test_sync_wikilink_issue.py:
--------------------------------------------------------------------------------
```python
1 | """Test for issue #72 - notes with wikilinks staying in modified status."""
2 |
3 | from pathlib import Path
4 |
5 | import pytest
6 |
7 | from basic_memory.sync.sync_service import SyncService
8 |
9 |
10 | async def create_test_file(path: Path, content: str) -> None:
11 | """Create a test file with given content."""
12 | path.parent.mkdir(parents=True, exist_ok=True)
13 | path.write_text(content)
14 |
15 |
16 | async def force_full_scan(sync_service: SyncService) -> None:
17 | """Force next sync to do a full scan by clearing watermark (for testing moves/deletions)."""
18 | if sync_service.entity_repository.project_id is not None:
19 | project = await sync_service.project_repository.find_by_id(
20 | sync_service.entity_repository.project_id
21 | )
22 | if project:
23 | await sync_service.project_repository.update(
24 | project.id,
25 | {
26 | "last_scan_timestamp": None,
27 | "last_file_count": None,
28 | },
29 | )
30 |
31 |
32 | @pytest.mark.asyncio
33 | async def test_wikilink_modified_status_issue(sync_service: SyncService, project_config):
34 | """Test that files with wikilinks don't remain in modified status after sync."""
35 | project_dir = project_config.home
36 |
37 | # Create a file with a wikilink
38 | content = """---
39 | title: Test Wikilink
40 | type: note
41 | ---
42 | # Test File
43 |
44 | This file contains a wikilink to [[another-file]].
45 | """
46 | test_file_path = project_dir / "test_wikilink.md"
47 | await create_test_file(test_file_path, content)
48 |
49 | # Initial sync
50 | report1 = await sync_service.sync(project_config.home)
51 | assert "test_wikilink.md" in report1.new
52 | assert "test_wikilink.md" not in report1.modified
53 |
54 | # Sync again without changing the file - should not be modified
55 | report2 = await sync_service.sync(project_config.home)
56 | assert "test_wikilink.md" not in report2.new
57 | assert "test_wikilink.md" not in report2.modified
58 |
59 | # Create the target file
60 | target_content = """---
61 | title: Another File
62 | type: note
63 | ---
64 | # Another File
65 |
66 | This is the target file.
67 | """
68 | target_file_path = project_dir / "another_file.md"
69 | await create_test_file(target_file_path, target_content)
70 |
71 | # Force full scan to detect the new file
72 | # (file just created may not be newer than watermark due to timing precision)
73 | await force_full_scan(sync_service)
74 |
75 | # Sync again after adding target file
76 | report3 = await sync_service.sync(project_config.home)
77 | assert "another_file.md" in report3.new
78 | assert "test_wikilink.md" not in report3.modified
79 |
80 | # Sync one more time - both files should now be stable
81 | report4 = await sync_service.sync(project_config.home)
82 | assert "test_wikilink.md" not in report4.modified
83 | assert "another_file.md" not in report4.modified
84 |
```