This is page 1 of 17. Use http://codebase.md/basicmachines-co/basic-memory?page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── python-developer.md
│ │ └── system-architect.md
│ └── commands
│ ├── release
│ │ ├── beta.md
│ │ ├── changelog.md
│ │ ├── release-check.md
│ │ └── release.md
│ ├── spec.md
│ └── test-live.md
├── .dockerignore
├── .github
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── documentation.md
│ │ └── feature_request.md
│ └── workflows
│ ├── claude-code-review.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── dev-release.yml
│ ├── docker.yml
│ ├── pr-title.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── ai-assistant-guide-extended.md
│ ├── character-handling.md
│ ├── cloud-cli.md
│ └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│ ├── SPEC-1 Specification-Driven Development Process.md
│ ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│ ├── SPEC-11 Basic Memory API Performance Optimization.md
│ ├── SPEC-12 OpenTelemetry Observability.md
│ ├── SPEC-13 CLI Authentication with Subscription Validation.md
│ ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│ ├── SPEC-16 MCP Cloud Service Consolidation.md
│ ├── SPEC-17 Semantic Search with ChromaDB.md
│ ├── SPEC-18 AI Memory Management Tool.md
│ ├── SPEC-19 Sync Performance and Memory Optimization.md
│ ├── SPEC-2 Slash Commands Reference.md
│ ├── SPEC-3 Agent Definitions.md
│ ├── SPEC-4 Notes Web UI Component Architecture.md
│ ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│ ├── SPEC-6 Explicit Project Parameter Architecture.md
│ ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│ ├── SPEC-8 TigrisFS Integration.md
│ ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│ ├── SPEC-9 Signed Header Tenant Information.md
│ └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│ └── basic_memory
│ ├── __init__.py
│ ├── alembic
│ │ ├── alembic.ini
│ │ ├── env.py
│ │ ├── migrations.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ ├── 3dae7c7b1564_initial_schema.py
│ │ ├── 502b60eaa905_remove_required_from_entity_permalink.py
│ │ ├── 5fe1ab1ccebe_add_projects_table.py
│ │ ├── 647e7a75e2cd_project_constraint_fix.py
│ │ ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│ │ ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│ │ ├── b3c3938bacdb_relation_to_name_unique_index.py
│ │ ├── cc7172b46608_update_search_index_schema.py
│ │ └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── directory_router.py
│ │ │ ├── importer_router.py
│ │ │ ├── knowledge_router.py
│ │ │ ├── management_router.py
│ │ │ ├── memory_router.py
│ │ │ ├── project_router.py
│ │ │ ├── prompt_router.py
│ │ │ ├── resource_router.py
│ │ │ ├── search_router.py
│ │ │ └── utils.py
│ │ └── template_loader.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── auth.py
│ │ ├── commands
│ │ │ ├── __init__.py
│ │ │ ├── cloud
│ │ │ │ ├── __init__.py
│ │ │ │ ├── api_client.py
│ │ │ │ ├── bisync_commands.py
│ │ │ │ ├── cloud_utils.py
│ │ │ │ ├── core_commands.py
│ │ │ │ ├── mount_commands.py
│ │ │ │ ├── rclone_config.py
│ │ │ │ ├── rclone_installer.py
│ │ │ │ ├── upload_command.py
│ │ │ │ └── upload.py
│ │ │ ├── command_utils.py
│ │ │ ├── db.py
│ │ │ ├── import_chatgpt.py
│ │ │ ├── import_claude_conversations.py
│ │ │ ├── import_claude_projects.py
│ │ │ ├── import_memory_json.py
│ │ │ ├── mcp.py
│ │ │ ├── project.py
│ │ │ ├── status.py
│ │ │ ├── sync.py
│ │ │ └── tool.py
│ │ └── main.py
│ ├── config.py
│ ├── db.py
│ ├── deps.py
│ ├── file_utils.py
│ ├── ignore_utils.py
│ ├── importers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chatgpt_importer.py
│ │ ├── claude_conversations_importer.py
│ │ ├── claude_projects_importer.py
│ │ ├── memory_json_importer.py
│ │ └── utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── entity_parser.py
│ │ ├── markdown_processor.py
│ │ ├── plugins.py
│ │ ├── schemas.py
│ │ └── utils.py
│ ├── mcp
│ │ ├── __init__.py
│ │ ├── async_client.py
│ │ ├── project_context.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── ai_assistant_guide.py
│ │ │ ├── continue_conversation.py
│ │ │ ├── recent_activity.py
│ │ │ ├── search.py
│ │ │ └── utils.py
│ │ ├── resources
│ │ │ ├── ai_assistant_guide.md
│ │ │ └── project_info.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── build_context.py
│ │ ├── canvas.py
│ │ ├── chatgpt_tools.py
│ │ ├── delete_note.py
│ │ ├── edit_note.py
│ │ ├── list_directory.py
│ │ ├── move_note.py
│ │ ├── project_management.py
│ │ ├── read_content.py
│ │ ├── read_note.py
│ │ ├── recent_activity.py
│ │ ├── search.py
│ │ ├── utils.py
│ │ ├── view_note.py
│ │ └── write_note.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── knowledge.py
│ │ ├── project.py
│ │ └── search.py
│ ├── repository
│ │ ├── __init__.py
│ │ ├── entity_repository.py
│ │ ├── observation_repository.py
│ │ ├── project_info_repository.py
│ │ ├── project_repository.py
│ │ ├── relation_repository.py
│ │ ├── repository.py
│ │ └── search_repository.py
│ ├── schemas
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloud.py
│ │ ├── delete.py
│ │ ├── directory.py
│ │ ├── importer.py
│ │ ├── memory.py
│ │ ├── project_info.py
│ │ ├── prompt.py
│ │ ├── request.py
│ │ ├── response.py
│ │ ├── search.py
│ │ └── sync_report.py
│ ├── services
│ │ ├── __init__.py
│ │ ├── context_service.py
│ │ ├── directory_service.py
│ │ ├── entity_service.py
│ │ ├── exceptions.py
│ │ ├── file_service.py
│ │ ├── initialization.py
│ │ ├── link_resolver.py
│ │ ├── project_service.py
│ │ ├── search_service.py
│ │ └── service.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── background_sync.py
│ │ ├── sync_service.py
│ │ └── watch_service.py
│ ├── templates
│ │ └── prompts
│ │ ├── continue_conversation.hbs
│ │ └── search.hbs
│ └── utils.py
├── test-int
│ ├── BENCHMARKS.md
│ ├── cli
│ │ ├── test_project_commands_integration.py
│ │ ├── test_sync_commands_integration.py
│ │ └── test_version_integration.py
│ ├── conftest.py
│ ├── mcp
│ │ ├── test_build_context_underscore.py
│ │ ├── test_build_context_validation.py
│ │ ├── test_chatgpt_tools_integration.py
│ │ ├── test_default_project_mode_integration.py
│ │ ├── test_delete_note_integration.py
│ │ ├── test_edit_note_integration.py
│ │ ├── test_list_directory_integration.py
│ │ ├── test_move_note_integration.py
│ │ ├── test_project_management_integration.py
│ │ ├── test_project_state_sync_integration.py
│ │ ├── test_read_content_integration.py
│ │ ├── test_read_note_integration.py
│ │ ├── test_search_integration.py
│ │ ├── test_single_project_mcp_integration.py
│ │ └── test_write_note_integration.py
│ ├── test_db_wal_mode.py
│ ├── test_disable_permalinks_integration.py
│ └── test_sync_performance_benchmark.py
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── conftest.py
│ │ ├── test_async_client.py
│ │ ├── test_continue_conversation_template.py
│ │ ├── test_directory_router.py
│ │ ├── test_importer_router.py
│ │ ├── test_knowledge_router.py
│ │ ├── test_management_router.py
│ │ ├── test_memory_router.py
│ │ ├── test_project_router_operations.py
│ │ ├── test_project_router.py
│ │ ├── test_prompt_router.py
│ │ ├── test_relation_background_resolution.py
│ │ ├── test_resource_router.py
│ │ ├── test_search_router.py
│ │ ├── test_search_template.py
│ │ ├── test_template_loader_helpers.py
│ │ └── test_template_loader.py
│ ├── cli
│ │ ├── conftest.py
│ │ ├── test_bisync_commands.py
│ │ ├── test_cli_tools.py
│ │ ├── test_cloud_authentication.py
│ │ ├── test_cloud_utils.py
│ │ ├── test_ignore_utils.py
│ │ ├── test_import_chatgpt.py
│ │ ├── test_import_claude_conversations.py
│ │ ├── test_import_claude_projects.py
│ │ ├── test_import_memory_json.py
│ │ └── test_upload.py
│ ├── conftest.py
│ ├── db
│ │ └── test_issue_254_foreign_key_constraints.py
│ ├── importers
│ │ ├── test_importer_base.py
│ │ └── test_importer_utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── test_date_frontmatter_parsing.py
│ │ ├── test_entity_parser_error_handling.py
│ │ ├── test_entity_parser.py
│ │ ├── test_markdown_plugins.py
│ │ ├── test_markdown_processor.py
│ │ ├── test_observation_edge_cases.py
│ │ ├── test_parser_edge_cases.py
│ │ ├── test_relation_edge_cases.py
│ │ └── test_task_detection.py
│ ├── mcp
│ │ ├── conftest.py
│ │ ├── test_obsidian_yaml_formatting.py
│ │ ├── test_permalink_collision_file_overwrite.py
│ │ ├── test_prompts.py
│ │ ├── test_resources.py
│ │ ├── test_tool_build_context.py
│ │ ├── test_tool_canvas.py
│ │ ├── test_tool_delete_note.py
│ │ ├── test_tool_edit_note.py
│ │ ├── test_tool_list_directory.py
│ │ ├── test_tool_move_note.py
│ │ ├── test_tool_read_content.py
│ │ ├── test_tool_read_note.py
│ │ ├── test_tool_recent_activity.py
│ │ ├── test_tool_resource.py
│ │ ├── test_tool_search.py
│ │ ├── test_tool_utils.py
│ │ ├── test_tool_view_note.py
│ │ ├── test_tool_write_note.py
│ │ └── tools
│ │ └── test_chatgpt_tools.py
│ ├── Non-MarkdownFileSupport.pdf
│ ├── repository
│ │ ├── test_entity_repository_upsert.py
│ │ ├── test_entity_repository.py
│ │ ├── test_entity_upsert_issue_187.py
│ │ ├── test_observation_repository.py
│ │ ├── test_project_info_repository.py
│ │ ├── test_project_repository.py
│ │ ├── test_relation_repository.py
│ │ ├── test_repository.py
│ │ ├── test_search_repository_edit_bug_fix.py
│ │ └── test_search_repository.py
│ ├── schemas
│ │ ├── test_base_timeframe_minimum.py
│ │ ├── test_memory_serialization.py
│ │ ├── test_memory_url_validation.py
│ │ ├── test_memory_url.py
│ │ ├── test_schemas.py
│ │ └── test_search.py
│ ├── Screenshot.png
│ ├── services
│ │ ├── test_context_service.py
│ │ ├── test_directory_service.py
│ │ ├── test_entity_service_disable_permalinks.py
│ │ ├── test_entity_service.py
│ │ ├── test_file_service.py
│ │ ├── test_initialization.py
│ │ ├── test_link_resolver.py
│ │ ├── test_project_removal_bug.py
│ │ ├── test_project_service_operations.py
│ │ ├── test_project_service.py
│ │ └── test_search_service.py
│ ├── sync
│ │ ├── test_character_conflicts.py
│ │ ├── test_sync_service_incremental.py
│ │ ├── test_sync_service.py
│ │ ├── test_sync_wikilink_issue.py
│ │ ├── test_tmp_files.py
│ │ ├── test_watch_service_edge_cases.py
│ │ ├── test_watch_service_reload.py
│ │ └── test_watch_service.py
│ ├── test_config.py
│ ├── test_db_migration_deduplication.py
│ ├── test_deps.py
│ ├── test_production_cascade_delete.py
│ └── utils
│ ├── test_file_utils.py
│ ├── test_frontmatter_obsidian_compatible.py
│ ├── test_parse_tags.py
│ ├── test_permalink_formatting.py
│ ├── test_utf8_handling.py
│ └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
├── api-performance.md
├── background-relations.md
├── basic-memory-home.md
├── bug-fixes.md
├── chatgpt-integration.md
├── cloud-authentication.md
├── cloud-bisync.md
├── cloud-mode-usage.md
├── cloud-mount.md
├── default-project-mode.md
├── env-file-removal.md
├── env-var-overrides.md
├── explicit-project-parameter.md
├── gitignore-integration.md
├── project-root-env-var.md
├── README.md
└── sqlite-performance.md
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
3.12
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
*.py[cod]
__pycache__/
.pytest_cache/
.coverage
htmlcov/
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Installer artifacts
installer/build/
installer/dist/
# Temporary disk images
rw.*.dmg
# Virtual environments
.env
.venv
env/
venv/
ENV/
# IDE
.idea/
.vscode/
*.swp
*.swo
# macOS
.DS_Store
.coverage.*
# obsidian docs:
/docs/.obsidian/
/examples/.obsidian/
/examples/.basic-memory/
# claude action
claude-output
**/.claude/settings.local.json
```
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
```
# Git files
.git/
.gitignore
.gitattributes
# Development files
.vscode/
.idea/
*.swp
*.swo
*~
# Testing files
tests/
test-int/
.pytest_cache/
.coverage
htmlcov/
# Build artifacts
build/
dist/
*.egg-info/
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
# Virtual environments (uv creates these during build)
.venv/
venv/
.env
# CI/CD files
.github/
# Documentation (keep README.md and pyproject.toml)
docs/
CHANGELOG.md
CLAUDE.md
CONTRIBUTING.md
# Example files not needed for runtime
examples/
# Local development files
.basic-memory/
*.db
*.sqlite3
# OS files
.DS_Store
Thumbs.db
# Temporary files
tmp/
temp/
*.tmp
*.log
```
--------------------------------------------------------------------------------
/v15-docs/README.md:
--------------------------------------------------------------------------------
```markdown
# v0.15.0 Documentation Notes
This directory contains user-focused documentation notes for v0.15.0 changes. These notes are written from the user's perspective and will be used to update the main documentation site (docs.basicmemory.com).
## Purpose
- Capture complete user-facing details of code changes
- Provide examples and migration guidance
- Serve as source material for final documentation
- **Temporary workspace** - will be removed after release docs are complete
## Notes Structure
Each note covers a specific change or feature:
- **What changed** - User-visible behavior changes
- **Why it matters** - Impact and benefits
- **How to use** - Examples and usage patterns
- **Migration** - Steps to adapt (if breaking change)
## Coverage
Based on v0.15.0-RELEASE-DOCS.md:
### Breaking Changes
- [x] explicit-project-parameter.md (SPEC-6: #298)
- [x] default-project-mode.md
### Configuration
- [x] project-root-env-var.md (#334)
- [x] basic-memory-home.md (clarify relationship with PROJECT_ROOT)
- [x] env-var-overrides.md
### Cloud Features
- [x] cloud-authentication.md (SPEC-13: #327)
- [x] cloud-bisync.md (SPEC-9: #322)
- [x] cloud-mount.md (#306)
- [x] cloud-mode-usage.md
### Security & Performance
- [x] env-file-removal.md (#330)
- [x] gitignore-integration.md (#314)
- [x] sqlite-performance.md (#316)
- [x] background-relations.md (#319)
- [x] api-performance.md (SPEC-11: #315)
### Bug Fixes & Platform
- [x] bug-fixes.md (13+ fixes including #328, #329, #287, #281, #330, Python 3.13)
### Integrations
- [x] chatgpt-integration.md (ChatGPT MCP tools, remote only, Pro subscription required)
### AI Assistant Guides
- [x] ai-assistant-guide-extended.md (Extended guide for docs site with comprehensive examples)
## Usage
From the docs.basicmemory.com repo, reference these notes to create/update:
- Migration guides
- Feature documentation
- Release notes
- Getting started guides
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
[License: AGPL v3](https://www.gnu.org/licenses/agpl-3.0)
[PyPI version](https://badge.fury.io/py/basic-memory)
[Python 3.12+](https://www.python.org/downloads/)
[Tests](https://github.com/basicmachines-co/basic-memory/actions)
[Ruff](https://github.com/astral-sh/ruff)
[Smithery](https://smithery.ai/server/@basicmachines-co/basic-memory)
## 🚀 Basic Memory Cloud is Live!
- **Cross-device and multi-platform support is here.** Your knowledge graph now works on desktop, web, and mobile, seamlessly synced across all your AI tools (Claude, ChatGPT, Gemini, Claude Code, and Codex).
- **Early Supporter Pricing:** Early adopters get 25% off forever.

The open source project continues as always. Cloud just makes it work everywhere.

[Sign up now →](https://basicmemory.com/beta) with a 7-day free trial.
# Basic Memory
Basic Memory lets you build persistent knowledge through natural conversations with Large Language Models (LLMs) like
Claude, while keeping everything in simple Markdown files on your computer. It uses the Model Context Protocol (MCP) to
enable any compatible LLM to read and write to your local knowledge base.
- Website: https://basicmachines.co
- Documentation: https://memory.basicmachines.co
## Pick up your conversation right where you left off
- AI assistants can load context from local files in a new conversation
- Notes are saved locally as Markdown files in real time
- No project knowledge or special prompting required
https://github.com/user-attachments/assets/a55d8238-8dd0-454a-be4c-8860dbbd0ddc
## Quick Start
```bash
# Install with uv (recommended)
uv tool install basic-memory
# Configure Claude Desktop (edit ~/Library/Application Support/Claude/claude_desktop_config.json)
# Add this to your config:
{
  "mcpServers": {
    "basic-memory": {
      "command": "uvx",
      "args": [
        "basic-memory",
        "mcp"
      ]
    }
  }
}
# Now in Claude Desktop, you can:
# - Write notes with "Create a note about coffee brewing methods"
# - Read notes with "What do I know about pour over coffee?"
# - Search with "Find information about Ethiopian beans"
```
You can view shared context via files in `~/basic-memory` (default directory location).
### Alternative Installation via Smithery
You can use [Smithery](https://smithery.ai/server/@basicmachines-co/basic-memory) to automatically configure Basic
Memory for Claude Desktop:
```bash
npx -y @smithery/cli install @basicmachines-co/basic-memory --client claude
```
This installs and configures Basic Memory without requiring manual edits to the Claude Desktop configuration file. The
Smithery server hosts the MCP server component, while your data remains stored locally as Markdown files.
### Glama.ai
<a href="https://glama.ai/mcp/servers/o90kttu9ym">
<img width="380" height="200" src="https://glama.ai/mcp/servers/o90kttu9ym/badge" alt="basic-memory MCP server" />
</a>
## Why Basic Memory?
Most LLM interactions are ephemeral - you ask a question, get an answer, and everything is forgotten. Each conversation
starts fresh, without the context or knowledge from previous ones. Current workarounds have limitations:
- Chat histories capture conversations but aren't structured knowledge
- RAG systems can query documents but don't let LLMs write back
- Vector databases require complex setups and often live in the cloud
- Knowledge graphs typically need specialized tools to maintain
Basic Memory addresses these problems with a simple approach: structured Markdown files that both humans and LLMs can
read
and write to. The key advantages:
- **Local-first:** All knowledge stays in files you control
- **Bi-directional:** Both you and the LLM read and write to the same files
- **Structured yet simple:** Uses familiar Markdown with semantic patterns
- **Traversable knowledge graph:** LLMs can follow links between topics
- **Standard formats:** Works with existing editors like Obsidian
- **Lightweight infrastructure:** Just local files indexed in a local SQLite database
With Basic Memory, you can:
- Have conversations that build on previous knowledge
- Create structured notes during natural conversations
- Have conversations with LLMs that remember what you've discussed before
- Navigate your knowledge graph semantically
- Keep everything local and under your control
- Use familiar tools like Obsidian to view and edit notes
- Build a personal knowledge base that grows over time
- Sync your knowledge to the cloud with bidirectional synchronization
- Authenticate and manage cloud projects with subscription validation
- Mount cloud storage for direct file access
## How It Works in Practice
Let's say you're exploring coffee brewing methods and want to capture your knowledge. Here's how it works:
1. Start by chatting normally:
```
I've been experimenting with different coffee brewing methods. Key things I've learned:
- Pour over gives more clarity in flavor than French press
- Water temperature is critical - around 205°F seems best
- Freshly ground beans make a huge difference
```
... continue conversation.
2. Ask the LLM to help structure this knowledge:
```
"Let's write a note about coffee brewing methods."
```
LLM creates a new Markdown file on your system (which you can see instantly in Obsidian or your editor):
```markdown
---
title: Coffee Brewing Methods
permalink: coffee-brewing-methods
tags:
- coffee
- brewing
---
# Coffee Brewing Methods
## Observations
- [method] Pour over provides more clarity and highlights subtle flavors
- [technique] Water temperature at 205°F (96°C) extracts optimal compounds
- [principle] Freshly ground beans preserve aromatics and flavor
## Relations
- relates_to [[Coffee Bean Origins]]
- requires [[Proper Grinding Technique]]
- affects [[Flavor Extraction]]
```
The note embeds semantic content and links to other topics via simple Markdown formatting.
3. You see this file on your computer in real time in the current project directory (default `~/basic-memory`).
- Real-time sync can be enabled by running `basic-memory sync --watch`
4. In a chat with the LLM, you can reference a topic:
```
Look at `coffee-brewing-methods` for context about pour over coffee
```
The LLM can now build rich context from the knowledge graph. For example:
```
Following relation 'relates_to [[Coffee Bean Origins]]':
- Found information about Ethiopian Yirgacheffe
- Notes on Colombian beans' nutty profile
- Altitude effects on bean characteristics
Following relation 'requires [[Proper Grinding Technique]]':
- Burr vs. blade grinder comparisons
- Grind size recommendations for different methods
- Impact of consistent particle size on extraction
```
Each related document can lead to more context, building a rich semantic understanding of your knowledge base.
This creates a two-way flow where:
- Humans write and edit Markdown files
- LLMs read and write through the MCP protocol
- Sync keeps everything consistent
- All knowledge stays in local files.
## Technical Implementation
Under the hood, Basic Memory:
1. Stores everything in Markdown files
2. Uses a SQLite database for searching and indexing
3. Extracts semantic meaning from simple Markdown patterns
- Files become `Entity` objects
- Each `Entity` can have `Observations`, or facts associated with it
- `Relations` connect entities together to form the knowledge graph
4. Maintains the local knowledge graph derived from the files
5. Provides bidirectional synchronization between files and the knowledge graph
6. Implements the Model Context Protocol (MCP) for AI integration
7. Exposes tools that let AI assistants traverse and manipulate the knowledge graph
8. Uses memory:// URLs to reference entities across tools and conversations
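To make the Entity/Observation/Relation model above concrete, here is a rough sketch of the conceptual shapes as plain Python dataclasses. The names are illustrative only, not the project's actual SQLAlchemy models:

```python
from dataclasses import dataclass, field

@dataclass
class Observation:
    category: str          # e.g. "method", "tip"
    content: str
    tags: list[str] = field(default_factory=list)
    context: str | None = None

@dataclass
class Relation:
    relation_type: str     # e.g. "relates_to", "requires"
    target: str            # title of the linked entity, from [[WikiLink]]

@dataclass
class Entity:
    title: str
    permalink: str         # URI slug, addressable as memory://<permalink>
    observations: list[Observation] = field(default_factory=list)
    relations: list[Relation] = field(default_factory=list)
```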
The file format is just Markdown with some simple markup:
Each Markdown file has:
### Frontmatter
```markdown
title: <Entity title>
type: <The type of Entity> (e.g. note)
permalink: <a uri slug>
- <optional metadata> (such as tags)
```
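Since frontmatter is plain YAML between `---` delimiters, it can be read with standard tooling. A minimal sketch assuming PyYAML is installed (this is not the project's actual parser):

```python
import yaml

def read_frontmatter(markdown_text: str) -> dict:
    """Return the YAML frontmatter at the top of a Markdown file, if any."""
    if not markdown_text.startswith("---"):
        return {}
    # Frontmatter sits between the first pair of "---" lines
    _, frontmatter, _body = markdown_text.split("---", 2)
    return yaml.safe_load(frontmatter) or {}

text = "---\ntitle: Coffee Brewing Methods\npermalink: coffee-brewing-methods\n---\n# Coffee Brewing Methods\n"
print(read_frontmatter(text))  # {'title': 'Coffee Brewing Methods', 'permalink': 'coffee-brewing-methods'}
```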
### Observations
Observations are facts about a topic.
They can be added as Markdown list items using a special format that includes a `category`, optional `tags` marked with a
"#" character, and an optional `context`.
Observation Markdown format:
```markdown
- [category] content #tag (optional context)
```
Examples of observations:
```markdown
- [method] Pour over extracts more floral notes than French press
- [tip] Grind size should be medium-fine for pour over #brewing
- [preference] Ethiopian beans have bright, fruity flavors (especially from Yirgacheffe)
- [fact] Lighter roasts generally contain more caffeine than dark roasts
- [experiment] Tried 1:15 coffee-to-water ratio with good results
- [resource] James Hoffman's V60 technique on YouTube is excellent
- [question] Does water temperature affect extraction of different compounds differently?
- [note] My favorite local shop uses a 30-second bloom time
```
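For illustration, a single observation line can be picked apart with a regular expression. This is a sketch, not the project's actual parser (which lives under `src/basic_memory/markdown/`):

```python
import re

# Matches: - [category] content #tag (optional context)
OBSERVATION = re.compile(r"^- \[(?P<category>[^\]]+)\] (?P<content>.*)$")

def parse_observation(line: str) -> dict | None:
    match = OBSERVATION.match(line.strip())
    if not match:
        return None
    content = match.group("content")
    tags = re.findall(r"#([\w-]+)", content)  # tags are marked with "#"
    return {"category": match.group("category"), "content": content, "tags": tags}

print(parse_observation("- [tip] Grind size should be medium-fine for pour over #brewing"))
# -> category 'tip', tags ['brewing']
```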
### Relations
Relations are links to other topics. They define how entities connect in the knowledge graph.
Markdown format:
```markdown
- relation_type [[WikiLink]] (optional context)
```
Examples of relations:
```markdown
- pairs_well_with [[Chocolate Desserts]]
- grown_in [[Ethiopia]]
- contrasts_with [[Tea Brewing Methods]]
- requires [[Burr Grinder]]
- improves_with [[Fresh Beans]]
- relates_to [[Morning Routine]]
- inspired_by [[Japanese Coffee Culture]]
- documented_in [[Coffee Journal]]
```
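Relations are what make the graph traversable. As a sketch of the idea (not the actual implementation), you can extract the `relation_type [[Target]]` pairs from a note and walk them breadth-first to a given depth, much like `build_context` does:

```python
import re

RELATION = re.compile(r"^- (?P<type>\w+) \[\[(?P<target>[^\]]+)\]\]")

def parse_relations(markdown_text: str) -> list[tuple[str, str]]:
    """Return (relation_type, target) pairs found in a note body."""
    return [
        (m.group("type"), m.group("target"))
        for line in markdown_text.splitlines()
        if (m := RELATION.match(line.strip()))
    ]

def walk(notes: dict[str, str], start: str, depth: int) -> set[str]:
    """Collect note titles reachable from `start` within `depth` hops."""
    seen = {start}
    frontier = [start]
    for _ in range(depth):
        next_frontier = []
        for title in frontier:
            for _, target in parse_relations(notes.get(title, "")):
                if target not in seen:
                    seen.add(target)
                    next_frontier.append(target)
        frontier = next_frontier
    return seen
```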
## Using with VS Code
Add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.
```json
{
  "mcp": {
    "servers": {
      "basic-memory": {
        "command": "uvx",
        "args": ["basic-memory", "mcp"]
      }
    }
  }
}
```
Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
```json
{
  "servers": {
    "basic-memory": {
      "command": "uvx",
      "args": ["basic-memory", "mcp"]
    }
  }
}
```
You can use Basic Memory with VS Code to easily retrieve and store information while coding.
## Using with Claude Desktop
Basic Memory is built using the MCP (Model Context Protocol) and works with the Claude desktop app (https://claude.ai/):
1. Configure Claude Desktop to use Basic Memory:
Edit your MCP configuration file (usually located at `~/Library/Application Support/Claude/claude_desktop_config.json` on macOS):
```json
{
  "mcpServers": {
    "basic-memory": {
      "command": "uvx",
      "args": [
        "basic-memory",
        "mcp"
      ]
    }
  }
}
```
If you want to use a specific project (see [Multiple Projects](#multiple-projects) below), update your Claude Desktop
config:
```json
{
  "mcpServers": {
    "basic-memory": {
      "command": "uvx",
      "args": [
        "basic-memory",
        "mcp",
        "--project",
        "your-project-name"
      ]
    }
  }
}
```
2. Sync your knowledge:
```bash
# One-time sync of local knowledge updates
basic-memory sync
# Run realtime sync process (recommended)
basic-memory sync --watch
```
3. Cloud features (optional, requires subscription):
```bash
# Authenticate with cloud
basic-memory cloud login
# Bidirectional sync with cloud
basic-memory cloud sync
# Verify cloud integrity
basic-memory cloud check
# Mount cloud storage
basic-memory cloud mount
```
4. In Claude Desktop, the LLM can now use these tools (a programmatic sketch using an MCP client follows the example prompts below):
**Content Management:**
```
write_note(title, content, folder, tags) - Create or update notes
read_note(identifier, page, page_size) - Read notes by title or permalink
read_content(path) - Read raw file content (text, images, binaries)
view_note(identifier) - View notes as formatted artifacts
edit_note(identifier, operation, content) - Edit notes incrementally
move_note(identifier, destination_path) - Move notes with database consistency
delete_note(identifier) - Delete notes from knowledge base
```
**Knowledge Graph Navigation:**
```
build_context(url, depth, timeframe) - Navigate knowledge graph via memory:// URLs
recent_activity(type, depth, timeframe) - Find recently updated information
list_directory(dir_name, depth) - Browse directory contents with filtering
```
**Search & Discovery:**
```
search(query, page, page_size) - Search across your knowledge base
```
**Project Management:**
```
list_memory_projects() - List all available projects
create_memory_project(project_name, project_path) - Create new projects
get_current_project() - Show current project stats
sync_status() - Check synchronization status
```
**Visualization:**
```
canvas(nodes, edges, title, folder) - Generate knowledge visualizations
```
5. Example prompts to try:
```
"Create a note about our project architecture decisions"
"Find information about JWT authentication in my notes"
"Create a canvas visualization of my project components"
"Read my notes on the authentication system"
"What have I been working on in the past week?"
```
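These tools are not limited to Claude Desktop; any MCP client can call them. A minimal sketch using the FastMCP client library (assuming a recent `fastmcp` release that accepts an MCP config dict; check its documentation for your version):

```python
import asyncio
from fastmcp import Client

config = {"mcpServers": {"basic-memory": {"command": "uvx", "args": ["basic-memory", "mcp"]}}}

async def main():
    async with Client(config) as client:
        # Call the same write_note tool that Claude Desktop uses
        result = await client.call_tool(
            "write_note",
            {"title": "Coffee Brewing Methods", "folder": "notes", "content": "# Coffee Brewing Methods"},
        )
        print(result)

asyncio.run(main())
```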
## Further info
See the [Documentation](https://memory.basicmachines.co/) for more info, including:
- [Complete User Guide](https://docs.basicmemory.com/user-guide/)
- [CLI tools](https://docs.basicmemory.com/guides/cli-reference/)
- [Cloud CLI and Sync](https://docs.basicmemory.com/guides/cloud-cli/)
- [Managing multiple Projects](https://docs.basicmemory.com/guides/cli-reference/#project)
- [Importing data from OpenAI/Claude Projects](https://docs.basicmemory.com/guides/cli-reference/#import)
## License
AGPL-3.0
Contributions are welcome. See the [Contributing](CONTRIBUTING.md) guide for info about setting up the project locally
and submitting PRs.
## Star History
<a href="https://www.star-history.com/#basicmachines-co/basic-memory&Date">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date&theme=dark" />
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=basicmachines-co/basic-memory&type=Date" />
</picture>
</a>
Built with ♥️ by Basic Machines
```
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
```markdown
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.x.x | :white_check_mark: |
## Reporting a Vulnerability
If you find a vulnerability, please contact [email protected].
```
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
```markdown
# Code of Conduct
## Purpose
Maintain a respectful and professional environment where contributions can be made without harassment or
negativity.
## Standards
Respectful communication and collaboration are expected. Offensive behavior, harassment, or personal attacks will not be
tolerated.
## Reporting Issues
To report inappropriate behavior, contact [[email protected]].
## Consequences
Violations of this code may lead to consequences, including being banned from contributing to the project.
```
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
```markdown
# Contributing to Basic Memory
Thank you for considering contributing to Basic Memory! This document outlines the process for contributing to the
project and how to get started as a developer.
## Getting Started
### Development Environment
1. **Clone the Repository**:
```bash
git clone https://github.com/basicmachines-co/basic-memory.git
cd basic-memory
```
2. **Install Dependencies**:
```bash
# Using just (recommended)
just install
# Or using uv
uv pip install -e ".[dev]"
# Or using pip
pip install -e ".[dev]"
```
> **Note**: Basic Memory uses [just](https://just.systems) as a modern command runner. Install with `brew install just` or `cargo install just`.
3. **Activate the Virtual Environment**
```bash
source .venv/bin/activate
```
4. **Run the Tests**:
```bash
# Run all tests with unified coverage (unit + integration)
just test
# Run unit tests only (fast, no coverage)
just test-unit
# Run integration tests only (fast, no coverage)
just test-int
# Generate HTML coverage report
just coverage
# Run a specific test
pytest tests/path/to/test_file.py::test_function_name
```
### Development Workflow
1. **Fork the Repo**: Fork the repository on GitHub and clone your copy.
2. **Create a Branch**: Create a new branch for your feature or fix.
```bash
git checkout -b feature/your-feature-name
# or
git checkout -b fix/issue-you-are-fixing
```
3. **Make Your Changes**: Implement your changes with appropriate test coverage.
4. **Check Code Quality**:
```bash
# Run all checks at once
just check
# Or run individual checks
just lint # Run linting
just format # Format code
just type-check # Type checking
```
5. **Test Your Changes**: Ensure all tests pass locally and maintain 100% test coverage.
```bash
just test
```
6. **Submit a PR**: Submit a pull request with a detailed description of your changes.
## LLM-Assisted Development
This project is designed for collaborative development between humans and LLMs (Large Language Models):
1. **CLAUDE.md**: The repository includes a `CLAUDE.md` file that serves as a project guide for both humans and LLMs.
This file contains:
- Key project information and architectural overview
- Development commands and workflows
- Code style guidelines
- Documentation standards
2. **AI-Human Collaborative Workflow**:
- We encourage using LLMs like Claude for code generation, reviews, and documentation
- When possible, save context in markdown files that can be referenced later
- This enables seamless knowledge transfer between different development sessions
- Claude can help with implementation details while you focus on architecture and design
3. **Adding to CLAUDE.md**:
- If you discover useful project information or common commands, consider adding them to CLAUDE.md
- This helps all contributors (human and AI) maintain consistent knowledge of the project
## Pull Request Process
1. **Create a Pull Request**: Open a PR against the `main` branch with a clear title and description.
2. **Sign the Developer Certificate of Origin (DCO)**: All contributions require signing our DCO, which certifies that
you have the right to submit your contributions. This will be automatically checked by our CLA assistant when you
create a PR.
3. **PR Description**: Include:
- What the PR changes
- Why the change is needed
- How you tested the changes
- Any related issues (use "Fixes #123" to automatically close issues)
4. **Code Review**: Wait for code review and address any feedback.
5. **CI Checks**: Ensure all CI checks pass.
6. **Merge**: Once approved, a maintainer will merge your PR.
## Developer Certificate of Origin
By contributing to this project, you agree to the [Developer Certificate of Origin (DCO)](CLA.md). This means you
certify that:
- You have the right to submit your contributions
- You're not knowingly submitting code with patent or copyright issues
- Your contributions are provided under the project's license (AGPL-3.0)
This is a lightweight alternative to a Contributor License Agreement and helps ensure that all contributions can be
properly incorporated into the project and potentially used in commercial applications.
### Signing Your Commits
Sign your commits using the `-s` or `--signoff` flag:
```bash
git commit -s -m "Your commit message"
```
This adds a `Signed-off-by` line to your commit message, certifying that you have the right to submit your contribution
under the project's license and that you agree to the DCO.
## Code Style Guidelines
- **Python Version**: Python 3.12+ with full type annotations (3.12+ required for type parameter syntax)
- **Line Length**: 100 characters maximum
- **Formatting**: Use ruff for consistent styling
- **Import Order**: Standard lib, third-party, local imports
- **Naming**: Use snake_case for functions/variables, PascalCase for classes
- **Documentation**: Add docstrings to public functions, classes, and methods
- **Type Annotations**: Use type hints for all functions and methods
## Testing Guidelines
### Test Structure
Basic Memory uses two test directories with unified coverage reporting:
- **`tests/`**: Unit tests that test individual components in isolation
- Fast execution with extensive mocking
- Test individual functions, classes, and modules
- Run with: `just test-unit` (no coverage, fast)
- **`test-int/`**: Integration tests that test real-world scenarios
- Test full workflows with real database and file operations
- Include performance benchmarks
- More realistic but slower than unit tests
- Run with: `just test-int` (no coverage, fast)
### Running Tests
```bash
# Run all tests with unified coverage report
just test
# Run only unit tests (fast iteration)
just test-unit
# Run only integration tests
just test-int
# Generate HTML coverage report
just coverage
# Run specific test
pytest tests/path/to/test_file.py::test_function_name
# Run tests excluding benchmarks
pytest -m "not benchmark"
# Run only benchmark tests
pytest -m benchmark test-int/test_sync_performance_benchmark.py
```
### Performance Benchmarks
The `test-int/test_sync_performance_benchmark.py` file contains performance benchmarks that measure sync and indexing speed:
- `test_benchmark_sync_100_files` - Small repository performance
- `test_benchmark_sync_500_files` - Medium repository performance
- `test_benchmark_sync_1000_files` - Large repository performance (marked slow)
- `test_benchmark_resync_no_changes` - Re-sync performance baseline
Run benchmarks with:
```bash
# Run all benchmarks (excluding slow ones)
pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"
# Run all benchmarks including slow ones
pytest test-int/test_sync_performance_benchmark.py -v -m benchmark
# Run specific benchmark
pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_100_files -v
```
See `test-int/BENCHMARKS.md` for detailed benchmark documentation.
### Testing Best Practices
- **Coverage Target**: We aim for high test coverage for all code
- **Test Framework**: Use pytest for unit and integration tests
- **Mocking**: Avoid mocking in integration tests; use sparingly in unit tests
- **Edge Cases**: Test both normal operation and edge cases
- **Database Testing**: Use in-memory SQLite for testing database operations
- **Fixtures**: Use async pytest fixtures for setup and teardown (see the sketch after this list)
- **Markers**: Use `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
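For example, a minimal async fixture for an in-memory database might look like this (a sketch using `pytest-asyncio`, SQLAlchemy, and the `aiosqlite` driver, not the project's actual conftest):

```python
from typing import AsyncIterator

import pytest_asyncio
from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine

@pytest_asyncio.fixture
async def engine() -> AsyncIterator[AsyncEngine]:
    """Provide an in-memory SQLite engine, disposed after each test."""
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    yield engine
    await engine.dispose()

async def test_engine_connects(engine: AsyncEngine):
    async with engine.connect() as conn:
        assert conn is not None
```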
## Release Process
Basic Memory uses automatic versioning based on git tags with `uv-dynamic-versioning`. Here's how releases work:
### Version Management
- **Development versions**: Automatically generated from git commits (e.g., `0.12.4.dev26+468a22f`)
- **Beta releases**: Created by tagging with beta suffixes (e.g., `git tag v0.13.0b1`)
- **Stable releases**: Created by tagging with version numbers (e.g., `git tag v0.13.0`)
### Release Workflows
#### Development Builds
- Automatically published to PyPI on every commit to `main`
- Version format: `0.12.4.dev26+468a22f` (base version + dev + commit count + hash)
- Users install with: `pip install basic-memory --pre --force-reinstall`
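To check which build you have installed, read the package metadata:

```python
from importlib.metadata import version

print(version("basic-memory"))  # e.g. "0.12.4.dev26+468a22f" for a dev build
```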
#### Beta Releases
1. Create and push a beta tag: `git tag v0.13.0b1 && git push origin v0.13.0b1`
2. GitHub Actions automatically builds and publishes to PyPI
3. Users install with: `pip install basic-memory --pre`
#### Stable Releases
1. Create and push a version tag: `git tag v0.13.0 && git push origin v0.13.0`
2. GitHub Actions automatically:
- Builds the package with version `0.13.0`
- Creates GitHub release with auto-generated notes
- Publishes to PyPI
3. Users install with: `pip install basic-memory`
### For Contributors
- No manual version bumping required
- Versions are automatically derived from git tags
- Focus on code changes, not version management
## Creating Issues
If you're planning to work on something, please create an issue first to discuss the approach. Include:
- A clear title and description
- Steps to reproduce if reporting a bug
- Expected behavior vs. actual behavior
- Any relevant logs or screenshots
- Your proposed solution, if you have one
## Code of Conduct
All contributors must follow the [Code of Conduct](CODE_OF_CONDUCT.md).
## Thank You!
Your contributions help make Basic Memory better. We appreciate your time and effort!
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
# CLAUDE.md - Basic Memory Project Guide
## Project Overview
Basic Memory is a local-first knowledge management system built on the Model Context Protocol (MCP). It enables
bidirectional communication between LLMs (like Claude) and markdown files, creating a personal knowledge graph that can
be traversed using links between documents.
## CODEBASE DEVELOPMENT
### Project information
See the [README.md](README.md) file for a project overview.
### Build and Test Commands
- Install: `just install` or `pip install -e ".[dev]"`
- Run all tests (with coverage): `just test` - Runs both unit and integration tests with unified coverage
- Run unit tests only: `just test-unit` - Fast, no coverage
- Run integration tests only: `just test-int` - Fast, no coverage
- Generate HTML coverage: `just coverage` - Opens in browser
- Single test: `pytest tests/path/to/test_file.py::test_function_name`
- Run benchmarks: `pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"`
- Lint: `just lint` or `ruff check . --fix`
- Type check: `just typecheck` or `uv run pyright`
- Format: `just format` or `uv run ruff format .`
- Run all code checks: `just check` (runs lint, format, typecheck, test)
- Create db migration: `just migration "Your migration message"`
- Run development MCP Inspector: `just run-inspector`
**Note:** Project requires Python 3.12+ (uses type parameter syntax and `type` aliases introduced in 3.12)
### Test Structure
- `tests/` - Unit tests for individual components (mocked, fast)
- `test-int/` - Integration tests for real-world scenarios (no mocks, realistic)
- Both directories are covered by unified coverage reporting
- Benchmark tests in `test-int/` are marked with `@pytest.mark.benchmark`
- Slow tests are marked with `@pytest.mark.slow`
### Code Style Guidelines
- Line length: 100 characters max
- Python 3.12+ with full type annotations (uses type parameters and type aliases)
- Format with ruff (consistent styling)
- Import order: standard lib, third-party, local imports
- Naming: snake_case for functions/variables, PascalCase for classes
- Prefer async patterns with SQLAlchemy 2.0
- Use Pydantic v2 for data validation and schemas
- CLI uses Typer for command structure
- API uses FastAPI for endpoints
- Follow the repository pattern for data access
- Tools communicate with API routers via the httpx ASGI client (in process)
### Codebase Architecture
- `/alembic` - Alembic db migrations
- `/api` - FastAPI implementation of REST endpoints
- `/cli` - Typer command-line interface
- `/markdown` - Markdown parsing and processing
- `/mcp` - Model Context Protocol server implementation
- `/models` - SQLAlchemy ORM models
- `/repository` - Data access layer
- `/schemas` - Pydantic models for validation
- `/services` - Business logic layer
- `/sync` - File synchronization services
### Development Notes
- MCP tools are defined in src/basic_memory/mcp/tools/
- MCP prompts are defined in src/basic_memory/mcp/prompts/
- MCP tools should be atomic, composable operations
- Use `textwrap.dedent()` for multi-line string formatting in prompts and tools (see the sketch after this list)
- MCP Prompts are used to invoke tools and format content with instructions for an LLM
- Schema changes require Alembic migrations
- SQLite is used for indexing and full text search, files are source of truth
- Testing uses pytest with asyncio support (strict mode)
- Unit tests (`tests/`) use mocks when necessary; integration tests (`test-int/`) use real implementations
- Test database uses in-memory SQLite
- Each test runs in a standalone environment with an in-memory SQLite database and a temporary file directory
- Performance benchmarks are in `test-int/test_sync_performance_benchmark.py`
- Use pytest markers: `@pytest.mark.benchmark` for benchmarks, `@pytest.mark.slow` for slow tests
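A quick illustration of the `textwrap.dedent()` convention mentioned above (hypothetical content, not an actual prompt from the codebase):

```python
import textwrap

def format_guide(topic: str) -> str:
    # dedent strips the indentation that keeps the source readable
    return textwrap.dedent(f"""
        # {topic}

        Use the search tool first, then build context from the results.
        """).strip()

print(format_guide("Continuing a conversation"))
```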
### Async Client Pattern (Important!)
**All MCP tools and CLI commands use the context manager pattern for HTTP clients:**
```python
from basic_memory.mcp.async_client import get_client

async def my_mcp_tool():
    async with get_client() as client:
        # Use client for API calls
        response = await call_get(client, "/path")
        return response
```
**Do NOT use:**
- ❌ `from basic_memory.mcp.async_client import client` (deprecated module-level client)
- ❌ Manual auth header management
- ❌ `inject_auth_header()` (deleted)
**Key principles:**
- Auth happens at client creation, not per-request
- Proper resource management via context managers
- Supports three modes: Local (ASGI), CLI cloud (HTTP + auth), Cloud app (factory injection)
- Factory pattern enables dependency injection for cloud consolidation
**For cloud app integration:**
```python
from basic_memory.mcp import async_client
# Set custom factory before importing tools
async_client.set_client_factory(your_custom_factory)
```
See SPEC-16 for full context manager refactor details.
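A custom factory is just an async context manager that yields an `httpx.AsyncClient`. A minimal sketch of what a cloud app might inject (illustrative, assuming the factory contract described above):

```python
from contextlib import asynccontextmanager

import httpx

@asynccontextmanager
async def your_custom_factory():
    # e.g. point at a tenant-specific API with auth resolved up front
    async with httpx.AsyncClient(
        base_url="https://api.example.com",
        headers={"Authorization": "Bearer <token>"},
    ) as client:
        yield client
```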
## BASIC MEMORY PRODUCT USAGE
### Knowledge Structure
- Entity: Any concept, document, or idea represented as a markdown file
- Observation: A categorized fact about an entity (`- [category] content`)
- Relation: A directional link between entities (`- relation_type [[Target]]`)
- Frontmatter: YAML metadata at the top of markdown files
- Knowledge representation follows precise markdown format:
- Observations with [category] prefixes
- Relations with WikiLinks [[Entity]]
- Frontmatter with metadata
### Basic Memory Commands
**Local Commands:**
- Sync knowledge: `basic-memory sync` or `basic-memory sync --watch`
- Import from Claude: `basic-memory import claude conversations`
- Import from ChatGPT: `basic-memory import chatgpt`
- Import from Memory JSON: `basic-memory import memory-json`
- Check sync status: `basic-memory status`
- Tool access: `basic-memory tools` (provides CLI access to MCP tools)
- Guide: `basic-memory tools basic-memory-guide`
- Continue: `basic-memory tools continue-conversation --topic="search"`
**Cloud Commands (requires subscription):**
- Authenticate: `basic-memory cloud login`
- Logout: `basic-memory cloud logout`
- Bidirectional sync: `basic-memory cloud sync`
- Integrity check: `basic-memory cloud check`
- Mount cloud storage: `basic-memory cloud mount`
- Unmount cloud storage: `basic-memory cloud unmount`
### MCP Capabilities
- Basic Memory exposes these MCP tools to LLMs:
**Content Management:**
- `write_note(title, content, folder, tags)` - Create/update markdown notes with semantic observations and relations
- `read_note(identifier, page, page_size)` - Read notes by title, permalink, or memory:// URL with knowledge graph awareness
- `read_content(path)` - Read raw file content (text, images, binaries) without knowledge graph processing
- `view_note(identifier, page, page_size)` - View notes as formatted artifacts for better readability
- `edit_note(identifier, operation, content)` - Edit notes incrementally (append, prepend, find/replace, replace_section)
- `move_note(identifier, destination_path)` - Move notes to new locations, updating database and maintaining links
- `delete_note(identifier)` - Delete notes from the knowledge base
**Knowledge Graph Navigation:**
- `build_context(url, depth, timeframe)` - Navigate the knowledge graph via memory:// URLs for conversation continuity
- `recent_activity(type, depth, timeframe)` - Get recently updated information with specified timeframe (e.g., "1d", "1 week")
- `list_directory(dir_name, depth, file_name_glob)` - Browse directory contents with filtering and depth control
**Search & Discovery:**
- `search_notes(query, page, page_size, search_type, types, entity_types, after_date)` - Full-text search across all content with advanced filtering options
**Project Management:**
- `list_memory_projects()` - List all available projects with their status
- `create_memory_project(project_name, project_path, set_default)` - Create new Basic Memory projects
- `delete_project(project_name)` - Delete a project from configuration
- `get_current_project()` - Get current project information and stats
- `sync_status()` - Check file synchronization and background operation status
**Visualization:**
- `canvas(nodes, edges, title, folder)` - Generate Obsidian canvas files for knowledge graph visualization
- MCP Prompts for better AI interaction:
- `ai_assistant_guide()` - Guidance on effectively using Basic Memory tools for AI assistants
- `continue_conversation(topic, timeframe)` - Continue previous conversations with relevant historical context
- `search(query, after_date)` - Search with detailed, formatted results for better context understanding
- `recent_activity(timeframe)` - View recently changed items with formatted output
- `json_canvas_spec()` - Full JSON Canvas specification for Obsidian visualization
### Cloud Features (v0.15.0+)
Basic Memory now supports cloud synchronization and storage (requires active subscription):
**Authentication:**
- JWT-based authentication with subscription validation
- Secure session management with token refresh
- Support for multiple cloud projects
**Bidirectional Sync:**
- rclone bisync integration for two-way synchronization
- Conflict resolution and integrity verification
- Real-time sync with change detection
- Mount/unmount cloud storage for direct file access
**Cloud Project Management:**
- Create and manage projects in the cloud
- Toggle between local and cloud modes
- Per-project sync configuration
- Subscription-based access control
**Security & Performance:**
- Removed .env file loading for improved security
- .gitignore integration (respects gitignored files)
- WAL mode for SQLite performance
- Background relation resolution (non-blocking startup)
- API performance optimizations (SPEC-11)
## AI-Human Collaborative Development
Basic Memory emerged from and enables a new kind of development process that combines human and AI capabilities. Instead
of using AI just for code generation, we've developed a true collaborative workflow:
1. AI (LLM) writes initial implementation based on specifications and context
2. Human reviews, runs tests, and commits code with any necessary adjustments
3. Knowledge persists across conversations using Basic Memory's knowledge graph
4. Development continues seamlessly across different AI sessions with consistent context
5. Results improve through iterative collaboration and shared understanding
This approach has allowed us to tackle more complex challenges and build a more robust system than either humans or AI
could achieve independently.
## GitHub Integration
Basic Memory has taken AI-Human collaboration to the next level by integrating Claude directly into the development workflow through GitHub:
### GitHub MCP Tools
Using the GitHub Model Context Protocol server, Claude can now:
- **Repository Management**:
- View repository files and structure
- Read file contents
- Create new branches
- Create and update files
- **Issue Management**:
- Create new issues
- Comment on existing issues
- Close and update issues
- Search across issues
- **Pull Request Workflow**:
- Create pull requests
- Review code changes
- Add comments to PRs
This integration enables Claude to participate as a full team member in the development process, not just as a code generation tool. Claude's GitHub account ([bm-claudeai](https://github.com/bm-claudeai)) is a member of the Basic Machines organization with direct contributor access to the codebase.
### Collaborative Development Process
With GitHub integration, the development workflow includes:
1. **Direct code review** - Claude can analyze PRs and provide detailed feedback
2. **Contribution tracking** - All of Claude's contributions are properly attributed in the Git history
3. **Branch management** - Claude can create feature branches for implementations
4. **Documentation maintenance** - Claude can keep documentation updated as the code evolves
This level of integration represents a new paradigm in AI-human collaboration, where the AI assistant becomes a full-fledged team member rather than just a tool for generating code snippets.
```
--------------------------------------------------------------------------------
/tests/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/__init__.py:
--------------------------------------------------------------------------------
```python
"""CLI tools for basic-memory"""
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/__init__.py:
--------------------------------------------------------------------------------
```python
"""MCP server for basic-memory."""
```
--------------------------------------------------------------------------------
/src/basic_memory/api/__init__.py:
--------------------------------------------------------------------------------
```python
"""Basic Memory API module."""
from .app import app
__all__ = ["app"]
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/server.py:
--------------------------------------------------------------------------------
```python
"""
Basic Memory FastMCP server.
"""
from fastmcp import FastMCP
mcp = FastMCP(
    name="Basic Memory",
)
```
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
```python
import os
# set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
os.environ["BASIC_MEMORY_ENV"] = "test"
```
--------------------------------------------------------------------------------
/src/basic_memory/sync/__init__.py:
--------------------------------------------------------------------------------
```python
"""Basic Memory sync services."""
from .sync_service import SyncService
from .watch_service import WatchService
__all__ = ["SyncService", "WatchService"]
```
--------------------------------------------------------------------------------
/src/basic_memory/models/base.py:
--------------------------------------------------------------------------------
```python
"""Base model class for SQLAlchemy models."""
from sqlalchemy.ext.asyncio import AsyncAttrs
from sqlalchemy.orm import DeclarativeBase
class Base(AsyncAttrs, DeclarativeBase):
    """Base class for all models"""

    pass
```
--------------------------------------------------------------------------------
/src/basic_memory/__init__.py:
--------------------------------------------------------------------------------
```python
"""basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
# Package version - updated by release automation
__version__ = "0.15.2"
# API version for FastAPI - independent of package version
__api_version__ = "v0"
```
--------------------------------------------------------------------------------
/src/basic_memory/services/__init__.py:
--------------------------------------------------------------------------------
```python
"""Services package."""
from .service import BaseService
from .file_service import FileService
from .entity_service import EntityService
from .project_service import ProjectService
__all__ = ["BaseService", "FileService", "EntityService", "ProjectService"]
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/__init__.py:
--------------------------------------------------------------------------------
```python
from .entity_repository import EntityRepository
from .observation_repository import ObservationRepository
from .project_repository import ProjectRepository
from .relation_repository import RelationRepository
__all__ = [
    "EntityRepository",
    "ObservationRepository",
    "ProjectRepository",
    "RelationRepository",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/models/__init__.py:
--------------------------------------------------------------------------------
```python
"""Models package for basic-memory."""
import basic_memory
from basic_memory.models.base import Base
from basic_memory.models.knowledge import Entity, Observation, Relation
from basic_memory.models.project import Project
__all__ = [
    "Base",
    "Entity",
    "Observation",
    "Relation",
    "Project",
    "basic_memory",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/services/service.py:
--------------------------------------------------------------------------------
```python
"""Base service class."""
from typing import TypeVar, Generic
from basic_memory.models import Base
T = TypeVar("T", bound=Base)
class BaseService(Generic[T]):
    """Base service that takes a repository."""

    def __init__(self, repository):
        """Initialize service with repository."""
        self.repository = repository
```
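The generic parameter ties a concrete service to its model type. A minimal sketch (not a repository file) of how a subclass might specialize `BaseService`; `find_by_permalink` is a hypothetical repository method used purely for illustration:
```python
from basic_memory.models import Entity
from basic_memory.services.service import BaseService

class EntityLookupService(BaseService[Entity]):
    """Hypothetical service specialized to Entity."""

    async def find(self, permalink: str) -> Entity | None:
        # find_by_permalink is assumed here for illustration; real
        # repositories expose their own query methods.
        return await self.repository.find_by_permalink(permalink)
```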
--------------------------------------------------------------------------------
/src/basic_memory/repository/project_info_repository.py:
--------------------------------------------------------------------------------
```python
from basic_memory.repository.repository import Repository
from basic_memory.models.project import Project
class ProjectInfoRepository(Repository):
"""Repository for statistics queries."""
def __init__(self, session_maker):
# Initialize with Project model as a reference
super().__init__(session_maker, Project)
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/__init__.py:
--------------------------------------------------------------------------------
```python
"""Cloud commands package."""
# Import all commands to register them with typer
from basic_memory.cli.commands.cloud.core_commands import * # noqa: F401,F403
from basic_memory.cli.commands.cloud.api_client import get_authenticated_headers, get_cloud_config # noqa: F401
from basic_memory.cli.commands.cloud.upload_command import * # noqa: F401,F403
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
```yaml
blank_issues_enabled: false
contact_links:
- name: Basic Memory Discussions
url: https://github.com/basicmachines-co/basic-memory/discussions
about: For questions, ideas, or more open-ended discussions
- name: Documentation
url: https://github.com/basicmachines-co/basic-memory#readme
about: Please check the documentation first before reporting an issue
```
--------------------------------------------------------------------------------
/test-int/cli/test_version_integration.py:
--------------------------------------------------------------------------------
```python
"""Integration tests for version command."""
from typer.testing import CliRunner
from basic_memory.cli.main import app
import basic_memory
def test_version_command():
"""Test 'bm --version' command shows version."""
runner = CliRunner()
result = runner.invoke(app, ["--version"])
assert result.exit_code == 0
assert basic_memory.__version__ in result.stdout
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/__init__.py:
--------------------------------------------------------------------------------
```python
"""CLI commands for basic-memory."""
from . import status, sync, db, import_memory_json, mcp, import_claude_conversations
from . import import_claude_projects, import_chatgpt, tool, project
__all__ = [
"status",
"sync",
"db",
"import_memory_json",
"mcp",
"import_claude_conversations",
"import_claude_projects",
"import_chatgpt",
"tool",
"project",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/__init__.py:
--------------------------------------------------------------------------------
```python
"""API routers."""
from . import knowledge_router as knowledge
from . import management_router as management
from . import memory_router as memory
from . import project_router as project
from . import resource_router as resource
from . import search_router as search
from . import prompt_router as prompt
__all__ = ["knowledge", "management", "memory", "project", "resource", "search", "prompt"]
```
--------------------------------------------------------------------------------
/tests/markdown/test_task_detection.py:
--------------------------------------------------------------------------------
```python
"""Test how markdown-it handles task lists."""
from markdown_it import MarkdownIt
def test_task_token_type():
"""Verify how markdown-it parses task list items."""
md = MarkdownIt()
content = """
- [ ] Unchecked task
- [x] Completed task
- [-] In progress task
"""
tokens = md.parse(content)
for token in tokens:
print(f"{token.type}: {token.content}")
```
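For context: vanilla markdown-it-py ships no task-list plugin, so the checkbox markers survive as literal text inside the inline tokens. A minimal sketch of that behavior:
```python
from markdown_it import MarkdownIt

# Without a task-list plugin, "[x]" is parsed as ordinary list-item text.
md = MarkdownIt()
tokens = md.parse("- [x] Completed task")
inline = [t for t in tokens if t.type == "inline"]
assert inline and inline[0].content == "[x] Completed task"
```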
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
```yaml
# Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
startCommand:
type: stdio
configSchema:
# JSON Schema defining the configuration options for the MCP.
type: object
properties: {}
description: No configuration required. This MCP server runs using the default command.
commandFunction: |-
(config) => ({
command: 'basic-memory',
args: ['mcp']
})
exampleConfig: {}
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/main.py:
--------------------------------------------------------------------------------
```python
"""Main CLI entry point for basic-memory.""" # pragma: no cover
from basic_memory.cli.app import app # pragma: no cover
# Register commands
from basic_memory.cli.commands import ( # noqa: F401 # pragma: no cover
cloud,
db,
import_chatgpt,
import_claude_conversations,
import_claude_projects,
import_memory_json,
mcp,
project,
status,
sync,
tool,
)
if __name__ == "__main__": # pragma: no cover
# start the app
app()
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/__init__.py:
--------------------------------------------------------------------------------
```python
"""Base package for markdown parsing."""
from basic_memory.file_utils import ParseError
from basic_memory.markdown.entity_parser import EntityParser
from basic_memory.markdown.markdown_processor import MarkdownProcessor
from basic_memory.markdown.schemas import (
EntityMarkdown,
EntityFrontmatter,
Observation,
Relation,
)
__all__ = [
"EntityMarkdown",
"EntityFrontmatter",
"EntityParser",
"MarkdownProcessor",
"Observation",
"Relation",
"ParseError",
]
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: "pip" # Python dependencies declared in pyproject.toml
directory: "/" # Location of package manifests
schedule:
interval: "weekly"
```
--------------------------------------------------------------------------------
/tests/mcp/test_resources.py:
--------------------------------------------------------------------------------
```python
from basic_memory.mcp.prompts.ai_assistant_guide import ai_assistant_guide
import pytest
@pytest.mark.asyncio
async def test_ai_assistant_guide_exists(app):
"""Test that the canvas spec resource exists and returns content."""
# Call the resource function
guide = ai_assistant_guide.fn()
# Verify basic characteristics of the content
assert guide is not None
assert isinstance(guide, str)
assert len(guide) > 0
    # Verify it contains the expected guide heading
assert "# AI Assistant Guide" in guide
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
```markdown
---
name: Documentation improvement
about: Suggest improvements or report issues with documentation
title: '[DOCS] '
labels: documentation
assignees: ''
---
## Documentation Issue
Describe what's missing, unclear, or incorrect in the current documentation.
## Location
Where is the problematic documentation? (URL, file path, or section)
## Suggested Improvement
How would you improve this documentation? Please be as specific as possible.
## Additional Context
Any additional information or screenshots that might help explain the issue or improvement.
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/__init__.py:
--------------------------------------------------------------------------------
```python
"""Basic Memory MCP prompts.
Prompts are a special type of tool that returns a string response
formatted for a user to read, typically invoking one or more tools
and transforming their results into user-friendly text.
"""
# Import individual prompt modules to register them with the MCP server
from basic_memory.mcp.prompts import continue_conversation
from basic_memory.mcp.prompts import recent_activity
from basic_memory.mcp.prompts import search
from basic_memory.mcp.prompts import ai_assistant_guide
__all__ = [
"ai_assistant_guide",
"continue_conversation",
"recent_activity",
"search",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/importer.py:
--------------------------------------------------------------------------------
```python
"""Schemas for import services."""
from typing import Dict, Optional
from pydantic import BaseModel
class ImportResult(BaseModel):
"""Common import result schema."""
import_count: Dict[str, int]
success: bool
error_message: Optional[str] = None
class ChatImportResult(ImportResult):
"""Result schema for chat imports."""
conversations: int = 0
messages: int = 0
class ProjectImportResult(ImportResult):
"""Result schema for project imports."""
documents: int = 0
prompts: int = 0
class EntityImportResult(ImportResult):
"""Result schema for entity imports."""
entities: int = 0
relations: int = 0
skipped_entities: int = 0
```
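A minimal usage sketch (not from the repository) showing how an importer might populate one of these result models; the counts are illustrative:
```python
from basic_memory.schemas.importer import ChatImportResult

# Report a successful chat import; unset fields fall back to their defaults.
result = ChatImportResult(
    import_count={"conversations": 2, "messages": 40},
    success=True,
    conversations=2,
    messages=40,
)
assert result.error_message is None  # no error occurred
```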
--------------------------------------------------------------------------------
/src/basic_memory/alembic/migrations.py:
--------------------------------------------------------------------------------
```python
"""Functions for managing database migrations."""
from pathlib import Path
from loguru import logger
from alembic.config import Config
from alembic import command
def get_alembic_config() -> Config: # pragma: no cover
"""Get alembic config with correct paths."""
migrations_path = Path(__file__).parent
alembic_ini = migrations_path / "alembic.ini"
config = Config(alembic_ini)
config.set_main_option("script_location", str(migrations_path))
return config
def reset_database(): # pragma: no cover
"""Drop and recreate all tables."""
logger.info("Resetting database...")
config = get_alembic_config()
command.downgrade(config, "base")
command.upgrade(config, "head")
```
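Given `get_alembic_config()`, applying pending migrations follows the standard Alembic pattern; a minimal sketch:
```python
from alembic import command
from basic_memory.alembic.migrations import get_alembic_config

# Upgrade the database to the latest revision using the packaged config.
command.upgrade(get_alembic_config(), "head")
```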
--------------------------------------------------------------------------------
/src/basic_memory/sync/background_sync.py:
--------------------------------------------------------------------------------
```python
import asyncio
from loguru import logger
from basic_memory.config import get_project_config
from basic_memory.sync import SyncService, WatchService
async def sync_and_watch(
sync_service: SyncService, watch_service: WatchService
): # pragma: no cover
"""Run sync and watch service."""
config = get_project_config()
logger.info(f"Starting watch service to sync file changes in dir: {config.home}")
# full sync
await sync_service.sync(config.home)
# watch changes
await watch_service.run()
async def create_background_sync_task(
sync_service: SyncService, watch_service: WatchService
): # pragma: no cover
return asyncio.create_task(sync_and_watch(sync_service, watch_service))
```
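`create_background_sync_task` hands back an `asyncio.Task`, so the caller owns its lifecycle. A minimal, self-contained sketch of the cancel-on-shutdown pattern, using a stand-in coroutine instead of the real services:
```python
import asyncio
import contextlib

async def fake_sync_and_watch() -> None:
    # Stand-in for sync_and_watch(): a long-running background loop.
    while True:
        await asyncio.sleep(60)

async def main() -> None:
    task = asyncio.create_task(fake_sync_and_watch())
    try:
        await asyncio.sleep(0.1)  # the application would serve requests here
    finally:
        task.cancel()  # shut the background sync down cleanly
        with contextlib.suppress(asyncio.CancelledError):
            await task

asyncio.run(main())
```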
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
```markdown
---
name: Feature request
about: Suggest an idea for Basic Memory
title: '[FEATURE] '
labels: enhancement
assignees: ''
---
## Feature Description
A clear and concise description of the feature you'd like to see implemented.
## Problem This Feature Solves
Describe the problem or limitation you're experiencing that this feature would address.
## Proposed Solution
Describe how you envision this feature working. Include:
- User workflow
- Interface design (if applicable)
- Technical approach (if you have ideas)
## Alternative Solutions
Have you considered any alternative solutions or workarounds?
## Additional Context
Add any other context, screenshots, or examples about the feature request here.
## Impact
How would this feature benefit you and other users of Basic Memory?
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/__init__.py:
--------------------------------------------------------------------------------
```python
"""Import services for Basic Memory."""
from basic_memory.importers.base import Importer
from basic_memory.importers.chatgpt_importer import ChatGPTImporter
from basic_memory.importers.claude_conversations_importer import (
ClaudeConversationsImporter,
)
from basic_memory.importers.claude_projects_importer import ClaudeProjectsImporter
from basic_memory.importers.memory_json_importer import MemoryJsonImporter
from basic_memory.schemas.importer import (
ChatImportResult,
EntityImportResult,
ImportResult,
ProjectImportResult,
)
__all__ = [
"Importer",
"ChatGPTImporter",
"ClaudeConversationsImporter",
"ClaudeProjectsImporter",
"MemoryJsonImporter",
"ImportResult",
"ChatImportResult",
"EntityImportResult",
"ProjectImportResult",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/services/exceptions.py:
--------------------------------------------------------------------------------
```python
class FileOperationError(Exception):
"""Raised when file operations fail"""
pass
class EntityNotFoundError(Exception):
"""Raised when an entity cannot be found"""
pass
class EntityCreationError(Exception):
"""Raised when an entity cannot be created"""
pass
class DirectoryOperationError(Exception):
"""Raised when directory operations fail"""
pass
class SyncFatalError(Exception):
"""Raised when sync encounters a fatal error that prevents continuation.
Fatal errors include:
- Project deleted during sync (FOREIGN KEY constraint)
- Database corruption
- Critical system failures
When this exception is raised, the entire sync operation should be terminated
immediately rather than attempting to continue with remaining files.
"""
pass
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/directory.py:
--------------------------------------------------------------------------------
```python
"""Schemas for directory tree operations."""
from datetime import datetime
from typing import List, Optional, Literal
from pydantic import BaseModel
class DirectoryNode(BaseModel):
"""Directory node in file system."""
name: str
file_path: Optional[str] = None # Original path without leading slash (matches DB)
directory_path: str # Path with leading slash for directory navigation
type: Literal["directory", "file"]
children: List["DirectoryNode"] = [] # Default to empty list
title: Optional[str] = None
permalink: Optional[str] = None
entity_id: Optional[int] = None
entity_type: Optional[str] = None
content_type: Optional[str] = None
updated_at: Optional[datetime] = None
@property
def has_children(self) -> bool:
return bool(self.children)
# Support for recursive model
DirectoryNode.model_rebuild()
```
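A minimal sketch (illustrative values) of building the recursive model once `model_rebuild()` has resolved the self-reference:
```python
from basic_memory.schemas.directory import DirectoryNode

# A two-level tree: one directory containing one file.
root = DirectoryNode(
    name="notes",
    directory_path="/notes",
    type="directory",
    children=[
        DirectoryNode(
            name="todo.md",
            file_path="notes/todo.md",
            directory_path="/notes/todo.md",
            type="file",
        )
    ],
)
assert root.has_children and not root.children[0].has_children
```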
--------------------------------------------------------------------------------
/.github/workflows/pr-title.yml:
--------------------------------------------------------------------------------
```yaml
name: "Pull Request Title"
on:
pull_request:
types:
- opened
- edited
- synchronize
jobs:
main:
runs-on: ubuntu-latest
steps:
- uses: amannn/action-semantic-pull-request@v5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
# Configure allowed types based on what we want in our changelog
types: |
feat
fix
chore
docs
style
refactor
perf
test
build
ci
          # Allowed scopes (providing a scope remains optional)
scopes: |
core
cli
api
mcp
sync
ui
deps
installer
          # Breaking changes ("!" after type/scope) must include a scope
requireScopeForBreakingChange: true
```
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
```markdown
---
name: Bug report
about: Create a report to help us improve Basic Memory
title: '[BUG] '
labels: bug
assignees: ''
---
## Bug Description
A clear and concise description of what the bug is.
## Steps To Reproduce
Steps to reproduce the behavior:
1. Install version '...'
2. Run command '...'
3. Use tool/feature '...'
4. See error
## Expected Behavior
A clear and concise description of what you expected to happen.
## Actual Behavior
What actually happened, including error messages and output.
## Environment
- OS: [e.g. macOS 14.2, Ubuntu 22.04]
- Python version: [e.g. 3.12.1]
- Basic Memory version: [e.g. 0.1.0]
- Installation method: [e.g. pip, uv, source]
- Claude Desktop version (if applicable):
## Additional Context
- Configuration files (if relevant)
- Logs or screenshots
- Any special configuration or environment variables
## Possible Solution
If you have any ideas on what might be causing the issue or how to fix it, please share them here.
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/e7e1f4367280_add_scan_watermark_tracking_to_project.py:
--------------------------------------------------------------------------------
```python
"""Add scan watermark tracking to Project
Revision ID: e7e1f4367280
Revises: 9d9c1cb7d8f5
Create Date: 2025-10-20 16:42:46.625075
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = "e7e1f4367280"
down_revision: Union[str, None] = "9d9c1cb7d8f5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("project", schema=None) as batch_op:
batch_op.add_column(sa.Column("last_scan_timestamp", sa.Float(), nullable=True))
batch_op.add_column(sa.Column("last_file_count", sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("project", schema=None) as batch_op:
batch_op.drop_column("last_file_count")
batch_op.drop_column("last_scan_timestamp")
# ### end Alembic commands ###
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/delete.py:
--------------------------------------------------------------------------------
```python
"""Delete operation schemas for the knowledge graph.
This module defines the request schemas for removing entities, relations,
and observations from the knowledge graph. Each operation has specific
implications and safety considerations.
Deletion Hierarchy:
1. Entity deletion removes the entity and all its relations
2. Relation deletion only removes the connection between entities
3. Observation deletion preserves entity and relations
Key Considerations:
- All deletions are permanent
- Entity deletions cascade to relations
- Files are removed along with entities
- Operations are atomic - they fully succeed or fail
"""
from typing import List, Annotated
from annotated_types import MinLen
from pydantic import BaseModel
from basic_memory.schemas.base import Permalink
class DeleteEntitiesRequest(BaseModel):
"""Delete one or more entities from the knowledge graph.
This operation:
1. Removes the entity from the database
2. Deletes all observations attached to the entity
3. Removes all relations where the entity is source or target
4. Deletes the corresponding markdown file
"""
permalinks: Annotated[List[Permalink], MinLen(1)]
```
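Because deletions are permanent, the `MinLen(1)` bound rejects empty requests at validation time, before anything is touched. A minimal sketch with an illustrative permalink:
```python
from pydantic import ValidationError
from basic_memory.schemas.delete import DeleteEntitiesRequest

# Valid request: at least one permalink is required.
req = DeleteEntitiesRequest(permalinks=["specs/search-design"])

# An empty list fails fast at validation, before any deletion runs.
try:
    DeleteEntitiesRequest(permalinks=[])
except ValidationError:
    pass  # expected
```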
--------------------------------------------------------------------------------
/tests/cli/conftest.py:
--------------------------------------------------------------------------------
```python
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from fastapi import FastAPI
from httpx import AsyncClient, ASGITransport
from basic_memory.api.app import app as fastapi_app
from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
@pytest_asyncio.fixture(autouse=True)
async def app(app_config, project_config, engine_factory, test_config, aiolib) -> FastAPI:
"""Create test FastAPI application."""
app = fastapi_app
app.dependency_overrides[get_app_config] = lambda: app_config
app.dependency_overrides[get_project_config] = lambda: project_config
app.dependency_overrides[get_engine_factory] = lambda: engine_factory
return app
@pytest_asyncio.fixture
async def client(app: FastAPI, aiolib) -> AsyncGenerator[AsyncClient, None]:
"""Create test client that both MCP and tests will use."""
async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
yield client
@pytest.fixture
def cli_env(project_config, client, test_config):
"""Set up CLI environment with correct project session."""
return {"project_config": project_config, "client": client}
```
--------------------------------------------------------------------------------
/tests/repository/test_project_info_repository.py:
--------------------------------------------------------------------------------
```python
"""Tests for the ProjectInfoRepository."""
import pytest
from sqlalchemy import text
from basic_memory.repository.project_info_repository import ProjectInfoRepository
from basic_memory.models.project import Project # Add a model reference
@pytest.mark.asyncio
async def test_project_info_repository_init(session_maker):
"""Test ProjectInfoRepository initialization."""
# Create a ProjectInfoRepository
repository = ProjectInfoRepository(session_maker)
# Verify it was initialized properly
assert repository is not None
assert repository.session_maker == session_maker
    # Model is set to Project, which serves only as a reference model for raw queries
assert repository.Model is Project
@pytest.mark.asyncio
async def test_project_info_repository_execute_query(session_maker):
"""Test ProjectInfoRepository execute_query method."""
# Create a ProjectInfoRepository
repository = ProjectInfoRepository(session_maker)
# Execute a simple query
result = await repository.execute_query(text("SELECT 1 as test"))
# Verify the result
assert result is not None
row = result.fetchone()
assert row is not None
assert row[0] == 1
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/search_router.py:
--------------------------------------------------------------------------------
```python
"""Router for search operations."""
from fastapi import APIRouter, BackgroundTasks
from basic_memory.api.routers.utils import to_search_results
from basic_memory.schemas.search import SearchQuery, SearchResponse
from basic_memory.deps import SearchServiceDep, EntityServiceDep
router = APIRouter(prefix="/search", tags=["search"])
@router.post("/", response_model=SearchResponse)
async def search(
query: SearchQuery,
search_service: SearchServiceDep,
entity_service: EntityServiceDep,
page: int = 1,
page_size: int = 10,
):
"""Search across all knowledge and documents."""
limit = page_size
offset = (page - 1) * page_size
results = await search_service.search(query, limit=limit, offset=offset)
search_results = await to_search_results(entity_service, results)
return SearchResponse(
results=search_results,
current_page=page,
page_size=page_size,
)
@router.post("/reindex")
async def reindex(background_tasks: BackgroundTasks, search_service: SearchServiceDep):
"""Recreate and populate the search index."""
await search_service.reindex_all(background_tasks=background_tasks)
return {"status": "ok", "message": "Reindex initiated"}
```
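A minimal client-side sketch of the pagination contract, assuming a running server and the project-scoped route prefix seen in the test conftest; the `text` field name on `SearchQuery` is an assumption here:
```python
import asyncio
import httpx

async def search_page(base_url: str, project: str, text: str, page: int = 1) -> dict:
    # The router converts page/page_size into limit/offset:
    # limit = page_size, offset = (page - 1) * page_size.
    async with httpx.AsyncClient(base_url=base_url) as client:
        response = await client.post(
            f"/{project}/search/",  # project-scoped prefix, as in tests/api/conftest.py
            params={"page": page, "page_size": 10},
            json={"text": text},  # SearchQuery field name assumed for illustration
        )
        response.raise_for_status()
        return response.json()

# Example (requires a running server):
# asyncio.run(search_page("http://localhost:8000", "my-project", "sync"))
```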
--------------------------------------------------------------------------------
/src/basic_memory/models/search.py:
--------------------------------------------------------------------------------
```python
"""Search models and tables."""
from sqlalchemy import DDL
# Define FTS5 virtual table creation
CREATE_SEARCH_INDEX = DDL("""
CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
-- Core entity fields
id UNINDEXED, -- Row ID
title, -- Title for searching
content_stems, -- Main searchable content split into stems
content_snippet, -- File content snippet for display
permalink, -- Stable identifier (now indexed for path search)
file_path UNINDEXED, -- Physical location
type UNINDEXED, -- entity/relation/observation
-- Project context
project_id UNINDEXED, -- Project identifier
-- Relation fields
from_id UNINDEXED, -- Source entity
to_id UNINDEXED, -- Target entity
relation_type UNINDEXED, -- Type of relation
-- Observation fields
entity_id UNINDEXED, -- Parent entity
category UNINDEXED, -- Observation category
-- Common fields
metadata UNINDEXED, -- JSON metadata
created_at UNINDEXED, -- Creation timestamp
updated_at UNINDEXED, -- Last update
-- Configuration
    tokenize="unicode61 tokenchars '/'", -- Keep / inside tokens for path search
prefix='1,2,3,4' -- Support longer prefixes for paths
);
""")
```
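The `tokenchars` setting is what keeps `/` inside tokens so permalinks match as whole paths rather than being split at every slash. A minimal standalone sketch of that tokenizer behavior (simplified table, not the production schema; requires an SQLite build with FTS5):
```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute(
    """CREATE VIRTUAL TABLE demo USING fts5(
        permalink,
        tokenize="unicode61 tokenchars '/'"
    )"""
)
con.execute("INSERT INTO demo (permalink) VALUES ('specs/search')")
# The query string must be quoted so FTS5 treats the slash as part of one token.
rows = con.execute(
    "SELECT permalink FROM demo WHERE demo MATCH '\"specs/search\"'"
).fetchall()
assert rows == [("specs/search",)]
```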
--------------------------------------------------------------------------------
/src/basic_memory/cli/app.py:
--------------------------------------------------------------------------------
```python
from typing import Optional
import typer
from basic_memory.config import ConfigManager
def version_callback(value: bool) -> None:
"""Show version and exit."""
if value: # pragma: no cover
import basic_memory
typer.echo(f"Basic Memory version: {basic_memory.__version__}")
raise typer.Exit()
app = typer.Typer(name="basic-memory")
@app.callback()
def app_callback(
ctx: typer.Context,
version: Optional[bool] = typer.Option(
None,
"--version",
"-v",
help="Show version and exit.",
callback=version_callback,
is_eager=True,
),
) -> None:
"""Basic Memory - Local-first personal knowledge management."""
# Run initialization for every command unless --version was specified
if not version and ctx.invoked_subcommand is not None:
from basic_memory.services.initialization import ensure_initialization
app_config = ConfigManager().config
ensure_initialization(app_config)
## import
# Register sub-command groups
import_app = typer.Typer(help="Import data from various sources")
app.add_typer(import_app, name="import")
claude_app = typer.Typer(help="Import Conversations from Claude JSON export.")
import_app.add_typer(claude_app, name="claude")
## cloud
cloud_app = typer.Typer(help="Access Basic Memory Cloud")
app.add_typer(cloud_app, name="cloud")
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
FROM python:3.12-slim-bookworm
# Build arguments for user ID and group ID (defaults to 1000)
ARG UID=1000
ARG GID=1000
# Copy uv from official image
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1
# Create a group and user with the provided UID/GID
# Check if the GID already exists, if not create appgroup
RUN (getent group ${GID} || groupadd --gid ${GID} appgroup) && \
useradd --uid ${UID} --gid ${GID} --create-home --shell /bin/bash appuser
# Copy the project into the image
ADD . /app
# Sync the project into a new environment, asserting the lockfile is up to date
WORKDIR /app
RUN uv sync --locked
# Create necessary directories and set ownership
RUN mkdir -p /app/data/basic-memory /app/.basic-memory && \
chown -R appuser:${GID} /app
# Set default data directory and add venv to PATH
ENV BASIC_MEMORY_HOME=/app/data/basic-memory \
BASIC_MEMORY_PROJECT_ROOT=/app/data \
PATH="/app/.venv/bin:$PATH"
# Switch to the non-root user
USER appuser
# Expose port
EXPOSE 8000
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD basic-memory --version || exit 1
# Use the basic-memory entrypoint to run the MCP server with default SSE transport
CMD ["basic-memory", "mcp", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
```
--------------------------------------------------------------------------------
/tests/api/conftest.py:
--------------------------------------------------------------------------------
```python
"""Tests for knowledge graph API routes."""
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from fastapi import FastAPI
from httpx import AsyncClient, ASGITransport
from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
from basic_memory.models import Project
@pytest_asyncio.fixture
async def app(test_config, engine_factory, app_config) -> FastAPI:
"""Create FastAPI test application."""
from basic_memory.api.app import app
app.dependency_overrides[get_app_config] = lambda: app_config
app.dependency_overrides[get_project_config] = lambda: test_config.project_config
app.dependency_overrides[get_engine_factory] = lambda: engine_factory
return app
@pytest_asyncio.fixture
async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
"""Create client using ASGI transport - same as CLI will use."""
async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
yield client
@pytest.fixture
def project_url(test_project: Project) -> str:
"""Create a URL prefix for the project routes.
This helps tests generate the correct URL for project-scoped routes.
"""
# Make sure this matches what's in tests/conftest.py for test_project creation
# The permalink should be generated from "Test Project Context"
return f"/{test_project.permalink}"
```
--------------------------------------------------------------------------------
/.github/workflows/dev-release.yml:
--------------------------------------------------------------------------------
```yaml
name: Dev Release
on:
push:
branches: [main]
workflow_dispatch: # Allow manual triggering
jobs:
dev-release:
runs-on: ubuntu-latest
permissions:
id-token: write
contents: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install uv
run: |
pip install uv
- name: Install dependencies and build
run: |
uv venv
uv sync
uv build
- name: Check if this is a dev version
id: check_version
run: |
VERSION=$(uv run python -c "import basic_memory; print(basic_memory.__version__)")
echo "version=$VERSION" >> $GITHUB_OUTPUT
if [[ "$VERSION" == *"dev"* ]]; then
echo "is_dev=true" >> $GITHUB_OUTPUT
echo "Dev version detected: $VERSION"
else
echo "is_dev=false" >> $GITHUB_OUTPUT
echo "Release version detected: $VERSION, skipping dev release"
fi
- name: Publish dev version to PyPI
if: steps.check_version.outputs.is_dev == 'true'
uses: pypa/gh-action-pypi-publish@release/v1
with:
password: ${{ secrets.PYPI_TOKEN }}
skip-existing: true # Don't fail if version already exists
```
--------------------------------------------------------------------------------
/tests/api/test_relation_background_resolution.py:
--------------------------------------------------------------------------------
```python
"""Test that relation resolution happens in the background."""
import pytest
from unittest.mock import AsyncMock
from basic_memory.api.routers.knowledge_router import resolve_relations_background
@pytest.mark.asyncio
async def test_resolve_relations_background_success():
"""Test that background relation resolution calls sync service correctly."""
# Create mocks
sync_service = AsyncMock()
sync_service.resolve_relations = AsyncMock(return_value=None)
entity_id = 123
entity_permalink = "test/entity"
# Call the background function
await resolve_relations_background(sync_service, entity_id, entity_permalink)
# Verify sync service was called with the entity_id
sync_service.resolve_relations.assert_called_once_with(entity_id=entity_id)
@pytest.mark.asyncio
async def test_resolve_relations_background_handles_errors():
"""Test that background relation resolution handles errors gracefully."""
# Create mock that raises an exception
sync_service = AsyncMock()
sync_service.resolve_relations = AsyncMock(side_effect=Exception("Test error"))
entity_id = 123
entity_permalink = "test/entity"
# Call should not raise - errors are logged
await resolve_relations_background(sync_service, entity_id, entity_permalink)
# Verify sync service was called
sync_service.resolve_relations.assert_called_once_with(entity_id=entity_id)
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py:
--------------------------------------------------------------------------------
```python
"""relation to_name unique index
Revision ID: b3c3938bacdb
Revises: 3dae7c7b1564
Create Date: 2025-02-22 14:59:30.668466
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b3c3938bacdb"
down_revision: Union[str, None] = "3dae7c7b1564"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# SQLite doesn't support constraint changes through ALTER
# Need to recreate table with desired constraints
with op.batch_alter_table("relation") as batch_op:
# Drop existing unique constraint
batch_op.drop_constraint("uix_relation", type_="unique")
# Add new constraints
batch_op.create_unique_constraint(
"uix_relation_from_id_to_id", ["from_id", "to_id", "relation_type"]
)
batch_op.create_unique_constraint(
"uix_relation_from_id_to_name", ["from_id", "to_name", "relation_type"]
)
def downgrade() -> None:
with op.batch_alter_table("relation") as batch_op:
# Drop new constraints
batch_op.drop_constraint("uix_relation_from_id_to_name", type_="unique")
batch_op.drop_constraint("uix_relation_from_id_to_id", type_="unique")
# Restore original constraint
batch_op.create_unique_constraint("uix_relation", ["from_id", "to_id", "relation_type"])
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/command_utils.py:
--------------------------------------------------------------------------------
```python
"""utility functions for commands"""
from typing import Optional
from mcp.server.fastmcp.exceptions import ToolError
import typer
from rich.console import Console
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.tools.utils import call_post, call_get
from basic_memory.mcp.project_context import get_active_project
from basic_memory.schemas import ProjectInfoResponse
console = Console()
async def run_sync(project: Optional[str] = None):
"""Run sync operation via API endpoint."""
try:
async with get_client() as client:
project_item = await get_active_project(client, project, None)
response = await call_post(client, f"{project_item.project_url}/project/sync")
data = response.json()
console.print(f"[green]✓ {data['message']}[/green]")
except (ToolError, ValueError) as e:
console.print(f"[red]✗ Sync failed: {e}[/red]")
raise typer.Exit(1)
async def get_project_info(project: str):
"""Get project information via API endpoint."""
try:
async with get_client() as client:
project_item = await get_active_project(client, project, None)
response = await call_get(client, f"{project_item.project_url}/project/info")
return ProjectInfoResponse.model_validate(response.json())
except (ToolError, ValueError) as e:
console.print(f"[red]✗ Sync failed: {e}[/red]")
raise typer.Exit(1)
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/db.py:
--------------------------------------------------------------------------------
```python
"""Database management commands."""
import asyncio
import typer
from loguru import logger
from basic_memory import db
from basic_memory.cli.app import app
from basic_memory.config import ConfigManager, BasicMemoryConfig, save_basic_memory_config
@app.command()
def reset(
reindex: bool = typer.Option(False, "--reindex", help="Rebuild db index from filesystem"),
): # pragma: no cover
"""Reset database (drop all tables and recreate)."""
if typer.confirm("This will delete all data in your db. Are you sure?"):
logger.info("Resetting database...")
config_manager = ConfigManager()
app_config = config_manager.config
# Get database path
db_path = app_config.app_database_path
# Delete the database file if it exists
if db_path.exists():
db_path.unlink()
logger.info(f"Database file deleted: {db_path}")
# Reset project configuration
config = BasicMemoryConfig()
save_basic_memory_config(config_manager.config_file, config)
logger.info("Project configuration reset to default")
# Create a new empty database
asyncio.run(db.run_migrations(app_config))
logger.info("Database reset complete")
if reindex:
# Import and run sync
from basic_memory.cli.commands.sync import sync
logger.info("Rebuilding search index from filesystem...")
sync(watch=False) # pyright: ignore
```
--------------------------------------------------------------------------------
/test-int/mcp/test_read_note_integration.py:
--------------------------------------------------------------------------------
```python
"""
Integration tests for read_note MCP tool.
Tests the full flow: MCP client -> MCP server -> FastAPI -> database
"""
import pytest
from fastmcp import Client
@pytest.mark.asyncio
async def test_read_note_after_write(mcp_server, app, test_project):
"""Test read_note after write_note using real database."""
async with Client(mcp_server) as client:
# First write a note
write_result = await client.call_tool(
"write_note",
{
"project": test_project.name,
"title": "Test Note",
"folder": "test",
"content": "# Test Note\n\nThis is test content.",
"tags": "test,integration",
},
)
assert len(write_result.content) == 1
assert write_result.content[0].type == "text"
assert "Test Note.md" in write_result.content[0].text
# Then read it back
read_result = await client.call_tool(
"read_note",
{
"project": test_project.name,
"identifier": "Test Note",
},
)
assert len(read_result.content) == 1
assert read_result.content[0].type == "text"
result_text = read_result.content[0].text
# Should contain the note content and metadata
assert "# Test Note" in result_text
assert "This is test content." in result_text
assert "test/test-note" in result_text # permalink
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/__init__.py:
--------------------------------------------------------------------------------
```python
"""MCP tools for Basic Memory.
This package provides the complete set of tools for interacting with
Basic Memory through the MCP protocol. Importing this module registers
all tools with the MCP server.
"""
# Import tools to register them with MCP
from basic_memory.mcp.tools.delete_note import delete_note
from basic_memory.mcp.tools.read_content import read_content
from basic_memory.mcp.tools.build_context import build_context
from basic_memory.mcp.tools.recent_activity import recent_activity
from basic_memory.mcp.tools.read_note import read_note
from basic_memory.mcp.tools.view_note import view_note
from basic_memory.mcp.tools.write_note import write_note
from basic_memory.mcp.tools.search import search_notes
from basic_memory.mcp.tools.canvas import canvas
from basic_memory.mcp.tools.list_directory import list_directory
from basic_memory.mcp.tools.edit_note import edit_note
from basic_memory.mcp.tools.move_note import move_note
from basic_memory.mcp.tools.project_management import (
list_memory_projects,
create_memory_project,
delete_project,
)
# ChatGPT-compatible tools
from basic_memory.mcp.tools.chatgpt_tools import search, fetch
__all__ = [
"build_context",
"canvas",
"create_memory_project",
"delete_note",
"delete_project",
"edit_note",
"fetch",
"list_directory",
"list_memory_projects",
"move_note",
"read_content",
"read_note",
"recent_activity",
"search",
"search_notes",
"view_note",
"write_note",
]
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py:
--------------------------------------------------------------------------------
```python
"""Add mtime and size columns to Entity for sync optimization
Revision ID: 9d9c1cb7d8f5
Revises: a1b2c3d4e5f6
Create Date: 2025-10-20 05:07:55.173849
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = "9d9c1cb7d8f5"
down_revision: Union[str, None] = "a1b2c3d4e5f6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("entity", schema=None) as batch_op:
batch_op.add_column(sa.Column("mtime", sa.Float(), nullable=True))
batch_op.add_column(sa.Column("size", sa.Integer(), nullable=True))
batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
batch_op.create_foreign_key(
batch_op.f("fk_entity_project_id"), "project", ["project_id"], ["id"]
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("entity", schema=None) as batch_op:
batch_op.drop_constraint(batch_op.f("fk_entity_project_id"), type_="foreignkey")
batch_op.create_foreign_key(
batch_op.f("fk_entity_project_id"),
"project",
["project_id"],
["id"],
ondelete="CASCADE",
)
batch_op.drop_column("size")
batch_op.drop_column("mtime")
# ### end Alembic commands ###
```
--------------------------------------------------------------------------------
/.github/workflows/docker.yml:
--------------------------------------------------------------------------------
```yaml
name: Docker Image CI
on:
push:
tags:
- 'v*' # Trigger on version tags like v1.0.0, v0.13.0, etc.
workflow_dispatch: # Allow manual triggering for testing
env:
REGISTRY: ghcr.io
IMAGE_NAME: basicmachines-co/basic-memory
jobs:
docker:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
platforms: linux/amd64,linux/arm64
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/search.py:
--------------------------------------------------------------------------------
```python
"""Search prompts for Basic Memory MCP server.
These prompts help users search and explore their knowledge base.
"""
from typing import Annotated, Optional
from loguru import logger
from pydantic import Field
from basic_memory.config import get_project_config
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.server import mcp
from basic_memory.mcp.tools.utils import call_post
from basic_memory.schemas.base import TimeFrame
from basic_memory.schemas.prompt import SearchPromptRequest
@mcp.prompt(
name="search_knowledge_base",
description="Search across all content in basic-memory",
)
async def search_prompt(
query: str,
timeframe: Annotated[
Optional[TimeFrame],
Field(description="How far back to search (e.g. '1d', '1 week')"),
] = None,
) -> str:
"""Search across all content in basic-memory.
This prompt helps search for content in the knowledge base and
provides helpful context about the results.
Args:
query: The search text to look for
timeframe: Optional timeframe to limit results (e.g. '1d', '1 week')
Returns:
Formatted search results with context
"""
logger.info(f"Searching knowledge base, query: {query}, timeframe: {timeframe}")
async with get_client() as client:
# Create request model
request = SearchPromptRequest(query=query, timeframe=timeframe)
project_url = get_project_config().project_url
# Call the prompt API endpoint
response = await call_post(
client, f"{project_url}/prompt/search", json=request.model_dump(exclude_none=True)
)
# Extract the rendered prompt from the response
result = response.json()
return result["prompt"]
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/sync.py:
--------------------------------------------------------------------------------
```python
"""Command module for basic-memory sync operations."""
import asyncio
from typing import Annotated, Optional
import typer
from basic_memory.cli.app import app
from basic_memory.cli.commands.command_utils import run_sync
from basic_memory.config import ConfigManager
@app.command()
def sync(
project: Annotated[
Optional[str],
typer.Option(help="The project name."),
] = None,
watch: Annotated[
bool,
typer.Option("--watch", help="Run continuous sync (cloud mode only)"),
] = False,
interval: Annotated[
int,
typer.Option("--interval", help="Sync interval in seconds for watch mode (default: 60)"),
] = 60,
) -> None:
"""Sync knowledge files with the database.
In local mode: Scans filesystem and updates database.
In cloud mode: Runs bidirectional file sync (bisync) then updates database.
Examples:
bm sync # One-time sync
bm sync --watch # Continuous sync every 60s
bm sync --watch --interval 30 # Continuous sync every 30s
"""
config = ConfigManager().config
if config.cloud_mode_enabled:
# Cloud mode: run bisync which includes database sync
from basic_memory.cli.commands.cloud.bisync_commands import run_bisync, run_bisync_watch
try:
if watch:
run_bisync_watch(interval_seconds=interval)
else:
run_bisync()
except Exception:
raise typer.Exit(1)
else:
# Local mode: just database sync
if watch:
typer.echo(
"Error: --watch is only available in cloud mode. Run 'bm cloud login' first."
)
raise typer.Exit(1)
asyncio.run(run_sync(project))
```
--------------------------------------------------------------------------------
/tests/mcp/conftest.py:
--------------------------------------------------------------------------------
```python
"""Tests for the MCP server implementation using FastAPI TestClient."""
from typing import AsyncGenerator
import pytest
import pytest_asyncio
from fastapi import FastAPI
from httpx import AsyncClient, ASGITransport
from mcp.server import FastMCP
from basic_memory.api.app import app as fastapi_app
from basic_memory.deps import get_project_config, get_engine_factory, get_app_config
from basic_memory.services.search_service import SearchService
from basic_memory.mcp.server import mcp as mcp_server
@pytest.fixture(scope="function")
def mcp() -> FastMCP:
return mcp_server # pyright: ignore [reportReturnType]
@pytest.fixture(scope="function")
def app(app_config, project_config, engine_factory, config_manager) -> FastAPI:
"""Create test FastAPI application."""
app = fastapi_app
app.dependency_overrides[get_app_config] = lambda: app_config
app.dependency_overrides[get_project_config] = lambda: project_config
app.dependency_overrides[get_engine_factory] = lambda: engine_factory
return app
@pytest_asyncio.fixture(scope="function")
async def client(app: FastAPI) -> AsyncGenerator[AsyncClient, None]:
"""Create test client that both MCP and tests will use."""
async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
yield client
@pytest.fixture
def test_entity_data():
"""Sample data for creating a test entity."""
return {
"entities": [
{
"title": "Test Entity",
"entity_type": "test",
"summary": "", # Empty string instead of None
}
]
}
@pytest_asyncio.fixture(autouse=True)
async def init_search_index(search_service: SearchService):
await search_service.init_search_index()
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/__init__.py:
--------------------------------------------------------------------------------
```python
"""Knowledge graph schema exports.
This module exports all schema classes to simplify imports.
Rather than importing from individual schema files, you can
import everything from basic_memory.schemas.
"""
# Base types and models
from basic_memory.schemas.base import (
Observation,
EntityType,
RelationType,
Relation,
Entity,
)
# Delete operation models
from basic_memory.schemas.delete import (
DeleteEntitiesRequest,
)
# Request models
from basic_memory.schemas.request import (
SearchNodesRequest,
GetEntitiesRequest,
CreateRelationsRequest,
)
# Response models
from basic_memory.schemas.response import (
SQLAlchemyModel,
ObservationResponse,
RelationResponse,
EntityResponse,
EntityListResponse,
SearchNodesResponse,
DeleteEntitiesResponse,
)
from basic_memory.schemas.project_info import (
ProjectStatistics,
ActivityMetrics,
SystemStatus,
ProjectInfoResponse,
)
from basic_memory.schemas.directory import (
DirectoryNode,
)
from basic_memory.schemas.sync_report import (
SyncReportResponse,
)
# For convenient imports, export all models
__all__ = [
# Base
"Observation",
"EntityType",
"RelationType",
"Relation",
"Entity",
# Requests
"SearchNodesRequest",
"GetEntitiesRequest",
"CreateRelationsRequest",
# Responses
"SQLAlchemyModel",
"ObservationResponse",
"RelationResponse",
"EntityResponse",
"EntityListResponse",
"SearchNodesResponse",
"DeleteEntitiesResponse",
# Delete Operations
"DeleteEntitiesRequest",
# Project Info
"ProjectStatistics",
"ActivityMetrics",
"SystemStatus",
"ProjectInfoResponse",
# Directory
"DirectoryNode",
# Sync
"SyncReportResponse",
]
```
--------------------------------------------------------------------------------
/tests/api/test_project_router_operations.py:
--------------------------------------------------------------------------------
```python
"""Tests for project router operation endpoints."""
import pytest
@pytest.mark.asyncio
async def test_get_project_info_additional(client, test_graph, project_url):
"""Test additional fields in the project info endpoint."""
# Call the endpoint
response = await client.get(f"{project_url}/project/info")
# Verify response
assert response.status_code == 200
data = response.json()
# Check specific fields we're interested in
assert "available_projects" in data
assert isinstance(data["available_projects"], dict)
# Get a project from the list
for project_name, project_info in data["available_projects"].items():
# Verify project structure
assert "path" in project_info
assert "active" in project_info
assert "is_default" in project_info
break # Just check the first one for structure
@pytest.mark.asyncio
async def test_project_list_additional(client, project_url):
"""Test additional fields in the project list endpoint."""
# Call the endpoint
response = await client.get("/projects/projects")
# Verify response
assert response.status_code == 200
data = response.json()
# Verify projects list structure in more detail
assert "projects" in data
assert len(data["projects"]) > 0
# Verify the default project is identified
default_project = data["default_project"]
assert default_project
# Verify the default_project appears in the projects list and is marked as default
default_in_list = False
for project in data["projects"]:
if project["name"] == default_project:
assert project["is_default"] is True
default_in_list = True
break
assert default_in_list, "Default project should appear in the projects list"
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py:
--------------------------------------------------------------------------------
```python
"""fix project foreign keys
Revision ID: a1b2c3d4e5f6
Revises: 647e7a75e2cd
Create Date: 2025-08-19 22:06:00.000000
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "a1b2c3d4e5f6"
down_revision: Union[str, None] = "647e7a75e2cd"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Re-establish foreign key constraints that were lost during project table recreation.
The migration 647e7a75e2cd recreated the project table but did not re-establish
the foreign key constraint from entity.project_id to project.id, causing
foreign key constraint failures when trying to delete projects with related entities.
"""
# SQLite doesn't allow adding foreign key constraints to existing tables easily
# We need to be careful and handle the case where the constraint might already exist
with op.batch_alter_table("entity", schema=None) as batch_op:
# Try to drop existing foreign key constraint (may not exist)
try:
batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
except Exception:
# Constraint may not exist, which is fine - we'll create it next
pass
# Add the foreign key constraint with CASCADE DELETE
# This ensures that when a project is deleted, all related entities are also deleted
batch_op.create_foreign_key(
"fk_entity_project_id", "project", ["project_id"], ["id"], ondelete="CASCADE"
)
def downgrade() -> None:
"""Remove the foreign key constraint."""
with op.batch_alter_table("entity", schema=None) as batch_op:
batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/utils.py:
--------------------------------------------------------------------------------
```python
"""Utility functions for import services."""
import re
from datetime import datetime
from typing import Any
def clean_filename(name: str) -> str: # pragma: no cover
"""Clean a string to be used as a filename.
Args:
name: The string to clean.
Returns:
A cleaned string suitable for use as a filename.
"""
# Replace common punctuation and whitespace with underscores
name = re.sub(r"[\s\-,.:/\\\[\]\(\)]+", "_", name)
# Remove any non-alphanumeric or underscore characters
name = re.sub(r"[^\w]+", "", name)
# Ensure the name isn't too long
if len(name) > 100: # pragma: no cover
name = name[:100]
# Ensure the name isn't empty
if not name: # pragma: no cover
name = "untitled"
return name
def format_timestamp(timestamp: Any) -> str: # pragma: no cover
"""Format a timestamp for use in a filename or title.
Args:
timestamp: A timestamp in various formats.
Returns:
A formatted string representation of the timestamp.
"""
if isinstance(timestamp, str):
try:
# Try ISO format
timestamp = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
except ValueError:
try:
# Try unix timestamp as string
timestamp = datetime.fromtimestamp(float(timestamp)).astimezone()
except ValueError:
# Return as is if we can't parse it
return timestamp
elif isinstance(timestamp, (int, float)):
# Unix timestamp
timestamp = datetime.fromtimestamp(timestamp).astimezone()
if isinstance(timestamp, datetime):
return timestamp.strftime("%Y-%m-%d %H:%M:%S")
# Return as is if we can't format it
return str(timestamp) # pragma: no cover
```
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
```yaml
name: Tests
on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
# pull_request_target runs on the BASE of the PR, not the merge result.
# It has write permissions and access to secrets.
# It's useful for PRs from forks or automated PRs but requires careful use for security reasons.
# See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
pull_request_target:
branches: [ "main" ]
jobs:
test:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest]
python-version: [ "3.12", "3.13" ]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install uv
run: |
pip install uv
- name: Install just (Linux/macOS)
if: runner.os != 'Windows'
run: |
curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin
- name: Install just (Windows)
if: runner.os == 'Windows'
run: |
# Install just using Chocolatey (pre-installed on GitHub Actions Windows runners)
choco install just --yes
shell: pwsh
- name: Create virtual env
run: |
uv venv
- name: Install dependencies
run: |
uv pip install -e .[dev]
- name: Run type checks
run: |
just typecheck
- name: Run linting
run: |
just lint
- name: Run tests
run: |
uv pip install pytest pytest-cov
just test
```
--------------------------------------------------------------------------------
/tests/importers/test_importer_utils.py:
--------------------------------------------------------------------------------
```python
"""Tests for importer utility functions."""
from datetime import datetime
from basic_memory.importers.utils import clean_filename, format_timestamp
def test_clean_filename():
"""Test clean_filename utility function."""
# Test with normal string
assert clean_filename("Hello World") == "Hello_World"
# Test with punctuation
assert clean_filename("Hello, World!") == "Hello_World"
# Test with special characters
assert clean_filename("File[1]/with\\special:chars") == "File_1_with_special_chars"
# Test with long string (over 100 chars)
long_str = "a" * 120
assert len(clean_filename(long_str)) == 100
# Test with empty string
assert clean_filename("") == "untitled"
# Test with only special characters
# Some implementations may return empty string or underscore
result = clean_filename("!@#$%^&*()")
assert result in ["untitled", "_", ""]
def test_format_timestamp():
"""Test format_timestamp utility function."""
# Test with datetime object
dt = datetime(2023, 1, 1, 12, 30, 45)
assert format_timestamp(dt) == "2023-01-01 12:30:45"
# Test with ISO format string
iso_str = "2023-01-01T12:30:45Z"
assert format_timestamp(iso_str) == "2023-01-01 12:30:45"
# Test with Unix timestamp as int
unix_ts = 1672577445 # 2023-01-01 12:30:45 UTC
formatted = format_timestamp(unix_ts)
# The exact format may vary by timezone, so we just check for the year
assert "2023" in formatted
# Test with Unix timestamp as string
unix_str = "1672577445"
formatted = format_timestamp(unix_str)
assert "2023" in formatted
# Test with unparseable string
assert format_timestamp("not a timestamp") == "not a timestamp"
# Test with non-timestamp object
assert format_timestamp(None) == "None"
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py:
--------------------------------------------------------------------------------
```python
"""remove required from entity.permalink
Revision ID: 502b60eaa905
Revises: b3c3938bacdb
Create Date: 2025-02-24 13:33:09.790951
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = "502b60eaa905"
down_revision: Union[str, None] = "b3c3938bacdb"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("entity", schema=None) as batch_op:
batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=True)
batch_op.drop_index("ix_entity_permalink")
batch_op.create_index(batch_op.f("ix_entity_permalink"), ["permalink"], unique=False)
batch_op.drop_constraint("uix_entity_permalink", type_="unique")
batch_op.create_index(
"uix_entity_permalink",
["permalink"],
unique=True,
sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("entity", schema=None) as batch_op:
batch_op.drop_index(
"uix_entity_permalink",
sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
)
batch_op.create_unique_constraint("uix_entity_permalink", ["permalink"])
batch_op.drop_index(batch_op.f("ix_entity_permalink"))
batch_op.create_index("ix_entity_permalink", ["permalink"], unique=1)
batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=False)
# ### end Alembic commands ###
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/cloud.py:
--------------------------------------------------------------------------------
```python
"""Schemas for cloud-related API responses."""
from pydantic import BaseModel, Field
class TenantMountInfo(BaseModel):
"""Response from /tenant/mount/info endpoint."""
tenant_id: str = Field(..., description="Unique identifier for the tenant")
bucket_name: str = Field(..., description="S3 bucket name for the tenant")
class MountCredentials(BaseModel):
"""Response from /tenant/mount/credentials endpoint."""
access_key: str = Field(..., description="S3 access key for mount")
secret_key: str = Field(..., description="S3 secret key for mount")
class CloudProject(BaseModel):
"""Representation of a cloud project."""
name: str = Field(..., description="Project name")
path: str = Field(..., description="Project path on cloud")
class CloudProjectList(BaseModel):
"""Response from /proxy/projects/projects endpoint."""
projects: list[CloudProject] = Field(default_factory=list, description="List of cloud projects")
class CloudProjectCreateRequest(BaseModel):
"""Request to create a new cloud project."""
name: str = Field(..., description="Project name")
path: str = Field(..., description="Project path (permalink)")
set_default: bool = Field(default=False, description="Set as default project")
class CloudProjectCreateResponse(BaseModel):
"""Response from creating a cloud project."""
message: str = Field(..., description="Status message about the project creation")
status: str = Field(..., description="Status of the creation (success or error)")
default: bool = Field(..., description="True if the project was set as the default")
old_project: dict | None = Field(None, description="Information about the previous project")
new_project: dict | None = Field(
None, description="Information about the newly created project"
)
```
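A minimal sketch of how these models validate an API payload (the payload values are hypothetical):
```python
# Sketch: validating a hypothetical project-list payload with pydantic v2.
from basic_memory.schemas.cloud import CloudProjectList

payload = {"projects": [{"name": "main", "path": "/app/data/main"}]}
project_list = CloudProjectList.model_validate(payload)
assert project_list.projects[0].name == "main"
```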
--------------------------------------------------------------------------------
/src/basic_memory/markdown/schemas.py:
--------------------------------------------------------------------------------
```python
"""Schema models for entity markdown files."""
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
class Observation(BaseModel):
"""An observation about an entity."""
category: Optional[str] = "Note"
content: str
tags: Optional[List[str]] = None
context: Optional[str] = None
def __str__(self) -> str:
obs_string = f"- [{self.category}] {self.content}"
if self.context:
obs_string += f" ({self.context})"
return obs_string
class Relation(BaseModel):
"""A relation between entities."""
type: str
target: str
context: Optional[str] = None
def __str__(self) -> str:
rel_string = f"- {self.type} [[{self.target}]]"
if self.context:
rel_string += f" ({self.context})"
return rel_string
class EntityFrontmatter(BaseModel):
"""Required frontmatter fields for an entity."""
metadata: dict = {}
@property
def tags(self) -> List[str]:
return self.metadata.get("tags") if self.metadata else None # pyright: ignore
@property
def title(self) -> str:
return self.metadata.get("title") if self.metadata else None # pyright: ignore
@property
def type(self) -> str:
return self.metadata.get("type", "note") if self.metadata else "note" # pyright: ignore
@property
def permalink(self) -> str:
return self.metadata.get("permalink") if self.metadata else None # pyright: ignore
class EntityMarkdown(BaseModel):
"""Complete entity combining frontmatter, content, and metadata."""
frontmatter: EntityFrontmatter
content: Optional[str] = None
observations: List[Observation] = []
relations: List[Relation] = []
# created, updated will have values after a read
created: Optional[datetime] = None
modified: Optional[datetime] = None
```
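A short sketch of the markdown lines these `__str__` methods render (values hypothetical):
```python
# Sketch: the markdown lines Observation and Relation render to.
from basic_memory.markdown.schemas import Observation, Relation

obs = Observation(category="Idea", content="Use SQLite FTS", context="search spike")
assert str(obs) == "- [Idea] Use SQLite FTS (search spike)"

rel = Relation(type="implements", target="SPEC-17")
assert str(rel) == "- implements [[SPEC-17]]"
```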
--------------------------------------------------------------------------------
/test-int/cli/test_sync_commands_integration.py:
--------------------------------------------------------------------------------
```python
"""Integration tests for sync CLI commands."""
from pathlib import Path
from typer.testing import CliRunner
from basic_memory.cli.main import app
def test_sync_command(app_config, test_project, config_manager, config_home):
"""Test 'bm sync' command successfully syncs files."""
runner = CliRunner()
# Create a test file
test_file = Path(config_home) / "test-note.md"
test_file.write_text("# Test Note\n\nThis is a test.")
# Run sync
result = runner.invoke(app, ["sync", "--project", "test-project"])
if result.exit_code != 0:
print(f"STDOUT: {result.stdout}")
print(f"STDERR: {result.stderr}")
assert result.exit_code == 0
assert "sync" in result.stdout.lower() or "initiated" in result.stdout.lower()
def test_status_command(app_config, test_project, config_manager, config_home):
"""Test 'bm status' command shows sync status."""
runner = CliRunner()
# Create a test file
test_file = Path(config_home) / "unsynced.md"
test_file.write_text("# Unsynced Note\n\nThis file hasn't been synced yet.")
# Run status
result = runner.invoke(app, ["status", "--project", "test-project"])
if result.exit_code != 0:
print(f"STDOUT: {result.stdout}")
print(f"STDERR: {result.stderr}")
assert result.exit_code == 0
# Should show some status output
assert len(result.stdout) > 0
def test_status_verbose(app_config, test_project, config_manager, config_home):
"""Test 'bm status --verbose' shows detailed status."""
runner = CliRunner()
# Create a test file
test_file = Path(config_home) / "test.md"
test_file.write_text("# Test\n\nContent.")
# Run status with verbose
result = runner.invoke(app, ["status", "--project", "test-project", "--verbose"])
if result.exit_code != 0:
print(f"STDOUT: {result.stdout}")
print(f"STDERR: {result.stderr}")
assert result.exit_code == 0
assert len(result.stdout) > 0
```
--------------------------------------------------------------------------------
/tests/schemas/test_memory_url.py:
--------------------------------------------------------------------------------
```python
"""Tests for MemoryUrl parsing."""
import pytest
from basic_memory.schemas.memory import memory_url, memory_url_path, normalize_memory_url
def test_basic_permalink():
"""Test basic permalink parsing."""
url = memory_url.validate_strings("memory://specs/search")
assert str(url) == "memory://specs/search"
assert memory_url_path(url) == "specs/search"
def test_glob_pattern():
"""Test pattern matching."""
url = memory_url.validate_python("memory://specs/search/*")
assert memory_url_path(url) == "specs/search/*"
def test_related_prefix():
"""Test related content prefix."""
url = memory_url.validate_python("memory://related/specs/search")
assert memory_url_path(url) == "related/specs/search"
def test_context_prefix():
"""Test context prefix."""
url = memory_url.validate_python("memory://context/current")
assert memory_url_path(url) == "context/current"
def test_complex_pattern():
"""Test multiple glob patterns."""
url = memory_url.validate_python("memory://specs/*/search/*")
assert memory_url_path(url) == "specs/*/search/*"
def test_path_with_dashes():
"""Test path with dashes and other chars."""
url = memory_url.validate_python("memory://file-sync-and-note-updates-implementation")
assert memory_url_path(url) == "file-sync-and-note-updates-implementation"
def test_str_representation():
"""Test converting back to string."""
url = memory_url.validate_python("memory://specs/search")
assert url == "memory://specs/search"
def test_normalize_memory_url():
"""Test converting back to string."""
url = normalize_memory_url("memory://specs/search")
assert url == "memory://specs/search"
def test_normalize_memory_url_no_prefix():
"""Test converting back to string."""
url = normalize_memory_url("specs/search")
assert url == "memory://specs/search"
def test_normalize_memory_url_empty():
"""Test that empty string raises ValueError."""
with pytest.raises(ValueError, match="cannot be empty"):
normalize_memory_url("")
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/continue_conversation.py:
--------------------------------------------------------------------------------
```python
"""Session continuation prompts for Basic Memory MCP server.
These prompts help users continue conversations and work across sessions,
providing context from previous interactions to maintain continuity.
"""
from typing import Annotated, Optional
from loguru import logger
from pydantic import Field
from basic_memory.config import get_project_config
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.server import mcp
from basic_memory.mcp.tools.utils import call_post
from basic_memory.schemas.base import TimeFrame
from basic_memory.schemas.prompt import ContinueConversationRequest
@mcp.prompt(
name="continue_conversation",
description="Continue a previous conversation",
)
async def continue_conversation(
topic: Annotated[Optional[str], Field(description="Topic or keyword to search for")] = None,
timeframe: Annotated[
Optional[TimeFrame],
Field(description="How far back to look for activity (e.g. '1d', '1 week')"),
] = None,
) -> str:
"""Continue a previous conversation or work session.
This prompt helps you pick up where you left off by finding recent context
about a specific topic or showing general recent activity.
Args:
topic: Topic or keyword to search for (optional)
timeframe: How far back to look for activity
Returns:
Context from previous sessions on this topic
"""
logger.info(f"Continuing session, topic: {topic}, timeframe: {timeframe}")
async with get_client() as client:
# Create request model
request = ContinueConversationRequest( # pyright: ignore [reportCallIssue]
topic=topic, timeframe=timeframe
)
project_url = get_project_config().project_url
# Call the prompt API endpoint
response = await call_post(
client,
f"{project_url}/prompt/continue-conversation",
json=request.model_dump(exclude_none=True),
)
# Extract the rendered prompt from the response
result = response.json()
return result["prompt"]
```
--------------------------------------------------------------------------------
/tests/api/test_async_client.py:
--------------------------------------------------------------------------------
```python
"""Tests for async_client configuration."""
import os
from unittest.mock import patch
from httpx import AsyncClient, ASGITransport, Timeout
from basic_memory.config import ConfigManager
from basic_memory.mcp.async_client import create_client
def test_create_client_uses_asgi_when_no_remote_env():
"""Test that create_client uses ASGI transport when BASIC_MEMORY_USE_REMOTE_API is not set."""
# Ensure env vars are not set (pop if they exist)
with patch.dict("os.environ", clear=False):
os.environ.pop("BASIC_MEMORY_USE_REMOTE_API", None)
os.environ.pop("BASIC_MEMORY_CLOUD_MODE", None)
client = create_client()
assert isinstance(client, AsyncClient)
assert isinstance(client._transport, ASGITransport)
assert str(client.base_url) == "http://test"
def test_create_client_uses_http_when_cloud_mode_env_set():
"""Test that create_client uses HTTP transport when BASIC_MEMORY_CLOUD_MODE is set."""
config = ConfigManager().config
with patch.dict("os.environ", {"BASIC_MEMORY_CLOUD_MODE": "True"}):
client = create_client()
assert isinstance(client, AsyncClient)
assert not isinstance(client._transport, ASGITransport)
# Cloud mode uses cloud_host/proxy as base_url
assert str(client.base_url) == f"{config.cloud_host}/proxy/"
def test_create_client_configures_extended_timeouts():
"""Test that create_client configures 30-second timeouts for long operations."""
# Ensure env vars are not set (pop if they exist)
with patch.dict("os.environ", clear=False):
os.environ.pop("BASIC_MEMORY_USE_REMOTE_API", None)
os.environ.pop("BASIC_MEMORY_CLOUD_MODE", None)
client = create_client()
# Verify timeout configuration
assert isinstance(client.timeout, Timeout)
assert client.timeout.connect == 10.0 # 10 seconds for connection
assert client.timeout.read == 30.0 # 30 seconds for reading
assert client.timeout.write == 30.0 # 30 seconds for writing
assert client.timeout.pool == 30.0 # 30 seconds for pool
```
--------------------------------------------------------------------------------
/llms-install.md:
--------------------------------------------------------------------------------
```markdown
# Basic Memory Installation Guide for LLMs
This guide is specifically designed to help AI assistants like Cline install and configure Basic Memory. Follow these
steps in order.
## Installation Steps
### 1. Install Basic Memory Package
Use one of the following package managers to install:
```bash
# Install with uv (recommended)
uv tool install basic-memory
# Or with pip
pip install basic-memory
```
### 2. Configure MCP Server
Add the following to your config:
```json
{
"mcpServers": {
"basic-memory": {
"command": "uvx",
"args": [
"basic-memory",
"mcp"
]
}
}
}
```
For Claude Desktop, this file is located at:
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
### 3. Start Synchronization (optional)
To synchronize files in real-time, run:
```bash
basic-memory sync --watch
```
Or for a one-time sync:
```bash
basic-memory sync
```
## Configuration Options
### Custom Directory
To use a directory other than the default `~/basic-memory`:
```bash
basic-memory project add custom-project /path/to/your/directory
basic-memory project default custom-project
```
### Multiple Projects
To manage multiple knowledge bases:
```bash
# List all projects
basic-memory project list
# Add a new project
basic-memory project add work ~/work-basic-memory
# Set default project
basic-memory project default work
```
## Importing Existing Data
### From Claude.ai
```bash
basic-memory import claude conversations path/to/conversations.json
basic-memory import claude projects path/to/projects.json
```
### From ChatGPT
```bash
basic-memory import chatgpt path/to/conversations.json
```
### From MCP Memory Server
```bash
basic-memory import memory-json path/to/memory.json
```
## Troubleshooting
If you encounter issues:
1. Check that Basic Memory is properly installed:
```bash
basic-memory --version
```
2. Verify the sync process is running:
```bash
ps aux | grep basic-memory
```
3. Check sync output for errors:
```bash
basic-memory sync --verbose
```
4. Check log output:
```bash
cat ~/.basic-memory/basic-memory.log
```
For more detailed information, refer to the [full documentation](https://memory.basicmachines.co/).
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/prompts/ai_assistant_guide.py:
--------------------------------------------------------------------------------
```python
from pathlib import Path
from basic_memory.config import ConfigManager
from basic_memory.mcp.server import mcp
from loguru import logger
@mcp.resource(
uri="memory://ai_assistant_guide",
name="ai assistant guide",
description="Give an AI assistant guidance on how to use Basic Memory tools effectively",
)
def ai_assistant_guide() -> str:
"""Return a concise guide on Basic Memory tools and how to use them.
Dynamically adapts instructions based on configuration:
- Default project mode: Simplified instructions with automatic project
- Regular mode: Project discovery and selection guidance
- CLI constraint mode: Single project constraint information
Returns:
A focused guide on Basic Memory usage.
"""
logger.info("Loading AI assistant guide resource")
# Load base guide content
guide_doc = Path(__file__).parent.parent / "resources" / "ai_assistant_guide.md"
content = guide_doc.read_text(encoding="utf-8")
# Check configuration for mode-specific instructions
config = ConfigManager().config
# Add mode-specific header
mode_info = ""
if config.default_project_mode:
mode_info = f"""
# 🎯 Default Project Mode Active
**Current Configuration**: All operations automatically use project '{config.default_project}'
**Simplified Usage**: You don't need to specify the project parameter in tool calls.
- `write_note(title="Note", content="...", folder="docs")` ✅
- Project parameter is optional and will default to '{config.default_project}'
- To use a different project, explicitly specify: `project="other-project"`
────────────────────────────────────────
"""
else:
mode_info = """
# 🔧 Multi-Project Mode Active
**Current Configuration**: Project parameter required for all operations
**Project Discovery Required**: Use these tools to select a project:
- `list_memory_projects()` - See all available projects
- `recent_activity()` - Get project activity and recommendations
- Remember the user's project choice throughout the conversation
────────────────────────────────────────
"""
# Prepend mode info to the guide
enhanced_content = mode_info + content
logger.info(
f"Loaded AI assistant guide ({len(enhanced_content)} chars) with mode: {'default_project' if config.default_project_mode else 'multi_project'}"
)
return enhanced_content
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/view_note.py:
--------------------------------------------------------------------------------
```python
"""View note tool for Basic Memory MCP server."""
from textwrap import dedent
from typing import Optional
from loguru import logger
from fastmcp import Context
from basic_memory.mcp.server import mcp
from basic_memory.mcp.tools.read_note import read_note
@mcp.tool(
description="View a note as a formatted artifact for better readability.",
)
async def view_note(
identifier: str,
project: Optional[str] = None,
page: int = 1,
page_size: int = 10,
context: Context | None = None,
) -> str:
"""View a markdown note as a formatted artifact.
This tool reads a note using the same logic as read_note but instructs Claude
to display the content as a markdown artifact in the Claude Desktop app.
Project parameter optional with server resolution.
Args:
identifier: The title or permalink of the note to view
project: Project name to read from. Optional - server will resolve using hierarchy.
If unknown, use list_memory_projects() to discover available projects.
page: Page number for paginated results (default: 1)
page_size: Number of items per page (default: 10)
context: Optional FastMCP context for performance caching.
Returns:
Instructions for Claude to create a markdown artifact with the note content.
Examples:
# View a note by title
view_note("Meeting Notes")
# View a note by permalink
view_note("meetings/weekly-standup")
# View with pagination
view_note("large-document", page=2, page_size=5)
# Explicit project specification
view_note("Meeting Notes", project="my-project")
Raises:
HTTPError: If project doesn't exist or is inaccessible
SecurityError: If identifier attempts path traversal
"""
logger.info(f"Viewing note: {identifier} in project: {project}")
# Call the existing read_note logic
content = await read_note.fn(identifier, project, page, page_size, context)
# Check if this is an error message (note not found)
if "# Note Not Found" in content:
return content # Return error message directly
# Return instructions for Claude to create an artifact
return dedent(f"""
Note retrieved: "{identifier}"
Display this note as a markdown artifact for the user.
Content:
---
{content}
---
""").strip()
```
--------------------------------------------------------------------------------
/.claude/commands/spec.md:
--------------------------------------------------------------------------------
```markdown
---
allowed-tools: mcp__basic-memory__write_note, mcp__basic-memory__read_note, mcp__basic-memory__search_notes, mcp__basic-memory__edit_note, Task
argument-hint: [create|status|implement|review] [spec-name]
description: Manage specifications in our development process
---
## Context
You are managing specifications using our specification-driven development process defined in @docs/specs/SPEC-001.md.
Available commands:
- `create [name]` - Create new specification
- `status` - Show all spec statuses
- `implement [spec-name]` - Hand spec to appropriate agent
- `review [spec-name]` - Review implementation against spec
## Your task
Execute the spec command: `/spec $ARGUMENTS`
### If command is "create":
1. Get next SPEC number by searching existing specs
2. Create new spec using template from @docs/specs/Slash\ Commands\ Reference.md
3. Place in `/specs` folder with title "SPEC-XXX: [name]"
4. Include standard sections: Why, What, How, How to Evaluate
### If command is "status":
1. Search all notes in `/specs` folder
2. Display table with spec number, title, and status
3. Show any dependencies or assigned agents
### If command is "implement":
1. Read the specified spec
2. Determine appropriate agent based on content:
- Frontend/UI → vue-developer
- Architecture/system → system-architect
- Backend/API → python-developer
3. Launch Task tool with appropriate agent and spec context
### If command is "review":
1. Read the specified spec and its "How to Evaluate" section
2. Review current implementation against success criteria with careful evaluation of:
- **Functional completeness** - All specified features working
- **Test coverage analysis** - Actual test files and coverage percentage
- Count existing test files vs required components/APIs/composables
- Verify unit tests, integration tests, and end-to-end tests
- Check for missing test categories (component, API, workflow)
- **Code quality metrics** - TypeScript compilation, linting, performance
- **Architecture compliance** - Component isolation, state management patterns
- **Documentation completeness** - Implementation matches specification
3. Provide honest, accurate assessment - do not overstate completeness
4. Document findings and update spec with review results
5. If gaps found, clearly identify what still needs to be implemented/tested
Use the agent definitions from @docs/specs/Agent\ Definitions.md for implementation handoffs.
```
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
```yaml
name: Release
on:
push:
tags:
- 'v*' # Trigger on version tags like v1.0.0, v0.13.0, etc.
jobs:
release:
runs-on: ubuntu-latest
permissions:
id-token: write
contents: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install uv
run: |
pip install uv
- name: Install dependencies and build
run: |
uv venv
uv sync
uv build
- name: Verify build succeeded
run: |
# Verify that build artifacts exist
ls -la dist/
echo "Build completed successfully"
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
files: |
dist/*.whl
dist/*.tar.gz
generate_release_notes: true
tag_name: ${{ github.ref_name }}
token: ${{ secrets.GITHUB_TOKEN }}
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
password: ${{ secrets.PYPI_TOKEN }}
homebrew:
name: Update Homebrew Formula
needs: release
runs-on: ubuntu-latest
# Only run for stable releases (not dev, beta, or rc versions)
if: ${{ !contains(github.ref_name, 'dev') && !contains(github.ref_name, 'b') && !contains(github.ref_name, 'rc') }}
permissions:
contents: write
actions: read
steps:
- name: Update Homebrew formula
uses: mislav/bump-homebrew-formula-action@v3
with:
# Formula name in homebrew-basic-memory repo
formula-name: basic-memory
# The tap repository
homebrew-tap: basicmachines-co/homebrew-basic-memory
# Base branch of the tap repository
base-branch: main
# Download URL will be automatically constructed from the tag
download-url: https://github.com/basicmachines-co/basic-memory/archive/refs/tags/${{ github.ref_name }}.tar.gz
# Commit message for the formula update
commit-message: |
{{formulaName}} {{version}}
Created by https://github.com/basicmachines-co/basic-memory/actions/runs/${{ github.run_id }}
env:
# Personal Access Token with repo scope for homebrew-basic-memory repo
COMMITTER_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
```
--------------------------------------------------------------------------------
/src/basic_memory/importers/base.py:
--------------------------------------------------------------------------------
```python
"""Base import service for Basic Memory."""
import logging
from abc import abstractmethod
from pathlib import Path
from typing import Any, Optional, TypeVar
from basic_memory.markdown.markdown_processor import MarkdownProcessor
from basic_memory.markdown.schemas import EntityMarkdown
from basic_memory.schemas.importer import ImportResult
logger = logging.getLogger(__name__)
T = TypeVar("T", bound=ImportResult)
class Importer[T: ImportResult]:
"""Base class for all import services."""
def __init__(self, base_path: Path, markdown_processor: MarkdownProcessor):
"""Initialize the import service.
Args:
base_path: Base path of the project; resolved to an absolute path.
markdown_processor: MarkdownProcessor instance for writing markdown files.
"""
self.base_path = base_path.resolve() # Get absolute path
self.markdown_processor = markdown_processor
@abstractmethod
async def import_data(self, source_data, destination_folder: str, **kwargs: Any) -> T:
"""Import data from source file to destination folder.
Args:
source_data: Source data to import (already parsed, not a file path).
destination_folder: Destination folder within the project.
**kwargs: Additional keyword arguments for specific import types.
Returns:
ImportResult containing statistics and status of the import.
"""
pass # pragma: no cover
async def write_entity(self, entity: EntityMarkdown, file_path: Path) -> None:
"""Write entity to file using markdown processor.
Args:
entity: EntityMarkdown instance to write.
file_path: Path to write the entity to.
"""
await self.markdown_processor.write_file(file_path, entity)
def ensure_folder_exists(self, folder: str) -> Path:
"""Ensure folder exists, create if it doesn't.
Args:
folder: Folder name or path within the project.
Returns:
Path to the folder.
"""
folder_path = self.base_path / folder
folder_path.mkdir(parents=True, exist_ok=True)
return folder_path
@abstractmethod
def handle_error(
self, message: str, error: Optional[Exception] = None
) -> T: # pragma: no cover
"""Handle errors during import.
Args:
message: Error message.
error: Optional exception that caused the error.
Returns:
ImportResult with error information.
"""
pass
```
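A sketch of what a concrete importer looks like against this base class. ImportResult's exact fields are not shown in this dump; `success` and `error_message` are assumptions based on how the CLI commands below consume results:
```python
# Sketch of a concrete importer. ImportResult's success/error_message fields
# are assumed from the CLI commands' usage elsewhere in this repo.
from typing import Any

from basic_memory.importers.base import Importer
from basic_memory.schemas.importer import ImportResult


class NoopImporter(Importer[ImportResult]):
    """Hypothetical importer: creates the destination folder, writes nothing."""

    async def import_data(
        self, source_data: Any, destination_folder: str, **kwargs: Any
    ) -> ImportResult:
        self.ensure_folder_exists(destination_folder)
        return ImportResult(success=True)  # field name assumed

    def handle_error(self, message: str, error: Exception | None = None) -> ImportResult:
        return ImportResult(success=False, error_message=message)  # fields assumed
```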
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
# Docker Compose configuration for Basic Memory
# See docs/Docker.md for detailed setup instructions
version: '3.8'
services:
basic-memory:
# Use pre-built image (recommended for most users)
image: ghcr.io/basicmachines-co/basic-memory:latest
# Uncomment to build locally instead:
# build: .
container_name: basic-memory-server
# Volume mounts for knowledge directories and persistent data
volumes:
# Persistent storage for configuration and database
- basic-memory-config:/root/.basic-memory:rw
# Mount your knowledge directory (required)
# Change './knowledge' to your actual Obsidian vault or knowledge directory
- ./knowledge:/app/data:rw
# OPTIONAL: Mount additional knowledge directories for multiple projects
# - ./work-notes:/app/data/work:rw
# - ./personal-notes:/app/data/personal:rw
# You can edit the project config manually in the mounted config volume
# The default project will be configured to use /app/data
environment:
# Project configuration
- BASIC_MEMORY_DEFAULT_PROJECT=main
# Enable real-time file synchronization (recommended for Docker)
- BASIC_MEMORY_SYNC_CHANGES=true
# Logging configuration
- BASIC_MEMORY_LOG_LEVEL=INFO
# Sync delay in milliseconds (adjust for performance vs responsiveness)
- BASIC_MEMORY_SYNC_DELAY=1000
# Port exposure for HTTP transport (only needed if not using STDIO)
ports:
- "8000:8000"
# Command with SSE transport (configurable via environment variables above)
# IMPORTANT: The SSE and streamable-http endpoints are not secured
command: ["basic-memory", "mcp", "--transport", "sse", "--host", "0.0.0.0", "--port", "8000"]
# Container management
restart: unless-stopped
# Health monitoring
healthcheck:
test: ["CMD", "basic-memory", "--version"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s
# Optional: Resource limits
# deploy:
# resources:
# limits:
# memory: 512M
# cpus: '0.5'
# reservations:
# memory: 256M
# cpus: '0.25'
volumes:
# Named volume for persistent configuration and database
# This ensures your configuration and knowledge graph persist across container restarts
basic-memory-config:
driver: local
# Network configuration (optional)
# networks:
# basic-memory-net:
# driver: bridge
```
--------------------------------------------------------------------------------
/src/basic_memory/mcp/resources/project_info.py:
--------------------------------------------------------------------------------
```python
"""Project info tool for Basic Memory MCP server."""
from typing import Optional
from loguru import logger
from fastmcp import Context
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.project_context import get_active_project
from basic_memory.mcp.server import mcp
from basic_memory.mcp.tools.utils import call_get
from basic_memory.schemas import ProjectInfoResponse
@mcp.resource(
uri="memory://{project}/info",
description="Get information and statistics about the current Basic Memory project.",
)
async def project_info(
project: Optional[str] = None, context: Context | None = None
) -> ProjectInfoResponse:
"""Get comprehensive information about the current Basic Memory project.
This tool provides detailed statistics and status information about your
Basic Memory project, including:
- Project configuration
- Entity, observation, and relation counts
- Graph metrics (most connected entities, isolated entities)
- Recent activity and growth over time
- System status (database, watch service, version)
Use this tool to:
- Verify your Basic Memory installation is working correctly
- Get insights into your knowledge base structure
- Monitor growth and activity over time
- Identify potential issues like unresolved relations
Args:
project: Optional project name. If not provided, uses default_project
(if default_project_mode=true) or CLI constraint. If unknown,
use list_memory_projects() to discover available projects.
context: Optional FastMCP context for performance caching.
Returns:
Detailed project information and statistics
Examples:
# Get information about the current/default project
info = await project_info()
# Get information about a specific project
info = await project_info(project="my-project")
# Check entity counts
print(f"Total entities: {info.statistics.total_entities}")
# Check system status
print(f"Basic Memory version: {info.system.version}")
"""
logger.info("Getting project info")
async with get_client() as client:
project_config = await get_active_project(client, project, context)
project_url = project_config.permalink
# Call the API endpoint
response = await call_get(client, f"{project_url}/project/info")
# Convert response to ProjectInfoResponse
return ProjectInfoResponse.model_validate(response.json())
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/sync_report.py:
--------------------------------------------------------------------------------
```python
"""Pydantic schemas for sync report responses."""
from datetime import datetime
from typing import TYPE_CHECKING, Dict, List, Set
from pydantic import BaseModel, Field
# avoid circular imports
if TYPE_CHECKING:
from basic_memory.sync.sync_service import SyncReport
class SkippedFileResponse(BaseModel):
"""Information about a file that was skipped due to repeated failures."""
path: str = Field(description="File path relative to project root")
reason: str = Field(description="Error message from last failure")
failure_count: int = Field(description="Number of consecutive failures")
first_failed: datetime = Field(description="Timestamp of first failure")
model_config = {"from_attributes": True}
class SyncReportResponse(BaseModel):
"""Report of file changes found compared to database state.
Used for API responses when scanning or syncing files.
"""
new: Set[str] = Field(default_factory=set, description="Files on disk but not in database")
modified: Set[str] = Field(default_factory=set, description="Files with different checksums")
deleted: Set[str] = Field(default_factory=set, description="Files in database but not on disk")
moves: Dict[str, str] = Field(
default_factory=dict, description="Files moved (old_path -> new_path)"
)
checksums: Dict[str, str] = Field(
default_factory=dict, description="Current file checksums (path -> checksum)"
)
skipped_files: List[SkippedFileResponse] = Field(
default_factory=list, description="Files skipped due to repeated failures"
)
total: int = Field(description="Total number of changes")
@classmethod
def from_sync_report(cls, report: "SyncReport") -> "SyncReportResponse":
"""Convert SyncReport dataclass to Pydantic model.
Args:
report: SyncReport dataclass from sync service
Returns:
SyncReportResponse with same data
"""
return cls(
new=report.new,
modified=report.modified,
deleted=report.deleted,
moves=report.moves,
checksums=report.checksums,
skipped_files=[
SkippedFileResponse(
path=skipped.path,
reason=skipped.reason,
failure_count=skipped.failure_count,
first_failed=skipped.first_failed,
)
for skipped in report.skipped_files
],
total=report.total,
)
model_config = {"from_attributes": True}
```
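A small sketch of constructing and serializing the response model directly (field values hypothetical); every field except `total` has a default:
```python
# Sketch: building a SyncReportResponse by hand and serializing it.
from basic_memory.schemas.sync_report import SyncReportResponse

report = SyncReportResponse(
    new={"notes/todo.md"},
    checksums={"notes/todo.md": "abc123"},
    total=1,
)
print(report.model_dump_json())  # sets serialize as JSON arrays
```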
--------------------------------------------------------------------------------
/tests/utils/test_parse_tags.py:
--------------------------------------------------------------------------------
```python
"""Tests for parse_tags utility function."""
from typing import List, Union
import pytest
from basic_memory.utils import parse_tags
@pytest.mark.parametrize(
"input_tags,expected",
[
# None input
(None, []),
# List inputs
([], []),
(["tag1", "tag2"], ["tag1", "tag2"]),
(["tag1", "", "tag2"], ["tag1", "tag2"]), # Empty tags are filtered
([" tag1 ", " tag2 "], ["tag1", "tag2"]), # Whitespace is stripped
# String inputs
("", []),
("tag1", ["tag1"]),
("tag1,tag2", ["tag1", "tag2"]),
("tag1, tag2", ["tag1", "tag2"]), # Whitespace after comma is stripped
("tag1,,tag2", ["tag1", "tag2"]), # Empty tags are filtered
# Tags with leading '#' characters - these should be stripped
(["#tag1", "##tag2"], ["tag1", "tag2"]),
("#tag1,##tag2", ["tag1", "tag2"]),
(["tag1", "#tag2", "##tag3"], ["tag1", "tag2", "tag3"]),
# Mixed whitespace and '#' characters
([" #tag1 ", " ##tag2 "], ["tag1", "tag2"]),
(" #tag1 , ##tag2 ", ["tag1", "tag2"]),
# JSON stringified arrays (common AI assistant issue)
('["tag1", "tag2", "tag3"]', ["tag1", "tag2", "tag3"]),
('["system", "overview", "reference"]', ["system", "overview", "reference"]),
('["#tag1", "##tag2"]', ["tag1", "tag2"]), # JSON array with hash prefixes
('[ "tag1" , "tag2" ]', ["tag1", "tag2"]), # JSON array with extra spaces
],
)
def test_parse_tags(input_tags: Union[List[str], str, None], expected: List[str]) -> None:
"""Test tag parsing with various input formats."""
result = parse_tags(input_tags)
assert result == expected
def test_parse_tags_special_case() -> None:
"""Test parsing from non-string, non-list types."""
# Test with custom object that has __str__ method
class TagObject:
def __str__(self) -> str:
return "tag1,tag2"
result = parse_tags(TagObject()) # pyright: ignore [reportArgumentType]
assert result == ["tag1", "tag2"]
def test_parse_tags_invalid_json() -> None:
"""Test that invalid JSON strings fall back to comma-separated parsing."""
# Invalid JSON should fall back to comma-separated parsing
result = parse_tags("[invalid json")
assert result == ["[invalid json"] # Treated as single tag
result = parse_tags("[tag1, tag2]") # Valid bracket format but not JSON
assert result == ["[tag1", "tag2]"] # Split by comma
result = parse_tags('["tag1", "tag2"') # Incomplete JSON
assert result == ['["tag1"', '"tag2"'] # Fall back to comma separation
```
--------------------------------------------------------------------------------
/.github/workflows/claude-issue-triage.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Issue Triage
on:
issues:
types: [opened]
jobs:
triage:
runs-on: ubuntu-latest
permissions:
issues: write
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Issue Triage
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
track_progress: true # Show triage progress
prompt: |
Analyze this new Basic Memory issue and perform triage:
**Issue Analysis:**
1. **Type Classification:**
- Bug report (code defect)
- Feature request (new functionality)
- Enhancement (improvement to existing feature)
- Documentation (docs improvement)
- Question/Support (user help)
- MCP tool issue (specific to MCP functionality)
2. **Priority Assessment:**
- Critical: Security issues, data loss, complete breakage
- High: Major functionality broken, affects many users
- Medium: Minor bugs, usability issues
- Low: Nice-to-have improvements, cosmetic issues
3. **Component Classification:**
- CLI commands
- MCP tools
- Database/sync
- Cloud functionality
- Documentation
- Testing
4. **Complexity Estimate:**
- Simple: Quick fix, documentation update
- Medium: Requires some investigation/testing
- Complex: Major feature work, architectural changes
**Actions to Take:**
1. Add appropriate labels using: `gh issue edit ${{ github.event.issue.number }} --add-label "label1,label2"`
2. Check for duplicates using: `gh search issues`
3. If duplicate found, comment mentioning the original issue
4. For feature requests, ask clarifying questions if needed
5. For bugs, request reproduction steps if missing
**Available Labels:**
- Type: bug, enhancement, feature, documentation, question, mcp-tool
- Priority: critical, high, medium, low
- Component: cli, mcp, database, cloud, docs, testing
- Complexity: simple, medium, complex
- Status: needs-reproduction, needs-clarification, duplicate
Read the issue carefully and provide helpful triage with appropriate labels.
claude_args: '--allowed-tools "Bash(gh issue:*),Bash(gh search:*),Read"'
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/management_router.py:
--------------------------------------------------------------------------------
```python
"""Management router for basic-memory API."""
import asyncio
from fastapi import APIRouter, Request
from loguru import logger
from pydantic import BaseModel
from basic_memory.config import ConfigManager
from basic_memory.deps import SyncServiceDep, ProjectRepositoryDep
router = APIRouter(prefix="/management", tags=["management"])
class WatchStatusResponse(BaseModel):
"""Response model for watch status."""
running: bool
"""Whether the watch service is currently running."""
@router.get("/watch/status", response_model=WatchStatusResponse)
async def get_watch_status(request: Request) -> WatchStatusResponse:
"""Get the current status of the watch service."""
return WatchStatusResponse(
running=request.app.state.watch_task is not None and not request.app.state.watch_task.done()
)
@router.post("/watch/start", response_model=WatchStatusResponse)
async def start_watch_service(
request: Request, project_repository: ProjectRepositoryDep, sync_service: SyncServiceDep
) -> WatchStatusResponse:
"""Start the watch service if it's not already running."""
# needed because of circular imports from sync -> app
from basic_memory.sync import WatchService
from basic_memory.sync.background_sync import create_background_sync_task
if request.app.state.watch_task is not None and not request.app.state.watch_task.done():
# Watch service is already running
return WatchStatusResponse(running=True)
app_config = ConfigManager().config
# Create and start a new watch service
logger.info("Starting watch service via management API")
# Get services needed for the watch task
watch_service = WatchService(
app_config=app_config,
project_repository=project_repository,
)
# Create and store the task
watch_task = create_background_sync_task(sync_service, watch_service)
request.app.state.watch_task = watch_task
return WatchStatusResponse(running=True)
@router.post("/watch/stop", response_model=WatchStatusResponse)
async def stop_watch_service(request: Request) -> WatchStatusResponse: # pragma: no cover
"""Stop the watch service if it's running."""
if request.app.state.watch_task is None or request.app.state.watch_task.done():
# Watch service is not running
return WatchStatusResponse(running=False)
# Cancel the running task
logger.info("Stopping watch service via management API")
request.app.state.watch_task.cancel()
# Wait for it to be properly cancelled
try:
await request.app.state.watch_task
except asyncio.CancelledError:
pass
request.app.state.watch_task = None
return WatchStatusResponse(running=False)
```
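A sketch of driving these endpoints from an HTTP client; the base URL and the assumption that the router is mounted at the root are illustrative, not project defaults:
```python
# Sketch: polling and starting the watch service over HTTP (base URL assumed).
import httpx

BASE = "http://localhost:8000"  # hypothetical deployment

status = httpx.get(f"{BASE}/management/watch/status").json()
if not status["running"]:
    started = httpx.post(f"{BASE}/management/watch/start").json()
    assert started["running"]
```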
--------------------------------------------------------------------------------
/tests/sync/test_sync_wikilink_issue.py:
--------------------------------------------------------------------------------
```python
"""Test for issue #72 - notes with wikilinks staying in modified status."""
from pathlib import Path
import pytest
from basic_memory.sync.sync_service import SyncService
async def create_test_file(path: Path, content: str) -> None:
"""Create a test file with given content."""
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(content)
async def force_full_scan(sync_service: SyncService) -> None:
"""Force next sync to do a full scan by clearing watermark (for testing moves/deletions)."""
if sync_service.entity_repository.project_id is not None:
project = await sync_service.project_repository.find_by_id(
sync_service.entity_repository.project_id
)
if project:
await sync_service.project_repository.update(
project.id,
{
"last_scan_timestamp": None,
"last_file_count": None,
},
)
@pytest.mark.asyncio
async def test_wikilink_modified_status_issue(sync_service: SyncService, project_config):
"""Test that files with wikilinks don't remain in modified status after sync."""
project_dir = project_config.home
# Create a file with a wikilink
content = """---
title: Test Wikilink
type: note
---
# Test File
This file contains a wikilink to [[another-file]].
"""
test_file_path = project_dir / "test_wikilink.md"
await create_test_file(test_file_path, content)
# Initial sync
report1 = await sync_service.sync(project_config.home)
assert "test_wikilink.md" in report1.new
assert "test_wikilink.md" not in report1.modified
# Sync again without changing the file - should not be modified
report2 = await sync_service.sync(project_config.home)
assert "test_wikilink.md" not in report2.new
assert "test_wikilink.md" not in report2.modified
# Create the target file
target_content = """---
title: Another File
type: note
---
# Another File
This is the target file.
"""
target_file_path = project_dir / "another_file.md"
await create_test_file(target_file_path, target_content)
# Force full scan to detect the new file
# (file just created may not be newer than watermark due to timing precision)
await force_full_scan(sync_service)
# Sync again after adding target file
report3 = await sync_service.sync(project_config.home)
assert "another_file.md" in report3.new
assert "test_wikilink.md" not in report3.modified
# Sync one more time - both files should now be stable
report4 = await sync_service.sync(project_config.home)
assert "test_wikilink.md" not in report4.modified
assert "another_file.md" not in report4.modified
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/directory_router.py:
--------------------------------------------------------------------------------
```python
"""Router for directory tree operations."""
from typing import List, Optional
from fastapi import APIRouter, Query
from basic_memory.deps import DirectoryServiceDep, ProjectIdDep
from basic_memory.schemas.directory import DirectoryNode
router = APIRouter(prefix="/directory", tags=["directory"])
@router.get("/tree", response_model=DirectoryNode, response_model_exclude_none=True)
async def get_directory_tree(
directory_service: DirectoryServiceDep,
project_id: ProjectIdDep,
):
"""Get hierarchical directory structure from the knowledge base.
Args:
directory_service: Service for directory operations
project_id: ID of the current project
Returns:
DirectoryNode representing the root of the hierarchical tree structure
"""
# Get a hierarchical directory tree for the specific project
tree = await directory_service.get_directory_tree()
# Return the hierarchical tree
return tree
@router.get("/structure", response_model=DirectoryNode, response_model_exclude_none=True)
async def get_directory_structure(
directory_service: DirectoryServiceDep,
project_id: ProjectIdDep,
):
"""Get folder structure for navigation (no files).
Optimized endpoint for folder tree navigation. Returns only directory nodes
without file metadata. For full tree with files, use /directory/tree.
Args:
directory_service: Service for directory operations
project_id: ID of the current project
Returns:
DirectoryNode tree containing only folders (type="directory")
"""
structure = await directory_service.get_directory_structure()
return structure
@router.get("/list", response_model=List[DirectoryNode], response_model_exclude_none=True)
async def list_directory(
directory_service: DirectoryServiceDep,
project_id: ProjectIdDep,
dir_name: str = Query("/", description="Directory path to list"),
depth: int = Query(1, ge=1, le=10, description="Recursion depth (1-10)"),
file_name_glob: Optional[str] = Query(
None, description="Glob pattern for filtering file names"
),
):
"""List directory contents with filtering and depth control.
Args:
directory_service: Service for directory operations
project_id: ID of the current project
dir_name: Directory path to list (default: root "/")
depth: Recursion depth (1-10, default: 1 for immediate children only)
file_name_glob: Optional glob pattern for filtering file names (e.g., "*.md", "*meeting*")
Returns:
List of DirectoryNode objects matching the criteria
"""
# Get directory listing with filtering
nodes = await directory_service.list_directory(
dir_name=dir_name,
depth=depth,
file_name_glob=file_name_glob,
)
return nodes
```
--------------------------------------------------------------------------------
/src/basic_memory/alembic/env.py:
--------------------------------------------------------------------------------
```python
"""Alembic environment configuration."""
import os
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from basic_memory.config import ConfigManager
# set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
os.environ["BASIC_MEMORY_ENV"] = "test"
# Import after setting environment variable # noqa: E402
from basic_memory.models import Base # noqa: E402
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
app_config = ConfigManager().config
# Set the SQLAlchemy URL from our app config
sqlalchemy_url = f"sqlite:///{app_config.database_path}"
config.set_main_option("sqlalchemy.url", sqlalchemy_url)
# print(f"Using SQLAlchemy URL: {sqlalchemy_url}")
# Interpret the config file for Python logging.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = Base.metadata
# Add this function to tell Alembic what to include/exclude
def include_object(object, name, type_, reflected, compare_to):
# Ignore SQLite FTS tables
if type_ == "table" and name.startswith("search_index"):
return False
return True
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
include_object=include_object,
render_as_batch=True,
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
render_as_batch=True,
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_chatgpt.py:
--------------------------------------------------------------------------------
```python
"""Import command for ChatGPT conversations."""
import asyncio
import json
from pathlib import Path
from typing import Annotated
import typer
from basic_memory.cli.app import import_app
from basic_memory.config import get_project_config
from basic_memory.importers import ChatGPTImporter
from basic_memory.markdown import EntityParser, MarkdownProcessor
from loguru import logger
from rich.console import Console
from rich.panel import Panel
console = Console()
async def get_markdown_processor() -> MarkdownProcessor:
"""Get MarkdownProcessor instance."""
config = get_project_config()
entity_parser = EntityParser(config.home)
return MarkdownProcessor(entity_parser)
@import_app.command(name="chatgpt", help="Import conversations from ChatGPT JSON export.")
def import_chatgpt(
conversations_json: Annotated[
Path, typer.Argument(help="Path to ChatGPT conversations.json file")
] = Path("conversations.json"),
folder: Annotated[
str, typer.Option(help="The folder to place the files in.")
] = "conversations",
):
"""Import chat conversations from ChatGPT JSON format.
This command will:
1. Read the complex tree structure of messages
2. Convert them to linear markdown conversations
3. Save as clean, readable markdown files
After importing, run 'basic-memory sync' to index the new files.
"""
try:
if not conversations_json.exists(): # pragma: no cover
typer.echo(f"Error: File not found: {conversations_json}", err=True)
raise typer.Exit(1)
# Get markdown processor
markdown_processor = asyncio.run(get_markdown_processor())
config = get_project_config()
# Process the file
base_path = config.home / folder
console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
# Create importer and run import
importer = ChatGPTImporter(config.home, markdown_processor)
with conversations_json.open("r", encoding="utf-8") as file:
json_data = json.load(file)
result = asyncio.run(importer.import_data(json_data, folder))
if not result.success: # pragma: no cover
typer.echo(f"Error during import: {result.error_message}", err=True)
raise typer.Exit(1)
# Show results
console.print(
Panel(
f"[green]Import complete![/green]\n\n"
f"Imported {result.conversations} conversations\n"
f"Containing {result.messages} messages",
expand=False,
)
)
console.print("\nRun 'basic-memory sync' to index the new files.")
except Exception as e:
logger.error("Import failed")
typer.echo(f"Error during import: {e}", err=True)
raise typer.Exit(1)
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_memory_json.py:
--------------------------------------------------------------------------------
```python
"""Import command for basic-memory CLI to import from JSON memory format."""
import asyncio
import json
from pathlib import Path
from typing import Annotated
import typer
from basic_memory.cli.app import import_app
from basic_memory.config import get_project_config
from basic_memory.importers.memory_json_importer import MemoryJsonImporter
from basic_memory.markdown import EntityParser, MarkdownProcessor
from loguru import logger
from rich.console import Console
from rich.panel import Panel
console = Console()
async def get_markdown_processor() -> MarkdownProcessor:
"""Get MarkdownProcessor instance."""
config = get_project_config()
entity_parser = EntityParser(config.home)
return MarkdownProcessor(entity_parser)
@import_app.command()
def memory_json(
json_path: Annotated[Path, typer.Argument(..., help="Path to memory.json file")] = Path(
"memory.json"
),
destination_folder: Annotated[
str, typer.Option(help="Optional destination folder within the project")
] = "",
):
"""Import entities and relations from a memory.json file.
This command will:
1. Read entities and relations from the JSON file
2. Create markdown files for each entity
3. Include outgoing relations in each entity's markdown
"""
if not json_path.exists():
typer.echo(f"Error: File not found: {json_path}", err=True)
raise typer.Exit(1)
config = get_project_config()
try:
# Get markdown processor
markdown_processor = asyncio.run(get_markdown_processor())
# Create the importer
importer = MemoryJsonImporter(config.home, markdown_processor)
# Process the file
base_path = config.home if not destination_folder else config.home / destination_folder
console.print(f"\nImporting from {json_path}...writing to {base_path}")
# Run the import for json log format
file_data = []
with json_path.open("r", encoding="utf-8") as file:
for line in file:
json_data = json.loads(line)
file_data.append(json_data)
result = asyncio.run(importer.import_data(file_data, destination_folder))
if not result.success: # pragma: no cover
typer.echo(f"Error during import: {result.error_message}", err=True)
raise typer.Exit(1)
# Show results
console.print(
Panel(
f"[green]Import complete![/green]\n\n"
f"Created {result.entities} entities\n"
f"Added {result.relations} relations\n"
f"Skipped {result.skipped_entities} entities\n",
expand=False,
)
)
except Exception as e:
logger.error("Import failed")
typer.echo(f"Error during import: {e}", err=True)
raise typer.Exit(1)
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_claude_projects.py:
--------------------------------------------------------------------------------
```python
"""Import command for basic-memory CLI to import project data from Claude.ai."""
import asyncio
import json
from pathlib import Path
from typing import Annotated
import typer
from basic_memory.cli.app import claude_app
from basic_memory.config import get_project_config
from basic_memory.importers.claude_projects_importer import ClaudeProjectsImporter
from basic_memory.markdown import EntityParser, MarkdownProcessor
from loguru import logger
from rich.console import Console
from rich.panel import Panel
console = Console()
async def get_markdown_processor() -> MarkdownProcessor:
"""Get MarkdownProcessor instance."""
config = get_project_config()
entity_parser = EntityParser(config.home)
return MarkdownProcessor(entity_parser)
@claude_app.command(name="projects", help="Import projects from Claude.ai.")
def import_projects(
projects_json: Annotated[Path, typer.Argument(..., help="Path to projects.json file")] = Path(
"projects.json"
),
base_folder: Annotated[
str, typer.Option(help="The base folder to place project files in.")
] = "projects",
):
"""Import project data from Claude.ai.
This command will:
1. Create a directory for each project
2. Store docs in a docs/ subdirectory
3. Place prompt template in project root
After importing, run 'basic-memory sync' to index the new files.
"""
config = get_project_config()
try:
if not projects_json.exists():
typer.echo(f"Error: File not found: {projects_json}", err=True)
raise typer.Exit(1)
# Get markdown processor
markdown_processor = asyncio.run(get_markdown_processor())
# Create the importer
importer = ClaudeProjectsImporter(config.home, markdown_processor)
# Process the file
base_path = config.home / base_folder if base_folder else config.home
console.print(f"\nImporting projects from {projects_json}...writing to {base_path}")
# Run the import
with projects_json.open("r", encoding="utf-8") as file:
json_data = json.load(file)
result = asyncio.run(importer.import_data(json_data, base_folder))
if not result.success: # pragma: no cover
typer.echo(f"Error during import: {result.error_message}", err=True)
raise typer.Exit(1)
# Show results
console.print(
Panel(
f"[green]Import complete![/green]\n\n"
f"Imported {result.documents} project documents\n"
f"Imported {result.prompts} prompt templates",
expand=False,
)
)
console.print("\nRun 'basic-memory sync' to index the new files.")
except Exception as e:
logger.error("Import failed")
typer.echo(f"Error during import: {e}", err=True)
raise typer.Exit(1)
```
--------------------------------------------------------------------------------
/.claude/commands/release/release.md:
--------------------------------------------------------------------------------
```markdown
# /release - Create Stable Release
Create a stable release using the automated justfile target with comprehensive validation.
## Usage
```
/release <version>
```
**Parameters:**
- `version` (required): Release version like `v0.13.2`
## Implementation
You are an expert release manager for the Basic Memory project. When the user runs `/release`, execute the following steps:
### Step 1: Pre-flight Validation
1. Verify version format matches `v\d+\.\d+\.\d+` pattern
2. Check current git status for uncommitted changes
3. Verify we're on the `main` branch
4. Confirm no existing tag with this version
#### Documentation Validation
1. **Changelog Check**
- CHANGELOG.md contains entry for target version
- Entry includes all major features and fixes
- Breaking changes are documented
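The pre-flight checks above can be sketched in Python; this is an illustrative approximation of the validation steps, not the justfile's actual implementation:
```python
# Hedged sketch of the Step 1 pre-flight checks (illustrative only).
import re
import subprocess


def run(*args: str) -> str:
    return subprocess.run(args, capture_output=True, text=True, check=True).stdout.strip()


def preflight(version: str) -> None:
    if not re.fullmatch(r"v\d+\.\d+\.\d+", version):
        raise SystemExit(f"Invalid version format: {version}")
    if run("git", "status", "--porcelain"):
        raise SystemExit("Uncommitted changes present")
    if run("git", "branch", "--show-current") != "main":
        raise SystemExit("Releases must be cut from main")
    if run("git", "tag", "--list", version):
        raise SystemExit(f"Tag {version} already exists")
```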
### Step 2: Use Justfile Automation
Execute the automated release process:
```bash
just release <version>
```
The justfile target handles:
- ✅ Version format validation
- ✅ Git status and branch checks
- ✅ Quality checks (`just check` - lint, format, type-check, tests)
- ✅ Version update in `src/basic_memory/__init__.py`
- ✅ Automatic commit with proper message
- ✅ Tag creation and pushing to GitHub
- ✅ Release workflow trigger
### Step 3: Monitor Release Process
1. Check that GitHub Actions workflow starts successfully
2. Monitor workflow completion at: https://github.com/basicmachines-co/basic-memory/actions
3. Verify PyPI publication
4. Test installation: `uv tool install basic-memory`
### Step 4: Post-Release Validation
1. Verify GitHub release is created automatically
2. Check PyPI publication
3. Validate release assets
4. Update any post-release documentation
## Pre-conditions Check
Before starting, verify:
- [ ] All beta testing is complete
- [ ] Critical bugs are fixed
- [ ] Breaking changes are documented
- [ ] CHANGELOG.md is updated (if needed)
- [ ] Version number follows semantic versioning
## Error Handling
- If `just release` fails, examine the error output for specific issues
- If quality checks fail, fix issues and retry
- If changelog entry missing, update CHANGELOG.md and commit before retrying
- If GitHub Actions fail, check workflow logs for debugging
## Success Output
```
🎉 Stable Release v0.13.2 Created Successfully!
🏷️ Tag: v0.13.2
📋 GitHub Release: https://github.com/basicmachines-co/basic-memory/releases/tag/v0.13.2
📦 PyPI: https://pypi.org/project/basic-memory/0.13.2/
🚀 GitHub Actions: Completed
Install with:
uv tool install basic-memory
Users can now upgrade:
uv tool upgrade basic-memory
```
## Context
- This creates production releases used by end users
- Must pass all quality gates before proceeding
- Uses the automated justfile target for consistency
- Version is automatically updated in `__init__.py`
- Triggers automated GitHub release with changelog
- Leverages uv-dynamic-versioning for package version management
```
--------------------------------------------------------------------------------
/specs/SPEC-2 Slash Commands Reference.md:
--------------------------------------------------------------------------------
```markdown
---
title: 'SPEC-2: Slash Commands Reference'
type: spec
permalink: specs/spec-2-slash-commands-reference
tags:
- commands
- process
- reference
---
# SPEC-2: Slash Commands Reference
This document defines the slash commands used in our specification-driven development process.
## /spec create [name]
**Purpose**: Create a new specification document
**Usage**: `/spec create notes-decomposition`
**Process**:
1. Create new spec document in `/specs` folder
2. Use SPEC-XXX numbering format (auto-increment)
3. Include standard spec template:
- Why (reasoning/problem)
- What (affected areas)
- How (high-level approach)
- How to Evaluate (testing/validation)
4. Tag appropriately for knowledge graph
5. Link to related specs/components
**Template**:
```markdown
# SPEC-XXX: [Title]
## Why
[Problem statement and reasoning]
## What
[What is affected or changed]
## How (High Level)
[Approach to implementation]
## How to Evaluate
[Testing/validation procedure]
## Notes
[Additional context as needed]
```
## /spec status
**Purpose**: Show current status of all specifications
**Usage**: `/spec status`
**Process**:
1. Search all specs in `/specs` folder
2. Display table showing:
- Spec number and title
- Status (draft, approved, implementing, complete)
- Assigned agent (if any)
- Last updated
- Dependencies
## /spec implement [name]
**Purpose**: Hand specification to appropriate agent for implementation
**Usage**: `/spec implement SPEC-002`
**Process**:
1. Read the specified spec
2. Analyze requirements to determine appropriate agent:
- Frontend components → vue-developer
- Architecture/system design → system-architect
- Backend/API → python-developer
3. Launch agent with spec context
4. Agent creates implementation plan
5. Update spec with implementation status
## /spec review [name]
**Purpose**: Review implementation against specification criteria
**Usage**: `/spec review SPEC-002`
**Process**:
1. Read original spec and "How to Evaluate" section
2. Examine current implementation
3. Test against success criteria
4. Document gaps or issues
5. Update spec with review results
6. Recommend next actions (complete, revise, iterate)
## Command Extensions
As the process evolves, we may add:
- `/spec link [spec1] [spec2]` - Create dependency links
- `/spec archive [name]` - Archive completed specs
- `/spec template [type]` - Create spec from template
- `/spec search [query]` - Search spec content
## References
- Claude Slash commands: https://docs.anthropic.com/en/docs/claude-code/slash-commands
## Creating a command
Commands are implemented as Claude slash commands, stored in the repo under `.claude/commands/`.
In the following example, we create the `/optimize` command:
```bash
# Create a project command
mkdir -p .claude/commands
echo "Analyze this code for performance issues and suggest optimizations:" > .claude/commands/optimize.md
```
```
--------------------------------------------------------------------------------
/src/basic_memory/repository/observation_repository.py:
--------------------------------------------------------------------------------
```python
"""Repository for managing Observation objects."""
from typing import Dict, List, Sequence
from sqlalchemy import select
from sqlalchemy.ext.asyncio import async_sessionmaker
from basic_memory.models import Observation
from basic_memory.repository.repository import Repository
class ObservationRepository(Repository[Observation]):
"""Repository for Observation model with memory-specific operations."""
def __init__(self, session_maker: async_sessionmaker, project_id: int):
"""Initialize with session maker and project_id filter.
Args:
session_maker: SQLAlchemy session maker
project_id: Project ID to filter all operations by
"""
super().__init__(session_maker, Observation, project_id=project_id)
async def find_by_entity(self, entity_id: int) -> Sequence[Observation]:
"""Find all observations for a specific entity."""
query = select(Observation).filter(Observation.entity_id == entity_id)
result = await self.execute_query(query)
return result.scalars().all()
async def find_by_context(self, context: str) -> Sequence[Observation]:
"""Find observations with a specific context."""
query = select(Observation).filter(Observation.context == context)
result = await self.execute_query(query)
return result.scalars().all()
async def find_by_category(self, category: str) -> Sequence[Observation]:
"""Find observations with a specific context."""
query = select(Observation).filter(Observation.category == category)
result = await self.execute_query(query)
return result.scalars().all()
async def observation_categories(self) -> Sequence[str]:
"""Return a list of all observation categories."""
query = select(Observation.category).distinct()
result = await self.execute_query(query, use_query_options=False)
return result.scalars().all()
async def find_by_entities(self, entity_ids: List[int]) -> Dict[int, List[Observation]]:
"""Find all observations for multiple entities in a single query.
Args:
entity_ids: List of entity IDs to fetch observations for
Returns:
Dictionary mapping entity_id to list of observations
"""
if not entity_ids: # pragma: no cover
return {}
# Query observations for all entities in the list
query = select(Observation).filter(Observation.entity_id.in_(entity_ids))
result = await self.execute_query(query)
observations = result.scalars().all()
# Group observations by entity_id
observations_by_entity = {}
for obs in observations:
if obs.entity_id not in observations_by_entity:
observations_by_entity[obs.entity_id] = []
observations_by_entity[obs.entity_id].append(obs)
return observations_by_entity
```
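A minimal usage sketch for the batched lookup above, assuming a session maker and project id obtained through the application's normal setup:
```python
# Illustrative caller; session_maker and project_id are assumed inputs.
from sqlalchemy.ext.asyncio import async_sessionmaker

from basic_memory.repository.observation_repository import ObservationRepository


async def show_observations(session_maker: async_sessionmaker, project_id: int) -> None:
    repo = ObservationRepository(session_maker, project_id=project_id)
    # One IN-clause query for all three entities instead of three round trips.
    by_entity = await repo.find_by_entities([1, 2, 3])
    for entity_id, observations in by_entity.items():
        print(entity_id, [obs.category for obs in observations])
```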
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_claude_conversations.py:
--------------------------------------------------------------------------------
```python
"""Import command for basic-memory CLI to import chat data from conversations2.json format."""
import asyncio
import json
from pathlib import Path
from typing import Annotated
import typer
from basic_memory.cli.app import claude_app
from basic_memory.config import get_project_config
from basic_memory.importers.claude_conversations_importer import ClaudeConversationsImporter
from basic_memory.markdown import EntityParser, MarkdownProcessor
from loguru import logger
from rich.console import Console
from rich.panel import Panel
console = Console()
async def get_markdown_processor() -> MarkdownProcessor:
"""Get MarkdownProcessor instance."""
config = get_project_config()
entity_parser = EntityParser(config.home)
return MarkdownProcessor(entity_parser)
@claude_app.command(name="conversations", help="Import chat conversations from Claude.ai.")
def import_claude(
conversations_json: Annotated[
Path, typer.Argument(..., help="Path to conversations.json file")
] = Path("conversations.json"),
folder: Annotated[
str, typer.Option(help="The folder to place the files in.")
] = "conversations",
):
"""Import chat conversations from conversations2.json format.
This command will:
1. Read chat data and nested messages
2. Create markdown files for each conversation
3. Format content in clean, readable markdown
After importing, run 'basic-memory sync' to index the new files.
"""
config = get_project_config()
try:
if not conversations_json.exists():
typer.echo(f"Error: File not found: {conversations_json}", err=True)
raise typer.Exit(1)
# Get markdown processor
markdown_processor = asyncio.run(get_markdown_processor())
# Create the importer
importer = ClaudeConversationsImporter(config.home, markdown_processor)
# Process the file
base_path = config.home / folder
console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
# Run the import
with conversations_json.open("r", encoding="utf-8") as file:
json_data = json.load(file)
result = asyncio.run(importer.import_data(json_data, folder))
if not result.success: # pragma: no cover
typer.echo(f"Error during import: {result.error_message}", err=True)
raise typer.Exit(1)
# Show results
console.print(
Panel(
f"[green]Import complete![/green]\n\n"
f"Imported {result.conversations} conversations\n"
f"Containing {result.messages} messages",
expand=False,
)
)
console.print("\nRun 'basic-memory sync' to index the new files.")
except Exception as e:
logger.error("Import failed")
typer.echo(f"Error during import: {e}", err=True)
raise typer.Exit(1)
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/cloud_utils.py:
--------------------------------------------------------------------------------
```python
"""Shared utilities for cloud operations."""
from basic_memory.cli.commands.cloud.api_client import make_api_request
from basic_memory.config import ConfigManager
from basic_memory.schemas.cloud import (
CloudProjectList,
CloudProjectCreateRequest,
CloudProjectCreateResponse,
)
from basic_memory.utils import generate_permalink
class CloudUtilsError(Exception):
"""Exception raised for cloud utility errors."""
pass
async def fetch_cloud_projects() -> CloudProjectList:
"""Fetch list of projects from cloud API.
Returns:
CloudProjectList with projects from cloud
"""
try:
config_manager = ConfigManager()
config = config_manager.config
host_url = config.cloud_host.rstrip("/")
response = await make_api_request(method="GET", url=f"{host_url}/proxy/projects/projects")
return CloudProjectList.model_validate(response.json())
except Exception as e:
raise CloudUtilsError(f"Failed to fetch cloud projects: {e}") from e
async def create_cloud_project(project_name: str) -> CloudProjectCreateResponse:
"""Create a new project on cloud.
Args:
project_name: Name of project to create
Returns:
CloudProjectCreateResponse with project details from API
"""
try:
config_manager = ConfigManager()
config = config_manager.config
host_url = config.cloud_host.rstrip("/")
# Use generate_permalink to ensure consistent naming
project_path = generate_permalink(project_name)
project_data = CloudProjectCreateRequest(
name=project_name,
path=project_path,
set_default=False,
)
response = await make_api_request(
method="POST",
url=f"{host_url}/proxy/projects/projects",
headers={"Content-Type": "application/json"},
json_data=project_data.model_dump(),
)
return CloudProjectCreateResponse.model_validate(response.json())
except Exception as e:
raise CloudUtilsError(f"Failed to create cloud project '{project_name}': {e}") from e
async def sync_project(project_name: str) -> None:
"""Trigger sync for a specific project on cloud.
Args:
project_name: Name of project to sync
"""
try:
from basic_memory.cli.commands.command_utils import run_sync
await run_sync(project=project_name)
except Exception as e:
raise CloudUtilsError(f"Failed to sync project '{project_name}': {e}") from e
async def project_exists(project_name: str) -> bool:
"""Check if a project exists on cloud.
Args:
project_name: Name of project to check
Returns:
True if project exists, False otherwise
"""
try:
projects = await fetch_cloud_projects()
project_names = {p.name for p in projects.projects}
return project_name in project_names
except Exception:
return False
```
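A short sketch of composing these helpers, creating a project only when it is missing and then syncing it; the project name is a placeholder:
```python
# Illustrative composition of the helpers above.
import asyncio

from basic_memory.cli.commands.cloud.cloud_utils import (
    create_cloud_project,
    project_exists,
    sync_project,
)


async def ensure_and_sync(project_name: str) -> None:
    if not await project_exists(project_name):
        await create_cloud_project(project_name)
    await sync_project(project_name)


if __name__ == "__main__":
    asyncio.run(ensure_and_sync("my-notes"))  # placeholder project name
```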
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/memory_router.py:
--------------------------------------------------------------------------------
```python
"""Routes for memory:// URI operations."""
from typing import Annotated, Optional
from fastapi import APIRouter, Query
from loguru import logger
from basic_memory.deps import ContextServiceDep, EntityRepositoryDep
from basic_memory.schemas.base import TimeFrame, parse_timeframe
from basic_memory.schemas.memory import (
GraphContext,
normalize_memory_url,
)
from basic_memory.schemas.search import SearchItemType
from basic_memory.api.routers.utils import to_graph_context
router = APIRouter(prefix="/memory", tags=["memory"])
@router.get("/recent", response_model=GraphContext)
async def recent(
context_service: ContextServiceDep,
entity_repository: EntityRepositoryDep,
type: Annotated[list[SearchItemType] | None, Query()] = None,
depth: int = 1,
timeframe: TimeFrame = "7d",
page: int = 1,
page_size: int = 10,
max_related: int = 10,
) -> GraphContext:
# return all types by default
types = (
[SearchItemType.ENTITY, SearchItemType.RELATION, SearchItemType.OBSERVATION]
if not type
else type
)
logger.debug(
f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
)
# Parse timeframe
since = parse_timeframe(timeframe)
limit = page_size
offset = (page - 1) * page_size
# Build context
context = await context_service.build_context(
types=types, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
)
recent_context = await to_graph_context(
context, entity_repository=entity_repository, page=page, page_size=page_size
)
logger.debug(f"Recent context: {recent_context.model_dump_json()}")
return recent_context
# get_memory_context needs to be declared last so other paths can match
@router.get("/{uri:path}", response_model=GraphContext)
async def get_memory_context(
context_service: ContextServiceDep,
entity_repository: EntityRepositoryDep,
uri: str,
depth: int = 1,
timeframe: Optional[TimeFrame] = None,
page: int = 1,
page_size: int = 10,
max_related: int = 10,
) -> GraphContext:
"""Get rich context from memory:// URI."""
# add the project name from the config to the url as the "host"
# Parse URI
logger.debug(
f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
)
memory_url = normalize_memory_url(uri)
# Parse timeframe
since = parse_timeframe(timeframe) if timeframe else None
limit = page_size
offset = (page - 1) * page_size
# Build context
context = await context_service.build_context(
memory_url, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
)
return await to_graph_context(
context, entity_repository=entity_repository, page=page, page_size=page_size
)
```
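Since this router is mounted under a `/{project}` prefix (see `api/app.py` later in this dump), a recent-activity request from a client might look like the sketch below; the host, port, and project name are assumptions:
```python
# Hypothetical client call against a locally running API.
import httpx

response = httpx.get(
    "http://localhost:8000/main/memory/recent",  # "main" is a placeholder project
    params={"timeframe": "7d", "page": 1, "page_size": 10, "depth": 1},
)
response.raise_for_status()
graph_context = response.json()  # GraphContext as JSON
```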
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
pull_request_target:
types: [opened, synchronize]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.body, '@claude'))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.sender.author_association == 'OWNER' ||
github.event.sender.author_association == 'MEMBER' ||
github.event.sender.author_association == 'COLLABORATOR' ||
github.event.pull_request.author_association == 'OWNER' ||
github.event.pull_request.author_association == 'MEMBER' ||
github.event.pull_request.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
actions: read # Required for Claude to read CI results on PRs
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
# For pull_request_target, checkout the PR head to review the actual changes
ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }}
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
track_progress: true # Enable visual progress tracking
# This is an optional setting that allows Claude to read CI results on PRs
additional_permissions: |
actions: read
# Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
# prompt: 'Update the pull request description to include a summary of changes.'
# Optional: Add claude_args to customize behavior and configuration
# See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
# or https://docs.claude.com/en/docs/claude-code/sdk#command-line for available options
# claude_args: '--model claude-opus-4-1-20250805 --allowed-tools Bash(gh pr:*)'
```
--------------------------------------------------------------------------------
/specs/SPEC-3 Agent Definitions.md:
--------------------------------------------------------------------------------
```markdown
---
title: 'SPEC-3: Agent Definitions'
type: spec
permalink: specs/spec-3-agent-definitions
tags:
- agents
- roles
- process
---
# SPEC-3: Agent Definitions
This document defines the specialist agents used in our specification-driven development process.
## system-architect
**Role**: High-level system design and architectural decisions
**Responsibilities**:
- Create architectural specifications and ADRs
- Analyze system-wide impacts and trade-offs
- Design component interfaces and data flow
- Evaluate technical approaches and patterns
- Document architectural decisions and rationale
**Expertise Areas**:
- System architecture and design patterns
- Technology evaluation and selection
- Scalability and performance considerations
- Integration patterns and API design
- Technical debt and refactoring strategies
**Typical Specs**:
- System architecture overviews
- Component decomposition strategies
- Data flow and state management
- Integration and deployment patterns
## vue-developer
**Role**: Frontend component development and UI implementation
**Responsibilities**:
- Create Vue.js component specifications
- Implement responsive UI components
- Design component APIs and interfaces
- Optimize for performance and accessibility
- Document component usage and patterns
**Expertise Areas**:
- Vue.js 3 Composition API
- Nuxt 3 framework patterns
- shadcn-vue component library
- Responsive design and CSS
- TypeScript integration
- State management with Pinia
**Typical Specs**:
- Individual component specifications
- UI pattern libraries
- Responsive design approaches
- Component interaction flows
## python-developer
**Role**: Backend development and API implementation
**Responsibilities**:
- Create backend service specifications
- Implement APIs and data processing
- Design database schemas and queries
- Optimize performance and reliability
- Document service interfaces and behavior
**Expertise Areas**:
- FastAPI and Python web frameworks
- Database design and operations
- API design and documentation
- Authentication and security
- Performance optimization
- Testing and validation
**Typical Specs**:
- API endpoint specifications
- Database schema designs
- Service integration patterns
- Performance optimization strategies
## Agent Collaboration Patterns
### Handoff Protocol
1. Agent receives spec through `/spec implement [name]`
2. Agent reviews spec and creates implementation plan
3. Agent documents progress and decisions in spec
4. Agent hands off to another agent if cross-domain work needed
5. Final agent updates spec with completion status
### Communication Standards
- All agents update specs through basic-memory MCP tools
- Document decisions and trade-offs in spec notes
- Link related specs and components
- Preserve context for future reference
### Quality Standards
- Follow existing codebase patterns and conventions
- Write tests that validate spec requirements
- Document implementation choices
- Consider maintainability and extensibility
```
--------------------------------------------------------------------------------
/.claude/commands/release/beta.md:
--------------------------------------------------------------------------------
```markdown
# /beta - Create Beta Release
Create a new beta release using the automated justfile target with quality checks and tagging.
## Usage
```
/beta <version>
```
**Parameters:**
- `version` (required): Beta version like `v0.13.2b1` or `v0.13.2rc1`
## Implementation
You are an expert release manager for the Basic Memory project. When the user runs `/beta`, execute the following steps:
### Step 1: Pre-flight Validation
1. Verify version format matches `v\d+\.\d+\.\d+(b\d+|rc\d+)` pattern
2. Check current git status for uncommitted changes
3. Verify we're on the `main` branch
4. Confirm no existing tag with this version
### Step 2: Use Justfile Automation
Execute the automated beta release process:
```bash
just beta <version>
```
The justfile target handles:
- ✅ Beta version format validation (supports b1, b2, rc1, etc.)
- ✅ Git status and branch checks
- ✅ Quality checks (`just check` - lint, format, type-check, tests)
- ✅ Version update in `src/basic_memory/__init__.py`
- ✅ Automatic commit with proper message
- ✅ Tag creation and pushing to GitHub
- ✅ Beta release workflow trigger
### Step 3: Monitor Beta Release
1. Check GitHub Actions workflow starts successfully
2. Monitor workflow at: https://github.com/basicmachines-co/basic-memory/actions
3. Verify PyPI pre-release publication
4. Test beta installation: `uv tool install basic-memory --pre`
### Step 4: Beta Testing Instructions
Provide users with beta testing instructions:
```bash
# Install/upgrade to beta
uv tool install basic-memory --pre
# Or upgrade existing installation
uv tool upgrade basic-memory --prerelease=allow
```
## Version Guidelines
- **First beta**: `v0.13.2b1`
- **Subsequent betas**: `v0.13.2b2`, `v0.13.2b3`, etc.
- **Release candidates**: `v0.13.2rc1`, `v0.13.2rc2`, etc.
- **Final release**: `v0.13.2` (use `/release` command)
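An illustrative helper matching the guidelines above, bumping the beta number on an existing version string; this is a sketch, not part of the justfile automation:
```python
# Hedged sketch: increment a beta version, e.g. v0.13.2b1 -> v0.13.2b2.
import re


def next_beta(version: str) -> str:
    match = re.fullmatch(r"(v\d+\.\d+\.\d+b)(\d+)", version)
    if not match:
        raise ValueError(f"Not a beta version: {version}")
    return f"{match.group(1)}{int(match.group(2)) + 1}"


assert next_beta("v0.13.2b1") == "v0.13.2b2"
```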
## Error Handling
- If `just beta` fails, examine the error output for specific issues
- If quality checks fail, fix issues and retry
- If version format is invalid, correct and retry
- If tag already exists, increment version number
## Success Output
```
✅ Beta Release v0.13.2b1 Created Successfully!
🏷️ Tag: v0.13.2b1
🚀 GitHub Actions: Running
📦 PyPI: Will be available in ~5 minutes as pre-release
Install/test with:
uv tool install basic-memory --pre
Monitor release: https://github.com/basicmachines-co/basic-memory/actions
```
## Beta Testing Workflow
1. **Create beta**: Use `/beta v0.13.2b1`
2. **Test features**: Install and validate new functionality
3. **Fix issues**: Address bugs found during testing
4. **Iterate**: Create `v0.13.2b2` if needed
5. **Release candidate**: Create `v0.13.2rc1` when stable
6. **Final release**: Use `/release v0.13.2` when ready
## Context
- Beta releases are pre-releases for testing new features
- Automatically published to PyPI with pre-release flag
- Uses the automated justfile target for consistency
- Version is automatically updated in `__init__.py`
- Ideal for validating changes before stable release
- Supports both beta (b1, b2) and release candidate (rc1, rc2) versions
```
--------------------------------------------------------------------------------
/.github/workflows/claude-code-review.yml:
--------------------------------------------------------------------------------
```yaml
name: Claude Code Review
on:
pull_request:
types: [opened, synchronize]
# Optional: Only run on specific file changes
# paths:
# - "src/**/*.ts"
# - "src/**/*.tsx"
# - "src/**/*.js"
# - "src/**/*.jsx"
jobs:
claude-review:
# Only run for organization members and collaborators
if: |
github.event.pull_request.author_association == 'OWNER' ||
github.event.pull_request.author_association == 'MEMBER' ||
github.event.pull_request.author_association == 'COLLABORATOR'
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code Review
id: claude-review
uses: anthropics/claude-code-action@v1
with:
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
github_token: ${{ secrets.GITHUB_TOKEN }}
track_progress: true # Enable visual progress tracking
allowed_bots: '*'
prompt: |
Review this Basic Memory PR against our team checklist:
## Code Quality & Standards
- [ ] Follows Basic Memory's coding conventions in CLAUDE.md
- [ ] Python 3.12+ type annotations and async patterns
- [ ] SQLAlchemy 2.0 best practices
- [ ] FastAPI and Typer conventions followed
- [ ] 100-character line length limit maintained
- [ ] No commented-out code blocks
## Testing & Documentation
- [ ] Unit tests for new functions/methods
- [ ] Integration tests for new MCP tools
- [ ] Test coverage for edge cases
- [ ] Documentation updated (README, docstrings)
- [ ] CLAUDE.md updated if conventions change
## Basic Memory Architecture
- [ ] MCP tools follow atomic, composable design
- [ ] Database changes include Alembic migrations
- [ ] Preserves local-first architecture principles
- [ ] Knowledge graph operations maintain consistency
- [ ] Markdown file handling preserves integrity
- [ ] AI-human collaboration patterns followed
## Security & Performance
- [ ] No hardcoded secrets or credentials
- [ ] Input validation for MCP tools
- [ ] Proper error handling and logging
- [ ] Performance considerations addressed
- [ ] No sensitive data in logs or commits
Read the CLAUDE.md file for detailed project context. For each checklist item, verify if it's satisfied and comment on any that need attention. Use inline comments for specific code issues and post a summary with checklist results.
# Allow broader tool access for thorough code review
claude_args: '--allowed-tools "Bash(gh pr:*),Bash(gh issue:*),Bash(gh api:*),Bash(git log:*),Bash(git show:*),Read,Grep,Glob"'
```
--------------------------------------------------------------------------------
/src/basic_memory/models/project.py:
--------------------------------------------------------------------------------
```python
"""Project model for Basic Memory."""
from datetime import datetime, UTC
from typing import Optional
from sqlalchemy import (
Integer,
String,
Text,
Boolean,
DateTime,
Float,
Index,
event,
)
from sqlalchemy.orm import Mapped, mapped_column, relationship
from basic_memory.models.base import Base
from basic_memory.utils import generate_permalink
class Project(Base):
"""Project model for Basic Memory.
A project represents a collection of knowledge entities that are grouped together.
Projects are stored in the app-level database and provide context for all knowledge
operations.
"""
__tablename__ = "project"
__table_args__ = (
# Regular indexes
Index("ix_project_name", "name", unique=True),
Index("ix_project_permalink", "permalink", unique=True),
Index("ix_project_path", "path"),
Index("ix_project_created_at", "created_at"),
Index("ix_project_updated_at", "updated_at"),
)
# Core identity
id: Mapped[int] = mapped_column(Integer, primary_key=True)
name: Mapped[str] = mapped_column(String, unique=True)
description: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
# URL-friendly identifier generated from name
permalink: Mapped[str] = mapped_column(String, unique=True)
# Filesystem path to project directory
path: Mapped[str] = mapped_column(String)
# Status flags
is_active: Mapped[bool] = mapped_column(Boolean, default=True)
is_default: Mapped[Optional[bool]] = mapped_column(Boolean, default=None, nullable=True)
# Timestamps
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True), default=lambda: datetime.now(UTC)
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
)
# Sync optimization - scan watermark tracking
last_scan_timestamp: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
last_file_count: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
# Define relationships to entities, observations, and relations
# These relationships will be established once we add project_id to those models
entities = relationship("Entity", back_populates="project", cascade="all, delete-orphan")
def __repr__(self) -> str: # pragma: no cover
return f"Project(id={self.id}, name='{self.name}', permalink='{self.permalink}', path='{self.path}')"
@event.listens_for(Project, "before_insert")
@event.listens_for(Project, "before_update")
def set_project_permalink(mapper, connection, project):
"""Generate URL-friendly permalink for the project if needed.
This event listener ensures the permalink is always derived from the name,
even if the name changes.
"""
# If the name changed or permalink is empty, regenerate permalink
if not project.permalink or project.permalink != generate_permalink(project.name):
project.permalink = generate_permalink(project.name)
```
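A minimal demonstration of the permalink listener, using a throwaway in-memory SQLite database; it assumes the models package registers the related `Entity` mapper on import and that `generate_permalink` slugifies the name ("My Project" -> "my-project"):
```python
# Illustrative sketch; assumes importing the models package wires up all mappers.
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from basic_memory.models.base import Base
from basic_memory.models.project import Project

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    project = Project(name="My Project", path="/tmp/my-project")
    session.add(project)
    session.commit()  # before_insert fires and fills in the permalink
    print(project.permalink)  # assumed output: "my-project"
```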
--------------------------------------------------------------------------------
/src/basic_memory/api/app.py:
--------------------------------------------------------------------------------
```python
"""FastAPI application for basic-memory knowledge graph API."""
import asyncio
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.exception_handlers import http_exception_handler
from loguru import logger
from basic_memory import __version__ as version
from basic_memory import db
from basic_memory.api.routers import (
directory_router,
importer_router,
knowledge,
management,
memory,
project,
resource,
search,
prompt_router,
)
from basic_memory.config import ConfigManager
from basic_memory.services.initialization import initialize_file_sync, initialize_app
@asynccontextmanager
async def lifespan(app: FastAPI): # pragma: no cover
"""Lifecycle manager for the FastAPI app. Not called in stdio mcp mode"""
app_config = ConfigManager().config
logger.info("Starting Basic Memory API")
await initialize_app(app_config)
# Cache database connections in app state for performance
logger.info("Initializing database and caching connections...")
engine, session_maker = await db.get_or_create_db(app_config.database_path)
app.state.engine = engine
app.state.session_maker = session_maker
logger.info("Database connections cached in app state")
logger.info(f"Sync changes enabled: {app_config.sync_changes}")
if app_config.sync_changes:
# start file sync task in background
app.state.sync_task = asyncio.create_task(initialize_file_sync(app_config))
else:
logger.info("Sync changes disabled. Skipping file sync service.")
# proceed with startup
yield
logger.info("Shutting down Basic Memory API")
if getattr(app.state, "sync_task", None):  # set only when sync_changes is enabled
logger.info("Stopping sync...")
app.state.sync_task.cancel() # pyright: ignore
await db.shutdown_db()
# Initialize FastAPI app
app = FastAPI(
title="Basic Memory API",
description="Knowledge graph API for basic-memory",
version=version,
lifespan=lifespan,
)
# Include routers
app.include_router(knowledge.router, prefix="/{project}")
app.include_router(memory.router, prefix="/{project}")
app.include_router(resource.router, prefix="/{project}")
app.include_router(search.router, prefix="/{project}")
app.include_router(project.project_router, prefix="/{project}")
app.include_router(directory_router.router, prefix="/{project}")
app.include_router(prompt_router.router, prefix="/{project}")
app.include_router(importer_router.router, prefix="/{project}")
# Project resource router works across projects
app.include_router(project.project_resource_router)
app.include_router(management.router)
# Auth routes are handled by FastMCP automatically when auth is enabled
@app.exception_handler(Exception)
async def exception_handler(request, exc): # pragma: no cover
logger.exception(
"API unhandled exception",
url=str(request.url),
method=request.method,
client=request.client.host if request.client else None,
path=request.url.path,
error_type=type(exc).__name__,
error=str(exc),
)
return await http_exception_handler(request, HTTPException(status_code=500, detail=str(exc)))
```