This is page 8 of 23. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│ ├── agents
│ │ ├── python-developer.md
│ │ └── system-architect.md
│ └── commands
│ ├── release
│ │ ├── beta.md
│ │ ├── changelog.md
│ │ ├── release-check.md
│ │ └── release.md
│ ├── spec.md
│ └── test-live.md
├── .dockerignore
├── .github
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── bug_report.md
│ │ ├── config.yml
│ │ ├── documentation.md
│ │ └── feature_request.md
│ └── workflows
│ ├── claude-code-review.yml
│ ├── claude-issue-triage.yml
│ ├── claude.yml
│ ├── dev-release.yml
│ ├── docker.yml
│ ├── pr-title.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── ai-assistant-guide-extended.md
│ ├── character-handling.md
│ ├── cloud-cli.md
│ └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│ ├── SPEC-1 Specification-Driven Development Process.md
│ ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│ ├── SPEC-11 Basic Memory API Performance Optimization.md
│ ├── SPEC-12 OpenTelemetry Observability.md
│ ├── SPEC-13 CLI Authentication with Subscription Validation.md
│ ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│ ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│ ├── SPEC-16 MCP Cloud Service Consolidation.md
│ ├── SPEC-17 Semantic Search with ChromaDB.md
│ ├── SPEC-18 AI Memory Management Tool.md
│ ├── SPEC-19 Sync Performance and Memory Optimization.md
│ ├── SPEC-2 Slash Commands Reference.md
│ ├── SPEC-20 Simplified Project-Scoped Rclone Sync.md
│ ├── SPEC-3 Agent Definitions.md
│ ├── SPEC-4 Notes Web UI Component Architecture.md
│ ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│ ├── SPEC-6 Explicit Project Parameter Architecture.md
│ ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│ ├── SPEC-8 TigrisFS Integration.md
│ ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│ ├── SPEC-9 Signed Header Tenant Information.md
│ └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│ └── basic_memory
│ ├── __init__.py
│ ├── alembic
│ │ ├── alembic.ini
│ │ ├── env.py
│ │ ├── migrations.py
│ │ ├── script.py.mako
│ │ └── versions
│ │ ├── 3dae7c7b1564_initial_schema.py
│ │ ├── 502b60eaa905_remove_required_from_entity_permalink.py
│ │ ├── 5fe1ab1ccebe_add_projects_table.py
│ │ ├── 647e7a75e2cd_project_constraint_fix.py
│ │ ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│ │ ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│ │ ├── b3c3938bacdb_relation_to_name_unique_index.py
│ │ ├── cc7172b46608_update_search_index_schema.py
│ │ └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│ ├── api
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── routers
│ │ │ ├── __init__.py
│ │ │ ├── directory_router.py
│ │ │ ├── importer_router.py
│ │ │ ├── knowledge_router.py
│ │ │ ├── management_router.py
│ │ │ ├── memory_router.py
│ │ │ ├── project_router.py
│ │ │ ├── prompt_router.py
│ │ │ ├── resource_router.py
│ │ │ ├── search_router.py
│ │ │ └── utils.py
│ │ └── template_loader.py
│ ├── cli
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── auth.py
│ │ ├── commands
│ │ │ ├── __init__.py
│ │ │ ├── cloud
│ │ │ │ ├── __init__.py
│ │ │ │ ├── api_client.py
│ │ │ │ ├── bisync_commands.py
│ │ │ │ ├── cloud_utils.py
│ │ │ │ ├── core_commands.py
│ │ │ │ ├── rclone_commands.py
│ │ │ │ ├── rclone_config.py
│ │ │ │ ├── rclone_installer.py
│ │ │ │ ├── upload_command.py
│ │ │ │ └── upload.py
│ │ │ ├── command_utils.py
│ │ │ ├── db.py
│ │ │ ├── import_chatgpt.py
│ │ │ ├── import_claude_conversations.py
│ │ │ ├── import_claude_projects.py
│ │ │ ├── import_memory_json.py
│ │ │ ├── mcp.py
│ │ │ ├── project.py
│ │ │ ├── status.py
│ │ │ └── tool.py
│ │ └── main.py
│ ├── config.py
│ ├── db.py
│ ├── deps.py
│ ├── file_utils.py
│ ├── ignore_utils.py
│ ├── importers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chatgpt_importer.py
│ │ ├── claude_conversations_importer.py
│ │ ├── claude_projects_importer.py
│ │ ├── memory_json_importer.py
│ │ └── utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── entity_parser.py
│ │ ├── markdown_processor.py
│ │ ├── plugins.py
│ │ ├── schemas.py
│ │ └── utils.py
│ ├── mcp
│ │ ├── __init__.py
│ │ ├── async_client.py
│ │ ├── project_context.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── ai_assistant_guide.py
│ │ │ ├── continue_conversation.py
│ │ │ ├── recent_activity.py
│ │ │ ├── search.py
│ │ │ └── utils.py
│ │ ├── resources
│ │ │ ├── ai_assistant_guide.md
│ │ │ └── project_info.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── build_context.py
│ │ ├── canvas.py
│ │ ├── chatgpt_tools.py
│ │ ├── delete_note.py
│ │ ├── edit_note.py
│ │ ├── list_directory.py
│ │ ├── move_note.py
│ │ ├── project_management.py
│ │ ├── read_content.py
│ │ ├── read_note.py
│ │ ├── recent_activity.py
│ │ ├── search.py
│ │ ├── utils.py
│ │ ├── view_note.py
│ │ └── write_note.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── knowledge.py
│ │ ├── project.py
│ │ └── search.py
│ ├── repository
│ │ ├── __init__.py
│ │ ├── entity_repository.py
│ │ ├── observation_repository.py
│ │ ├── project_info_repository.py
│ │ ├── project_repository.py
│ │ ├── relation_repository.py
│ │ ├── repository.py
│ │ └── search_repository.py
│ ├── schemas
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── cloud.py
│ │ ├── delete.py
│ │ ├── directory.py
│ │ ├── importer.py
│ │ ├── memory.py
│ │ ├── project_info.py
│ │ ├── prompt.py
│ │ ├── request.py
│ │ ├── response.py
│ │ ├── search.py
│ │ └── sync_report.py
│ ├── services
│ │ ├── __init__.py
│ │ ├── context_service.py
│ │ ├── directory_service.py
│ │ ├── entity_service.py
│ │ ├── exceptions.py
│ │ ├── file_service.py
│ │ ├── initialization.py
│ │ ├── link_resolver.py
│ │ ├── project_service.py
│ │ ├── search_service.py
│ │ └── service.py
│ ├── sync
│ │ ├── __init__.py
│ │ ├── background_sync.py
│ │ ├── sync_service.py
│ │ └── watch_service.py
│ ├── templates
│ │ └── prompts
│ │ ├── continue_conversation.hbs
│ │ └── search.hbs
│ └── utils.py
├── test-int
│ ├── BENCHMARKS.md
│ ├── cli
│ │ ├── test_project_commands_integration.py
│ │ └── test_version_integration.py
│ ├── conftest.py
│ ├── mcp
│ │ ├── test_build_context_underscore.py
│ │ ├── test_build_context_validation.py
│ │ ├── test_chatgpt_tools_integration.py
│ │ ├── test_default_project_mode_integration.py
│ │ ├── test_delete_note_integration.py
│ │ ├── test_edit_note_integration.py
│ │ ├── test_list_directory_integration.py
│ │ ├── test_move_note_integration.py
│ │ ├── test_project_management_integration.py
│ │ ├── test_project_state_sync_integration.py
│ │ ├── test_read_content_integration.py
│ │ ├── test_read_note_integration.py
│ │ ├── test_search_integration.py
│ │ ├── test_single_project_mcp_integration.py
│ │ └── test_write_note_integration.py
│ ├── test_db_wal_mode.py
│ ├── test_disable_permalinks_integration.py
│ └── test_sync_performance_benchmark.py
├── tests
│ ├── __init__.py
│ ├── api
│ │ ├── conftest.py
│ │ ├── test_async_client.py
│ │ ├── test_continue_conversation_template.py
│ │ ├── test_directory_router.py
│ │ ├── test_importer_router.py
│ │ ├── test_knowledge_router.py
│ │ ├── test_management_router.py
│ │ ├── test_memory_router.py
│ │ ├── test_project_router_operations.py
│ │ ├── test_project_router.py
│ │ ├── test_prompt_router.py
│ │ ├── test_relation_background_resolution.py
│ │ ├── test_resource_router.py
│ │ ├── test_search_router.py
│ │ ├── test_search_template.py
│ │ ├── test_template_loader_helpers.py
│ │ └── test_template_loader.py
│ ├── cli
│ │ ├── conftest.py
│ │ ├── test_cli_tools.py
│ │ ├── test_cloud_authentication.py
│ │ ├── test_ignore_utils.py
│ │ ├── test_import_chatgpt.py
│ │ ├── test_import_claude_conversations.py
│ │ ├── test_import_claude_projects.py
│ │ ├── test_import_memory_json.py
│ │ ├── test_project_add_with_local_path.py
│ │ └── test_upload.py
│ ├── conftest.py
│ ├── db
│ │ └── test_issue_254_foreign_key_constraints.py
│ ├── importers
│ │ ├── test_importer_base.py
│ │ └── test_importer_utils.py
│ ├── markdown
│ │ ├── __init__.py
│ │ ├── test_date_frontmatter_parsing.py
│ │ ├── test_entity_parser_error_handling.py
│ │ ├── test_entity_parser.py
│ │ ├── test_markdown_plugins.py
│ │ ├── test_markdown_processor.py
│ │ ├── test_observation_edge_cases.py
│ │ ├── test_parser_edge_cases.py
│ │ ├── test_relation_edge_cases.py
│ │ └── test_task_detection.py
│ ├── mcp
│ │ ├── conftest.py
│ │ ├── test_obsidian_yaml_formatting.py
│ │ ├── test_permalink_collision_file_overwrite.py
│ │ ├── test_prompts.py
│ │ ├── test_resources.py
│ │ ├── test_tool_build_context.py
│ │ ├── test_tool_canvas.py
│ │ ├── test_tool_delete_note.py
│ │ ├── test_tool_edit_note.py
│ │ ├── test_tool_list_directory.py
│ │ ├── test_tool_move_note.py
│ │ ├── test_tool_read_content.py
│ │ ├── test_tool_read_note.py
│ │ ├── test_tool_recent_activity.py
│ │ ├── test_tool_resource.py
│ │ ├── test_tool_search.py
│ │ ├── test_tool_utils.py
│ │ ├── test_tool_view_note.py
│ │ ├── test_tool_write_note.py
│ │ └── tools
│ │ └── test_chatgpt_tools.py
│ ├── Non-MarkdownFileSupport.pdf
│ ├── repository
│ │ ├── test_entity_repository_upsert.py
│ │ ├── test_entity_repository.py
│ │ ├── test_entity_upsert_issue_187.py
│ │ ├── test_observation_repository.py
│ │ ├── test_project_info_repository.py
│ │ ├── test_project_repository.py
│ │ ├── test_relation_repository.py
│ │ ├── test_repository.py
│ │ ├── test_search_repository_edit_bug_fix.py
│ │ └── test_search_repository.py
│ ├── schemas
│ │ ├── test_base_timeframe_minimum.py
│ │ ├── test_memory_serialization.py
│ │ ├── test_memory_url_validation.py
│ │ ├── test_memory_url.py
│ │ ├── test_schemas.py
│ │ └── test_search.py
│ ├── Screenshot.png
│ ├── services
│ │ ├── test_context_service.py
│ │ ├── test_directory_service.py
│ │ ├── test_entity_service_disable_permalinks.py
│ │ ├── test_entity_service.py
│ │ ├── test_file_service.py
│ │ ├── test_initialization.py
│ │ ├── test_link_resolver.py
│ │ ├── test_project_removal_bug.py
│ │ ├── test_project_service_operations.py
│ │ ├── test_project_service.py
│ │ └── test_search_service.py
│ ├── sync
│ │ ├── test_character_conflicts.py
│ │ ├── test_sync_service_incremental.py
│ │ ├── test_sync_service.py
│ │ ├── test_sync_wikilink_issue.py
│ │ ├── test_tmp_files.py
│ │ ├── test_watch_service_edge_cases.py
│ │ ├── test_watch_service_reload.py
│ │ └── test_watch_service.py
│ ├── test_config.py
│ ├── test_db_migration_deduplication.py
│ ├── test_deps.py
│ ├── test_production_cascade_delete.py
│ ├── test_rclone_commands.py
│ └── utils
│ ├── test_file_utils.py
│ ├── test_frontmatter_obsidian_compatible.py
│ ├── test_parse_tags.py
│ ├── test_permalink_formatting.py
│ ├── test_utf8_handling.py
│ └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
├── api-performance.md
├── background-relations.md
├── basic-memory-home.md
├── bug-fixes.md
├── chatgpt-integration.md
├── cloud-authentication.md
├── cloud-bisync.md
├── cloud-mode-usage.md
├── cloud-mount.md
├── default-project-mode.md
├── env-file-removal.md
├── env-var-overrides.md
├── explicit-project-parameter.md
├── gitignore-integration.md
├── project-root-env-var.md
├── README.md
└── sqlite-performance.md
```
# Files
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/read_note.py:
--------------------------------------------------------------------------------
```python
1 | """Read note tool for Basic Memory MCP server."""
2 |
3 | from textwrap import dedent
4 | from typing import Optional
5 |
6 | from loguru import logger
7 | from fastmcp import Context
8 |
9 | from basic_memory.mcp.async_client import get_client
10 | from basic_memory.mcp.project_context import get_active_project
11 | from basic_memory.mcp.server import mcp
12 | from basic_memory.mcp.tools.search import search_notes
13 | from basic_memory.mcp.tools.utils import call_get
14 | from basic_memory.schemas.memory import memory_url_path
15 | from basic_memory.utils import validate_project_path
16 |
17 |
18 | @mcp.tool(
19 | description="Read a markdown note by title or permalink.",
20 | )
21 | async def read_note(
22 | identifier: str,
23 | project: Optional[str] = None,
24 | page: int = 1,
25 | page_size: int = 10,
26 | context: Context | None = None,
27 | ) -> str:
28 | """Return the raw markdown for a note, or guidance text if no match is found.
29 |
30 | Finds and retrieves a note by its title, permalink, or content search,
31 | returning the raw markdown content including observations, relations, and metadata.
32 |
33 | Project Resolution:
34 | Server resolves projects in this order: Single Project Mode → project parameter → default project.
35 | If project unknown, use list_memory_projects() or recent_activity() first.
36 |
37 | This tool will try multiple lookup strategies to find the most relevant note:
38 | 1. Direct permalink lookup
39 | 2. Title search fallback
40 | 3. Text search as last resort
41 |
42 | Args:
43 | identifier: The title or permalink of the note to read.
44 | Can be a full memory:// URL, a permalink, a title, or search text.
45 | project: Project name to read from. Optional - the server resolves it using
46 | the hierarchy above. If unknown, use list_memory_projects() to discover
47 | available projects.
48 | page: Page number for paginated results (default: 1)
49 | page_size: Number of items per page (default: 10)
50 | context: Optional FastMCP context for performance caching.
51 |
52 | Returns:
53 | The full markdown content of the note if found, or helpful guidance if not found.
54 | Content includes frontmatter, observations, relations, and all markdown formatting.
55 |
56 | Examples:
57 | # Read by permalink
58 | read_note("specs/search-spec", project="my-research")
59 |
60 | # Read by title
61 | read_note("Search Specification", project="work-project")
62 |
63 | # Read with memory URL
64 | read_note("memory://specs/search-spec", project="my-research")
65 |
66 | # Read with pagination
67 | read_note("Project Updates", project="work-project", page=2, page_size=5)
68 |
69 | # Read recent meeting notes
70 | read_note("Weekly Standup", project="team-docs")
71 |
72 | Raises:
73 | HTTPError: If the project doesn't exist or is inaccessible.
74 | (Path-traversal attempts do not raise; they are blocked and reported as an error message.)
75 |
76 | Note:
77 | If the exact note isn't found, this tool provides helpful suggestions
78 | including related notes, search commands, and note creation templates.
79 | """
80 | async with get_client() as client:
81 | # Get and validate the project
82 | active_project = await get_active_project(client, project, context)
83 |
84 | # Validate identifier to prevent path traversal attacks
85 | # We need to check both the raw identifier and the processed path
86 | processed_path = memory_url_path(identifier)
87 | project_path = active_project.home
88 |
89 | if not validate_project_path(identifier, project_path) or not validate_project_path(
90 | processed_path, project_path
91 | ):
92 | logger.warning(
93 | "Attempted path traversal attack blocked",
94 | identifier=identifier,
95 | processed_path=processed_path,
96 | project=active_project.name,
97 | )
98 | return f"# Error\n\nIdentifier '{identifier}' is not allowed - paths must stay within project boundaries"
99 |
100 | project_url = active_project.project_url
101 |
102 | # Get the file via REST API - first try direct permalink lookup
103 | entity_path = memory_url_path(identifier)
104 | path = f"{project_url}/resource/{entity_path}"
105 | logger.info(f"Attempting to read note from Project: {active_project.name} URL: {path}")
106 |
107 | try:
108 | # Try direct lookup first
109 | response = await call_get(client, path, params={"page": page, "page_size": page_size})
110 |
111 | # If successful, return the content
112 | if response.status_code == 200:
113 | logger.info("Returning read_note result from resource: {path}", path=entity_path)
114 | return response.text
115 | except Exception as e: # pragma: no cover
116 | logger.info(f"Direct lookup failed for '{path}': {e}")
117 | # Continue to fallback methods
118 |
119 | # Fallback 1: Try title search via API
120 | logger.info(f"Search title for: {identifier}")
121 | title_results = await search_notes.fn(
122 | query=identifier, search_type="title", project=project, context=context
123 | )
124 |
125 | # Handle both SearchResponse object and error strings
126 | if title_results and hasattr(title_results, "results") and title_results.results:
127 | result = title_results.results[0] # Get the first/best match
128 | if result.permalink:
129 | try:
130 | # Try to fetch the content using the found permalink
131 | path = f"{project_url}/resource/{result.permalink}"
132 | response = await call_get(
133 | client, path, params={"page": page, "page_size": page_size}
134 | )
135 |
136 | if response.status_code == 200:
137 | logger.info(f"Found note by title search: {result.permalink}")
138 | return response.text
139 | except Exception as e: # pragma: no cover
140 | logger.info(
141 | f"Failed to fetch content for found title match {result.permalink}: {e}"
142 | )
143 | else:
144 | logger.info(
145 | f"No results in title search for: {identifier} in project {active_project.name}"
146 | )
147 |
148 | # Fallback 2: Text search as a last resort
149 | logger.info(f"Title search failed, trying text search for: {identifier}")
150 | text_results = await search_notes.fn(
151 | query=identifier, search_type="text", project=project, context=context
152 | )
153 |
154 | # We didn't find a direct match, construct a helpful error message
155 | # Handle both SearchResponse object and error strings
156 | if not text_results or not hasattr(text_results, "results") or not text_results.results:
157 | # No results at all
158 | return format_not_found_message(active_project.name, identifier)
159 | else:
160 | # We found some related results
161 | return format_related_results(active_project.name, identifier, text_results.results[:5])
162 |
163 |
164 | def format_not_found_message(project: str | None, identifier: str) -> str:
165 | """Format a helpful message when no note was found."""
166 | return dedent(f"""
167 | # Note Not Found in {project}: "{identifier}"
168 |
169 | I couldn't find any notes matching "{identifier}". Here are some suggestions:
170 |
171 | ## Check Identifier Type
172 | - If you provided a title, try using the exact permalink instead
173 | - If you provided a permalink, check for typos or try a broader search
174 |
175 | ## Search Instead
176 | Try searching for related content:
177 | ```
178 | search_notes(project="{project}", query="{identifier}")
179 | ```
180 |
181 | ## Recent Activity
182 | Check recently modified notes:
183 | ```
184 | recent_activity(timeframe="7d")
185 | ```
186 |
187 | ## Create New Note
188 | This might be a good opportunity to create a new note on this topic:
189 | ```
190 | write_note(
191 | project="{project}",
192 | title="{identifier.capitalize()}",
193 | content='''
194 | # {identifier.capitalize()}
195 |
196 | ## Overview
197 | [Your content here]
198 |
199 | ## Observations
200 | - [category] [Observation about {identifier}]
201 |
202 | ## Relations
203 | - relates_to [[Related Topic]]
204 | ''',
205 | folder="notes"
206 | )
207 | ```
208 | """)
209 |
210 |
211 | def format_related_results(project: str | None, identifier: str, results) -> str:
212 | """Format a helpful message with related results when an exact match wasn't found."""
213 | message = dedent(f"""
214 | # Note Not Found in {project}: "{identifier}"
215 |
216 | I couldn't find an exact match for "{identifier}", but I found some related notes:
217 |
218 | """)
219 |
220 | for i, result in enumerate(results):
221 | message += dedent(f"""
222 | ## {i + 1}. {result.title}
223 | - **Type**: {result.type.value}
224 | - **Permalink**: {result.permalink}
225 |
226 | You can read this note with:
227 | ```
228 | read_note("{result.permalink}", project="{project}")
229 | ```
230 |
231 | """)
232 |
233 | message += dedent(f"""
234 | ## Try More Specific Lookup
235 | For exact matches, try using the full permalink from one of the results above.
236 |
237 | ## Search For More Results
238 | To see more related content:
239 | ```
240 | search_notes(project="{project}", query="{identifier}")
241 | ```
242 |
243 | ## Create New Note
244 | If none of these match what you're looking for, consider creating a new note:
245 | ```
246 | write_note(
247 | project="{project}",
248 | title="[Your title]",
249 | content="[Your content]",
250 | folder="notes"
251 | )
252 | ```
253 | """)
254 |
255 | return message
256 |
```
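The three-stage lookup above (direct permalink, then title search, then full-text search) is easiest to see from the caller's side. A minimal sketch, assuming a configured server and an existing `specs/search-spec` note (both names are illustrative); like this repo's tests, it invokes the underlying function via `.fn` to bypass the MCP tool wrapper:

```python
import asyncio

from basic_memory.mcp.tools.read_note import read_note


async def main():
    # Exact permalink: satisfied by the direct resource lookup.
    note = await read_note.fn("specs/search-spec", project="my-research")
    print(note)

    # Loose identifier: falls through to title search, then text search.
    # If nothing matches, guidance text is returned instead of an exception.
    maybe = await read_note.fn("search spec draft", project="my-research")
    print(maybe)


asyncio.run(main())
```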
--------------------------------------------------------------------------------
/src/basic_memory/schemas/base.py:
--------------------------------------------------------------------------------
```python
1 | """Core pydantic models for basic-memory entities, observations, and relations.
2 |
3 | This module defines the foundational data structures for the knowledge graph system.
4 | The graph consists of entities (nodes) connected by relations (edges), where each
5 | entity can have multiple observations (facts) attached to it.
6 |
7 | Key Concepts:
8 | 1. Entities are nodes storing factual observations
9 | 2. Relations are directed edges between entities using active voice verbs
10 | 3. Observations are atomic facts/notes about an entity
11 | 4. Everything is stored in both SQLite and markdown files
12 | """
13 |
14 | import os
15 | import mimetypes
16 | import re
17 | from datetime import datetime, timedelta
18 | from pathlib import Path
19 | from typing import List, Optional, Annotated, Dict
20 |
21 | from annotated_types import MinLen, MaxLen
22 | from dateparser import parse
23 |
24 | from pydantic import BaseModel, BeforeValidator, Field, model_validator
25 |
26 | from basic_memory.config import ConfigManager
27 | from basic_memory.file_utils import sanitize_for_filename, sanitize_for_folder
28 | from basic_memory.utils import generate_permalink
29 |
30 |
31 | def to_snake_case(name: str) -> str:
32 | """Convert a string to snake_case.
33 |
34 | Examples:
35 | BasicMemory -> basic_memory
36 | Memory Service -> memory_service
37 | memory-service -> memory_service
38 | Memory_Service -> memory_service
39 | """
40 | name = name.strip()
41 |
42 | # Replace whitespace, hyphens, backslashes, and dots with underscores
43 | s1 = re.sub(r"[\s\-\\.]", "_", name)
44 |
45 | # Insert underscore between camelCase
46 | s2 = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s1)
47 |
48 | # Convert to lowercase
49 | return s2.lower()
50 |
51 |
52 | def parse_timeframe(timeframe: str) -> datetime:
53 | """Parse timeframe with special handling for 'today' and other natural language expressions.
54 |
55 | Enforces a minimum 1-day lookback to handle timezone differences in distributed deployments.
56 |
57 | Args:
58 | timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
59 |
60 | Returns:
61 | datetime: The parsed datetime for the start of the timeframe, timezone-aware in local system timezone
62 | Always returns at least 1 day ago to handle timezone differences.
63 |
64 | Examples:
65 | parse_timeframe('today') -> 2025-06-04 14:50:00-07:00 (1 day ago, not start of today)
66 | parse_timeframe('1h') -> 2025-06-04 14:50:00-07:00 (1 day ago, not 1 hour ago)
67 | parse_timeframe('1d') -> 2025-06-04 14:50:00-07:00 (24 hours ago with local timezone)
68 | parse_timeframe('1 week ago') -> 2025-05-29 14:50:00-07:00 (1 week ago with local timezone)
69 | """
70 | if timeframe.lower() == "today":
71 | # For "today", return 1 day ago to ensure we capture recent activity across timezones
72 | # This handles the case where client and server are in different timezones
73 | now = datetime.now()
74 | one_day_ago = now - timedelta(days=1)
75 | return one_day_ago.astimezone()
76 | else:
77 | # Use dateparser for other formats
78 | parsed = parse(timeframe)
79 | if not parsed:
80 | raise ValueError(f"Could not parse timeframe: {timeframe}")
81 |
82 | # If the parsed datetime is naive, make it timezone-aware in the
83 | # local system timezone; if it already carries a timezone,
84 | # leave it unchanged.
85 | if parsed.tzinfo is None:
86 | parsed = parsed.astimezone()
87 |
88 | # Enforce minimum 1-day lookback to handle timezone differences
89 | # This ensures we don't miss recent activity due to client/server timezone mismatches
90 | now = datetime.now().astimezone()
91 | one_day_ago = now - timedelta(days=1)
92 |
93 | # If the parsed time is more recent than 1 day ago, use 1 day ago instead
94 | if parsed > one_day_ago:
95 | return one_day_ago
96 | else:
97 | return parsed
98 |
99 |
100 | def validate_timeframe(timeframe: str) -> str:
101 | """Convert human readable timeframes to a duration relative to the current time."""
102 | if not isinstance(timeframe, str):
103 | raise ValueError("Timeframe must be a string")
104 |
105 | # Preserve special timeframe strings that need custom handling
106 | special_timeframes = ["today"]
107 | if timeframe.lower() in special_timeframes:
108 | return timeframe.lower()
109 |
110 | # Parse relative time expression using our enhanced parser
111 | parsed = parse_timeframe(timeframe)
112 |
113 | # Convert to duration
114 | now = datetime.now().astimezone()
115 | if parsed > now:
116 | raise ValueError("Timeframe cannot be in the future")
117 |
118 | # Format the duration back to our standard "{days}d" form
119 | days = (now - parsed).days
120 |
121 | # Enforce a reasonable upper limit of one year
122 | if days > 365:
123 | raise ValueError("Timeframe should be <= 1 year")
124 |
125 | return f"{days}d"
126 |
127 |
128 | TimeFrame = Annotated[str, BeforeValidator(validate_timeframe)]
129 |
130 | Permalink = Annotated[str, MinLen(1)]
131 | """Unique identifier in format '{path}/{normalized_name}'."""
132 |
133 |
134 | EntityType = Annotated[str, BeforeValidator(to_snake_case), MinLen(1), MaxLen(200)]
135 | """Classification of entity (e.g., 'person', 'project', 'concept'). """
136 |
137 | ALLOWED_CONTENT_TYPES = {
138 | "text/markdown",
139 | "text/plain",
140 | "application/pdf",
141 | "image/jpeg",
142 | "image/png",
143 | "image/svg+xml",
144 | }
145 |
146 | ContentType = Annotated[
147 | str,
148 | BeforeValidator(str.lower),
149 | Field(pattern=r"^[\w\-\+\.]+/[\w\-\+\.]+$"),
150 | Field(json_schema_extra={"examples": list(ALLOWED_CONTENT_TYPES)}),
151 | ]
152 |
153 |
154 | RelationType = Annotated[str, MinLen(1), MaxLen(200)]
155 | """Type of relationship between entities. Always use active voice present tense."""
156 |
157 | ObservationStr = Annotated[
158 | str,
159 | BeforeValidator(str.strip), # Clean whitespace
160 | MinLen(1), # Ensure non-empty after stripping
161 | MaxLen(1000), # Keep reasonable length
162 | ]
163 |
164 |
165 | class Observation(BaseModel):
166 | """A single observation with category, content, and optional context."""
167 |
168 | category: Optional[str] = None
169 | content: ObservationStr
170 | tags: Optional[List[str]] = Field(default_factory=list)
171 | context: Optional[str] = None
172 |
173 |
174 | class Relation(BaseModel):
175 | """Represents a directed edge between entities in the knowledge graph.
176 |
177 | Relations are directed connections stored in active voice (e.g., "created", "depends_on").
178 | The from_permalink represents the source or actor entity, while to_permalink represents the target
179 | or recipient entity.
180 | """
181 |
182 | from_id: Permalink
183 | to_id: Permalink
184 | relation_type: RelationType
185 | context: Optional[str] = None
186 |
187 |
188 | class Entity(BaseModel):
189 | """Represents a node in our knowledge graph - could be a person, project, concept, etc.
190 |
191 | Each entity has:
192 | - A file path (e.g., "people/jane-doe.md")
193 | - An entity type (for classification)
194 | - A list of observations (facts/notes about the entity)
195 | - Optional relations to other entities
196 | - Optional description for high-level overview
197 | """
198 |
199 | # private field to override permalink
200 | # Use empty string "" as sentinel to indicate permalinks are explicitly disabled
201 | _permalink: Optional[str] = None
202 |
203 | title: str
204 | content: Optional[str] = None
205 | folder: str
206 | entity_type: EntityType = "note"
207 | entity_metadata: Optional[Dict] = Field(default=None, description="Optional metadata")
208 | content_type: ContentType = Field(
209 | description="MIME type of the content (e.g. text/markdown, image/jpeg)",
210 | examples=["text/markdown", "image/jpeg"],
211 | default="text/markdown",
212 | )
213 |
214 | def __init__(self, **data):
215 | data["folder"] = sanitize_for_folder(data.get("folder", ""))
216 | super().__init__(**data)
217 |
218 | @property
219 | def safe_title(self) -> str:
220 | """
221 | A sanitized version of the title, which is safe for use on the filesystem. For example,
222 | a title of "Coupon Enable/Disable Feature" should create the file "Coupon Enable-Disable Feature.md"
223 | instead of creating a file named "Disable Feature.md" beneath the "Coupon Enable" directory.
224 |
225 | Replaces POSIX and/or Windows style slashes as well as a few other characters that are not safe for filenames.
226 | If kebab_filenames is True, then behavior is consistent with transformation used when generating permalink
227 | strings (e.g. "Coupon Enable/Disable Feature" -> "coupon-enable-disable-feature").
228 | """
229 | fixed_title = sanitize_for_filename(self.title)
230 |
231 | app_config = ConfigManager().config
232 | use_kebab_case = app_config.kebab_filenames
233 |
234 | if use_kebab_case:
235 | fixed_title = generate_permalink(file_path=fixed_title, split_extension=False)
236 |
237 | return fixed_title
238 |
239 | @property
240 | def file_path(self):
241 | """Get the file path for this entity based on its permalink."""
242 | safe_title = self.safe_title
243 | if self.content_type == "text/markdown":
244 | return (
245 | os.path.join(self.folder, f"{safe_title}.md") if self.folder else f"{safe_title}.md"
246 | )
247 | else:
248 | return os.path.join(self.folder, safe_title) if self.folder else safe_title
249 |
250 | @property
251 | def permalink(self) -> Optional[Permalink]:
252 | """Get a url friendly path}."""
253 | # Empty string is a sentinel value indicating permalinks are disabled
254 | if self._permalink == "":
255 | return None
256 | return self._permalink or generate_permalink(self.file_path)
257 |
258 | @model_validator(mode="after")
259 | def infer_content_type(self) -> "Entity": # pragma: no cover
260 | if not self.content_type:
261 | path = Path(self.file_path)
262 | if not path.exists():
263 | self.content_type = "text/plain"
264 | else:
265 | mime_type, _ = mimetypes.guess_type(path.name)
266 | self.content_type = mime_type or "text/plain"
267 | return self
268 |
```
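A quick sketch of the normalization and timeframe semantics defined above. Outputs are illustrative: `parse_timeframe` depends on the local clock, and `Entity.file_path` depends on the `kebab_filenames` setting loaded via `ConfigManager`:

```python
from basic_memory.schemas.base import Entity, parse_timeframe, to_snake_case

print(to_snake_case("Memory Service"))  # memory_service
print(to_snake_case("BasicMemory"))     # basic_memory

# Both calls are clamped to at least one day of lookback, so clients and
# servers in different timezones do not miss recent activity.
print(parse_timeframe("today"))       # ~24 hours ago, timezone-aware
print(parse_timeframe("1 week ago"))  # 7 days ago, timezone-aware

# Slashes in a title are sanitized rather than treated as directories.
entity = Entity(title="Coupon Enable/Disable Feature", folder="features")
print(entity.file_path)  # e.g. "features/coupon-enable-disable-feature.md"
print(entity.permalink)  # "features/coupon-enable-disable-feature"
```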
--------------------------------------------------------------------------------
/tests/mcp/test_tool_view_note.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for view_note tool that exercise the full stack with SQLite."""
2 |
3 | from textwrap import dedent
4 | from unittest.mock import MagicMock, patch
5 |
6 | import pytest
7 | import pytest_asyncio
8 |
9 | from basic_memory.mcp.tools import write_note, view_note
10 | from basic_memory.schemas.search import SearchResponse
11 |
12 |
13 | @pytest_asyncio.fixture
14 | async def mock_call_get():
15 | """Mock for call_get to simulate different responses."""
16 | with patch("basic_memory.mcp.tools.read_note.call_get") as mock:
17 | # Default to 404 - not found
18 | mock_response = MagicMock()
19 | mock_response.status_code = 404
20 | mock.return_value = mock_response
21 | yield mock
22 |
23 |
24 | @pytest_asyncio.fixture
25 | async def mock_search():
26 | """Mock for search tool."""
27 | with patch("basic_memory.mcp.tools.read_note.search_notes.fn") as mock:
28 | # Default to empty results
29 | mock.return_value = SearchResponse(results=[], current_page=1, page_size=1)
30 | yield mock
31 |
32 |
33 | @pytest.mark.asyncio
34 | async def test_view_note_basic_functionality(app, test_project):
35 | """Test viewing a note creates an artifact."""
36 | # First create a note
37 | await write_note.fn(
38 | project=test_project.name,
39 | title="Test View Note",
40 | folder="test",
41 | content="# Test View Note\n\nThis is test content for viewing.",
42 | )
43 |
44 | # View the note
45 | result = await view_note.fn("Test View Note", project=test_project.name)
46 |
47 | # Should contain note retrieval message
48 | assert 'Note retrieved: "Test View Note"' in result
49 | assert "Display this note as a markdown artifact for the user" in result
50 | assert "Content:" in result
51 | assert "---" in result
52 |
53 | # Should contain the note content
54 | assert "# Test View Note" in result
55 | assert "This is test content for viewing." in result
56 |
57 |
58 | @pytest.mark.asyncio
59 | async def test_view_note_with_frontmatter_title(app, test_project):
60 | """Test viewing a note extracts title from frontmatter."""
61 | # Create note with frontmatter
62 | content = dedent("""
63 | ---
64 | title: "Frontmatter Title"
65 | tags: [test]
66 | ---
67 |
68 | # Frontmatter Title
69 |
70 | Content with frontmatter title.
71 | """).strip()
72 |
73 | await write_note.fn(
74 | project=test_project.name, title="Frontmatter Title", folder="test", content=content
75 | )
76 |
77 | # View the note
78 | result = await view_note.fn("Frontmatter Title", project=test_project.name)
79 |
80 | # Should show title in retrieval message
81 | assert 'Note retrieved: "Frontmatter Title"' in result
82 | assert "Display this note as a markdown artifact for the user" in result
83 |
84 |
85 | @pytest.mark.asyncio
86 | async def test_view_note_with_heading_title(app, test_project):
87 | """Test viewing a note extracts title from first heading when no frontmatter."""
88 | # Create note with heading but no frontmatter title
89 | content = "# Heading Title\n\nContent with heading title."
90 |
91 | await write_note.fn(
92 | project=test_project.name, title="Heading Title", folder="test", content=content
93 | )
94 |
95 | # View the note
96 | result = await view_note.fn("Heading Title", project=test_project.name)
97 |
98 | # Should show title in retrieval message
99 | assert 'Note retrieved: "Heading Title"' in result
100 | assert "Display this note as a markdown artifact for the user" in result
101 |
102 |
103 | @pytest.mark.asyncio
104 | async def test_view_note_unicode_content(app, test_project):
105 | """Test viewing a note with Unicode content."""
106 | content = "# Unicode Test 🚀\n\nThis note has emoji 🎉 and unicode ♠♣♥♦"
107 |
108 | await write_note.fn(
109 | project=test_project.name, title="Unicode Test 🚀", folder="test", content=content
110 | )
111 |
112 | # View the note
113 | result = await view_note.fn("Unicode Test 🚀", project=test_project.name)
114 |
115 | # Should handle Unicode properly
116 | assert "🚀" in result
117 | assert "🎉" in result
118 | assert "♠♣♥♦" in result
119 | assert 'Note retrieved: "Unicode Test 🚀"' in result
120 |
121 |
122 | @pytest.mark.asyncio
123 | async def test_view_note_by_permalink(app, test_project):
124 | """Test viewing a note by its permalink."""
125 | await write_note.fn(
126 | project=test_project.name,
127 | title="Permalink Test",
128 | folder="test",
129 | content="Content for permalink test.",
130 | )
131 |
132 | # View by permalink
133 | result = await view_note.fn("test/permalink-test", project=test_project.name)
134 |
135 | # Should work with permalink
136 | assert 'Note retrieved: "test/permalink-test"' in result
137 | assert "Content for permalink test." in result
138 | assert "Display this note as a markdown artifact for the user" in result
139 |
140 |
141 | @pytest.mark.asyncio
142 | async def test_view_note_with_memory_url(app, test_project):
143 | """Test viewing a note using a memory:// URL."""
144 | await write_note.fn(
145 | project=test_project.name,
146 | title="Memory URL Test",
147 | folder="test",
148 | content="Testing memory:// URL handling in view_note",
149 | )
150 |
151 | # View with memory:// URL
152 | result = await view_note.fn("memory://test/memory-url-test", project=test_project.name)
153 |
154 | # Should work with memory:// URL
155 | assert 'Note retrieved: "memory://test/memory-url-test"' in result
156 | assert "Testing memory:// URL handling in view_note" in result
157 | assert "Display this note as a markdown artifact for the user" in result
158 |
159 |
160 | @pytest.mark.asyncio
161 | async def test_view_note_not_found(app, test_project):
162 | """Test viewing a non-existent note returns error without artifact."""
163 | # Try to view non-existent note
164 | result = await view_note.fn("NonExistent Note", project=test_project.name)
165 |
166 | # Should return error message without artifact instructions
167 | assert "# Note Not Found" in result
168 | assert "NonExistent Note" in result
169 | assert "Display this note as a markdown artifact" not in result # No artifact for errors
170 | assert "Check Identifier Type" in result
171 | assert "Search Instead" in result
172 |
173 |
174 | @pytest.mark.asyncio
175 | async def test_view_note_pagination(app, test_project):
176 | """Test viewing a note with pagination parameters."""
177 | await write_note.fn(
178 | project=test_project.name,
179 | title="Pagination Test",
180 | folder="test",
181 | content="Content for pagination test.",
182 | )
183 |
184 | # View with pagination
185 | result = await view_note.fn("Pagination Test", page=1, page_size=5, project=test_project.name)
186 |
187 | # Should work with pagination
188 | assert 'Note retrieved: "Pagination Test"' in result
189 | assert "Content for pagination test." in result
190 | assert "Display this note as a markdown artifact for the user" in result
191 |
192 |
193 | @pytest.mark.asyncio
194 | async def test_view_note_project_parameter(app, test_project):
195 | """Test viewing a note with project parameter."""
196 | await write_note.fn(
197 | project=test_project.name,
198 | title="Project Test",
199 | folder="test",
200 | content="Content for project test.",
201 | )
202 |
203 | # View with explicit project
204 | result = await view_note.fn("Project Test", project=test_project.name)
205 |
206 | # Should work with project parameter
207 | assert 'Note retrieved: "Project Test"' in result
208 | assert "Content for project test." in result
209 | assert "Display this note as a markdown artifact for the user" in result
210 |
211 |
212 | @pytest.mark.asyncio
213 | async def test_view_note_artifact_identifier_unique(app, test_project):
214 | """Test that different notes are retrieved correctly with unique identifiers."""
215 | # Create two notes
216 | await write_note.fn(
217 | project=test_project.name, title="Note One", folder="test", content="Content one"
218 | )
219 | await write_note.fn(
220 | project=test_project.name, title="Note Two", folder="test", content="Content two"
221 | )
222 |
223 | # View both notes
224 | result1 = await view_note.fn("Note One", project=test_project.name)
225 | result2 = await view_note.fn("Note Two", project=test_project.name)
226 |
227 | # Should have different note identifiers in retrieval messages
228 | assert 'Note retrieved: "Note One"' in result1
229 | assert 'Note retrieved: "Note Two"' in result2
230 | assert "Content one" in result1
231 | assert "Content two" in result2
232 |
233 |
234 | @pytest.mark.asyncio
235 | async def test_view_note_fallback_identifier_as_title(app, test_project):
236 | """Test that view_note uses identifier as title when no title is extractable."""
237 | # Create a note with no clear title structure
238 | await write_note.fn(
239 | project=test_project.name,
240 | title="Simple Note",
241 | folder="test",
242 | content="Just plain content with no headings or frontmatter title",
243 | )
244 |
245 | # View the note
246 | result = await view_note.fn("Simple Note", project=test_project.name)
247 |
248 | # Should use identifier as title in retrieval message
249 | assert 'Note retrieved: "Simple Note"' in result
250 | assert "Display this note as a markdown artifact for the user" in result
251 |
252 |
253 | @pytest.mark.asyncio
254 | async def test_view_note_direct_success(app, test_project, mock_call_get):
255 | """Test view_note with successful direct permalink lookup."""
256 | # Setup mock for successful response with frontmatter
257 | note_content = dedent("""
258 | ---
259 | title: "Test Note"
260 | ---
261 | # Test Note
262 |
263 | This is a test note.
264 | """).strip()
265 |
266 | mock_response = MagicMock()
267 | mock_response.status_code = 200
268 | mock_response.text = note_content
269 | mock_call_get.return_value = mock_response
270 |
271 | # Call the function
272 | result = await view_note.fn("test/test-note", project=test_project.name)
273 |
274 | # Verify direct lookup was used
275 | mock_call_get.assert_called_once()
276 | assert "test/test-note" in mock_call_get.call_args[0][1]
277 |
278 | # Verify result contains note content
279 | assert 'Note retrieved: "test/test-note"' in result
280 | assert "Display this note as a markdown artifact for the user" in result
281 | assert "This is a test note." in result
282 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/prompt_router.py:
--------------------------------------------------------------------------------
```python
1 | """Router for prompt-related operations.
2 |
3 | This router is responsible for rendering various prompts using Handlebars templates.
4 | It centralizes all prompt formatting logic that was previously in the MCP prompts.
5 | """
6 |
7 | from datetime import datetime, timezone
8 | from fastapi import APIRouter, HTTPException, status
9 | from loguru import logger
10 |
11 | from basic_memory.api.routers.utils import to_graph_context, to_search_results
12 | from basic_memory.api.template_loader import template_loader
13 | from basic_memory.schemas.base import parse_timeframe
14 | from basic_memory.deps import (
15 | ContextServiceDep,
16 | EntityRepositoryDep,
17 | SearchServiceDep,
18 | EntityServiceDep,
19 | )
20 | from basic_memory.schemas.prompt import (
21 | ContinueConversationRequest,
22 | SearchPromptRequest,
23 | PromptResponse,
24 | PromptMetadata,
25 | )
26 | from basic_memory.schemas.search import SearchItemType, SearchQuery
27 |
28 | router = APIRouter(prefix="/prompt", tags=["prompt"])
29 |
30 |
31 | @router.post("/continue-conversation", response_model=PromptResponse)
32 | async def continue_conversation(
33 | search_service: SearchServiceDep,
34 | entity_service: EntityServiceDep,
35 | context_service: ContextServiceDep,
36 | entity_repository: EntityRepositoryDep,
37 | request: ContinueConversationRequest,
38 | ) -> PromptResponse:
39 | """Generate a prompt for continuing a conversation.
40 |
41 | This endpoint takes a topic and/or timeframe and generates a prompt with
42 | relevant context from the knowledge base.
43 |
44 | Args:
45 | request: The request parameters
46 |
47 | Returns:
48 | Formatted continuation prompt with context
49 | """
50 | logger.info(
51 | f"Generating continue conversation prompt, topic: {request.topic}, timeframe: {request.timeframe}"
52 | )
53 |
54 | since = parse_timeframe(request.timeframe) if request.timeframe else None
55 |
56 | # Initialize search results
57 | search_results = []
58 |
59 | # Get data needed for template
60 | if request.topic:
61 | query = SearchQuery(text=request.topic, after_date=request.timeframe)
62 | results = await search_service.search(query, limit=request.search_items_limit)
63 | search_results = await to_search_results(entity_service, results)
64 |
65 | # Build context from results
66 | all_hierarchical_results = []
67 | for result in search_results:
68 | if hasattr(result, "permalink") and result.permalink:
69 | # Get hierarchical context using the new dataclass-based approach
70 | context_result = await context_service.build_context(
71 | result.permalink,
72 | depth=request.depth,
73 | since=since,
74 | max_related=request.related_items_limit,
75 | include_observations=True, # Include observations for entities
76 | )
77 |
78 | # Process results into the schema format
79 | graph_context = await to_graph_context(
80 | context_result, entity_repository=entity_repository
81 | )
82 |
83 | # Add results to our collection (limit to top results for each permalink)
84 | if graph_context.results:
85 | all_hierarchical_results.extend(graph_context.results[:3])
86 |
87 | # Limit to a reasonable number of total results
88 | all_hierarchical_results = all_hierarchical_results[:10]
89 |
90 | template_context = {
91 | "topic": request.topic,
92 | "timeframe": request.timeframe,
93 | "hierarchical_results": all_hierarchical_results,
94 | "has_results": len(all_hierarchical_results) > 0,
95 | }
96 | else:
97 | # If no topic, get recent activity
98 | context_result = await context_service.build_context(
99 | types=[SearchItemType.ENTITY],
100 | depth=request.depth,
101 | since=since,
102 | max_related=request.related_items_limit,
103 | include_observations=True,
104 | )
105 | recent_context = await to_graph_context(context_result, entity_repository=entity_repository)
106 |
107 | hierarchical_results = recent_context.results[:5] # Limit to top 5 recent items
108 |
109 | template_context = {
110 | "topic": f"Recent Activity from ({request.timeframe})",
111 | "timeframe": request.timeframe,
112 | "hierarchical_results": hierarchical_results,
113 | "has_results": len(hierarchical_results) > 0,
114 | }
115 |
116 | try:
117 | # Render template
118 | rendered_prompt = await template_loader.render(
119 | "prompts/continue_conversation.hbs", template_context
120 | )
121 |
122 | # Calculate metadata
123 | # Count items of different types
124 | observation_count = 0
125 | relation_count = 0
126 | entity_count = 0
127 |
128 | # Get the hierarchical results from the template context
129 | hierarchical_results_for_count = template_context.get("hierarchical_results", [])
130 |
131 | # For topic-based search
132 | if request.topic:
133 | for item in hierarchical_results_for_count:
134 | if hasattr(item, "observations"):
135 | observation_count += len(item.observations) if item.observations else 0
136 |
137 | if hasattr(item, "related_results"):
138 | for related in item.related_results or []:
139 | if hasattr(related, "type"):
140 | if related.type == "relation":
141 | relation_count += 1
142 | elif related.type == "entity": # pragma: no cover
143 | entity_count += 1 # pragma: no cover
144 | # For recent activity
145 | else:
146 | for item in hierarchical_results_for_count:
147 | if hasattr(item, "observations"):
148 | observation_count += len(item.observations) if item.observations else 0
149 |
150 | if hasattr(item, "related_results"):
151 | for related in item.related_results or []:
152 | if hasattr(related, "type"):
153 | if related.type == "relation":
154 | relation_count += 1
155 | elif related.type == "entity": # pragma: no cover
156 | entity_count += 1 # pragma: no cover
157 |
158 | # Build metadata
159 | metadata = {
160 | "query": request.topic,
161 | "timeframe": request.timeframe,
162 | "search_count": len(search_results)
163 | if request.topic
164 | else 0, # Original search results count
165 | "context_count": len(hierarchical_results_for_count),
166 | "observation_count": observation_count,
167 | "relation_count": relation_count,
168 | "total_items": (
169 | len(hierarchical_results_for_count)
170 | + observation_count
171 | + relation_count
172 | + entity_count
173 | ),
174 | "search_limit": request.search_items_limit,
175 | "context_depth": request.depth,
176 | "related_limit": request.related_items_limit,
177 | "generated_at": datetime.now(timezone.utc).isoformat(),
178 | }
179 |
180 | prompt_metadata = PromptMetadata(**metadata)
181 |
182 | return PromptResponse(
183 | prompt=rendered_prompt, context=template_context, metadata=prompt_metadata
184 | )
185 | except Exception as e:
186 | logger.error(f"Error rendering continue conversation template: {e}")
187 | raise HTTPException(
188 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
189 | detail=f"Error rendering prompt template: {str(e)}",
190 | )
191 |
192 |
193 | @router.post("/search", response_model=PromptResponse)
194 | async def search_prompt(
195 | search_service: SearchServiceDep,
196 | entity_service: EntityServiceDep,
197 | request: SearchPromptRequest,
198 | page: int = 1,
199 | page_size: int = 10,
200 | ) -> PromptResponse:
201 | """Generate a prompt for search results.
202 |
203 | This endpoint takes a search query and formats the results into a helpful
204 | prompt with context and suggestions.
205 |
206 | Args:
207 | request: The search parameters
208 | page: The page number for pagination
209 | page_size: The number of results per page, defaults to 10
210 |
211 | Returns:
212 | Formatted search results prompt with context
213 | """
214 | logger.info(f"Generating search prompt, query: {request.query}, timeframe: {request.timeframe}")
215 |
216 | limit = page_size
217 | offset = (page - 1) * page_size
218 |
219 | query = SearchQuery(text=request.query, after_date=request.timeframe)
220 | results = await search_service.search(query, limit=limit, offset=offset)
221 | search_results = await to_search_results(entity_service, results)
222 |
223 | template_context = {
224 | "query": request.query,
225 | "timeframe": request.timeframe,
226 | "results": search_results,
227 | "has_results": len(search_results) > 0,
228 | "result_count": len(search_results),
229 | }
230 |
231 | try:
232 | # Render template
233 | rendered_prompt = await template_loader.render("prompts/search.hbs", template_context)
234 |
235 | # Build metadata
236 | metadata = {
237 | "query": request.query,
238 | "timeframe": request.timeframe,
239 | "search_count": len(search_results),
240 | "context_count": len(search_results),
241 | "observation_count": 0, # Search results don't include observations
242 | "relation_count": 0, # Search results don't include relations
243 | "total_items": len(search_results),
244 | "search_limit": limit,
245 | "context_depth": 0, # No context depth for basic search
246 | "related_limit": 0, # No related items for basic search
247 | "generated_at": datetime.now(timezone.utc).isoformat(),
248 | }
249 |
250 | prompt_metadata = PromptMetadata(**metadata)
251 |
252 | return PromptResponse(
253 | prompt=rendered_prompt, context=template_context, metadata=prompt_metadata
254 | )
255 | except Exception as e:
256 | logger.error(f"Error rendering search template: {e}")
257 | raise HTTPException(
258 | status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
259 | detail=f"Error rendering prompt template: {str(e)}",
260 | )
261 |
```
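For orientation, a minimal sketch of exercising the `/prompt/search` endpoint over HTTP. It assumes the API app is served locally on port 8000 and that `SearchPromptRequest` accepts the `query` and `timeframe` fields the handler reads above; pagination travels as query parameters:

```python
import asyncio

import httpx


async def render_search_prompt() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/prompt/search",
            params={"page": 1, "page_size": 10},
            json={"query": "search spec", "timeframe": "7d"},
        )
        resp.raise_for_status()
        body = resp.json()
        # PromptResponse carries the rendered prompt plus count metadata.
        print(body["prompt"])
        print(body["metadata"]["search_count"])


asyncio.run(render_search_prompt())
```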
--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/read_content.py:
--------------------------------------------------------------------------------
```python
1 | """File reading tool for Basic Memory MCP server.
2 |
3 | This module provides tools for reading raw file content directly,
4 | supporting various file types including text, images, and other binary files.
5 | Files are read directly without any knowledge graph processing.
6 | """
7 |
8 | import base64
9 | import io
10 |
11 | from typing import Optional
12 |
13 | from loguru import logger
14 | from PIL import Image as PILImage
15 | from fastmcp import Context
16 |
17 | from basic_memory.mcp.project_context import get_active_project
18 | from basic_memory.mcp.server import mcp
19 | from basic_memory.mcp.async_client import get_client
20 | from basic_memory.mcp.tools.utils import call_get
21 | from basic_memory.schemas.memory import memory_url_path
22 | from basic_memory.utils import validate_project_path
23 |
24 |
25 | def calculate_target_params(content_length):
26 | """Calculate initial quality and size based on input file size"""
27 | target_size = 350000 # Reduced target for more safety margin
28 | ratio = content_length / target_size
29 |
30 | logger.debug(
31 | "Calculating target parameters",
32 | content_length=content_length,
33 | ratio=ratio,
34 | target_size=target_size,
35 | )
36 |
37 | if ratio > 4:
38 | # Very large images - start very aggressive
39 | return 50, 600 # Lower initial quality and size
40 | elif ratio > 2:
41 | return 60, 800
42 | else:
43 | return 70, 1000
44 |
45 |
46 | def resize_image(img, max_size):
47 | """Resize image maintaining aspect ratio"""
48 | original_dimensions = {"width": img.width, "height": img.height}
49 |
50 | if img.width > max_size or img.height > max_size:
51 | ratio = min(max_size / img.width, max_size / img.height)
52 | new_size = (int(img.width * ratio), int(img.height * ratio))
53 | logger.debug("Resizing image", original=original_dimensions, target=new_size, ratio=ratio)
54 | return img.resize(new_size, PILImage.Resampling.LANCZOS)
55 |
56 | logger.debug("No resize needed", dimensions=original_dimensions)
57 | return img
58 |
59 |
60 | def optimize_image(img, content_length, max_output_bytes=350000):
61 | """Iteratively optimize image with aggressive size reduction"""
62 | stats = {
63 | "dimensions": {"width": img.width, "height": img.height},
64 | "mode": img.mode,
65 | "estimated_memory": (img.width * img.height * len(img.getbands())),
66 | }
67 |
68 | initial_quality, initial_size = calculate_target_params(content_length)
69 |
70 | logger.debug(
71 | "Starting optimization",
72 | image_stats=stats,
73 | content_length=content_length,
74 | initial_quality=initial_quality,
75 | initial_size=initial_size,
76 | max_output_bytes=max_output_bytes,
77 | )
78 |
79 | quality = initial_quality
80 | size = initial_size
81 |
82 | # Convert to RGB if needed
83 | if img.mode in ("RGBA", "LA") or (img.mode == "P" and "transparency" in img.info):
84 | img = img.convert("RGB")
85 | logger.debug("Converted to RGB mode")
86 |
87 | iteration = 0
88 | min_size = 300 # Absolute minimum size
89 | min_quality = 20 # Absolute minimum quality
90 |
91 | while True:
92 | iteration += 1
93 | buf = io.BytesIO()
94 | resized = resize_image(img, size)
95 |
96 | resized.save(
97 | buf,
98 | format="JPEG",
99 | quality=quality,
100 | optimize=True,
101 | progressive=True,
102 | subsampling="4:2:0",
103 | )
104 |
105 | output_size = buf.getbuffer().nbytes
106 | reduction_ratio = output_size / content_length
107 |
108 | logger.debug(
109 | "Optimization attempt",
110 | iteration=iteration,
111 | quality=quality,
112 | size=size,
113 | output_bytes=output_size,
114 | target_bytes=max_output_bytes,
115 | reduction_ratio=f"{reduction_ratio:.2f}",
116 | )
117 |
118 | if output_size < max_output_bytes:
119 | logger.info(
120 | "Image optimization complete",
121 | final_size=output_size,
122 | quality=quality,
123 | dimensions={"width": resized.width, "height": resized.height},
124 | reduction_ratio=f"{reduction_ratio:.2f}",
125 | )
126 | return buf.getvalue()
127 |
128 | # Very aggressive reduction for large files
129 | if content_length > 2000000: # 2MB+ # pragma: no cover
130 | quality = max(min_quality, quality - 20)
131 | size = max(min_size, int(size * 0.6))
132 | elif content_length > 1000000: # 1MB+ # pragma: no cover
133 | quality = max(min_quality, quality - 15)
134 | size = max(min_size, int(size * 0.7))
135 | else:
136 | quality = max(min_quality, quality - 10) # pragma: no cover
137 | size = max(min_size, int(size * 0.8)) # pragma: no cover
138 |
139 | logger.debug("Reducing parameters", new_quality=quality, new_size=size) # pragma: no cover
140 |
141 | # If we've hit minimum values and still too big
142 | if quality <= min_quality and size <= min_size: # pragma: no cover
143 | logger.warning(
144 | "Reached minimum parameters",
145 | final_size=output_size,
146 | over_limit_by=output_size - max_output_bytes,
147 | )
148 | return buf.getvalue()
149 |
150 |
151 | @mcp.tool(description="Read a file's raw content by path or permalink")
152 | async def read_content(
153 | path: str, project: Optional[str] = None, context: Context | None = None
154 | ) -> dict:
155 | """Read a file's raw content by path or permalink.
156 |
157 | This tool provides direct access to file content in the knowledge base,
158 | handling different file types appropriately. It uses a stateless architecture:
159 | the project parameter is optional and is resolved by the server.
160 |
161 | Supported file types:
162 | - Text files (markdown, code, etc.) are returned as plain text
163 | - Images are automatically resized/optimized for display
164 | - Other binary files are returned as base64 if below size limits
165 |
166 | Args:
167 | path: The path or permalink to the file. Can be:
168 | - A regular file path (docs/example.md)
169 | - A memory URL (memory://docs/example)
170 | - A permalink (docs/example)
171 | project: Project name to read from. Optional - server will resolve using hierarchy.
172 | If unknown, use list_memory_projects() to discover available projects.
173 | context: Optional FastMCP context for performance caching.
174 |
175 | Returns:
176 | A dictionary with the file content and metadata:
177 | - For text: {"type": "text", "text": "content", "content_type": "text/markdown", "encoding": "utf-8"}
178 | - For images: {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": "base64_data"}}
179 | - For other files: {"type": "document", "source": {"type": "base64", "media_type": "content_type", "data": "base64_data"}}
180 | - For errors: {"type": "error", "error": "error message"}
181 |
182 | Examples:
183 | # Read a markdown file
184 | result = await read_content("docs/project-specs.md")
185 |
186 | # Read an image
187 | image_data = await read_content("assets/diagram.png")
188 |
189 | # Read using memory URL
190 | content = await read_content("memory://docs/architecture")
191 |
192 | # Read configuration file
193 | config = await read_content("config/settings.json")
194 |
195 | # Explicit project specification
196 | result = await read_content("docs/project-specs.md", project="my-project")
197 |
198 | Raises:
199 | HTTPError: If project doesn't exist or is inaccessible
200 | SecurityError: If path attempts path traversal
201 | """
202 | logger.info("Reading file", path=path, project=project)
203 |
204 | async with get_client() as client:
205 | active_project = await get_active_project(client, project, context)
206 | project_url = active_project.project_url
207 |
208 | url = memory_url_path(path)
209 |
210 | # Validate path to prevent path traversal attacks
211 | project_path = active_project.home
212 | if not validate_project_path(url, project_path):
213 | logger.warning(
214 | "Attempted path traversal attack blocked",
215 | path=path,
216 | url=url,
217 | project=active_project.name,
218 | )
219 | return {
220 | "type": "error",
221 | "error": f"Path '{path}' is not allowed - paths must stay within project boundaries",
222 | }
223 |
224 | response = await call_get(client, f"{project_url}/resource/{url}")
225 | content_type = response.headers.get("content-type", "application/octet-stream")
226 | content_length = int(response.headers.get("content-length", 0))
227 |
228 | logger.debug("Resource metadata", content_type=content_type, size=content_length, path=path)
229 |
230 | # Handle text or json
231 | if content_type.startswith("text/") or content_type == "application/json":
232 | logger.debug("Processing text resource")
233 | return {
234 | "type": "text",
235 | "text": response.text,
236 | "content_type": content_type,
237 | "encoding": "utf-8",
238 | }
239 |
240 | # Handle images
241 | elif content_type.startswith("image/"):
242 | logger.debug("Processing image")
243 | img = PILImage.open(io.BytesIO(response.content))
244 | img_bytes = optimize_image(img, content_length)
245 |
246 | return {
247 | "type": "image",
248 | "source": {
249 | "type": "base64",
250 | "media_type": "image/jpeg",
251 | "data": base64.b64encode(img_bytes).decode("utf-8"),
252 | },
253 | }
254 |
255 | # Handle other file types
256 | else:
257 | logger.debug(f"Processing binary resource content_type {content_type}")
258 | if content_length > 350000: # pragma: no cover
259 | logger.warning("Document too large for response", size=content_length)
260 | return {
261 | "type": "error",
262 | "error": f"Document size {content_length} bytes exceeds maximum allowed size",
263 | }
264 | return {
265 | "type": "document",
266 | "source": {
267 | "type": "base64",
268 | "media_type": content_type,
269 | "data": base64.b64encode(response.content).decode("utf-8"),
270 | },
271 | }
272 |
```
--------------------------------------------------------------------------------
/v15-docs/sqlite-performance.md:
--------------------------------------------------------------------------------
```markdown
1 | # SQLite Performance Improvements
2 |
3 | **Status**: Performance Enhancement
4 | **PR**: #316
5 | **Impact**: Faster database operations, better concurrency
6 |
7 | ## What's New
8 |
9 | v0.15.0 enables **Write-Ahead Logging (WAL) mode** for SQLite and adds Windows-specific optimizations, significantly improving performance and concurrent access.
10 |
11 | ## Key Changes
12 |
13 | ### 1. WAL Mode Enabled
14 |
15 | **Write-Ahead Logging (WAL)** is now enabled by default:
16 |
17 | ```sql
18 | -- Applied automatically on database initialization
19 | PRAGMA journal_mode=WAL;
20 | ```
21 |
22 | **Benefits:**
23 | - **Better concurrency:** Readers don't block writers
24 | - **Faster writes:** Transactions commit faster
25 | - **Crash resilience:** Better recovery from crashes
26 | - **Reduced disk I/O:** Fewer fsync operations
27 |
28 | ### 2. Windows Optimizations
29 |
30 | Additional Windows-specific settings:
31 |
32 | ```sql
33 | -- Windows-specific SQLite settings
34 | PRAGMA synchronous=NORMAL;  -- balanced durability/performance
35 | PRAGMA cache_size=-2000;    -- 2MB cache (negative values mean KiB)
36 | PRAGMA temp_store=MEMORY;   -- temp tables in memory
37 | ```
38 |
39 | ## Performance Impact
40 |
41 | ### Before (DELETE mode)
42 |
43 | ```sql
44 | -- Old journal mode
45 | PRAGMA journal_mode=DELETE;
46 | 
47 | -- Characteristics:
48 | -- - Writers block readers
49 | -- - Readers block writers
50 | -- - Slower concurrent access
51 | -- - More disk I/O
52 | ```
53 |
54 | **Measured impact:**
55 | - Concurrent read/write: **Serialized (slow)**
56 | - Write speed: **Baseline**
57 | - Crash recovery: **Good**
58 |
59 | ### After (WAL mode)
60 |
61 | ```sql
62 | -- New journal mode
63 | PRAGMA journal_mode=WAL;
64 | 
65 | -- Characteristics:
66 | -- - Readers don't block writers
67 | -- - Writers don't block readers
68 | -- - Faster concurrent access
69 | -- - Reduced disk I/O
70 | ```
71 |
72 | **Measured impact:**
73 | - Concurrent read/write: **Parallel (fast)**
74 | - Write speed: **Up to 2-3x faster**
75 | - Crash recovery: **Excellent**
76 |
77 | ## How WAL Works
78 |
79 | ### Traditional DELETE Mode
80 |
81 | ```
82 | Write Transaction:
83 | 1. Lock database
84 | 2. Write to journal file
85 | 3. Modify database
86 | 4. Delete journal
87 | 5. Unlock database
88 |
89 | Problem: Readers wait for writers
90 | ```
91 |
92 | ### WAL Mode
93 |
94 | ```
95 | Write Transaction:
96 | 1. Append changes to WAL file
97 | 2. Commit (fast)
98 | 3. Periodically checkpoint WAL → database
99 |
100 | Benefit: Readers read from database while WAL is being written
101 | ```
102 |
103 | ### Checkpoint Process
104 |
105 | WAL file periodically merged back to database:
106 |
107 | ```sql
108 | -- Automatic checkpointing:
109 | -- - triggered at ~1000 pages in the WAL
110 | -- - or manual: PRAGMA wal_checkpoint(TRUNCATE);
111 | ```
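
To observe what a checkpoint actually did, the same PRAGMA can be queried from Python. A minimal sketch using only the standard library's `sqlite3` module (illustrative; Basic Memory itself goes through its async database layer):

```python
import sqlite3

conn = sqlite3.connect("memory.db")
# PRAGMA wal_checkpoint returns one row: (busy, wal_pages, pages_checkpointed)
busy, wal_pages, checkpointed = conn.execute(
    "PRAGMA wal_checkpoint(PASSIVE);"
).fetchone()
print(f"busy={busy}, wal_pages={wal_pages}, checkpointed={checkpointed}")
conn.close()
```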
112 |
113 | ## Database Files
114 |
115 | ### Before WAL
116 |
117 | ```bash
118 | ~/
119 | └── .basic-memory/
120 | └── memory.db # Single database file
121 | ```
122 |
123 | ### After WAL
124 |
125 | ```bash
126 | ~/.basic-memory/
127 | ├── memory.db # Main database
128 | ├── memory.db-wal # Write-ahead log
129 | └── memory.db-shm # Shared memory file
130 | ```
131 |
132 | **Important:** All three files are required for the database to function.
133 |
134 | ## Use Cases
135 |
136 | ### 1. Concurrent MCP Servers
137 |
138 | **Before (slow):**
139 | ```
140 | # Multiple MCP servers sharing database
141 | Server A: Reading... (blocks Server B)
142 | Server B: Waiting to write...
143 | ```
144 |
145 | **After (fast):**
146 | ```
147 | # Concurrent access
148 | Server A: Reading (doesn't block)
149 | Server B: Writing (doesn't block)
150 | Server C: Reading (doesn't block)
151 | ```
152 |
153 | ### 2. Real-Time Sync
154 |
155 | **Before:**
156 | ```bash
157 | # Sync blocks reads
158 | bm sync & # Background sync
159 | bm tools search ... # Waits for sync
160 | ```
161 |
162 | **After:**
163 | ```bash
164 | # Sync doesn't block
165 | bm sync & # Background sync
166 | bm tools search ... # Runs concurrently
167 | ```
168 |
169 | ### 3. Large Knowledge Bases
170 |
171 | **Before:**
172 | - Large writes cause delays
173 | - Readers wait during bulk updates
174 | - Slow performance on large datasets
175 |
176 | **After:**
177 | - Large writes don't block reads
178 | - Readers continue during bulk updates
179 | - Better performance on large datasets
180 |
181 | ## Configuration
182 |
183 | ### WAL Mode (Default)
184 |
185 | Enabled automatically:
186 |
187 | ```python
188 | # Basic Memory applies on initialization
189 | async def init_db():
190 | await db.execute("PRAGMA journal_mode=WAL")
191 | await db.execute("PRAGMA synchronous=NORMAL")
192 | ```
193 |
194 | ### Verify WAL Mode
195 |
196 | ```bash
197 | # Check journal mode
198 | sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"
199 | # → wal
200 | ```
201 |
202 | ### Manual Configuration (Advanced)
203 |
204 | ```python
205 | from basic_memory.db import get_db
206 |
207 | # Get database connection
208 | db = await get_db()
209 |
210 | # Check settings
211 | result = await db.execute("PRAGMA journal_mode")
212 | print(result) # → wal
213 |
214 | result = await db.execute("PRAGMA synchronous")
215 | print(result) # → 1 (NORMAL)
216 | ```
217 |
218 | ## Platform-Specific Optimizations
219 |
220 | ### Windows
221 |
222 | ```python
223 | # Windows-specific settings
224 | PRAGMA synchronous=NORMAL # Balance safety/speed
225 | PRAGMA temp_store=MEMORY # Faster temp operations
226 | PRAGMA cache_size=-2000 # 2MB cache
227 | ```
228 |
229 | **Benefits on Windows:**
230 | - Faster on NTFS
231 | - Better with Windows Defender
232 | - Improved antivirus compatibility
233 |
234 | ### macOS/Linux
235 |
236 | ```python
237 | # Unix-specific (defaults work well)
238 | PRAGMA journal_mode=WAL
239 | PRAGMA synchronous=NORMAL
240 | ```
241 |
242 | **Benefits:**
243 | - Faster on APFS/ext4
244 | - Better with Spotlight and file indexing
245 | - Improved filesystem syncing
246 |
247 | ## Maintenance
248 |
249 | ### Checkpoint WAL File
250 |
251 | WAL auto-checkpoints, but you can force it:
252 |
253 | ```python
254 | # Python
255 | from basic_memory.db import get_db
256 |
257 | db = await get_db()
258 | await db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
259 | ```
260 |
261 | ```bash
262 | # Command line
263 | sqlite3 ~/.basic-memory/memory.db "PRAGMA wal_checkpoint(TRUNCATE);"
264 | ```
265 |
266 | **When to checkpoint:**
267 | - Before backup
268 | - After large bulk operations
269 | - When WAL file grows large
270 |
271 | ### Backup Considerations
272 |
273 | **Wrong way (incomplete):**
274 | ```bash
275 | # ✗ Only copies main file, misses WAL
276 | cp ~/.basic-memory/memory.db backup.db
277 | ```
278 |
279 | **Right way (complete):**
280 | ```bash
281 | # ✓ Checkpoint first, then backup
282 | sqlite3 ~/.basic-memory/memory.db "PRAGMA wal_checkpoint(TRUNCATE);"
283 | cp ~/.basic-memory/memory.db* backup/
284 |
285 | # Or use SQLite backup command
286 | sqlite3 ~/.basic-memory/memory.db ".backup backup.db"
287 | ```
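
The same safe snapshot is available from Python via the standard library's online backup API, which copies a live database (WAL contents included) without blocking writers. A minimal sketch, not Basic Memory's own tooling:

```python
import sqlite3

src = sqlite3.connect("memory.db")
dst = sqlite3.connect("backup.db")
with dst:
    src.backup(dst)  # consistent snapshot, safe while WAL is active
src.close()
dst.close()
```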
288 |
289 | ### Monitoring WAL Size
290 |
291 | ```python
292 | import os
293 |
294 | wal_file = os.path.expanduser("~/.basic-memory/memory.db-wal")
295 | if os.path.exists(wal_file):
296 | size_mb = os.path.getsize(wal_file) / (1024 * 1024)
297 | print(f"WAL size: {size_mb:.2f} MB")
298 |
299 | if size_mb > 10: # More than 10MB
300 | # Consider checkpointing ("db" is an open connection, e.g. from get_db() above)
301 | db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
302 | ```
303 |
304 | ## Troubleshooting
305 |
306 | ### Database Locked Error
307 |
308 | **Problem:** Still seeing "database is locked" errors
309 |
310 | **Possible causes:**
311 | 1. WAL mode not enabled
312 | 2. Network filesystem (NFS, SMB)
313 | 3. Transaction timeout
314 |
315 | **Solutions:**
316 |
317 | ```bash
318 | # 1. Verify WAL mode
319 | sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"
320 |
321 | # 2. Check filesystem (WAL requires local filesystem)
322 | df -T ~/.basic-memory/memory.db
323 |
324 | # 3. Increase timeout if needed, from Python code:
325 | #
326 | #    db.execute("PRAGMA busy_timeout=10000")  # 10 seconds
327 | ```
328 |
329 | ### WAL File Growing Large
330 |
331 | **Problem:** memory.db-wal keeps growing
332 |
333 | **Checkpoint more frequently:**
334 |
335 | ```python
336 | # Automatic checkpoint at smaller size
337 | db.execute("PRAGMA wal_autocheckpoint=100") # Every 100 pages
338 |
339 | # Or manual checkpoint
340 | db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
341 | ```
342 |
343 | ### Network Filesystem Issues
344 |
345 | **Problem:** Using WAL on NFS/SMB
346 |
347 | **Limitation:** WAL requires local filesystem with proper locking
348 |
349 | **Solution:**
350 | ```bash
351 | # Option 1: Use local filesystem
352 | mv ~/.basic-memory /local/path/.basic-memory
353 |
354 | # Option 2: Fallback to DELETE mode (slower but works)
355 | sqlite3 memory.db "PRAGMA journal_mode=DELETE"
356 | ```
357 |
358 | ## Performance Benchmarks
359 |
360 | ### Concurrent Reads/Writes
361 |
362 | **Before WAL:**
363 | ```
364 | Test: 1 writer + 5 readers
365 | Result: Serialized access
366 | Time: 10.5 seconds
367 | ```
368 |
369 | **After WAL:**
370 | ```
371 | Test: 1 writer + 5 readers
372 | Result: Concurrent access
373 | Time: 3.2 seconds (3.3x faster)
374 | ```
375 |
376 | ### Bulk Operations
377 |
378 | **Before WAL:**
379 | ```
380 | Test: Import 1000 notes
381 | Result: 15.2 seconds
382 | ```
383 |
384 | **After WAL:**
385 | ```
386 | Test: Import 1000 notes
387 | Result: 5.8 seconds (2.6x faster)
388 | ```
389 |
390 | ### Search Performance
391 |
392 | **Before WAL (with concurrent writes):**
393 | ```
394 | Test: Full-text search during sync
395 | Result: Blocked, 2.1 seconds
396 | ```
397 |
398 | **After WAL (with concurrent writes):**
399 | ```
400 | Test: Full-text search during sync
401 | Result: Concurrent, 0.4 seconds (5.3x faster)
402 | ```
403 |
404 | ## Best Practices
405 |
406 | ### 1. Let WAL Auto-Checkpoint
407 |
408 | Default auto-checkpointing works well:
409 | ```python
410 | # Default: checkpoint at ~1000 pages
411 | # Usually optimal, don't change unless needed
412 | ```
413 |
414 | ### 2. Checkpoint Before Backup
415 |
416 | ```bash
417 | # Always checkpoint before backup
418 | sqlite3 memory.db "PRAGMA wal_checkpoint(TRUNCATE)"
419 | cp memory.db* backup/
420 | ```
421 |
422 | ### 3. Monitor WAL Size
423 |
424 | ```bash
425 | # Check WAL size periodically
426 | ls -lh ~/.basic-memory/memory.db-wal
427 |
428 | # If > 50MB, consider more frequent checkpoints
429 | ```
430 |
431 | ### 4. Use Local Filesystem
432 |
433 | ```bash
434 | # ✓ Good: Local SSD/HDD
435 | /home/user/.basic-memory/
436 |
437 | # ✗ Bad: Network filesystem
438 | /mnt/nfs/home/.basic-memory/
439 | ```
440 |
441 | ### 5. Don't Delete WAL Files
442 |
443 | ```bash
444 | # ✗ Never delete these manually
445 | # memory.db-wal
446 | # memory.db-shm
447 |
448 | # Let SQLite manage them
449 | ```
450 |
451 | ## Advanced Configuration
452 |
453 | ### Custom Checkpoint Interval
454 |
455 | ```python
456 | # Checkpoint more frequently (smaller WAL)
457 | db.execute("PRAGMA wal_autocheckpoint=100")
458 |
459 | # Checkpoint less frequently (larger WAL, fewer interruptions)
460 | db.execute("PRAGMA wal_autocheckpoint=10000")
461 | ```
462 |
463 | ### Synchronous Modes
464 |
465 | ```python
466 | # Modes (in order of durability vs speed):
467 | db.execute("PRAGMA synchronous=OFF") # Fastest, least safe
468 | db.execute("PRAGMA synchronous=NORMAL") # Balanced (default)
469 | db.execute("PRAGMA synchronous=FULL") # Safest, slowest
470 | ```
471 |
472 | ### Cache Size
473 |
474 | ```python
475 | # Larger cache = faster, more memory
476 | db.execute("PRAGMA cache_size=-10000") # 10MB cache
477 | db.execute("PRAGMA cache_size=-50000") # 50MB cache
478 | ```
479 |
480 | ## Migration from v0.14.x
481 |
482 | ### Automatic Migration
483 |
484 | **First run on v0.15.0:**
485 | ```bash
486 | bm sync
487 | # → Automatically converts to WAL mode
488 | # → Creates memory.db-wal and memory.db-shm
489 | ```
490 |
491 | **No action required** - migration is automatic
492 |
493 | ### Verifying Migration
494 |
495 | ```bash
496 | # Check mode changed
497 | sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"
498 | # → wal (was: delete)
499 |
500 | # Check new files exist
501 | ls -la ~/.basic-memory/memory.db*
502 | # → memory.db
503 | # → memory.db-wal
504 | # → memory.db-shm
505 | ```
506 |
507 | ## See Also
508 |
509 | - SQLite WAL documentation: https://www.sqlite.org/wal.html
510 | - `api-performance.md` - API-level optimizations
511 | - `background-relations.md` - Concurrent processing improvements
512 | - Database optimization guide
513 |
```
--------------------------------------------------------------------------------
/tests/repository/test_search_repository_edit_bug_fix.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the search repository edit bug fix.
2 |
3 | This test reproduces the critical bug where editing notes causes them to disappear
4 | from the search index due to missing project_id filter in index_item() method.
5 | """
6 |
7 | from datetime import datetime, timezone
8 |
9 | import pytest
10 | import pytest_asyncio
11 |
12 | from basic_memory.models.project import Project
13 | from basic_memory.repository.search_repository import SearchRepository, SearchIndexRow
14 | from basic_memory.schemas.search import SearchItemType
15 |
16 |
17 | @pytest_asyncio.fixture
18 | async def second_test_project(project_repository):
19 | """Create a second project for testing project isolation during edits."""
20 | project_data = {
21 | "name": "Second Edit Test Project",
22 | "description": "Another project for testing edit bug",
23 | "path": "/second/edit/test/path",
24 | "is_active": True,
25 | "is_default": None,
26 | }
27 | return await project_repository.create(project_data)
28 |
29 |
30 | @pytest_asyncio.fixture
31 | async def second_search_repo(session_maker, second_test_project):
32 | """Create a search repository for the second project."""
33 | return SearchRepository(session_maker, project_id=second_test_project.id)
34 |
35 |
36 | @pytest.mark.asyncio
37 | async def test_index_item_respects_project_isolation_during_edit():
38 | """Test that index_item() doesn't delete records from other projects during edits.
39 |
40 | This test reproduces the critical bug where editing a note in one project
41 | would delete search index entries with the same permalink from ALL projects,
42 | causing notes to disappear from the search index.
43 | """
44 | from basic_memory import db
45 | from basic_memory.models.base import Base
46 | from basic_memory.repository.search_repository import SearchRepository
47 | from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker
48 |
49 | # Create a separate in-memory database for this test
50 | engine = create_async_engine("sqlite+aiosqlite:///:memory:")
51 | session_maker = async_sessionmaker(engine, expire_on_commit=False)
52 |
53 | # Create the database schema
54 | async with engine.begin() as conn:
55 | await conn.run_sync(Base.metadata.create_all)
56 |
57 | # Create two projects
58 | async with db.scoped_session(session_maker) as session:
59 | project1 = Project(
60 | name="Project 1",
61 | description="First project",
62 | path="/project1/path",
63 | is_active=True,
64 | is_default=True,
65 | )
66 | project2 = Project(
67 | name="Project 2",
68 | description="Second project",
69 | path="/project2/path",
70 | is_active=True,
71 | is_default=False,
72 | )
73 | session.add(project1)
74 | session.add(project2)
75 | await session.flush()
76 |
77 | project1_id = project1.id
78 | project2_id = project2.id
79 | await session.commit()
80 |
81 | # Create search repositories for both projects
82 | repo1 = SearchRepository(session_maker, project_id=project1_id)
83 | repo2 = SearchRepository(session_maker, project_id=project2_id)
84 |
85 | # Initialize search index
86 | await repo1.init_search_index()
87 |
88 | # Create two notes with the SAME permalink in different projects
89 | # This simulates the same note name/structure across different projects
90 | same_permalink = "notes/test-note"
91 |
92 | search_row1 = SearchIndexRow(
93 | id=1,
94 | type=SearchItemType.ENTITY.value,
95 | title="Test Note in Project 1",
96 | content_stems="project 1 content original",
97 | content_snippet="This is the original content in project 1",
98 | permalink=same_permalink,
99 | file_path="notes/test_note.md",
100 | entity_id=1,
101 | metadata={"entity_type": "note"},
102 | created_at=datetime.now(timezone.utc),
103 | updated_at=datetime.now(timezone.utc),
104 | project_id=project1_id,
105 | )
106 |
107 | search_row2 = SearchIndexRow(
108 | id=2,
109 | type=SearchItemType.ENTITY.value,
110 | title="Test Note in Project 2",
111 | content_stems="project 2 content original",
112 | content_snippet="This is the original content in project 2",
113 | permalink=same_permalink, # SAME permalink as project 1
114 | file_path="notes/test_note.md",
115 | entity_id=2,
116 | metadata={"entity_type": "note"},
117 | created_at=datetime.now(timezone.utc),
118 | updated_at=datetime.now(timezone.utc),
119 | project_id=project2_id,
120 | )
121 |
122 | # Index both items in their respective projects
123 | await repo1.index_item(search_row1)
124 | await repo2.index_item(search_row2)
125 |
126 | # Verify both projects can find their respective notes
127 | results1_before = await repo1.search(search_text="project 1 content")
128 | assert len(results1_before) == 1
129 | assert results1_before[0].title == "Test Note in Project 1"
130 | assert results1_before[0].project_id == project1_id
131 |
132 | results2_before = await repo2.search(search_text="project 2 content")
133 | assert len(results2_before) == 1
134 | assert results2_before[0].title == "Test Note in Project 2"
135 | assert results2_before[0].project_id == project2_id
136 |
137 | # Now simulate editing the note in project 1 (which re-indexes it)
138 | # This would trigger the bug where the DELETE query doesn't filter by project_id
139 | edited_search_row1 = SearchIndexRow(
140 | id=1,
141 | type=SearchItemType.ENTITY.value,
142 | title="Test Note in Project 1",
143 | content_stems="project 1 content EDITED", # Changed content
144 | content_snippet="This is the EDITED content in project 1",
145 | permalink=same_permalink,
146 | file_path="notes/test_note.md",
147 | entity_id=1,
148 | metadata={"entity_type": "note"},
149 | created_at=datetime.now(timezone.utc),
150 | updated_at=datetime.now(timezone.utc),
151 | project_id=project1_id,
152 | )
153 |
154 | # Re-index the edited note in project 1
155 | # BEFORE THE FIX: This would delete the note from project 2 as well!
156 | await repo1.index_item(edited_search_row1)
157 |
158 | # Verify project 1 has the edited version
159 | results1_after = await repo1.search(search_text="project 1 content EDITED")
160 | assert len(results1_after) == 1
161 | assert results1_after[0].title == "Test Note in Project 1"
162 | assert "EDITED" in results1_after[0].content_snippet
163 |
164 | # CRITICAL TEST: Verify project 2's note is still there (the bug would delete it)
165 | results2_after = await repo2.search(search_text="project 2 content")
166 | assert len(results2_after) == 1, "Project 2's note disappeared after editing project 1's note!"
167 | assert results2_after[0].title == "Test Note in Project 2"
168 | assert results2_after[0].project_id == project2_id
169 | assert "original" in results2_after[0].content_snippet # Should still be original
170 |
171 | # Double-check: project 1 should not be able to see project 2's note
172 | cross_search = await repo1.search(search_text="project 2 content")
173 | assert len(cross_search) == 0
174 |
175 | await engine.dispose()
176 |
177 |
178 | @pytest.mark.asyncio
179 | async def test_index_item_updates_existing_record_same_project():
180 | """Test that index_item() correctly updates existing records within the same project."""
181 | from basic_memory import db
182 | from basic_memory.models.base import Base
183 | from basic_memory.repository.search_repository import SearchRepository
184 | from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker
185 |
186 | # Create a separate in-memory database for this test
187 | engine = create_async_engine("sqlite+aiosqlite:///:memory:")
188 | session_maker = async_sessionmaker(engine, expire_on_commit=False)
189 |
190 | # Create the database schema
191 | async with engine.begin() as conn:
192 | await conn.run_sync(Base.metadata.create_all)
193 |
194 | # Create one project
195 | async with db.scoped_session(session_maker) as session:
196 | project = Project(
197 | name="Test Project",
198 | description="Test project",
199 | path="/test/path",
200 | is_active=True,
201 | is_default=True,
202 | )
203 | session.add(project)
204 | await session.flush()
205 | project_id = project.id
206 | await session.commit()
207 |
208 | # Create search repository
209 | repo = SearchRepository(session_maker, project_id=project_id)
210 | await repo.init_search_index()
211 |
212 | permalink = "test/my-note"
213 |
214 | # Create initial note
215 | initial_row = SearchIndexRow(
216 | id=1,
217 | type=SearchItemType.ENTITY.value,
218 | title="My Test Note",
219 | content_stems="initial content here",
220 | content_snippet="This is the initial content",
221 | permalink=permalink,
222 | file_path="test/my_note.md",
223 | entity_id=1,
224 | metadata={"entity_type": "note"},
225 | created_at=datetime.now(timezone.utc),
226 | updated_at=datetime.now(timezone.utc),
227 | project_id=project_id,
228 | )
229 |
230 | # Index the initial version
231 | await repo.index_item(initial_row)
232 |
233 | # Verify it exists
234 | results_initial = await repo.search(search_text="initial content")
235 | assert len(results_initial) == 1
236 | assert results_initial[0].content_snippet == "This is the initial content"
237 |
238 | # Now update the note (simulate an edit)
239 | updated_row = SearchIndexRow(
240 | id=1,
241 | type=SearchItemType.ENTITY.value,
242 | title="My Test Note",
243 | content_stems="updated content here", # Changed
244 | content_snippet="This is the UPDATED content", # Changed
245 | permalink=permalink, # Same permalink
246 | file_path="test/my_note.md",
247 | entity_id=1,
248 | metadata={"entity_type": "note"},
249 | created_at=datetime.now(timezone.utc),
250 | updated_at=datetime.now(timezone.utc),
251 | project_id=project_id,
252 | )
253 |
254 | # Re-index (should replace the old version)
255 | await repo.index_item(updated_row)
256 |
257 | # Verify the old version is gone
258 | results_old = await repo.search(search_text="initial content")
259 | assert len(results_old) == 0
260 |
261 | # Verify the new version exists
262 | results_new = await repo.search(search_text="updated content")
263 | assert len(results_new) == 1
264 | assert results_new[0].content_snippet == "This is the UPDATED content"
265 |
266 | # Verify we only have one record (not duplicated)
267 | all_results = await repo.search(search_text="My Test Note")
268 | assert len(all_results) == 1
269 |
270 | await engine.dispose()
271 |
```
--------------------------------------------------------------------------------
/v15-docs/project-root-env-var.md:
--------------------------------------------------------------------------------
```markdown
1 | # BASIC_MEMORY_PROJECT_ROOT Environment Variable
2 |
3 | **Status**: New Feature
4 | **PR**: #334
5 | **Use Case**: Security, containerization, path constraints
6 |
7 | ## What's New
8 |
9 | v0.15.0 introduces the `BASIC_MEMORY_PROJECT_ROOT` environment variable to constrain all project paths to a specific directory. This provides security and enables safe multi-tenant deployments.
10 |
11 | ## Quick Examples
12 |
13 | ### Containerized Deployment
14 |
15 | ```bash
16 | # Docker/containerized environment
17 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
18 | export BASIC_MEMORY_HOME=/app/data/basic-memory
19 |
20 | # All projects must be under /app/data
21 | bm project add my-project /app/data/my-project # ✓ Allowed
22 | bm project add my-project /tmp/unsafe # ✗ Blocked
23 | ```
24 |
25 | ### Development Environment
26 |
27 | ```bash
28 | # Local development - no constraint (default)
29 | # BASIC_MEMORY_PROJECT_ROOT not set
30 |
31 | # Projects can be anywhere
32 | bm project add work ~/Documents/work-notes # ✓ Allowed
33 | bm project add personal ~/personal-kb # ✓ Allowed
34 | ```
35 |
36 | ## How It Works
37 |
38 | ### Path Validation
39 |
40 | When `BASIC_MEMORY_PROJECT_ROOT` is set:
41 |
42 | 1. **All project paths are validated** against the root
43 | 2. **Paths are sanitized** to prevent directory traversal
44 | 3. **Symbolic links are resolved** and verified
45 | 4. **Escape attempts are blocked** (e.g., `../../../etc`)
46 |
47 | ### Path Sanitization
48 |
49 | ```python
50 | from pathlib import Path  # example internal validation
51 | project_root = "/app/data"
52 | user_path = "/app/data/../../../etc"
53 |
54 | # Sanitized and validated
55 | resolved_path = Path(user_path).resolve()
56 | # → "/etc"
57 |
58 | # Check if under project_root
59 | if not str(resolved_path).startswith(project_root):
60 | raise ValueError("Path must be under /app/data")
61 | ```
62 |
63 | ## Configuration
64 |
65 | ### Set via Environment Variable
66 |
67 | ```bash
68 | # In shell or .bashrc/.zshrc
69 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
70 |
71 | # Or in Docker
72 | docker run -e BASIC_MEMORY_PROJECT_ROOT=/app/data ...
73 | ```
74 |
75 | ### Docker Deployment
76 |
77 | **Dockerfile:**
78 | ```dockerfile
79 | # Set project root for path constraints
80 | ENV BASIC_MEMORY_HOME=/app/data/basic-memory \
81 | BASIC_MEMORY_PROJECT_ROOT=/app/data
82 | ```
83 |
84 | **docker-compose.yml:**
85 | ```yaml
86 | services:
87 | basic-memory:
88 | environment:
89 | BASIC_MEMORY_HOME: /app/data/basic-memory
90 | BASIC_MEMORY_PROJECT_ROOT: /app/data
91 | volumes:
92 | - ./data:/app/data
93 | ```
94 |
95 | ### Kubernetes Deployment
96 |
97 | ```yaml
98 | apiVersion: v1
99 | kind: Pod
100 | spec:
101 | containers:
102 | - name: basic-memory
103 | env:
104 | - name: BASIC_MEMORY_PROJECT_ROOT
105 | value: "/app/data"
106 | - name: BASIC_MEMORY_HOME
107 | value: "/app/data/basic-memory"
108 | volumeMounts:
109 | - name: data-volume
110 | mountPath: /app/data
111 | ```
112 |
113 | ## Use Cases
114 |
115 | ### 1. Container Security
116 |
117 | **Problem:** Containers shouldn't create projects outside mounted volumes
118 |
119 | **Solution:**
120 | ```bash
121 | # Set project root to volume mount
122 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
123 |
124 | # Projects confined to volume
125 | bm project add notes /app/data/notes # ✓
126 | bm project add evil /etc/passwd # ✗ Blocked
127 | ```
128 |
129 | ### 2. Multi-Tenant SaaS
130 |
131 | **Problem:** Tenant A shouldn't access Tenant B's files
132 |
133 | **Solution:**
134 | ```bash
135 | # Per-tenant isolation
136 | export BASIC_MEMORY_PROJECT_ROOT=/app/data/tenant-${TENANT_ID}
137 |
138 | # Tenant can only create projects under their directory
139 | bm project add my-notes /app/data/tenant-123/notes # ✓
140 | bm project add sneaky /app/data/tenant-456/notes # ✗ Blocked
141 | ```
142 |
143 | ### 3. Shared Hosting
144 |
145 | **Problem:** Users need isolated project spaces
146 |
147 | **Solution:**
148 | ```bash
149 | # Per-user isolation
150 | export BASIC_MEMORY_PROJECT_ROOT=/home/${USER}/basic-memory
151 |
152 | # User confined to their home directory
153 | bm project add personal /home/alice/basic-memory/personal # ✓
154 | bm project add other /home/bob/basic-memory/data # ✗ Blocked
155 | ```
156 |
157 | ## Relationship with BASIC_MEMORY_HOME
158 |
159 | `BASIC_MEMORY_HOME` and `BASIC_MEMORY_PROJECT_ROOT` serve **different purposes**:
160 |
161 | | Variable | Purpose | Default | Example |
162 | |----------|---------|---------|---------|
163 | | `BASIC_MEMORY_HOME` | Default project location | `~/basic-memory` | Where "main" project lives |
164 | | `BASIC_MEMORY_PROJECT_ROOT` | Path constraint boundary | None (unrestricted) | Security boundary |
165 |
166 | ### Using Both Together
167 |
168 | ```bash
169 | # Typical containerized setup
170 | export BASIC_MEMORY_PROJECT_ROOT=/app/data # Constraint: all under /app/data
171 | export BASIC_MEMORY_HOME=/app/data/basic-memory # Default: main project location
172 |
173 | # This creates main project at /app/data/basic-memory
174 | # And ensures all other projects are also under /app/data
175 | ```
176 |
177 | ### Key Differences
178 |
179 | **BASIC_MEMORY_HOME:**
180 | - Sets default project path
181 | - Used for "main" project
182 | - Does NOT enforce constraints
183 | - Optional - defaults to `~/basic-memory`
184 |
185 | **BASIC_MEMORY_PROJECT_ROOT:**
186 | - Enforces path constraints
187 | - Validates ALL project paths
188 | - Prevents path traversal
189 | - Optional - if not set, no constraints
190 |
191 | ## Validation Examples
192 |
193 | ### Valid Paths (with PROJECT_ROOT=/app/data)
194 |
195 | ```bash
196 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
197 |
198 | # Direct child
199 | bm project add notes /app/data/notes # ✓
200 |
201 | # Nested child
202 | bm project add work /app/data/projects/work # ✓
203 |
204 | # Plain subdirectory path (no traversal components)
205 | bm project add rel /app/data/relative       # ✓
206 |
207 | # Symlink (resolves under /app/data)
208 | ln -s /app/data/real /app/data/link
209 | bm project add linked /app/data/link # ✓
210 | ```
211 |
212 | ### Invalid Paths (with PROJECT_ROOT=/app/data)
213 |
214 | ```bash
215 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
216 |
217 | # Path traversal attempt
218 | bm project add evil /app/data/../../../etc
219 | # ✗ Error: Path must be under /app/data
220 |
221 | # Absolute path outside root
222 | bm project add outside /tmp/data
223 | # ✗ Error: Path must be under /app/data
224 |
225 | # Symlink escaping root
226 | ln -s /etc/passwd /app/data/evil
227 | bm project add bad /app/data/evil
228 | # ✗ Error: Path must be under /app/data
229 |
230 | # Relative path escaping
231 | bm project add sneaky /app/data/../../sneaky
232 | # ✗ Error: Path must be under /app/data
233 | ```
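
These outcomes can be reproduced in a few lines of `pathlib`. A sketch of the check (hedged: it uses `Path.is_relative_to()` from Python 3.9+ instead of the string-prefix comparison shown under Implementation Details below):

```python
from pathlib import Path

ROOT = Path("/app/data").resolve()

def is_allowed(path: str) -> bool:
    # resolve() collapses ".." and follows symlinks, mirroring the validator
    return Path(path).resolve().is_relative_to(ROOT)

print(is_allowed("/app/data/notes"))          # True
print(is_allowed("/app/data/projects/work"))  # True
print(is_allowed("/app/data/../../../etc"))   # False: resolves to /etc
print(is_allowed("/tmp/data"))                # False
```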
234 |
235 | ## Error Messages
236 |
237 | ### Path Outside Root
238 |
239 | ```bash
240 | $ bm project add test /tmp/test
241 | Error: BASIC_MEMORY_PROJECT_ROOT is set to /app/data.
242 | All projects must be created under this directory.
243 | Invalid path: /tmp/test
244 | ```
245 |
246 | ### Escape Attempt Blocked
247 |
248 | ```bash
249 | $ bm project add evil /app/data/../../../etc
250 | Error: BASIC_MEMORY_PROJECT_ROOT is set to /app/data.
251 | All projects must be created under this directory.
252 | Invalid path: /etc
253 | ```
254 |
255 | ## Migration Guide
256 |
257 | ### Enabling PROJECT_ROOT on Existing Setup
258 |
259 | If you have existing projects outside the desired root:
260 |
261 | 1. **Choose project root location**
262 | ```bash
263 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
264 | ```
265 |
266 | 2. **Move existing projects**
267 | ```bash
268 | # Backup first
269 | cp -r ~/old-project /app/data/old-project
270 | ```
271 |
272 | 3. **Update config.json**
273 | ```bash
274 | # Edit ~/.basic-memory/config.json
275 | {
276 | "projects": {
277 | "main": "/app/data/basic-memory",
278 | "old-project": "/app/data/old-project"
279 | }
280 | }
281 | ```
282 |
283 | 4. **Verify paths**
284 | ```bash
285 | bm project list
286 | # All paths should be under /app/data
287 | ```
288 |
289 | ### Disabling PROJECT_ROOT
290 |
291 | To remove constraints:
292 |
293 | ```bash
294 | # Unset environment variable
295 | unset BASIC_MEMORY_PROJECT_ROOT
296 |
297 | # Or remove from Docker/config
298 | # Now projects can be created anywhere again
299 | ```
300 |
301 | ## Testing Path Constraints
302 |
303 | ### Verify Configuration
304 |
305 | ```bash
306 | # Check if PROJECT_ROOT is set
307 | env | grep BASIC_MEMORY_PROJECT_ROOT
308 |
309 | # Try creating project outside root (should fail)
310 | bm project add test /tmp/test
311 | ```
312 |
313 | ### Docker Testing
314 |
315 | ```bash
316 | # Run with constraint
317 | docker run \
318 | -e BASIC_MEMORY_PROJECT_ROOT=/app/data \
319 | -v $(pwd)/data:/app/data \
320 | basic-memory:latest \
321 | bm project add notes /app/data/notes
322 |
323 | # Verify in container
324 | docker exec -it container_id env | grep PROJECT_ROOT
325 | ```
326 |
327 | ## Security Best Practices
328 |
329 | 1. **Always set in production**: Use PROJECT_ROOT in deployed environments
330 | 2. **Minimal permissions**: Set directory permissions to 700 or 750
331 | 3. **Audit project creation**: Log all project add/remove operations
332 | 4. **Regular validation**: Periodically check project paths haven't escaped
333 | 5. **Volume mounts**: Ensure PROJECT_ROOT matches Docker volume mounts
334 |
335 | ## Troubleshooting
336 |
337 | ### Projects Not Creating
338 |
339 | **Problem:** Can't create projects with PROJECT_ROOT set
340 |
341 | ```bash
342 | $ bm project add test /app/data/test
343 | Error: Path must be under /app/data
344 | ```
345 |
346 | **Solution:** Verify PROJECT_ROOT is correct
347 | ```bash
348 | echo $BASIC_MEMORY_PROJECT_ROOT
349 | # Should match expected path
350 | ```
351 |
352 | ### Paths Resolving Incorrectly
353 |
354 | **Problem:** Symlinks not working as expected
355 |
356 | **Solution:** Check symlink target
357 | ```bash
358 | ls -la /app/data/link
359 | # → /app/data/link -> /some/target
360 |
361 | # Ensure target is under PROJECT_ROOT
362 | realpath /app/data/link
363 | ```
364 |
365 | ### Docker Volume Issues
366 |
367 | **Problem:** PROJECT_ROOT doesn't match volume mount
368 |
369 | **Solution:** Align environment and volume
370 | ```yaml
371 | # docker-compose.yml
372 | environment:
373 | BASIC_MEMORY_PROJECT_ROOT: /app/data # ← Must match volume mount
374 | volumes:
375 | - ./data:/app/data # ← Mount point
376 | ```
377 |
378 | ## Implementation Details
379 |
380 | ### Path Sanitization Algorithm
381 |
382 | ```python
383 | def sanitize_and_validate_path(path: str, project_root: str) -> str:
384 | """Sanitize path and validate against project root."""
385 | # Convert to absolute path
386 | base_path = Path(project_root).resolve()
387 | target_path = Path(path).resolve()
388 |
389 | # Get as POSIX string for comparison
390 | resolved_path = target_path.as_posix()
391 | base_posix = base_path.as_posix()
392 |
393 | # Verify resolved path is under project_root
394 | if not resolved_path.startswith(base_posix):
395 | raise ValueError(
396 | f"BASIC_MEMORY_PROJECT_ROOT is set to {project_root}. "
397 | f"All projects must be created under this directory. "
398 | f"Invalid path: {path}"
399 | )
400 |
401 | return resolved_path
402 | ```
403 |
404 | ### Config Loading
405 |
406 | ```python
407 | class BasicMemoryConfig(BaseSettings):
408 | project_root: Optional[str] = Field(
409 | default=None,
410 | description="If set, all projects must be created underneath this directory"
411 | )
412 |
413 | model_config = SettingsConfigDict(
414 | env_prefix="BASIC_MEMORY_", # Maps BASIC_MEMORY_PROJECT_ROOT
415 | extra="ignore",
416 | )
417 | ```
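
Given that settings class, the environment variable maps directly onto the `project_root` field. A usage sketch (assumes the class above is importable):

```python
import os

os.environ["BASIC_MEMORY_PROJECT_ROOT"] = "/app/data"

config = BasicMemoryConfig()
print(config.project_root)  # → /app/data
```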
418 |
419 | ## See Also
420 |
421 | - `basic-memory-home.md` - Default project location
422 | - `env-var-overrides.md` - Environment variable precedence
423 | - Docker deployment guide
424 | - Security best practices
425 |
```
--------------------------------------------------------------------------------
/docs/Docker.md:
--------------------------------------------------------------------------------
```markdown
1 | # Docker Setup Guide
2 |
3 | Basic Memory can be run in Docker containers to provide a consistent, isolated environment for your knowledge management
4 | system. This is particularly useful for integrating with existing Dockerized MCP servers or for deployment scenarios.
5 |
6 | ## Quick Start
7 |
8 | ### Option 1: Using Pre-built Images (Recommended)
9 |
10 | Basic Memory provides pre-built Docker images on GitHub Container Registry that are automatically updated with each release.
11 |
12 | 1. **Use the official image directly:**
13 | ```bash
14 | docker run -d \
15 | --name basic-memory-server \
16 | -p 8000:8000 \
17 | -v /path/to/your/obsidian-vault:/app/data:rw \
18 | -v basic-memory-config:/app/.basic-memory:rw \
19 | ghcr.io/basicmachines-co/basic-memory:latest
20 | ```
21 |
22 | 2. **Or use Docker Compose with the pre-built image:**
23 | ```yaml
24 | version: '3.8'
25 | services:
26 | basic-memory:
27 | image: ghcr.io/basicmachines-co/basic-memory:latest
28 | container_name: basic-memory-server
29 | ports:
30 | - "8000:8000"
31 | volumes:
32 | - /path/to/your/obsidian-vault:/app/data:rw
33 | - basic-memory-config:/app/.basic-memory:rw
34 | environment:
35 | - BASIC_MEMORY_DEFAULT_PROJECT=main
36 | restart: unless-stopped
37 | ```
38 |
39 | ### Option 2: Using Docker Compose (Building Locally)
40 |
41 | 1. **Clone the repository:**
42 | ```bash
43 | git clone https://github.com/basicmachines-co/basic-memory.git
44 | cd basic-memory
45 | ```
46 |
47 | 2. **Update the docker-compose.yml:**
48 | Edit the volume mount to point to your Obsidian vault:
49 | ```yaml
50 | volumes:
51 | # Change './obsidian-vault' to your actual directory path
52 | - /path/to/your/obsidian-vault:/app/data:rw
53 | ```
54 |
55 | 3. **Start the container:**
56 | ```bash
57 | docker-compose up -d
58 | ```
59 |
60 | ### Option 3: Using Docker CLI
61 |
62 | ```bash
63 | # Build the image
64 | docker build -t basic-memory .
65 |
66 | # Run with volume mounting
67 | docker run -d \
68 | --name basic-memory-server \
69 | -v /path/to/your/obsidian-vault:/app/data:rw \
70 | -v basic-memory-config:/app/.basic-memory:rw \
71 | -e BASIC_MEMORY_DEFAULT_PROJECT=main \
72 | basic-memory
73 | ```
74 |
75 | ## Configuration
76 |
77 | ### Volume Mounts
78 |
79 | Basic Memory requires several volume mounts for proper operation:
80 |
81 | 1. **Knowledge Directory** (Required):
82 | ```yaml
83 | - /path/to/your/obsidian-vault:/app/data:rw
84 | ```
85 | Mount your Obsidian vault or knowledge base directory.
86 |
87 | 2. **Configuration and Database** (Recommended):
88 | ```yaml
89 | - basic-memory-config:/app/.basic-memory:rw
90 | ```
91 | Persistent storage for configuration and SQLite database.
92 |
93 | You can edit the Basic Memory config file at /app/.basic-memory/config.json after the container starts.
94 |
95 | 3. **Multiple Projects** (Optional):
96 | ```yaml
97 | - /path/to/project1:/app/data/project1:rw
98 | - /path/to/project2:/app/data/project2:rw
99 | ```
100 |
102 |
103 | ## CLI Commands via Docker
104 |
105 | You can run Basic Memory CLI commands inside the container using `docker exec`:
106 |
107 | ### Basic Commands
108 |
109 | ```bash
110 | # Check status
111 | docker exec basic-memory-server basic-memory status
112 |
113 | # Sync files
114 | docker exec basic-memory-server basic-memory sync
115 |
116 | # Show help
117 | docker exec basic-memory-server basic-memory --help
118 | ```
119 |
120 | ### Managing Projects with Volume Mounts
121 |
122 | When using Docker volumes, you'll need to configure projects to point to your mounted directories:
123 |
124 | 1. **Check current configuration:**
125 | ```bash
126 | docker exec basic-memory-server cat /app/.basic-memory/config.json
127 | ```
128 |
129 | 2. **Add a project for your mounted volume:**
130 | ```bash
131 | # If you mounted /path/to/your/vault to /app/data
132 | docker exec basic-memory-server basic-memory project create my-vault /app/data
133 |
134 | # Set it as default
135 | docker exec basic-memory-server basic-memory project set-default my-vault
136 | ```
137 |
138 | 3. **Sync the new project:**
139 | ```bash
140 | docker exec basic-memory-server basic-memory sync
141 | ```
142 |
143 | ### Example: Setting up an Obsidian Vault
144 |
145 | If you mounted your Obsidian vault like this in docker-compose.yml:
146 | ```yaml
147 | volumes:
148 | - /Users/yourname/Documents/ObsidianVault:/app/data:rw
149 | ```
150 |
151 | Then configure it:
152 | ```bash
153 | # Create project pointing to mounted vault
154 | docker exec basic-memory-server basic-memory project create obsidian /app/data
155 |
156 | # Set as default
157 | docker exec basic-memory-server basic-memory project set-default obsidian
158 |
159 | # Sync to index all files
160 | docker exec basic-memory-server basic-memory sync
161 | ```
162 |
163 | ### Environment Variables
164 |
165 | Configure Basic Memory using environment variables:
166 |
167 | ```yaml
168 | environment:
169 |
170 | # Default project
171 | - BASIC_MEMORY_DEFAULT_PROJECT=main
172 |
173 | # Enable real-time sync
174 | - BASIC_MEMORY_SYNC_CHANGES=true
175 |
176 | # Logging level
177 | - BASIC_MEMORY_LOG_LEVEL=INFO
178 |
179 | # Sync delay in milliseconds
180 | - BASIC_MEMORY_SYNC_DELAY=1000
181 | ```
182 |
183 | ## File Permissions
184 |
185 | ### Linux/macOS
186 |
187 | The Docker container now runs as a non-root user to avoid file ownership issues. By default, the container uses UID/GID 1000, but you can customize this to match your user:
188 |
189 | ```bash
190 | # Build with custom UID/GID to match your user
191 | docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) -t basic-memory .
192 |
193 | # Or use docker-compose with build args
194 | ```
195 |
196 | **Example docker-compose.yml with custom user:**
197 | ```yaml
198 | version: '3.8'
199 | services:
200 | basic-memory:
201 | build:
202 | context: .
203 | dockerfile: Dockerfile
204 | args:
205 | UID: 1000 # Replace with your UID
206 | GID: 1000 # Replace with your GID
207 | container_name: basic-memory-server
208 | ports:
209 | - "8000:8000"
210 | volumes:
211 | - /path/to/your/obsidian-vault:/app/data:rw
212 | - basic-memory-config:/app/.basic-memory:rw
213 | environment:
214 | - BASIC_MEMORY_DEFAULT_PROJECT=main
215 | restart: unless-stopped
216 | ```
217 |
218 | **Using pre-built images:**
219 | If using the pre-built image from GitHub Container Registry, files will be created with UID/GID 1000. You can either:
220 |
221 | 1. Change your local directory ownership to match:
222 | ```bash
223 | sudo chown -R 1000:1000 /path/to/your/obsidian-vault
224 | ```
225 |
226 | 2. Or build your own image with custom UID/GID as shown above.
227 |
228 | ### Windows
229 |
230 | When using Docker Desktop on Windows, ensure the directories are shared:
231 |
232 | 1. Open Docker Desktop
233 | 2. Go to Settings → Resources → File Sharing
234 | 3. Add your knowledge directory path
235 | 4. Apply & Restart
236 |
237 | ## Troubleshooting
238 |
239 | ### Common Issues
240 |
241 | 1. **File Watching Not Working:**
242 | - Ensure volume mounts are read-write (`:rw`)
243 | - Check directory permissions
244 | - On Linux, may need to increase inotify limits:
245 | ```bash
246 | echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
247 | sudo sysctl -p
248 | ```
249 |
250 | 2. **Configuration Not Persisting:**
251 | - Use named volumes for `/app/.basic-memory`
252 | - Check volume mount permissions
253 |
254 | 3. **Network Connectivity:**
255 | - For HTTP transport, ensure port 8000 is exposed
256 | - Check firewall settings
257 |
258 | ### Debug Mode
259 |
260 | Run with debug logging:
261 |
262 | ```yaml
263 | environment:
264 | - BASIC_MEMORY_LOG_LEVEL=DEBUG
265 | ```
266 |
267 | View logs:
268 |
269 | ```bash
270 | docker-compose logs -f basic-memory
271 | ```
272 |
273 |
274 | ## Security Considerations
275 |
276 | 1. **Docker Security:**
277 | The container runs as a non-root user (UID/GID 1000 by default) for improved security. You can customize the user ID using build arguments to match your local user.
278 |
279 | 2. **Volume Permissions:**
280 | Ensure mounted directories have appropriate permissions and don't expose sensitive data. With the non-root container, files will be created with the specified user ownership.
281 |
282 | 3. **Network Security:**
283 | If using the HTTP transport, consider putting a reverse proxy with SSL/TLS and authentication in front of the
284 | endpoint whenever it is reachable from a network.
285 |
286 | 4. **IMPORTANT:** The HTTP endpoints have no authorization. They should not be exposed on a public network.
287 |
288 | ## Integration Examples
289 |
290 | ### Claude Desktop with Docker
291 |
292 | The recommended way to connect Claude Desktop to the containerized Basic Memory is using `mcp-proxy`, which converts the HTTP transport to STDIO that Claude Desktop expects:
293 |
294 | 1. **Start the Docker container:**
295 | ```bash
296 | docker-compose up -d
297 | ```
298 |
299 | 2. **Configure Claude Desktop** to use mcp-proxy:
300 | ```json
301 | {
302 | "mcpServers": {
303 | "basic-memory": {
304 | "command": "uvx",
305 | "args": [
306 | "mcp-proxy",
307 | "http://localhost:8000/mcp"
308 | ]
309 | }
310 | }
311 | }
312 | ```
313 |
314 |
315 | ## Support
316 |
317 | For Docker-specific issues:
318 |
319 | 1. Check the [troubleshooting section](#troubleshooting) above
320 | 2. Review container logs: `docker-compose logs basic-memory`
321 | 3. Verify volume mounts: `docker inspect basic-memory-server`
322 | 4. Test file permissions: `docker exec basic-memory-server ls -la /app`
323 |
324 | For general Basic Memory support, see the main [README](../README.md)
325 | and [documentation](https://memory.basicmachines.co/).
326 |
327 | ## GitHub Container Registry Images
328 |
329 | ### Available Images
330 |
331 | Pre-built Docker images are available on GitHub Container Registry at [`ghcr.io/basicmachines-co/basic-memory`](https://github.com/basicmachines-co/basic-memory/pkgs/container/basic-memory).
332 |
333 | **Supported architectures:**
334 | - `linux/amd64` (Intel/AMD x64)
335 | - `linux/arm64` (ARM64, including Apple Silicon)
336 |
337 | **Available tags:**
338 | - `latest` - Latest stable release
339 | - `v0.13.8`, `v0.13.7`, etc. - Specific version tags
340 | - `v0.13`, `v0.12`, etc. - Major.minor tags
341 |
342 | ### Automated Builds
343 |
344 | Docker images are automatically built and published when new releases are tagged:
345 |
346 | 1. **Release Process:** When a git tag matching `v*` (e.g., `v0.13.8`) is pushed, the CI workflow automatically:
347 | - Builds multi-platform Docker images
348 | - Pushes to GitHub Container Registry with appropriate tags
349 | - Uses native GitHub integration for seamless publishing
350 |
351 | 2. **CI/CD Pipeline:** The Docker workflow includes:
352 | - Multi-platform builds (AMD64 and ARM64)
353 | - Layer caching for faster builds
354 | - Automatic tagging with semantic versioning
355 | - Security scanning and optimization
356 |
357 | ### Setup Requirements (For Maintainers)
358 |
359 | GitHub Container Registry integration is automatic for this repository:
360 |
361 | 1. **No external setup required** - GHCR is natively integrated with GitHub
362 | 2. **Automatic permissions** - Uses `GITHUB_TOKEN` with `packages: write` permission
363 | 3. **Public by default** - Images are automatically public for public repositories
364 |
365 | The Docker CI workflow (`.github/workflows/docker.yml`) handles everything automatically when version tags are pushed.
```
--------------------------------------------------------------------------------
/v15-docs/cloud-mode-usage.md:
--------------------------------------------------------------------------------
```markdown
1 | # Using CLI Tools in Cloud Mode
2 |
3 | **Status**: DEPRECATED - Use `cloud_mode` instead of `api_url`
4 | **Related**: cloud-authentication.md, cloud-bisync.md
5 |
6 | ## DEPRECATION NOTICE
7 |
8 | This document describes the old `api_url` / `BASIC_MEMORY_API_URL` approach which has been replaced by `cloud_mode` / `BASIC_MEMORY_CLOUD_MODE`.
9 |
10 | **New approach:** Use `cloud_mode` config or `BASIC_MEMORY_CLOUD_MODE` environment variable instead.
11 |
12 | ## Quick Start
13 |
14 | ### Enable Cloud Mode
15 |
16 | ```bash
17 | # Set cloud API URL
18 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
19 |
20 | # Or in ~/.basic-memory/config.json:
21 | #   {
22 | #     "api_url": "https://api.basicmemory.cloud"
23 | #   }
24 |
25 | # Authenticate
26 | bm cloud login
27 |
28 | # Now CLI tools work against cloud
29 | bm sync --project my-cloud-project
30 | bm status
31 | bm tools search --query "notes"
32 | ```
33 |
34 | ## How It Works
35 |
36 | ### Local vs Cloud Mode
37 |
38 | **Local Mode (default):**
39 | ```
40 | CLI Tools → Local ASGI Transport → Local API → Local SQLite + Files
41 | ```
42 |
43 | **Cloud Mode (with api_url set):**
44 | ```
45 | CLI Tools → HTTP Client → Cloud API → Cloud SQLite + Cloud Files
46 | ```
47 |
48 | ### Mode Detection
49 |
50 | Basic Memory automatically detects mode:
51 |
52 | ```python
53 | from basic_memory.config import ConfigManager
54 |
55 | config = ConfigManager().config
56 |
57 | if config.api_url:
58 | # Cloud mode: use HTTP client
59 | client = HTTPClient(base_url=config.api_url)
60 | else:
61 | # Local mode: use ASGI transport
62 | client = ASGITransport(app=api_app)
63 | ```
64 |
65 | ## Configuration
66 |
67 | ### Via Environment Variable
68 |
69 | ```bash
70 | # Set cloud API URL
71 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
72 |
73 | # All commands use cloud
74 | bm sync
75 | bm status
76 | ```
77 |
78 | ### Via Config File
79 |
80 | Edit `~/.basic-memory/config.json`:
81 |
82 | ```json
83 | {
84 | "api_url": "https://api.basicmemory.cloud",
85 | "cloud_client_id": "client_abc123",
86 | "cloud_domain": "https://auth.basicmemory.cloud",
87 | "cloud_host": "https://api.basicmemory.cloud"
88 | }
89 | ```
90 |
91 | ### Temporary Override
92 |
93 | ```bash
94 | # One-off cloud command
95 | BASIC_MEMORY_API_URL=https://api.basicmemory.cloud bm sync --project notes
96 |
97 | # Back to local mode
98 | bm sync --project notes
99 | ```
100 |
101 | ## Available Commands in Cloud Mode
102 |
103 | ### Sync Commands
104 |
105 | ```bash
106 | # Sync cloud project
107 | bm sync --project cloud-project
108 |
109 | # Sync specific project
110 | bm sync --project work-notes
111 |
112 | # Watch mode (cloud sync)
113 | bm sync --watch --project notes
114 | ```
115 |
116 | ### Status Commands
117 |
118 | ```bash
119 | # Check cloud sync status
120 | bm status
121 |
122 | # Shows cloud project status
123 | ```
124 |
125 | ### MCP Tools
126 |
127 | ```bash
128 | # Search in cloud project
129 | bm tools search \
130 | --query "authentication" \
131 | --project cloud-notes
132 |
133 | # Continue conversation from cloud
134 | bm tools continue-conversation \
135 | --topic "search implementation" \
136 | --project cloud-notes
137 |
138 | # Basic Memory guide
139 | bm tools basic-memory-guide
140 | ```
141 |
142 | ### Project Commands
143 |
144 | ```bash
145 | # List cloud projects
146 | bm project list
147 |
148 | # Add cloud project (if permitted)
149 | bm project add notes /app/data/notes
150 |
151 | # Switch default project
152 | bm project default notes
153 | ```
154 |
155 | ## Workflows
156 |
157 | ### Multi-Device Cloud Workflow
158 |
159 | **Device A (Primary):**
160 | ```bash
161 | # Configure cloud mode
162 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
163 |
164 | # Authenticate
165 | bm cloud login
166 |
167 | # Use bisync for primary work
168 | bm cloud bisync-setup
169 | bm sync --watch
170 |
171 | # Local files in ~/basic-memory-cloud-sync/
172 | # Synced bidirectionally with cloud
173 | ```
174 |
175 | **Device B (Secondary):**
176 | ```bash
177 | # Configure cloud mode
178 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
179 |
180 | # Authenticate
181 | bm cloud login
182 |
183 | # Work directly with cloud (no local sync)
184 | bm tools search --query "meeting notes" --project work
185 |
186 | # Or mount for file access
187 | bm cloud mount
188 | ```
189 |
190 | ### Development vs Production
191 |
192 | **Development (local):**
193 | ```bash
194 | # Local mode
195 | unset BASIC_MEMORY_API_URL
196 |
197 | # Work with local files
198 | bm sync
199 | bm tools search --query "test"
200 | ```
201 |
202 | **Production (cloud):**
203 | ```bash
204 | # Cloud mode
205 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
206 |
207 | # Work with cloud data
208 | bm sync --project production-kb
209 | ```
210 |
211 | ### Testing Cloud Integration
212 |
213 | ```bash
214 | # Test against staging
215 | export BASIC_MEMORY_API_URL=https://staging-api.basicmemory.cloud
216 | bm cloud login
217 | bm sync --project test-project
218 |
219 | # Test against production
220 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
221 | bm cloud login
222 | bm sync --project prod-project
223 | ```
224 |
225 | ## MCP Integration
226 |
227 | ### Local MCP (default)
228 |
229 | ```json
230 | // claude_desktop_config.json
231 | {
232 | "mcpServers": {
233 | "basic-memory": {
234 | "command": "uvx",
235 | "args": ["basic-memory", "mcp"]
236 | }
237 | }
238 | }
239 | ```
240 |
241 | Uses local files via ASGI transport.
242 |
243 | ### Cloud MCP
244 |
245 | ```json
246 | // claude_desktop_config.json
247 | {
248 | "mcpServers": {
249 | "basic-memory-cloud": {
250 | "command": "uvx",
251 | "args": ["basic-memory", "mcp"],
252 | "env": {
253 | "BASIC_MEMORY_API_URL": "https://api.basicmemory.cloud"
254 | }
255 | }
256 | }
257 | }
258 | ```
259 |
260 | Uses cloud API via HTTP client.
261 |
262 | ### Hybrid Setup (Both)
263 |
264 | ```json
265 | {
266 | "mcpServers": {
267 | "basic-memory-local": {
268 | "command": "uvx",
269 | "args": ["basic-memory", "mcp"]
270 | },
271 | "basic-memory-cloud": {
272 | "command": "uvx",
273 | "args": ["basic-memory", "mcp"],
274 | "env": {
275 | "BASIC_MEMORY_API_URL": "https://api.basicmemory.cloud"
276 | }
277 | }
278 | }
279 | }
280 | ```
281 |
282 | Access both local and cloud from same LLM.
283 |
284 | ## Authentication
285 |
286 | ### Cloud Mode Requires Authentication
287 |
288 | ```bash
289 | # Must login first
290 | bm cloud login
291 |
292 | # Then cloud commands work
293 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
294 | bm sync --project notes
295 | ```
296 |
297 | ### Token Management
298 |
299 | Cloud mode uses JWT authentication:
300 | - Token stored in `~/.basic-memory/cloud-auth.json`
301 | - Auto-refreshed when expired
302 | - Includes subscription validation
303 |
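To see when the stored token expires, you can decode the JWT payload locally. This is a sketch: it assumes the auth file keeps the token under an `access_token` key, which may not match the actual file layout.

```python
import base64, json
from pathlib import Path

# Load the CLI's auth file (path from the list above)
auth = json.loads((Path.home() / ".basic-memory" / "cloud-auth.json").read_text())
token = auth["access_token"]  # hypothetical key name

# The JWT payload is the middle, base64url-encoded segment
payload_b64 = token.split(".")[1]
payload_b64 += "=" * (-len(payload_b64) % 4)  # restore stripped padding
payload = json.loads(base64.urlsafe_b64decode(payload_b64))
print("expires at (unix):", payload.get("exp"))
```
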
304 | ### Authentication Flow
305 |
306 | ```bash
307 | # 1. Login
308 | bm cloud login
309 | # → Opens browser for OAuth
310 | # → Stores JWT token
311 |
312 | # 2. Set cloud mode
313 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
314 |
315 | # 3. Use tools (automatically authenticated)
316 | bm sync --project notes
317 | # → Sends Authorization: Bearer {token} header
318 | ```
319 |
320 | ## Project Management in Cloud Mode
321 |
322 | ### Cloud Projects vs Local Projects
323 |
324 | **Local mode:**
325 | - Projects are local directories
326 | - Defined in `~/.basic-memory/config.json`
327 | - Full filesystem access
328 |
329 | **Cloud mode:**
330 | - Projects are cloud-managed
331 | - Retrieved from cloud API
332 | - Constrained by BASIC_MEMORY_PROJECT_ROOT on server
333 |
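For reference, a minimal local-mode `config.json` maps project names to directories. A sketch; field names beyond `default_project` may vary by version:

```json
{
  "default_project": "main",
  "projects": {
    "main": "/Users/you/basic-memory",
    "work-notes": "/Users/you/work/notes"
  }
}
```
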
334 | ### Working with Cloud Projects
335 |
336 | ```bash
337 | # Enable cloud mode
338 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
339 |
340 | # List cloud projects
341 | bm project list
342 | # → Fetches from cloud API
343 |
344 | # Sync specific cloud project
345 | bm sync --project cloud-notes
346 | # → Syncs cloud project to cloud database
347 |
348 | # Search in cloud project
349 | bm tools search --query "auth" --project cloud-notes
350 | # → Searches cloud-indexed content
351 | ```
352 |
353 | ## Switching Between Local and Cloud
354 |
355 | ### Switch to Cloud Mode
356 |
357 | ```bash
358 | # Save local state
359 | bm sync # Ensure local is synced
360 |
361 | # Switch to cloud
362 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
363 | bm cloud login
364 |
365 | # Work with cloud
366 | bm sync --project cloud-project
367 | ```
368 |
369 | ### Switch to Local Mode
370 |
371 | ```bash
372 | # Switch back to local
373 | unset BASIC_MEMORY_API_URL
374 |
375 | # Work with local files
376 | bm sync --project local-project
377 | ```
378 |
379 | ### Context-Aware Scripts
380 |
381 | ```bash
382 | #!/bin/bash
383 |
384 | if [ -n "$BASIC_MEMORY_API_URL" ]; then
385 | echo "Cloud mode: $BASIC_MEMORY_API_URL"
386 | bm cloud login # Ensure authenticated
387 | else
388 | echo "Local mode"
389 | fi
390 |
391 | bm sync --project notes
392 | ```
393 |
394 | ## Performance Considerations
395 |
396 | ### Network Latency
397 |
398 | Cloud mode requires network:
399 | - API calls over HTTPS
400 | - Latency depends on connection
401 | - Slower than local ASGI transport
402 |
403 | ### Caching
404 |
405 | MCP in cloud mode has limited caching:
406 | - Results not cached locally
407 | - Each request hits cloud API
408 | - Consider using bisync for frequent access
409 |
410 | ### Best Practices
411 |
412 | 1. **Use bisync for primary work:**
413 | ```bash
414 | # Sync local copy
415 | bm cloud bisync
416 |
417 | # Work locally (fast)
418 | unset BASIC_MEMORY_API_URL
419 | bm tools search --query "notes"
420 | ```
421 |
422 | 2. **Use cloud mode for occasional access:**
423 | ```bash
424 | # Quick check from another device
425 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
426 | bm tools search --query "meeting" --project work
427 | ```
428 |
429 | 3. **Hybrid approach:**
430 | - Primary device: bisync for local work
431 | - Other devices: cloud mode for quick access
432 |
433 | ## Troubleshooting
434 |
435 | ### Not Authenticated Error
436 |
437 | ```bash
438 | $ bm sync --project notes
439 | Error: Not authenticated. Please run 'bm cloud login' first.
440 | ```
441 |
442 | **Solution:**
443 | ```bash
444 | bm cloud login
445 | ```
446 |
447 | ### Connection Refused
448 |
449 | ```bash
450 | $ bm sync
451 | Error: Connection refused: https://api.basicmemory.cloud
452 | ```
453 |
454 | **Solutions:**
455 | 1. Check API URL: `echo $BASIC_MEMORY_API_URL`
456 | 2. Verify network: `curl https://api.basicmemory.cloud/health`
457 | 3. Check cloud status: https://status.basicmemory.com
458 |
459 | ### Wrong Projects Listed
460 |
461 | **Problem:** `bm project list` shows unexpected projects
462 |
463 | **Check mode:**
464 | ```bash
465 | # What mode am I in?
466 | echo $BASIC_MEMORY_API_URL
467 |
468 | # If set → cloud projects
469 | # If not set → local projects
470 | ```
471 |
472 | **Solution:** Set or unset `BASIC_MEMORY_API_URL` as needed.
473 |
474 | ### Subscription Required
475 |
476 | ```bash
477 | $ bm sync --project notes
478 | Error: Active subscription required
479 | Subscribe at: https://basicmemory.com/subscribe
480 | ```
481 |
482 | **Solution:** Subscribe or renew your subscription at the URL shown.
483 |
484 | ## Configuration Examples
485 |
486 | ### Development Setup
487 |
488 | ```bash
489 | # .bashrc / .zshrc
490 | export BASIC_MEMORY_ENV=dev
491 | export BASIC_MEMORY_LOG_LEVEL=DEBUG
492 |
493 | # Local mode by default
494 | # Cloud mode on demand
495 | alias bm-cloud='BASIC_MEMORY_API_URL=https://api.basicmemory.cloud bm'
496 | ```
497 |
498 | ### Production Setup
499 |
500 | ```ini
501 | # systemd service
502 | [Service]
503 | Environment="BASIC_MEMORY_API_URL=https://api.basicmemory.cloud"
504 | Environment="BASIC_MEMORY_LOG_LEVEL=INFO"
505 | ExecStart=/usr/local/bin/basic-memory serve
506 | ```
507 |
508 | ### Docker Setup
509 |
510 | ```yaml
511 | # docker-compose.yml
512 | services:
513 | basic-memory:
514 | environment:
515 | BASIC_MEMORY_API_URL: https://api.basicmemory.cloud
516 | BASIC_MEMORY_LOG_LEVEL: INFO
517 | volumes:
518 |       - ./cloud-auth.json:/root/.basic-memory/cloud-auth.json:ro
519 | ```
520 |
521 | ## Security
522 |
523 | ### API Authentication
524 |
525 | - All cloud API calls authenticated with JWT
526 | - Token in Authorization header
527 | - Subscription validated per request
528 |
529 | ### Network Security
530 |
531 | - All traffic over HTTPS/TLS
532 | - No credentials in URLs or logs
533 | - Tokens stored securely (mode 600)
534 |
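A quick way to confirm the token file is locked down:

```bash
# Should show -rw------- (mode 600), owner-only access
ls -l ~/.basic-memory/cloud-auth.json

# Tighten permissions if they have drifted
chmod 600 ~/.basic-memory/cloud-auth.json
```
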
535 | ### Multi-Tenant Isolation
536 |
537 | - Tenant ID from JWT claims
538 | - Each request isolated to tenant
539 | - Cannot access other tenants' data
540 |
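On the server side, tenant resolution amounts to reading a claim from the verified JWT. A minimal sketch, assuming a hypothetical `tenant_id` claim and RS256 signing (the real claim name and algorithm may differ):

```python
import jwt  # PyJWT


def tenant_from_token(token: str, public_key: str) -> str:
    # Verifies the signature, then reads the tenant claim
    claims = jwt.decode(token, public_key, algorithms=["RS256"])
    return claims["tenant_id"]  # hypothetical claim name
```
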
541 | ## See Also
542 |
543 | - `cloud-authentication.md` - Authentication setup
544 | - `cloud-bisync.md` - Bidirectional sync workflow
545 | - `cloud-mount.md` - Direct cloud file access
546 | - MCP server configuration documentation
547 |
```
--------------------------------------------------------------------------------
/v15-docs/env-var-overrides.md:
--------------------------------------------------------------------------------
```markdown
1 | # Environment Variable Overrides
2 |
3 | **Status**: Fixed in v0.15.0
4 | **PR**: #334 (part of PROJECT_ROOT implementation)
5 |
6 | ## What Changed
7 |
8 | v0.15.0 fixes configuration loading to properly respect environment variable overrides. Environment variables with the `BASIC_MEMORY_` prefix now correctly override values in `config.json`.
9 |
10 | ## How It Works
11 |
12 | ### Precedence Order (Highest to Lowest)
13 |
14 | 1. **Environment Variables** (`BASIC_MEMORY_*`)
15 | 2. **Config File** (`~/.basic-memory/config.json`)
16 | 3. **Default Values** (Built-in defaults)
17 |
18 | ### Example
19 |
20 | ```bash
21 | # config.json contains:
22 | {
23 | "default_project": "main",
24 | "log_level": "INFO"
25 | }
26 |
27 | # Environment overrides:
28 | export BASIC_MEMORY_DEFAULT_PROJECT=work
29 | export BASIC_MEMORY_LOG_LEVEL=DEBUG
30 |
31 | # Result:
32 | # default_project = "work" ← from env var
33 | # log_level = "DEBUG" ← from env var
34 | ```
35 |
36 | ## Environment Variable Naming
37 |
38 | All environment variables use the prefix `BASIC_MEMORY_` followed by the config field name in UPPERCASE:
39 |
40 | | Config Field | Environment Variable | Example |
41 | |--------------|---------------------|---------|
42 | | `default_project` | `BASIC_MEMORY_DEFAULT_PROJECT` | `BASIC_MEMORY_DEFAULT_PROJECT=work` |
43 | | `log_level` | `BASIC_MEMORY_LOG_LEVEL` | `BASIC_MEMORY_LOG_LEVEL=DEBUG` |
44 | | `project_root` | `BASIC_MEMORY_PROJECT_ROOT` | `BASIC_MEMORY_PROJECT_ROOT=/app/data` |
45 | | `api_url` | `BASIC_MEMORY_API_URL` | `BASIC_MEMORY_API_URL=https://api.example.com` |
46 | | `default_project_mode` | `BASIC_MEMORY_DEFAULT_PROJECT_MODE` | `BASIC_MEMORY_DEFAULT_PROJECT_MODE=true` |
47 |
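The mapping is mechanical: the prefix plus the upper-cased field name. A one-function sketch:

```python
def env_var_name(field: str) -> str:
    """Derive the environment variable name for a config field."""
    return f"BASIC_MEMORY_{field.upper()}"


assert env_var_name("default_project") == "BASIC_MEMORY_DEFAULT_PROJECT"
```
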
48 | ## Common Use Cases
49 |
50 | ### Development vs Production
51 |
52 | **Development (.env or shell):**
53 | ```bash
54 | export BASIC_MEMORY_LOG_LEVEL=DEBUG
55 | export BASIC_MEMORY_API_URL=http://localhost:8000
56 | ```
57 |
58 | **Production (systemd/docker):**
59 | ```bash
60 | export BASIC_MEMORY_LOG_LEVEL=INFO
61 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
62 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
63 | ```
64 |
65 | ### CI/CD Pipelines
66 |
67 | ```yaml
68 | # GitHub Actions
69 | env:
70 | BASIC_MEMORY_ENV: test
71 | BASIC_MEMORY_LOG_LEVEL: DEBUG
72 |
73 | # GitLab CI
74 | variables:
75 | BASIC_MEMORY_ENV: test
76 | BASIC_MEMORY_PROJECT_ROOT: /builds/project/data
77 | ```
78 |
79 | ### Docker Deployments
80 |
81 | ```bash
82 | # docker run
83 | docker run \
84 | -e BASIC_MEMORY_HOME=/app/data/main \
85 | -e BASIC_MEMORY_PROJECT_ROOT=/app/data \
86 | -e BASIC_MEMORY_LOG_LEVEL=INFO \
87 | basic-memory:latest
88 |
89 | # docker-compose.yml
90 | services:
91 | basic-memory:
92 | environment:
93 | BASIC_MEMORY_HOME: /app/data/main
94 | BASIC_MEMORY_PROJECT_ROOT: /app/data
95 | BASIC_MEMORY_LOG_LEVEL: INFO
96 | ```
97 |
98 | ### Kubernetes
99 |
100 | ```yaml
101 | apiVersion: v1
102 | kind: ConfigMap
103 | metadata:
104 | name: basic-memory-env
105 | data:
106 | BASIC_MEMORY_LOG_LEVEL: "INFO"
107 | BASIC_MEMORY_PROJECT_ROOT: "/app/data"
108 | ---
109 | apiVersion: apps/v1
110 | kind: Deployment
111 | spec:
112 | template:
113 | spec:
114 | containers:
115 | - name: basic-memory
116 | envFrom:
117 | - configMapRef:
118 | name: basic-memory-env
119 | ```
120 |
121 | ## Available Environment Variables
122 |
123 | ### Core Configuration
124 |
125 | ```bash
126 | # Environment mode
127 | export BASIC_MEMORY_ENV=user # test, dev, user
128 |
129 | # Project configuration
130 | export BASIC_MEMORY_DEFAULT_PROJECT=main
131 | export BASIC_MEMORY_DEFAULT_PROJECT_MODE=true
132 |
133 | # Path constraints
134 | export BASIC_MEMORY_HOME=/path/to/main
135 | export BASIC_MEMORY_PROJECT_ROOT=/path/to/root
136 | ```
137 |
138 | ### Sync Configuration
139 |
140 | ```bash
141 | # Sync behavior
142 | export BASIC_MEMORY_SYNC_CHANGES=true
143 | export BASIC_MEMORY_SYNC_DELAY=1000
144 | export BASIC_MEMORY_SYNC_THREAD_POOL_SIZE=4
145 |
146 | # Watch service
147 | export BASIC_MEMORY_WATCH_PROJECT_RELOAD_INTERVAL=30
148 | ```
149 |
150 | ### Feature Flags
151 |
152 | ```bash
153 | # Permalinks
154 | export BASIC_MEMORY_UPDATE_PERMALINKS_ON_MOVE=false
155 | export BASIC_MEMORY_DISABLE_PERMALINKS=false
156 | export BASIC_MEMORY_KEBAB_FILENAMES=false
157 |
158 | # Performance
159 | export BASIC_MEMORY_SKIP_INITIALIZATION_SYNC=false
160 | ```
161 |
162 | ### API Configuration
163 |
164 | ```bash
165 | # Remote API
166 | export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
167 |
168 | # Cloud configuration
169 | export BASIC_MEMORY_CLOUD_CLIENT_ID=client_abc123
170 | export BASIC_MEMORY_CLOUD_DOMAIN=https://auth.example.com
171 | export BASIC_MEMORY_CLOUD_HOST=https://api.example.com
172 | ```
173 |
174 | ### Logging
175 |
176 | ```bash
177 | # Log level
178 | export BASIC_MEMORY_LOG_LEVEL=DEBUG # DEBUG, INFO, WARNING, ERROR
179 | ```
180 |
181 | ## Override Examples
182 |
183 | ### Temporarily Override for Testing
184 |
185 | ```bash
186 | # One-off override
187 | BASIC_MEMORY_LOG_LEVEL=DEBUG bm sync
188 |
189 | # Session override
190 | export BASIC_MEMORY_DEFAULT_PROJECT=test-project
191 | bm tools search --query "test"
192 | unset BASIC_MEMORY_DEFAULT_PROJECT
193 | ```
194 |
195 | ### Override in Scripts
196 |
197 | ```bash
198 | #!/bin/bash
199 |
200 | # Override for this script execution
201 | export BASIC_MEMORY_LOG_LEVEL=DEBUG
202 | export BASIC_MEMORY_API_URL=http://localhost:8000
203 |
204 | # Run commands
205 | bm sync
206 | bm tools search --query "development"
207 | ```
208 |
209 | ### Per-Environment Config
210 |
211 | **~/.bashrc (development):**
212 | ```bash
213 | export BASIC_MEMORY_ENV=dev
214 | export BASIC_MEMORY_LOG_LEVEL=DEBUG
215 | export BASIC_MEMORY_HOME=~/dev/basic-memory-dev
216 | ```
217 |
218 | **Production systemd:**
219 | ```ini
220 | [Service]
221 | Environment="BASIC_MEMORY_ENV=user"
222 | Environment="BASIC_MEMORY_LOG_LEVEL=INFO"
223 | Environment="BASIC_MEMORY_HOME=/var/lib/basic-memory"
224 | Environment="BASIC_MEMORY_PROJECT_ROOT=/var/lib"
225 | ```
226 |
227 | ## Verification
228 |
229 | ### Check Current Values
230 |
231 | ```bash
232 | # View all BASIC_MEMORY_ env vars
233 | env | grep BASIC_MEMORY_
234 |
235 | # Check specific value
236 | echo $BASIC_MEMORY_PROJECT_ROOT
237 | ```
238 |
239 | ### Verify Override Working
240 |
241 | ```python
242 | from basic_memory.config import ConfigManager
243 |
244 | # Load config
245 | config = ConfigManager().config
246 |
247 | # Check values
248 | print(f"Project root: {config.project_root}")
249 | print(f"Log level: {config.log_level}")
250 | print(f"Default project: {config.default_project}")
251 | ```
252 |
253 | ### Debug Configuration Loading
254 |
255 | ```python
256 | import os
257 | from basic_memory.config import ConfigManager
258 |
259 | # Check what env vars are set
260 | env_vars = {k: v for k, v in os.environ.items() if k.startswith("BASIC_MEMORY_")}
261 | print("Environment variables:", env_vars)
262 |
263 | # Load config and see what won
264 | config = ConfigManager().config
265 | print("Resolved config:", config.model_dump())
266 | ```
267 |
268 | ## Migration from v0.14.x
269 |
270 | ### Previous Behavior (Bug)
271 |
272 | In v0.14.x, environment variables were sometimes ignored:
273 |
274 | ```bash
275 | # v0.14.x bug
276 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
277 | # → config.json value used instead (wrong!)
278 | ```
279 |
280 | ### Fixed Behavior (v0.15.0+)
281 |
282 | ```bash
283 | # v0.15.0+ correct
284 | export BASIC_MEMORY_PROJECT_ROOT=/app/data
285 | # → Environment variable properly overrides config.json
286 | ```
287 |
288 | **No action needed** - Just verify env vars are working as expected.
289 |
290 | ## Configuration Loading Details
291 |
292 | ### Loading Process
293 |
294 | 1. **Load defaults** from Pydantic model
295 | 2. **Load config.json** if it exists
296 | 3. **Apply environment overrides** (BASIC_MEMORY_* variables)
297 | 4. **Validate and return** merged configuration
298 |
299 | ### Implementation
300 |
301 | ```python
302 | class BasicMemoryConfig(BaseSettings):
303 | # Fields with defaults
304 | default_project: str = Field(default="main")
305 | log_level: str = "INFO"
306 |
307 | model_config = SettingsConfigDict(
308 | env_prefix="BASIC_MEMORY_", # Maps env vars
309 | extra="ignore",
310 | )
311 |
312 | # Loading logic (simplified)
313 | class ConfigManager:
314 | def load_config(self) -> BasicMemoryConfig:
315 |         # 1. Load file data (config_file points at ~/.basic-memory/config.json)
316 |         file_data = json.loads(config_file.read_text())
317 |
318 |         # 2. Read env values via pydantic-settings (env vars override defaults)
319 |         env_dict = BasicMemoryConfig().model_dump()
320 |
321 | # 3. Merge (env takes precedence)
322 | merged_data = file_data.copy()
323 | for field_name in BasicMemoryConfig.model_fields.keys():
324 | env_var_name = f"BASIC_MEMORY_{field_name.upper()}"
325 | if env_var_name in os.environ:
326 | merged_data[field_name] = env_dict[field_name]
327 |
328 | return BasicMemoryConfig(**merged_data)
329 | ```
330 |
331 | ## Troubleshooting
332 |
333 | ### Environment Variable Not Taking Effect
334 |
335 | **Problem:** Set env var but config.json value still used
336 |
337 | **Check:**
338 | ```bash
339 | # Is the variable exported?
340 | env | grep BASIC_MEMORY_PROJECT_ROOT
341 |
342 | # Exact name (case-sensitive)?
343 | export BASIC_MEMORY_PROJECT_ROOT=/app/data # ✓
344 | export basic_memory_project_root=/app/data # ✗ (wrong case)
345 | ```
346 |
347 | **Solution:** Ensure variable is exported and named correctly
348 |
349 | ### Config.json Overwriting Env Vars
350 |
351 | **Problem:** Changing config.json overrides env vars
352 |
353 | **v0.14.x:** This was a bug - config.json would override env vars
354 |
355 | **v0.15.0+:** Fixed - env vars always win
356 |
357 | **Verify:**
358 | ```python
359 | import os
360 | os.environ["BASIC_MEMORY_LOG_LEVEL"] = "DEBUG"
361 |
362 | from basic_memory.config import ConfigManager
363 | config = ConfigManager().config
364 | print(config.log_level) # Should be "DEBUG"
365 | ```
366 |
367 | ### Cache Issues
368 |
369 | **Problem:** Changes not reflected after config update
370 |
371 | **Solution:** Clear config cache
372 | ```python
373 | from basic_memory import config as config_module
374 | config_module._config = None # Clear cache
375 |
376 | # Reload
377 | config = ConfigManager().config
378 | ```
379 |
380 | ## Best Practices
381 |
382 | 1. **Use env vars for environment-specific settings:**
383 | - Different values for dev/staging/prod
384 | - Secrets and credentials
385 | - Deployment-specific paths
386 |
387 | 2. **Use config.json for stable settings:**
388 | - User preferences
389 | - Project definitions (can be overridden by env)
390 | - Feature flags that rarely change
391 |
392 | 3. **Document required env vars:**
393 | - List in README or deployment docs
394 |    - Provide a .env.example file (see the sketch after this list)
395 |
396 | 4. **Validate in scripts:**
397 | ```bash
398 | if [ -z "$BASIC_MEMORY_PROJECT_ROOT" ]; then
399 | echo "Error: BASIC_MEMORY_PROJECT_ROOT not set"
400 | exit 1
401 | fi
402 | ```
403 |
404 | 5. **Use consistent naming:**
405 | - Always use BASIC_MEMORY_ prefix
406 | - Match config.json field names (uppercase)
407 |
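A minimal `.env.example` using variables documented above (values are placeholders to copy and edit):

```bash
# .env.example: copy to .env and fill in real values
BASIC_MEMORY_ENV=dev
BASIC_MEMORY_LOG_LEVEL=DEBUG
BASIC_MEMORY_PROJECT_ROOT=/app/data
BASIC_MEMORY_API_URL=http://localhost:8000
```
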
408 | ## Security Considerations
409 |
410 | 1. **Never commit env vars with secrets:**
411 | ```bash
412 | # .env (not committed)
413 | BASIC_MEMORY_CLOUD_SECRET_KEY=secret123
414 |
415 | # .gitignore
416 | .env
417 | ```
418 |
419 | 2. **Use secret management for production:**
420 | ```bash
421 | # Kubernetes secrets
422 | kubectl create secret generic basic-memory-secrets \
423 | --from-literal=api-key=$API_KEY
424 |
425 | # Reference in deployment
426 | env:
427 | - name: BASIC_MEMORY_API_KEY
428 | valueFrom:
429 | secretKeyRef:
430 | name: basic-memory-secrets
431 | key: api-key
432 | ```
433 |
434 | 3. **Audit environment in logs:**
435 | ```python
436 | # Don't log secret values
437 | env_vars = {
438 | k: "***" if "SECRET" in k else v
439 | for k, v in os.environ.items()
440 | if k.startswith("BASIC_MEMORY_")
441 | }
442 | logger.info(f"Config loaded with env: {env_vars}")
443 | ```
444 |
445 | ## See Also
446 |
447 | - `project-root-env-var.md` - BASIC_MEMORY_PROJECT_ROOT usage
448 | - `basic-memory-home.md` - BASIC_MEMORY_HOME usage
449 | - Configuration reference documentation
450 |
```
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/knowledge_router.py:
--------------------------------------------------------------------------------
```python
1 | """Router for knowledge graph operations."""
2 |
3 | from typing import Annotated
4 |
5 | from fastapi import APIRouter, HTTPException, BackgroundTasks, Depends, Query, Response
6 | from loguru import logger
7 |
8 | from basic_memory.deps import (
9 | EntityServiceDep,
10 | get_search_service,
11 | SearchServiceDep,
12 | LinkResolverDep,
13 | ProjectPathDep,
14 | FileServiceDep,
15 | ProjectConfigDep,
16 | AppConfigDep,
17 | SyncServiceDep,
18 | )
19 | from basic_memory.schemas import (
20 | EntityListResponse,
21 | EntityResponse,
22 | DeleteEntitiesResponse,
23 | DeleteEntitiesRequest,
24 | )
25 | from basic_memory.schemas.request import EditEntityRequest, MoveEntityRequest
26 | from basic_memory.schemas.base import Permalink, Entity
27 |
28 | router = APIRouter(prefix="/knowledge", tags=["knowledge"])
29 |
30 |
31 | async def resolve_relations_background(sync_service, entity_id: int, entity_permalink: str) -> None:
32 | """Background task to resolve relations for a specific entity.
33 |
34 | This runs asynchronously after the API response is sent, preventing
35 | long delays when creating entities with many relations.
36 | """
37 | try:
38 | # Only resolve relations for the newly created entity
39 | await sync_service.resolve_relations(entity_id=entity_id)
40 | logger.debug(
41 | f"Background: Resolved relations for entity {entity_permalink} (id={entity_id})"
42 | )
43 | except Exception as e:
44 | # Log but don't fail - this is a background task
45 | logger.warning(
46 | f"Background: Failed to resolve relations for entity {entity_permalink}: {e}"
47 | )
48 |
49 |
50 | ## Create endpoints
51 |
52 |
53 | @router.post("/entities", response_model=EntityResponse)
54 | async def create_entity(
55 | data: Entity,
56 | background_tasks: BackgroundTasks,
57 | entity_service: EntityServiceDep,
58 | search_service: SearchServiceDep,
59 | ) -> EntityResponse:
60 | """Create an entity."""
61 | logger.info(
62 | "API request", endpoint="create_entity", entity_type=data.entity_type, title=data.title
63 | )
64 |
65 | entity = await entity_service.create_entity(data)
66 |
67 | # reindex
68 | await search_service.index_entity(entity, background_tasks=background_tasks)
69 | result = EntityResponse.model_validate(entity)
70 |
71 | logger.info(
72 | f"API response: endpoint='create_entity' title={result.title}, permalink={result.permalink}, status_code=201"
73 | )
74 | return result
75 |
76 |
77 | @router.put("/entities/{permalink:path}", response_model=EntityResponse)
78 | async def create_or_update_entity(
79 | project: ProjectPathDep,
80 | permalink: Permalink,
81 | data: Entity,
82 | response: Response,
83 | background_tasks: BackgroundTasks,
84 | entity_service: EntityServiceDep,
85 | search_service: SearchServiceDep,
86 | file_service: FileServiceDep,
87 | sync_service: SyncServiceDep,
88 | ) -> EntityResponse:
89 | """Create or update an entity. If entity exists, it will be updated, otherwise created."""
90 | logger.info(
91 | f"API request: create_or_update_entity for {project=}, {permalink=}, {data.entity_type=}, {data.title=}"
92 | )
93 |
94 | # Validate permalink matches
95 | if data.permalink != permalink:
96 | logger.warning(
97 | f"API validation error: creating/updating entity with permalink mismatch - url={permalink}, data={data.permalink}",
98 | )
99 | raise HTTPException(
100 | status_code=400,
101 | detail=f"Entity permalink {data.permalink} must match URL path: '{permalink}'",
102 | )
103 |
104 | # Try create_or_update operation
105 | entity, created = await entity_service.create_or_update_entity(data)
106 | response.status_code = 201 if created else 200
107 |
108 | # reindex
109 | await search_service.index_entity(entity, background_tasks=background_tasks)
110 |
111 | # Schedule relation resolution as a background task for new entities
112 | # This prevents blocking the API response while resolving potentially many relations
113 | if created:
114 | background_tasks.add_task(
115 | resolve_relations_background, sync_service, entity.id, entity.permalink or ""
116 | )
117 |
118 | result = EntityResponse.model_validate(entity)
119 |
120 | logger.info(
121 | f"API response: {result.title=}, {result.permalink=}, {created=}, status_code={response.status_code}"
122 | )
123 | return result
124 |
125 |
126 | @router.patch("/entities/{identifier:path}", response_model=EntityResponse)
127 | async def edit_entity(
128 | identifier: str,
129 | data: EditEntityRequest,
130 | background_tasks: BackgroundTasks,
131 | entity_service: EntityServiceDep,
132 | search_service: SearchServiceDep,
133 | ) -> EntityResponse:
134 | """Edit an existing entity using various operations like append, prepend, find_replace, or replace_section.
135 |
136 | This endpoint allows for targeted edits without requiring the full entity content.
137 | """
138 | logger.info(
139 | f"API request: endpoint='edit_entity', identifier='{identifier}', operation='{data.operation}'"
140 | )
141 |
142 | try:
143 | # Edit the entity using the service
144 | entity = await entity_service.edit_entity(
145 | identifier=identifier,
146 | operation=data.operation,
147 | content=data.content,
148 | section=data.section,
149 | find_text=data.find_text,
150 | expected_replacements=data.expected_replacements,
151 | )
152 |
153 | # Reindex the updated entity
154 | await search_service.index_entity(entity, background_tasks=background_tasks)
155 |
156 | # Return the updated entity response
157 | result = EntityResponse.model_validate(entity)
158 |
159 | logger.info(
160 | "API response",
161 | endpoint="edit_entity",
162 | identifier=identifier,
163 | operation=data.operation,
164 | permalink=result.permalink,
165 | status_code=200,
166 | )
167 |
168 | return result
169 |
170 | except Exception as e:
171 | logger.error(f"Error editing entity: {e}")
172 | raise HTTPException(status_code=400, detail=str(e))
173 |
174 |
175 | @router.post("/move")
176 | async def move_entity(
177 | data: MoveEntityRequest,
178 | background_tasks: BackgroundTasks,
179 | entity_service: EntityServiceDep,
180 | project_config: ProjectConfigDep,
181 | app_config: AppConfigDep,
182 | search_service: SearchServiceDep,
183 | ) -> EntityResponse:
184 | """Move an entity to a new file location with project consistency.
185 |
186 | This endpoint moves a note to a different path while maintaining project
187 | consistency and optionally updating permalinks based on configuration.
188 | """
189 | logger.info(
190 | f"API request: endpoint='move_entity', identifier='{data.identifier}', destination='{data.destination_path}'"
191 | )
192 |
193 | try:
194 | # Move the entity using the service
195 | moved_entity = await entity_service.move_entity(
196 | identifier=data.identifier,
197 | destination_path=data.destination_path,
198 | project_config=project_config,
199 | app_config=app_config,
200 | )
201 |
202 | # Get the moved entity to reindex it
203 | entity = await entity_service.link_resolver.resolve_link(data.destination_path)
204 | if entity:
205 | await search_service.index_entity(entity, background_tasks=background_tasks)
206 |
207 | logger.info(
208 | "API response",
209 | endpoint="move_entity",
210 | identifier=data.identifier,
211 | destination=data.destination_path,
212 | status_code=200,
213 | )
214 | result = EntityResponse.model_validate(moved_entity)
215 | return result
216 |
217 | except Exception as e:
218 | logger.error(f"Error moving entity: {e}")
219 | raise HTTPException(status_code=400, detail=str(e))
220 |
221 |
222 | ## Read endpoints
223 |
224 |
225 | @router.get("/entities/{identifier:path}", response_model=EntityResponse)
226 | async def get_entity(
227 | entity_service: EntityServiceDep,
228 | link_resolver: LinkResolverDep,
229 | identifier: str,
230 | ) -> EntityResponse:
231 |     """Get a specific entity by file path or permalink.
232 |
233 |     Args:
234 |         identifier: Entity file path or permalink
235 |         entity_service: EntityService
236 |         link_resolver: LinkResolver
237 |     """
238 | logger.info(f"request: get_entity with identifier={identifier}")
239 | entity = await link_resolver.resolve_link(identifier)
240 | if not entity:
241 | raise HTTPException(status_code=404, detail=f"Entity {identifier} not found")
242 |
243 | result = EntityResponse.model_validate(entity)
244 | return result
245 |
246 |
247 | @router.get("/entities", response_model=EntityListResponse)
248 | async def get_entities(
249 | entity_service: EntityServiceDep,
250 | permalink: Annotated[list[str] | None, Query()] = None,
251 | ) -> EntityListResponse:
252 | """Open specific entities"""
253 | logger.info(f"request: get_entities with permalinks={permalink}")
254 |
255 | entities = await entity_service.get_entities_by_permalinks(permalink) if permalink else []
256 | result = EntityListResponse(
257 | entities=[EntityResponse.model_validate(entity) for entity in entities]
258 | )
259 | return result
260 |
261 |
262 | ## Delete endpoints
263 |
264 |
265 | @router.delete("/entities/{identifier:path}", response_model=DeleteEntitiesResponse)
266 | async def delete_entity(
267 | identifier: str,
268 | background_tasks: BackgroundTasks,
269 | entity_service: EntityServiceDep,
270 | link_resolver: LinkResolverDep,
271 | search_service=Depends(get_search_service),
272 | ) -> DeleteEntitiesResponse:
273 | """Delete a single entity and remove from search index."""
274 | logger.info(f"request: delete_entity with identifier={identifier}")
275 |
276 | entity = await link_resolver.resolve_link(identifier)
277 | if entity is None:
278 | return DeleteEntitiesResponse(deleted=False)
279 |
280 | # Delete the entity
281 | deleted = await entity_service.delete_entity(entity.permalink or entity.id)
282 |
283 | # Remove from search index (entity, observations, and relations)
284 | background_tasks.add_task(search_service.handle_delete, entity)
285 |
286 | result = DeleteEntitiesResponse(deleted=deleted)
287 | return result
288 |
289 |
290 | @router.post("/entities/delete", response_model=DeleteEntitiesResponse)
291 | async def delete_entities(
292 | data: DeleteEntitiesRequest,
293 | background_tasks: BackgroundTasks,
294 | entity_service: EntityServiceDep,
295 | search_service=Depends(get_search_service),
296 | ) -> DeleteEntitiesResponse:
297 | """Delete entities and remove from search index."""
298 | logger.info(f"request: delete_entities with data={data}")
299 | deleted = False
300 |
301 |     # Delete each entity and remove it from the search index ('deleted' reflects the last result)
302 | for permalink in data.permalinks:
303 | deleted = await entity_service.delete_entity(permalink)
304 | background_tasks.add_task(search_service.delete_by_permalink, permalink)
305 |
306 | result = DeleteEntitiesResponse(deleted=deleted)
307 | return result
308 |
```
--------------------------------------------------------------------------------
/tests/cli/test_cloud_authentication.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for cloud authentication and subscription validation."""
2 |
3 | from unittest.mock import AsyncMock, Mock, patch
4 |
5 | import httpx
6 | import pytest
7 | from typer.testing import CliRunner
8 |
9 | from basic_memory.cli.app import app
10 | from basic_memory.cli.commands.cloud.api_client import (
11 | CloudAPIError,
12 | SubscriptionRequiredError,
13 | make_api_request,
14 | )
15 |
16 |
17 | class TestAPIClientErrorHandling:
18 | """Tests for API client error handling."""
19 |
20 | @pytest.mark.asyncio
21 | async def test_parse_subscription_required_error(self):
22 | """Test parsing 403 subscription_required error response."""
23 | # Mock httpx response with subscription error
24 | mock_response = Mock(spec=httpx.Response)
25 | mock_response.status_code = 403
26 | mock_response.json.return_value = {
27 | "detail": {
28 | "error": "subscription_required",
29 | "message": "Active subscription required for CLI access",
30 | "subscribe_url": "https://basicmemory.com/subscribe",
31 | }
32 | }
33 | mock_response.headers = {}
34 |
35 | # Create HTTPStatusError with the mock response
36 | http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)
37 |
38 | # Mock httpx client to raise the error
39 | with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
40 | mock_instance = AsyncMock()
41 | mock_instance.request = AsyncMock(side_effect=http_error)
42 | mock_client.return_value.__aenter__.return_value = mock_instance
43 |
44 | # Mock auth to return a token
45 | with patch(
46 | "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
47 | return_value={"Authorization": "Bearer test-token"},
48 | ):
49 | # Should raise SubscriptionRequiredError
50 | with pytest.raises(SubscriptionRequiredError) as exc_info:
51 | await make_api_request("GET", "https://test.com/api/endpoint")
52 |
53 | # Verify exception details
54 | error = exc_info.value
55 | assert error.status_code == 403
56 | assert error.subscribe_url == "https://basicmemory.com/subscribe"
57 | assert "Active subscription required" in str(error)
58 |
59 | @pytest.mark.asyncio
60 | async def test_parse_subscription_required_error_flat_format(self):
61 | """Test parsing 403 subscription_required error in flat format (backward compatibility)."""
62 | # Mock httpx response with subscription error in flat format
63 | mock_response = Mock(spec=httpx.Response)
64 | mock_response.status_code = 403
65 | mock_response.json.return_value = {
66 | "error": "subscription_required",
67 | "message": "Active subscription required",
68 | "subscribe_url": "https://basicmemory.com/subscribe",
69 | }
70 | mock_response.headers = {}
71 |
72 | # Create HTTPStatusError with the mock response
73 | http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)
74 |
75 | # Mock httpx client to raise the error
76 | with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
77 | mock_instance = AsyncMock()
78 | mock_instance.request = AsyncMock(side_effect=http_error)
79 | mock_client.return_value.__aenter__.return_value = mock_instance
80 |
81 | # Mock auth to return a token
82 | with patch(
83 | "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
84 | return_value={"Authorization": "Bearer test-token"},
85 | ):
86 | # Should raise SubscriptionRequiredError
87 | with pytest.raises(SubscriptionRequiredError) as exc_info:
88 | await make_api_request("GET", "https://test.com/api/endpoint")
89 |
90 | # Verify exception details
91 | error = exc_info.value
92 | assert error.status_code == 403
93 | assert error.subscribe_url == "https://basicmemory.com/subscribe"
94 |
95 | @pytest.mark.asyncio
96 | async def test_parse_generic_403_error(self):
97 | """Test parsing 403 error without subscription_required flag."""
98 | # Mock httpx response with generic 403 error
99 | mock_response = Mock(spec=httpx.Response)
100 | mock_response.status_code = 403
101 | mock_response.json.return_value = {
102 | "error": "forbidden",
103 | "message": "Access denied",
104 | }
105 | mock_response.headers = {}
106 |
107 | # Create HTTPStatusError with the mock response
108 | http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)
109 |
110 | # Mock httpx client to raise the error
111 | with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
112 | mock_instance = AsyncMock()
113 | mock_instance.request = AsyncMock(side_effect=http_error)
114 | mock_client.return_value.__aenter__.return_value = mock_instance
115 |
116 | # Mock auth to return a token
117 | with patch(
118 | "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
119 | return_value={"Authorization": "Bearer test-token"},
120 | ):
121 | # Should raise generic CloudAPIError
122 | with pytest.raises(CloudAPIError) as exc_info:
123 | await make_api_request("GET", "https://test.com/api/endpoint")
124 |
125 | # Should not be a SubscriptionRequiredError
126 | error = exc_info.value
127 | assert not isinstance(error, SubscriptionRequiredError)
128 | assert error.status_code == 403
129 |
130 |
131 | class TestLoginCommand:
132 | """Tests for cloud login command with subscription validation."""
133 |
134 | def test_login_without_subscription_shows_error(self):
135 | """Test login command displays error when subscription is required."""
136 | runner = CliRunner()
137 |
138 | # Mock successful OAuth login
139 | mock_auth = AsyncMock()
140 | mock_auth.login = AsyncMock(return_value=True)
141 |
142 | # Mock API request to raise SubscriptionRequiredError
143 | async def mock_make_api_request(*args, **kwargs):
144 | raise SubscriptionRequiredError(
145 | message="Active subscription required for CLI access",
146 | subscribe_url="https://basicmemory.com/subscribe",
147 | )
148 |
149 | with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
150 | with patch(
151 | "basic_memory.cli.commands.cloud.core_commands.make_api_request",
152 | side_effect=mock_make_api_request,
153 | ):
154 | with patch(
155 | "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
156 | return_value=("client_id", "domain", "https://cloud.example.com"),
157 | ):
158 | # Run login command
159 | result = runner.invoke(app, ["cloud", "login"])
160 |
161 | # Should exit with error
162 | assert result.exit_code == 1
163 |
164 | # Should display subscription error
165 | assert "Subscription Required" in result.stdout
166 | assert "Active subscription required" in result.stdout
167 | assert "https://basicmemory.com/subscribe" in result.stdout
168 | assert "bm cloud login" in result.stdout
169 |
170 | def test_login_with_subscription_succeeds(self):
171 | """Test login command succeeds when user has active subscription."""
172 | runner = CliRunner()
173 |
174 | # Mock successful OAuth login
175 | mock_auth = AsyncMock()
176 | mock_auth.login = AsyncMock(return_value=True)
177 |
178 | # Mock successful API request (subscription valid)
179 | mock_response = Mock(spec=httpx.Response)
180 | mock_response.status_code = 200
181 | mock_response.json.return_value = {"status": "healthy"}
182 |
183 | async def mock_make_api_request(*args, **kwargs):
184 | return mock_response
185 |
186 | with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
187 | with patch(
188 | "basic_memory.cli.commands.cloud.core_commands.make_api_request",
189 | side_effect=mock_make_api_request,
190 | ):
191 | with patch(
192 | "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
193 | return_value=("client_id", "domain", "https://cloud.example.com"),
194 | ):
195 | # Mock ConfigManager to avoid writing to real config
196 | mock_config_manager = Mock()
197 | mock_config = Mock()
198 | mock_config.cloud_mode = False
199 | mock_config_manager.load_config.return_value = mock_config
200 | mock_config_manager.config = mock_config
201 |
202 | with patch(
203 | "basic_memory.cli.commands.cloud.core_commands.ConfigManager",
204 | return_value=mock_config_manager,
205 | ):
206 | # Run login command
207 | result = runner.invoke(app, ["cloud", "login"])
208 |
209 | # Should succeed
210 | assert result.exit_code == 0
211 |
212 | # Should enable cloud mode
213 | assert mock_config.cloud_mode is True
214 | mock_config_manager.save_config.assert_called_once()
215 |
216 | # Should display success message
217 | assert "Cloud mode enabled" in result.stdout
218 |
219 | def test_login_authentication_failure(self):
220 | """Test login command handles authentication failure."""
221 | runner = CliRunner()
222 |
223 | # Mock failed OAuth login
224 | mock_auth = AsyncMock()
225 | mock_auth.login = AsyncMock(return_value=False)
226 |
227 | with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
228 | with patch(
229 | "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
230 | return_value=("client_id", "domain", "https://cloud.example.com"),
231 | ):
232 | # Run login command
233 | result = runner.invoke(app, ["cloud", "login"])
234 |
235 | # Should exit with error
236 | assert result.exit_code == 1
237 |
238 | # Should display login failed message
239 | assert "Login failed" in result.stdout
240 |
```
--------------------------------------------------------------------------------
/tests/repository/test_project_repository.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the ProjectRepository."""
2 |
3 | from datetime import datetime, timezone
4 | from pathlib import Path
5 |
6 | import pytest
7 | import pytest_asyncio
8 | from sqlalchemy import select
9 |
10 | from basic_memory import db
11 | from basic_memory.models.project import Project
12 | from basic_memory.repository.project_repository import ProjectRepository
13 |
14 |
15 | @pytest_asyncio.fixture
16 | async def sample_project(project_repository: ProjectRepository) -> Project:
17 | """Create a sample project for testing."""
18 | project_data = {
19 | "name": "Sample Project",
20 | "description": "A sample project",
21 | "path": "/sample/project/path",
22 | "is_active": True,
23 | "is_default": False,
24 | "created_at": datetime.now(timezone.utc),
25 | "updated_at": datetime.now(timezone.utc),
26 | }
27 | return await project_repository.create(project_data)
28 |
29 |
30 | @pytest.mark.asyncio
31 | async def test_create_project(project_repository: ProjectRepository):
32 | """Test creating a new project."""
33 | project_data = {
34 | "name": "Sample Project",
35 | "description": "A sample project",
36 | "path": "/sample/project/path",
37 | "is_active": True,
38 | "is_default": False,
39 | }
40 | project = await project_repository.create(project_data)
41 |
42 | # Verify returned object
43 | assert project.id is not None
44 | assert project.name == "Sample Project"
45 | assert project.description == "A sample project"
46 | assert project.path == "/sample/project/path"
47 | assert project.is_active is True
48 | assert project.is_default is False
49 | assert isinstance(project.created_at, datetime)
50 | assert isinstance(project.updated_at, datetime)
51 |
52 | # Verify permalink was generated correctly
53 | assert project.permalink == "sample-project"
54 |
55 | # Verify in database
56 | found = await project_repository.find_by_id(project.id)
57 | assert found is not None
58 | assert found.id == project.id
59 | assert found.name == project.name
60 | assert found.description == project.description
61 | assert found.path == project.path
62 | assert found.permalink == "sample-project"
63 | assert found.is_active is True
64 | assert found.is_default is False
65 |
66 |
67 | @pytest.mark.asyncio
68 | async def test_get_by_name(project_repository: ProjectRepository, sample_project: Project):
69 | """Test getting a project by name."""
70 | # Test exact match
71 | found = await project_repository.get_by_name(sample_project.name)
72 | assert found is not None
73 | assert found.id == sample_project.id
74 | assert found.name == sample_project.name
75 |
76 | # Test non-existent name
77 | found = await project_repository.get_by_name("Non-existent Project")
78 | assert found is None
79 |
80 |
81 | @pytest.mark.asyncio
82 | async def test_get_by_permalink(project_repository: ProjectRepository, sample_project: Project):
83 | """Test getting a project by permalink."""
84 | # Verify the permalink value
85 | assert sample_project.permalink == "sample-project"
86 |
87 | # Test exact match
88 | found = await project_repository.get_by_permalink(sample_project.permalink)
89 | assert found is not None
90 | assert found.id == sample_project.id
91 | assert found.permalink == sample_project.permalink
92 |
93 | # Test non-existent permalink
94 | found = await project_repository.get_by_permalink("non-existent-project")
95 | assert found is None
96 |
97 |
98 | @pytest.mark.asyncio
99 | async def test_get_by_path(project_repository: ProjectRepository, sample_project: Project):
100 | """Test getting a project by path."""
101 | # Test exact match
102 | found = await project_repository.get_by_path(sample_project.path)
103 | assert found is not None
104 | assert found.id == sample_project.id
105 | assert found.path == sample_project.path
106 |
107 | # Test with Path object
108 | found = await project_repository.get_by_path(Path(sample_project.path))
109 | assert found is not None
110 | assert found.id == sample_project.id
111 | assert found.path == sample_project.path
112 |
113 | # Test non-existent path
114 | found = await project_repository.get_by_path("/non/existent/path")
115 | assert found is None
116 |
117 |
118 | @pytest.mark.asyncio
119 | async def test_get_default_project(project_repository: ProjectRepository):
120 | """Test getting the default project."""
121 | # We already have a default project from the test_project fixture
122 | # So just create a non-default project
123 | non_default_project_data = {
124 | "name": "Non-Default Project",
125 | "description": "A non-default project",
126 | "path": "/non-default/project/path",
127 | "is_active": True,
128 | "is_default": None, # Not the default project
129 | }
130 |
131 | await project_repository.create(non_default_project_data)
132 |
133 | # Get default project
134 | default_project = await project_repository.get_default_project()
135 | assert default_project is not None
136 | assert default_project.is_default is True
137 |
138 |
139 | @pytest.mark.asyncio
140 | async def test_get_active_projects(project_repository: ProjectRepository):
141 | """Test getting all active projects."""
142 | # Create active and inactive projects
143 | active_project_data = {
144 | "name": "Active Project",
145 | "description": "An active project",
146 | "path": "/active/project/path",
147 | "is_active": True,
148 | }
149 | inactive_project_data = {
150 | "name": "Inactive Project",
151 | "description": "An inactive project",
152 | "path": "/inactive/project/path",
153 | "is_active": False,
154 | }
155 |
156 | await project_repository.create(active_project_data)
157 | await project_repository.create(inactive_project_data)
158 |
159 | # Get active projects
160 | active_projects = await project_repository.get_active_projects()
161 | assert len(active_projects) >= 1 # Could be more from other tests
162 |
163 | # Verify that all returned projects are active
164 | for project in active_projects:
165 | assert project.is_active is True
166 |
167 | # Verify active project is included
168 | active_names = [p.name for p in active_projects]
169 | assert "Active Project" in active_names
170 |
171 | # Verify inactive project is not included
172 | assert "Inactive Project" not in active_names
173 |
174 |
175 | @pytest.mark.asyncio
176 | async def test_set_as_default(project_repository: ProjectRepository, test_project: Project):
177 | """Test setting a project as default."""
178 | # The test_project fixture is already the default
179 | # Create a non-default project
180 | project2_data = {
181 | "name": "Project 2",
182 | "description": "Project 2",
183 | "path": "/project2/path",
184 | "is_active": True,
185 | "is_default": None, # Not default
186 | }
187 |
188 | # Get the existing default project
189 | project1 = test_project
190 | project2 = await project_repository.create(project2_data)
191 |
192 | # Verify initial state
193 | assert project1.is_default is True
194 | assert project2.is_default is None
195 |
196 | # Set project2 as default
197 | updated_project2 = await project_repository.set_as_default(project2.id)
198 | assert updated_project2 is not None
199 | assert updated_project2.is_default is True
200 |
201 | # Verify project1 is no longer default
202 | project1_updated = await project_repository.find_by_id(project1.id)
203 | assert project1_updated is not None
204 | assert project1_updated.is_default is None
205 |
206 | # Verify project2 is now default
207 | project2_updated = await project_repository.find_by_id(project2.id)
208 | assert project2_updated is not None
209 | assert project2_updated.is_default is True
210 |
211 |
212 | @pytest.mark.asyncio
213 | async def test_update_project(project_repository: ProjectRepository, sample_project: Project):
214 | """Test updating a project."""
215 | # Update project
216 | updated_data = {
217 | "name": "Updated Project Name",
218 | "description": "Updated description",
219 | "path": "/updated/path",
220 | }
221 | updated_project = await project_repository.update(sample_project.id, updated_data)
222 |
223 | # Verify returned object
224 | assert updated_project is not None
225 | assert updated_project.id == sample_project.id
226 | assert updated_project.name == "Updated Project Name"
227 | assert updated_project.description == "Updated description"
228 | assert updated_project.path == "/updated/path"
229 |
230 | # Verify permalink was updated based on new name
231 | assert updated_project.permalink == "updated-project-name"
232 |
233 | # Verify in database
234 | found = await project_repository.find_by_id(sample_project.id)
235 | assert found is not None
236 | assert found.name == "Updated Project Name"
237 | assert found.description == "Updated description"
238 | assert found.path == "/updated/path"
239 | assert found.permalink == "updated-project-name"
240 |
241 | # Verify we can find by the new permalink
242 | found_by_permalink = await project_repository.get_by_permalink("updated-project-name")
243 | assert found_by_permalink is not None
244 | assert found_by_permalink.id == sample_project.id
245 |
246 |
247 | @pytest.mark.asyncio
248 | async def test_delete_project(project_repository: ProjectRepository, sample_project: Project):
249 | """Test deleting a project."""
250 | # Delete project
251 | result = await project_repository.delete(sample_project.id)
252 | assert result is True
253 |
254 | # Verify deletion
255 | deleted = await project_repository.find_by_id(sample_project.id)
256 | assert deleted is None
257 |
258 | # Verify with direct database query
259 | async with db.scoped_session(project_repository.session_maker) as session:
260 | query = select(Project).filter(Project.id == sample_project.id)
261 | result = await session.execute(query)
262 | assert result.scalar_one_or_none() is None
263 |
264 |
265 | @pytest.mark.asyncio
266 | async def test_delete_nonexistent_project(project_repository: ProjectRepository):
267 | """Test deleting a project that doesn't exist."""
268 | result = await project_repository.delete(999) # Non-existent ID
269 | assert result is False
270 |
271 |
272 | @pytest.mark.asyncio
273 | async def test_update_path(project_repository: ProjectRepository, sample_project: Project):
274 | """Test updating a project's path."""
275 | new_path = "/new/project/path"
276 |
277 | # Update the project path
278 | updated_project = await project_repository.update_path(sample_project.id, new_path)
279 |
280 | # Verify returned object
281 | assert updated_project is not None
282 | assert updated_project.id == sample_project.id
283 | assert updated_project.path == new_path
284 | assert updated_project.name == sample_project.name # Other fields unchanged
285 |
286 | # Verify in database
287 | found = await project_repository.find_by_id(sample_project.id)
288 | assert found is not None
289 | assert found.path == new_path
290 | assert found.name == sample_project.name
291 |
292 |
293 | @pytest.mark.asyncio
294 | async def test_update_path_nonexistent_project(project_repository: ProjectRepository):
295 | """Test updating path for a project that doesn't exist."""
296 | result = await project_repository.update_path(999, "/some/path") # Non-existent ID
297 | assert result is None
298 |
```