This is page 2 of 23. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .claude
│   ├── agents
│   │   ├── python-developer.md
│   │   └── system-architect.md
│   └── commands
│       ├── release
│       │   ├── beta.md
│       │   ├── changelog.md
│       │   ├── release-check.md
│       │   └── release.md
│       ├── spec.md
│       └── test-live.md
├── .dockerignore
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── documentation.md
│   │   └── feature_request.md
│   └── workflows
│       ├── claude-code-review.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── dev-release.yml
│       ├── docker.yml
│       ├── pr-title.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── ai-assistant-guide-extended.md
│   ├── character-handling.md
│   ├── cloud-cli.md
│   └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│   ├── SPEC-1 Specification-Driven Development Process.md
│   ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│   ├── SPEC-11 Basic Memory API Performance Optimization.md
│   ├── SPEC-12 OpenTelemetry Observability.md
│   ├── SPEC-13 CLI Authentication with Subscription Validation.md
│   ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│   ├── SPEC-16 MCP Cloud Service Consolidation.md
│   ├── SPEC-17 Semantic Search with ChromaDB.md
│   ├── SPEC-18 AI Memory Management Tool.md
│   ├── SPEC-19 Sync Performance and Memory Optimization.md
│   ├── SPEC-2 Slash Commands Reference.md
│   ├── SPEC-3 Agent Definitions.md
│   ├── SPEC-4 Notes Web UI Component Architecture.md
│   ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│   ├── SPEC-6 Explicit Project Parameter Architecture.md
│   ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│   ├── SPEC-8 TigrisFS Integration.md
│   ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│   ├── SPEC-9 Signed Header Tenant Information.md
│   └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│   └── basic_memory
│       ├── __init__.py
│       ├── alembic
│       │   ├── alembic.ini
│       │   ├── env.py
│       │   ├── migrations.py
│       │   ├── script.py.mako
│       │   └── versions
│       │       ├── 3dae7c7b1564_initial_schema.py
│       │       ├── 502b60eaa905_remove_required_from_entity_permalink.py
│       │       ├── 5fe1ab1ccebe_add_projects_table.py
│       │       ├── 647e7a75e2cd_project_constraint_fix.py
│       │       ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│       │       ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│       │       ├── b3c3938bacdb_relation_to_name_unique_index.py
│       │       ├── cc7172b46608_update_search_index_schema.py
│       │       └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── routers
│       │   │   ├── __init__.py
│       │   │   ├── directory_router.py
│       │   │   ├── importer_router.py
│       │   │   ├── knowledge_router.py
│       │   │   ├── management_router.py
│       │   │   ├── memory_router.py
│       │   │   ├── project_router.py
│       │   │   ├── prompt_router.py
│       │   │   ├── resource_router.py
│       │   │   ├── search_router.py
│       │   │   └── utils.py
│       │   └── template_loader.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── auth.py
│       │   ├── commands
│       │   │   ├── __init__.py
│       │   │   ├── cloud
│       │   │   │   ├── __init__.py
│       │   │   │   ├── api_client.py
│       │   │   │   ├── bisync_commands.py
│       │   │   │   ├── cloud_utils.py
│       │   │   │   ├── core_commands.py
│       │   │   │   ├── mount_commands.py
│       │   │   │   ├── rclone_config.py
│       │   │   │   ├── rclone_installer.py
│       │   │   │   ├── upload_command.py
│       │   │   │   └── upload.py
│       │   │   ├── command_utils.py
│       │   │   ├── db.py
│       │   │   ├── import_chatgpt.py
│       │   │   ├── import_claude_conversations.py
│       │   │   ├── import_claude_projects.py
│       │   │   ├── import_memory_json.py
│       │   │   ├── mcp.py
│       │   │   ├── project.py
│       │   │   ├── status.py
│       │   │   ├── sync.py
│       │   │   └── tool.py
│       │   └── main.py
│       ├── config.py
│       ├── db.py
│       ├── deps.py
│       ├── file_utils.py
│       ├── ignore_utils.py
│       ├── importers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chatgpt_importer.py
│       │   ├── claude_conversations_importer.py
│       │   ├── claude_projects_importer.py
│       │   ├── memory_json_importer.py
│       │   └── utils.py
│       ├── markdown
│       │   ├── __init__.py
│       │   ├── entity_parser.py
│       │   ├── markdown_processor.py
│       │   ├── plugins.py
│       │   ├── schemas.py
│       │   └── utils.py
│       ├── mcp
│       │   ├── __init__.py
│       │   ├── async_client.py
│       │   ├── project_context.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── ai_assistant_guide.py
│       │   │   ├── continue_conversation.py
│       │   │   ├── recent_activity.py
│       │   │   ├── search.py
│       │   │   └── utils.py
│       │   ├── resources
│       │   │   ├── ai_assistant_guide.md
│       │   │   └── project_info.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── build_context.py
│       │       ├── canvas.py
│       │       ├── chatgpt_tools.py
│       │       ├── delete_note.py
│       │       ├── edit_note.py
│       │       ├── list_directory.py
│       │       ├── move_note.py
│       │       ├── project_management.py
│       │       ├── read_content.py
│       │       ├── read_note.py
│       │       ├── recent_activity.py
│       │       ├── search.py
│       │       ├── utils.py
│       │       ├── view_note.py
│       │       └── write_note.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── knowledge.py
│       │   ├── project.py
│       │   └── search.py
│       ├── repository
│       │   ├── __init__.py
│       │   ├── entity_repository.py
│       │   ├── observation_repository.py
│       │   ├── project_info_repository.py
│       │   ├── project_repository.py
│       │   ├── relation_repository.py
│       │   ├── repository.py
│       │   └── search_repository.py
│       ├── schemas
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloud.py
│       │   ├── delete.py
│       │   ├── directory.py
│       │   ├── importer.py
│       │   ├── memory.py
│       │   ├── project_info.py
│       │   ├── prompt.py
│       │   ├── request.py
│       │   ├── response.py
│       │   ├── search.py
│       │   └── sync_report.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_service.py
│       │   ├── directory_service.py
│       │   ├── entity_service.py
│       │   ├── exceptions.py
│       │   ├── file_service.py
│       │   ├── initialization.py
│       │   ├── link_resolver.py
│       │   ├── project_service.py
│       │   ├── search_service.py
│       │   └── service.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── background_sync.py
│       │   ├── sync_service.py
│       │   └── watch_service.py
│       ├── templates
│       │   └── prompts
│       │       ├── continue_conversation.hbs
│       │       └── search.hbs
│       └── utils.py
├── test-int
│   ├── BENCHMARKS.md
│   ├── cli
│   │   ├── test_project_commands_integration.py
│   │   ├── test_sync_commands_integration.py
│   │   └── test_version_integration.py
│   ├── conftest.py
│   ├── mcp
│   │   ├── test_build_context_underscore.py
│   │   ├── test_build_context_validation.py
│   │   ├── test_chatgpt_tools_integration.py
│   │   ├── test_default_project_mode_integration.py
│   │   ├── test_delete_note_integration.py
│   │   ├── test_edit_note_integration.py
│   │   ├── test_list_directory_integration.py
│   │   ├── test_move_note_integration.py
│   │   ├── test_project_management_integration.py
│   │   ├── test_project_state_sync_integration.py
│   │   ├── test_read_content_integration.py
│   │   ├── test_read_note_integration.py
│   │   ├── test_search_integration.py
│   │   ├── test_single_project_mcp_integration.py
│   │   └── test_write_note_integration.py
│   ├── test_db_wal_mode.py
│   ├── test_disable_permalinks_integration.py
│   └── test_sync_performance_benchmark.py
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── conftest.py
│   │   ├── test_async_client.py
│   │   ├── test_continue_conversation_template.py
│   │   ├── test_directory_router.py
│   │   ├── test_importer_router.py
│   │   ├── test_knowledge_router.py
│   │   ├── test_management_router.py
│   │   ├── test_memory_router.py
│   │   ├── test_project_router_operations.py
│   │   ├── test_project_router.py
│   │   ├── test_prompt_router.py
│   │   ├── test_relation_background_resolution.py
│   │   ├── test_resource_router.py
│   │   ├── test_search_router.py
│   │   ├── test_search_template.py
│   │   ├── test_template_loader_helpers.py
│   │   └── test_template_loader.py
│   ├── cli
│   │   ├── conftest.py
│   │   ├── test_bisync_commands.py
│   │   ├── test_cli_tools.py
│   │   ├── test_cloud_authentication.py
│   │   ├── test_cloud_utils.py
│   │   ├── test_ignore_utils.py
│   │   ├── test_import_chatgpt.py
│   │   ├── test_import_claude_conversations.py
│   │   ├── test_import_claude_projects.py
│   │   ├── test_import_memory_json.py
│   │   └── test_upload.py
│   ├── conftest.py
│   ├── db
│   │   └── test_issue_254_foreign_key_constraints.py
│   ├── importers
│   │   ├── test_importer_base.py
│   │   └── test_importer_utils.py
│   ├── markdown
│   │   ├── __init__.py
│   │   ├── test_date_frontmatter_parsing.py
│   │   ├── test_entity_parser_error_handling.py
│   │   ├── test_entity_parser.py
│   │   ├── test_markdown_plugins.py
│   │   ├── test_markdown_processor.py
│   │   ├── test_observation_edge_cases.py
│   │   ├── test_parser_edge_cases.py
│   │   ├── test_relation_edge_cases.py
│   │   └── test_task_detection.py
│   ├── mcp
│   │   ├── conftest.py
│   │   ├── test_obsidian_yaml_formatting.py
│   │   ├── test_permalink_collision_file_overwrite.py
│   │   ├── test_prompts.py
│   │   ├── test_resources.py
│   │   ├── test_tool_build_context.py
│   │   ├── test_tool_canvas.py
│   │   ├── test_tool_delete_note.py
│   │   ├── test_tool_edit_note.py
│   │   ├── test_tool_list_directory.py
│   │   ├── test_tool_move_note.py
│   │   ├── test_tool_read_content.py
│   │   ├── test_tool_read_note.py
│   │   ├── test_tool_recent_activity.py
│   │   ├── test_tool_resource.py
│   │   ├── test_tool_search.py
│   │   ├── test_tool_utils.py
│   │   ├── test_tool_view_note.py
│   │   ├── test_tool_write_note.py
│   │   └── tools
│   │       └── test_chatgpt_tools.py
│   ├── Non-MarkdownFileSupport.pdf
│   ├── repository
│   │   ├── test_entity_repository_upsert.py
│   │   ├── test_entity_repository.py
│   │   ├── test_entity_upsert_issue_187.py
│   │   ├── test_observation_repository.py
│   │   ├── test_project_info_repository.py
│   │   ├── test_project_repository.py
│   │   ├── test_relation_repository.py
│   │   ├── test_repository.py
│   │   ├── test_search_repository_edit_bug_fix.py
│   │   └── test_search_repository.py
│   ├── schemas
│   │   ├── test_base_timeframe_minimum.py
│   │   ├── test_memory_serialization.py
│   │   ├── test_memory_url_validation.py
│   │   ├── test_memory_url.py
│   │   ├── test_schemas.py
│   │   └── test_search.py
│   ├── Screenshot.png
│   ├── services
│   │   ├── test_context_service.py
│   │   ├── test_directory_service.py
│   │   ├── test_entity_service_disable_permalinks.py
│   │   ├── test_entity_service.py
│   │   ├── test_file_service.py
│   │   ├── test_initialization.py
│   │   ├── test_link_resolver.py
│   │   ├── test_project_removal_bug.py
│   │   ├── test_project_service_operations.py
│   │   ├── test_project_service.py
│   │   └── test_search_service.py
│   ├── sync
│   │   ├── test_character_conflicts.py
│   │   ├── test_sync_service_incremental.py
│   │   ├── test_sync_service.py
│   │   ├── test_sync_wikilink_issue.py
│   │   ├── test_tmp_files.py
│   │   ├── test_watch_service_edge_cases.py
│   │   ├── test_watch_service_reload.py
│   │   └── test_watch_service.py
│   ├── test_config.py
│   ├── test_db_migration_deduplication.py
│   ├── test_deps.py
│   ├── test_production_cascade_delete.py
│   └── utils
│       ├── test_file_utils.py
│       ├── test_frontmatter_obsidian_compatible.py
│       ├── test_parse_tags.py
│       ├── test_permalink_formatting.py
│       ├── test_utf8_handling.py
│       └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
    ├── api-performance.md
    ├── background-relations.md
    ├── basic-memory-home.md
    ├── bug-fixes.md
    ├── chatgpt-integration.md
    ├── cloud-authentication.md
    ├── cloud-bisync.md
    ├── cloud-mode-usage.md
    ├── cloud-mount.md
    ├── default-project-mode.md
    ├── env-file-removal.md
    ├── env-var-overrides.md
    ├── explicit-project-parameter.md
    ├── gitignore-integration.md
    ├── project-root-env-var.md
    ├── README.md
    └── sqlite-performance.md
```
# Files
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/directory_router.py:
--------------------------------------------------------------------------------
```python
 1 | """Router for directory tree operations."""
 2 | 
 3 | from typing import List, Optional
 4 | 
 5 | from fastapi import APIRouter, Query
 6 | 
 7 | from basic_memory.deps import DirectoryServiceDep, ProjectIdDep
 8 | from basic_memory.schemas.directory import DirectoryNode
 9 | 
10 | router = APIRouter(prefix="/directory", tags=["directory"])
11 | 
12 | 
13 | @router.get("/tree", response_model=DirectoryNode, response_model_exclude_none=True)
14 | async def get_directory_tree(
15 |     directory_service: DirectoryServiceDep,
16 |     project_id: ProjectIdDep,
17 | ):
18 |     """Get hierarchical directory structure from the knowledge base.
19 | 
20 |     Args:
21 |         directory_service: Service for directory operations
22 |         project_id: ID of the current project
23 | 
24 |     Returns:
25 |         DirectoryNode representing the root of the hierarchical tree structure
26 |     """
27 |     # Get a hierarchical directory tree for the specific project
28 |     tree = await directory_service.get_directory_tree()
29 | 
30 |     # Return the hierarchical tree
31 |     return tree
32 | 
33 | 
34 | @router.get("/structure", response_model=DirectoryNode, response_model_exclude_none=True)
35 | async def get_directory_structure(
36 |     directory_service: DirectoryServiceDep,
37 |     project_id: ProjectIdDep,
38 | ):
39 |     """Get folder structure for navigation (no files).
40 | 
41 |     Optimized endpoint for folder tree navigation. Returns only directory nodes
42 |     without file metadata. For full tree with files, use /directory/tree.
43 | 
44 |     Args:
45 |         directory_service: Service for directory operations
46 |         project_id: ID of the current project
47 | 
48 |     Returns:
49 |         DirectoryNode tree containing only folders (type="directory")
50 |     """
51 |     structure = await directory_service.get_directory_structure()
52 |     return structure
53 | 
54 | 
55 | @router.get("/list", response_model=List[DirectoryNode], response_model_exclude_none=True)
56 | async def list_directory(
57 |     directory_service: DirectoryServiceDep,
58 |     project_id: ProjectIdDep,
59 |     dir_name: str = Query("/", description="Directory path to list"),
60 |     depth: int = Query(1, ge=1, le=10, description="Recursion depth (1-10)"),
61 |     file_name_glob: Optional[str] = Query(
62 |         None, description="Glob pattern for filtering file names"
63 |     ),
64 | ):
65 |     """List directory contents with filtering and depth control.
66 | 
67 |     Args:
68 |         directory_service: Service for directory operations
69 |         project_id: ID of the current project
70 |         dir_name: Directory path to list (default: root "/")
71 |         depth: Recursion depth (1-10, default: 1 for immediate children only)
72 |         file_name_glob: Optional glob pattern for filtering file names (e.g., "*.md", "*meeting*")
73 | 
74 |     Returns:
75 |         List of DirectoryNode objects matching the criteria
76 |     """
77 |     # Get directory listing with filtering
78 |     nodes = await directory_service.list_directory(
79 |         dir_name=dir_name,
80 |         depth=depth,
81 |         file_name_glob=file_name_glob,
82 |     )
83 | 
84 |     return nodes
85 | 
```
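
The `/list` endpoint above can be exercised with any HTTP client. A minimal sketch, assuming the API is served locally on port 8000 with the router mounted at the application root (both are assumptions, not repository configuration):

```python
import httpx

# List markdown files up to two levels below the project root.
response = httpx.get(
    "http://localhost:8000/directory/list",
    params={"dir_name": "/", "depth": 2, "file_name_glob": "*.md"},
)
response.raise_for_status()
for node in response.json():
    print(node)  # DirectoryNode fields serialized per the schema above
```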
--------------------------------------------------------------------------------
/src/basic_memory/alembic/env.py:
--------------------------------------------------------------------------------
```python
  1 | """Alembic environment configuration."""
  2 | 
  3 | import os
  4 | from logging.config import fileConfig
  5 | 
  6 | from sqlalchemy import engine_from_config
  7 | from sqlalchemy import pool
  8 | 
  9 | from alembic import context
 10 | 
 11 | from basic_memory.config import ConfigManager
 12 | 
 13 | # set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
 14 | os.environ["BASIC_MEMORY_ENV"] = "test"
 15 | 
 16 | # Import after setting environment variable  # noqa: E402
 17 | from basic_memory.models import Base  # noqa: E402
 18 | 
 19 | # this is the Alembic Config object, which provides
 20 | # access to the values within the .ini file in use.
 21 | config = context.config
 22 | 
 23 | app_config = ConfigManager().config
 24 | # Set the SQLAlchemy URL from our app config
 25 | sqlalchemy_url = f"sqlite:///{app_config.database_path}"
 26 | config.set_main_option("sqlalchemy.url", sqlalchemy_url)
 27 | 
 28 | # print(f"Using SQLAlchemy URL: {sqlalchemy_url}")
 29 | 
 30 | # Interpret the config file for Python logging.
 31 | if config.config_file_name is not None:
 32 |     fileConfig(config.config_file_name)
 33 | 
 34 | # add your model's MetaData object here
 35 | # for 'autogenerate' support
 36 | target_metadata = Base.metadata
 37 | 
 38 | 
 39 | # Add this function to tell Alembic what to include/exclude
 40 | def include_object(object, name, type_, reflected, compare_to):
 41 |     # Ignore SQLite FTS tables
 42 |     if type_ == "table" and name.startswith("search_index"):
 43 |         return False
 44 |     return True
 45 | 
 46 | 
 47 | def run_migrations_offline() -> None:
 48 |     """Run migrations in 'offline' mode.
 49 | 
 50 |     This configures the context with just a URL
 51 |     and not an Engine, though an Engine is acceptable
 52 |     here as well.  By skipping the Engine creation
 53 |     we don't even need a DBAPI to be available.
 54 | 
 55 |     Calls to context.execute() here emit the given string to the
 56 |     script output.
 57 |     """
 58 |     url = config.get_main_option("sqlalchemy.url")
 59 |     context.configure(
 60 |         url=url,
 61 |         target_metadata=target_metadata,
 62 |         literal_binds=True,
 63 |         dialect_opts={"paramstyle": "named"},
 64 |         include_object=include_object,
 65 |         render_as_batch=True,
 66 |     )
 67 | 
 68 |     with context.begin_transaction():
 69 |         context.run_migrations()
 70 | 
 71 | 
 72 | def run_migrations_online() -> None:
 73 |     """Run migrations in 'online' mode.
 74 | 
 75 |     In this scenario we need to create an Engine
 76 |     and associate a connection with the context.
 77 |     """
 78 |     connectable = engine_from_config(
 79 |         config.get_section(config.config_ini_section, {}),
 80 |         prefix="sqlalchemy.",
 81 |         poolclass=pool.NullPool,
 82 |     )
 83 | 
 84 |     with connectable.connect() as connection:
 85 |         context.configure(
 86 |             connection=connection,
 87 |             target_metadata=target_metadata,
 88 |             include_object=include_object,
 89 |             render_as_batch=True,
 90 |         )
 91 | 
 92 |         with context.begin_transaction():
 93 |             context.run_migrations()
 94 | 
 95 | 
 96 | if context.is_offline_mode():
 97 |     run_migrations_offline()
 98 | else:
 99 |     run_migrations_online()
100 | 
```
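
Alembic drives this `env.py` through its config file; a minimal sketch of applying migrations programmatically, assuming the `alembic.ini` path shown in the directory tree and a working directory at the repository root (the repository's own `migrations.py` may wrap this differently):

```python
from alembic import command
from alembic.config import Config

# Path assumed from the directory tree; adjust for your checkout layout.
cfg = Config("src/basic_memory/alembic/alembic.ini")
command.upgrade(cfg, "head")  # apply all pending migrations
```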
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_chatgpt.py:
--------------------------------------------------------------------------------
```python
 1 | """Import command for ChatGPT conversations."""
 2 | 
 3 | import asyncio
 4 | import json
 5 | from pathlib import Path
 6 | from typing import Annotated
 7 | 
 8 | import typer
 9 | from basic_memory.cli.app import import_app
10 | from basic_memory.config import get_project_config
11 | from basic_memory.importers import ChatGPTImporter
12 | from basic_memory.markdown import EntityParser, MarkdownProcessor
13 | from loguru import logger
14 | from rich.console import Console
15 | from rich.panel import Panel
16 | 
17 | console = Console()
18 | 
19 | 
20 | async def get_markdown_processor() -> MarkdownProcessor:
21 |     """Get MarkdownProcessor instance."""
22 |     config = get_project_config()
23 |     entity_parser = EntityParser(config.home)
24 |     return MarkdownProcessor(entity_parser)
25 | 
26 | 
27 | @import_app.command(name="chatgpt", help="Import conversations from ChatGPT JSON export.")
28 | def import_chatgpt(
29 |     conversations_json: Annotated[
30 |         Path, typer.Argument(help="Path to ChatGPT conversations.json file")
31 |     ] = Path("conversations.json"),
32 |     folder: Annotated[
33 |         str, typer.Option(help="The folder to place the files in.")
34 |     ] = "conversations",
35 | ):
36 |     """Import chat conversations from ChatGPT JSON format.
37 | 
38 |     This command will:
39 |     1. Read the complex tree structure of messages
40 |     2. Convert them to linear markdown conversations
41 |     3. Save as clean, readable markdown files
42 | 
43 |     After importing, run 'basic-memory sync' to index the new files.
44 |     """
45 | 
46 |     try:
47 |         if not conversations_json.exists():  # pragma: no cover
48 |             typer.echo(f"Error: File not found: {conversations_json}", err=True)
49 |             raise typer.Exit(1)
50 | 
51 |         # Get markdown processor
52 |         markdown_processor = asyncio.run(get_markdown_processor())
53 |         config = get_project_config()
54 |         # Process the file
55 |         base_path = config.home / folder
56 |         console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
57 | 
58 |         # Create importer and run import
59 |         importer = ChatGPTImporter(config.home, markdown_processor)
60 |         with conversations_json.open("r", encoding="utf-8") as file:
61 |             json_data = json.load(file)
62 |             result = asyncio.run(importer.import_data(json_data, folder))
63 | 
64 |         if not result.success:  # pragma: no cover
65 |             typer.echo(f"Error during import: {result.error_message}", err=True)
66 |             raise typer.Exit(1)
67 | 
68 |         # Show results
69 |         console.print(
70 |             Panel(
71 |                 f"[green]Import complete![/green]\n\n"
72 |                 f"Imported {result.conversations} conversations\n"
73 |                 f"Containing {result.messages} messages",
74 |                 expand=False,
75 |             )
76 |         )
77 | 
78 |         console.print("\nRun 'basic-memory sync' to index the new files.")
79 | 
80 |     except Exception as e:
81 |         logger.error("Import failed")
82 |         typer.echo(f"Error during import: {e}", err=True)
83 |         raise typer.Exit(1)
84 | 
```
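
Because the command is registered on `import_app`, it can be smoke-tested without a subprocess using Typer's test runner. A hedged sketch (the export path and folder name are illustrative):

```python
from typer.testing import CliRunner

from basic_memory.cli.app import import_app

runner = CliRunner()
# "chatgpt" is the command name registered above.
result = runner.invoke(import_app, ["chatgpt", "conversations.json", "--folder", "chats"])
print(result.exit_code)
print(result.output)
```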
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_memory_json.py:
--------------------------------------------------------------------------------
```python
 1 | """Import command for basic-memory CLI to import from JSON memory format."""
 2 | 
 3 | import asyncio
 4 | import json
 5 | from pathlib import Path
 6 | from typing import Annotated
 7 | 
 8 | import typer
 9 | from basic_memory.cli.app import import_app
10 | from basic_memory.config import get_project_config
11 | from basic_memory.importers.memory_json_importer import MemoryJsonImporter
12 | from basic_memory.markdown import EntityParser, MarkdownProcessor
13 | from loguru import logger
14 | from rich.console import Console
15 | from rich.panel import Panel
16 | 
17 | console = Console()
18 | 
19 | 
20 | async def get_markdown_processor() -> MarkdownProcessor:
21 |     """Get MarkdownProcessor instance."""
22 |     config = get_project_config()
23 |     entity_parser = EntityParser(config.home)
24 |     return MarkdownProcessor(entity_parser)
25 | 
26 | 
27 | @import_app.command()
28 | def memory_json(
29 |     json_path: Annotated[Path, typer.Argument(..., help="Path to memory.json file")] = Path(
30 |         "memory.json"
31 |     ),
32 |     destination_folder: Annotated[
33 |         str, typer.Option(help="Optional destination folder within the project")
34 |     ] = "",
35 | ):
36 |     """Import entities and relations from a memory.json file.
37 | 
38 |     This command will:
39 |     1. Read entities and relations from the JSON file
40 |     2. Create markdown files for each entity
41 |     3. Include outgoing relations in each entity's markdown
42 |     """
43 | 
44 |     if not json_path.exists():
45 |         typer.echo(f"Error: File not found: {json_path}", err=True)
46 |         raise typer.Exit(1)
47 | 
48 |     config = get_project_config()
49 |     try:
50 |         # Get markdown processor
51 |         markdown_processor = asyncio.run(get_markdown_processor())
52 | 
53 |         # Create the importer
54 |         importer = MemoryJsonImporter(config.home, markdown_processor)
55 | 
56 |         # Process the file
57 |         base_path = config.home if not destination_folder else config.home / destination_folder
58 |         console.print(f"\nImporting from {json_path}...writing to {base_path}")
59 | 
60 |         # Run the import for json log format
61 |         file_data = []
62 |         with json_path.open("r", encoding="utf-8") as file:
63 |             for line in file:
64 |                 json_data = json.loads(line)
65 |                 file_data.append(json_data)
66 |         result = asyncio.run(importer.import_data(file_data, destination_folder))
67 | 
68 |         if not result.success:  # pragma: no cover
69 |             typer.echo(f"Error during import: {result.error_message}", err=True)
70 |             raise typer.Exit(1)
71 | 
72 |         # Show results
73 |         console.print(
74 |             Panel(
75 |                 f"[green]Import complete![/green]\n\n"
76 |                 f"Created {result.entities} entities\n"
77 |                 f"Added {result.relations} relations\n"
78 |                 f"Skipped {result.skipped_entities} entities\n",
79 |                 expand=False,
80 |             )
81 |         )
82 | 
83 |     except Exception as e:
84 |         logger.error("Import failed")
85 |         typer.echo(f"Error during import: {e}", err=True)
86 |         raise typer.Exit(1)
87 | 
```
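
Unlike the ChatGPT importer above, which loads a single JSON document, this command reads `memory.json` as line-delimited JSON (one object per line). As written, a blank line in the file would raise `json.JSONDecodeError`; a hedged defensive variant of the reading loop:

```python
import json
from pathlib import Path


def read_jsonl(path: Path) -> list[dict]:
    """Read line-delimited JSON, skipping blank lines instead of failing on them."""
    records: list[dict] = []
    with path.open("r", encoding="utf-8") as file:
        for line in file:
            if line.strip():
                records.append(json.loads(line))
    return records
```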
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_claude_projects.py:
--------------------------------------------------------------------------------
```python
 1 | """Import command for basic-memory CLI to import project data from Claude.ai."""
 2 | 
 3 | import asyncio
 4 | import json
 5 | from pathlib import Path
 6 | from typing import Annotated
 7 | 
 8 | import typer
 9 | from basic_memory.cli.app import claude_app
10 | from basic_memory.config import get_project_config
11 | from basic_memory.importers.claude_projects_importer import ClaudeProjectsImporter
12 | from basic_memory.markdown import EntityParser, MarkdownProcessor
13 | from loguru import logger
14 | from rich.console import Console
15 | from rich.panel import Panel
16 | 
17 | console = Console()
18 | 
19 | 
20 | async def get_markdown_processor() -> MarkdownProcessor:
21 |     """Get MarkdownProcessor instance."""
22 |     config = get_project_config()
23 |     entity_parser = EntityParser(config.home)
24 |     return MarkdownProcessor(entity_parser)
25 | 
26 | 
27 | @claude_app.command(name="projects", help="Import projects from Claude.ai.")
28 | def import_projects(
29 |     projects_json: Annotated[Path, typer.Argument(..., help="Path to projects.json file")] = Path(
30 |         "projects.json"
31 |     ),
32 |     base_folder: Annotated[
33 |         str, typer.Option(help="The base folder to place project files in.")
34 |     ] = "projects",
35 | ):
36 |     """Import project data from Claude.ai.
37 | 
38 |     This command will:
39 |     1. Create a directory for each project
40 |     2. Store docs in a docs/ subdirectory
41 |     3. Place prompt template in project root
42 | 
43 |     After importing, run 'basic-memory sync' to index the new files.
44 |     """
45 |     config = get_project_config()
46 |     try:
47 |         if not projects_json.exists():
48 |             typer.echo(f"Error: File not found: {projects_json}", err=True)
49 |             raise typer.Exit(1)
50 | 
51 |         # Get markdown processor
52 |         markdown_processor = asyncio.run(get_markdown_processor())
53 | 
54 |         # Create the importer
55 |         importer = ClaudeProjectsImporter(config.home, markdown_processor)
56 | 
57 |         # Process the file
58 |         base_path = config.home / base_folder if base_folder else config.home
59 |         console.print(f"\nImporting projects from {projects_json}...writing to {base_path}")
60 | 
61 |         # Run the import
62 |         with projects_json.open("r", encoding="utf-8") as file:
63 |             json_data = json.load(file)
64 |             result = asyncio.run(importer.import_data(json_data, base_folder))
65 | 
66 |         if not result.success:  # pragma: no cover
67 |             typer.echo(f"Error during import: {result.error_message}", err=True)
68 |             raise typer.Exit(1)
69 | 
70 |         # Show results
71 |         console.print(
72 |             Panel(
73 |                 f"[green]Import complete![/green]\n\n"
74 |                 f"Imported {result.documents} project documents\n"
75 |                 f"Imported {result.prompts} prompt templates",
76 |                 expand=False,
77 |             )
78 |         )
79 | 
80 |         console.print("\nRun 'basic-memory sync' to index the new files.")
81 | 
82 |     except Exception as e:
83 |         logger.error("Import failed")
84 |         typer.echo(f"Error during import: {e}", err=True)
85 |         raise typer.Exit(1)
86 | 
```
--------------------------------------------------------------------------------
/.claude/commands/release/release.md:
--------------------------------------------------------------------------------
````markdown
 1 | # /release - Create Stable Release
 2 | 
 3 | Create a stable release using the automated justfile target with comprehensive validation.
 4 | 
 5 | ## Usage
 6 | ```
 7 | /release <version>
 8 | ```
 9 | 
10 | **Parameters:**
11 | - `version` (required): Release version like `v0.13.2`
12 | 
13 | ## Implementation
14 | 
15 | You are an expert release manager for the Basic Memory project. When the user runs `/release`, execute the following steps:
16 | 
17 | ### Step 1: Pre-flight Validation
18 | 1. Verify version format matches `v\d+\.\d+\.\d+` pattern
19 | 2. Check current git status for uncommitted changes  
20 | 3. Verify we're on the `main` branch
21 | 4. Confirm no existing tag with this version
22 | 
23 | #### Documentation Validation
24 | 1. **Changelog Check**
25 |    - CHANGELOG.md contains entry for target version
26 |    - Entry includes all major features and fixes
27 |    - Breaking changes are documented
28 | 
29 | ### Step 2: Use Justfile Automation
30 | Execute the automated release process:
31 | ```bash
32 | just release <version>
33 | ```
34 | 
35 | The justfile target handles:
36 | - ✅ Version format validation
37 | - ✅ Git status and branch checks
38 | - ✅ Quality checks (`just check` - lint, format, type-check, tests)
39 | - ✅ Version update in `src/basic_memory/__init__.py`
40 | - ✅ Automatic commit with proper message
41 | - ✅ Tag creation and pushing to GitHub
42 | - ✅ Release workflow trigger
43 | 
44 | ### Step 3: Monitor Release Process
45 | 1. Check that GitHub Actions workflow starts successfully
46 | 2. Monitor workflow completion at: https://github.com/basicmachines-co/basic-memory/actions
47 | 3. Verify PyPI publication
48 | 4. Test installation: `uv tool install basic-memory`
49 | 
50 | ### Step 4: Post-Release Validation
51 | 1. Verify GitHub release is created automatically
52 | 2. Check PyPI publication
53 | 3. Validate release assets
54 | 4. Update any post-release documentation
55 | 
56 | ## Pre-conditions Check
57 | Before starting, verify:
58 | - [ ] All beta testing is complete
59 | - [ ] Critical bugs are fixed
60 | - [ ] Breaking changes are documented
61 | - [ ] CHANGELOG.md is updated (if needed)
62 | - [ ] Version number follows semantic versioning
63 | 
64 | ## Error Handling
65 | - If `just release` fails, examine the error output for specific issues
66 | - If quality checks fail, fix issues and retry
67 | - If changelog entry missing, update CHANGELOG.md and commit before retrying
68 | - If GitHub Actions fail, check workflow logs for debugging
69 | 
70 | ## Success Output
71 | ```
72 | 🎉 Stable Release v0.13.2 Created Successfully!
73 | 
74 | 🏷️  Tag: v0.13.2
75 | 📋 GitHub Release: https://github.com/basicmachines-co/basic-memory/releases/tag/v0.13.2
76 | 📦 PyPI: https://pypi.org/project/basic-memory/0.13.2/
77 | 🚀 GitHub Actions: Completed
78 | 
79 | Install with:
80 | uv tool install basic-memory
81 | 
82 | Users can now upgrade:
83 | uv tool upgrade basic-memory
84 | ```
85 | 
86 | ## Context
87 | - This creates production releases used by end users
88 | - Must pass all quality gates before proceeding
89 | - Uses the automated justfile target for consistency
90 | - Version is automatically updated in `__init__.py`
91 | - Triggers automated GitHub release with changelog
92 | - Leverages uv-dynamic-versioning for package version management
````
--------------------------------------------------------------------------------
/specs/SPEC-2 Slash Commands Reference.md:
--------------------------------------------------------------------------------
````markdown
  1 | ---
  2 | title: 'SPEC-2: Slash Commands Reference'
  3 | type: spec
  4 | permalink: specs/spec-2-slash-commands-reference
  5 | tags:
  6 | - commands
  7 | - process
  8 | - reference
  9 | ---
 10 | 
 11 | # SPEC-2: Slash Commands Reference
 12 | 
 13 | This document defines the slash commands used in our specification-driven development process.
 14 | 
 15 | ## /spec create [name]
 16 | 
 17 | **Purpose**: Create a new specification document
 18 | 
 19 | **Usage**: `/spec create notes-decomposition`
 20 | 
 21 | **Process**:
 22 | 1. Create new spec document in `/specs` folder
 23 | 2. Use SPEC-XXX numbering format (auto-increment)
 24 | 3. Include standard spec template:
 25 |    - Why (reasoning/problem)
 26 |    - What (affected areas)
 27 |    - How (high-level approach)
 28 |    - How to Evaluate (testing/validation)
 29 | 4. Tag appropriately for knowledge graph
 30 | 5. Link to related specs/components
 31 | 
 32 | **Template**:
 33 | ```markdown
 34 | # SPEC-XXX: [Title]
 35 | 
 36 | ## Why
 37 | [Problem statement and reasoning]
 38 | 
 39 | ## What
 40 | [What is affected or changed]
 41 | 
 42 | ## How (High Level)
 43 | [Approach to implementation]
 44 | 
 45 | ## How to Evaluate
 46 | [Testing/validation procedure]
 47 | 
 48 | ## Notes
 49 | [Additional context as needed]
 50 | ```
 51 | 
 52 | ## /spec status
 53 | 
 54 | **Purpose**: Show current status of all specifications
 55 | 
 56 | **Usage**: `/spec status`
 57 | 
 58 | **Process**:
 59 | 1. Search all specs in `/specs` folder
 60 | 2. Display table showing:
 61 |    - Spec number and title
 62 |    - Status (draft, approved, implementing, complete)
 63 |    - Assigned agent (if any)
 64 |    - Last updated
 65 |    - Dependencies
 66 | 
 67 | ## /spec implement [name]
 68 | 
 69 | **Purpose**: Hand specification to appropriate agent for implementation
 70 | 
 71 | **Usage**: `/spec implement SPEC-002`
 72 | 
 73 | **Process**:
 74 | 1. Read the specified spec
 75 | 2. Analyze requirements to determine appropriate agent:
 76 |    - Frontend components → vue-developer
 77 |    - Architecture/system design → system-architect  
 78 |    - Backend/API → python-developer
 79 | 3. Launch agent with spec context
 80 | 4. Agent creates implementation plan
 81 | 5. Update spec with implementation status
 82 | 
 83 | ## /spec review [name]
 84 | 
 85 | **Purpose**: Review implementation against specification criteria
 86 | 
 87 | **Usage**: `/spec review SPEC-002`
 88 | 
 89 | **Process**:
 90 | 1. Read original spec and "How to Evaluate" section
 91 | 2. Examine current implementation
 92 | 3. Test against success criteria
 93 | 4. Document gaps or issues
 94 | 5. Update spec with review results
 95 | 6. Recommend next actions (complete, revise, iterate)
 96 | 
 97 | ## Command Extensions
 98 | 
 99 | As the process evolves, we may add:
100 | - `/spec link [spec1] [spec2]` - Create dependency links
101 | - `/spec archive [name]` - Archive completed specs
102 | - `/spec template [type]` - Create spec from template
103 | - `/spec search [query]` - Search spec content
104 | 
105 | ## References
106 | 
107 | - Claude Slash commands: https://docs.anthropic.com/en/docs/claude-code/slash-commands
108 | 
109 | ## Creating a command
110 | 
111 | Commands are implemented as Claude slash commands: 
112 | 
113 | Location in repo: .claude/commands/
114 | 
115 | In the following example, we create the /optimize command:
116 | ```bash
117 | # Create a project command
118 | mkdir -p .claude/commands
119 | echo "Analyze this code for performance issues and suggest optimizations:" > .claude/commands/optimize.md
120 | ```
121 | 
````
--------------------------------------------------------------------------------
/src/basic_memory/repository/observation_repository.py:
--------------------------------------------------------------------------------
```python
 1 | """Repository for managing Observation objects."""
 2 | 
 3 | from typing import Dict, List, Sequence
 4 | 
 5 | from sqlalchemy import select
 6 | from sqlalchemy.ext.asyncio import async_sessionmaker
 7 | 
 8 | from basic_memory.models import Observation
 9 | from basic_memory.repository.repository import Repository
10 | 
11 | 
12 | class ObservationRepository(Repository[Observation]):
13 |     """Repository for Observation model with memory-specific operations."""
14 | 
15 |     def __init__(self, session_maker: async_sessionmaker, project_id: int):
16 |         """Initialize with session maker and project_id filter.
17 | 
18 |         Args:
19 |             session_maker: SQLAlchemy session maker
20 |             project_id: Project ID to filter all operations by
21 |         """
22 |         super().__init__(session_maker, Observation, project_id=project_id)
23 | 
24 |     async def find_by_entity(self, entity_id: int) -> Sequence[Observation]:
25 |         """Find all observations for a specific entity."""
26 |         query = select(Observation).filter(Observation.entity_id == entity_id)
27 |         result = await self.execute_query(query)
28 |         return result.scalars().all()
29 | 
30 |     async def find_by_context(self, context: str) -> Sequence[Observation]:
31 |         """Find observations with a specific context."""
32 |         query = select(Observation).filter(Observation.context == context)
33 |         result = await self.execute_query(query)
34 |         return result.scalars().all()
35 | 
36 |     async def find_by_category(self, category: str) -> Sequence[Observation]:
37 |         """Find observations with a specific category."""
38 |         query = select(Observation).filter(Observation.category == category)
39 |         result = await self.execute_query(query)
40 |         return result.scalars().all()
41 | 
42 |     async def observation_categories(self) -> Sequence[str]:
43 |         """Return a list of all observation categories."""
44 |         query = select(Observation.category).distinct()
45 |         result = await self.execute_query(query, use_query_options=False)
46 |         return result.scalars().all()
47 | 
48 |     async def find_by_entities(self, entity_ids: List[int]) -> Dict[int, List[Observation]]:
49 |         """Find all observations for multiple entities in a single query.
50 | 
51 |         Args:
52 |             entity_ids: List of entity IDs to fetch observations for
53 | 
54 |         Returns:
55 |             Dictionary mapping entity_id to list of observations
56 |         """
57 |         if not entity_ids:  # pragma: no cover
58 |             return {}
59 | 
60 |         # Query observations for all entities in the list
61 |         query = select(Observation).filter(Observation.entity_id.in_(entity_ids))
62 |         result = await self.execute_query(query)
63 |         observations = result.scalars().all()
64 | 
65 |         # Group observations by entity_id
66 |         observations_by_entity = {}
67 |         for obs in observations:
68 |             if obs.entity_id not in observations_by_entity:
69 |                 observations_by_entity[obs.entity_id] = []
70 |             observations_by_entity[obs.entity_id].append(obs)
71 | 
72 |         return observations_by_entity
73 | 
```
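
A hedged usage sketch of the batch lookup above, assuming an initialized async `session_maker` and illustrative project and entity IDs:

```python
from basic_memory.repository.observation_repository import ObservationRepository


async def show_observation_counts(session_maker) -> None:
    repo = ObservationRepository(session_maker, project_id=1)  # IDs are illustrative
    by_entity = await repo.find_by_entities([1, 2, 3])
    for entity_id, observations in by_entity.items():
        print(f"entity {entity_id}: {len(observations)} observations")
```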
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/import_claude_conversations.py:
--------------------------------------------------------------------------------
```python
 1 | """Import command for basic-memory CLI to import chat data from conversations2.json format."""
 2 | 
 3 | import asyncio
 4 | import json
 5 | from pathlib import Path
 6 | from typing import Annotated
 7 | 
 8 | import typer
 9 | from basic_memory.cli.app import claude_app
10 | from basic_memory.config import get_project_config
11 | from basic_memory.importers.claude_conversations_importer import ClaudeConversationsImporter
12 | from basic_memory.markdown import EntityParser, MarkdownProcessor
13 | from loguru import logger
14 | from rich.console import Console
15 | from rich.panel import Panel
16 | 
17 | console = Console()
18 | 
19 | 
20 | async def get_markdown_processor() -> MarkdownProcessor:
21 |     """Get MarkdownProcessor instance."""
22 |     config = get_project_config()
23 |     entity_parser = EntityParser(config.home)
24 |     return MarkdownProcessor(entity_parser)
25 | 
26 | 
27 | @claude_app.command(name="conversations", help="Import chat conversations from Claude.ai.")
28 | def import_claude(
29 |     conversations_json: Annotated[
30 |         Path, typer.Argument(..., help="Path to conversations.json file")
31 |     ] = Path("conversations.json"),
32 |     folder: Annotated[
33 |         str, typer.Option(help="The folder to place the files in.")
34 |     ] = "conversations",
35 | ):
36 |     """Import chat conversations from conversations2.json format.
37 | 
38 |     This command will:
39 |     1. Read chat data and nested messages
40 |     2. Create markdown files for each conversation
41 |     3. Format content in clean, readable markdown
42 | 
43 |     After importing, run 'basic-memory sync' to index the new files.
44 |     """
45 | 
46 |     config = get_project_config()
47 |     try:
48 |         if not conversations_json.exists():
49 |             typer.echo(f"Error: File not found: {conversations_json}", err=True)
50 |             raise typer.Exit(1)
51 | 
52 |         # Get markdown processor
53 |         markdown_processor = asyncio.run(get_markdown_processor())
54 | 
55 |         # Create the importer
56 |         importer = ClaudeConversationsImporter(config.home, markdown_processor)
57 | 
58 |         # Process the file
59 |         base_path = config.home / folder
60 |         console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
61 | 
62 |         # Run the import
63 |         with conversations_json.open("r", encoding="utf-8") as file:
64 |             json_data = json.load(file)
65 |             result = asyncio.run(importer.import_data(json_data, folder))
66 | 
67 |         if not result.success:  # pragma: no cover
68 |             typer.echo(f"Error during import: {result.error_message}", err=True)
69 |             raise typer.Exit(1)
70 | 
71 |         # Show results
72 |         console.print(
73 |             Panel(
74 |                 f"[green]Import complete![/green]\n\n"
75 |                 f"Imported {result.conversations} conversations\n"
76 |                 f"Containing {result.messages} messages",
77 |                 expand=False,
78 |             )
79 |         )
80 | 
81 |         console.print("\nRun 'basic-memory sync' to index the new files.")
82 | 
83 |     except Exception as e:
84 |         logger.error("Import failed")
85 |         typer.echo(f"Error during import: {e}", err=True)
86 |         raise typer.Exit(1)
87 | 
```
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/cloud_utils.py:
--------------------------------------------------------------------------------
```python
  1 | """Shared utilities for cloud operations."""
  2 | 
  3 | from basic_memory.cli.commands.cloud.api_client import make_api_request
  4 | from basic_memory.config import ConfigManager
  5 | from basic_memory.schemas.cloud import (
  6 |     CloudProjectList,
  7 |     CloudProjectCreateRequest,
  8 |     CloudProjectCreateResponse,
  9 | )
 10 | from basic_memory.utils import generate_permalink
 11 | 
 12 | 
 13 | class CloudUtilsError(Exception):
 14 |     """Exception raised for cloud utility errors."""
 15 | 
 16 |     pass
 17 | 
 18 | 
 19 | async def fetch_cloud_projects() -> CloudProjectList:
 20 |     """Fetch list of projects from cloud API.
 21 | 
 22 |     Returns:
 23 |         CloudProjectList with projects from cloud
 24 |     """
 25 |     try:
 26 |         config_manager = ConfigManager()
 27 |         config = config_manager.config
 28 |         host_url = config.cloud_host.rstrip("/")
 29 | 
 30 |         response = await make_api_request(method="GET", url=f"{host_url}/proxy/projects/projects")
 31 | 
 32 |         return CloudProjectList.model_validate(response.json())
 33 |     except Exception as e:
 34 |         raise CloudUtilsError(f"Failed to fetch cloud projects: {e}") from e
 35 | 
 36 | 
 37 | async def create_cloud_project(project_name: str) -> CloudProjectCreateResponse:
 38 |     """Create a new project on cloud.
 39 | 
 40 |     Args:
 41 |         project_name: Name of project to create
 42 | 
 43 |     Returns:
 44 |         CloudProjectCreateResponse with project details from API
 45 |     """
 46 |     try:
 47 |         config_manager = ConfigManager()
 48 |         config = config_manager.config
 49 |         host_url = config.cloud_host.rstrip("/")
 50 | 
 51 |         # Use generate_permalink to ensure consistent naming
 52 |         project_path = generate_permalink(project_name)
 53 | 
 54 |         project_data = CloudProjectCreateRequest(
 55 |             name=project_name,
 56 |             path=project_path,
 57 |             set_default=False,
 58 |         )
 59 | 
 60 |         response = await make_api_request(
 61 |             method="POST",
 62 |             url=f"{host_url}/proxy/projects/projects",
 63 |             headers={"Content-Type": "application/json"},
 64 |             json_data=project_data.model_dump(),
 65 |         )
 66 | 
 67 |         return CloudProjectCreateResponse.model_validate(response.json())
 68 |     except Exception as e:
 69 |         raise CloudUtilsError(f"Failed to create cloud project '{project_name}': {e}") from e
 70 | 
 71 | 
 72 | async def sync_project(project_name: str) -> None:
 73 |     """Trigger sync for a specific project on cloud.
 74 | 
 75 |     Args:
 76 |         project_name: Name of project to sync
 77 |     """
 78 |     try:
 79 |         from basic_memory.cli.commands.command_utils import run_sync
 80 | 
 81 |         await run_sync(project=project_name)
 82 |     except Exception as e:
 83 |         raise CloudUtilsError(f"Failed to sync project '{project_name}': {e}") from e
 84 | 
 85 | 
 86 | async def project_exists(project_name: str) -> bool:
 87 |     """Check if a project exists on cloud.
 88 | 
 89 |     Args:
 90 |         project_name: Name of project to check
 91 | 
 92 |     Returns:
 93 |         True if project exists, False otherwise
 94 |     """
 95 |     try:
 96 |         projects = await fetch_cloud_projects()
 97 |         project_names = {p.name for p in projects.projects}
 98 |         return project_name in project_names
 99 |     except Exception:
100 |         return False
101 | 
```
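
A hedged sketch of a typical call sequence built from the helpers above, assuming CLI authentication has already been completed (see `auth.py` in the tree):

```python
import asyncio

from basic_memory.cli.commands.cloud.cloud_utils import (
    create_cloud_project,
    project_exists,
    sync_project,
)


async def ensure_and_sync(name: str) -> None:
    # Create the project only if it is not already present on the cloud.
    if not await project_exists(name):
        await create_cloud_project(name)
    await sync_project(name)


asyncio.run(ensure_and_sync("research-notes"))  # project name is illustrative
```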
--------------------------------------------------------------------------------
/src/basic_memory/api/routers/memory_router.py:
--------------------------------------------------------------------------------
```python
 1 | """Routes for memory:// URI operations."""
 2 | 
 3 | from typing import Annotated, Optional
 4 | 
 5 | from fastapi import APIRouter, Query
 6 | from loguru import logger
 7 | 
 8 | from basic_memory.deps import ContextServiceDep, EntityRepositoryDep
 9 | from basic_memory.schemas.base import TimeFrame, parse_timeframe
10 | from basic_memory.schemas.memory import (
11 |     GraphContext,
12 |     normalize_memory_url,
13 | )
14 | from basic_memory.schemas.search import SearchItemType
15 | from basic_memory.api.routers.utils import to_graph_context
16 | 
17 | router = APIRouter(prefix="/memory", tags=["memory"])
18 | 
19 | 
20 | @router.get("/recent", response_model=GraphContext)
21 | async def recent(
22 |     context_service: ContextServiceDep,
23 |     entity_repository: EntityRepositoryDep,
24 |     type: Annotated[list[SearchItemType] | None, Query()] = None,
25 |     depth: int = 1,
26 |     timeframe: TimeFrame = "7d",
27 |     page: int = 1,
28 |     page_size: int = 10,
29 |     max_related: int = 10,
30 | ) -> GraphContext:
31 |     # return all types by default
32 |     types = (
33 |         [SearchItemType.ENTITY, SearchItemType.RELATION, SearchItemType.OBSERVATION]
34 |         if not type
35 |         else type
36 |     )
37 | 
38 |     logger.debug(
39 |         f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
40 |     )
41 |     # Parse timeframe
42 |     since = parse_timeframe(timeframe)
43 |     limit = page_size
44 |     offset = (page - 1) * page_size
45 | 
46 |     # Build context
47 |     context = await context_service.build_context(
48 |         types=types, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
49 |     )
50 |     recent_context = await to_graph_context(
51 |         context, entity_repository=entity_repository, page=page, page_size=page_size
52 |     )
53 |     logger.debug(f"Recent context: {recent_context.model_dump_json()}")
54 |     return recent_context
55 | 
56 | 
57 | # get_memory_context needs to be declared last so other paths can match
58 | 
59 | 
60 | @router.get("/{uri:path}", response_model=GraphContext)
61 | async def get_memory_context(
62 |     context_service: ContextServiceDep,
63 |     entity_repository: EntityRepositoryDep,
64 |     uri: str,
65 |     depth: int = 1,
66 |     timeframe: Optional[TimeFrame] = None,
67 |     page: int = 1,
68 |     page_size: int = 10,
69 |     max_related: int = 10,
70 | ) -> GraphContext:
71 |     """Get rich context from memory:// URI."""
72 |     # Add the project name from the config to the URL as the "host"
73 |     # Parse URI
74 |     logger.debug(
75 |         f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
76 |     )
77 |     memory_url = normalize_memory_url(uri)
78 | 
79 |     # Parse timeframe
80 |     since = parse_timeframe(timeframe) if timeframe else None
81 |     limit = page_size
82 |     offset = (page - 1) * page_size
83 | 
84 |     # Build context
85 |     context = await context_service.build_context(
86 |         memory_url, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
87 |     )
88 |     return await to_graph_context(
89 |         context, entity_repository=entity_repository, page=page, page_size=page_size
90 |     )
91 | 
```
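
For reference, the `/recent` endpoint above can be called directly. A minimal sketch, assuming the API is served locally on port 8000 (an assumption, not repository configuration):

```python
import httpx

response = httpx.get(
    "http://localhost:8000/memory/recent",
    params={"timeframe": "7d", "depth": 1, "page": 1, "page_size": 10},
)
response.raise_for_status()
graph_context = response.json()  # GraphContext serialized per the schema above
```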
--------------------------------------------------------------------------------
/.github/workflows/claude.yml:
--------------------------------------------------------------------------------
```yaml
 1 | name: Claude Code
 2 | 
 3 | on:
 4 |   issue_comment:
 5 |     types: [created]
 6 |   pull_request_review_comment:
 7 |     types: [created]
 8 |   issues:
 9 |     types: [opened, assigned]
10 |   pull_request_review:
11 |     types: [submitted]
12 |   pull_request_target:
13 |     types: [opened, synchronize]
14 | 
15 | jobs:
16 |   claude:
17 |     if: |
18 |       (
19 |         (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
20 |         (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
21 |         (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
22 |         (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude'))) ||
23 |         (github.event_name == 'pull_request_target' && contains(github.event.pull_request.body, '@claude'))
24 |       ) && (
25 |         github.event.comment.author_association == 'OWNER' ||
26 |         github.event.comment.author_association == 'MEMBER' ||
27 |         github.event.comment.author_association == 'COLLABORATOR' ||
28 |         github.event.sender.author_association == 'OWNER' ||
29 |         github.event.sender.author_association == 'MEMBER' ||
30 |         github.event.sender.author_association == 'COLLABORATOR' ||
31 |         github.event.pull_request.author_association == 'OWNER' ||
32 |         github.event.pull_request.author_association == 'MEMBER' ||
33 |         github.event.pull_request.author_association == 'COLLABORATOR'
34 |       )
35 |     runs-on: ubuntu-latest
36 |     permissions:
37 |       contents: read
38 |       pull-requests: read
39 |       issues: read
40 |       id-token: write
41 |       actions: read # Required for Claude to read CI results on PRs
42 |     steps:
43 |       - name: Checkout repository
44 |         uses: actions/checkout@v4
45 |         with:
46 |           # For pull_request_target, checkout the PR head to review the actual changes
47 |           ref: ${{ github.event_name == 'pull_request_target' && github.event.pull_request.head.sha || github.sha }}
48 |           fetch-depth: 1
49 | 
50 |       - name: Run Claude Code
51 |         id: claude
52 |         uses: anthropics/claude-code-action@v1
53 |         with:
54 |           claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
55 |           track_progress: true  # Enable visual progress tracking
56 |           
57 |           # This is an optional setting that allows Claude to read CI results on PRs
58 |           additional_permissions: |
59 |             actions: read
60 | 
61 |           # Optional: Give a custom prompt to Claude. If this is not specified, Claude will perform the instructions specified in the comment that tagged it.
62 |           # prompt: 'Update the pull request description to include a summary of changes.'
63 | 
64 |           # Optional: Add claude_args to customize behavior and configuration
65 |           # See https://github.com/anthropics/claude-code-action/blob/main/docs/usage.md
66 |           # or https://docs.claude.com/en/docs/claude-code/sdk#command-line for available options
67 |           # claude_args: '--model claude-opus-4-1-20250805 --allowed-tools Bash(gh pr:*)'
68 | 
69 | 
```
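
The `if:` expression above combines two gates: the triggering event must contain an `@claude` mention, and the actor must be a repository owner, member, or collaborator. A plain-Python restatement of that gate, purely illustrative (the nested dict shape mirrors the GitHub webhook payload fields the workflow reads):

```python
def get(event: dict, *keys: str) -> str:
    """Fetch a nested string field from the payload, returning '' when absent."""
    obj = event
    for key in keys:
        obj = obj.get(key, {}) if isinstance(obj, dict) else {}
    return obj if isinstance(obj, str) else ""

def should_trigger(event_name: str, event: dict) -> bool:
    mentioned = (
        (event_name in ("issue_comment", "pull_request_review_comment")
         and "@claude" in get(event, "comment", "body"))
        or (event_name == "pull_request_review" and "@claude" in get(event, "review", "body"))
        or (event_name == "issues"
            and ("@claude" in get(event, "issue", "body")
                 or "@claude" in get(event, "issue", "title")))
        or (event_name == "pull_request_target"
            and "@claude" in get(event, "pull_request", "body"))
    )
    trusted = any(
        get(event, obj, "author_association") in ("OWNER", "MEMBER", "COLLABORATOR")
        for obj in ("comment", "sender", "pull_request")
    )
    return mentioned and trusted

assert should_trigger("issues", {"issue": {"title": "@claude please triage"},
                                 "sender": {"author_association": "MEMBER"}})
```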
--------------------------------------------------------------------------------
/specs/SPEC-3 Agent Definitions.md:
--------------------------------------------------------------------------------
```markdown
  1 | ---
  2 | title: 'SPEC-3: Agent Definitions'
  3 | type: spec
  4 | permalink: specs/spec-3-agent-definitions
  5 | tags:
  6 | - agents
  7 | - roles
  8 | - process
  9 | ---
 10 | 
 11 | # SPEC-3: Agent Definitions
 12 | 
 13 | This document defines the specialist agents used in our specification-driven development process.
 14 | 
 15 | ## system-architect
 16 | 
 17 | **Role**: High-level system design and architectural decisions
 18 | 
 19 | **Responsibilities**:
 20 | - Create architectural specifications and ADRs
 21 | - Analyze system-wide impacts and trade-offs
 22 | - Design component interfaces and data flow
 23 | - Evaluate technical approaches and patterns
 24 | - Document architectural decisions and rationale
 25 | 
 26 | **Expertise Areas**:
 27 | - System architecture and design patterns
 28 | - Technology evaluation and selection
 29 | - Scalability and performance considerations
 30 | - Integration patterns and API design
 31 | - Technical debt and refactoring strategies
 32 | 
 33 | **Typical Specs**:
 34 | - System architecture overviews
 35 | - Component decomposition strategies
 36 | - Data flow and state management
 37 | - Integration and deployment patterns
 38 | 
 39 | ## vue-developer
 40 | 
 41 | **Role**: Frontend component development and UI implementation
 42 | 
 43 | **Responsibilities**:
 44 | - Create Vue.js component specifications
 45 | - Implement responsive UI components
 46 | - Design component APIs and interfaces
 47 | - Optimize for performance and accessibility
 48 | - Document component usage and patterns
 49 | 
 50 | **Expertise Areas**:
 51 | - Vue.js 3 Composition API
 52 | - Nuxt 3 framework patterns
 53 | - shadcn-vue component library
 54 | - Responsive design and CSS
 55 | - TypeScript integration
 56 | - State management with Pinia
 57 | 
 58 | **Typical Specs**:
 59 | - Individual component specifications
 60 | - UI pattern libraries
 61 | - Responsive design approaches
 62 | - Component interaction flows
 63 | 
 64 | ## python-developer
 65 | 
 66 | **Role**: Backend development and API implementation
 67 | 
 68 | **Responsibilities**:
 69 | - Create backend service specifications
 70 | - Implement APIs and data processing
 71 | - Design database schemas and queries
 72 | - Optimize performance and reliability
 73 | - Document service interfaces and behavior
 74 | 
 75 | **Expertise Areas**:
 76 | - FastAPI and Python web frameworks
 77 | - Database design and operations
 78 | - API design and documentation
 79 | - Authentication and security
 80 | - Performance optimization
 81 | - Testing and validation
 82 | 
 83 | **Typical Specs**:
 84 | - API endpoint specifications
 85 | - Database schema designs
 86 | - Service integration patterns
 87 | - Performance optimization strategies
 88 | 
 89 | ## Agent Collaboration Patterns
 90 | 
 91 | ### Handoff Protocol
 92 | 1. Agent receives spec through `/spec implement [name]`
 93 | 2. Agent reviews spec and creates implementation plan
 94 | 3. Agent documents progress and decisions in spec
 95 | 4. Agent hands off to another agent if cross-domain work is needed
 96 | 5. Final agent updates spec with completion status
 97 | 
 98 | ### Communication Standards
 99 | - All agents update specs through basic-memory MCP tools
100 | - Document decisions and trade-offs in spec notes
101 | - Link related specs and components
102 | - Preserve context for future reference
103 | 
104 | ### Quality Standards
105 | - Follow existing codebase patterns and conventions
106 | - Write tests that validate spec requirements
107 | - Document implementation choices
108 | - Consider maintainability and extensibility
109 | 
```
--------------------------------------------------------------------------------
/.claude/commands/release/beta.md:
--------------------------------------------------------------------------------
```markdown
 1 | # /beta - Create Beta Release
 2 | 
 3 | Create a new beta release using the automated justfile target with quality checks and tagging.
 4 | 
 5 | ## Usage
 6 | ```
 7 | /beta <version>
 8 | ```
 9 | 
10 | **Parameters:**
11 | - `version` (required): Beta version like `v0.13.2b1` or `v0.13.2rc1`
12 | 
13 | ## Implementation
14 | 
15 | You are an expert release manager for the Basic Memory project. When the user runs `/beta`, execute the following steps:
16 | 
17 | ### Step 1: Pre-flight Validation
18 | 1. Verify version format matches `v\d+\.\d+\.\d+(b\d+|rc\d+)` pattern
19 | 2. Check current git status for uncommitted changes
20 | 3. Verify we're on the `main` branch
21 | 4. Confirm no existing tag with this version
22 | 
23 | ### Step 2: Use Justfile Automation
24 | Execute the automated beta release process:
25 | ```bash
26 | just beta <version>
27 | ```
28 | 
29 | The justfile target handles:
30 | - ✅ Beta version format validation (supports b1, b2, rc1, etc.)
31 | - ✅ Git status and branch checks
32 | - ✅ Quality checks (`just check` - lint, format, type-check, tests)
33 | - ✅ Version update in `src/basic_memory/__init__.py`
34 | - ✅ Automatic commit with proper message
35 | - ✅ Tag creation and pushing to GitHub
36 | - ✅ Beta release workflow trigger
37 | 
38 | ### Step 3: Monitor Beta Release
39 | 1. Check GitHub Actions workflow starts successfully
40 | 2. Monitor workflow at: https://github.com/basicmachines-co/basic-memory/actions
41 | 3. Verify PyPI pre-release publication
42 | 4. Test beta installation: `uv tool install basic-memory --pre`
43 | 
44 | ### Step 4: Beta Testing Instructions
45 | Provide users with beta testing instructions:
46 | 
47 | ```bash
48 | # Install/upgrade to beta
49 | uv tool install basic-memory --pre
50 | 
51 | # Or upgrade existing installation
52 | uv tool upgrade basic-memory --prerelease=allow
53 | ```
54 | 
55 | ## Version Guidelines
56 | - **First beta**: `v0.13.2b1` 
57 | - **Subsequent betas**: `v0.13.2b2`, `v0.13.2b3`, etc.
58 | - **Release candidates**: `v0.13.2rc1`, `v0.13.2rc2`, etc.
59 | - **Final release**: `v0.13.2` (use `/release` command)
60 | 
61 | ## Error Handling
62 | - If `just beta` fails, examine the error output for specific issues
63 | - If quality checks fail, fix issues and retry
64 | - If version format is invalid, correct and retry
65 | - If tag already exists, increment version number
66 | 
67 | ## Success Output
68 | ```
69 | ✅ Beta Release v0.13.2b1 Created Successfully!
70 | 
71 | 🏷️  Tag: v0.13.2b1
72 | 🚀 GitHub Actions: Running
73 | 📦 PyPI: Will be available in ~5 minutes as pre-release
74 | 
75 | Install/test with:
76 | uv tool install basic-memory --pre
77 | 
78 | Monitor release: https://github.com/basicmachines-co/basic-memory/actions
79 | ```
80 | 
81 | ## Beta Testing Workflow
82 | 1. **Create beta**: Use `/beta v0.13.2b1`
83 | 2. **Test features**: Install and validate new functionality
84 | 3. **Fix issues**: Address bugs found during testing
85 | 4. **Iterate**: Create `v0.13.2b2` if needed
86 | 5. **Release candidate**: Create `v0.13.2rc1` when stable
87 | 6. **Final release**: Use `/release v0.13.2` when ready
88 | 
89 | ## Context
90 | - Beta releases are pre-releases for testing new features
91 | - Automatically published to PyPI with pre-release flag
92 | - Uses the automated justfile target for consistency
93 | - Version is automatically updated in `__init__.py`
94 | - Ideal for validating changes before stable release
95 | - Supports both beta (b1, b2) and release candidate (rc1, rc2) versions
```
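
Step 1's version check can be reproduced locally before invoking `just beta`. A minimal sketch using only the standard library; the regex is the one quoted above (applied with `fullmatch`, so the whole string must conform), and the helper name is ours:

```python
import re

# Pattern quoted in Step 1 of the command file
BETA_VERSION = re.compile(r"v\d+\.\d+\.\d+(b\d+|rc\d+)")

def validate_beta_version(version: str) -> str:
    """Return the version if it is a valid beta/rc tag, else raise."""
    if not BETA_VERSION.fullmatch(version):
        raise ValueError(f"invalid beta version {version!r}, expected e.g. v0.13.2b1")
    return version

assert validate_beta_version("v0.13.2b1") == "v0.13.2b1"
assert validate_beta_version("v0.13.2rc1") == "v0.13.2rc1"
```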
--------------------------------------------------------------------------------
/.github/workflows/claude-code-review.yml:
--------------------------------------------------------------------------------
```yaml
 1 | name: Claude Code Review
 2 | 
 3 | on:
 4 |   pull_request:
 5 |     types: [opened, synchronize]
 6 |     # Optional: Only run on specific file changes
 7 |     # paths:
 8 |     #   - "src/**/*.ts"
 9 |     #   - "src/**/*.tsx"
10 |     #   - "src/**/*.js"
11 |     #   - "src/**/*.jsx"
12 | 
13 | jobs:
14 |   claude-review:
15 |     # Only run for organization members and collaborators
16 |     if: |
17 |       github.event.pull_request.author_association == 'OWNER' ||
18 |       github.event.pull_request.author_association == 'MEMBER' ||
19 |       github.event.pull_request.author_association == 'COLLABORATOR'
20 | 
21 |     runs-on: ubuntu-latest
22 |     permissions:
23 |       contents: read
24 |       pull-requests: write
25 |       issues: read
26 |       id-token: write
27 | 
28 |     steps:
29 |       - name: Checkout repository
30 |         uses: actions/checkout@v4
31 |         with:
32 |           fetch-depth: 1
33 | 
34 |       - name: Run Claude Code Review
35 |         id: claude-review
36 |         uses: anthropics/claude-code-action@v1
37 |         with:
38 |           claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
39 |           github_token: ${{ secrets.GITHUB_TOKEN }}
40 |           track_progress: true  # Enable visual progress tracking
41 |           allowed_bots: '*'
42 |           prompt: |
43 |             Review this Basic Memory PR against our team checklist:
44 | 
45 |             ## Code Quality & Standards
46 |             - [ ] Follows Basic Memory's coding conventions in CLAUDE.md
47 |             - [ ] Python 3.12+ type annotations and async patterns
48 |             - [ ] SQLAlchemy 2.0 best practices
49 |             - [ ] FastAPI and Typer conventions followed
50 |             - [ ] 100-character line length limit maintained
51 |             - [ ] No commented-out code blocks
52 | 
53 |             ## Testing & Documentation
54 |             - [ ] Unit tests for new functions/methods
55 |             - [ ] Integration tests for new MCP tools
56 |             - [ ] Test coverage for edge cases
57 |             - [ ] Documentation updated (README, docstrings)
58 |             - [ ] CLAUDE.md updated if conventions change
59 | 
60 |             ## Basic Memory Architecture
61 |             - [ ] MCP tools follow atomic, composable design
62 |             - [ ] Database changes include Alembic migrations
63 |             - [ ] Preserves local-first architecture principles
64 |             - [ ] Knowledge graph operations maintain consistency
65 |             - [ ] Markdown file handling preserves integrity
66 |             - [ ] AI-human collaboration patterns followed
67 | 
68 |             ## Security & Performance
69 |             - [ ] No hardcoded secrets or credentials
70 |             - [ ] Input validation for MCP tools
71 |             - [ ] Proper error handling and logging
72 |             - [ ] Performance considerations addressed
73 |             - [ ] No sensitive data in logs or commits
74 | 
75 |             Read the CLAUDE.md file for detailed project context. For each checklist item, verify if it's satisfied and comment on any that need attention. Use inline comments for specific code issues and post a summary with checklist results.
76 | 
77 |           # Allow broader tool access for thorough code review
78 |           claude_args: '--allowed-tools "Bash(gh pr:*),Bash(gh issue:*),Bash(gh api:*),Bash(git log:*),Bash(git show:*),Read,Grep,Glob"'
79 | 
80 | 
```
--------------------------------------------------------------------------------
/src/basic_memory/models/project.py:
--------------------------------------------------------------------------------
```python
 1 | """Project model for Basic Memory."""
 2 | 
 3 | from datetime import datetime, UTC
 4 | from typing import Optional
 5 | 
 6 | from sqlalchemy import (
 7 |     Integer,
 8 |     String,
 9 |     Text,
10 |     Boolean,
11 |     DateTime,
12 |     Float,
13 |     Index,
14 |     event,
15 | )
16 | from sqlalchemy.orm import Mapped, mapped_column, relationship
17 | 
18 | from basic_memory.models.base import Base
19 | from basic_memory.utils import generate_permalink
20 | 
21 | 
22 | class Project(Base):
23 |     """Project model for Basic Memory.
24 | 
25 |     A project represents a collection of knowledge entities that are grouped together.
26 |     Projects are stored in the app-level database and provide context for all knowledge
27 |     operations.
28 |     """
29 | 
30 |     __tablename__ = "project"
31 |     __table_args__ = (
32 |         # Regular indexes
33 |         Index("ix_project_name", "name", unique=True),
34 |         Index("ix_project_permalink", "permalink", unique=True),
35 |         Index("ix_project_path", "path"),
36 |         Index("ix_project_created_at", "created_at"),
37 |         Index("ix_project_updated_at", "updated_at"),
38 |     )
39 | 
40 |     # Core identity
41 |     id: Mapped[int] = mapped_column(Integer, primary_key=True)
42 |     name: Mapped[str] = mapped_column(String, unique=True)
43 |     description: Mapped[Optional[str]] = mapped_column(Text, nullable=True)
44 | 
45 |     # URL-friendly identifier generated from name
46 |     permalink: Mapped[str] = mapped_column(String, unique=True)
47 | 
48 |     # Filesystem path to project directory
49 |     path: Mapped[str] = mapped_column(String)
50 | 
51 |     # Status flags
52 |     is_active: Mapped[bool] = mapped_column(Boolean, default=True)
53 |     is_default: Mapped[Optional[bool]] = mapped_column(Boolean, default=None, nullable=True)
54 | 
55 |     # Timestamps
56 |     created_at: Mapped[datetime] = mapped_column(
57 |         DateTime(timezone=True), default=lambda: datetime.now(UTC)
58 |     )
59 |     updated_at: Mapped[datetime] = mapped_column(
60 |         DateTime(timezone=True),
61 |         default=lambda: datetime.now(UTC),
62 |         onupdate=lambda: datetime.now(UTC),
63 |     )
64 | 
65 |     # Sync optimization - scan watermark tracking
66 |     last_scan_timestamp: Mapped[Optional[float]] = mapped_column(Float, nullable=True)
67 |     last_file_count: Mapped[Optional[int]] = mapped_column(Integer, nullable=True)
68 | 
69 |     # Relationship to entities; observations and relations are reached
70 |     # through their parent entity rather than directly from the project
71 |     entities = relationship("Entity", back_populates="project", cascade="all, delete-orphan")
72 | 
73 |     def __repr__(self) -> str:  # pragma: no cover
74 |         return f"Project(id={self.id}, name='{self.name}', permalink='{self.permalink}', path='{self.path}')"
75 | 
76 | 
77 | @event.listens_for(Project, "before_insert")
78 | @event.listens_for(Project, "before_update")
79 | def set_project_permalink(mapper, connection, project):
80 |     """Generate URL-friendly permalink for the project if needed.
81 | 
82 |     This event listener ensures the permalink is always derived from the name,
83 |     even if the name changes.
84 |     """
85 |     # If the name changed or permalink is empty, regenerate permalink
86 |     if not project.permalink or project.permalink != generate_permalink(project.name):
87 |         project.permalink = generate_permalink(project.name)
88 | 
```
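
The `before_insert`/`before_update` listeners mean callers never set `permalink` themselves; it is rederived from `name` on every flush. A self-contained sketch of the same SQLAlchemy listener pattern, with a toy `slugify` standing in for `generate_permalink` (whose real normalization rules live in `basic_memory.utils`):

```python
import re

from sqlalchemy import String, create_engine, event
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class Note(Base):
    __tablename__ = "note"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str] = mapped_column(String)
    permalink: Mapped[str] = mapped_column(String, default="")

def slugify(text: str) -> str:
    # Toy stand-in for basic_memory.utils.generate_permalink
    return re.sub(r"[^a-z0-9]+", "-", text.lower()).strip("-")

@event.listens_for(Note, "before_insert")
@event.listens_for(Note, "before_update")
def set_permalink(mapper, connection, target):
    # Same guard as set_project_permalink: regenerate only when stale or empty
    if not target.permalink or target.permalink != slugify(target.name):
        target.permalink = slugify(target.name)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    note = Note(name="My First Note")
    session.add(note)
    session.commit()
    assert note.permalink == "my-first-note"
```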
--------------------------------------------------------------------------------
/src/basic_memory/api/app.py:
--------------------------------------------------------------------------------
```python
 1 | """FastAPI application for basic-memory knowledge graph API."""
 2 | 
 3 | import asyncio
 4 | from contextlib import asynccontextmanager
 5 | 
 6 | from fastapi import FastAPI, HTTPException
 7 | from fastapi.exception_handlers import http_exception_handler
 8 | from loguru import logger
 9 | 
10 | from basic_memory import __version__ as version
11 | from basic_memory import db
12 | from basic_memory.api.routers import (
13 |     directory_router,
14 |     importer_router,
15 |     knowledge,
16 |     management,
17 |     memory,
18 |     project,
19 |     resource,
20 |     search,
21 |     prompt_router,
22 | )
23 | from basic_memory.config import ConfigManager
24 | from basic_memory.services.initialization import initialize_file_sync, initialize_app
25 | 
26 | 
27 | @asynccontextmanager
28 | async def lifespan(app: FastAPI):  # pragma: no cover
29 |     """Lifecycle manager for the FastAPI app. Not called in stdio mcp mode"""
30 | 
31 |     app_config = ConfigManager().config
32 |     logger.info("Starting Basic Memory API")
33 | 
34 |     await initialize_app(app_config)
35 | 
36 |     # Cache database connections in app state for performance
37 |     logger.info("Initializing database and caching connections...")
38 |     engine, session_maker = await db.get_or_create_db(app_config.database_path)
39 |     app.state.engine = engine
40 |     app.state.session_maker = session_maker
41 |     logger.info("Database connections cached in app state")
42 | 
43 |     logger.info(f"Sync changes enabled: {app_config.sync_changes}")
44 |     if app_config.sync_changes:
45 |         # start file sync task in background
46 |         app.state.sync_task = asyncio.create_task(initialize_file_sync(app_config))
47 |     else:
48 |         logger.info("Sync changes disabled. Skipping file sync service.")
49 | 
50 |     # proceed with startup
51 |     yield
52 | 
53 |     logger.info("Shutting down Basic Memory API")
54 |     if getattr(app.state, "sync_task", None):  # set only when sync_changes is enabled
55 |         logger.info("Stopping sync...")
56 |         app.state.sync_task.cancel()  # pyright: ignore
57 | 
58 |     await db.shutdown_db()
59 | 
60 | 
61 | # Initialize FastAPI app
62 | app = FastAPI(
63 |     title="Basic Memory API",
64 |     description="Knowledge graph API for basic-memory",
65 |     version=version,
66 |     lifespan=lifespan,
67 | )
68 | 
69 | 
70 | # Include routers
71 | app.include_router(knowledge.router, prefix="/{project}")
72 | app.include_router(memory.router, prefix="/{project}")
73 | app.include_router(resource.router, prefix="/{project}")
74 | app.include_router(search.router, prefix="/{project}")
75 | app.include_router(project.project_router, prefix="/{project}")
76 | app.include_router(directory_router.router, prefix="/{project}")
77 | app.include_router(prompt_router.router, prefix="/{project}")
78 | app.include_router(importer_router.router, prefix="/{project}")
79 | 
80 | # Project resource router works across projects
81 | app.include_router(project.project_resource_router)
82 | app.include_router(management.router)
83 | 
84 | # Auth routes are handled by FastMCP automatically when auth is enabled
85 | 
86 | 
87 | @app.exception_handler(Exception)
88 | async def exception_handler(request, exc):  # pragma: no cover
89 |     logger.exception(
90 |         "API unhandled exception",
91 |         url=str(request.url),
92 |         method=request.method,
93 |         client=request.client.host if request.client else None,
94 |         path=request.url.path,
95 |         error_type=type(exc).__name__,
96 |         error=str(exc),
97 |     )
98 |     return await http_exception_handler(request, HTTPException(status_code=500, detail=str(exc)))
99 | 
```
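
The lifespan above builds the engine and session maker once at startup and caches them on `app.state`, so handlers share one connection pool instead of reconnecting per request. A minimal sketch of that caching pattern with a dummy resource in place of the database objects:

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI, Request

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: build the expensive resource once and cache it on app state
    app.state.db = {"status": "connected"}  # stand-in for (engine, session_maker)
    yield
    # Shutdown: release it
    app.state.db = None

app = FastAPI(lifespan=lifespan)

@app.get("/health")
async def health(request: Request):
    # Handlers read the cached resource instead of creating their own
    return {"db": request.app.state.db["status"]}
```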
--------------------------------------------------------------------------------
/.claude/commands/release/release-check.md:
--------------------------------------------------------------------------------
```markdown
  1 | # /release-check - Pre-flight Release Validation
  2 | 
  3 | Comprehensive pre-flight check for release readiness without making any changes.
  4 | 
  5 | ## Usage
  6 | ```
  7 | /release-check [version]
  8 | ```
  9 | 
 10 | **Parameters:**
 11 | - `version` (optional): Version to validate, like `v0.13.0`. If omitted, the version is inferred from context.
 12 | 
 13 | ## Implementation
 14 | 
 15 | You are an expert QA engineer for the Basic Memory project. When the user runs `/release-check`, execute the following validation steps:
 16 | 
 17 | ### Step 1: Environment Validation
 18 | 1. **Git Status Check**
 19 |    - Verify working directory is clean
 20 |    - Confirm on `main` branch
 21 |    - Check if ahead/behind origin
 22 | 
 23 | 2. **Version Validation**
 24 |    - Validate version format if provided
 25 |    - Check for existing tags with same version
 26 |    - Verify version increments properly from last release
 27 | 
 28 | ### Step 2: Code Quality Gates
 29 | 1. **Test Suite Validation**
 30 |    ```bash
 31 |    just test
 32 |    ```
 33 |    - All tests must pass
 34 |    - Check test coverage (target: 95%+)
 35 |    - Validate no skipped critical tests
 36 | 
 37 | 2. **Code Quality Checks**
 38 |    ```bash
 39 |    just lint
 40 |    just type-check
 41 |    ```
 42 |    - No linting errors
 43 |    - No type checking errors
 44 |    - Code formatting is consistent
 45 | 
 46 | ### Step 3: Documentation Validation
 47 | 1. **Changelog Check**
 48 |    - CHANGELOG.md contains entry for target version
 49 |    - Entry includes all major features and fixes
 50 |    - Breaking changes are documented
 51 | 
 52 | 2. **Documentation Currency**
 53 |    - README.md reflects current functionality
 54 |    - CLI reference is up to date
 55 |    - MCP tools are documented
 56 | 
 57 | ### Step 4: Dependency Validation
 58 | 1. **Security Scan**
 59 |    - No known vulnerabilities in dependencies
 60 |    - All dependencies are at appropriate versions
 61 |    - No conflicting dependency versions
 62 | 
 63 | 2. **Build Validation**
 64 |    - Package builds successfully
 65 |    - All required files are included
 66 |    - No missing dependencies
 67 | 
 68 | ### Step 5: Issue Tracking Validation
 69 | 1. **GitHub Issues Check**
 70 |    - No critical open issues blocking release
 71 |    - All milestone issues are resolved
 72 |    - High-priority bugs are fixed
 73 | 
 74 | 2. **Testing Coverage**
 75 |    - Integration tests pass
 76 |    - MCP tool tests pass
 77 |    - Cross-platform compatibility verified
 78 | 
 79 | ## Report Format
 80 | 
 81 | Generate a comprehensive report:
 82 | 
 83 | ```
 84 | 🔍 Release Readiness Check for v0.13.0
 85 | 
 86 | ✅ PASSED CHECKS:
 87 | ├── Git status clean
 88 | ├── On main branch  
 89 | ├── All tests passing (744/744)
 90 | ├── Test coverage: 98.2%
 91 | ├── Type checking passed
 92 | ├── Linting passed
 93 | ├── CHANGELOG.md updated
 94 | └── No critical issues open
 95 | 
 96 | ⚠️  WARNINGS:
 97 | ├── 2 medium-priority issues still open
 98 | └── Documentation could be updated
 99 | 
100 | ❌ BLOCKING ISSUES:
101 | └── None found
102 | 
103 | 🎯 RELEASE READINESS: ✅ READY
104 | 
105 | Recommended next steps:
106 | 1. Address warnings if desired
107 | 2. Run `/release v0.13.0` when ready
108 | ```
109 | 
110 | ## Validation Criteria
111 | 
112 | ### Must Pass (Blocking)
113 | - [ ] All tests pass
114 | - [ ] No type errors
115 | - [ ] No linting errors  
116 | - [ ] Working directory clean
117 | - [ ] On main branch
118 | - [ ] CHANGELOG.md has version entry
119 | - [ ] No critical open issues
120 | 
121 | ### Should Pass (Warnings)
122 | - [ ] Test coverage >95%
123 | - [ ] No medium-priority open issues
124 | - [ ] Documentation up to date
125 | - [ ] No dependency vulnerabilities
126 | 
127 | ## Context
128 | - This is a read-only validation; it makes no changes
129 | - Provides confidence before running actual release
130 | - Helps identify issues early in release process
131 | - Can be run multiple times safely
```
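
The git checks in Step 1 are straightforward to script. A minimal, read-only sketch using only the standard library (helper names are ours; it mirrors the clean-tree, main-branch, and existing-tag checks described above):

```python
import subprocess

def run(*args: str) -> str:
    return subprocess.run(args, capture_output=True, text=True, check=True).stdout.strip()

def git_preflight(version: str) -> list[str]:
    """Return a list of blocking problems; an empty list means ready."""
    problems = []
    if run("git", "status", "--porcelain"):
        problems.append("working directory not clean")
    if run("git", "rev-parse", "--abbrev-ref", "HEAD") != "main":
        problems.append("not on main branch")
    if version in run("git", "tag", "--list", version).splitlines():
        problems.append(f"tag {version} already exists")
    return problems

print(git_preflight("v0.13.0") or "✅ READY")
```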
--------------------------------------------------------------------------------
/tests/schemas/test_search.py:
--------------------------------------------------------------------------------
```python
  1 | """Tests for search schemas."""
  2 | 
  3 | from datetime import datetime
  4 | 
  5 | from basic_memory.schemas.search import (
  6 |     SearchItemType,
  7 |     SearchQuery,
  8 |     SearchResult,
  9 |     SearchResponse,
 10 | )
 11 | 
 12 | 
 13 | def test_search_modes():
 14 |     """Test different search modes."""
 15 |     # Exact permalink
 16 |     query = SearchQuery(permalink="specs/search")
 17 |     assert query.permalink == "specs/search"
 18 |     assert query.text is None
 19 | 
 20 |     # Pattern match
 21 |     query = SearchQuery(permalink="specs/*")
 22 |     assert query.permalink == "specs/*"
 23 |     assert query.text is None
 24 | 
 25 |     # Text search
 26 |     query = SearchQuery(text="search implementation")
 27 |     assert query.text == "search implementation"
 28 |     assert query.permalink is None
 29 | 
 30 | 
 31 | def test_search_filters():
 32 |     """Test search result filtering."""
 33 |     query = SearchQuery(
 34 |         text="search",
 35 |         entity_types=[SearchItemType.ENTITY],
 36 |         types=["component"],
 37 |         after_date=datetime(2024, 1, 1),
 38 |     )
 39 |     assert query.entity_types == [SearchItemType.ENTITY]
 40 |     assert query.types == ["component"]
 41 |     assert query.after_date == "2024-01-01T00:00:00"
 42 | 
 43 | 
 44 | def test_search_result():
 45 |     """Test search result structure."""
 46 |     result = SearchResult(
 47 |         title="test",
 48 |         type=SearchItemType.ENTITY,
 49 |         entity="some_entity",
 50 |         score=0.8,
 51 |         metadata={"entity_type": "component"},
 52 |         permalink="specs/search",
 53 |         file_path="specs/search.md",
 54 |     )
 55 |     assert result.type == SearchItemType.ENTITY
 56 |     assert result.score == 0.8
 57 |     assert result.metadata == {"entity_type": "component"}
 58 | 
 59 | 
 60 | def test_observation_result():
 61 |     """Test observation result fields."""
 62 |     result = SearchResult(
 63 |         title="test",
 64 |         permalink="specs/search",
 65 |         file_path="specs/search.md",
 66 |         type=SearchItemType.OBSERVATION,
 67 |         score=0.5,
 68 |         metadata={},
 69 |         entity="some_entity",
 70 |         category="tech",
 71 |     )
 72 |     assert result.entity == "some_entity"
 73 |     assert result.category == "tech"
 74 | 
 75 | 
 76 | def test_relation_result():
 77 |     """Test relation result fields."""
 78 |     result = SearchResult(
 79 |         title="test",
 80 |         permalink="specs/search",
 81 |         file_path="specs/search.md",
 82 |         type=SearchItemType.RELATION,
 83 |         entity="some_entity",
 84 |         score=0.5,
 85 |         metadata={},
 86 |         from_entity="123",
 87 |         to_entity="456",
 88 |         relation_type="depends_on",
 89 |     )
 90 |     assert result.from_entity == "123"
 91 |     assert result.to_entity == "456"
 92 |     assert result.relation_type == "depends_on"
 93 | 
 94 | 
 95 | def test_search_response():
 96 |     """Test search response wrapper."""
 97 |     results = [
 98 |         SearchResult(
 99 |             title="test",
100 |             permalink="specs/search",
101 |             file_path="specs/search.md",
102 |             type=SearchItemType.ENTITY,
103 |             entity="some_entity",
104 |             score=0.8,
105 |             metadata={},
106 |         ),
107 |         SearchResult(
108 |             title="test",
109 |             permalink="specs/search",
110 |             file_path="specs/search.md",
111 |             type=SearchItemType.ENTITY,
112 |             entity="some_entity",
113 |             score=0.6,
114 |             metadata={},
115 |         ),
116 |     ]
117 |     response = SearchResponse(results=results, current_page=1, page_size=1)
118 |     assert len(response.results) == 2
119 |     assert response.results[0].score > response.results[1].score
120 | 
```
--------------------------------------------------------------------------------
/src/basic_memory/markdown/utils.py:
--------------------------------------------------------------------------------
```python
  1 | """Utilities for converting between markdown and entity models."""
  2 | 
  3 | from pathlib import Path
  4 | from typing import Any, Optional
  5 | 
  6 | from frontmatter import Post
  7 | 
  8 | from basic_memory.file_utils import has_frontmatter, remove_frontmatter, parse_frontmatter
  9 | from basic_memory.markdown import EntityMarkdown
 10 | from basic_memory.models import Entity
 11 | from basic_memory.models import Observation as ObservationModel
 12 | 
 13 | 
 14 | def entity_model_from_markdown(
 15 |     file_path: Path, markdown: EntityMarkdown, entity: Optional[Entity] = None
 16 | ) -> Entity:
 17 |     """
 18 |     Convert markdown entity to model. Does not include relations.
 19 | 
 20 |     Args:
 21 |         file_path: Path to the markdown file
 22 |         markdown: Parsed markdown entity
 23 |         entity: Optional existing entity to update
 24 | 
 25 |     Returns:
 26 |         Entity model populated from markdown
 27 | 
 28 |     Raises:
 29 |         ValueError: If required datetime fields are missing from markdown
 30 |     """
 31 | 
 32 |     if not markdown.created or not markdown.modified:  # pragma: no cover
 33 |         raise ValueError("Both created and modified dates are required in markdown")
 34 | 
 35 |     # Create or update entity
 36 |     model = entity or Entity()
 37 | 
 38 |     # Update basic fields
 39 |     model.title = markdown.frontmatter.title
 40 |     model.entity_type = markdown.frontmatter.type
 41 |     # Only update permalink if it exists in frontmatter, otherwise preserve existing
 42 |     if markdown.frontmatter.permalink is not None:
 43 |         model.permalink = markdown.frontmatter.permalink
 44 |     model.file_path = file_path.as_posix()
 45 |     model.content_type = "text/markdown"
 46 |     model.created_at = markdown.created
 47 |     model.updated_at = markdown.modified
 48 | 
 49 |     # Handle metadata - ensure all values are strings and filter None
 50 |     metadata = markdown.frontmatter.metadata or {}
 51 |     model.entity_metadata = {k: str(v) for k, v in metadata.items() if v is not None}
 52 | 
 53 |     # Convert observations
 54 |     model.observations = [
 55 |         ObservationModel(
 56 |             content=obs.content,
 57 |             category=obs.category,
 58 |             context=obs.context,
 59 |             tags=obs.tags,
 60 |         )
 61 |         for obs in markdown.observations
 62 |     ]
 63 | 
 64 |     return model
 65 | 
 66 | 
 67 | async def schema_to_markdown(schema: Any) -> Post:
 68 |     """
 69 |     Convert schema to markdown Post object.
 70 | 
 71 |     Args:
 72 |         schema: Schema to convert (must have title, entity_type, and permalink attributes)
 73 | 
 74 |     Returns:
 75 |         Post object with frontmatter metadata
 76 |     """
 77 |     # Extract content and metadata
 78 |     content = schema.content or ""
 79 |     entity_metadata = dict(schema.entity_metadata or {})
 80 | 
 81 |     # if the content contains frontmatter, remove it and merge
 82 |     if has_frontmatter(content):
 83 |         content_frontmatter = parse_frontmatter(content)
 84 |         content = remove_frontmatter(content)
 85 | 
 86 |         # Merge content frontmatter with entity metadata
 87 |         # (entity_metadata takes precedence for conflicts)
 88 |         content_frontmatter.update(entity_metadata)
 89 |         entity_metadata = content_frontmatter
 90 | 
 91 |     # Remove special fields for ordered frontmatter
 92 |     for field in ["type", "title", "permalink"]:
 93 |         entity_metadata.pop(field, None)
 94 | 
 95 |     # Create Post with fields ordered by insert order
 96 |     post = Post(
 97 |         content,
 98 |         title=schema.title,
 99 |         type=schema.entity_type,
100 |     )
101 |     # set the permalink if passed in
102 |     if schema.permalink:
103 |         post.metadata["permalink"] = schema.permalink
104 | 
105 |     if entity_metadata:
106 |         post.metadata.update(entity_metadata)
107 | 
108 |     return post
109 | 
```
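
`schema_to_markdown` returns a `frontmatter.Post`; serializing it back to a markdown string is a `frontmatter.dumps` call. A small usage sketch, where `Note` is a hypothetical stand-in carrying only the attributes the function reads:

```python
import asyncio
from dataclasses import dataclass, field

import frontmatter

from basic_memory.markdown.utils import schema_to_markdown

@dataclass
class Note:
    """Hypothetical stand-in with the attributes schema_to_markdown reads."""
    title: str = "Search Design"
    entity_type: str = "spec"
    permalink: str = "specs/search-design"
    content: str = "FTS5 powers the search index."
    entity_metadata: dict = field(default_factory=dict)

post = asyncio.run(schema_to_markdown(Note()))
print(frontmatter.dumps(post))
# Prints a YAML frontmatter block (title, type, permalink) followed by the content.
```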
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/mcp.py:
--------------------------------------------------------------------------------
```python
 1 | """MCP server command with streamable HTTP transport."""
 2 | 
 3 | import asyncio
 4 | import os
 5 | import typer
 6 | from typing import Optional
 7 | 
 8 | from basic_memory.cli.app import app
 9 | from basic_memory.config import ConfigManager
10 | 
11 | # Import mcp instance
12 | from basic_memory.mcp.server import mcp as mcp_server  # pragma: no cover
13 | 
14 | # Import mcp tools to register them
15 | import basic_memory.mcp.tools  # noqa: F401  # pragma: no cover
16 | 
17 | # Import prompts to register them
18 | import basic_memory.mcp.prompts  # noqa: F401  # pragma: no cover
19 | from loguru import logger
20 | import threading
21 | from basic_memory.services.initialization import initialize_file_sync
22 | 
23 | config = ConfigManager().config
24 | 
25 | if not config.cloud_mode_enabled:
26 | 
27 |     @app.command()
28 |     def mcp(
29 |         transport: str = typer.Option(
30 |             "stdio", help="Transport type: stdio, streamable-http, or sse"
31 |         ),
32 |         host: str = typer.Option(
33 |             "0.0.0.0", help="Host for HTTP transports (use 0.0.0.0 to allow external connections)"
34 |         ),
35 |         port: int = typer.Option(8000, help="Port for HTTP transports"),
36 |         path: str = typer.Option("/mcp", help="Path prefix for streamable-http transport"),
37 |         project: Optional[str] = typer.Option(None, help="Restrict MCP server to single project"),
38 |     ):  # pragma: no cover
39 |         """Run the MCP server with configurable transport options.
40 | 
41 |         This command starts an MCP server using one of three transport options:
42 | 
43 |         - stdio: Standard I/O (the default; good for local usage)
44 |         - streamable-http: Recommended for web deployments
45 |         - sse: Server-Sent Events (for compatibility with existing clients)
46 |         """
47 | 
48 |         # Validate and set project constraint if specified
49 |         if project:
50 |             config_manager = ConfigManager()
51 |             project_name, _ = config_manager.get_project(project)
52 |             if not project_name:
53 |                 typer.echo(f"No project found named: {project}", err=True)
54 |                 raise typer.Exit(1)
55 | 
56 |             # Set env var with validated project name
57 |             os.environ["BASIC_MEMORY_MCP_PROJECT"] = project_name
58 |             logger.info(f"MCP server constrained to project: {project_name}")
59 | 
60 |         app_config = ConfigManager().config
61 | 
62 |         def run_file_sync():
63 |             """Run file sync in a separate thread with its own event loop."""
64 |             loop = asyncio.new_event_loop()
65 |             asyncio.set_event_loop(loop)
66 |             try:
67 |                 loop.run_until_complete(initialize_file_sync(app_config))
68 |             except Exception as e:
69 |                 logger.error(f"File sync error: {e}")
70 |             finally:
71 |                 loop.close()
72 | 
73 |         logger.info(f"Sync changes enabled: {app_config.sync_changes}")
74 |         if app_config.sync_changes:
75 |             # Start the sync thread
76 |             sync_thread = threading.Thread(target=run_file_sync, daemon=True)
77 |             sync_thread.start()
78 |             logger.info("Started file sync in background")
79 | 
80 |         # Now run the MCP server (blocks)
81 |         logger.info(f"Starting MCP server with {transport.upper()} transport")
82 | 
83 |         if transport == "stdio":
84 |             mcp_server.run(
85 |                 transport=transport,
86 |             )
87 |         elif transport == "streamable-http" or transport == "sse":
88 |             mcp_server.run(
89 |                 transport=transport,
90 |                 host=host,
91 |                 port=port,
92 |                 path=path,
93 |                 log_level="INFO",
94 |             )
95 | 
```
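
Because `mcp_server.run(...)` blocks the main thread, the file sync coroutine gets its own event loop on a daemon thread. A self-contained sketch of that thread-plus-loop pattern using only the standard library:

```python
import asyncio
import threading
import time

async def background_sync() -> None:
    """Stand-in for initialize_file_sync(app_config)."""
    while True:
        print("sync tick")
        await asyncio.sleep(1)

def start_background_loop(coro_factory) -> threading.Thread:
    """Run an async task on its own event loop in a daemon thread."""
    def runner() -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(coro_factory())
        finally:
            loop.close()

    thread = threading.Thread(target=runner, daemon=True)
    thread.start()
    return thread

start_background_loop(background_sync)
time.sleep(3)  # stand-in for the blocking mcp_server.run(...) call
```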
--------------------------------------------------------------------------------
/CLA.md:
--------------------------------------------------------------------------------
```markdown
 1 | # Contributor License Agreement
 2 | 
 3 | ## Copyright Assignment and License Grant
 4 | 
 5 | By signing this Contributor License Agreement ("Agreement"), you accept and agree to the following terms and conditions for your present and future Contributions submitted to Basic Machines LLC. Except for the license granted herein to Basic Machines LLC and recipients of software distributed by Basic Machines LLC, you reserve all right, title, and interest in and to your Contributions.
 6 | 
 7 | ### 1. Definitions
 8 | 
 9 | "You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with Basic Machines LLC.
10 | 
11 | "Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to Basic Machines LLC for inclusion in, or documentation of, any of the products owned or managed by Basic Machines LLC (the "Work").
12 | 
13 | ### 2. Grant of Copyright License
14 | 
15 | Subject to the terms and conditions of this Agreement, You hereby grant to Basic Machines LLC and to recipients of software distributed by Basic Machines LLC a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Work, and to permit persons to whom the Work is furnished to do so.
16 | 
17 | ### 3. Assignment of Copyright
18 | 
19 | You hereby assign to Basic Machines LLC all right, title, and interest worldwide in all Copyright covering your Contributions. Basic Machines LLC may license the Contributions under any license terms, including copyleft, permissive, commercial, or proprietary licenses.
20 | 
21 | ### 4. Grant of Patent License
22 | 
23 | Subject to the terms and conditions of this Agreement, You hereby grant to Basic Machines LLC and to recipients of software distributed by Basic Machines LLC a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work.
24 | 
25 | ### 5. Developer Certificate of Origin
26 | 
27 | By making a Contribution to this project, You certify that:
28 | 
29 | (a) The Contribution was created in whole or in part by You and You have the right to submit it under this Agreement; or
30 | 
31 | (b) The Contribution is based upon previous work that, to the best of Your knowledge, is covered under an appropriate open source license and You have the right under that license to submit that work with modifications, whether created in whole or in part by You, under this Agreement; or
32 | 
33 | (c) The Contribution was provided directly to You by some other person who certified (a), (b) or (c) and You have not modified it.
34 | 
35 | (d) You understand and agree that this project and the Contribution are public and that a record of the Contribution (including all personal information You submit with it, including Your sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
36 | 
37 | ### 6. Representations
38 | 
39 | You represent that you are legally entitled to grant the above license and assignment. If your employer(s) has rights to intellectual property that you create that includes your Contributions, you represent that you have received permission to make Contributions on behalf of that employer, or that your employer has waived such rights for your Contributions to Basic Machines LLC.
40 | 
41 | ---
42 | 
43 | This Agreement is effective as of the date you first submit a Contribution to Basic Machines LLC.
44 | 
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
  1 | [project]
  2 | name = "basic-memory"
  3 | dynamic = ["version"]
  4 | description = "Local-first knowledge management combining Zettelkasten with knowledge graphs"
  5 | readme = "README.md"
  6 | requires-python = ">=3.12"
  7 | license = { text = "AGPL-3.0-or-later" }
  8 | authors = [
  9 |     { name = "Basic Machines", email = "[email protected]" }
 10 | ]
 11 | dependencies = [
 12 |     "sqlalchemy>=2.0.0",
 13 |     "pyyaml>=6.0.1",
 14 |     "typer>=0.9.0",
 15 |     "aiosqlite>=0.20.0",
 16 |     "greenlet>=3.1.1",
 17 |     "pydantic[email,timezone]>=2.10.3",
 18 |     "icecream>=2.1.3",
 19 |     "mcp>=1.2.0",
 20 |     "pydantic-settings>=2.6.1",
 21 |     "loguru>=0.7.3",
 22 |     "pyright>=1.1.390",
 23 |     "markdown-it-py>=3.0.0",
 24 |     "python-frontmatter>=1.1.0",
 25 |     "rich>=13.9.4",
 26 |     "unidecode>=1.3.8",
 27 |     "dateparser>=1.2.0",
 28 |     "watchfiles>=1.0.4",
 29 |     "fastapi[standard]>=0.115.8",
 30 |     "alembic>=1.14.1",
 31 |     "pillow>=11.1.0",
 32 |     "pybars3>=0.9.7",
 33 |     "fastmcp>=2.10.2",
 34 |     "pyjwt>=2.10.1",
 35 |     "python-dotenv>=1.1.0",
 36 |     "pytest-aio>=1.9.0",
 37 |     "aiofiles>=24.1.0", # Async file I/O
 38 |     "logfire>=0.73.0", # Optional observability (disabled by default via config)
 39 | ]
 40 | 
 41 | 
 42 | [project.urls]
 43 | Homepage = "https://github.com/basicmachines-co/basic-memory"
 44 | Repository = "https://github.com/basicmachines-co/basic-memory"
 45 | Documentation = "https://github.com/basicmachines-co/basic-memory#readme"
 46 | 
 47 | [project.scripts]
 48 | basic-memory = "basic_memory.cli.main:app"
 49 | bm = "basic_memory.cli.main:app"
 50 | 
 51 | [build-system]
 52 | requires = ["hatchling", "uv-dynamic-versioning>=0.7.0"]
 53 | build-backend = "hatchling.build"
 54 | 
 55 | [tool.pytest.ini_options]
 56 | pythonpath = ["src", "tests"]
 57 | addopts = "--cov=basic_memory --cov-report term-missing"
 58 | testpaths = ["tests", "test-int"]
 59 | asyncio_mode = "strict"
 60 | asyncio_default_fixture_loop_scope = "function"
 61 | markers = [
 62 |     "benchmark: Performance benchmark tests (deselect with '-m \"not benchmark\"')",
 63 |     "slow: Slow-running tests (deselect with '-m \"not slow\"')",
 64 | ]
 65 | 
 66 | [tool.ruff]
 67 | line-length = 100
 68 | target-version = "py312"
 69 | 
 70 | [dependency-groups]
 71 | dev = [
 72 |     "gevent>=24.11.1",
 73 |     "icecream>=2.1.3",
 74 |     "pytest>=8.3.4",
 75 |     "pytest-cov>=4.1.0",
 76 |     "pytest-mock>=3.12.0",
 77 |     "pytest-asyncio>=0.24.0",
 78 |     "pytest-xdist>=3.0.0",
 79 |     "ruff>=0.1.6",
 80 |     "freezegun>=1.5.5",
 81 | ]
 82 | 
 83 | [tool.hatch.version]
 84 | source = "uv-dynamic-versioning"
 85 | 
 86 | [tool.uv-dynamic-versioning]
 87 | vcs = "git"
 88 | style = "pep440"
 89 | bump = true
 90 | fallback-version = "0.0.0"
 91 | 
 92 | [tool.pyright]
 93 | include = ["src/"]
 94 | exclude = ["**/__pycache__"]
 95 | ignore = ["test/"]
 96 | defineConstant = { DEBUG = true }
 97 | reportMissingImports = "error"
 98 | reportMissingTypeStubs = false
 99 | pythonVersion = "3.12"
100 | 
101 | 
102 | 
103 | [tool.coverage.run]
104 | concurrency = ["thread", "gevent"]
105 | 
106 | [tool.coverage.report]
107 | exclude_lines = [
108 |     "pragma: no cover",
109 |     "def __repr__",
110 |     "if self.debug:",
111 |     "if settings.DEBUG",
112 |     "raise AssertionError",
113 |     "raise NotImplementedError",
114 |     "if 0:",
115 |     "if __name__ == .__main__.:",
116 |     "class .*\\bProtocol\\):",
117 |     "@(abc\\.)?abstractmethod",
118 | ]
119 | 
120 | # Exclude specific modules that are difficult to test comprehensively
121 | omit = [
122 |     "*/external_auth_provider.py",  # External HTTP calls to OAuth providers
123 |     "*/supabase_auth_provider.py",  # External HTTP calls to Supabase APIs
124 |     "*/watch_service.py",           # File system watching - complex integration testing
125 |     "*/background_sync.py",         # Background processes
126 |     "*/cli/main.py",               # CLI entry point
127 |     "*/mcp/tools/project_management.py",  # Covered by integration tests
128 |     "*/mcp/tools/sync_status.py",  # Covered by integration tests
129 |     "*/services/migration_service.py", # Complex migration scenarios
130 | ]
131 | 
132 | [tool.logfire]
133 | ignore_no_config = true
134 | 
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/prompt.py:
--------------------------------------------------------------------------------
```python
 1 | """Request and response schemas for prompt-related operations."""
 2 | 
 3 | from typing import Optional, List, Any, Dict
 4 | from pydantic import BaseModel, Field
 5 | 
 6 | from basic_memory.schemas.base import TimeFrame
 7 | from basic_memory.schemas.memory import EntitySummary, ObservationSummary, RelationSummary
 8 | 
 9 | 
10 | class PromptContextItem(BaseModel):
11 |     """Container for primary and related results to render in a prompt."""
12 | 
13 |     primary_results: List[EntitySummary]
14 |     related_results: List[EntitySummary | ObservationSummary | RelationSummary]
15 | 
16 | 
17 | class ContinueConversationRequest(BaseModel):
18 |     """Request for generating a continue conversation prompt.
19 | 
20 |     Used to provide context for continuing a conversation on a specific topic
21 |     or with recent activity from a given timeframe.
22 |     """
23 | 
24 |     topic: Optional[str] = Field(None, description="Topic or keyword to search for")
25 |     timeframe: Optional[TimeFrame] = Field(
26 |         None, description="How far back to look for activity (e.g. '1d', '1 week')"
27 |     )
28 |     # Limit search results to keep the prompt context manageable
29 |     search_items_limit: int = Field(
30 |         5,
31 |         description="Maximum number of search results to include in context (max 10)",
32 |         ge=1,
33 |         le=10,
34 |     )
35 |     depth: int = Field(
36 |         1,
37 |         description="How many relationship 'hops' to follow when building context (max 5)",
38 |         ge=1,
39 |         le=5,
40 |     )
41 |     # Limit related items to prevent overloading the context
42 |     related_items_limit: int = Field(
43 |         5, description="Maximum number of related items to include in context (max 10)", ge=1, le=10
44 |     )
45 | 
46 | 
47 | class SearchPromptRequest(BaseModel):
48 |     """Request for generating a search results prompt.
49 | 
50 |     Used to format search results into a prompt with context and suggestions.
51 |     """
52 | 
53 |     query: str = Field(..., description="The search query text")
54 |     timeframe: Optional[TimeFrame] = Field(
55 |         None, description="Optional timeframe to limit results (e.g. '1d', '1 week')"
56 |     )
57 | 
58 | 
59 | class PromptMetadata(BaseModel):
60 |     """Metadata about a prompt response.
61 | 
62 |     Contains statistical information about the prompt generation process
63 |     and results, useful for debugging and UI display.
64 |     """
65 | 
66 |     query: Optional[str] = Field(None, description="The original query or topic")
67 |     timeframe: Optional[str] = Field(None, description="The timeframe used for filtering")
68 |     search_count: int = Field(0, description="Number of search results found")
69 |     context_count: int = Field(0, description="Number of context items retrieved")
70 |     observation_count: int = Field(0, description="Total number of observations included")
71 |     relation_count: int = Field(0, description="Total number of relations included")
72 |     total_items: int = Field(0, description="Total number of all items included in the prompt")
73 |     search_limit: int = Field(0, description="Maximum search results requested")
74 |     context_depth: int = Field(0, description="Context depth used")
75 |     related_limit: int = Field(0, description="Maximum related items requested")
76 |     generated_at: str = Field(..., description="ISO timestamp when this prompt was generated")
77 | 
78 | 
79 | class PromptResponse(BaseModel):
80 |     """Response containing the rendered prompt.
81 | 
82 |     Includes both the rendered prompt text and the context that was used
83 |     to render it, for potential client-side use.
84 |     """
85 | 
86 |     prompt: str = Field(..., description="The rendered prompt text")
87 |     context: Dict[str, Any] = Field(..., description="The context used to render the prompt")
88 |     metadata: PromptMetadata = Field(
89 |         ..., description="Metadata about the prompt generation process"
90 |     )
91 | 
```
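
The `ge`/`le` bounds on these fields are enforced by Pydantic at construction time, so out-of-range values fail before any context is built. A quick check, assuming the schema imports as shown above:

```python
from pydantic import ValidationError

from basic_memory.schemas.prompt import ContinueConversationRequest

ok = ContinueConversationRequest(topic="search", depth=2)
assert ok.search_items_limit == 5  # defaults apply when omitted

try:
    ContinueConversationRequest(topic="search", depth=9)  # violates le=5
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # e.g. "less_than_equal"
```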
--------------------------------------------------------------------------------
/src/basic_memory/schemas/search.py:
--------------------------------------------------------------------------------
```python
  1 | """Search schemas for Basic Memory.
  2 | 
  3 | The search system supports three primary modes:
  4 | 1. Exact permalink lookup
  5 | 2. Pattern matching with *
  6 | 3. Full-text search across content
  7 | """
  8 | 
  9 | from typing import Optional, List, Union
 10 | from datetime import datetime
 11 | from enum import Enum
 12 | from pydantic import BaseModel, field_validator
 13 | 
 14 | from basic_memory.schemas.base import Permalink
 15 | 
 16 | 
 17 | class SearchItemType(str, Enum):
 18 |     """Types of searchable items."""
 19 | 
 20 |     ENTITY = "entity"
 21 |     OBSERVATION = "observation"
 22 |     RELATION = "relation"
 23 | 
 24 | 
 25 | class SearchQuery(BaseModel):
 26 |     """Search query parameters.
 27 | 
 28 |     Use ONE of these primary search modes:
 29 |     - permalink: Exact permalink match
 30 |     - permalink_match: Path pattern with *
 31 |     - text: Full-text search of title/content (supports boolean operators: AND, OR, NOT)
 32 | 
 33 |     Optionally filter results by:
 34 |     - types: Limit to specific item types
 35 |     - entity_types: Limit to specific entity types
 36 |     - after_date: Only items after date
 37 | 
 38 |     Boolean search examples:
 39 |     - "python AND flask" - Find items with both terms
 40 |     - "python OR django" - Find items with either term
 41 |     - "python NOT django" - Find items with python but not django
 42 |     - "(python OR flask) AND web" - Use parentheses for grouping
 43 |     """
 44 | 
 45 |     # Primary search modes (use ONE of these)
 46 |     permalink: Optional[str] = None  # Exact permalink match
 47 |     permalink_match: Optional[str] = None  # Glob permalink match
 48 |     text: Optional[str] = None  # Full-text search (now supports boolean operators)
 49 |     title: Optional[str] = None  # Title-only search
 50 | 
 51 |     # Optional filters
 52 |     types: Optional[List[str]] = None  # Filter by type
 53 |     entity_types: Optional[List[SearchItemType]] = None  # Filter by entity type
 54 |     after_date: Optional[Union[datetime, str]] = None  # Time-based filter
 55 | 
 56 |     @field_validator("after_date")
 57 |     @classmethod
 58 |     def validate_date(cls, v: Optional[Union[datetime, str]]) -> Optional[str]:
 59 |         """Convert datetime to ISO format if needed."""
 60 |         if isinstance(v, datetime):
 61 |             return v.isoformat()
 62 |         return v
 63 | 
 64 |     def no_criteria(self) -> bool:
 65 |         return (
 66 |             self.permalink is None
 67 |             and self.permalink_match is None
 68 |             and self.title is None
 69 |             and self.text is None
 70 |             and self.after_date is None
 71 |             and self.types is None
 72 |             and self.entity_types is None
 73 |         )
 74 | 
 75 |     def has_boolean_operators(self) -> bool:
 76 |         """Check if the text query contains boolean operators (AND, OR, NOT)."""
 77 |         if not self.text:  # pragma: no cover
 78 |             return False
 79 | 
 80 |         # Check for common boolean operators with correct word boundaries
 81 |         # to avoid matching substrings like "GRAND" containing "AND"
 82 |         boolean_patterns = [" AND ", " OR ", " NOT ", "(", ")"]
 83 |         text = f" {self.text} "  # Add spaces to ensure we match word boundaries
 84 |         return any(pattern in text for pattern in boolean_patterns)
 85 | 
 86 | 
 87 | class SearchResult(BaseModel):
 88 |     """Search result with score and metadata."""
 89 | 
 90 |     title: str
 91 |     type: SearchItemType
 92 |     score: float
 93 |     entity: Optional[Permalink] = None
 94 |     permalink: Optional[str]
 95 |     content: Optional[str] = None
 96 |     file_path: str
 97 | 
 98 |     metadata: Optional[dict] = None
 99 | 
100 |     # Type-specific fields
101 |     category: Optional[str] = None  # For observations
102 |     from_entity: Optional[Permalink] = None  # For relations
103 |     to_entity: Optional[Permalink] = None  # For relations
104 |     relation_type: Optional[str] = None  # For relations
105 | 
106 | 
107 | class SearchResponse(BaseModel):
108 |     """Wrapper for search results."""
109 | 
110 |     results: List[SearchResult]
111 |     current_page: int
112 |     page_size: int
113 | 
```
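
A short illustration of the one-primary-mode contract and the boolean-operator detection (note that the padded-space check deliberately ignores operator-like substrings inside words, per the docstring's "GRAND" example):

```python
from basic_memory.schemas.search import SearchQuery

assert SearchQuery().no_criteria()  # no primary mode, no filters

exact = SearchQuery(permalink="specs/search")          # 1. exact permalink
glob = SearchQuery(permalink_match="specs/*")          # 2. glob pattern
text = SearchQuery(text="(python OR flask) AND web")   # 3. full-text search

assert not exact.no_criteria()
assert glob.permalink_match == "specs/*"
assert text.has_boolean_operators()
assert not SearchQuery(text="GRAND PIANO").has_boolean_operators()  # word-boundary safe
```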
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/cc7172b46608_update_search_index_schema.py:
--------------------------------------------------------------------------------
```python
  1 | """Update search index schema
  2 | 
  3 | Revision ID: cc7172b46608
  4 | Revises: 502b60eaa905
  5 | Create Date: 2025-02-28 18:48:23.244941
  6 | 
  7 | """
  8 | 
  9 | from typing import Sequence, Union
 10 | 
 11 | from alembic import op
 12 | 
 13 | 
 14 | # revision identifiers, used by Alembic.
 15 | revision: str = "cc7172b46608"
 16 | down_revision: Union[str, None] = "502b60eaa905"
 17 | branch_labels: Union[str, Sequence[str], None] = None
 18 | depends_on: Union[str, Sequence[str], None] = None
 19 | 
 20 | 
 21 | def upgrade() -> None:
 22 |     """Upgrade database schema to use new search index with content_stems and content_snippet."""
 23 | 
 24 |     # First, drop the existing search_index table
 25 |     op.execute("DROP TABLE IF EXISTS search_index")
 26 | 
 27 |     # Create new search_index with updated schema
 28 |     op.execute("""
 29 |     CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
 30 |         -- Core entity fields
 31 |         id UNINDEXED,          -- Row ID
 32 |         title,                 -- Title for searching
 33 |         content_stems,         -- Main searchable content split into stems
 34 |         content_snippet,       -- File content snippet for display
 35 |         permalink,             -- Stable identifier (now indexed for path search)
 36 |         file_path UNINDEXED,   -- Physical location
 37 |         type UNINDEXED,        -- entity/relation/observation
 38 |         
 39 |         -- Relation fields 
 40 |         from_id UNINDEXED,     -- Source entity
 41 |         to_id UNINDEXED,       -- Target entity
 42 |         relation_type UNINDEXED, -- Type of relation
 43 |         
 44 |         -- Observation fields
 45 |         entity_id UNINDEXED,   -- Parent entity
 46 |         category UNINDEXED,    -- Observation category
 47 |         
 48 |         -- Common fields
 49 |         metadata UNINDEXED,    -- JSON metadata
 50 |         created_at UNINDEXED,  -- Creation timestamp
 51 |         updated_at UNINDEXED,  -- Last update
 52 |         
 53 |         -- Configuration
 54 |         tokenize='unicode61 tokenchars 0x2F',  -- Hex code for /
 55 |         prefix='1,2,3,4'                    -- Support longer prefixes for paths
 56 |     );
 57 |     """)
 58 | 
 59 | 
 60 | def downgrade() -> None:
 61 |     """Downgrade database schema to use old search index."""
 62 |     # Drop the updated search_index table
 63 |     op.execute("DROP TABLE IF EXISTS search_index")
 64 | 
 65 |     # Recreate the original search_index schema
 66 |     op.execute("""
 67 |     CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
 68 |         -- Core entity fields
 69 |         id UNINDEXED,          -- Row ID
 70 |         title,                 -- Title for searching
 71 |         content,               -- Main searchable content
 72 |         permalink,             -- Stable identifier (now indexed for path search)
 73 |         file_path UNINDEXED,   -- Physical location
 74 |         type UNINDEXED,        -- entity/relation/observation
 75 |         
 76 |         -- Relation fields 
 77 |         from_id UNINDEXED,     -- Source entity
 78 |         to_id UNINDEXED,       -- Target entity
 79 |         relation_type UNINDEXED, -- Type of relation
 80 |         
 81 |         -- Observation fields
 82 |         entity_id UNINDEXED,   -- Parent entity
 83 |         category UNINDEXED,    -- Observation category
 84 |         
 85 |         -- Common fields
 86 |         metadata UNINDEXED,    -- JSON metadata
 87 |         created_at UNINDEXED,  -- Creation timestamp
 88 |         updated_at UNINDEXED,  -- Last update
 89 |         
 90 |         -- Configuration
 91 |         tokenize='unicode61 tokenchars 0x2F',  -- Hex code for /
 92 |         prefix='1,2,3,4'                    -- Support longer prefixes for paths
 93 |     );
 94 |     """)
 95 | 
 96 |     # Print instruction to manually reindex after migration
 97 |     print("\n------------------------------------------------------------------")
 98 |     print("IMPORTANT: After downgrade completes, manually run the reindex command:")
 99 |     print("basic-memory sync")
100 |     print("------------------------------------------------------------------\n")
101 | 
```
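An editorial aside, not part of the migration: because `permalink` is left indexed and `prefix='1,2,3,4'` builds prefix indexes for short terms, path-style prefix queries against this table stay cheap. A minimal sketch of such a query, assuming a plain `sqlite3` connection; the database path and search term here are hypothetical:

```python
import sqlite3

# Illustrative sketch, not repository code: query the FTS5 table created
# above. The column filter (permalink : ...) and the trailing * rely on the
# permalink column being indexed and on the prefix option in the migration.
conn = sqlite3.connect("basic_memory.db")  # hypothetical path
rows = conn.execute(
    "SELECT title, permalink FROM search_index "
    "WHERE search_index MATCH ? ORDER BY rank LIMIT 10",
    ("permalink : spec*",),
).fetchall()
for title, permalink in rows:
    print(title, permalink)
```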
--------------------------------------------------------------------------------
/src/basic_memory/repository/relation_repository.py:
--------------------------------------------------------------------------------
```python
 1 | """Repository for managing Relation objects."""
 2 | 
 3 | from sqlalchemy import and_, delete
 4 | from typing import Sequence, List, Optional
 5 | 
 6 | from sqlalchemy import select
 7 | from sqlalchemy.ext.asyncio import async_sessionmaker
 8 | from sqlalchemy.orm import selectinload, aliased
 9 | from sqlalchemy.orm.interfaces import LoaderOption
10 | 
11 | from basic_memory import db
12 | from basic_memory.models import Relation, Entity
13 | from basic_memory.repository.repository import Repository
14 | 
15 | 
16 | class RelationRepository(Repository[Relation]):
17 |     """Repository for Relation model with memory-specific operations."""
18 | 
19 |     def __init__(self, session_maker: async_sessionmaker, project_id: int):
20 |         """Initialize with session maker and project_id filter.
21 | 
22 |         Args:
23 |             session_maker: SQLAlchemy session maker
24 |             project_id: Project ID to filter all operations by
25 |         """
26 |         super().__init__(session_maker, Relation, project_id=project_id)
27 | 
28 |     async def find_relation(
29 |         self, from_permalink: str, to_permalink: str, relation_type: str
30 |     ) -> Optional[Relation]:
 31 |         """Find a relation by its from/to permalinks and relation type."""
32 |         from_entity = aliased(Entity)
33 |         to_entity = aliased(Entity)
34 | 
35 |         query = (
36 |             select(Relation)
37 |             .join(from_entity, Relation.from_id == from_entity.id)
38 |             .join(to_entity, Relation.to_id == to_entity.id)
39 |             .where(
40 |                 and_(
41 |                     from_entity.permalink == from_permalink,
42 |                     to_entity.permalink == to_permalink,
43 |                     Relation.relation_type == relation_type,
44 |                 )
45 |             )
46 |         )
47 |         return await self.find_one(query)
48 | 
49 |     async def find_by_entities(self, from_id: int, to_id: int) -> Sequence[Relation]:
50 |         """Find all relations between two entities."""
51 |         query = select(Relation).where((Relation.from_id == from_id) & (Relation.to_id == to_id))
52 |         result = await self.execute_query(query)
53 |         return result.scalars().all()
54 | 
55 |     async def find_by_type(self, relation_type: str) -> Sequence[Relation]:
56 |         """Find all relations of a specific type."""
57 |         query = select(Relation).filter(Relation.relation_type == relation_type)
58 |         result = await self.execute_query(query)
59 |         return result.scalars().all()
60 | 
61 |     async def delete_outgoing_relations_from_entity(self, entity_id: int) -> None:
62 |         """Delete outgoing relations for an entity.
63 | 
64 |         Only deletes relations where this entity is the source (from_id),
65 |         as these are the ones owned by this entity's markdown file.
66 |         """
67 |         async with db.scoped_session(self.session_maker) as session:
68 |             await session.execute(delete(Relation).where(Relation.from_id == entity_id))
69 | 
70 |     async def find_unresolved_relations(self) -> Sequence[Relation]:
71 |         """Find all unresolved relations, where to_id is null."""
72 |         query = select(Relation).filter(Relation.to_id.is_(None))
73 |         result = await self.execute_query(query)
74 |         return result.scalars().all()
75 | 
76 |     async def find_unresolved_relations_for_entity(self, entity_id: int) -> Sequence[Relation]:
77 |         """Find unresolved relations for a specific entity.
78 | 
79 |         Args:
80 |             entity_id: The entity whose unresolved outgoing relations to find.
81 | 
82 |         Returns:
83 |             List of unresolved relations where this entity is the source.
84 |         """
85 |         query = select(Relation).filter(Relation.from_id == entity_id, Relation.to_id.is_(None))
86 |         result = await self.execute_query(query)
87 |         return result.scalars().all()
88 | 
89 |     def get_load_options(self) -> List[LoaderOption]:
90 |         return [selectinload(Relation.from_entity), selectinload(Relation.to_entity)]
91 | 
```
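A minimal usage sketch of the repository above (not code from this repository): it assumes an `async_sessionmaker` already bound to the Basic Memory database and an existing project with id 1; the permalinks and relation type are hypothetical.

```python
from sqlalchemy.ext.asyncio import async_sessionmaker

from basic_memory.repository.relation_repository import RelationRepository


async def inspect_relations(session_maker: async_sessionmaker) -> None:
    repo = RelationRepository(session_maker, project_id=1)  # hypothetical project
    rel = await repo.find_relation("specs/spec-1", "docs/cloud-cli", "references")
    if rel is None:
        # Forward references keep to_id NULL until the target file is synced
        pending = await repo.find_unresolved_relations()
        print(f"{len(pending)} relations still unresolved")
```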
--------------------------------------------------------------------------------
/src/basic_memory/alembic/alembic.ini:
--------------------------------------------------------------------------------
```
  1 | # A generic, single database configuration.
  2 | 
  3 | [alembic]
  4 | # path to migration scripts
  5 | # Use forward slashes (/) also on Windows to provide an OS-agnostic path
  6 | script_location = .
  7 | 
  8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
  9 | # Uncomment the line below if you want the files to be prepended with date and time
 10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
 11 | # for all available tokens
 12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
 13 | 
 14 | # sys.path path, will be prepended to sys.path if present.
 15 | # defaults to the current working directory.
 16 | prepend_sys_path = .
 17 | 
 18 | # timezone to use when rendering the date within the migration file
 19 | # as well as the filename.
 20 | # If specified, requires Python>=3.9 or the backports.zoneinfo library, and the tzdata library.
 21 | # Any required deps can be installed by adding `alembic[tz]` to the pip requirements
 22 | # string value is passed to ZoneInfo()
 23 | # leave blank for localtime
 24 | # timezone =
 25 | 
 26 | # max length of characters to apply to the "slug" field
 27 | # truncate_slug_length = 40
 28 | 
 29 | # set to 'true' to run the environment during
 30 | # the 'revision' command, regardless of autogenerate
 31 | # revision_environment = false
 32 | 
 33 | # set to 'true' to allow .pyc and .pyo files without
 34 | # a source .py file to be detected as revisions in the
 35 | # versions/ directory
 36 | # sourceless = false
 37 | 
 38 | # version location specification; This defaults
 39 | # to migrations/versions.  When using multiple version
 40 | # directories, initial revisions must be specified with --version-path.
 41 | # The path separator used here should be the separator specified by "version_path_separator" below.
 42 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
 43 | 
 44 | # version path separator; As mentioned above, this is the character used to split
 45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
 46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
 47 | # Valid values for version_path_separator are:
 48 | #
 49 | # version_path_separator = :
 50 | # version_path_separator = ;
 51 | # version_path_separator = space
 52 | # version_path_separator = newline
 53 | #
 54 | # Use os.pathsep. Default configuration used for new projects.
 55 | version_path_separator = os
 56 | 
 57 | # set to 'true' to search source files recursively
 58 | # in each "version_locations" directory
 59 | # new in Alembic version 1.10
 60 | # recursive_version_locations = false
 61 | 
 62 | # the output encoding used when revision files
 63 | # are written from script.py.mako
 64 | # output_encoding = utf-8
 65 | 
 66 | sqlalchemy.url = driver://user:pass@localhost/dbname
 67 | 
 68 | 
 69 | [post_write_hooks]
 70 | # post_write_hooks defines scripts or Python functions that are run
 71 | # on newly generated revision scripts.  See the documentation for further
 72 | # detail and examples
 73 | 
 74 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
 75 | # hooks = black
 76 | # black.type = console_scripts
 77 | # black.entrypoint = black
 78 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
 79 | 
 80 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
 81 | # hooks = ruff
 82 | # ruff.type = exec
 83 | # ruff.executable = %(here)s/.venv/bin/ruff
 84 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
 85 | 
 86 | # Logging configuration
 87 | [loggers]
 88 | keys = root,sqlalchemy,alembic
 89 | 
 90 | [handlers]
 91 | keys = console
 92 | 
 93 | [formatters]
 94 | keys = generic
 95 | 
 96 | [logger_root]
 97 | level = WARNING
 98 | handlers = console
 99 | qualname =
100 | 
101 | [logger_sqlalchemy]
102 | level = WARNING
103 | handlers =
104 | qualname = sqlalchemy.engine
105 | 
106 | [logger_alembic]
107 | level = INFO
108 | handlers =
109 | qualname = alembic
110 | 
111 | [handler_console]
112 | class = StreamHandler
113 | args = (sys.stderr,)
114 | level = NOTSET
115 | formatter = generic
116 | 
117 | [formatter_generic]
118 | format = %(levelname)-5.5s [%(name)s] %(message)s
119 | datefmt = %H:%M:%S
120 | 
```
--------------------------------------------------------------------------------
/src/basic_memory/schemas/request.py:
--------------------------------------------------------------------------------
```python
  1 | """Request schemas for interacting with the knowledge graph."""
  2 | 
  3 | from typing import List, Optional, Annotated, Literal
  4 | from annotated_types import MaxLen, MinLen
  5 | 
  6 | from pydantic import BaseModel, field_validator
  7 | 
  8 | from basic_memory.schemas.base import (
  9 |     Relation,
 10 |     Permalink,
 11 | )
 12 | 
 13 | 
 14 | class SearchNodesRequest(BaseModel):
 15 |     """Search for entities in the knowledge graph.
 16 | 
 17 |     The search looks across multiple fields:
 18 |     - Entity title
 19 |     - Entity types
 20 |     - Summary
 21 |     - File content
 22 |     - Observations
 23 | 
 24 |     Features:
 25 |     - Case-insensitive matching
 26 |     - Partial word matches
 27 |     - Returns full entity objects with relations
 28 |     - Includes all matching entities
 29 |     - If a category is specified, only entities with that category are returned
 30 | 
 31 |     Example Queries:
 32 |     - "memory" - Find entities related to memory systems
 33 |     - "SQLite" - Find database-related components
 34 |     - "test" - Find test-related entities
 35 |     - "implementation" - Find concrete implementations
 36 |     - "service" - Find service components
 37 | 
 38 |     Note: Currently uses SQL ILIKE for matching. Wildcard (*) searches
 39 |     and full-text search capabilities are planned for future versions.
 40 |     """
 41 | 
 42 |     query: Annotated[str, MinLen(1), MaxLen(200)]
 43 |     category: Optional[str] = None
 44 | 
 45 | 
 46 | class GetEntitiesRequest(BaseModel):
 47 |     """Retrieve specific entities by their IDs.
 48 | 
 49 |     Used to load complete entity details including all observations
 50 |     and relations. Particularly useful for following relations
 51 |     discovered through search.
 52 |     """
 53 | 
 54 |     permalinks: Annotated[List[Permalink], MinLen(1), MaxLen(10)]
 55 | 
 56 | 
 57 | class CreateRelationsRequest(BaseModel):
 58 |     relations: List[Relation]
 59 | 
 60 | 
 61 | class EditEntityRequest(BaseModel):
 62 |     """Request schema for editing an existing entity's content.
 63 | 
 64 |     This allows for targeted edits without requiring the full entity content.
 65 |     Supports various operation types for different editing scenarios.
 66 |     """
 67 | 
 68 |     operation: Literal["append", "prepend", "find_replace", "replace_section"]
 69 |     content: str
 70 |     section: Optional[str] = None
 71 |     find_text: Optional[str] = None
 72 |     expected_replacements: int = 1
 73 | 
 74 |     @field_validator("section")
 75 |     @classmethod
 76 |     def validate_section_for_replace_section(cls, v, info):
 77 |         """Ensure section is provided for replace_section operation."""
 78 |         if info.data.get("operation") == "replace_section" and not v:
 79 |             raise ValueError("section parameter is required for replace_section operation")
 80 |         return v
 81 | 
 82 |     @field_validator("find_text")
 83 |     @classmethod
 84 |     def validate_find_text_for_find_replace(cls, v, info):
 85 |         """Ensure find_text is provided for find_replace operation."""
 86 |         if info.data.get("operation") == "find_replace" and not v:
 87 |             raise ValueError("find_text parameter is required for find_replace operation")
 88 |         return v
 89 | 
 90 | 
 91 | class MoveEntityRequest(BaseModel):
 92 |     """Request schema for moving an entity to a new file location.
 93 | 
 94 |     This allows moving notes to different paths while maintaining project
 95 |     consistency and optionally updating permalinks based on configuration.
 96 |     """
 97 | 
 98 |     identifier: Annotated[str, MinLen(1), MaxLen(200)]
 99 |     destination_path: Annotated[str, MinLen(1), MaxLen(500)]
100 |     project: Optional[str] = None
101 | 
102 |     @field_validator("destination_path")
103 |     @classmethod
104 |     def validate_destination_path(cls, v):
105 |         """Ensure destination path is relative and valid."""
106 |         if v.startswith("/"):
107 |             raise ValueError("destination_path must be relative, not absolute")
108 |         if ".." in v:
109 |             raise ValueError("destination_path cannot contain '..' path components")
110 |         if not v.strip():
111 |             raise ValueError("destination_path cannot be empty or whitespace only")
112 |         return v.strip()
113 | 
```
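To make the conditional validation concrete, a short sketch of how `EditEntityRequest` behaves. One subtlety worth noting: pydantic v2 does not run field validators on omitted defaults, so the guards fire when the field is passed explicitly (even as `None`), not when it is left out entirely:

```python
# Illustrative only: exercising the conditional validators defined above.
from pydantic import ValidationError

from basic_memory.schemas.request import EditEntityRequest

# Valid: replace_section with its required section heading supplied
EditEntityRequest(operation="replace_section", content="new text", section="## Update")

# Invalid: find_replace with an explicit empty find_text. Because operation
# is declared before find_text, the validator can read it from info.data.
try:
    EditEntityRequest(operation="find_replace", content="new text", find_text=None)
except ValidationError as exc:
    print(exc.errors()[0]["msg"])  # "... find_text parameter is required ..."
```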
--------------------------------------------------------------------------------
/src/basic_memory/repository/project_repository.py:
--------------------------------------------------------------------------------
```python
  1 | """Repository for managing projects in Basic Memory."""
  2 | 
  3 | from pathlib import Path
  4 | from typing import Optional, Sequence, Union
  5 | 
  6 | from sqlalchemy import text
  7 | from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
  8 | 
  9 | from basic_memory import db
 10 | from basic_memory.models.project import Project
 11 | from basic_memory.repository.repository import Repository
 12 | 
 13 | 
 14 | class ProjectRepository(Repository[Project]):
 15 |     """Repository for Project model.
 16 | 
 17 |     Projects represent collections of knowledge entities grouped together.
 18 |     Each entity, observation, and relation belongs to a specific project.
 19 |     """
 20 | 
 21 |     def __init__(self, session_maker: async_sessionmaker[AsyncSession]):
 22 |         """Initialize with session maker."""
 23 |         super().__init__(session_maker, Project)
 24 | 
 25 |     async def get_by_name(self, name: str) -> Optional[Project]:
 26 |         """Get project by name.
 27 | 
 28 |         Args:
 29 |             name: Unique name of the project
 30 |         """
 31 |         query = self.select().where(Project.name == name)
 32 |         return await self.find_one(query)
 33 | 
 34 |     async def get_by_permalink(self, permalink: str) -> Optional[Project]:
 35 |         """Get project by permalink.
 36 | 
 37 |         Args:
 38 |             permalink: URL-friendly identifier for the project
 39 |         """
 40 |         query = self.select().where(Project.permalink == permalink)
 41 |         return await self.find_one(query)
 42 | 
 43 |     async def get_by_path(self, path: Union[Path, str]) -> Optional[Project]:
 44 |         """Get project by filesystem path.
 45 | 
 46 |         Args:
 47 |             path: Path to the project directory (will be converted to string internally)
 48 |         """
 49 |         query = self.select().where(Project.path == Path(path).as_posix())
 50 |         return await self.find_one(query)
 51 | 
 52 |     async def get_default_project(self) -> Optional[Project]:
 53 |         """Get the default project (the one marked as is_default=True)."""
 54 |         query = self.select().where(Project.is_default.is_not(None))
 55 |         return await self.find_one(query)
 56 | 
 57 |     async def get_active_projects(self) -> Sequence[Project]:
 58 |         """Get all active projects."""
 59 |         query = self.select().where(Project.is_active == True)  # noqa: E712
 60 |         result = await self.execute_query(query)
 61 |         return list(result.scalars().all())
 62 | 
 63 |     async def set_as_default(self, project_id: int) -> Optional[Project]:
 64 |         """Set a project as the default and unset previous default.
 65 | 
 66 |         Args:
 67 |             project_id: ID of the project to set as default
 68 | 
 69 |         Returns:
 70 |             The updated project if found, None otherwise
 71 |         """
 72 |         async with db.scoped_session(self.session_maker) as session:
 73 |             # First, clear the default flag for all projects using direct SQL
 74 |             await session.execute(
 75 |                 text("UPDATE project SET is_default = NULL WHERE is_default IS NOT NULL")
 76 |             )
 77 |             await session.flush()
 78 | 
 79 |             # Set the new default project
 80 |             target_project = await self.select_by_id(session, project_id)
 81 |             if target_project:
 82 |                 target_project.is_default = True
 83 |                 await session.flush()
 84 |                 return target_project
 85 |             return None  # pragma: no cover
 86 | 
 87 |     async def update_path(self, project_id: int, new_path: str) -> Optional[Project]:
 88 |         """Update project path.
 89 | 
 90 |         Args:
 91 |             project_id: ID of the project to update
 92 |             new_path: New filesystem path for the project
 93 | 
 94 |         Returns:
 95 |             The updated project if found, None otherwise
 96 |         """
 97 |         async with db.scoped_session(self.session_maker) as session:
 98 |             project = await self.select_by_id(session, project_id)
 99 |             if project:
100 |                 project.path = new_path
101 |                 await session.flush()
102 |                 return project
103 |             return None
104 | 
```
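A minimal usage sketch for switching the default project (not repository code; the project name is hypothetical). Because `is_default` is stored as `TRUE` or `NULL` rather than `TRUE`/`FALSE`, `set_as_default()` first clears the flag with `is_default = NULL` and then sets the new row, all inside one scoped session, so at most one row ever carries the flag:

```python
from basic_memory.repository.project_repository import ProjectRepository


async def switch_default(repo: ProjectRepository, name: str) -> None:
    """Look up a project by name and promote it to default."""
    project = await repo.get_by_name(name)
    if project is None:
        raise ValueError(f"unknown project: {name!r}")
    await repo.set_as_default(project.id)
```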
--------------------------------------------------------------------------------
/test-int/mcp/test_project_state_sync_integration.py:
--------------------------------------------------------------------------------
```python
 1 | """Integration test for project state synchronization between MCP session and CLI config.
 2 | 
 3 | This test validates the fix for GitHub issue #148 where MCP session and CLI commands
 4 | had inconsistent project state, causing "Project not found" errors and edit failures.
 5 | 
 6 | The test simulates the exact workflow reported in the issue:
 7 | 1. MCP server starts with a default project
 8 | 2. Default project is changed via CLI/API
 9 | 3. MCP tools should immediately use the new project (no restart needed)
10 | 4. All operations should work consistently in the new project context
11 | """
12 | 
13 | import pytest
14 | from fastmcp import Client
15 | 
16 | 
17 | @pytest.mark.asyncio
18 | async def test_project_state_sync_after_default_change(
19 |     mcp_server, app, config_manager, test_project
20 | ):
21 |     """Test that MCP session stays in sync when default project is changed."""
22 | 
23 |     async with Client(mcp_server) as client:
24 |         # Step 1: Create a second project that we can switch to
25 |         create_result = await client.call_tool(
26 |             "create_memory_project",
27 |             {
28 |                 "project_name": "minerva",
29 |                 "project_path": "/tmp/minerva-test-project",
30 |                 "set_default": False,  # Don't set as default yet
31 |             },
32 |         )
33 |         assert len(create_result.content) == 1
34 |         assert "✓" in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
35 |         assert "minerva" in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
36 | 
37 |         # Step 2: Test that note operations work in the new project context
38 |         # This validates that the identifier resolution works correctly
39 |         write_result = await client.call_tool(
40 |             "write_note",
41 |             {
42 |                 "project": "minerva",
43 |                 "title": "Test Consistency Note",
44 |                 "folder": "test",
45 |                 "content": "# Test Note\n\nThis note tests project state consistency.\n\n- [test] Project state sync working",
46 |                 "tags": "test,consistency",
47 |             },
48 |         )
49 |         assert len(write_result.content) == 1
50 |         assert "Test Consistency Note" in write_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
51 | 
52 |         # Step 3: Test that we can read the note we just created
53 |         read_result = await client.call_tool(
54 |             "read_note", {"project": "minerva", "identifier": "Test Consistency Note"}
55 |         )
56 |         assert len(read_result.content) == 1
57 |         assert "Test Consistency Note" in read_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
58 |         assert "project state sync working" in read_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
59 | 
60 |         # Step 4: Test that edit operations work (this was failing in the original issue)
61 |         edit_result = await client.call_tool(
62 |             "edit_note",
63 |             {
64 |                 "project": "minerva",
65 |                 "identifier": "Test Consistency Note",
66 |                 "operation": "append",
67 |                 "content": "\n\n## Update\n\nEdit operation successful after project switch!",
68 |             },
69 |         )
70 |         assert len(edit_result.content) == 1
71 |         assert (
72 |             "added" in edit_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
73 |             and "lines" in edit_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
74 |         )
75 | 
76 |         # Step 5: Verify the edit was applied
77 |         final_read_result = await client.call_tool(
78 |             "read_note", {"project": "minerva", "identifier": "Test Consistency Note"}
79 |         )
80 |         assert len(final_read_result.content) == 1
81 |         final_content = final_read_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
82 |         assert "Edit operation successful" in final_content
83 | 
```
--------------------------------------------------------------------------------
/tests/markdown/test_relation_edge_cases.py:
--------------------------------------------------------------------------------
```python
  1 | """Tests for edge cases in relation parsing."""
  2 | 
  3 | from markdown_it import MarkdownIt
  4 | 
  5 | from basic_memory.markdown.plugins import relation_plugin, parse_relation, parse_inline_relations
  6 | from basic_memory.markdown.schemas import Relation
  7 | 
  8 | 
  9 | def test_empty_targets():
 10 |     """Test handling of empty targets."""
 11 |     md = MarkdownIt().use(relation_plugin)
 12 | 
 13 |     # Empty brackets
 14 |     tokens = md.parse("- type [[]]")
 15 |     token = next(t for t in tokens if t.type == "inline")
 16 |     assert parse_relation(token) is None
 17 | 
 18 |     # Only spaces
 19 |     tokens = md.parse("- type [[ ]]")
 20 |     token = next(t for t in tokens if t.type == "inline")
 21 |     assert parse_relation(token) is None
 22 | 
 23 |     # Whitespace in brackets
 24 |     tokens = md.parse("- type [[   ]]")
 25 |     token = next(t for t in tokens if t.type == "inline")
 26 |     assert parse_relation(token) is None
 27 | 
 28 | 
 29 | def test_malformed_links():
 30 |     """Test handling of malformed wiki links."""
 31 |     md = MarkdownIt().use(relation_plugin)
 32 | 
 33 |     # Missing close brackets
 34 |     tokens = md.parse("- type [[Target")
 35 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 36 | 
 37 |     # Missing open brackets
 38 |     tokens = md.parse("- type Target]]")
 39 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 40 | 
 41 |     # Backwards brackets
 42 |     tokens = md.parse("- type ]]Target[[")
 43 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 44 | 
 45 |     # Nested brackets
 46 |     tokens = md.parse("- type [[Outer [[Inner]] ]]")
 47 |     token = next(t for t in tokens if t.type == "inline")
 48 |     rel = parse_relation(token)
 49 |     assert rel is not None
 50 |     assert "Outer" in rel["target"]
 51 | 
 52 | 
 53 | def test_context_handling():
 54 |     """Test handling of contexts."""
 55 |     md = MarkdownIt().use(relation_plugin)
 56 | 
 57 |     # Unclosed context
 58 |     tokens = md.parse("- type [[Target]] (unclosed")
 59 |     token = next(t for t in tokens if t.type == "inline")
 60 |     rel = parse_relation(token)
 61 |     assert rel["context"] is None
 62 | 
 63 |     # Multiple parens
 64 |     tokens = md.parse("- type [[Target]] (with (nested) parens)")
 65 |     token = next(t for t in tokens if t.type == "inline")
 66 |     rel = parse_relation(token)
 67 |     assert rel["context"] == "with (nested) parens"
 68 | 
 69 |     # Empty context
 70 |     tokens = md.parse("- type [[Target]] ()")
 71 |     token = next(t for t in tokens if t.type == "inline")
 72 |     rel = parse_relation(token)
 73 |     assert rel["context"] is None
 74 | 
 75 | 
 76 | def test_inline_relations():
 77 |     """Test inline relation detection."""
 78 |     md = MarkdownIt().use(relation_plugin)
 79 | 
 80 |     # Multiple links in text
 81 |     text = "Text with [[Link1]] and [[Link2]] and [[Link3]]"
 82 |     rels = parse_inline_relations(text)
 83 |     assert len(rels) == 3
 84 |     assert {r["target"] for r in rels} == {"Link1", "Link2", "Link3"}
 85 | 
 86 |     # Links with surrounding text
 87 |     text = "Before [[Target]] After"
 88 |     rels = parse_inline_relations(text)
 89 |     assert len(rels) == 1
 90 |     assert rels[0]["target"] == "Target"
 91 | 
 92 |     # Multiple links on same line
 93 |     tokens = md.parse("[[One]] [[Two]] [[Three]]")
 94 |     token = next(t for t in tokens if t.type == "inline")
 95 |     assert len(token.meta["relations"]) == 3
 96 | 
 97 | 
 98 | def test_unicode_targets():
 99 |     """Test handling of Unicode in targets."""
100 |     md = MarkdownIt().use(relation_plugin)
101 | 
102 |     # Unicode in target
103 |     tokens = md.parse("- type [[测试]]")
104 |     token = next(t for t in tokens if t.type == "inline")
105 |     rel = parse_relation(token)
106 |     assert rel["target"] == "测试"
107 | 
108 |     # Unicode in type
109 |     tokens = md.parse("- 使用 [[Target]]")
110 |     token = next(t for t in tokens if t.type == "inline")
111 |     rel = parse_relation(token)
112 |     assert rel["type"] == "使用"
113 | 
114 |     # Unicode in context
115 |     tokens = md.parse("- type [[Target]] (测试)")
116 |     token = next(t for t in tokens if t.type == "inline")
117 |     rel = parse_relation(token)
118 |     assert rel["context"] == "测试"
119 | 
120 |     # Model validation with Unicode
121 |     relation = Relation.model_validate(rel)
122 |     assert relation.type == "type"
123 |     assert relation.target == "Target"
124 |     assert relation.context == "测试"
125 | 
```
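For orientation, a simplified sketch of wiki-link parsing that reproduces some of the rules these tests pin down. This is not the `relation_plugin` under test, and unlike the real plugin it does not handle nested parentheses inside a context:

```python
# Illustrative sketch: blank [[ ]] targets are ignored, a trailing (context)
# is optional, and an empty context is treated as absent.
import re

WIKI_LINK = re.compile(r"\[\[([^\]]+)\]\](?:\s*\(([^)]*)\))?")


def parse_links(text: str) -> list[dict]:
    links = []
    for target, context in WIKI_LINK.findall(text):
        if not target.strip():
            continue  # [[ ]] / [[   ]] cases from test_empty_targets
        links.append({"target": target.strip(), "context": context.strip() or None})
    return links


print(parse_links("Before [[Target]] (note) After"))
# [{'target': 'Target', 'context': 'note'}]
```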
--------------------------------------------------------------------------------
/test-int/BENCHMARKS.md:
--------------------------------------------------------------------------------
```markdown
  1 | # Performance Benchmarks
  2 | 
  3 | This directory contains performance benchmark tests for Basic Memory's sync/indexing operations.
  4 | 
  5 | ## Purpose
  6 | 
  7 | These benchmarks measure baseline performance to track improvements from optimizations. They are particularly important for:
  8 | - Cloud deployments with ephemeral databases that need fast re-indexing
  9 | - Large repositories (100s to 1000s of files)
 10 | - Validating optimization efforts
 11 | 
 12 | ## Running Benchmarks
 13 | 
 14 | ### Run all benchmarks (excluding slow ones)
 15 | ```bash
 16 | pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"
 17 | ```
 18 | 
 19 | ### Run specific benchmark
 20 | ```bash
 21 | # 100 files (fast, ~10-30 seconds)
 22 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_100_files -v
 23 | 
 24 | # 500 files (medium, ~1-3 minutes)
 25 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_500_files -v
 26 | 
 27 | # 1000 files (slow, ~3-10 minutes)
 28 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_1000_files -v
 29 | 
 30 | # Re-sync with no changes (tests scan performance)
 31 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_resync_no_changes -v
 32 | ```
 33 | 
 34 | ### Run all benchmarks including slow ones
 35 | ```bash
 36 | pytest test-int/test_sync_performance_benchmark.py -v -m benchmark
 37 | ```
 38 | 
 39 | ### Skip benchmarks in regular test runs
 40 | ```bash
 41 | pytest -m "not benchmark"
 42 | ```
 43 | 
 44 | ## Benchmark Output
 45 | 
 46 | Each benchmark provides detailed metrics including:
 47 | 
 48 | - **Performance Metrics**:
 49 |   - Total sync time
 50 |   - Files processed per second
 51 |   - Milliseconds per file
 52 | 
 53 | - **Database Metrics**:
 54 |   - Initial database size
 55 |   - Final database size
 56 |   - Database growth (total and per file)
 57 | 
 58 | - **Operation Counts**:
 59 |   - New files indexed
 60 |   - Modified files processed
 61 |   - Deleted files handled
 62 |   - Moved files tracked
 63 | 
 64 | ## Example Output
 65 | 
 66 | ```
 67 | ======================================================================
 68 | BENCHMARK: Sync 100 files (small repository)
 69 | ======================================================================
 70 | 
 71 | Generating 100 test files...
 72 |   Created files 0-100 (100/100)
 73 |   File generation completed in 0.15s (666.7 files/sec)
 74 | 
 75 | Initial database size: 120.00 KB
 76 | 
 77 | Starting sync of 100 files...
 78 | 
 79 | ----------------------------------------------------------------------
 80 | RESULTS:
 81 | ----------------------------------------------------------------------
 82 | Files processed:      100
 83 |   New:                100
 84 |   Modified:           0
 85 |   Deleted:            0
 86 |   Moved:              0
 87 | 
 88 | Performance:
 89 |   Total time:         12.34s
 90 |   Files/sec:          8.1
 91 |   ms/file:            123.4
 92 | 
 93 | Database:
 94 |   Initial size:       120.00 KB
 95 |   Final size:         5.23 MB
 96 |   Growth:             5.11 MB
 97 |   Growth per file:    52.31 KB
 98 | ======================================================================
 99 | ```
100 | 
101 | ## Interpreting Results
102 | 
103 | ### Good Performance Indicators
104 | - **Files/sec > 10**: Good indexing speed for small-medium repos
105 | - **Files/sec > 5**: Acceptable for large repos with complex relations
106 | - **DB growth < 100KB per file**: Reasonable index size
107 | 
108 | ### Areas for Improvement
109 | - **Files/sec < 5**: May benefit from batch operations
110 | - **ms/file > 200**: High latency per file, check for N+1 queries
111 | - **DB growth > 200KB per file**: Search index may be bloated (trigrams?)
112 | 
113 | ## Tracking Improvements
114 | 
115 | Before making optimizations:
116 | 1. Run benchmarks to establish baseline
117 | 2. Save output for comparison
118 | 3. Note any particular pain points (e.g., slow search indexing)
119 | 
120 | After optimizations:
121 | 1. Run the same benchmarks
122 | 2. Compare metrics:
123 |    - Files/sec should increase
124 |    - ms/file should decrease
125 |    - DB growth per file may decrease (with search optimizations)
126 | 3. Document improvements in PR
127 | 
128 | ## Related Issues
129 | 
130 | - [#351: Performance: Optimize sync/indexing for cloud deployments](https://github.com/basicmachines-co/basic-memory/issues/351)
131 | 
132 | ## Test File Generation
133 | 
134 | Benchmarks generate realistic markdown files with:
135 | - YAML frontmatter with tags
136 | - 3-10 observations per file with categories
137 | - 1-3 relations per file (including forward references)
138 | - Varying content to simulate real usage
139 | - Files organized in category subdirectories
140 | 
```
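One note on reading the report: `Files/sec` and `ms/file` are reciprocal views of the same timing and should always agree. Sanity math for the example run above:

```python
# Values taken from the sample output in this document
total_time_s = 12.34
files = 100
print(f"files/sec: {files / total_time_s:.1f}")         # 8.1
print(f"ms/file:   {total_time_s / files * 1000:.1f}")  # 123.4
```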
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/upload_command.py:
--------------------------------------------------------------------------------
```python
  1 | """Upload CLI commands for basic-memory projects."""
  2 | 
  3 | import asyncio
  4 | from pathlib import Path
  5 | 
  6 | import typer
  7 | from rich.console import Console
  8 | 
  9 | from basic_memory.cli.app import cloud_app
 10 | from basic_memory.cli.commands.cloud.cloud_utils import (
 11 |     create_cloud_project,
 12 |     project_exists,
 13 |     sync_project,
 14 | )
 15 | from basic_memory.cli.commands.cloud.upload import upload_path
 16 | 
 17 | console = Console()
 18 | 
 19 | 
 20 | @cloud_app.command("upload")
 21 | def upload(
 22 |     path: Path = typer.Argument(
 23 |         ...,
 24 |         help="Path to local file or directory to upload",
 25 |         exists=True,
 26 |         readable=True,
 27 |         resolve_path=True,
 28 |     ),
 29 |     project: str = typer.Option(
 30 |         ...,
 31 |         "--project",
 32 |         "-p",
 33 |         help="Cloud project name (destination)",
 34 |     ),
 35 |     create_project: bool = typer.Option(
 36 |         False,
 37 |         "--create-project",
 38 |         "-c",
 39 |         help="Create project if it doesn't exist",
 40 |     ),
 41 |     sync: bool = typer.Option(
 42 |         True,
 43 |         "--sync/--no-sync",
 44 |         help="Sync project after upload (default: true)",
 45 |     ),
 46 |     verbose: bool = typer.Option(
 47 |         False,
 48 |         "--verbose",
 49 |         "-v",
 50 |         help="Show detailed information about file filtering and upload",
 51 |     ),
 52 |     no_gitignore: bool = typer.Option(
 53 |         False,
 54 |         "--no-gitignore",
 55 |         help="Skip .gitignore patterns (still respects .bmignore)",
 56 |     ),
 57 |     dry_run: bool = typer.Option(
 58 |         False,
 59 |         "--dry-run",
 60 |         help="Show what would be uploaded without actually uploading",
 61 |     ),
 62 | ) -> None:
 63 |     """Upload local files or directories to cloud project via WebDAV.
 64 | 
 65 |     Examples:
 66 |       bm cloud upload ~/my-notes --project research
 67 |       bm cloud upload notes.md --project research --create-project
 68 |       bm cloud upload ~/docs --project work --no-sync
 69 |       bm cloud upload ./history --project proto --verbose
 70 |       bm cloud upload ./notes --project work --no-gitignore
 71 |       bm cloud upload ./files --project test --dry-run
 72 |     """
 73 | 
 74 |     async def _upload():
 75 |         # Check if project exists
 76 |         if not await project_exists(project):
 77 |             if create_project:
 78 |                 console.print(f"[blue]Creating cloud project '{project}'...[/blue]")
 79 |                 try:
 80 |                     await create_cloud_project(project)
 81 |                     console.print(f"[green]✓ Created project '{project}'[/green]")
 82 |                 except Exception as e:
 83 |                     console.print(f"[red]Failed to create project: {e}[/red]")
 84 |                     raise typer.Exit(1)
 85 |             else:
 86 |                 console.print(
 87 |                     f"[red]Project '{project}' does not exist.[/red]\n"
 88 |                     f"[yellow]Options:[/yellow]\n"
 89 |                     f"  1. Create it first: bm project add {project}\n"
 90 |                     f"  2. Use --create-project flag to create automatically"
 91 |                 )
 92 |                 raise typer.Exit(1)
 93 | 
 94 |         # Perform upload (or dry run)
 95 |         if dry_run:
 96 |             console.print(
 97 |                 f"[yellow]DRY RUN: Showing what would be uploaded to '{project}'[/yellow]"
 98 |             )
 99 |         else:
100 |             console.print(f"[blue]Uploading {path} to project '{project}'...[/blue]")
101 | 
102 |         success = await upload_path(
103 |             path, project, verbose=verbose, use_gitignore=not no_gitignore, dry_run=dry_run
104 |         )
105 |         if not success:
106 |             console.print("[red]Upload failed[/red]")
107 |             raise typer.Exit(1)
108 | 
109 |         if dry_run:
110 |             console.print("[yellow]DRY RUN complete - no files were uploaded[/yellow]")
111 |         else:
112 |             console.print(f"[green]✅ Successfully uploaded to '{project}'[/green]")
113 | 
114 |         # Sync project if requested (skip on dry run)
115 |         if sync and not dry_run:
116 |             console.print(f"[blue]Syncing project '{project}'...[/blue]")
117 |             try:
118 |                 await sync_project(project)
119 |             except Exception as e:
120 |                 console.print(f"[yellow]Warning: Sync failed: {e}[/yellow]")
121 |                 console.print("[dim]Files uploaded but may not be indexed yet[/dim]")
122 | 
123 |     asyncio.run(_upload())
124 | 
```
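The command above follows a pattern worth calling out: Typer callbacks are synchronous, so the async work lives in an inner coroutine that the callback drives to completion with `asyncio.run()`. A minimal sketch with hypothetical names:

```python
import asyncio

import typer

app = typer.Typer()


@app.command()
def ping(host: str) -> None:
    # Sync entry point; all awaitable work happens inside the coroutine.
    async def _ping() -> None:
        await asyncio.sleep(0.1)  # stand-in for real async I/O
        typer.echo(f"pong from {host}")

    asyncio.run(_ping())
```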
--------------------------------------------------------------------------------
/tests/utils/test_permalink_formatting.py:
--------------------------------------------------------------------------------
```python
  1 | """Test permalink formatting during sync."""
  2 | 
  3 | from pathlib import Path
  4 | 
  5 | import pytest
  6 | 
  7 | from basic_memory.config import ProjectConfig
  8 | from basic_memory.services import EntityService
  9 | from basic_memory.sync.sync_service import SyncService
 10 | from basic_memory.utils import generate_permalink
 11 | 
 12 | 
 13 | async def create_test_file(path: Path, content: str = "test content") -> None:
 14 |     """Create a test file with given content."""
 15 |     path.parent.mkdir(parents=True, exist_ok=True)
 16 |     path.write_text(content)
 17 | 
 18 | 
 19 | @pytest.mark.asyncio
 20 | async def test_permalink_formatting(
 21 |     sync_service: SyncService, project_config: ProjectConfig, entity_service: EntityService
 22 | ):
 23 |     """Test that permalinks are properly formatted during sync.
 24 | 
 25 |     This ensures:
 26 |     - Underscores are converted to hyphens
 27 |     - Spaces are converted to hyphens
 28 |     - Mixed case is lowercased
 29 |     - Directory structure is preserved
 30 |     - Multiple directories work correctly
 31 |     """
 32 |     project_dir = project_config.home
 33 | 
 34 |     # Test cases with different filename formats
 35 |     test_cases = [
 36 |         # filename -> expected permalink
 37 |         ("my_awesome_feature.md", "my-awesome-feature"),
 38 |         ("MIXED_CASE_NAME.md", "mixed-case-name"),
 39 |         ("spaces and_underscores.md", "spaces-and-underscores"),
 40 |         ("design/model_refactor.md", "design/model-refactor"),
 41 |         (
 42 |             "test/multiple_word_directory/feature_name.md",
 43 |             "test/multiple-word-directory/feature-name",
 44 |         ),
 45 |     ]
 46 | 
 47 |     # Create test files
 48 |     for filename, _ in test_cases:
 49 |         content = """
 50 | ---
 51 | type: knowledge
 52 | created: 2024-01-01
 53 | modified: 2024-01-01
 54 | ---
 55 | # Test File
 56 | 
 57 | Testing permalink generation.
 58 | """
 59 |         await create_test_file(project_dir / filename, content)
 60 | 
 61 |     # Run sync
 62 |     await sync_service.sync(project_config.home)
 63 | 
 64 |     # Verify permalinks
 65 |     for filename, expected_permalink in test_cases:
 66 |         entity = await entity_service.repository.get_by_file_path(filename)
 67 |         assert entity.permalink == expected_permalink, (
 68 |             f"File {filename} should have permalink {expected_permalink}"
 69 |         )
 70 | 
 71 | 
 72 | @pytest.mark.parametrize(
 73 |     "input_path, expected",
 74 |     [
 75 |         ("test/Über File.md", "test/uber-file"),
 76 |         ("docs/résumé.md", "docs/resume"),
 77 |         ("notes/Déjà vu.md", "notes/deja-vu"),
 78 |         ("papers/Jürgen's Findings.md", "papers/jurgens-findings"),
 79 |         ("archive/François Müller.md", "archive/francois-muller"),
 80 |         ("research/Søren Kierkegård.md", "research/soren-kierkegard"),
 81 |         ("articles/El Niño.md", "articles/el-nino"),
 82 |         ("ArticlesElNiño.md", "articles-el-nino"),
 83 |         ("articleselniño.md", "articleselnino"),
 84 |         ("articles-El-Niño.md", "articles-el-nino"),
 85 |     ],
 86 | )
 87 | def test_latin_accents_transliteration(input_path, expected):
 88 |     """Test that Latin letters with accents are properly transliterated."""
 89 |     assert generate_permalink(input_path) == expected
 90 | 
 91 | 
 92 | @pytest.mark.parametrize(
 93 |     "input_path, expected",
 94 |     [
 95 |         ("中文/测试文档.md", "中文/测试文档"),
 96 |         ("notes/北京市.md", "notes/北京市"),
 97 |         ("research/上海简介.md", "research/上海简介"),
 98 |         ("docs/中文 English Mixed.md", "docs/中文-english-mixed"),
 99 |         ("articles/东京Tokyo混合.md", "articles/东京-tokyo-混合"),
100 |         ("papers/汉字_underscore_test.md", "papers/汉字-underscore-test"),
101 |         ("projects/中文CamelCase测试.md", "projects/中文-camel-case-测试"),
102 |     ],
103 | )
104 | def test_chinese_character_preservation(input_path, expected):
105 |     """Test that Chinese characters are preserved in permalinks."""
106 |     assert generate_permalink(input_path) == expected
107 | 
108 | 
109 | @pytest.mark.parametrize(
110 |     "input_path, expected",
111 |     [
112 |         ("mixed/北京Café.md", "mixed/北京-cafe"),
113 |         ("notes/东京Tōkyō.md", "notes/东京-tokyo"),
114 |         ("research/München中文.md", "research/munchen-中文"),
115 |         ("docs/Über测试.md", "docs/uber-测试"),
116 |         ("complex/北京Beijing上海Shanghai.md", "complex/北京-beijing-上海-shanghai"),
117 |         ("special/中文!@#$%^&*()_+.md", "special/中文"),
118 |         ("punctuation/你好,世界!.md", "punctuation/你好世界"),
119 |     ],
120 | )
121 | def test_mixed_character_sets(input_path, expected):
122 |     """Test handling of mixed character sets and edge cases."""
123 |     assert generate_permalink(input_path) == expected
124 | 
```
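For orientation, a toy approximation of the ASCII-only rules the first test encodes. This is not the real `basic_memory.utils.generate_permalink`, which additionally transliterates accented Latin characters and hyphenates CJK boundaries, as the parametrized cases above show:

```python
import re


def simple_permalink(path: str) -> str:
    """Toy approximation of generate_permalink, ASCII rules only."""
    path = re.sub(r"\.md$", "", path)                    # drop extension
    path = re.sub(r"(?<=[a-z0-9])(?=[A-Z])", "-", path)  # CamelCase -> Camel-Case
    path = re.sub(r"[ _]+", "-", path)                   # spaces/underscores -> hyphens
    return path.lower()                                  # keeps '/' separators


assert simple_permalink("design/model_refactor.md") == "design/model-refactor"
assert simple_permalink("MIXED_CASE_NAME.md") == "mixed-case-name"
```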
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/647e7a75e2cd_project_constraint_fix.py:
--------------------------------------------------------------------------------
```python
  1 | """project constraint fix
  2 | 
  3 | Revision ID: 647e7a75e2cd
  4 | Revises: 5fe1ab1ccebe
  5 | Create Date: 2025-06-03 12:48:30.162566
  6 | 
  7 | """
  8 | 
  9 | from typing import Sequence, Union
 10 | 
 11 | from alembic import op
 12 | import sqlalchemy as sa
 13 | 
 14 | 
 15 | # revision identifiers, used by Alembic.
 16 | revision: str = "647e7a75e2cd"
 17 | down_revision: Union[str, None] = "5fe1ab1ccebe"
 18 | branch_labels: Union[str, Sequence[str], None] = None
 19 | depends_on: Union[str, Sequence[str], None] = None
 20 | 
 21 | 
 22 | def upgrade() -> None:
 23 |     """Remove the problematic UNIQUE constraint on is_default column.
 24 | 
 25 |     The UNIQUE constraint prevents multiple projects from having is_default=FALSE,
 26 |     which breaks project creation when the service sets is_default=False.
 27 | 
 28 |     Since SQLite doesn't support dropping specific constraints easily, we'll
 29 |     recreate the table without the problematic constraint.
 30 |     """
 31 |     # For SQLite, we need to recreate the table without the UNIQUE constraint
 32 |     # Create a new table without the UNIQUE constraint on is_default
 33 |     op.create_table(
 34 |         "project_new",
 35 |         sa.Column("id", sa.Integer(), nullable=False),
 36 |         sa.Column("name", sa.String(), nullable=False),
 37 |         sa.Column("description", sa.Text(), nullable=True),
 38 |         sa.Column("permalink", sa.String(), nullable=False),
 39 |         sa.Column("path", sa.String(), nullable=False),
 40 |         sa.Column("is_active", sa.Boolean(), nullable=False),
 41 |         sa.Column("is_default", sa.Boolean(), nullable=True),  # No UNIQUE constraint!
 42 |         sa.Column("created_at", sa.DateTime(), nullable=False),
 43 |         sa.Column("updated_at", sa.DateTime(), nullable=False),
 44 |         sa.PrimaryKeyConstraint("id"),
 45 |         sa.UniqueConstraint("name"),
 46 |         sa.UniqueConstraint("permalink"),
 47 |     )
 48 | 
 49 |     # Copy data from old table to new table
 50 |     op.execute("INSERT INTO project_new SELECT * FROM project")
 51 | 
 52 |     # Drop the old table
 53 |     op.drop_table("project")
 54 | 
 55 |     # Rename the new table
 56 |     op.rename_table("project_new", "project")
 57 | 
 58 |     # Recreate the indexes
 59 |     with op.batch_alter_table("project", schema=None) as batch_op:
 60 |         batch_op.create_index("ix_project_created_at", ["created_at"], unique=False)
 61 |         batch_op.create_index("ix_project_name", ["name"], unique=True)
 62 |         batch_op.create_index("ix_project_path", ["path"], unique=False)
 63 |         batch_op.create_index("ix_project_permalink", ["permalink"], unique=True)
 64 |         batch_op.create_index("ix_project_updated_at", ["updated_at"], unique=False)
 65 | 
 66 | 
 67 | def downgrade() -> None:
 68 |     """Add back the UNIQUE constraint on is_default column.
 69 | 
 70 |     WARNING: This will break project creation again if multiple projects
 71 |     have is_default=FALSE.
 72 |     """
 73 |     # Recreate the table with the UNIQUE constraint
 74 |     op.create_table(
 75 |         "project_old",
 76 |         sa.Column("id", sa.Integer(), nullable=False),
 77 |         sa.Column("name", sa.String(), nullable=False),
 78 |         sa.Column("description", sa.Text(), nullable=True),
 79 |         sa.Column("permalink", sa.String(), nullable=False),
 80 |         sa.Column("path", sa.String(), nullable=False),
 81 |         sa.Column("is_active", sa.Boolean(), nullable=False),
 82 |         sa.Column("is_default", sa.Boolean(), nullable=True),
 83 |         sa.Column("created_at", sa.DateTime(), nullable=False),
 84 |         sa.Column("updated_at", sa.DateTime(), nullable=False),
 85 |         sa.PrimaryKeyConstraint("id"),
 86 |         sa.UniqueConstraint("is_default"),  # Add back the problematic constraint
 87 |         sa.UniqueConstraint("name"),
 88 |         sa.UniqueConstraint("permalink"),
 89 |     )
 90 | 
 91 |     # Copy data (this may fail if multiple FALSE values exist)
 92 |     op.execute("INSERT INTO project_old SELECT * FROM project")
 93 | 
 94 |     # Drop the current table and rename
 95 |     op.drop_table("project")
 96 |     op.rename_table("project_old", "project")
 97 | 
 98 |     # Recreate indexes
 99 |     with op.batch_alter_table("project", schema=None) as batch_op:
100 |         batch_op.create_index("ix_project_created_at", ["created_at"], unique=False)
101 |         batch_op.create_index("ix_project_name", ["name"], unique=True)
102 |         batch_op.create_index("ix_project_path", ["path"], unique=False)
103 |         batch_op.create_index("ix_project_permalink", ["permalink"], unique=True)
104 |         batch_op.create_index("ix_project_updated_at", ["updated_at"], unique=False)
105 | 
```
--------------------------------------------------------------------------------
/tests/services/test_project_service_operations.py:
--------------------------------------------------------------------------------
```python
  1 | """Additional tests for ProjectService operations."""
  2 | 
  3 | import os
  4 | import tempfile
  5 | from pathlib import Path
  6 | from unittest.mock import patch
  7 | 
  8 | import pytest
  9 | 
 10 | from basic_memory.services.project_service import ProjectService
 11 | 
 12 | 
 13 | @pytest.mark.asyncio
 14 | async def test_get_project_from_database(project_service: ProjectService):
 15 |     """Test getting projects from the database."""
 16 |     # Generate unique project name for testing
 17 |     test_project_name = f"test-project-{os.urandom(4).hex()}"
 18 |     with tempfile.TemporaryDirectory() as temp_dir:
 19 |         test_root = Path(temp_dir)
 20 |         test_path = str(test_root / "test-project")
 21 | 
 22 |         # Make sure directory exists
 23 |         os.makedirs(test_path, exist_ok=True)
 24 | 
 25 |         try:
 26 |             # Add a project to the database
 27 |             project_data = {
 28 |                 "name": test_project_name,
 29 |                 "path": test_path,
 30 |                 "permalink": test_project_name.lower().replace(" ", "-"),
 31 |                 "is_active": True,
 32 |                 "is_default": False,
 33 |             }
 34 |             await project_service.repository.create(project_data)
 35 | 
 36 |             # Verify we can get the project
 37 |             project = await project_service.repository.get_by_name(test_project_name)
 38 |             assert project is not None
 39 |             assert project.name == test_project_name
 40 |             assert project.path == test_path
 41 | 
 42 |         finally:
 43 |             # Clean up
 44 |             project = await project_service.repository.get_by_name(test_project_name)
 45 |             if project:
 46 |                 await project_service.repository.delete(project.id)
 47 | 
 48 | 
 49 | @pytest.mark.asyncio
 50 | async def test_add_project_to_config(project_service: ProjectService, config_manager):
 51 |     """Test adding a project to the config manager."""
 52 |     # Generate unique project name for testing
 53 |     test_project_name = f"config-project-{os.urandom(4).hex()}"
 54 |     with tempfile.TemporaryDirectory() as temp_dir:
 55 |         test_root = Path(temp_dir)
 56 |         test_path = (test_root / "config-project").as_posix()
 57 | 
 58 |         # Make sure directory exists
 59 |         os.makedirs(test_path, exist_ok=True)
 60 | 
 61 |         try:
 62 |             # Add a project to config only (using ConfigManager directly)
 63 |             config_manager.add_project(test_project_name, test_path)
 64 | 
 65 |             # Verify it's in the config
 66 |             assert test_project_name in project_service.projects
 67 |             assert project_service.projects[test_project_name] == test_path
 68 | 
 69 |         finally:
 70 |             # Clean up
 71 |             if test_project_name in project_service.projects:
 72 |                 config_manager.remove_project(test_project_name)
 73 | 
 74 | 
 75 | @pytest.mark.asyncio
 76 | async def test_update_project_path(project_service: ProjectService, config_manager):
 77 |     """Test updating a project's path."""
 78 |     # Create a test project
 79 |     test_project = f"path-update-test-project-{os.urandom(4).hex()}"
 80 |     with tempfile.TemporaryDirectory() as temp_dir:
 81 |         test_root = Path(temp_dir)
 82 |         original_path = (test_root / "original-path").as_posix()
 83 |         new_path = (test_root / "new-path").as_posix()
 84 | 
 85 |         # Make sure directories exist
 86 |         os.makedirs(original_path, exist_ok=True)
 87 |         os.makedirs(new_path, exist_ok=True)
 88 | 
 89 |         try:
 90 |             # Add the project
 91 |             await project_service.add_project(test_project, original_path)
 92 | 
 93 |             # Mock the update_project method to avoid issues with complex DB updates
 94 |             with patch.object(project_service, "update_project"):
 95 |                 # Just check if the project exists
 96 |                 project = await project_service.repository.get_by_name(test_project)
 97 |                 assert project is not None
 98 |                 assert project.path == original_path
 99 | 
100 |             # Since we mock the update_project method, we skip verifying path updates
101 | 
102 |         finally:
103 |             # Clean up
104 |             if test_project in project_service.projects:
105 |                 try:
106 |                     project = await project_service.repository.get_by_name(test_project)
107 |                     if project:
108 |                         await project_service.repository.delete(project.id)
109 |                     config_manager.remove_project(test_project)
110 |                 except Exception:
111 |                     pass
112 | 
```
--------------------------------------------------------------------------------
/test-int/cli/test_project_commands_integration.py:
--------------------------------------------------------------------------------
```python
  1 | """Integration tests for project CLI commands."""
  2 | 
  3 | import tempfile
  4 | from pathlib import Path
  5 | 
  6 | from typer.testing import CliRunner
  7 | 
  8 | from basic_memory.cli.main import app
  9 | 
 10 | 
 11 | def test_project_list(app_config, test_project, config_manager):
 12 |     """Test 'bm project list' command shows projects."""
 13 |     runner = CliRunner()
 14 |     result = runner.invoke(app, ["project", "list"])
 15 | 
 16 |     if result.exit_code != 0:
 17 |         print(f"STDOUT: {result.stdout}")
 18 |         print(f"STDERR: {result.stderr}")
 19 |         print(f"Exception: {result.exception}")
 20 |     assert result.exit_code == 0
 21 |     assert "test-project" in result.stdout
 22 |     assert "✓" in result.stdout  # default marker
 23 | 
 24 | 
 25 | def test_project_info(app_config, test_project, config_manager):
 26 |     """Test 'bm project info' command shows project details."""
 27 |     runner = CliRunner()
 28 |     result = runner.invoke(app, ["project", "info", "test-project"])
 29 | 
 30 |     if result.exit_code != 0:
 31 |         print(f"STDOUT: {result.stdout}")
 32 |         print(f"STDERR: {result.stderr}")
 33 |     assert result.exit_code == 0
 34 |     assert "Basic Memory Project Info" in result.stdout
 35 |     assert "test-project" in result.stdout
 36 |     assert "Statistics" in result.stdout
 37 | 
 38 | 
 39 | def test_project_info_json(app_config, test_project, config_manager):
 40 |     """Test 'bm project info --json' command outputs valid JSON."""
 41 |     import json
 42 | 
 43 |     runner = CliRunner()
 44 |     result = runner.invoke(app, ["project", "info", "test-project", "--json"])
 45 | 
 46 |     if result.exit_code != 0:
 47 |         print(f"STDOUT: {result.stdout}")
 48 |         print(f"STDERR: {result.stderr}")
 49 |     assert result.exit_code == 0
 50 | 
 51 |     # Parse JSON to verify it's valid
 52 |     data = json.loads(result.stdout)
 53 |     assert data["project_name"] == "test-project"
 54 |     assert "statistics" in data
 55 |     assert "system" in data
 56 | 
 57 | 
 58 | def test_project_add_and_remove(app_config, config_manager):
 59 |     """Test adding and removing a project."""
 60 |     runner = CliRunner()
 61 | 
 62 |     # Use a separate temporary directory to avoid nested path conflicts
 63 |     with tempfile.TemporaryDirectory() as temp_dir:
 64 |         new_project_path = Path(temp_dir) / "new-project"
 65 |         new_project_path.mkdir()
 66 | 
 67 |         # Add project
 68 |         result = runner.invoke(app, ["project", "add", "new-project", str(new_project_path)])
 69 | 
 70 |         if result.exit_code != 0:
 71 |             print(f"STDOUT: {result.stdout}")
 72 |             print(f"STDERR: {result.stderr}")
 73 |         assert result.exit_code == 0
 74 |         assert (
 75 |             "Project 'new-project' added successfully" in result.stdout
 76 |             or "added" in result.stdout.lower()
 77 |         )
 78 | 
 79 |         # Verify it shows up in list
 80 |         result = runner.invoke(app, ["project", "list"])
 81 |         assert result.exit_code == 0
 82 |         assert "new-project" in result.stdout
 83 | 
 84 |         # Remove project
 85 |         result = runner.invoke(app, ["project", "remove", "new-project"])
 86 |         assert result.exit_code == 0
 87 |         assert "removed" in result.stdout.lower() or "deleted" in result.stdout.lower()
 88 | 
 89 | 
 90 | def test_project_set_default(app_config, config_manager):
 91 |     """Test setting default project."""
 92 |     runner = CliRunner()
 93 | 
 94 |     # Use a separate temporary directory to avoid nested path conflicts
 95 |     with tempfile.TemporaryDirectory() as temp_dir:
 96 |         new_project_path = Path(temp_dir) / "another-project"
 97 |         new_project_path.mkdir()
 98 | 
 99 |         # Add a second project
100 |         result = runner.invoke(app, ["project", "add", "another-project", str(new_project_path)])
101 |         if result.exit_code != 0:
102 |             print(f"STDOUT: {result.stdout}")
103 |             print(f"STDERR: {result.stderr}")
104 |         assert result.exit_code == 0
105 | 
106 |         # Set as default
107 |         result = runner.invoke(app, ["project", "default", "another-project"])
108 |         if result.exit_code != 0:
109 |             print(f"STDOUT: {result.stdout}")
110 |             print(f"STDERR: {result.stderr}")
111 |         assert result.exit_code == 0
112 |         assert "default" in result.stdout.lower()
113 | 
114 |         # Verify in list
115 |         result = runner.invoke(app, ["project", "list"])
116 |         assert result.exit_code == 0
117 |         # The new project should have the checkmark now
118 |         lines = result.stdout.split("\n")
119 |         for line in lines:
120 |             if "another-project" in line:
121 |                 assert "✓" in line
122 | 
```
--------------------------------------------------------------------------------
/tests/markdown/test_observation_edge_cases.py:
--------------------------------------------------------------------------------
```python
  1 | """Tests for edge cases in observation parsing."""
  2 | 
  3 | from markdown_it import MarkdownIt
  4 | 
  5 | from basic_memory.markdown.plugins import observation_plugin, parse_observation
  6 | from basic_memory.markdown.schemas import Observation
  7 | 
  8 | 
  9 | def test_empty_input():
 10 |     """Test handling of empty input."""
 11 |     md = MarkdownIt().use(observation_plugin)
 12 | 
 13 |     tokens = md.parse("")
 14 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 15 | 
 16 |     tokens = md.parse("   ")
 17 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 18 | 
 19 |     tokens = md.parse("\n")
 20 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 21 | 
 22 | 
 23 | def test_invalid_context():
 24 |     """Test handling of invalid context format."""
 25 |     md = MarkdownIt().use(observation_plugin)
 26 | 
 27 |     # Unclosed context
 28 |     tokens = md.parse("- [test] Content (unclosed")
 29 |     token = next(t for t in tokens if t.type == "inline")
 30 |     obs = parse_observation(token)
 31 |     assert obs["content"] == "Content (unclosed"
 32 |     assert obs["context"] is None
 33 | 
 34 |     # Multiple parens
 35 |     tokens = md.parse("- [test] Content (with) extra) parens)")
 36 |     token = next(t for t in tokens if t.type == "inline")
 37 |     obs = parse_observation(token)
 38 |     assert obs["content"] == "Content"
 39 |     assert obs["context"] == "with) extra) parens"
 40 | 
 41 | 
 42 | def test_complex_format():
 43 |     """Test parsing complex observation formats."""
 44 |     md = MarkdownIt().use(observation_plugin)
 45 | 
 46 |     # Multiple hashtags together
 47 |     tokens = md.parse("- [complex test] This is #tag1#tag2 with #tag3 content")
 48 |     token = next(t for t in tokens if t.type == "inline")
 49 | 
 50 |     obs = parse_observation(token)
 51 |     assert obs["category"] == "complex test"
 52 |     assert set(obs["tags"]) == {"tag1", "tag2", "tag3"}
 53 |     assert obs["content"] == "This is #tag1#tag2 with #tag3 content"
 54 | 
 55 |     # Pydantic model validation
 56 |     observation = Observation.model_validate(obs)
 57 |     assert observation.category == "complex test"
 58 |     assert set(observation.tags) == {"tag1", "tag2", "tag3"}
 59 |     assert observation.content == "This is #tag1#tag2 with #tag3 content"
 60 | 
 61 | 
 62 | def test_malformed_category():
 63 |     """Test handling of malformed category brackets."""
 64 |     md = MarkdownIt().use(observation_plugin)
 65 | 
 66 |     # Empty category
 67 |     tokens = md.parse("- [] Empty category")
 68 |     token = next(t for t in tokens if t.type == "inline")
 69 |     observation = Observation.model_validate(parse_observation(token))
 70 |     assert observation.category is None
 71 |     assert observation.content == "Empty category"
 72 | 
 73 |     # Missing close bracket
 74 |     tokens = md.parse("- [test Content")
 75 |     token = next(t for t in tokens if t.type == "inline")
 76 |     observation = Observation.model_validate(parse_observation(token))
 77 |     # Should treat whole thing as content
 78 |     assert observation.category is None
 79 |     assert "test Content" in observation.content
 80 | 
 81 | 
 82 | def test_no_category():
 83 |     """Test handling of malformed category brackets."""
 84 |     md = MarkdownIt().use(observation_plugin)
 85 | 
 86 |     # Empty category
 87 |     tokens = md.parse("- No category")
 88 |     token = next(t for t in tokens if t.type == "inline")
 89 |     observation = Observation.model_validate(parse_observation(token))
 90 |     assert observation.category is None
 91 |     assert observation.content == "No category"
 92 | 
 93 | 
 94 | def test_unicode_content():
 95 |     """Test handling of Unicode content."""
 96 |     md = MarkdownIt().use(observation_plugin)
 97 | 
 98 |     # Emoji
 99 |     tokens = md.parse("- [test] Emoji test 👍 #emoji #test (Testing emoji)")
100 |     token = next(t for t in tokens if t.type == "inline")
101 |     obs = parse_observation(token)
102 |     assert "👍" in obs["content"]
103 |     assert "emoji" in obs["tags"]
104 | 
105 |     # Non-Latin scripts
106 |     tokens = md.parse("- [中文] Chinese text 测试 #language (Script test)")
107 |     token = next(t for t in tokens if t.type == "inline")
108 |     obs = parse_observation(token)
109 |     assert obs["category"] == "中文"
110 |     assert "测试" in obs["content"]
111 | 
112 |     # Mixed scripts and emoji
113 |     tokens = md.parse("- [test] Mixed 中文 and 👍 #mixed")
114 |     token = next(t for t in tokens if t.type == "inline")
115 |     obs = parse_observation(token)
116 |     assert "中文" in obs["content"]
117 |     assert "👍" in obs["content"]
118 | 
119 |     # Model validation with Unicode
120 |     observation = Observation.model_validate(obs)
121 |     assert "中文" in observation.content
122 |     assert "👍" in observation.content
123 | 
```
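To make the syntax these edge-case tests exercise explicit, here is a minimal happy-path sketch (not from the repo) of an observation line of the form `- [category] content #tags (context)`; the category, tags, and context values are illustrative:

```python
from markdown_it import MarkdownIt

from basic_memory.markdown.plugins import observation_plugin, parse_observation
from basic_memory.markdown.schemas import Observation

md = MarkdownIt().use(observation_plugin)

# One well-formed observation: category, content, tags, and a trailing context.
tokens = md.parse("- [design] Keep the parser lenient #parser #lenient (seen in the wild)")
token = next(t for t in tokens if t.type == "inline")
obs = parse_observation(token)

assert obs["category"] == "design"
assert set(obs["tags"]) == {"parser", "lenient"}
assert obs["context"] == "seen in the wild"

# The parsed dict validates against the Pydantic schema, as in the tests above.
observation = Observation.model_validate(obs)
assert observation.category == "design"
```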
--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/api_client.py:
--------------------------------------------------------------------------------
```python
  1 | """Cloud API client utilities."""
  2 | 
  3 | from typing import Optional
  4 | 
  5 | import httpx
  6 | import typer
  7 | from rich.console import Console
  8 | 
  9 | from basic_memory.cli.auth import CLIAuth
 10 | from basic_memory.config import ConfigManager
 11 | 
 12 | console = Console()
 13 | 
 14 | 
 15 | class CloudAPIError(Exception):
 16 |     """Exception raised for cloud API errors."""
 17 | 
 18 |     def __init__(
 19 |         self, message: str, status_code: Optional[int] = None, detail: Optional[dict] = None
 20 |     ):
 21 |         super().__init__(message)
 22 |         self.status_code = status_code
 23 |         self.detail = detail or {}
 24 | 
 25 | 
 26 | class SubscriptionRequiredError(CloudAPIError):
 27 |     """Exception raised when user needs an active subscription."""
 28 | 
 29 |     def __init__(self, message: str, subscribe_url: str):
 30 |         super().__init__(message, status_code=403, detail={"error": "subscription_required"})
 31 |         self.subscribe_url = subscribe_url
 32 | 
 33 | 
 34 | def get_cloud_config() -> tuple[str, str, str]:
 35 |     """Get cloud OAuth configuration from config."""
 36 |     config_manager = ConfigManager()
 37 |     config = config_manager.config
 38 |     return config.cloud_client_id, config.cloud_domain, config.cloud_host
 39 | 
 40 | 
 41 | async def get_authenticated_headers() -> dict[str, str]:
 42 |     """
 43 |     Get authentication headers with a JWT bearer token.
 44 |     Handles JWT token refresh if needed.
 45 |     """
 46 |     client_id, domain, _ = get_cloud_config()
 47 |     auth = CLIAuth(client_id=client_id, authkit_domain=domain)
 48 |     token = await auth.get_valid_token()
 49 |     if not token:
 50 |         console.print("[red]Not authenticated. Please run 'basic-memory cloud login' first.[/red]")
 51 |         raise typer.Exit(1)
 52 | 
 53 |     return {"Authorization": f"Bearer {token}"}
 54 | 
 55 | 
 56 | async def make_api_request(
 57 |     method: str,
 58 |     url: str,
 59 |     headers: Optional[dict] = None,
 60 |     json_data: Optional[dict] = None,
 61 |     timeout: float = 30.0,
 62 | ) -> httpx.Response:
 63 |     """Make an API request to the cloud service."""
 64 |     headers = headers or {}
 65 |     auth_headers = await get_authenticated_headers()
 66 |     headers.update(auth_headers)
 67 |     # Add debug headers to help with compression issues
 68 |     headers.setdefault("Accept-Encoding", "identity")  # Disable compression for debugging
 69 | 
 70 |     async with httpx.AsyncClient(timeout=timeout) as client:
 71 |         try:
 72 |             response = await client.request(method=method, url=url, headers=headers, json=json_data)
 73 |             response.raise_for_status()
 74 |             return response
 75 |         except httpx.HTTPError as e:
 76 |             # Check if this is a response error with response details
 77 |             if hasattr(e, "response") and e.response is not None:  # pyright: ignore [reportAttributeAccessIssue]
 78 |                 response = e.response  # type: ignore
 79 | 
 80 |                 # Try to parse error detail from response
 81 |                 error_detail = None
 82 |                 try:
 83 |                     error_detail = response.json()
 84 |                 except Exception:
 85 |                     # If JSON parsing fails, we'll handle it as a generic error
 86 |                     pass
 87 | 
 88 |                 # Check for subscription_required error (403)
 89 |                 if response.status_code == 403 and isinstance(error_detail, dict):
 90 |                     # Handle both FastAPI HTTPException format (nested under "detail")
 91 |                     # and direct format
 92 |                     detail_obj = error_detail.get("detail", error_detail)
 93 |                     if (
 94 |                         isinstance(detail_obj, dict)
 95 |                         and detail_obj.get("error") == "subscription_required"
 96 |                     ):
 97 |                         message = detail_obj.get("message", "Active subscription required")
 98 |                         subscribe_url = detail_obj.get(
 99 |                             "subscribe_url", "https://basicmemory.com/subscribe"
100 |                         )
101 |                         raise SubscriptionRequiredError(
102 |                             message=message, subscribe_url=subscribe_url
103 |                         ) from e
104 | 
105 |                 # Raise generic CloudAPIError with status code and detail
106 |                 raise CloudAPIError(
107 |                     f"API request failed: {e}",
108 |                     status_code=response.status_code,
109 |                     detail=error_detail if isinstance(error_detail, dict) else {},
110 |                 ) from e
111 | 
112 |             raise CloudAPIError(f"API request failed: {e}") from e
113 | 
```
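A hedged usage sketch (not from the repo) of calling `make_api_request` and handling the two error types defined above; the endpoint URL is hypothetical, and a prior `basic-memory cloud login` is assumed so a valid token is available:

```python
import asyncio

from basic_memory.cli.commands.cloud.api_client import (
    CloudAPIError,
    SubscriptionRequiredError,
    make_api_request,
)


async def check_status() -> None:
    try:
        # Hypothetical endpoint; make_api_request injects the Bearer token itself.
        response = await make_api_request("GET", "https://cloud.example.com/api/status")
        print(response.json())
    except SubscriptionRequiredError as e:
        # 403 responses carrying {"error": "subscription_required"} map to this type.
        print(f"Subscription required, subscribe at: {e.subscribe_url}")
    except CloudAPIError as e:
        print(f"Request failed (status {e.status_code}): {e.detail}")


asyncio.run(check_status())
```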
--------------------------------------------------------------------------------
/tests/importers/test_importer_base.py:
--------------------------------------------------------------------------------
```python
  1 | """Tests for the base importer class."""
  2 | 
  3 | import pytest
  4 | from unittest.mock import AsyncMock
  5 | 
  6 | from basic_memory.importers.base import Importer
  7 | from basic_memory.markdown.markdown_processor import MarkdownProcessor
  8 | from basic_memory.markdown.schemas import EntityMarkdown
  9 | from basic_memory.schemas.importer import ImportResult
 10 | 
 11 | 
 12 | # Create a concrete implementation of the abstract class for testing
 13 | class TestImporter(Importer[ImportResult]):
 14 |     """Test implementation of Importer base class."""
 15 | 
 16 |     async def import_data(self, source_data, destination_folder: str, **kwargs):
 17 |         """Implement the abstract method for testing."""
 18 |         try:
 19 |             # Test implementation that returns success
 20 |             self.ensure_folder_exists(destination_folder)
 21 |             return ImportResult(
 22 |                 import_count={"files": 1},
 23 |                 success=True,
 24 |                 error_message=None,
 25 |             )
 26 |         except Exception as e:
 27 |             return self.handle_error("Test import failed", e)
 28 | 
 29 |     def handle_error(self, message: str, error=None) -> ImportResult:
 30 |         """Implement the abstract handle_error method."""
 31 |         import logging
 32 | 
 33 |         logger = logging.getLogger(__name__)
 34 | 
 35 |         error_message = f"{message}"
 36 |         if error:
 37 |             error_message += f": {str(error)}"
 38 | 
 39 |         logger.error(error_message)
 40 |         return ImportResult(
 41 |             import_count={},
 42 |             success=False,
 43 |             error_message=error_message,
 44 |         )
 45 | 
 46 | 
 47 | @pytest.fixture
 48 | def mock_markdown_processor():
 49 |     """Mock MarkdownProcessor for testing."""
 50 |     processor = AsyncMock(spec=MarkdownProcessor)
 51 |     processor.write_file = AsyncMock()
 52 |     return processor
 53 | 
 54 | 
 55 | @pytest.fixture
 56 | def test_importer(tmp_path, mock_markdown_processor):
 57 |     """Create a TestImporter instance for testing."""
 58 |     return TestImporter(tmp_path, mock_markdown_processor)
 59 | 
 60 | 
 61 | @pytest.mark.asyncio
 62 | async def test_import_data_success(test_importer, tmp_path):
 63 |     """Test successful import_data implementation."""
 64 |     result = await test_importer.import_data({}, "test_folder")
 65 |     assert result.success
 66 |     assert result.import_count == {"files": 1}
 67 |     assert result.error_message is None
 68 | 
 69 |     # Verify folder was created
 70 |     folder_path = tmp_path / "test_folder"
 71 |     assert folder_path.exists()
 72 |     assert folder_path.is_dir()
 73 | 
 74 | 
 75 | @pytest.mark.asyncio
 76 | async def test_write_entity(test_importer, mock_markdown_processor, tmp_path):
 77 |     """Test write_entity method."""
 78 |     # Create test entity
 79 |     entity = EntityMarkdown(
 80 |         title="Test Entity",
 81 |         content="Test content",
 82 |         frontmatter={},
 83 |         observations=[],
 84 |         relations=[],
 85 |     )
 86 | 
 87 |     # Call write_entity
 88 |     file_path = tmp_path / "test_entity.md"
 89 |     await test_importer.write_entity(entity, file_path)
 90 | 
 91 |     # Verify markdown processor was called with correct arguments
 92 |     mock_markdown_processor.write_file.assert_called_once_with(file_path, entity)
 93 | 
 94 | 
 95 | def test_ensure_folder_exists(test_importer, tmp_path):
 96 |     """Test ensure_folder_exists method."""
 97 |     # Test with simple folder
 98 |     folder_path = test_importer.ensure_folder_exists("test_folder")
 99 |     assert folder_path.exists()
100 |     assert folder_path.is_dir()
101 |     assert folder_path == tmp_path / "test_folder"
102 | 
103 |     # Test with nested folder
104 |     nested_path = test_importer.ensure_folder_exists("nested/folder/path")
105 |     assert nested_path.exists()
106 |     assert nested_path.is_dir()
107 |     assert nested_path == tmp_path / "nested" / "folder" / "path"
108 | 
109 |     # Test with existing folder (should not raise error)
110 |     existing_path = test_importer.ensure_folder_exists("test_folder")
111 |     assert existing_path.exists()
112 |     assert existing_path.is_dir()
113 | 
114 | 
115 | @pytest.mark.asyncio
116 | async def test_handle_error(test_importer):
117 |     """Test handle_error method."""
118 |     # Test with message only
119 |     result = test_importer.handle_error("Test error message")
120 |     assert not result.success
121 |     assert result.error_message == "Test error message"
122 |     assert result.import_count == {}
123 | 
124 |     # Test with message and exception
125 |     test_exception = ValueError("Test exception")
126 |     result = test_importer.handle_error("Error occurred", test_exception)
127 |     assert not result.success
128 |     assert "Error occurred" in result.error_message
129 |     assert "Test exception" in result.error_message
130 |     assert result.import_count == {}
131 | 
```
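As a companion to these tests, a hedged sketch of what a concrete importer subclass looks like, using only the hooks the tests above exercise (`ensure_folder_exists`, `write_entity`, `handle_error`); the class name and import counts are illustrative:

```python
import logging

from basic_memory.importers.base import Importer
from basic_memory.schemas.importer import ImportResult

logger = logging.getLogger(__name__)


class FolderImporter(Importer[ImportResult]):  # hypothetical subclass name
    """Sketch: create the destination folder and report a count."""

    async def import_data(self, source_data, destination_folder: str, **kwargs):
        try:
            self.ensure_folder_exists(destination_folder)
            # A real importer would build EntityMarkdown objects from
            # source_data and call self.write_entity(entity, file_path).
            return ImportResult(import_count={"files": 0}, success=True, error_message=None)
        except Exception as e:
            return self.handle_error("Folder import failed", e)

    def handle_error(self, message: str, error=None) -> ImportResult:
        error_message = message if error is None else f"{message}: {error}"
        logger.error(error_message)
        return ImportResult(import_count={}, success=False, error_message=error_message)
```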
--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/3dae7c7b1564_initial_schema.py:
--------------------------------------------------------------------------------
```python
 1 | """initial schema
 2 | 
 3 | Revision ID: 3dae7c7b1564
 4 | Revises:
 5 | Create Date: 2025-02-12 21:23:00.336344
 6 | 
 7 | """
 8 | 
 9 | from typing import Sequence, Union
10 | 
11 | from alembic import op
12 | import sqlalchemy as sa
13 | 
14 | 
15 | # revision identifiers, used by Alembic.
16 | revision: str = "3dae7c7b1564"
17 | down_revision: Union[str, None] = None
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 | 
21 | 
22 | def upgrade() -> None:
23 |     # ### commands auto generated by Alembic - please adjust! ###
24 |     op.create_table(
25 |         "entity",
26 |         sa.Column("id", sa.Integer(), nullable=False),
27 |         sa.Column("title", sa.String(), nullable=False),
28 |         sa.Column("entity_type", sa.String(), nullable=False),
29 |         sa.Column("entity_metadata", sa.JSON(), nullable=True),
30 |         sa.Column("content_type", sa.String(), nullable=False),
31 |         sa.Column("permalink", sa.String(), nullable=False),
32 |         sa.Column("file_path", sa.String(), nullable=False),
33 |         sa.Column("checksum", sa.String(), nullable=True),
34 |         sa.Column("created_at", sa.DateTime(), nullable=False),
35 |         sa.Column("updated_at", sa.DateTime(), nullable=False),
36 |         sa.PrimaryKeyConstraint("id"),
37 |         sa.UniqueConstraint("permalink", name="uix_entity_permalink"),
38 |     )
39 |     op.create_index("ix_entity_created_at", "entity", ["created_at"], unique=False)
40 |     op.create_index(op.f("ix_entity_file_path"), "entity", ["file_path"], unique=True)
41 |     op.create_index(op.f("ix_entity_permalink"), "entity", ["permalink"], unique=True)
42 |     op.create_index("ix_entity_title", "entity", ["title"], unique=False)
43 |     op.create_index("ix_entity_type", "entity", ["entity_type"], unique=False)
44 |     op.create_index("ix_entity_updated_at", "entity", ["updated_at"], unique=False)
45 |     op.create_table(
46 |         "observation",
47 |         sa.Column("id", sa.Integer(), nullable=False),
48 |         sa.Column("entity_id", sa.Integer(), nullable=False),
49 |         sa.Column("content", sa.Text(), nullable=False),
50 |         sa.Column("category", sa.String(), nullable=False),
51 |         sa.Column("context", sa.Text(), nullable=True),
52 |         sa.Column("tags", sa.JSON(), server_default="[]", nullable=True),
53 |         sa.ForeignKeyConstraint(["entity_id"], ["entity.id"], ondelete="CASCADE"),
54 |         sa.PrimaryKeyConstraint("id"),
55 |     )
56 |     op.create_index("ix_observation_category", "observation", ["category"], unique=False)
57 |     op.create_index("ix_observation_entity_id", "observation", ["entity_id"], unique=False)
58 |     op.create_table(
59 |         "relation",
60 |         sa.Column("id", sa.Integer(), nullable=False),
61 |         sa.Column("from_id", sa.Integer(), nullable=False),
62 |         sa.Column("to_id", sa.Integer(), nullable=True),
63 |         sa.Column("to_name", sa.String(), nullable=False),
64 |         sa.Column("relation_type", sa.String(), nullable=False),
65 |         sa.Column("context", sa.Text(), nullable=True),
66 |         sa.ForeignKeyConstraint(["from_id"], ["entity.id"], ondelete="CASCADE"),
67 |         sa.ForeignKeyConstraint(["to_id"], ["entity.id"], ondelete="CASCADE"),
68 |         sa.PrimaryKeyConstraint("id"),
69 |         sa.UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation"),
70 |     )
71 |     op.create_index("ix_relation_from_id", "relation", ["from_id"], unique=False)
72 |     op.create_index("ix_relation_to_id", "relation", ["to_id"], unique=False)
73 |     op.create_index("ix_relation_type", "relation", ["relation_type"], unique=False)
74 |     # ### end Alembic commands ###
75 | 
76 | 
77 | def downgrade() -> None:
78 |     # ### commands auto generated by Alembic - please adjust! ###
79 |     op.drop_index("ix_relation_type", table_name="relation")
80 |     op.drop_index("ix_relation_to_id", table_name="relation")
81 |     op.drop_index("ix_relation_from_id", table_name="relation")
82 |     op.drop_table("relation")
83 |     op.drop_index("ix_observation_entity_id", table_name="observation")
84 |     op.drop_index("ix_observation_category", table_name="observation")
85 |     op.drop_table("observation")
86 |     op.drop_index("ix_entity_updated_at", table_name="entity")
87 |     op.drop_index("ix_entity_type", table_name="entity")
88 |     op.drop_index("ix_entity_title", table_name="entity")
89 |     op.drop_index(op.f("ix_entity_permalink"), table_name="entity")
90 |     op.drop_index(op.f("ix_entity_file_path"), table_name="entity")
91 |     op.drop_index("ix_entity_created_at", table_name="entity")
92 |     op.drop_table("entity")
93 |     # ### end Alembic commands ###
94 | 
```
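For reference, a hedged sketch of applying or reverting this revision through Alembic's Python API; the config path is an assumption based on the repository layout shown in the directory structure, and running `alembic upgrade head` from the CLI is the usual route:

```python
from alembic import command
from alembic.config import Config

# Path assumed from the repo layout; adjust to wherever alembic.ini lives.
cfg = Config("src/basic_memory/alembic/alembic.ini")

command.upgrade(cfg, "3dae7c7b1564")   # apply this initial schema revision
# command.downgrade(cfg, "base")       # revert it entirely
```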