tokens: 49299/50000 20/347 files (page 6/17)
This is page 6 of 17. Use http://codebase.md/basicmachines-co/basic-memory?page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── python-developer.md
│   │   └── system-architect.md
│   └── commands
│       ├── release
│       │   ├── beta.md
│       │   ├── changelog.md
│       │   ├── release-check.md
│       │   └── release.md
│       ├── spec.md
│       └── test-live.md
├── .dockerignore
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── documentation.md
│   │   └── feature_request.md
│   └── workflows
│       ├── claude-code-review.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── dev-release.yml
│       ├── docker.yml
│       ├── pr-title.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── ai-assistant-guide-extended.md
│   ├── character-handling.md
│   ├── cloud-cli.md
│   └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│   ├── SPEC-1 Specification-Driven Development Process.md
│   ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│   ├── SPEC-11 Basic Memory API Performance Optimization.md
│   ├── SPEC-12 OpenTelemetry Observability.md
│   ├── SPEC-13 CLI Authentication with Subscription Validation.md
│   ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│   ├── SPEC-16 MCP Cloud Service Consolidation.md
│   ├── SPEC-17 Semantic Search with ChromaDB.md
│   ├── SPEC-18 AI Memory Management Tool.md
│   ├── SPEC-19 Sync Performance and Memory Optimization.md
│   ├── SPEC-2 Slash Commands Reference.md
│   ├── SPEC-20 Simplified Project-Scoped Rclone Sync.md
│   ├── SPEC-3 Agent Definitions.md
│   ├── SPEC-4 Notes Web UI Component Architecture.md
│   ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│   ├── SPEC-6 Explicit Project Parameter Architecture.md
│   ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│   ├── SPEC-8 TigrisFS Integration.md
│   ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│   ├── SPEC-9 Signed Header Tenant Information.md
│   └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│   └── basic_memory
│       ├── __init__.py
│       ├── alembic
│       │   ├── alembic.ini
│       │   ├── env.py
│       │   ├── migrations.py
│       │   ├── script.py.mako
│       │   └── versions
│       │       ├── 3dae7c7b1564_initial_schema.py
│       │       ├── 502b60eaa905_remove_required_from_entity_permalink.py
│       │       ├── 5fe1ab1ccebe_add_projects_table.py
│       │       ├── 647e7a75e2cd_project_constraint_fix.py
│       │       ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│       │       ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│       │       ├── b3c3938bacdb_relation_to_name_unique_index.py
│       │       ├── cc7172b46608_update_search_index_schema.py
│       │       └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── routers
│       │   │   ├── __init__.py
│       │   │   ├── directory_router.py
│       │   │   ├── importer_router.py
│       │   │   ├── knowledge_router.py
│       │   │   ├── management_router.py
│       │   │   ├── memory_router.py
│       │   │   ├── project_router.py
│       │   │   ├── prompt_router.py
│       │   │   ├── resource_router.py
│       │   │   ├── search_router.py
│       │   │   └── utils.py
│       │   └── template_loader.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── auth.py
│       │   ├── commands
│       │   │   ├── __init__.py
│       │   │   ├── cloud
│       │   │   │   ├── __init__.py
│       │   │   │   ├── api_client.py
│       │   │   │   ├── bisync_commands.py
│       │   │   │   ├── cloud_utils.py
│       │   │   │   ├── core_commands.py
│       │   │   │   ├── rclone_commands.py
│       │   │   │   ├── rclone_config.py
│       │   │   │   ├── rclone_installer.py
│       │   │   │   ├── upload_command.py
│       │   │   │   └── upload.py
│       │   │   ├── command_utils.py
│       │   │   ├── db.py
│       │   │   ├── import_chatgpt.py
│       │   │   ├── import_claude_conversations.py
│       │   │   ├── import_claude_projects.py
│       │   │   ├── import_memory_json.py
│       │   │   ├── mcp.py
│       │   │   ├── project.py
│       │   │   ├── status.py
│       │   │   └── tool.py
│       │   └── main.py
│       ├── config.py
│       ├── db.py
│       ├── deps.py
│       ├── file_utils.py
│       ├── ignore_utils.py
│       ├── importers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chatgpt_importer.py
│       │   ├── claude_conversations_importer.py
│       │   ├── claude_projects_importer.py
│       │   ├── memory_json_importer.py
│       │   └── utils.py
│       ├── markdown
│       │   ├── __init__.py
│       │   ├── entity_parser.py
│       │   ├── markdown_processor.py
│       │   ├── plugins.py
│       │   ├── schemas.py
│       │   └── utils.py
│       ├── mcp
│       │   ├── __init__.py
│       │   ├── async_client.py
│       │   ├── project_context.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── ai_assistant_guide.py
│       │   │   ├── continue_conversation.py
│       │   │   ├── recent_activity.py
│       │   │   ├── search.py
│       │   │   └── utils.py
│       │   ├── resources
│       │   │   ├── ai_assistant_guide.md
│       │   │   └── project_info.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── build_context.py
│       │       ├── canvas.py
│       │       ├── chatgpt_tools.py
│       │       ├── delete_note.py
│       │       ├── edit_note.py
│       │       ├── list_directory.py
│       │       ├── move_note.py
│       │       ├── project_management.py
│       │       ├── read_content.py
│       │       ├── read_note.py
│       │       ├── recent_activity.py
│       │       ├── search.py
│       │       ├── utils.py
│       │       ├── view_note.py
│       │       └── write_note.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── knowledge.py
│       │   ├── project.py
│       │   └── search.py
│       ├── repository
│       │   ├── __init__.py
│       │   ├── entity_repository.py
│       │   ├── observation_repository.py
│       │   ├── project_info_repository.py
│       │   ├── project_repository.py
│       │   ├── relation_repository.py
│       │   ├── repository.py
│       │   └── search_repository.py
│       ├── schemas
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloud.py
│       │   ├── delete.py
│       │   ├── directory.py
│       │   ├── importer.py
│       │   ├── memory.py
│       │   ├── project_info.py
│       │   ├── prompt.py
│       │   ├── request.py
│       │   ├── response.py
│       │   ├── search.py
│       │   └── sync_report.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_service.py
│       │   ├── directory_service.py
│       │   ├── entity_service.py
│       │   ├── exceptions.py
│       │   ├── file_service.py
│       │   ├── initialization.py
│       │   ├── link_resolver.py
│       │   ├── project_service.py
│       │   ├── search_service.py
│       │   └── service.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── background_sync.py
│       │   ├── sync_service.py
│       │   └── watch_service.py
│       ├── templates
│       │   └── prompts
│       │       ├── continue_conversation.hbs
│       │       └── search.hbs
│       └── utils.py
├── test-int
│   ├── BENCHMARKS.md
│   ├── cli
│   │   ├── test_project_commands_integration.py
│   │   └── test_version_integration.py
│   ├── conftest.py
│   ├── mcp
│   │   ├── test_build_context_underscore.py
│   │   ├── test_build_context_validation.py
│   │   ├── test_chatgpt_tools_integration.py
│   │   ├── test_default_project_mode_integration.py
│   │   ├── test_delete_note_integration.py
│   │   ├── test_edit_note_integration.py
│   │   ├── test_list_directory_integration.py
│   │   ├── test_move_note_integration.py
│   │   ├── test_project_management_integration.py
│   │   ├── test_project_state_sync_integration.py
│   │   ├── test_read_content_integration.py
│   │   ├── test_read_note_integration.py
│   │   ├── test_search_integration.py
│   │   ├── test_single_project_mcp_integration.py
│   │   └── test_write_note_integration.py
│   ├── test_db_wal_mode.py
│   ├── test_disable_permalinks_integration.py
│   └── test_sync_performance_benchmark.py
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── conftest.py
│   │   ├── test_async_client.py
│   │   ├── test_continue_conversation_template.py
│   │   ├── test_directory_router.py
│   │   ├── test_importer_router.py
│   │   ├── test_knowledge_router.py
│   │   ├── test_management_router.py
│   │   ├── test_memory_router.py
│   │   ├── test_project_router_operations.py
│   │   ├── test_project_router.py
│   │   ├── test_prompt_router.py
│   │   ├── test_relation_background_resolution.py
│   │   ├── test_resource_router.py
│   │   ├── test_search_router.py
│   │   ├── test_search_template.py
│   │   ├── test_template_loader_helpers.py
│   │   └── test_template_loader.py
│   ├── cli
│   │   ├── conftest.py
│   │   ├── test_cli_tools.py
│   │   ├── test_cloud_authentication.py
│   │   ├── test_ignore_utils.py
│   │   ├── test_import_chatgpt.py
│   │   ├── test_import_claude_conversations.py
│   │   ├── test_import_claude_projects.py
│   │   ├── test_import_memory_json.py
│   │   ├── test_project_add_with_local_path.py
│   │   └── test_upload.py
│   ├── conftest.py
│   ├── db
│   │   └── test_issue_254_foreign_key_constraints.py
│   ├── importers
│   │   ├── test_importer_base.py
│   │   └── test_importer_utils.py
│   ├── markdown
│   │   ├── __init__.py
│   │   ├── test_date_frontmatter_parsing.py
│   │   ├── test_entity_parser_error_handling.py
│   │   ├── test_entity_parser.py
│   │   ├── test_markdown_plugins.py
│   │   ├── test_markdown_processor.py
│   │   ├── test_observation_edge_cases.py
│   │   ├── test_parser_edge_cases.py
│   │   ├── test_relation_edge_cases.py
│   │   └── test_task_detection.py
│   ├── mcp
│   │   ├── conftest.py
│   │   ├── test_obsidian_yaml_formatting.py
│   │   ├── test_permalink_collision_file_overwrite.py
│   │   ├── test_prompts.py
│   │   ├── test_resources.py
│   │   ├── test_tool_build_context.py
│   │   ├── test_tool_canvas.py
│   │   ├── test_tool_delete_note.py
│   │   ├── test_tool_edit_note.py
│   │   ├── test_tool_list_directory.py
│   │   ├── test_tool_move_note.py
│   │   ├── test_tool_read_content.py
│   │   ├── test_tool_read_note.py
│   │   ├── test_tool_recent_activity.py
│   │   ├── test_tool_resource.py
│   │   ├── test_tool_search.py
│   │   ├── test_tool_utils.py
│   │   ├── test_tool_view_note.py
│   │   ├── test_tool_write_note.py
│   │   └── tools
│   │       └── test_chatgpt_tools.py
│   ├── Non-MarkdownFileSupport.pdf
│   ├── repository
│   │   ├── test_entity_repository_upsert.py
│   │   ├── test_entity_repository.py
│   │   ├── test_entity_upsert_issue_187.py
│   │   ├── test_observation_repository.py
│   │   ├── test_project_info_repository.py
│   │   ├── test_project_repository.py
│   │   ├── test_relation_repository.py
│   │   ├── test_repository.py
│   │   ├── test_search_repository_edit_bug_fix.py
│   │   └── test_search_repository.py
│   ├── schemas
│   │   ├── test_base_timeframe_minimum.py
│   │   ├── test_memory_serialization.py
│   │   ├── test_memory_url_validation.py
│   │   ├── test_memory_url.py
│   │   ├── test_schemas.py
│   │   └── test_search.py
│   ├── Screenshot.png
│   ├── services
│   │   ├── test_context_service.py
│   │   ├── test_directory_service.py
│   │   ├── test_entity_service_disable_permalinks.py
│   │   ├── test_entity_service.py
│   │   ├── test_file_service.py
│   │   ├── test_initialization.py
│   │   ├── test_link_resolver.py
│   │   ├── test_project_removal_bug.py
│   │   ├── test_project_service_operations.py
│   │   ├── test_project_service.py
│   │   └── test_search_service.py
│   ├── sync
│   │   ├── test_character_conflicts.py
│   │   ├── test_sync_service_incremental.py
│   │   ├── test_sync_service.py
│   │   ├── test_sync_wikilink_issue.py
│   │   ├── test_tmp_files.py
│   │   ├── test_watch_service_edge_cases.py
│   │   ├── test_watch_service_reload.py
│   │   └── test_watch_service.py
│   ├── test_config.py
│   ├── test_db_migration_deduplication.py
│   ├── test_deps.py
│   ├── test_production_cascade_delete.py
│   ├── test_rclone_commands.py
│   └── utils
│       ├── test_file_utils.py
│       ├── test_frontmatter_obsidian_compatible.py
│       ├── test_parse_tags.py
│       ├── test_permalink_formatting.py
│       ├── test_utf8_handling.py
│       └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
    ├── api-performance.md
    ├── background-relations.md
    ├── basic-memory-home.md
    ├── bug-fixes.md
    ├── chatgpt-integration.md
    ├── cloud-authentication.md
    ├── cloud-bisync.md
    ├── cloud-mode-usage.md
    ├── cloud-mount.md
    ├── default-project-mode.md
    ├── env-file-removal.md
    ├── env-var-overrides.md
    ├── explicit-project-parameter.md
    ├── gitignore-integration.md
    ├── project-root-env-var.md
    ├── README.md
    └── sqlite-performance.md
```

# Files

--------------------------------------------------------------------------------
/tests/schemas/test_memory_url_validation.py:
--------------------------------------------------------------------------------

```python
"""Tests for memory URL validation functionality."""

import pytest
from pydantic import ValidationError

from basic_memory.schemas.memory import (
    normalize_memory_url,
    validate_memory_url_path,
    memory_url,
)


class TestValidateMemoryUrlPath:
    """Test the validate_memory_url_path function."""

    def test_valid_paths(self):
        """Test that valid paths pass validation."""
        valid_paths = [
            "notes/meeting",
            "projects/basic-memory",
            "research/findings-2025",
            "specs/search",
            "docs/api-spec",
            "folder/subfolder/note",
            "single-note",
            "notes/with-hyphens",
            "notes/with_underscores",
            "notes/with123numbers",
            "pattern/*",  # Wildcard pattern matching
            "deep/*/pattern",
        ]

        for path in valid_paths:
            assert validate_memory_url_path(path), f"Path '{path}' should be valid"

    def test_invalid_empty_paths(self):
        """Test that empty/whitespace paths fail validation."""
        invalid_paths = [
            "",
            "   ",
            "\t",
            "\n",
            "  \n  ",
        ]

        for path in invalid_paths:
            assert not validate_memory_url_path(path), f"Path '{path}' should be invalid"

    def test_invalid_double_slashes(self):
        """Test that paths with double slashes fail validation."""
        invalid_paths = [
            "notes//meeting",
            "//root",
            "folder//subfolder/note",
            "path//with//multiple//doubles",
            "memory//test",
        ]

        for path in invalid_paths:
            assert not validate_memory_url_path(path), (
                f"Path '{path}' should be invalid (double slashes)"
            )

    def test_invalid_protocol_schemes(self):
        """Test that paths with protocol schemes fail validation."""
        invalid_paths = [
            "http://example.com",
            "https://example.com/path",
            "file://local/path",
            "ftp://server.com",
            "invalid://test",
            "custom://scheme",
        ]

        for path in invalid_paths:
            assert not validate_memory_url_path(path), (
                f"Path '{path}' should be invalid (protocol scheme)"
            )

    def test_invalid_characters(self):
        """Test that paths with invalid characters fail validation."""
        invalid_paths = [
            "notes<with>brackets",
            'notes"with"quotes',
            "notes|with|pipes",
            "notes?with?questions",
        ]

        for path in invalid_paths:
            assert not validate_memory_url_path(path), (
                f"Path '{path}' should be invalid (invalid chars)"
            )


class TestNormalizeMemoryUrl:
    """Test the normalize_memory_url function."""

    def test_valid_normalization(self):
        """Test that valid URLs are properly normalized."""
        test_cases = [
            ("specs/search", "memory://specs/search"),
            ("memory://specs/search", "memory://specs/search"),
            ("notes/meeting-2025", "memory://notes/meeting-2025"),
            ("memory://notes/meeting-2025", "memory://notes/meeting-2025"),
            ("pattern/*", "memory://pattern/*"),
            ("memory://pattern/*", "memory://pattern/*"),
        ]

        for input_url, expected in test_cases:
            result = normalize_memory_url(input_url)
            assert result == expected, (
                f"normalize_memory_url('{input_url}') should return '{expected}', got '{result}'"
            )

    def test_empty_url(self):
        """Test that empty URLs raise ValueError."""
        with pytest.raises(ValueError, match="cannot be empty"):
            normalize_memory_url(None)
        with pytest.raises(ValueError, match="cannot be empty"):
            normalize_memory_url("")

    def test_invalid_double_slashes(self):
        """Test that URLs with double slashes raise ValueError."""
        invalid_urls = [
            "memory//test",
            "notes//meeting",
            "//root",
            "memory://path//with//doubles",
        ]

        for url in invalid_urls:
            with pytest.raises(ValueError, match="contains double slashes"):
                normalize_memory_url(url)

    def test_invalid_protocol_schemes(self):
        """Test that URLs with other protocol schemes raise ValueError."""
        invalid_urls = [
            "http://example.com",
            "https://example.com/path",
            "file://local/path",
            "invalid://test",
        ]

        for url in invalid_urls:
            with pytest.raises(ValueError, match="contains protocol scheme"):
                normalize_memory_url(url)

    def test_whitespace_only(self):
        """Test that whitespace-only URLs raise ValueError."""
        whitespace_urls = [
            "   ",
            "\t",
            "\n",
            "  \n  ",
        ]

        for url in whitespace_urls:
            with pytest.raises(ValueError, match="cannot be empty or whitespace"):
                normalize_memory_url(url)

    def test_invalid_characters(self):
        """Test that URLs with invalid characters raise ValueError."""
        invalid_urls = [
            "notes<brackets>",
            'notes"quotes"',
            "notes|pipes|",
            "notes?questions?",
        ]

        for url in invalid_urls:
            with pytest.raises(ValueError, match="contains invalid characters"):
                normalize_memory_url(url)


class TestMemoryUrlPydanticValidation:
    """Test the MemoryUrl Pydantic type validation."""

    def test_valid_urls_pass_validation(self):
        """Test that valid URLs pass Pydantic validation."""
        valid_urls = [
            "specs/search",
            "memory://specs/search",
            "notes/meeting-2025",
            "projects/basic-memory/docs",
            "pattern/*",
        ]

        for url in valid_urls:
            # Should not raise an exception
            result = memory_url.validate_python(url)
            assert result.startswith("memory://"), (
                f"Validated URL should start with memory://, got {result}"
            )

    def test_invalid_urls_fail_validation(self):
        """Test that invalid URLs fail Pydantic validation with clear errors."""
        invalid_test_cases = [
            ("memory//test", "double slashes"),
            ("invalid://test", "protocol scheme"),
            ("   ", "empty or whitespace"),
            ("notes<brackets>", "invalid characters"),
        ]

        for url, expected_error in invalid_test_cases:
            with pytest.raises(ValidationError) as exc_info:
                memory_url.validate_python(url)

            error_msg = str(exc_info.value)
            assert "value_error" in error_msg, f"Should be a value_error for '{url}'"

    def test_empty_string_fails_validation(self):
        """Test that empty strings fail validation."""
        with pytest.raises(ValidationError, match="cannot be empty"):
            memory_url.validate_python("")

    def test_very_long_urls_fail_maxlength(self):
        """Test that very long URLs fail MaxLen validation."""
        long_url = "a" * 3000  # Exceeds MaxLen(2028)
        with pytest.raises(ValidationError, match="at most 2028"):
            memory_url.validate_python(long_url)

    def test_whitespace_stripped(self):
        """Test that whitespace is properly stripped."""
        urls_with_whitespace = [
            "  specs/search  ",
            "\tprojects/basic-memory\t",
            "\nnotes/meeting\n",
        ]

        for url in urls_with_whitespace:
            result = memory_url.validate_python(url)
            assert not result.startswith(" ") and not result.endswith(" "), (
                f"Whitespace should be stripped from '{url}'"
            )
            assert "memory://" in result, "Result should contain memory:// prefix"


class TestMemoryUrlErrorMessages:
    """Test that error messages are clear and helpful."""

    def test_double_slash_error_message(self):
        """Test specific error message for double slashes."""
        with pytest.raises(ValueError) as exc_info:
            normalize_memory_url("memory//test")

        error_msg = str(exc_info.value)
        assert "memory//test" in error_msg
        assert "double slashes" in error_msg

    def test_protocol_scheme_error_message(self):
        """Test specific error message for protocol schemes."""
        with pytest.raises(ValueError) as exc_info:
            normalize_memory_url("http://example.com")

        error_msg = str(exc_info.value)
        assert "http://example.com" in error_msg
        assert "protocol scheme" in error_msg

    def test_empty_error_message(self):
        """Test specific error message for empty paths."""
        with pytest.raises(ValueError) as exc_info:
            normalize_memory_url("   ")

        error_msg = str(exc_info.value)
        assert "empty or whitespace" in error_msg

    def test_invalid_characters_error_message(self):
        """Test specific error message for invalid characters."""
        with pytest.raises(ValueError) as exc_info:
            normalize_memory_url("notes<brackets>")

        error_msg = str(exc_info.value)
        assert "notes<brackets>" in error_msg
        assert "invalid characters" in error_msg

```

--------------------------------------------------------------------------------
/v15-docs/default-project-mode.md:
--------------------------------------------------------------------------------

```markdown
# Default Project Mode

**Status**: New Feature
**PR**: #298 (SPEC-6)
**Related**: explicit-project-parameter.md

## What's New

v0.15.0 introduces `default_project_mode` - a configuration option that simplifies single-project workflows by automatically using your default project when no explicit project parameter is provided.

## Quick Start

### Enable Default Project Mode

Edit `~/.basic-memory/config.json`:

```json
{
  "default_project": "main",
  "default_project_mode": true,
  "projects": {
    "main": "/Users/you/basic-memory"
  }
}
```

### Now Tools Work Without Project Parameter

```python
# Before (explicit project required)
await write_note("Note", "Content", "folder", project="main")

# After (with default_project_mode: true)
await write_note("Note", "Content", "folder")  # Uses "main" automatically
```

## Configuration Options

| Option | Type | Default | Description |
|--------|------|---------|-------------|
| `default_project_mode` | boolean | `false` | Enable auto-fallback to default project |
| `default_project` | string | `"main"` | Which project to use as default |

## How It Works

### Three-Tier Project Resolution

When a tool is called, Basic Memory resolves the project in this order:

1. **CLI Constraint** (Highest): `bm --project work-notes` forces all tools to use "work-notes"
2. **Explicit Parameter** (Medium): `project="specific"` in tool call
3. **Default Mode** (Lowest): Uses `default_project` if `default_project_mode: true`

### Examples

**With default_project_mode: false (default):**
```python
# Must specify project explicitly
await search_notes("query", project="main")  # ✓ Works
await search_notes("query")                  # ✗ Error: project required
```

**With default_project_mode: true:**
```python
# Project parameter is optional
await search_notes("query")                  # ✓ Uses default_project
await search_notes("query", project="work")  # ✓ Explicit override works
```

## Use Cases

### Single-Project Users

**Best for:**
- Users who maintain one primary knowledge base
- Personal knowledge management
- Single-purpose documentation

**Configuration:**
```json
{
  "default_project": "main",
  "default_project_mode": true,
  "projects": {
    "main": "/Users/you/basic-memory"
  }
}
```

**Benefits:**
- Simpler tool calls
- Less verbose for AI assistants
- Familiar workflow (like v0.14.x)

### Multi-Project Users

**Best for:**
- Multiple distinct knowledge bases (work, personal, research)
- Switching contexts frequently
- Team collaboration with separate projects

**Configuration:**
```json
{
  "default_project": "main",
  "default_project_mode": false,
  "projects": {
    "work": "/Users/you/work-kb",
    "personal": "/Users/you/personal-kb",
    "research": "/Users/you/research-kb"
  }
}
```

**Benefits:**
- Explicit project selection prevents mistakes
- Clear which knowledge base is being accessed
- Better for context switching

## Workflow Examples

### Single-Project Workflow

```python
# config.json: default_project_mode: true, default_project: "main"

# Write without specifying project
await write_note(
    title="Meeting Notes",
    content="# Team Sync\n...",
    folder="meetings"
)  # → Saved to "main" project

# Search across default project
results = await search_notes("quarterly goals")
# → Searches "main" project

# Build context from default project
context = await build_context("memory://goals/q4-2024")
# → Uses "main" project
```

### Multi-Project with Explicit Selection

```python
# config.json: default_project_mode: false

# Work project
await write_note(
    title="Architecture Decision",
    content="# ADR-001\n...",
    folder="decisions",
    project="work"
)

# Personal project
await write_note(
    title="Book Notes",
    content="# Design Patterns\n...",
    folder="reading",
    project="personal"
)

# Research project
await search_notes(
    query="machine learning",
    project="research"
)
```

### Hybrid: Default with Occasional Override

```python
# config.json: default_project_mode: true, default_project: "personal"

# Most operations use personal (default)
await write_note("Daily Journal", "...", "journal")
# → Saved to "personal"

# Explicitly use work project when needed
await write_note(
    title="Sprint Planning",
    content="...",
    folder="planning",
    project="work"  # Override default
)
# → Saved to "work"

# Back to default
await search_notes("goals")
# → Searches "personal"
```

## Migration Guide

### From v0.14.x (Implicit Project)

v0.14.x had implicit project context via middleware. To get similar behavior:

**Enable default_project_mode:**
```json
{
  "default_project": "main",
  "default_project_mode": true
}
```

Now tools work without an explicit project parameter (as in v0.14.x).

### From v0.15.0 Explicit-Only

If you started with v0.15.0 using explicit projects:

**Keep current behavior** (or omit the key entirely, since `false` is the default):
```json
{
  "default_project_mode": false
}
```

**Or simplify for single project:**
```json
{
  "default_project": "main",
  "default_project_mode": true
}
```

## LLM Integration

### Claude Desktop

Claude can detect and use default_project_mode:

**Auto-detection:**
```python
# Claude reads config
config = read_config()

if config.get("default_project_mode"):
    # Use simple calls
    await write_note("Note", "Content", "folder")
else:
    # Discover and use explicit project
    projects = await list_memory_projects()
    await write_note("Note", "Content", "folder", project=projects[0].name)
```

### Custom MCP Clients

```python
from basic_memory.config import ConfigManager

config = ConfigManager().config

if config.default_project_mode:
    # Project parameter optional
    result = await mcp_tool(arg1, arg2)
else:
    # Project parameter required
    result = await mcp_tool(arg1, arg2, project="name")
```

## Error Handling

### Missing Project (default_project_mode: false)

```python
try:
    results = await search_notes("query")
except ValueError as e:
    print("Error: project parameter required")
    # Show available projects
    projects = await list_memory_projects()
    print(f"Available: {[p.name for p in projects]}")
```

### Invalid Default Project

```json
{
  "default_project": "nonexistent",
  "default_project_mode": true
}
```

**Result:** Falls back to "main" project if default doesn't exist.
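A minimal sketch of that fallback behavior (hypothetical helper for illustration; the actual guard lives inside Basic Memory's config handling):

```python
def pick_default_project(default_project: str, projects: dict[str, str]) -> str:
    """Fall back to "main" when the configured default is not a known project."""
    return default_project if default_project in projects else "main"


# "nonexistent" is not a configured project, so "main" is used
assert pick_default_project("nonexistent", {"main": "/Users/you/basic-memory"}) == "main"
```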

## Configuration Management

### Update Config

```bash
# Edit directly
vim ~/.basic-memory/config.json

# Or use CLI (if available)
bm config set default_project_mode true
bm config set default_project main
```

### Verify Config

```python
from basic_memory.config import ConfigManager

config = ConfigManager().config
print(f"Default mode: {config.default_project_mode}")
print(f"Default project: {config.default_project}")
print(f"Projects: {list(config.projects.keys())}")
```

### Environment Override

```bash
# Override via environment
export BASIC_MEMORY_DEFAULT_PROJECT_MODE=true
export BASIC_MEMORY_DEFAULT_PROJECT=work

# Now default_project_mode enabled for this session
```
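As a rough sketch of how these variables map onto the config (assuming the settings class reads a `BASIC_MEMORY_` prefix via pydantic-settings; the real wiring in `basic_memory.config` may differ):

```python
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class ExampleConfig(BaseSettings):
    """Hypothetical stand-in for BasicMemoryConfig, mirroring the config.json keys."""

    model_config = SettingsConfigDict(env_prefix="BASIC_MEMORY_")

    default_project: str = Field(default="main")
    default_project_mode: bool = Field(default=False)


# With BASIC_MEMORY_DEFAULT_PROJECT_MODE=true exported, this prints True
print(ExampleConfig().default_project_mode)
```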

## Best Practices

1. **Choose based on workflow:**
   - Single project → enable default_project_mode
   - Multiple projects → keep explicit (false)

2. **Document your choice:**
   - Add comment to config.json explaining why

3. **Consistent with team:**
   - Agree on project mode for shared setups

4. **Test both modes:**
   - Try each to see what feels natural

5. **Use CLI constraints when needed:**
   - `bm --project work-notes` overrides everything

## Troubleshooting

### Tools Not Using Default Project

**Problem:** default_project_mode: true but tools still require project

**Check:**
```bash
# Verify config
cat ~/.basic-memory/config.json | grep default_project_mode

# Should show: "default_project_mode": true
```

**Solution:** Restart MCP server to reload config

### Wrong Project Being Used

**Problem:** Tools using unexpected project

**Check resolution order:**
1. CLI constraint (`--project` flag)
2. Explicit parameter in tool call
3. Default project (if mode enabled)

**Solution:** Check for CLI constraints or explicit parameters

### Config Not Loading

**Problem:** Changes to config.json not taking effect

**Solution:** Restart the MCP server, or reload the config programmatically:
```python
from basic_memory import config as config_module

config_module._config = None  # Clear cache
```

## Technical Details

### Implementation

```python
class BasicMemoryConfig(BaseSettings):
    default_project: str = Field(
        default="main",
        description="Name of the default project to use"
    )

    default_project_mode: bool = Field(
        default=False,
        description="When True, MCP tools automatically use default_project when no project parameter is specified"
    )
```

### Project Resolution Logic

```python
def resolve_project(
    explicit_project: Optional[str] = None,
    cli_project: Optional[str] = None,
    config: BasicMemoryConfig = None
) -> str:
    # 1. CLI constraint (highest priority)
    if cli_project:
        return cli_project

    # 2. Explicit parameter
    if explicit_project:
        return explicit_project

    # 3. Default mode (lowest priority)
    if config.default_project_mode:
        return config.default_project

    # 4. No project found
    raise ValueError("Project parameter required")
```
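For example, under this resolution logic (illustrative calls against the sketch above, not a public API):

```python
config = BasicMemoryConfig(default_project="main", default_project_mode=True)

resolve_project(config=config)                            # "main"  (default mode)
resolve_project(explicit_project="work", config=config)   # "work"  (explicit beats default)
resolve_project(cli_project="notes", explicit_project="work", config=config)  # "notes" (CLI wins)
```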

## See Also

- `explicit-project-parameter.md` - Why explicit project is required
- SPEC-6: Explicit Project Parameter Architecture
- MCP tools documentation

```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/read_note.py:
--------------------------------------------------------------------------------

```python
"""Read note tool for Basic Memory MCP server."""

from textwrap import dedent
from typing import Optional

from loguru import logger
from fastmcp import Context

from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.project_context import get_active_project
from basic_memory.mcp.server import mcp
from basic_memory.mcp.tools.search import search_notes
from basic_memory.mcp.tools.utils import call_get
from basic_memory.schemas.memory import memory_url_path
from basic_memory.utils import validate_project_path


@mcp.tool(
    description="Read a markdown note by title or permalink.",
)
async def read_note(
    identifier: str,
    project: Optional[str] = None,
    page: int = 1,
    page_size: int = 10,
    context: Context | None = None,
) -> str:
    """Return the raw markdown for a note, or guidance text if no match is found.

    Finds and retrieves a note by its title, permalink, or content search,
    returning the raw markdown content including observations, relations, and metadata.

    Project Resolution:
    Server resolves projects in this order: Single Project Mode → project parameter → default project.
    If project unknown, use list_memory_projects() or recent_activity() first.

    This tool will try multiple lookup strategies to find the most relevant note:
    1. Direct permalink lookup
    2. Title search fallback
    3. Text search as last resort

    Args:
        project: Project name to read from. Optional - server will resolve using the
                hierarchy above. If unknown, use list_memory_projects() to discover
                available projects.
        identifier: The title or permalink of the note to read
                   Can be a full memory:// URL, a permalink, a title, or search text
        page: Page number for paginated results (default: 1)
        page_size: Number of items per page (default: 10)
        context: Optional FastMCP context for performance caching.

    Returns:
        The full markdown content of the note if found, or helpful guidance if not found.
        Content includes frontmatter, observations, relations, and all markdown formatting.

    Examples:
        # Read by permalink
        read_note("specs/search-spec", project="my-research")

        # Read by title
        read_note("Search Specification", project="work-project")

        # Read with memory URL
        read_note("memory://specs/search-spec", project="my-research")

        # Read with pagination
        read_note("Project Updates", project="work-project", page=2, page_size=5)

        # Read recent meeting notes
        read_note("Weekly Standup", project="team-docs")

    Raises:
        HTTPError: If project doesn't exist or is inaccessible
        SecurityError: If identifier attempts path traversal

    Note:
        If the exact note isn't found, this tool provides helpful suggestions
        including related notes, search commands, and note creation templates.
    """
    async with get_client() as client:
        # Get and validate the project
        active_project = await get_active_project(client, project, context)

        # Validate identifier to prevent path traversal attacks
        # We need to check both the raw identifier and the processed path
        processed_path = memory_url_path(identifier)
        project_path = active_project.home

        if not validate_project_path(identifier, project_path) or not validate_project_path(
            processed_path, project_path
        ):
            logger.warning(
                "Attempted path traversal attack blocked",
                identifier=identifier,
                processed_path=processed_path,
                project=active_project.name,
            )
            return f"# Error\n\nIdentifier '{identifier}' is not allowed - paths must stay within project boundaries"

        project_url = active_project.project_url

        # Get the file via REST API - first try direct permalink lookup
        entity_path = memory_url_path(identifier)
        path = f"{project_url}/resource/{entity_path}"
        logger.info(f"Attempting to read note from Project: {active_project.name} URL: {path}")

        try:
            # Try direct lookup first
            response = await call_get(client, path, params={"page": page, "page_size": page_size})

            # If successful, return the content
            if response.status_code == 200:
                logger.info("Returning read_note result from resource: {path}", path=entity_path)
                return response.text
        except Exception as e:  # pragma: no cover
            logger.info(f"Direct lookup failed for '{path}': {e}")
            # Continue to fallback methods

        # Fallback 1: Try title search via API
        logger.info(f"Search title for: {identifier}")
        title_results = await search_notes.fn(
            query=identifier, search_type="title", project=project, context=context
        )

        # Handle both SearchResponse object and error strings
        if title_results and hasattr(title_results, "results") and title_results.results:
            result = title_results.results[0]  # Get the first/best match
            if result.permalink:
                try:
                    # Try to fetch the content using the found permalink
                    path = f"{project_url}/resource/{result.permalink}"
                    response = await call_get(
                        client, path, params={"page": page, "page_size": page_size}
                    )

                    if response.status_code == 200:
                        logger.info(f"Found note by title search: {result.permalink}")
                        return response.text
                except Exception as e:  # pragma: no cover
                    logger.info(
                        f"Failed to fetch content for found title match {result.permalink}: {e}"
                    )
        else:
            logger.info(
                f"No results in title search for: {identifier} in project {active_project.name}"
            )

        # Fallback 2: Text search as a last resort
        logger.info(f"Title search failed, trying text search for: {identifier}")
        text_results = await search_notes.fn(
            query=identifier, search_type="text", project=project, context=context
        )

        # We didn't find a direct match, construct a helpful error message
        # Handle both SearchResponse object and error strings
        if not text_results or not hasattr(text_results, "results") or not text_results.results:
            # No results at all
            return format_not_found_message(active_project.name, identifier)
        else:
            # We found some related results
            return format_related_results(active_project.name, identifier, text_results.results[:5])


def format_not_found_message(project: str | None, identifier: str) -> str:
    """Format a helpful message when no note was found."""
    return dedent(f"""
        # Note Not Found in {project}: "{identifier}"

        I couldn't find any notes matching "{identifier}". Here are some suggestions:

        ## Check Identifier Type
        - If you provided a title, try using the exact permalink instead
        - If you provided a permalink, check for typos or try a broader search

        ## Search Instead
        Try searching for related content:
        ```
        search_notes(project="{project}", query="{identifier}")
        ```

        ## Recent Activity
        Check recently modified notes:
        ```
        recent_activity(timeframe="7d")
        ```

        ## Create New Note
        This might be a good opportunity to create a new note on this topic:
        ```
        write_note(
            project="{project}",
            title="{identifier.capitalize()}",
            content='''
            # {identifier.capitalize()}

            ## Overview
            [Your content here]

            ## Observations
            - [category] [Observation about {identifier}]

            ## Relations
            - relates_to [[Related Topic]]
            ''',
            folder="notes"
        )
        ```
    """)


def format_related_results(project: str | None, identifier: str, results) -> str:
    """Format a helpful message with related results when an exact match wasn't found."""
    message = dedent(f"""
        # Note Not Found in {project}: "{identifier}"

        I couldn't find an exact match for "{identifier}", but I found some related notes:

        """)

    for i, result in enumerate(results):
        message += dedent(f"""
            ## {i + 1}. {result.title}
            - **Type**: {result.type.value}
            - **Permalink**: {result.permalink}

            You can read this note with:
            ```
            read_note("{result.permalink}", project="{project}")
            ```

            """)

    message += dedent(f"""
        ## Try More Specific Lookup
        For exact matches, try using the full permalink from one of the results above.

        ## Search For More Results
        To see more related content:
        ```
        search_notes(project="{project}", query="{identifier}")
        ```

        ## Create New Note
        If none of these match what you're looking for, consider creating a new note:
        ```
        write_note(
            project="{project}",
            title="[Your title]",
            content="[Your content]",
            folder="notes"
        )
        ```
    """)

    return message

```

--------------------------------------------------------------------------------
/src/basic_memory/schemas/base.py:
--------------------------------------------------------------------------------

```python
"""Core pydantic models for basic-memory entities, observations, and relations.

This module defines the foundational data structures for the knowledge graph system.
The graph consists of entities (nodes) connected by relations (edges), where each
entity can have multiple observations (facts) attached to it.

Key Concepts:
1. Entities are nodes storing factual observations
2. Relations are directed edges between entities using active voice verbs
3. Observations are atomic facts/notes about an entity
4. Everything is stored in both SQLite and markdown files
"""

import os
import mimetypes
import re
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Optional, Annotated, Dict

from annotated_types import MinLen, MaxLen
from dateparser import parse

from pydantic import BaseModel, BeforeValidator, Field, model_validator

from basic_memory.config import ConfigManager
from basic_memory.file_utils import sanitize_for_filename, sanitize_for_folder
from basic_memory.utils import generate_permalink


def to_snake_case(name: str) -> str:
    """Convert a string to snake_case.

    Examples:
        BasicMemory -> basic_memory
        Memory Service -> memory_service
        memory-service -> memory_service
        Memory_Service -> memory_service
    """
    name = name.strip()

    # Replace spaces and hyphens and . with underscores
    s1 = re.sub(r"[\s\-\\.]", "_", name)

    # Insert underscore between camelCase
    s2 = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s1)

    # Convert to lowercase
    return s2.lower()


def parse_timeframe(timeframe: str) -> datetime:
    """Parse timeframe with special handling for 'today' and other natural language expressions.

    Enforces a minimum 1-day lookback to handle timezone differences in distributed deployments.

    Args:
        timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.

    Returns:
        datetime: The parsed datetime for the start of the timeframe, timezone-aware in local system timezone
                 Always returns at least 1 day ago to handle timezone differences.

    Examples:
        parse_timeframe('today') -> 2025-06-04 14:50:00-07:00 (1 day ago, not start of today)
        parse_timeframe('1h') -> 2025-06-04 14:50:00-07:00 (1 day ago, not 1 hour ago)
        parse_timeframe('1d') -> 2025-06-04 14:50:00-07:00 (24 hours ago with local timezone)
        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00-07:00 (1 week ago with local timezone)
    """
    if timeframe.lower() == "today":
        # For "today", return 1 day ago to ensure we capture recent activity across timezones
        # This handles the case where client and server are in different timezones
        now = datetime.now()
        one_day_ago = now - timedelta(days=1)
        return one_day_ago.astimezone()
    else:
        # Use dateparser for other formats
        parsed = parse(timeframe)
        if not parsed:
            raise ValueError(f"Could not parse timeframe: {timeframe}")

        # If the parsed datetime is naive, make it timezone-aware in local system timezone
        if parsed.tzinfo is None:
            parsed = parsed.astimezone()

        # Enforce minimum 1-day lookback to handle timezone differences
        # This ensures we don't miss recent activity due to client/server timezone mismatches
        now = datetime.now().astimezone()
        one_day_ago = now - timedelta(days=1)

        # If the parsed time is more recent than 1 day ago, use 1 day ago instead
        if parsed > one_day_ago:
            return one_day_ago
        else:
            return parsed


def validate_timeframe(timeframe: str) -> str:
    """Convert human readable timeframes to a duration relative to the current time."""
    if not isinstance(timeframe, str):
        raise ValueError("Timeframe must be a string")

    # Preserve special timeframe strings that need custom handling
    special_timeframes = ["today"]
    if timeframe.lower() in special_timeframes:
        return timeframe.lower()

    # Parse relative time expression using our enhanced parser
    parsed = parse_timeframe(timeframe)

    # Convert to duration
    now = datetime.now().astimezone()
    if parsed > now:
        raise ValueError("Timeframe cannot be in the future")

    # Could format the duration back to our standard format
    days = (now - parsed).days

    # Could enforce reasonable limits
    if days > 365:
        raise ValueError("Timeframe should be <= 1 year")

    return f"{days}d"


TimeFrame = Annotated[str, BeforeValidator(validate_timeframe)]

Permalink = Annotated[str, MinLen(1)]
"""Unique identifier in format '{path}/{normalized_name}'."""


EntityType = Annotated[str, BeforeValidator(to_snake_case), MinLen(1), MaxLen(200)]
"""Classification of entity (e.g., 'person', 'project', 'concept'). """

ALLOWED_CONTENT_TYPES = {
    "text/markdown",
    "text/plain",
    "application/pdf",
    "image/jpeg",
    "image/png",
    "image/svg+xml",
}

ContentType = Annotated[
    str,
    BeforeValidator(str.lower),
    Field(pattern=r"^[\w\-\+\.]+/[\w\-\+\.]+$"),
    Field(json_schema_extra={"examples": list(ALLOWED_CONTENT_TYPES)}),
]


RelationType = Annotated[str, MinLen(1), MaxLen(200)]
"""Type of relationship between entities. Always use active voice present tense."""

ObservationStr = Annotated[
    str,
    BeforeValidator(str.strip),  # Clean whitespace
    MinLen(1),  # Ensure non-empty after stripping
    MaxLen(1000),  # Keep reasonable length
]


class Observation(BaseModel):
    """A single observation with category, content, and optional context."""

    category: Optional[str] = None
    content: ObservationStr
    tags: Optional[List[str]] = Field(default_factory=list)
    context: Optional[str] = None


class Relation(BaseModel):
    """Represents a directed edge between entities in the knowledge graph.

    Relations are directed connections stored in active voice (e.g., "created", "depends_on").
    The from_permalink represents the source or actor entity, while to_permalink represents the target
    or recipient entity.
    """

    from_id: Permalink
    to_id: Permalink
    relation_type: RelationType
    context: Optional[str] = None


class Entity(BaseModel):
    """Represents a node in our knowledge graph - could be a person, project, concept, etc.

    Each entity has:
    - A file path (e.g., "people/jane-doe.md")
    - An entity type (for classification)
    - A list of observations (facts/notes about the entity)
    - Optional relations to other entities
    - Optional description for high-level overview
    """

    # private field to override permalink
    # Use empty string "" as sentinel to indicate permalinks are explicitly disabled
    _permalink: Optional[str] = None

    title: str
    content: Optional[str] = None
    folder: str
    entity_type: EntityType = "note"
    entity_metadata: Optional[Dict] = Field(default=None, description="Optional metadata")
    content_type: ContentType = Field(
        description="MIME type of the content (e.g. text/markdown, image/jpeg)",
        examples=["text/markdown", "image/jpeg"],
        default="text/markdown",
    )

    def __init__(self, **data):
        data["folder"] = sanitize_for_folder(data.get("folder", ""))
        super().__init__(**data)

    @property
    def safe_title(self) -> str:
        """
        A sanitized version of the title, which is safe for use on the filesystem. For example,
        a title of "Coupon Enable/Disable Feature" should create the file as "Coupon Enable-Disable Feature.md"
        instead of creating a file named "Disable Feature.md" beneath the "Coupon Enable" directory.

        Replaces POSIX and/or Windows style slashes as well as a few other characters that are not safe for filenames.
        If kebab_filenames is True, then behavior is consistent with transformation used when generating permalink
        strings (e.g. "Coupon Enable/Disable Feature" -> "coupon-enable-disable-feature").
        """
        fixed_title = sanitize_for_filename(self.title)

        app_config = ConfigManager().config
        use_kebab_case = app_config.kebab_filenames

        if use_kebab_case:
            fixed_title = generate_permalink(file_path=fixed_title, split_extension=False)

        return fixed_title

    @property
    def file_path(self):
        """Get the file path for this entity based on its permalink."""
        safe_title = self.safe_title
        if self.content_type == "text/markdown":
            return (
                os.path.join(self.folder, f"{safe_title}.md") if self.folder else f"{safe_title}.md"
            )
        else:
            return os.path.join(self.folder, safe_title) if self.folder else safe_title

    @property
    def permalink(self) -> Optional[Permalink]:
        """Get a url friendly path}."""
        # Empty string is a sentinel value indicating permalinks are disabled
        if self._permalink == "":
            return None
        return self._permalink or generate_permalink(self.file_path)

    @model_validator(mode="after")
    def infer_content_type(self) -> "Entity":  # pragma: no cover
        if not self.content_type:
            path = Path(self.file_path)
            if not path.exists():
                self.content_type = "text/plain"
            else:
                mime_type, _ = mimetypes.guess_type(path.name)
                self.content_type = mime_type or "text/plain"
        return self

```

--------------------------------------------------------------------------------
/tests/mcp/test_tool_view_note.py:
--------------------------------------------------------------------------------

```python
"""Tests for view_note tool that exercise the full stack with SQLite."""

from textwrap import dedent
from unittest.mock import MagicMock, patch

import pytest
import pytest_asyncio

from basic_memory.mcp.tools import write_note, view_note
from basic_memory.schemas.search import SearchResponse


@pytest_asyncio.fixture
async def mock_call_get():
    """Mock for call_get to simulate different responses."""
    with patch("basic_memory.mcp.tools.read_note.call_get") as mock:
        # Default to 404 - not found
        mock_response = MagicMock()
        mock_response.status_code = 404
        mock.return_value = mock_response
        yield mock


@pytest_asyncio.fixture
async def mock_search():
    """Mock for search tool."""
    with patch("basic_memory.mcp.tools.read_note.search_notes.fn") as mock:
        # Default to empty results
        mock.return_value = SearchResponse(results=[], current_page=1, page_size=1)
        yield mock


@pytest.mark.asyncio
async def test_view_note_basic_functionality(app, test_project):
    """Test viewing a note creates an artifact."""
    # First create a note
    await write_note.fn(
        project=test_project.name,
        title="Test View Note",
        folder="test",
        content="# Test View Note\n\nThis is test content for viewing.",
    )

    # View the note
    result = await view_note.fn("Test View Note", project=test_project.name)

    # Should contain note retrieval message
    assert 'Note retrieved: "Test View Note"' in result
    assert "Display this note as a markdown artifact for the user" in result
    assert "Content:" in result
    assert "---" in result

    # Should contain the note content
    assert "# Test View Note" in result
    assert "This is test content for viewing." in result


@pytest.mark.asyncio
async def test_view_note_with_frontmatter_title(app, test_project):
    """Test viewing a note extracts title from frontmatter."""
    # Create note with frontmatter
    content = dedent("""
        ---
        title: "Frontmatter Title"
        tags: [test]
        ---

        # Frontmatter Title

        Content with frontmatter title.
    """).strip()

    await write_note.fn(
        project=test_project.name, title="Frontmatter Title", folder="test", content=content
    )

    # View the note
    result = await view_note.fn("Frontmatter Title", project=test_project.name)

    # Should show title in retrieval message
    assert 'Note retrieved: "Frontmatter Title"' in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_with_heading_title(app, test_project):
    """Test viewing a note extracts title from first heading when no frontmatter."""
    # Create note with heading but no frontmatter title
    content = "# Heading Title\n\nContent with heading title."

    await write_note.fn(
        project=test_project.name, title="Heading Title", folder="test", content=content
    )

    # View the note
    result = await view_note.fn("Heading Title", project=test_project.name)

    # Should show title in retrieval message
    assert 'Note retrieved: "Heading Title"' in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_unicode_content(app, test_project):
    """Test viewing a note with Unicode content."""
    content = "# Unicode Test 🚀\n\nThis note has emoji 🎉 and unicode ♠♣♥♦"

    await write_note.fn(
        project=test_project.name, title="Unicode Test 🚀", folder="test", content=content
    )

    # View the note
    result = await view_note.fn("Unicode Test 🚀", project=test_project.name)

    # Should handle Unicode properly
    assert "🚀" in result
    assert "🎉" in result
    assert "♠♣♥♦" in result
    assert 'Note retrieved: "Unicode Test 🚀"' in result


@pytest.mark.asyncio
async def test_view_note_by_permalink(app, test_project):
    """Test viewing a note by its permalink."""
    await write_note.fn(
        project=test_project.name,
        title="Permalink Test",
        folder="test",
        content="Content for permalink test.",
    )

    # View by permalink
    result = await view_note.fn("test/permalink-test", project=test_project.name)

    # Should work with permalink
    assert 'Note retrieved: "test/permalink-test"' in result
    assert "Content for permalink test." in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_with_memory_url(app, test_project):
    """Test viewing a note using a memory:// URL."""
    await write_note.fn(
        project=test_project.name,
        title="Memory URL Test",
        folder="test",
        content="Testing memory:// URL handling in view_note",
    )

    # View with memory:// URL
    result = await view_note.fn("memory://test/memory-url-test", project=test_project.name)

    # Should work with memory:// URL
    assert 'Note retrieved: "memory://test/memory-url-test"' in result
    assert "Testing memory:// URL handling in view_note" in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_not_found(app, test_project):
    """Test viewing a non-existent note returns error without artifact."""
    # Try to view non-existent note
    result = await view_note.fn("NonExistent Note", project=test_project.name)

    # Should return error message without artifact instructions
    assert "# Note Not Found" in result
    assert "NonExistent Note" in result
    assert "Display this note as a markdown artifact" not in result  # No artifact for errors
    assert "Check Identifier Type" in result
    assert "Search Instead" in result


@pytest.mark.asyncio
async def test_view_note_pagination(app, test_project):
    """Test viewing a note with pagination parameters."""
    await write_note.fn(
        project=test_project.name,
        title="Pagination Test",
        folder="test",
        content="Content for pagination test.",
    )

    # View with pagination
    result = await view_note.fn("Pagination Test", page=1, page_size=5, project=test_project.name)

    # Should work with pagination
    assert 'Note retrieved: "Pagination Test"' in result
    assert "Content for pagination test." in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_project_parameter(app, test_project):
    """Test viewing a note with project parameter."""
    await write_note.fn(
        project=test_project.name,
        title="Project Test",
        folder="test",
        content="Content for project test.",
    )

    # View with explicit project
    result = await view_note.fn("Project Test", project=test_project.name)

    # Should work with project parameter
    assert 'Note retrieved: "Project Test"' in result
    assert "Content for project test." in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_artifact_identifier_unique(app, test_project):
    """Test that different notes are retrieved correctly with unique identifiers."""
    # Create two notes
    await write_note.fn(
        project=test_project.name, title="Note One", folder="test", content="Content one"
    )
    await write_note.fn(
        project=test_project.name, title="Note Two", folder="test", content="Content two"
    )

    # View both notes
    result1 = await view_note.fn("Note One", project=test_project.name)
    result2 = await view_note.fn("Note Two", project=test_project.name)

    # Should have different note identifiers in retrieval messages
    assert 'Note retrieved: "Note One"' in result1
    assert 'Note retrieved: "Note Two"' in result2
    assert "Content one" in result1
    assert "Content two" in result2


@pytest.mark.asyncio
async def test_view_note_fallback_identifier_as_title(app, test_project):
    """Test that view_note uses identifier as title when no title is extractable."""
    # Create a note with no clear title structure
    await write_note.fn(
        project=test_project.name,
        title="Simple Note",
        folder="test",
        content="Just plain content with no headings or frontmatter title",
    )

    # View the note
    result = await view_note.fn("Simple Note", project=test_project.name)

    # Should use identifier as title in retrieval message
    assert 'Note retrieved: "Simple Note"' in result
    assert "Display this note as a markdown artifact for the user" in result


@pytest.mark.asyncio
async def test_view_note_direct_success(app, test_project, mock_call_get):
    """Test view_note with successful direct permalink lookup."""
    # Setup mock for successful response with frontmatter
    note_content = dedent("""
        ---
        title: "Test Note"
        ---
        # Test Note

        This is a test note.
    """).strip()

    mock_response = MagicMock()
    mock_response.status_code = 200
    mock_response.text = note_content
    mock_call_get.return_value = mock_response

    # Call the function
    result = await view_note.fn("test/test-note", project=test_project.name)

    # Verify direct lookup was used
    mock_call_get.assert_called_once()
    assert "test/test-note" in mock_call_get.call_args[0][1]

    # Verify result contains note content
    assert 'Note retrieved: "test/test-note"' in result
    assert "Display this note as a markdown artifact for the user" in result
    assert "This is a test note." in result

```

--------------------------------------------------------------------------------
/src/basic_memory/api/routers/prompt_router.py:
--------------------------------------------------------------------------------

```python
"""Router for prompt-related operations.

This router is responsible for rendering various prompts using Handlebars templates.
It centralizes all prompt formatting logic that was previously in the MCP prompts.
"""

from datetime import datetime, timezone
from fastapi import APIRouter, HTTPException, status
from loguru import logger

from basic_memory.api.routers.utils import to_graph_context, to_search_results
from basic_memory.api.template_loader import template_loader
from basic_memory.schemas.base import parse_timeframe
from basic_memory.deps import (
    ContextServiceDep,
    EntityRepositoryDep,
    SearchServiceDep,
    EntityServiceDep,
)
from basic_memory.schemas.prompt import (
    ContinueConversationRequest,
    SearchPromptRequest,
    PromptResponse,
    PromptMetadata,
)
from basic_memory.schemas.search import SearchItemType, SearchQuery

router = APIRouter(prefix="/prompt", tags=["prompt"])


@router.post("/continue-conversation", response_model=PromptResponse)
async def continue_conversation(
    search_service: SearchServiceDep,
    entity_service: EntityServiceDep,
    context_service: ContextServiceDep,
    entity_repository: EntityRepositoryDep,
    request: ContinueConversationRequest,
) -> PromptResponse:
    """Generate a prompt for continuing a conversation.

    This endpoint takes a topic and/or timeframe and generates a prompt with
    relevant context from the knowledge base.

    Args:
        request: The request parameters

    Returns:
        Formatted continuation prompt with context
    """
    logger.info(
        f"Generating continue conversation prompt, topic: {request.topic}, timeframe: {request.timeframe}"
    )

    since = parse_timeframe(request.timeframe) if request.timeframe else None

    # Initialize search results
    search_results = []

    # Get data needed for template
    if request.topic:
        query = SearchQuery(text=request.topic, after_date=request.timeframe)
        results = await search_service.search(query, limit=request.search_items_limit)
        search_results = await to_search_results(entity_service, results)

        # Build context from results
        all_hierarchical_results = []
        for result in search_results:
            if hasattr(result, "permalink") and result.permalink:
                # Get hierarchical context using the new dataclass-based approach
                context_result = await context_service.build_context(
                    result.permalink,
                    depth=request.depth,
                    since=since,
                    max_related=request.related_items_limit,
                    include_observations=True,  # Include observations for entities
                )

                # Process results into the schema format
                graph_context = await to_graph_context(
                    context_result, entity_repository=entity_repository
                )

                # Add results to our collection (limit to top results for each permalink)
                if graph_context.results:
                    all_hierarchical_results.extend(graph_context.results[:3])

        # Limit to a reasonable number of total results
        all_hierarchical_results = all_hierarchical_results[:10]

        template_context = {
            "topic": request.topic,
            "timeframe": request.timeframe,
            "hierarchical_results": all_hierarchical_results,
            "has_results": len(all_hierarchical_results) > 0,
        }
    else:
        # If no topic, get recent activity
        context_result = await context_service.build_context(
            types=[SearchItemType.ENTITY],
            depth=request.depth,
            since=since,
            max_related=request.related_items_limit,
            include_observations=True,
        )
        recent_context = await to_graph_context(context_result, entity_repository=entity_repository)

        hierarchical_results = recent_context.results[:5]  # Limit to top 5 recent items

        template_context = {
            "topic": f"Recent Activity from ({request.timeframe})",
            "timeframe": request.timeframe,
            "hierarchical_results": hierarchical_results,
            "has_results": len(hierarchical_results) > 0,
        }

    try:
        # Render template
        rendered_prompt = await template_loader.render(
            "prompts/continue_conversation.hbs", template_context
        )

        # Calculate metadata
        # Count items of different types
        observation_count = 0
        relation_count = 0
        entity_count = 0

        # Get the hierarchical results from the template context
        hierarchical_results_for_count = template_context.get("hierarchical_results", [])

        # For topic-based search
        if request.topic:
            for item in hierarchical_results_for_count:
                if hasattr(item, "observations"):
                    observation_count += len(item.observations) if item.observations else 0

                if hasattr(item, "related_results"):
                    for related in item.related_results or []:
                        if hasattr(related, "type"):
                            if related.type == "relation":
                                relation_count += 1
                            elif related.type == "entity":  # pragma: no cover
                                entity_count += 1  # pragma: no cover
        # For recent activity
        else:
            for item in hierarchical_results_for_count:
                if hasattr(item, "observations"):
                    observation_count += len(item.observations) if item.observations else 0

                if hasattr(item, "related_results"):
                    for related in item.related_results or []:
                        if hasattr(related, "type"):
                            if related.type == "relation":
                                relation_count += 1
                            elif related.type == "entity":  # pragma: no cover
                                entity_count += 1  # pragma: no cover

        # Build metadata
        metadata = {
            "query": request.topic,
            "timeframe": request.timeframe,
            "search_count": len(search_results)
            if request.topic
            else 0,  # Original search results count
            "context_count": len(hierarchical_results_for_count),
            "observation_count": observation_count,
            "relation_count": relation_count,
            "total_items": (
                len(hierarchical_results_for_count)
                + observation_count
                + relation_count
                + entity_count
            ),
            "search_limit": request.search_items_limit,
            "context_depth": request.depth,
            "related_limit": request.related_items_limit,
            "generated_at": datetime.now(timezone.utc).isoformat(),
        }

        prompt_metadata = PromptMetadata(**metadata)

        return PromptResponse(
            prompt=rendered_prompt, context=template_context, metadata=prompt_metadata
        )
    except Exception as e:
        logger.error(f"Error rendering continue conversation template: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error rendering prompt template: {str(e)}",
        )


@router.post("/search", response_model=PromptResponse)
async def search_prompt(
    search_service: SearchServiceDep,
    entity_service: EntityServiceDep,
    request: SearchPromptRequest,
    page: int = 1,
    page_size: int = 10,
) -> PromptResponse:
    """Generate a prompt for search results.

    This endpoint takes a search query and formats the results into a helpful
    prompt with context and suggestions.

    Args:
        request: The search parameters
        page: The page number for pagination
        page_size: The number of results per page, defaults to 10

    Returns:
        Formatted search results prompt with context
    """
    logger.info(f"Generating search prompt, query: {request.query}, timeframe: {request.timeframe}")

    limit = page_size
    offset = (page - 1) * page_size

    query = SearchQuery(text=request.query, after_date=request.timeframe)
    results = await search_service.search(query, limit=limit, offset=offset)
    search_results = await to_search_results(entity_service, results)

    template_context = {
        "query": request.query,
        "timeframe": request.timeframe,
        "results": search_results,
        "has_results": len(search_results) > 0,
        "result_count": len(search_results),
    }

    try:
        # Render template
        rendered_prompt = await template_loader.render("prompts/search.hbs", template_context)

        # Build metadata
        metadata = {
            "query": request.query,
            "timeframe": request.timeframe,
            "search_count": len(search_results),
            "context_count": len(search_results),
            "observation_count": 0,  # Search results don't include observations
            "relation_count": 0,  # Search results don't include relations
            "total_items": len(search_results),
            "search_limit": limit,
            "context_depth": 0,  # No context depth for basic search
            "related_limit": 0,  # No related items for basic search
            "generated_at": datetime.now(timezone.utc).isoformat(),
        }

        prompt_metadata = PromptMetadata(**metadata)

        return PromptResponse(
            prompt=rendered_prompt, context=template_context, metadata=prompt_metadata
        )
    except Exception as e:
        logger.error(f"Error rendering search template: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error rendering prompt template: {str(e)}",
        )

```
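For reference, a minimal sketch of calling the search prompt endpoint with `httpx`. The `/prompt/search` path, the `page`/`page_size` query parameters, and the `prompt` field of the response come from the router above; the base URL, mount point, and the example query/timeframe values are assumptions.

```python
import httpx

# Assumes the API is served locally and the router is mounted at the app root.
response = httpx.post(
    "http://localhost:8000/prompt/search",
    params={"page": 1, "page_size": 10},
    json={"query": "sqlite wal", "timeframe": "7d"},  # fields used by SearchPromptRequest above
)
response.raise_for_status()
payload = response.json()
print(payload["prompt"])  # rendered prompt; "context" and "metadata" are also returned
```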

--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/read_content.py:
--------------------------------------------------------------------------------

```python
"""File reading tool for Basic Memory MCP server.

This module provides tools for reading raw file content directly,
supporting various file types including text, images, and other binary files.
Files are read directly without any knowledge graph processing.
"""

import base64
import io

from typing import Optional

from loguru import logger
from PIL import Image as PILImage
from fastmcp import Context

from basic_memory.mcp.project_context import get_active_project
from basic_memory.mcp.server import mcp
from basic_memory.mcp.async_client import get_client
from basic_memory.mcp.tools.utils import call_get
from basic_memory.schemas.memory import memory_url_path
from basic_memory.utils import validate_project_path


def calculate_target_params(content_length):
    """Calculate initial quality and size based on input file size"""
    target_size = 350000  # Reduced target for more safety margin
    ratio = content_length / target_size

    logger.debug(
        "Calculating target parameters",
        content_length=content_length,
        ratio=ratio,
        target_size=target_size,
    )

    if ratio > 4:
        # Very large images - start very aggressive
        return 50, 600  # Lower initial quality and size
    elif ratio > 2:
        return 60, 800
    else:
        return 70, 1000


def resize_image(img, max_size):
    """Resize image maintaining aspect ratio"""
    original_dimensions = {"width": img.width, "height": img.height}

    if img.width > max_size or img.height > max_size:
        ratio = min(max_size / img.width, max_size / img.height)
        new_size = (int(img.width * ratio), int(img.height * ratio))
        logger.debug("Resizing image", original=original_dimensions, target=new_size, ratio=ratio)
        return img.resize(new_size, PILImage.Resampling.LANCZOS)

    logger.debug("No resize needed", dimensions=original_dimensions)
    return img


def optimize_image(img, content_length, max_output_bytes=350000):
    """Iteratively optimize image with aggressive size reduction"""
    stats = {
        "dimensions": {"width": img.width, "height": img.height},
        "mode": img.mode,
        "estimated_memory": (img.width * img.height * len(img.getbands())),
    }

    initial_quality, initial_size = calculate_target_params(content_length)

    logger.debug(
        "Starting optimization",
        image_stats=stats,
        content_length=content_length,
        initial_quality=initial_quality,
        initial_size=initial_size,
        max_output_bytes=max_output_bytes,
    )

    quality = initial_quality
    size = initial_size

    # Convert to RGB if needed
    if img.mode in ("RGBA", "LA") or (img.mode == "P" and "transparency" in img.info):
        img = img.convert("RGB")
        logger.debug("Converted to RGB mode")

    iteration = 0
    min_size = 300  # Absolute minimum size
    min_quality = 20  # Absolute minimum quality

    while True:
        iteration += 1
        buf = io.BytesIO()
        resized = resize_image(img, size)

        resized.save(
            buf,
            format="JPEG",
            quality=quality,
            optimize=True,
            progressive=True,
            subsampling="4:2:0",
        )

        output_size = buf.getbuffer().nbytes
        reduction_ratio = output_size / content_length

        logger.debug(
            "Optimization attempt",
            iteration=iteration,
            quality=quality,
            size=size,
            output_bytes=output_size,
            target_bytes=max_output_bytes,
            reduction_ratio=f"{reduction_ratio:.2f}",
        )

        if output_size < max_output_bytes:
            logger.info(
                "Image optimization complete",
                final_size=output_size,
                quality=quality,
                dimensions={"width": resized.width, "height": resized.height},
                reduction_ratio=f"{reduction_ratio:.2f}",
            )
            return buf.getvalue()

        # Very aggressive reduction for large files
        if content_length > 2000000:  # 2MB+   # pragma: no cover
            quality = max(min_quality, quality - 20)
            size = max(min_size, int(size * 0.6))
        elif content_length > 1000000:  # 1MB+ # pragma: no cover
            quality = max(min_quality, quality - 15)
            size = max(min_size, int(size * 0.7))
        else:
            quality = max(min_quality, quality - 10)  # pragma: no cover
            size = max(min_size, int(size * 0.8))  # pragma: no cover

        logger.debug("Reducing parameters", new_quality=quality, new_size=size)  # pragma: no cover

        # If we've hit minimum values and still too big
        if quality <= min_quality and size <= min_size:  # pragma: no cover
            logger.warning(
                "Reached minimum parameters",
                final_size=output_size,
                over_limit_by=output_size - max_output_bytes,
            )
            return buf.getvalue()


@mcp.tool(description="Read a file's raw content by path or permalink")
async def read_content(
    path: str, project: Optional[str] = None, context: Context | None = None
) -> dict:
    """Read a file's raw content by path or permalink.

    This tool provides direct access to file content in the knowledge base,
    handling different file types appropriately. Uses stateless architecture -
    project parameter optional with server resolution.

    Supported file types:
    - Text files (markdown, code, etc.) are returned as plain text
    - Images are automatically resized/optimized for display
    - Other binary files are returned as base64 if below size limits

    Args:
        path: The path or permalink to the file. Can be:
            - A regular file path (docs/example.md)
            - A memory URL (memory://docs/example)
            - A permalink (docs/example)
        project: Project name to read from. Optional - server will resolve using hierarchy.
                If unknown, use list_memory_projects() to discover available projects.
        context: Optional FastMCP context for performance caching.

    Returns:
        A dictionary with the file content and metadata:
        - For text: {"type": "text", "text": "content", "content_type": "text/markdown", "encoding": "utf-8"}
        - For images: {"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": "base64_data"}}
        - For other files: {"type": "document", "source": {"type": "base64", "media_type": "content_type", "data": "base64_data"}}
        - For errors: {"type": "error", "error": "error message"}

    Examples:
        # Read a markdown file
        result = await read_content("docs/project-specs.md")

        # Read an image
        image_data = await read_content("assets/diagram.png")

        # Read using memory URL
        content = await read_content("memory://docs/architecture")

        # Read configuration file
        config = await read_content("config/settings.json")

        # Explicit project specification
        result = await read_content("docs/project-specs.md", project="my-project")

    Raises:
        HTTPError: If project doesn't exist or is inaccessible
        SecurityError: If path attempts path traversal
    """
    logger.info("Reading file", path=path, project=project)

    async with get_client() as client:
        active_project = await get_active_project(client, project, context)
        project_url = active_project.project_url

        url = memory_url_path(path)

        # Validate path to prevent path traversal attacks
        project_path = active_project.home
        if not validate_project_path(url, project_path):
            logger.warning(
                "Attempted path traversal attack blocked",
                path=path,
                url=url,
                project=active_project.name,
            )
            return {
                "type": "error",
                "error": f"Path '{path}' is not allowed - paths must stay within project boundaries",
            }

        response = await call_get(client, f"{project_url}/resource/{url}")
        content_type = response.headers.get("content-type", "application/octet-stream")
        content_length = int(response.headers.get("content-length", 0))

        logger.debug("Resource metadata", content_type=content_type, size=content_length, path=path)

        # Handle text or json
        if content_type.startswith("text/") or content_type == "application/json":
            logger.debug("Processing text resource")
            return {
                "type": "text",
                "text": response.text,
                "content_type": content_type,
                "encoding": "utf-8",
            }

        # Handle images
        elif content_type.startswith("image/"):
            logger.debug("Processing image")
            img = PILImage.open(io.BytesIO(response.content))
            img_bytes = optimize_image(img, content_length)

            return {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": "image/jpeg",
                    "data": base64.b64encode(img_bytes).decode("utf-8"),
                },
            }

        # Handle other file types
        else:
            logger.debug(f"Processing binary resource content_type {content_type}")
            if content_length > 350000:  # pragma: no cover
                logger.warning("Document too large for response", size=content_length)
                return {
                    "type": "error",
                    "error": f"Document size {content_length} bytes exceeds maximum allowed size",
                }
            return {
                "type": "document",
                "source": {
                    "type": "base64",
                    "media_type": content_type,
                    "data": base64.b64encode(response.content).decode("utf-8"),
                },
            }

```

--------------------------------------------------------------------------------
/v15-docs/sqlite-performance.md:
--------------------------------------------------------------------------------

```markdown
# SQLite Performance Improvements

**Status**: Performance Enhancement
**PR**: #316
**Impact**: Faster database operations, better concurrency

## What's New

v0.15.0 enables **Write-Ahead Logging (WAL) mode** for SQLite and adds Windows-specific optimizations, significantly improving performance and concurrent access.

## Key Changes

### 1. WAL Mode Enabled

**Write-Ahead Logging (WAL)** is now enabled by default:

```python
# Applied automatically on database initialization
PRAGMA journal_mode=WAL
```

**Benefits:**
- **Better concurrency:** Readers don't block writers
- **Faster writes:** Transactions commit faster
- **Crash resilience:** Better recovery from crashes
- **Reduced disk I/O:** Fewer fsync operations

### 2. Windows Optimizations

Additional Windows-specific settings:

```python
# Windows-specific SQLite settings
PRAGMA synchronous=NORMAL      # Balanced durability/performance
PRAGMA cache_size=-2000        # 2MB cache
PRAGMA temp_store=MEMORY       # Temp tables in memory
```

## Performance Impact

### Before (DELETE mode)

```python
# Old journal mode
PRAGMA journal_mode=DELETE

# Characteristics:
# - Writers block readers
# - Readers block writers
# - Slower concurrent access
# - More disk I/O
```

**Measured impact:**
- Concurrent read/write: **Serialized (slow)**
- Write speed: **Baseline**
- Crash recovery: **Good**

### After (WAL mode)

```python
# New journal mode
PRAGMA journal_mode=WAL

# Characteristics:
# - Readers don't block writers
# - Writers don't block readers
# - Faster concurrent access
# - Reduced disk I/O
```

**Measured impact:**
- Concurrent read/write: **Parallel (fast)**
- Write speed: **Up to 2-3x faster**
- Crash recovery: **Excellent**

## How WAL Works

### Traditional DELETE Mode

```
Write Transaction:
1. Lock database
2. Write to journal file
3. Modify database
4. Delete journal
5. Unlock database

Problem: Readers wait for writers
```

### WAL Mode

```
Write Transaction:
1. Append changes to WAL file
2. Commit (fast)
3. Periodically checkpoint WAL → database

Benefit: Readers read from database while WAL is being written
```

### Checkpoint Process

WAL file periodically merged back to database:

```python
# Automatic checkpointing
# - Triggered at ~1000 pages in WAL
# - Or manual: PRAGMA wal_checkpoint(TRUNCATE)
```

## Database Files

### Before WAL

```bash
~/.basic-memory/
└── memory.db               # Single database file
```

### After WAL

```bash
~/.basic-memory/
├── memory.db              # Main database
├── memory.db-wal          # Write-ahead log
└── memory.db-shm          # Shared memory file
```

**Important:** All three files are required for the database to function

## Use Cases

### 1. Concurrent MCP Servers

**Before (slow):**
```python
# Multiple MCP servers sharing database
Server A: Reading... (blocks Server B)
Server B: Waiting to write...
```

**After (fast):**
```python
# Concurrent access
Server A: Reading (doesn't block)
Server B: Writing (doesn't block)
Server C: Reading (doesn't block)
```

### 2. Real-Time Sync

**Before:**
```bash
# Sync blocks reads
bm sync &              # Background sync
bm tools search ...    # Waits for sync
```

**After:**
```bash
# Sync doesn't block
bm sync &              # Background sync
bm tools search ...    # Runs concurrently
```

### 3. Large Knowledge Bases

**Before:**
- Large writes cause delays
- Readers wait during bulk updates
- Slow performance on large datasets

**After:**
- Large writes don't block reads
- Readers continue during bulk updates
- Better performance on large datasets

## Configuration

### WAL Mode (Default)

Enabled automatically:

```python
# Basic Memory applies on initialization
async def init_db():
    await db.execute("PRAGMA journal_mode=WAL")
    await db.execute("PRAGMA synchronous=NORMAL")
```

### Verify WAL Mode

```bash
# Check journal mode
sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"
# → wal
```

### Manual Configuration (Advanced)

```python
from basic_memory.db import get_db

# Get database connection
db = await get_db()

# Check settings
result = await db.execute("PRAGMA journal_mode")
print(result)  # → wal

result = await db.execute("PRAGMA synchronous")
print(result)  # → 1 (NORMAL)
```

## Platform-Specific Optimizations

### Windows

```python
# Windows-specific settings
PRAGMA synchronous=NORMAL      # Balance safety/speed
PRAGMA temp_store=MEMORY       # Faster temp operations
PRAGMA cache_size=-2000        # 2MB cache
```

**Benefits on Windows:**
- Faster on NTFS
- Better with Windows Defender
- Improved antivirus compatibility

### macOS/Linux

```python
# Unix-specific (defaults work well)
PRAGMA journal_mode=WAL
PRAGMA synchronous=NORMAL
```

**Benefits:**
- Faster on APFS/ext4
- Better with Spotlight/indexing
- Improved filesystem syncing

## Maintenance

### Checkpoint WAL File

WAL auto-checkpoints, but you can force it:

```python
# Python
from basic_memory.db import get_db

db = await get_db()
await db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
```

```bash
# Command line
sqlite3 ~/.basic-memory/memory.db "PRAGMA wal_checkpoint(TRUNCATE);"
```

**When to checkpoint:**
- Before backup
- After large bulk operations
- When WAL file grows large

### Backup Considerations

**Wrong way (incomplete):**
```bash
# ✗ Only copies main file, misses WAL
cp ~/.basic-memory/memory.db backup.db
```

**Right way (complete):**
```bash
# ✓ Checkpoint first, then backup
sqlite3 ~/.basic-memory/memory.db "PRAGMA wal_checkpoint(TRUNCATE);"
cp ~/.basic-memory/memory.db* backup/

# Or use SQLite backup command
sqlite3 ~/.basic-memory/memory.db ".backup backup.db"
```
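The same checkpoint-then-copy flow can be scripted from Python. A minimal sketch using the standard library `sqlite3` module (not a Basic Memory API); the backup filename is arbitrary:

```python
import sqlite3
from pathlib import Path

db_path = Path.home() / ".basic-memory" / "memory.db"

src = sqlite3.connect(db_path)
try:
    # Fold outstanding WAL pages back into the main database file
    src.execute("PRAGMA wal_checkpoint(TRUNCATE)")
    # The online backup API yields a consistent single-file copy
    dest = sqlite3.connect(db_path.with_name("memory-backup.db"))
    with dest:
        src.backup(dest)
    dest.close()
finally:
    src.close()
```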

### Monitoring WAL Size

```python
import os

wal_file = os.path.expanduser("~/.basic-memory/memory.db-wal")
if os.path.exists(wal_file):
    size_mb = os.path.getsize(wal_file) / (1024 * 1024)
    print(f"WAL size: {size_mb:.2f} MB")

    if size_mb > 10:  # More than 10MB
        # Consider checkpointing ("db" obtained earlier, e.g. via basic_memory.db.get_db())
        db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
```

## Troubleshooting

### Database Locked Error

**Problem:** Still seeing "database is locked" errors

**Possible causes:**
1. WAL mode not enabled
2. Network filesystem (NFS, SMB)
3. Transaction timeout

**Solutions:**

```bash
# 1. Verify WAL mode
sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"

# 2. Check filesystem (WAL requires local filesystem)
df -T ~/.basic-memory/memory.db

# 3. Increase timeout (if needed)
# In code:
db.execute("PRAGMA busy_timeout=10000")  # 10 seconds
```

### WAL File Growing Large

**Problem:** memory.db-wal keeps growing

**Checkpoint more frequently:**

```python
# Automatic checkpoint at smaller size
db.execute("PRAGMA wal_autocheckpoint=100")  # Every 100 pages

# Or manual checkpoint
db.execute("PRAGMA wal_checkpoint(TRUNCATE)")
```

### Network Filesystem Issues

**Problem:** Using WAL on NFS/SMB

**Limitation:** WAL requires local filesystem with proper locking

**Solution:**
```bash
# Option 1: Use local filesystem
mv ~/.basic-memory /local/path/.basic-memory

# Option 2: Fallback to DELETE mode (slower but works)
sqlite3 memory.db "PRAGMA journal_mode=DELETE"
```

## Performance Benchmarks

### Concurrent Reads/Writes

**Before WAL:**
```
Test: 1 writer + 5 readers
Result: Serialized access
Time: 10.5 seconds
```

**After WAL:**
```
Test: 1 writer + 5 readers
Result: Concurrent access
Time: 3.2 seconds (3.3x faster)
```

### Bulk Operations

**Before WAL:**
```
Test: Import 1000 notes
Result: 15.2 seconds
```

**After WAL:**
```
Test: Import 1000 notes
Result: 5.8 seconds (2.6x faster)
```

### Search Performance

**Before WAL (with concurrent writes):**
```
Test: Full-text search during sync
Result: Blocked, 2.1 seconds
```

**After WAL (with concurrent writes):**
```
Test: Full-text search during sync
Result: Concurrent, 0.4 seconds (5.3x faster)
```

## Best Practices

### 1. Let WAL Auto-Checkpoint

Default auto-checkpointing works well:
```python
# Default: checkpoint at ~1000 pages
# Usually optimal, don't change unless needed
```

### 2. Checkpoint Before Backup

```bash
# Always checkpoint before backup
sqlite3 memory.db "PRAGMA wal_checkpoint(TRUNCATE)"
cp memory.db* backup/
```

### 3. Monitor WAL Size

```bash
# Check WAL size periodically
ls -lh ~/.basic-memory/memory.db-wal

# If > 50MB, consider more frequent checkpoints
```

### 4. Use Local Filesystem

```bash
# ✓ Good: Local SSD/HDD
/home/user/.basic-memory/

# ✗ Bad: Network filesystem
/mnt/nfs/home/.basic-memory/
```

### 5. Don't Delete WAL Files

```bash
# ✗ Never delete these manually
# memory.db-wal
# memory.db-shm

# Let SQLite manage them
```

## Advanced Configuration

### Custom Checkpoint Interval

```python
# Checkpoint more frequently (smaller WAL)
db.execute("PRAGMA wal_autocheckpoint=100")

# Checkpoint less frequently (larger WAL, fewer interruptions)
db.execute("PRAGMA wal_autocheckpoint=10000")
```

### Synchronous Modes

```python
# Modes (in order of durability vs speed):
db.execute("PRAGMA synchronous=OFF")     # Fastest, least safe
db.execute("PRAGMA synchronous=NORMAL")   # Balanced (default)
db.execute("PRAGMA synchronous=FULL")     # Safest, slowest
```

### Cache Size

```python
# Larger cache = faster, more memory
db.execute("PRAGMA cache_size=-10000")  # 10MB cache
db.execute("PRAGMA cache_size=-50000")  # 50MB cache
```

## Migration from v0.14.x

### Automatic Migration

**First run on v0.15.0:**
```bash
bm sync
# → Automatically converts to WAL mode
# → Creates memory.db-wal and memory.db-shm
```

**No action required** - migration is automatic

### Verifying Migration

```bash
# Check mode changed
sqlite3 ~/.basic-memory/memory.db "PRAGMA journal_mode;"
# → wal (was: delete)

# Check new files exist
ls -la ~/.basic-memory/memory.db*
# → memory.db
# → memory.db-wal
# → memory.db-shm
```

## See Also

- SQLite WAL documentation: https://www.sqlite.org/wal.html
- `api-performance.md` - API-level optimizations
- `background-relations.md` - Concurrent processing improvements
- Database optimization guide

```

--------------------------------------------------------------------------------
/tests/repository/test_search_repository_edit_bug_fix.py:
--------------------------------------------------------------------------------

```python
"""Tests for the search repository edit bug fix.

This test reproduces the critical bug where editing notes causes them to disappear
from the search index due to missing project_id filter in index_item() method.
"""

from datetime import datetime, timezone

import pytest
import pytest_asyncio

from basic_memory.models.project import Project
from basic_memory.repository.search_repository import SearchRepository, SearchIndexRow
from basic_memory.schemas.search import SearchItemType


@pytest_asyncio.fixture
async def second_test_project(project_repository):
    """Create a second project for testing project isolation during edits."""
    project_data = {
        "name": "Second Edit Test Project",
        "description": "Another project for testing edit bug",
        "path": "/second/edit/test/path",
        "is_active": True,
        "is_default": None,
    }
    return await project_repository.create(project_data)


@pytest_asyncio.fixture
async def second_search_repo(session_maker, second_test_project):
    """Create a search repository for the second project."""
    return SearchRepository(session_maker, project_id=second_test_project.id)


@pytest.mark.asyncio
async def test_index_item_respects_project_isolation_during_edit():
    """Test that index_item() doesn't delete records from other projects during edits.

    This test reproduces the critical bug where editing a note in one project
    would delete search index entries with the same permalink from ALL projects,
    causing notes to disappear from the search index.
    """
    from basic_memory import db
    from basic_memory.models.base import Base
    from basic_memory.repository.search_repository import SearchRepository
    from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker

    # Create a separate in-memory database for this test
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    session_maker = async_sessionmaker(engine, expire_on_commit=False)

    # Create the database schema
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    # Create two projects
    async with db.scoped_session(session_maker) as session:
        project1 = Project(
            name="Project 1",
            description="First project",
            path="/project1/path",
            is_active=True,
            is_default=True,
        )
        project2 = Project(
            name="Project 2",
            description="Second project",
            path="/project2/path",
            is_active=True,
            is_default=False,
        )
        session.add(project1)
        session.add(project2)
        await session.flush()

        project1_id = project1.id
        project2_id = project2.id
        await session.commit()

    # Create search repositories for both projects
    repo1 = SearchRepository(session_maker, project_id=project1_id)
    repo2 = SearchRepository(session_maker, project_id=project2_id)

    # Initialize search index
    await repo1.init_search_index()

    # Create two notes with the SAME permalink in different projects
    # This simulates the same note name/structure across different projects
    same_permalink = "notes/test-note"

    search_row1 = SearchIndexRow(
        id=1,
        type=SearchItemType.ENTITY.value,
        title="Test Note in Project 1",
        content_stems="project 1 content original",
        content_snippet="This is the original content in project 1",
        permalink=same_permalink,
        file_path="notes/test_note.md",
        entity_id=1,
        metadata={"entity_type": "note"},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
        project_id=project1_id,
    )

    search_row2 = SearchIndexRow(
        id=2,
        type=SearchItemType.ENTITY.value,
        title="Test Note in Project 2",
        content_stems="project 2 content original",
        content_snippet="This is the original content in project 2",
        permalink=same_permalink,  # SAME permalink as project 1
        file_path="notes/test_note.md",
        entity_id=2,
        metadata={"entity_type": "note"},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
        project_id=project2_id,
    )

    # Index both items in their respective projects
    await repo1.index_item(search_row1)
    await repo2.index_item(search_row2)

    # Verify both projects can find their respective notes
    results1_before = await repo1.search(search_text="project 1 content")
    assert len(results1_before) == 1
    assert results1_before[0].title == "Test Note in Project 1"
    assert results1_before[0].project_id == project1_id

    results2_before = await repo2.search(search_text="project 2 content")
    assert len(results2_before) == 1
    assert results2_before[0].title == "Test Note in Project 2"
    assert results2_before[0].project_id == project2_id

    # Now simulate editing the note in project 1 (which re-indexes it)
    # This would trigger the bug where the DELETE query doesn't filter by project_id
    edited_search_row1 = SearchIndexRow(
        id=1,
        type=SearchItemType.ENTITY.value,
        title="Test Note in Project 1",
        content_stems="project 1 content EDITED",  # Changed content
        content_snippet="This is the EDITED content in project 1",
        permalink=same_permalink,
        file_path="notes/test_note.md",
        entity_id=1,
        metadata={"entity_type": "note"},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
        project_id=project1_id,
    )

    # Re-index the edited note in project 1
    # BEFORE THE FIX: This would delete the note from project 2 as well!
    await repo1.index_item(edited_search_row1)

    # Verify project 1 has the edited version
    results1_after = await repo1.search(search_text="project 1 content EDITED")
    assert len(results1_after) == 1
    assert results1_after[0].title == "Test Note in Project 1"
    assert "EDITED" in results1_after[0].content_snippet

    # CRITICAL TEST: Verify project 2's note is still there (the bug would delete it)
    results2_after = await repo2.search(search_text="project 2 content")
    assert len(results2_after) == 1, "Project 2's note disappeared after editing project 1's note!"
    assert results2_after[0].title == "Test Note in Project 2"
    assert results2_after[0].project_id == project2_id
    assert "original" in results2_after[0].content_snippet  # Should still be original

    # Double-check: project 1 should not be able to see project 2's note
    cross_search = await repo1.search(search_text="project 2 content")
    assert len(cross_search) == 0

    await engine.dispose()


@pytest.mark.asyncio
async def test_index_item_updates_existing_record_same_project():
    """Test that index_item() correctly updates existing records within the same project."""
    from basic_memory import db
    from basic_memory.models.base import Base
    from basic_memory.repository.search_repository import SearchRepository
    from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker

    # Create a separate in-memory database for this test
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    session_maker = async_sessionmaker(engine, expire_on_commit=False)

    # Create the database schema
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    # Create one project
    async with db.scoped_session(session_maker) as session:
        project = Project(
            name="Test Project",
            description="Test project",
            path="/test/path",
            is_active=True,
            is_default=True,
        )
        session.add(project)
        await session.flush()
        project_id = project.id
        await session.commit()

    # Create search repository
    repo = SearchRepository(session_maker, project_id=project_id)
    await repo.init_search_index()

    permalink = "test/my-note"

    # Create initial note
    initial_row = SearchIndexRow(
        id=1,
        type=SearchItemType.ENTITY.value,
        title="My Test Note",
        content_stems="initial content here",
        content_snippet="This is the initial content",
        permalink=permalink,
        file_path="test/my_note.md",
        entity_id=1,
        metadata={"entity_type": "note"},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
        project_id=project_id,
    )

    # Index the initial version
    await repo.index_item(initial_row)

    # Verify it exists
    results_initial = await repo.search(search_text="initial content")
    assert len(results_initial) == 1
    assert results_initial[0].content_snippet == "This is the initial content"

    # Now update the note (simulate an edit)
    updated_row = SearchIndexRow(
        id=1,
        type=SearchItemType.ENTITY.value,
        title="My Test Note",
        content_stems="updated content here",  # Changed
        content_snippet="This is the UPDATED content",  # Changed
        permalink=permalink,  # Same permalink
        file_path="test/my_note.md",
        entity_id=1,
        metadata={"entity_type": "note"},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
        project_id=project_id,
    )

    # Re-index (should replace the old version)
    await repo.index_item(updated_row)

    # Verify the old version is gone
    results_old = await repo.search(search_text="initial content")
    assert len(results_old) == 0

    # Verify the new version exists
    results_new = await repo.search(search_text="updated content")
    assert len(results_new) == 1
    assert results_new[0].content_snippet == "This is the UPDATED content"

    # Verify we only have one record (not duplicated)
    all_results = await repo.search(search_text="My Test Note")
    assert len(all_results) == 1

    await engine.dispose()

```

--------------------------------------------------------------------------------
/v15-docs/project-root-env-var.md:
--------------------------------------------------------------------------------

```markdown
# BASIC_MEMORY_PROJECT_ROOT Environment Variable

**Status**: New Feature
**PR**: #334
**Use Case**: Security, containerization, path constraints

## What's New

v0.15.0 introduces the `BASIC_MEMORY_PROJECT_ROOT` environment variable to constrain all project paths to a specific directory. This provides security and enables safe multi-tenant deployments.

## Quick Examples

### Containerized Deployment

```bash
# Docker/containerized environment
export BASIC_MEMORY_PROJECT_ROOT=/app/data
export BASIC_MEMORY_HOME=/app/data/basic-memory

# All projects must be under /app/data
bm project add my-project /app/data/my-project    # ✓ Allowed
bm project add my-project /tmp/unsafe             # ✗ Blocked
```

### Development Environment

```bash
# Local development - no constraint (default)
# BASIC_MEMORY_PROJECT_ROOT not set

# Projects can be anywhere
bm project add work ~/Documents/work-notes    # ✓ Allowed
bm project add personal ~/personal-kb         # ✓ Allowed
```

## How It Works

### Path Validation

When `BASIC_MEMORY_PROJECT_ROOT` is set:

1. **All project paths are validated** against the root
2. **Paths are sanitized** to prevent directory traversal
3. **Symbolic links are resolved** and verified
4. **Escape attempts are blocked** (e.g., `../../../etc`)

### Path Sanitization

```python
from pathlib import Path

# Example internal validation
project_root = "/app/data"
user_path = "/app/data/../../../etc"

# Sanitized and validated
resolved_path = Path(user_path).resolve()
# → "/etc"

# Check if under project_root
if not str(resolved_path).startswith(project_root):
    raise ValueError("Path must be under /app/data")
```

## Configuration

### Set via Environment Variable

```bash
# In shell or .bashrc/.zshrc
export BASIC_MEMORY_PROJECT_ROOT=/app/data

# Or in Docker
docker run -e BASIC_MEMORY_PROJECT_ROOT=/app/data ...
```

### Docker Deployment

**Dockerfile:**
```dockerfile
# Set project root for path constraints
ENV BASIC_MEMORY_HOME=/app/data/basic-memory \
    BASIC_MEMORY_PROJECT_ROOT=/app/data
```

**docker-compose.yml:**
```yaml
services:
  basic-memory:
    environment:
      BASIC_MEMORY_HOME: /app/data/basic-memory
      BASIC_MEMORY_PROJECT_ROOT: /app/data
    volumes:
      - ./data:/app/data
```

### Kubernetes Deployment

```yaml
apiVersion: v1
kind: Pod
spec:
  containers:
  - name: basic-memory
    env:
    - name: BASIC_MEMORY_PROJECT_ROOT
      value: "/app/data"
    - name: BASIC_MEMORY_HOME
      value: "/app/data/basic-memory"
    volumeMounts:
    - name: data-volume
      mountPath: /app/data
```

## Use Cases

### 1. Container Security

**Problem:** Containers shouldn't create projects outside mounted volumes

**Solution:**
```bash
# Set project root to volume mount
export BASIC_MEMORY_PROJECT_ROOT=/app/data

# Projects confined to volume
bm project add notes /app/data/notes        # ✓
bm project add evil /etc/passwd             # ✗ Blocked
```

### 2. Multi-Tenant SaaS

**Problem:** Tenant A shouldn't access Tenant B's files

**Solution:**
```bash
# Per-tenant isolation
export BASIC_MEMORY_PROJECT_ROOT=/app/data/tenant-${TENANT_ID}

# Tenant can only create projects under their directory
bm project add my-notes /app/data/tenant-123/notes    # ✓
bm project add sneaky /app/data/tenant-456/notes      # ✗ Blocked
```

### 3. Shared Hosting

**Problem:** Users need isolated project spaces

**Solution:**
```bash
# Per-user isolation
export BASIC_MEMORY_PROJECT_ROOT=/home/${USER}/basic-memory

# User confined to their home directory
bm project add personal /home/alice/basic-memory/personal    # ✓
bm project add other /home/bob/basic-memory/data             # ✗ Blocked
```

## Relationship with BASIC_MEMORY_HOME

`BASIC_MEMORY_HOME` and `BASIC_MEMORY_PROJECT_ROOT` serve **different purposes**:

| Variable | Purpose | Default | Example |
|----------|---------|---------|---------|
| `BASIC_MEMORY_HOME` | Default project location | `~/basic-memory` | Where "main" project lives |
| `BASIC_MEMORY_PROJECT_ROOT` | Path constraint boundary | None (unrestricted) | Security boundary |

### Using Both Together

```bash
# Typical containerized setup
export BASIC_MEMORY_PROJECT_ROOT=/app/data          # Constraint: all under /app/data
export BASIC_MEMORY_HOME=/app/data/basic-memory     # Default: main project location

# This creates main project at /app/data/basic-memory
# And ensures all other projects are also under /app/data
```

### Key Differences

**BASIC_MEMORY_HOME:**
- Sets default project path
- Used for "main" project
- Does NOT enforce constraints
- Optional - defaults to `~/basic-memory`

**BASIC_MEMORY_PROJECT_ROOT:**
- Enforces path constraints
- Validates ALL project paths
- Prevents path traversal
- Optional - if not set, no constraints
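To make the distinction concrete, here is a small sketch of reading both variables at startup, mirroring the defaults in the table above (illustrative only, not the shipped config code):

```python
import os
from pathlib import Path

# Default project location; falls back to ~/basic-memory when unset
home = Path(os.environ.get("BASIC_MEMORY_HOME", str(Path.home() / "basic-memory")))

# Constraint boundary; None means "no path restriction"
root = os.environ.get("BASIC_MEMORY_PROJECT_ROOT")
project_root = Path(root).resolve() if root else None
```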

## Validation Examples

### Valid Paths (with PROJECT_ROOT=/app/data)

```bash
export BASIC_MEMORY_PROJECT_ROOT=/app/data

# Direct child
bm project add notes /app/data/notes              # ✓

# Nested child
bm project add work /app/data/projects/work       # ✓

# Relative path (resolves to /app/data/relative)
bm project add rel /app/data/relative             # ✓

# Symlink (resolves under /app/data)
ln -s /app/data/real /app/data/link
bm project add linked /app/data/link              # ✓
```

### Invalid Paths (with PROJECT_ROOT=/app/data)

```bash
export BASIC_MEMORY_PROJECT_ROOT=/app/data

# Path traversal attempt
bm project add evil /app/data/../../../etc
# ✗ Error: Path must be under /app/data

# Absolute path outside root
bm project add outside /tmp/data
# ✗ Error: Path must be under /app/data

# Symlink escaping root
ln -s /etc/passwd /app/data/evil
bm project add bad /app/data/evil
# ✗ Error: Path must be under /app/data

# Relative path escaping
bm project add sneaky /app/data/../../sneaky
# ✗ Error: Path must be under /app/data
```

## Error Messages

### Path Outside Root

```bash
$ bm project add test /tmp/test
Error: BASIC_MEMORY_PROJECT_ROOT is set to /app/data.
All projects must be created under this directory.
Invalid path: /tmp/test
```

### Escape Attempt Blocked

```bash
$ bm project add evil /app/data/../../../etc
Error: BASIC_MEMORY_PROJECT_ROOT is set to /app/data.
All projects must be created under this directory.
Invalid path: /etc
```

## Migration Guide

### Enabling PROJECT_ROOT on Existing Setup

If you have existing projects outside the desired root:

1. **Choose project root location**
   ```bash
   export BASIC_MEMORY_PROJECT_ROOT=/app/data
   ```

2. **Move existing projects**
   ```bash
   # Backup first
   cp -r ~/old-project /app/data/old-project
   ```

3. **Update config.json**
   ```bash
   # Edit ~/.basic-memory/config.json
   {
     "projects": {
       "main": "/app/data/basic-memory",
       "old-project": "/app/data/old-project"
     }
   }
   ```

4. **Verify paths**
   ```bash
   bm project list
   # All paths should be under /app/data
   ```

### Disabling PROJECT_ROOT

To remove constraints:

```bash
# Unset environment variable
unset BASIC_MEMORY_PROJECT_ROOT

# Or remove from Docker/config
# Now projects can be created anywhere again
```

## Testing Path Constraints

### Verify Configuration

```bash
# Check if PROJECT_ROOT is set
env | grep BASIC_MEMORY_PROJECT_ROOT

# Try creating project outside root (should fail)
bm project add test /tmp/test
```

### Docker Testing

```bash
# Run with constraint
docker run \
  -e BASIC_MEMORY_PROJECT_ROOT=/app/data \
  -v $(pwd)/data:/app/data \
  basic-memory:latest \
  bm project add notes /app/data/notes

# Verify in container
docker exec -it container_id env | grep PROJECT_ROOT
```

## Security Best Practices

1. **Always set in production**: Use PROJECT_ROOT in deployed environments
2. **Minimal permissions**: Set directory permissions to 700 or 750
3. **Audit project creation**: Log all project add/remove operations
4. **Regular validation**: Periodically check project paths haven't escaped (see the sketch below)
5. **Volume mounts**: Ensure PROJECT_ROOT matches Docker volume mounts
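For point 4, a small audit sketch that reads the project map from `~/.basic-memory/config.json` (format shown in the migration guide above) and flags any path resolving outside the root; this is an illustration, not a shipped command:

```python
import json
import os
from pathlib import Path

root = Path(os.environ["BASIC_MEMORY_PROJECT_ROOT"]).resolve()
config = json.loads((Path.home() / ".basic-memory" / "config.json").read_text())

for name, path in config.get("projects", {}).items():
    resolved = Path(path).resolve()
    if not resolved.is_relative_to(root):  # Path.is_relative_to requires Python 3.9+
        print(f"WARNING: project '{name}' resolves outside {root}: {resolved}")
```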

## Troubleshooting

### Projects Not Creating

**Problem:** Can't create projects with PROJECT_ROOT set

```bash
$ bm project add test /app/data/test
Error: Path must be under /app/data
```

**Solution:** Verify PROJECT_ROOT is correct
```bash
echo $BASIC_MEMORY_PROJECT_ROOT
# Should match expected path
```

### Paths Resolving Incorrectly

**Problem:** Symlinks not working as expected

**Solution:** Check symlink target
```bash
ls -la /app/data/link
# → /app/data/link -> /some/target

# Ensure target is under PROJECT_ROOT
realpath /app/data/link
```

### Docker Volume Issues

**Problem:** PROJECT_ROOT doesn't match volume mount

**Solution:** Align environment and volume
```yaml
# docker-compose.yml
environment:
  BASIC_MEMORY_PROJECT_ROOT: /app/data  # ← Must match volume mount
volumes:
  - ./data:/app/data                     # ← Mount point
```

## Implementation Details

### Path Sanitization Algorithm

```python
from pathlib import Path


def sanitize_and_validate_path(path: str, project_root: str) -> str:
    """Sanitize path and validate against project root."""
    # Convert to absolute path
    base_path = Path(project_root).resolve()
    target_path = Path(path).resolve()

    # Get as POSIX string for comparison
    resolved_path = target_path.as_posix()
    base_posix = base_path.as_posix()

    # Verify resolved path is under project_root
    if not resolved_path.startswith(base_posix):
        raise ValueError(
            f"BASIC_MEMORY_PROJECT_ROOT is set to {project_root}. "
            f"All projects must be created under this directory. "
            f"Invalid path: {path}"
        )

    return resolved_path
```

### Config Loading

```python
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class BasicMemoryConfig(BaseSettings):
    project_root: Optional[str] = Field(
        default=None,
        description="If set, all projects must be created underneath this directory"
    )

    model_config = SettingsConfigDict(
        env_prefix="BASIC_MEMORY_",  # Maps BASIC_MEMORY_PROJECT_ROOT
        extra="ignore",
    )
```

## See Also

- `basic-memory-home.md` - Default project location
- `env-var-overrides.md` - Environment variable precedence
- Docker deployment guide
- Security best practices

```

--------------------------------------------------------------------------------
/docs/Docker.md:
--------------------------------------------------------------------------------

```markdown
# Docker Setup Guide

Basic Memory can be run in Docker containers to provide a consistent, isolated environment for your knowledge management
system. This is particularly useful for integrating with existing Dockerized MCP servers or for deployment scenarios.

## Quick Start

### Option 1: Using Pre-built Images (Recommended)

Basic Memory provides pre-built Docker images on GitHub Container Registry that are automatically updated with each release.

1. **Use the official image directly:**
   ```bash
   docker run -d \
     --name basic-memory-server \
     -p 8000:8000 \
     -v /path/to/your/obsidian-vault:/app/data:rw \
     -v basic-memory-config:/app/.basic-memory:rw \
     ghcr.io/basicmachines-co/basic-memory:latest
   ```

2. **Or use Docker Compose with the pre-built image:**
   ```yaml
   version: '3.8'
   services:
     basic-memory:
       image: ghcr.io/basicmachines-co/basic-memory:latest
       container_name: basic-memory-server
       ports:
         - "8000:8000"
       volumes:
         - /path/to/your/obsidian-vault:/app/data:rw
         - basic-memory-config:/app/.basic-memory:rw
       environment:
         - BASIC_MEMORY_DEFAULT_PROJECT=main
       restart: unless-stopped
   ```

### Option 2: Using Docker Compose (Building Locally)

1. **Clone the repository:**
   ```bash
   git clone https://github.com/basicmachines-co/basic-memory.git
   cd basic-memory
   ```

2. **Update the docker-compose.yml:**
   Edit the volume mount to point to your Obsidian vault:
   ```yaml
   volumes:
     # Change './obsidian-vault' to your actual directory path
     - /path/to/your/obsidian-vault:/app/data:rw
   ```

3. **Start the container:**
   ```bash
   docker-compose up -d
   ```

### Option 3: Using Docker CLI

```bash
# Build the image
docker build -t basic-memory .

# Run with volume mounting
docker run -d \
  --name basic-memory-server \
  -v /path/to/your/obsidian-vault:/app/data:rw \
  -v basic-memory-config:/app/.basic-memory:rw \
  -e BASIC_MEMORY_DEFAULT_PROJECT=main \
  basic-memory
```

## Configuration

### Volume Mounts

Basic Memory requires several volume mounts for proper operation:

1. **Knowledge Directory** (Required):
   ```yaml
   - /path/to/your/obsidian-vault:/app/data:rw
   ```
   Mount your Obsidian vault or knowledge base directory.

2. **Configuration and Database** (Recommended):
   ```yaml
   - basic-memory-config:/app/.basic-memory:rw
   ```
   Persistent storage for configuration and SQLite database.

You can edit the Basic Memory config file at `/app/.basic-memory/config.json` after the container starts.

3. **Multiple Projects** (Optional):
   ```yaml
   - /path/to/project1:/app/data/project1:rw
   - /path/to/project2:/app/data/project2:rw
   ```

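After mounting additional directories, register each one as a project inside the container. A minimal sketch, assuming the container name `basic-memory-server` used in the examples above:

```bash
# Register each mounted directory as a Basic Memory project
docker exec basic-memory-server basic-memory project create project1 /app/data/project1
docker exec basic-memory-server basic-memory project create project2 /app/data/project2

# Re-index the new projects
docker exec basic-memory-server basic-memory sync
```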

## CLI Commands via Docker

You can run Basic Memory CLI commands inside the container using `docker exec`:

### Basic Commands

```bash
# Check status
docker exec basic-memory-server basic-memory status

# Sync files
docker exec basic-memory-server basic-memory sync

# Show help
docker exec basic-memory-server basic-memory --help
```

### Managing Projects with Volume Mounts

When using Docker volumes, you'll need to configure projects to point to your mounted directories:

1. **Check current configuration:**
   ```bash
   docker exec basic-memory-server cat /app/.basic-memory/config.json
   ```

2. **Add a project for your mounted volume:**
   ```bash
   # If you mounted /path/to/your/vault to /app/data
   docker exec basic-memory-server basic-memory project create my-vault /app/data
   
   # Set it as default
   docker exec basic-memory-server basic-memory project set-default my-vault
   ```

3. **Sync the new project:**
   ```bash
   docker exec basic-memory-server basic-memory sync
   ```

### Example: Setting up an Obsidian Vault

If you mounted your Obsidian vault like this in docker-compose.yml:
```yaml
volumes:
  - /Users/yourname/Documents/ObsidianVault:/app/data:rw
```

Then configure it:
```bash
# Create project pointing to mounted vault
docker exec basic-memory-server basic-memory project create obsidian /app/data

# Set as default
docker exec basic-memory-server basic-memory project set-default obsidian

# Sync to index all files
docker exec basic-memory-server basic-memory sync
```

### Environment Variables

Configure Basic Memory using environment variables:

```yaml
environment:

  # Default project
  - BASIC_MEMORY_DEFAULT_PROJECT=main

  # Enable real-time sync
  - BASIC_MEMORY_SYNC_CHANGES=true

  # Logging level
  - BASIC_MEMORY_LOG_LEVEL=INFO

  # Sync delay in milliseconds
  - BASIC_MEMORY_SYNC_DELAY=1000
```

## File Permissions

### Linux/macOS

The Docker container now runs as a non-root user to avoid file ownership issues. By default, the container uses UID/GID 1000, but you can customize this to match your user:

```bash
# Build with custom UID/GID to match your user
docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) -t basic-memory .

# Or use docker-compose with build args
```

**Example docker-compose.yml with custom user:**
```yaml
version: '3.8'
services:
  basic-memory:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        UID: 1000  # Replace with your UID
        GID: 1000  # Replace with your GID
    container_name: basic-memory-server
    ports:
      - "8000:8000"
    volumes:
      - /path/to/your/obsidian-vault:/app/data:rw
      - basic-memory-config:/app/.basic-memory:rw
    environment:
      - BASIC_MEMORY_DEFAULT_PROJECT=main
    restart: unless-stopped
```

**Using pre-built images:**
If using the pre-built image from GitHub Container Registry, files will be created with UID/GID 1000. You can either:

1. Change your local directory ownership to match:
   ```bash
   sudo chown -R 1000:1000 /path/to/your/obsidian-vault
   ```

2. Or build your own image with custom UID/GID as shown above.

### Windows

When using Docker Desktop on Windows, ensure the directories are shared:

1. Open Docker Desktop
2. Go to Settings → Resources → File Sharing
3. Add your knowledge directory path
4. Apply & Restart

## Troubleshooting

### Common Issues

1. **File Watching Not Working:**
    - Ensure volume mounts are read-write (`:rw`)
    - Check directory permissions
    - On Linux, you may need to increase inotify limits:
      ```bash
      echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
      sudo sysctl -p
      ```

2. **Configuration Not Persisting:**
    - Use named volumes for `/app/.basic-memory`
    - Check volume mount permissions (a quick check is sketched after this list)

3. **Network Connectivity:**
    - For HTTP transport, ensure port 8000 is exposed
    - Check firewall settings
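
A quick way to confirm configuration is actually being persisted, assuming the volume and container names used in the examples above:

```bash
# Inspect the named volume backing /app/.basic-memory
# (docker compose may prefix the volume name with the project name)
docker volume inspect basic-memory-config

# Confirm the config file exists inside the container
docker exec basic-memory-server ls -la /app/.basic-memory
```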

### Debug Mode

Run with debug logging:

```yaml
environment:
  - BASIC_MEMORY_LOG_LEVEL=DEBUG
```

View logs:

```bash
docker-compose logs -f basic-memory
```


## Security Considerations

1. **Docker Security:**
   The container runs as a non-root user (UID/GID 1000 by default) for improved security. You can customize the user ID using build arguments to match your local user.

2. **Volume Permissions:**
   Ensure mounted directories have appropriate permissions and don't expose sensitive data. With the non-root container, files will be created with the specified user ownership.

3. **Network Security:**
   If using HTTP transport, place the endpoint behind a reverse proxy with SSL/TLS and authentication whenever it is
   reachable from other machines on a network.

4. **IMPORTANT:** The HTTP endpoints have no authorization. They should not be exposed on a public network.  
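
One simple mitigation is to bind the published port to the loopback interface so the API is only reachable from the host. A sketch reusing the `docker run` flags from the Quick Start:

```bash
# Publish port 8000 on localhost only, so the unauthenticated HTTP
# endpoint is not reachable from other machines
docker run -d \
  --name basic-memory-server \
  -p 127.0.0.1:8000:8000 \
  -v /path/to/your/obsidian-vault:/app/data:rw \
  -v basic-memory-config:/app/.basic-memory:rw \
  ghcr.io/basicmachines-co/basic-memory:latest
```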

## Integration Examples

### Claude Desktop with Docker

The recommended way to connect Claude Desktop to the containerized Basic Memory is using `mcp-proxy`, which converts the HTTP transport to STDIO that Claude Desktop expects:

1. **Start the Docker container:**
   ```bash
   docker-compose up -d
   ```

2. **Configure Claude Desktop** to use mcp-proxy:
   ```json
   {
     "mcpServers": {
       "basic-memory": {
         "command": "uvx",
         "args": [
           "mcp-proxy",
           "http://localhost:8000/mcp"
         ]
       }
     }
   }
   ```
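
Before wiring up Claude Desktop, you can confirm the container's HTTP endpoint is reachable. Any HTTP response (even an error status, since the MCP endpoint expects MCP-formatted requests) shows the port is listening:

```bash
# "Connection refused" means the container or port mapping is not up
curl -i http://localhost:8000/mcp
```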


## Support

For Docker-specific issues:

1. Check the [troubleshooting section](#troubleshooting) above
2. Review container logs: `docker-compose logs basic-memory`
3. Verify volume mounts: `docker inspect basic-memory-server`
4. Test file permissions: `docker exec basic-memory-server ls -la /app`

For general Basic Memory support, see the main [README](../README.md)
and [documentation](https://memory.basicmachines.co/).

## GitHub Container Registry Images

### Available Images

Pre-built Docker images are available on GitHub Container Registry at [`ghcr.io/basicmachines-co/basic-memory`](https://github.com/basicmachines-co/basic-memory/pkgs/container/basic-memory).

**Supported architectures:**
- `linux/amd64` (Intel/AMD x64)
- `linux/arm64` (ARM64, including Apple Silicon)

**Available tags:**
- `latest` - Latest stable release
- `v0.13.8`, `v0.13.7`, etc. - Specific version tags
- `v0.13`, `v0.12`, etc. - Major.minor tags
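
For example, to pull a specific release instead of `latest`:

```bash
# Pin to an exact release (tag names follow the list above)
docker pull ghcr.io/basicmachines-co/basic-memory:v0.13.8
```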

### Automated Builds

Docker images are automatically built and published when new releases are tagged:

1. **Release Process:** When a git tag matching `v*` (e.g., `v0.13.8`) is pushed, the CI workflow automatically:
   - Builds multi-platform Docker images
   - Pushes to GitHub Container Registry with appropriate tags
   - Uses native GitHub integration for seamless publishing

2. **CI/CD Pipeline:** The Docker workflow includes:
   - Multi-platform builds (AMD64 and ARM64)
   - Layer caching for faster builds
   - Automatic tagging with semantic versioning
   - Security scanning and optimization

### Setup Requirements (For Maintainers)

GitHub Container Registry integration is automatic for this repository:

1. **No external setup required** - GHCR is natively integrated with GitHub
2. **Automatic permissions** - Uses `GITHUB_TOKEN` with `packages: write` permission
3. **Public by default** - Images are automatically public for public repositories

The Docker CI workflow (`.github/workflows/docker.yml`) handles everything automatically when version tags are pushed.
```

--------------------------------------------------------------------------------
/v15-docs/cloud-mode-usage.md:
--------------------------------------------------------------------------------

```markdown
# Using CLI Tools in Cloud Mode

**Status**: DEPRECATED - Use `cloud_mode` instead of `api_url`
**Related**: cloud-authentication.md, cloud-bisync.md

## DEPRECATION NOTICE

This document describes the old `api_url` / `BASIC_MEMORY_API_URL` approach which has been replaced by `cloud_mode` / `BASIC_MEMORY_CLOUD_MODE`.

**New approach:** Use `cloud_mode` config or `BASIC_MEMORY_CLOUD_MODE` environment variable instead.
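
A minimal sketch of the replacement approach (the config key is `cloud_mode`; per the login flow described elsewhere in these docs, a successful `bm cloud login` also enables cloud mode in the config):

```bash
# New approach: enable cloud mode via environment variable
export BASIC_MEMORY_CLOUD_MODE=true

# Or via ~/.basic-memory/config.json:
#   { "cloud_mode": true }

# Authenticate (on success this also enables cloud mode in config)
bm cloud login
```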

## Quick Start

### Enable Cloud Mode

```bash
# Set cloud API URL
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# Or in config.json
{
  "api_url": "https://api.basicmemory.cloud"
}

# Authenticate
bm cloud login

# Now CLI tools work against cloud
bm sync --project my-cloud-project
bm status
bm tools search --query "notes"
```

## How It Works

### Local vs Cloud Mode

**Local Mode (default):**
```
CLI Tools → Local ASGI Transport → Local API → Local SQLite + Files
```

**Cloud Mode (with api_url set):**
```
CLI Tools → HTTP Client → Cloud API → Cloud SQLite + Cloud Files
```

### Mode Detection

Basic Memory automatically detects mode:

```python
from basic_memory.config import ConfigManager

config = ConfigManager().config

if config.api_url:
    # Cloud mode: use HTTP client
    client = HTTPClient(base_url=config.api_url)
else:
    # Local mode: use ASGI transport
    client = ASGITransport(app=api_app)
```

## Configuration

### Via Environment Variable

```bash
# Set cloud API URL
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# All commands use cloud
bm sync
bm status
```

### Via Config File

Edit `~/.basic-memory/config.json`:

```json
{
  "api_url": "https://api.basicmemory.cloud",
  "cloud_client_id": "client_abc123",
  "cloud_domain": "https://auth.basicmemory.cloud",
  "cloud_host": "https://api.basicmemory.cloud"
}
```

### Temporary Override

```bash
# One-off cloud command
BASIC_MEMORY_API_URL=https://api.basicmemory.cloud bm sync --project notes

# Back to local mode
bm sync --project notes
```

## Available Commands in Cloud Mode

### Sync Commands

```bash
# Sync cloud project
bm sync --project cloud-project

# Sync specific project
bm sync --project work-notes

# Watch mode (cloud sync)
bm sync --watch --project notes
```

### Status Commands

```bash
# Check cloud sync status
bm status

# Shows cloud project status
```

### MCP Tools

```bash
# Search in cloud project
bm tools search \
  --query "authentication" \
  --project cloud-notes

# Continue conversation from cloud
bm tools continue-conversation \
  --topic "search implementation" \
  --project cloud-notes

# Basic Memory guide
bm tools basic-memory-guide
```

### Project Commands

```bash
# List cloud projects
bm project list

# Add cloud project (if permitted)
bm project add notes /app/data/notes

# Switch default project
bm project default notes
```

## Workflows

### Multi-Device Cloud Workflow

**Device A (Primary):**
```bash
# Configure cloud mode
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# Authenticate
bm cloud login

# Use bisync for primary work
bm cloud bisync-setup
bm sync --watch

# Local files in ~/basic-memory-cloud-sync/
# Synced bidirectionally with cloud
```

**Device B (Secondary):**
```bash
# Configure cloud mode
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# Authenticate
bm cloud login

# Work directly with cloud (no local sync)
bm tools search --query "meeting notes" --project work

# Or mount for file access
bm cloud mount
```

### Development vs Production

**Development (local):**
```bash
# Local mode
unset BASIC_MEMORY_API_URL

# Work with local files
bm sync
bm tools search --query "test"
```

**Production (cloud):**
```bash
# Cloud mode
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# Work with cloud data
bm sync --project production-kb
```

### Testing Cloud Integration

```bash
# Test against staging
export BASIC_MEMORY_API_URL=https://staging-api.basicmemory.cloud
bm cloud login
bm sync --project test-project

# Test against production
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
bm cloud login
bm sync --project prod-project
```

## MCP Integration

### Local MCP (default)

```json
// claude_desktop_config.json
{
  "mcpServers": {
    "basic-memory": {
      "command": "uvx",
      "args": ["basic-memory", "mcp"]
    }
  }
}
```

Uses local files via ASGI transport.

### Cloud MCP

```json
// claude_desktop_config.json
{
  "mcpServers": {
    "basic-memory-cloud": {
      "command": "uvx",
      "args": ["basic-memory", "mcp"],
      "env": {
        "BASIC_MEMORY_API_URL": "https://api.basicmemory.cloud"
      }
    }
  }
}
```

Uses cloud API via HTTP client.

### Hybrid Setup (Both)

```json
{
  "mcpServers": {
    "basic-memory-local": {
      "command": "uvx",
      "args": ["basic-memory", "mcp"]
    },
    "basic-memory-cloud": {
      "command": "uvx",
      "args": ["basic-memory", "mcp"],
      "env": {
        "BASIC_MEMORY_API_URL": "https://api.basicmemory.cloud"
      }
    }
  }
}
```

Access both local and cloud from same LLM.

## Authentication

### Cloud Mode Requires Authentication

```bash
# Must login first
bm cloud login

# Then cloud commands work
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
bm sync --project notes
```

### Token Management

Cloud mode uses JWT authentication:
- Token stored in `~/.basic-memory/cloud-auth.json`
- Auto-refreshed when expired
- Includes subscription validation
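
To confirm a token is present, check the token file; its internal JSON structure is not documented here, so this sketch only verifies existence and permissions:

```bash
# Token file should exist after `bm cloud login` and be private (mode 600)
ls -l ~/.basic-memory/cloud-auth.json
```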

### Authentication Flow

```bash
# 1. Login
bm cloud login
# → Opens browser for OAuth
# → Stores JWT token

# 2. Set cloud mode
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# 3. Use tools (automatically authenticated)
bm sync --project notes
# → Sends Authorization: Bearer {token} header
```

## Project Management in Cloud Mode

### Cloud Projects vs Local Projects

**Local mode:**
- Projects are local directories
- Defined in `~/.basic-memory/config.json`
- Full filesystem access

**Cloud mode:**
- Projects are cloud-managed
- Retrieved from cloud API
- Constrained by BASIC_MEMORY_PROJECT_ROOT on server

### Working with Cloud Projects

```bash
# Enable cloud mode
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# List cloud projects
bm project list
# → Fetches from cloud API

# Sync specific cloud project
bm sync --project cloud-notes
# → Syncs cloud project to cloud database

# Search in cloud project
bm tools search --query "auth" --project cloud-notes
# → Searches cloud-indexed content
```

## Switching Between Local and Cloud

### Switch to Cloud Mode

```bash
# Save local state
bm sync  # Ensure local is synced

# Switch to cloud
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
bm cloud login

# Work with cloud
bm sync --project cloud-project
```

### Switch to Local Mode

```bash
# Switch back to local
unset BASIC_MEMORY_API_URL

# Work with local files
bm sync --project local-project
```

### Context-Aware Scripts

```bash
#!/bin/bash

if [ -n "$BASIC_MEMORY_API_URL" ]; then
  echo "Cloud mode: $BASIC_MEMORY_API_URL"
  bm cloud login  # Ensure authenticated
else
  echo "Local mode"
fi

bm sync --project notes
```

## Performance Considerations

### Network Latency

Cloud mode requires network:
- API calls over HTTPS
- Latency depends on connection
- Slower than local ASGI transport

### Caching

MCP in cloud mode has limited caching:
- Results not cached locally
- Each request hits cloud API
- Consider using bisync for frequent access

### Best Practices

1. **Use bisync for primary work:**
   ```bash
   # Sync local copy
   bm cloud bisync

   # Work locally (fast)
   unset BASIC_MEMORY_API_URL
   bm tools search --query "notes"
   ```

2. **Use cloud mode for occasional access:**
   ```bash
   # Quick check from another device
   export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
   bm tools search --query "meeting" --project work
   ```

3. **Hybrid approach:**
   - Primary device: bisync for local work
   - Other devices: cloud mode for quick access

## Troubleshooting

### Not Authenticated Error

```bash
$ bm sync --project notes
Error: Not authenticated. Please run 'bm cloud login' first.
```

**Solution:**
```bash
bm cloud login
```

### Connection Refused

```bash
$ bm sync
Error: Connection refused: https://api.basicmemory.cloud
```

**Solutions:**
1. Check API URL: `echo $BASIC_MEMORY_API_URL`
2. Verify network: `curl https://api.basicmemory.cloud/health`
3. Check cloud status: https://status.basicmemory.com

### Wrong Projects Listed

**Problem:** `bm project list` shows unexpected projects

**Check mode:**
```bash
# What mode am I in?
echo $BASIC_MEMORY_API_URL

# If set → cloud projects
# If not set → local projects
```

**Solution:** Set/unset API_URL as needed

### Subscription Required

```bash
$ bm sync --project notes
Error: Active subscription required
Subscribe at: https://basicmemory.com/subscribe
```

**Solution:** Subscribe or renew subscription

## Configuration Examples

### Development Setup

```bash
# .bashrc / .zshrc
export BASIC_MEMORY_ENV=dev
export BASIC_MEMORY_LOG_LEVEL=DEBUG

# Local mode by default
# Cloud mode on demand
alias bm-cloud='BASIC_MEMORY_API_URL=https://api.basicmemory.cloud bm'
```

### Production Setup

```bash
# systemd service
[Service]
Environment="BASIC_MEMORY_API_URL=https://api.basicmemory.cloud"
Environment="BASIC_MEMORY_LOG_LEVEL=INFO"
ExecStart=/usr/local/bin/basic-memory serve
```

### Docker Setup

```yaml
# docker-compose.yml
services:
  basic-memory:
    environment:
      BASIC_MEMORY_API_URL: https://api.basicmemory.cloud
      BASIC_MEMORY_LOG_LEVEL: INFO
    volumes:
      - ./cloud-auth:/root/.basic-memory/cloud-auth.json:ro
```

## Security

### API Authentication

- All cloud API calls authenticated with JWT
- Token in Authorization header
- Subscription validated per request

### Network Security

- All traffic over HTTPS/TLS
- No credentials in URLs or logs
- Tokens stored securely (mode 600)

### Multi-Tenant Isolation

- Tenant ID from JWT claims
- Each request isolated to tenant
- Cannot access other tenants' data

## See Also

- `cloud-authentication.md` - Authentication setup
- `cloud-bisync.md` - Bidirectional sync workflow
- `cloud-mount.md` - Direct cloud file access
- MCP server configuration documentation

```

--------------------------------------------------------------------------------
/v15-docs/env-var-overrides.md:
--------------------------------------------------------------------------------

```markdown
# Environment Variable Overrides

**Status**: Fixed in v0.15.0
**PR**: #334 (part of PROJECT_ROOT implementation)

## What Changed

v0.15.0 fixes configuration loading to properly respect environment variable overrides. Environment variables with the `BASIC_MEMORY_` prefix now correctly override values in `config.json`.

## How It Works

### Precedence Order (Highest to Lowest)

1. **Environment Variables** (`BASIC_MEMORY_*`)
2. **Config File** (`~/.basic-memory/config.json`)
3. **Default Values** (Built-in defaults)

### Example

```bash
# config.json contains:
{
  "default_project": "main",
  "log_level": "INFO"
}

# Environment overrides:
export BASIC_MEMORY_DEFAULT_PROJECT=work
export BASIC_MEMORY_LOG_LEVEL=DEBUG

# Result:
# default_project = "work"     ← from env var
# log_level = "DEBUG"           ← from env var
```

## Environment Variable Naming

All environment variables use the prefix `BASIC_MEMORY_` followed by the config field name in UPPERCASE:

| Config Field | Environment Variable | Example |
|--------------|---------------------|---------|
| `default_project` | `BASIC_MEMORY_DEFAULT_PROJECT` | `BASIC_MEMORY_DEFAULT_PROJECT=work` |
| `log_level` | `BASIC_MEMORY_LOG_LEVEL` | `BASIC_MEMORY_LOG_LEVEL=DEBUG` |
| `project_root` | `BASIC_MEMORY_PROJECT_ROOT` | `BASIC_MEMORY_PROJECT_ROOT=/app/data` |
| `api_url` | `BASIC_MEMORY_API_URL` | `BASIC_MEMORY_API_URL=https://api.example.com` |
| `default_project_mode` | `BASIC_MEMORY_DEFAULT_PROJECT_MODE` | `BASIC_MEMORY_DEFAULT_PROJECT_MODE=true` |

## Common Use Cases

### Development vs Production

**Development (.env or shell):**
```bash
export BASIC_MEMORY_LOG_LEVEL=DEBUG
export BASIC_MEMORY_API_URL=http://localhost:8000
```

**Production (systemd/docker):**
```bash
export BASIC_MEMORY_LOG_LEVEL=INFO
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
export BASIC_MEMORY_PROJECT_ROOT=/app/data
```

### CI/CD Pipelines

```bash
# GitHub Actions
env:
  BASIC_MEMORY_ENV: test
  BASIC_MEMORY_LOG_LEVEL: DEBUG

# GitLab CI
variables:
  BASIC_MEMORY_ENV: test
  BASIC_MEMORY_PROJECT_ROOT: /builds/project/data
```

### Docker Deployments

```bash
# docker run
docker run \
  -e BASIC_MEMORY_HOME=/app/data/main \
  -e BASIC_MEMORY_PROJECT_ROOT=/app/data \
  -e BASIC_MEMORY_LOG_LEVEL=INFO \
  basic-memory:latest

# docker-compose.yml
services:
  basic-memory:
    environment:
      BASIC_MEMORY_HOME: /app/data/main
      BASIC_MEMORY_PROJECT_ROOT: /app/data
      BASIC_MEMORY_LOG_LEVEL: INFO
```

### Kubernetes

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: basic-memory-env
data:
  BASIC_MEMORY_LOG_LEVEL: "INFO"
  BASIC_MEMORY_PROJECT_ROOT: "/app/data"
---
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      containers:
      - name: basic-memory
        envFrom:
        - configMapRef:
            name: basic-memory-env
```

## Available Environment Variables

### Core Configuration

```bash
# Environment mode
export BASIC_MEMORY_ENV=user              # test, dev, user

# Project configuration
export BASIC_MEMORY_DEFAULT_PROJECT=main
export BASIC_MEMORY_DEFAULT_PROJECT_MODE=true

# Path constraints
export BASIC_MEMORY_HOME=/path/to/main
export BASIC_MEMORY_PROJECT_ROOT=/path/to/root
```

### Sync Configuration

```bash
# Sync behavior
export BASIC_MEMORY_SYNC_CHANGES=true
export BASIC_MEMORY_SYNC_DELAY=1000
export BASIC_MEMORY_SYNC_THREAD_POOL_SIZE=4

# Watch service
export BASIC_MEMORY_WATCH_PROJECT_RELOAD_INTERVAL=30
```

### Feature Flags

```bash
# Permalinks
export BASIC_MEMORY_UPDATE_PERMALINKS_ON_MOVE=false
export BASIC_MEMORY_DISABLE_PERMALINKS=false
export BASIC_MEMORY_KEBAB_FILENAMES=false

# Performance
export BASIC_MEMORY_SKIP_INITIALIZATION_SYNC=false
```

### API Configuration

```bash
# Remote API
export BASIC_MEMORY_API_URL=https://api.basicmemory.cloud

# Cloud configuration
export BASIC_MEMORY_CLOUD_CLIENT_ID=client_abc123
export BASIC_MEMORY_CLOUD_DOMAIN=https://auth.example.com
export BASIC_MEMORY_CLOUD_HOST=https://api.example.com
```

### Logging

```bash
# Log level
export BASIC_MEMORY_LOG_LEVEL=DEBUG       # DEBUG, INFO, WARNING, ERROR
```

## Override Examples

### Temporarily Override for Testing

```bash
# One-off override
BASIC_MEMORY_LOG_LEVEL=DEBUG bm sync

# Session override
export BASIC_MEMORY_DEFAULT_PROJECT=test-project
bm tools search --query "test"
unset BASIC_MEMORY_DEFAULT_PROJECT
```

### Override in Scripts

```bash
#!/bin/bash

# Override for this script execution
export BASIC_MEMORY_LOG_LEVEL=DEBUG
export BASIC_MEMORY_API_URL=http://localhost:8000

# Run commands
bm sync
bm tools search --query "development"
```

### Per-Environment Config

**~/.bashrc (development):**
```bash
export BASIC_MEMORY_ENV=dev
export BASIC_MEMORY_LOG_LEVEL=DEBUG
export BASIC_MEMORY_HOME=~/dev/basic-memory-dev
```

**Production systemd:**
```ini
[Service]
Environment="BASIC_MEMORY_ENV=user"
Environment="BASIC_MEMORY_LOG_LEVEL=INFO"
Environment="BASIC_MEMORY_HOME=/var/lib/basic-memory"
Environment="BASIC_MEMORY_PROJECT_ROOT=/var/lib"
```

## Verification

### Check Current Values

```bash
# View all BASIC_MEMORY_ env vars
env | grep BASIC_MEMORY_

# Check specific value
echo $BASIC_MEMORY_PROJECT_ROOT
```

### Verify Override Working

```python
from basic_memory.config import ConfigManager

# Load config
config = ConfigManager().config

# Check values
print(f"Project root: {config.project_root}")
print(f"Log level: {config.log_level}")
print(f"Default project: {config.default_project}")
```

### Debug Configuration Loading

```python
import os
from basic_memory.config import ConfigManager

# Check what env vars are set
env_vars = {k: v for k, v in os.environ.items() if k.startswith("BASIC_MEMORY_")}
print("Environment variables:", env_vars)

# Load config and see what won
config = ConfigManager().config
print("Resolved config:", config.model_dump())
```

## Migration from v0.14.x

### Previous Behavior (Bug)

In v0.14.x, environment variables were sometimes ignored:

```bash
# v0.14.x bug
export BASIC_MEMORY_PROJECT_ROOT=/app/data
# → config.json value used instead (wrong!)
```

### Fixed Behavior (v0.15.0+)

```bash
# v0.15.0+ correct
export BASIC_MEMORY_PROJECT_ROOT=/app/data
# → Environment variable properly overrides config.json
```

**No action needed** - Just verify env vars are working as expected.

## Configuration Loading Details

### Loading Process

1. **Load defaults** from Pydantic model
2. **Load config.json** if it exists
3. **Apply environment overrides** (BASIC_MEMORY_* variables)
4. **Validate and return** merged configuration

### Implementation

```python
import json
import os
from pathlib import Path

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class BasicMemoryConfig(BaseSettings):
    # Fields with defaults
    default_project: str = Field(default="main")
    log_level: str = "INFO"

    model_config = SettingsConfigDict(
        env_prefix="BASIC_MEMORY_",  # Maps env vars
        extra="ignore",
    )


# Loading logic (simplified)
class ConfigManager:
    def load_config(self) -> BasicMemoryConfig:
        config_file = Path.home() / ".basic-memory" / "config.json"

        # 1. Load file data
        file_data = json.loads(config_file.read_text())

        # 2. Load env data (BaseSettings reads BASIC_MEMORY_* from the environment)
        env_dict = BasicMemoryConfig().model_dump()

        # 3. Merge (env takes precedence)
        merged_data = file_data.copy()
        for field_name in BasicMemoryConfig.model_fields.keys():
            env_var_name = f"BASIC_MEMORY_{field_name.upper()}"
            if env_var_name in os.environ:
                merged_data[field_name] = env_dict[field_name]

        return BasicMemoryConfig(**merged_data)
```

## Troubleshooting

### Environment Variable Not Taking Effect

**Problem:** Set env var but config.json value still used

**Check:**
```bash
# Is the variable exported?
env | grep BASIC_MEMORY_PROJECT_ROOT

# Exact name (case-sensitive)?
export BASIC_MEMORY_PROJECT_ROOT=/app/data  # ✓
export basic_memory_project_root=/app/data  # ✗ (wrong case)
```

**Solution:** Ensure variable is exported and named correctly

### Config.json Overwriting Env Vars

**Problem:** Changing config.json overrides env vars

**v0.14.x:** This was a bug - config.json would override env vars

**v0.15.0+:** Fixed - env vars always win

**Verify:**
```python
import os
os.environ["BASIC_MEMORY_LOG_LEVEL"] = "DEBUG"

from basic_memory.config import ConfigManager
config = ConfigManager().config
print(config.log_level)  # Should be "DEBUG"
```

### Cache Issues

**Problem:** Changes not reflected after config update

**Solution:** Clear config cache
```python
from basic_memory import config as config_module
config_module._config = None  # Clear cache

# Reload
config = ConfigManager().config
```

## Best Practices

1. **Use env vars for environment-specific settings:**
   - Different values for dev/staging/prod
   - Secrets and credentials
   - Deployment-specific paths

2. **Use config.json for stable settings:**
   - User preferences
   - Project definitions (can be overridden by env)
   - Feature flags that rarely change

3. **Document required env vars:**
   - List in README or deployment docs
   - Provide a `.env.example` file (see the sketch after this list)

4. **Validate in scripts:**
   ```bash
   if [ -z "$BASIC_MEMORY_PROJECT_ROOT" ]; then
     echo "Error: BASIC_MEMORY_PROJECT_ROOT not set"
     exit 1
   fi
   ```

5. **Use consistent naming:**
   - Always use BASIC_MEMORY_ prefix
   - Match config.json field names (uppercase)
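
A minimal `.env.example` sketch using variable names from the tables above; the values are placeholders:

```bash
# .env.example — copy to .env and fill in real values
BASIC_MEMORY_ENV=user
BASIC_MEMORY_LOG_LEVEL=INFO
BASIC_MEMORY_DEFAULT_PROJECT=main
BASIC_MEMORY_PROJECT_ROOT=/app/data
# Uncomment for cloud deployments
# BASIC_MEMORY_API_URL=https://api.basicmemory.cloud
```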

## Security Considerations

1. **Never commit env vars with secrets:**
   ```bash
   # .env (not committed)
   BASIC_MEMORY_CLOUD_SECRET_KEY=secret123

   # .gitignore
   .env
   ```

2. **Use secret management for production:**
   ```bash
   # Kubernetes secrets
   kubectl create secret generic basic-memory-secrets \
     --from-literal=api-key=$API_KEY

   # Reference in deployment
   env:
   - name: BASIC_MEMORY_API_KEY
     valueFrom:
       secretKeyRef:
         name: basic-memory-secrets
         key: api-key
   ```

3. **Audit environment in logs:**
   ```python
   import os

   from loguru import logger  # or your preferred logger

   # Don't log secret values
   env_vars = {
       k: "***" if "SECRET" in k else v
       for k, v in os.environ.items()
       if k.startswith("BASIC_MEMORY_")
   }
   logger.info(f"Config loaded with env: {env_vars}")
   ```

## See Also

- `project-root-env-var.md` - BASIC_MEMORY_PROJECT_ROOT usage
- `basic-memory-home.md` - BASIC_MEMORY_HOME usage
- Configuration reference documentation

```

--------------------------------------------------------------------------------
/src/basic_memory/api/routers/knowledge_router.py:
--------------------------------------------------------------------------------

```python
"""Router for knowledge graph operations."""

from typing import Annotated

from fastapi import APIRouter, HTTPException, BackgroundTasks, Depends, Query, Response
from loguru import logger

from basic_memory.deps import (
    EntityServiceDep,
    get_search_service,
    SearchServiceDep,
    LinkResolverDep,
    ProjectPathDep,
    FileServiceDep,
    ProjectConfigDep,
    AppConfigDep,
    SyncServiceDep,
)
from basic_memory.schemas import (
    EntityListResponse,
    EntityResponse,
    DeleteEntitiesResponse,
    DeleteEntitiesRequest,
)
from basic_memory.schemas.request import EditEntityRequest, MoveEntityRequest
from basic_memory.schemas.base import Permalink, Entity

router = APIRouter(prefix="/knowledge", tags=["knowledge"])


async def resolve_relations_background(sync_service, entity_id: int, entity_permalink: str) -> None:
    """Background task to resolve relations for a specific entity.

    This runs asynchronously after the API response is sent, preventing
    long delays when creating entities with many relations.
    """
    try:
        # Only resolve relations for the newly created entity
        await sync_service.resolve_relations(entity_id=entity_id)
        logger.debug(
            f"Background: Resolved relations for entity {entity_permalink} (id={entity_id})"
        )
    except Exception as e:
        # Log but don't fail - this is a background task
        logger.warning(
            f"Background: Failed to resolve relations for entity {entity_permalink}: {e}"
        )


## Create endpoints


@router.post("/entities", response_model=EntityResponse)
async def create_entity(
    data: Entity,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    search_service: SearchServiceDep,
) -> EntityResponse:
    """Create an entity."""
    logger.info(
        "API request", endpoint="create_entity", entity_type=data.entity_type, title=data.title
    )

    entity = await entity_service.create_entity(data)

    # reindex
    await search_service.index_entity(entity, background_tasks=background_tasks)
    result = EntityResponse.model_validate(entity)

    logger.info(
        f"API response: endpoint='create_entity' title={result.title}, permalink={result.permalink}, status_code=201"
    )
    return result


@router.put("/entities/{permalink:path}", response_model=EntityResponse)
async def create_or_update_entity(
    project: ProjectPathDep,
    permalink: Permalink,
    data: Entity,
    response: Response,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    search_service: SearchServiceDep,
    file_service: FileServiceDep,
    sync_service: SyncServiceDep,
) -> EntityResponse:
    """Create or update an entity. If entity exists, it will be updated, otherwise created."""
    logger.info(
        f"API request: create_or_update_entity for {project=}, {permalink=}, {data.entity_type=}, {data.title=}"
    )

    # Validate permalink matches
    if data.permalink != permalink:
        logger.warning(
            f"API validation error: creating/updating entity with permalink mismatch - url={permalink}, data={data.permalink}",
        )
        raise HTTPException(
            status_code=400,
            detail=f"Entity permalink {data.permalink} must match URL path: '{permalink}'",
        )

    # Try create_or_update operation
    entity, created = await entity_service.create_or_update_entity(data)
    response.status_code = 201 if created else 200

    # reindex
    await search_service.index_entity(entity, background_tasks=background_tasks)

    # Schedule relation resolution as a background task for new entities
    # This prevents blocking the API response while resolving potentially many relations
    if created:
        background_tasks.add_task(
            resolve_relations_background, sync_service, entity.id, entity.permalink or ""
        )

    result = EntityResponse.model_validate(entity)

    logger.info(
        f"API response: {result.title=}, {result.permalink=}, {created=}, status_code={response.status_code}"
    )
    return result


@router.patch("/entities/{identifier:path}", response_model=EntityResponse)
async def edit_entity(
    identifier: str,
    data: EditEntityRequest,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    search_service: SearchServiceDep,
) -> EntityResponse:
    """Edit an existing entity using various operations like append, prepend, find_replace, or replace_section.

    This endpoint allows for targeted edits without requiring the full entity content.
    """
    logger.info(
        f"API request: endpoint='edit_entity', identifier='{identifier}', operation='{data.operation}'"
    )

    try:
        # Edit the entity using the service
        entity = await entity_service.edit_entity(
            identifier=identifier,
            operation=data.operation,
            content=data.content,
            section=data.section,
            find_text=data.find_text,
            expected_replacements=data.expected_replacements,
        )

        # Reindex the updated entity
        await search_service.index_entity(entity, background_tasks=background_tasks)

        # Return the updated entity response
        result = EntityResponse.model_validate(entity)

        logger.info(
            "API response",
            endpoint="edit_entity",
            identifier=identifier,
            operation=data.operation,
            permalink=result.permalink,
            status_code=200,
        )

        return result

    except Exception as e:
        logger.error(f"Error editing entity: {e}")
        raise HTTPException(status_code=400, detail=str(e))


@router.post("/move")
async def move_entity(
    data: MoveEntityRequest,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    project_config: ProjectConfigDep,
    app_config: AppConfigDep,
    search_service: SearchServiceDep,
) -> EntityResponse:
    """Move an entity to a new file location with project consistency.

    This endpoint moves a note to a different path while maintaining project
    consistency and optionally updating permalinks based on configuration.
    """
    logger.info(
        f"API request: endpoint='move_entity', identifier='{data.identifier}', destination='{data.destination_path}'"
    )

    try:
        # Move the entity using the service
        moved_entity = await entity_service.move_entity(
            identifier=data.identifier,
            destination_path=data.destination_path,
            project_config=project_config,
            app_config=app_config,
        )

        # Get the moved entity to reindex it
        entity = await entity_service.link_resolver.resolve_link(data.destination_path)
        if entity:
            await search_service.index_entity(entity, background_tasks=background_tasks)

        logger.info(
            "API response",
            endpoint="move_entity",
            identifier=data.identifier,
            destination=data.destination_path,
            status_code=200,
        )
        result = EntityResponse.model_validate(moved_entity)
        return result

    except Exception as e:
        logger.error(f"Error moving entity: {e}")
        raise HTTPException(status_code=400, detail=str(e))


## Read endpoints


@router.get("/entities/{identifier:path}", response_model=EntityResponse)
async def get_entity(
    entity_service: EntityServiceDep,
    link_resolver: LinkResolverDep,
    identifier: str,
) -> EntityResponse:
    """Get a specific entity by file path or permalink..

    Args:
        identifier: Entity file path or permalink
        :param entity_service: EntityService
        :param link_resolver: LinkResolver
    """
    logger.info(f"request: get_entity with identifier={identifier}")
    entity = await link_resolver.resolve_link(identifier)
    if not entity:
        raise HTTPException(status_code=404, detail=f"Entity {identifier} not found")

    result = EntityResponse.model_validate(entity)
    return result


@router.get("/entities", response_model=EntityListResponse)
async def get_entities(
    entity_service: EntityServiceDep,
    permalink: Annotated[list[str] | None, Query()] = None,
) -> EntityListResponse:
    """Open specific entities"""
    logger.info(f"request: get_entities with permalinks={permalink}")

    entities = await entity_service.get_entities_by_permalinks(permalink) if permalink else []
    result = EntityListResponse(
        entities=[EntityResponse.model_validate(entity) for entity in entities]
    )
    return result


## Delete endpoints


@router.delete("/entities/{identifier:path}", response_model=DeleteEntitiesResponse)
async def delete_entity(
    identifier: str,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    link_resolver: LinkResolverDep,
    search_service=Depends(get_search_service),
) -> DeleteEntitiesResponse:
    """Delete a single entity and remove from search index."""
    logger.info(f"request: delete_entity with identifier={identifier}")

    entity = await link_resolver.resolve_link(identifier)
    if entity is None:
        return DeleteEntitiesResponse(deleted=False)

    # Delete the entity
    deleted = await entity_service.delete_entity(entity.permalink or entity.id)

    # Remove from search index (entity, observations, and relations)
    background_tasks.add_task(search_service.handle_delete, entity)

    result = DeleteEntitiesResponse(deleted=deleted)
    return result


@router.post("/entities/delete", response_model=DeleteEntitiesResponse)
async def delete_entities(
    data: DeleteEntitiesRequest,
    background_tasks: BackgroundTasks,
    entity_service: EntityServiceDep,
    search_service=Depends(get_search_service),
) -> DeleteEntitiesResponse:
    """Delete entities and remove from search index."""
    logger.info(f"request: delete_entities with data={data}")
    deleted = False

    # Remove each deleted entity from search index
    for permalink in data.permalinks:
        deleted = await entity_service.delete_entity(permalink)
        background_tasks.add_task(search_service.delete_by_permalink, permalink)

    result = DeleteEntitiesResponse(deleted=deleted)
    return result

```

--------------------------------------------------------------------------------
/tests/cli/test_cloud_authentication.py:
--------------------------------------------------------------------------------

```python
"""Tests for cloud authentication and subscription validation."""

from unittest.mock import AsyncMock, Mock, patch

import httpx
import pytest
from typer.testing import CliRunner

from basic_memory.cli.app import app
from basic_memory.cli.commands.cloud.api_client import (
    CloudAPIError,
    SubscriptionRequiredError,
    make_api_request,
)


class TestAPIClientErrorHandling:
    """Tests for API client error handling."""

    @pytest.mark.asyncio
    async def test_parse_subscription_required_error(self):
        """Test parsing 403 subscription_required error response."""
        # Mock httpx response with subscription error
        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 403
        mock_response.json.return_value = {
            "detail": {
                "error": "subscription_required",
                "message": "Active subscription required for CLI access",
                "subscribe_url": "https://basicmemory.com/subscribe",
            }
        }
        mock_response.headers = {}

        # Create HTTPStatusError with the mock response
        http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)

        # Mock httpx client to raise the error
        with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
            mock_instance = AsyncMock()
            mock_instance.request = AsyncMock(side_effect=http_error)
            mock_client.return_value.__aenter__.return_value = mock_instance

            # Mock auth to return a token
            with patch(
                "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
                return_value={"Authorization": "Bearer test-token"},
            ):
                # Should raise SubscriptionRequiredError
                with pytest.raises(SubscriptionRequiredError) as exc_info:
                    await make_api_request("GET", "https://test.com/api/endpoint")

                # Verify exception details
                error = exc_info.value
                assert error.status_code == 403
                assert error.subscribe_url == "https://basicmemory.com/subscribe"
                assert "Active subscription required" in str(error)

    @pytest.mark.asyncio
    async def test_parse_subscription_required_error_flat_format(self):
        """Test parsing 403 subscription_required error in flat format (backward compatibility)."""
        # Mock httpx response with subscription error in flat format
        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 403
        mock_response.json.return_value = {
            "error": "subscription_required",
            "message": "Active subscription required",
            "subscribe_url": "https://basicmemory.com/subscribe",
        }
        mock_response.headers = {}

        # Create HTTPStatusError with the mock response
        http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)

        # Mock httpx client to raise the error
        with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
            mock_instance = AsyncMock()
            mock_instance.request = AsyncMock(side_effect=http_error)
            mock_client.return_value.__aenter__.return_value = mock_instance

            # Mock auth to return a token
            with patch(
                "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
                return_value={"Authorization": "Bearer test-token"},
            ):
                # Should raise SubscriptionRequiredError
                with pytest.raises(SubscriptionRequiredError) as exc_info:
                    await make_api_request("GET", "https://test.com/api/endpoint")

                # Verify exception details
                error = exc_info.value
                assert error.status_code == 403
                assert error.subscribe_url == "https://basicmemory.com/subscribe"

    @pytest.mark.asyncio
    async def test_parse_generic_403_error(self):
        """Test parsing 403 error without subscription_required flag."""
        # Mock httpx response with generic 403 error
        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 403
        mock_response.json.return_value = {
            "error": "forbidden",
            "message": "Access denied",
        }
        mock_response.headers = {}

        # Create HTTPStatusError with the mock response
        http_error = httpx.HTTPStatusError("403 Forbidden", request=Mock(), response=mock_response)

        # Mock httpx client to raise the error
        with patch("basic_memory.cli.commands.cloud.api_client.httpx.AsyncClient") as mock_client:
            mock_instance = AsyncMock()
            mock_instance.request = AsyncMock(side_effect=http_error)
            mock_client.return_value.__aenter__.return_value = mock_instance

            # Mock auth to return a token
            with patch(
                "basic_memory.cli.commands.cloud.api_client.get_authenticated_headers",
                return_value={"Authorization": "Bearer test-token"},
            ):
                # Should raise generic CloudAPIError
                with pytest.raises(CloudAPIError) as exc_info:
                    await make_api_request("GET", "https://test.com/api/endpoint")

                # Should not be a SubscriptionRequiredError
                error = exc_info.value
                assert not isinstance(error, SubscriptionRequiredError)
                assert error.status_code == 403


class TestLoginCommand:
    """Tests for cloud login command with subscription validation."""

    def test_login_without_subscription_shows_error(self):
        """Test login command displays error when subscription is required."""
        runner = CliRunner()

        # Mock successful OAuth login
        mock_auth = AsyncMock()
        mock_auth.login = AsyncMock(return_value=True)

        # Mock API request to raise SubscriptionRequiredError
        async def mock_make_api_request(*args, **kwargs):
            raise SubscriptionRequiredError(
                message="Active subscription required for CLI access",
                subscribe_url="https://basicmemory.com/subscribe",
            )

        with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
            with patch(
                "basic_memory.cli.commands.cloud.core_commands.make_api_request",
                side_effect=mock_make_api_request,
            ):
                with patch(
                    "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
                    return_value=("client_id", "domain", "https://cloud.example.com"),
                ):
                    # Run login command
                    result = runner.invoke(app, ["cloud", "login"])

                    # Should exit with error
                    assert result.exit_code == 1

                    # Should display subscription error
                    assert "Subscription Required" in result.stdout
                    assert "Active subscription required" in result.stdout
                    assert "https://basicmemory.com/subscribe" in result.stdout
                    assert "bm cloud login" in result.stdout

    def test_login_with_subscription_succeeds(self):
        """Test login command succeeds when user has active subscription."""
        runner = CliRunner()

        # Mock successful OAuth login
        mock_auth = AsyncMock()
        mock_auth.login = AsyncMock(return_value=True)

        # Mock successful API request (subscription valid)
        mock_response = Mock(spec=httpx.Response)
        mock_response.status_code = 200
        mock_response.json.return_value = {"status": "healthy"}

        async def mock_make_api_request(*args, **kwargs):
            return mock_response

        with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
            with patch(
                "basic_memory.cli.commands.cloud.core_commands.make_api_request",
                side_effect=mock_make_api_request,
            ):
                with patch(
                    "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
                    return_value=("client_id", "domain", "https://cloud.example.com"),
                ):
                    # Mock ConfigManager to avoid writing to real config
                    mock_config_manager = Mock()
                    mock_config = Mock()
                    mock_config.cloud_mode = False
                    mock_config_manager.load_config.return_value = mock_config
                    mock_config_manager.config = mock_config

                    with patch(
                        "basic_memory.cli.commands.cloud.core_commands.ConfigManager",
                        return_value=mock_config_manager,
                    ):
                        # Run login command
                        result = runner.invoke(app, ["cloud", "login"])

                        # Should succeed
                        assert result.exit_code == 0

                        # Should enable cloud mode
                        assert mock_config.cloud_mode is True
                        mock_config_manager.save_config.assert_called_once()

                        # Should display success message
                        assert "Cloud mode enabled" in result.stdout

    def test_login_authentication_failure(self):
        """Test login command handles authentication failure."""
        runner = CliRunner()

        # Mock failed OAuth login
        mock_auth = AsyncMock()
        mock_auth.login = AsyncMock(return_value=False)

        with patch("basic_memory.cli.commands.cloud.core_commands.CLIAuth", return_value=mock_auth):
            with patch(
                "basic_memory.cli.commands.cloud.core_commands.get_cloud_config",
                return_value=("client_id", "domain", "https://cloud.example.com"),
            ):
                # Run login command
                result = runner.invoke(app, ["cloud", "login"])

                # Should exit with error
                assert result.exit_code == 1

                # Should display login failed message
                assert "Login failed" in result.stdout

```

--------------------------------------------------------------------------------
/tests/repository/test_project_repository.py:
--------------------------------------------------------------------------------

```python
"""Tests for the ProjectRepository."""

from datetime import datetime, timezone
from pathlib import Path

import pytest
import pytest_asyncio
from sqlalchemy import select

from basic_memory import db
from basic_memory.models.project import Project
from basic_memory.repository.project_repository import ProjectRepository


@pytest_asyncio.fixture
async def sample_project(project_repository: ProjectRepository) -> Project:
    """Create a sample project for testing."""
    project_data = {
        "name": "Sample Project",
        "description": "A sample project",
        "path": "/sample/project/path",
        "is_active": True,
        "is_default": False,
        "created_at": datetime.now(timezone.utc),
        "updated_at": datetime.now(timezone.utc),
    }
    return await project_repository.create(project_data)


@pytest.mark.asyncio
async def test_create_project(project_repository: ProjectRepository):
    """Test creating a new project."""
    project_data = {
        "name": "Sample Project",
        "description": "A sample project",
        "path": "/sample/project/path",
        "is_active": True,
        "is_default": False,
    }
    project = await project_repository.create(project_data)

    # Verify returned object
    assert project.id is not None
    assert project.name == "Sample Project"
    assert project.description == "A sample project"
    assert project.path == "/sample/project/path"
    assert project.is_active is True
    assert project.is_default is False
    assert isinstance(project.created_at, datetime)
    assert isinstance(project.updated_at, datetime)

    # Verify permalink was generated correctly
    assert project.permalink == "sample-project"

    # Verify in database
    found = await project_repository.find_by_id(project.id)
    assert found is not None
    assert found.id == project.id
    assert found.name == project.name
    assert found.description == project.description
    assert found.path == project.path
    assert found.permalink == "sample-project"
    assert found.is_active is True
    assert found.is_default is False


@pytest.mark.asyncio
async def test_get_by_name(project_repository: ProjectRepository, sample_project: Project):
    """Test getting a project by name."""
    # Test exact match
    found = await project_repository.get_by_name(sample_project.name)
    assert found is not None
    assert found.id == sample_project.id
    assert found.name == sample_project.name

    # Test non-existent name
    found = await project_repository.get_by_name("Non-existent Project")
    assert found is None


@pytest.mark.asyncio
async def test_get_by_permalink(project_repository: ProjectRepository, sample_project: Project):
    """Test getting a project by permalink."""
    # Verify the permalink value
    assert sample_project.permalink == "sample-project"

    # Test exact match
    found = await project_repository.get_by_permalink(sample_project.permalink)
    assert found is not None
    assert found.id == sample_project.id
    assert found.permalink == sample_project.permalink

    # Test non-existent permalink
    found = await project_repository.get_by_permalink("non-existent-project")
    assert found is None


@pytest.mark.asyncio
async def test_get_by_path(project_repository: ProjectRepository, sample_project: Project):
    """Test getting a project by path."""
    # Test exact match
    found = await project_repository.get_by_path(sample_project.path)
    assert found is not None
    assert found.id == sample_project.id
    assert found.path == sample_project.path

    # Test with Path object
    found = await project_repository.get_by_path(Path(sample_project.path))
    assert found is not None
    assert found.id == sample_project.id
    assert found.path == sample_project.path

    # Test non-existent path
    found = await project_repository.get_by_path("/non/existent/path")
    assert found is None


@pytest.mark.asyncio
async def test_get_default_project(project_repository: ProjectRepository):
    """Test getting the default project."""
    # We already have a default project from the test_project fixture
    # So just create a non-default project
    non_default_project_data = {
        "name": "Non-Default Project",
        "description": "A non-default project",
        "path": "/non-default/project/path",
        "is_active": True,
        "is_default": None,  # Not the default project
    }

    await project_repository.create(non_default_project_data)

    # Get default project
    default_project = await project_repository.get_default_project()
    assert default_project is not None
    assert default_project.is_default is True


@pytest.mark.asyncio
async def test_get_active_projects(project_repository: ProjectRepository):
    """Test getting all active projects."""
    # Create active and inactive projects
    active_project_data = {
        "name": "Active Project",
        "description": "An active project",
        "path": "/active/project/path",
        "is_active": True,
    }
    inactive_project_data = {
        "name": "Inactive Project",
        "description": "An inactive project",
        "path": "/inactive/project/path",
        "is_active": False,
    }

    await project_repository.create(active_project_data)
    await project_repository.create(inactive_project_data)

    # Get active projects
    active_projects = await project_repository.get_active_projects()
    assert len(active_projects) >= 1  # Could be more from other tests

    # Verify that all returned projects are active
    for project in active_projects:
        assert project.is_active is True

    # Verify active project is included
    active_names = [p.name for p in active_projects]
    assert "Active Project" in active_names

    # Verify inactive project is not included
    assert "Inactive Project" not in active_names


@pytest.mark.asyncio
async def test_set_as_default(project_repository: ProjectRepository, test_project: Project):
    """Test setting a project as default."""
    # The test_project fixture is already the default
    # Create a non-default project
    project2_data = {
        "name": "Project 2",
        "description": "Project 2",
        "path": "/project2/path",
        "is_active": True,
        "is_default": None,  # Not default
    }

    # Get the existing default project
    project1 = test_project
    project2 = await project_repository.create(project2_data)

    # Verify initial state
    assert project1.is_default is True
    assert project2.is_default is None

    # Set project2 as default
    updated_project2 = await project_repository.set_as_default(project2.id)
    assert updated_project2 is not None
    assert updated_project2.is_default is True

    # Verify project1 is no longer default
    project1_updated = await project_repository.find_by_id(project1.id)
    assert project1_updated is not None
    assert project1_updated.is_default is None

    # Verify project2 is now default
    project2_updated = await project_repository.find_by_id(project2.id)
    assert project2_updated is not None
    assert project2_updated.is_default is True


@pytest.mark.asyncio
async def test_update_project(project_repository: ProjectRepository, sample_project: Project):
    """Test updating a project."""
    # Update project
    updated_data = {
        "name": "Updated Project Name",
        "description": "Updated description",
        "path": "/updated/path",
    }
    updated_project = await project_repository.update(sample_project.id, updated_data)

    # Verify returned object
    assert updated_project is not None
    assert updated_project.id == sample_project.id
    assert updated_project.name == "Updated Project Name"
    assert updated_project.description == "Updated description"
    assert updated_project.path == "/updated/path"

    # Verify permalink was updated based on new name
    assert updated_project.permalink == "updated-project-name"

    # Verify in database
    found = await project_repository.find_by_id(sample_project.id)
    assert found is not None
    assert found.name == "Updated Project Name"
    assert found.description == "Updated description"
    assert found.path == "/updated/path"
    assert found.permalink == "updated-project-name"

    # Verify we can find by the new permalink
    found_by_permalink = await project_repository.get_by_permalink("updated-project-name")
    assert found_by_permalink is not None
    assert found_by_permalink.id == sample_project.id


@pytest.mark.asyncio
async def test_delete_project(project_repository: ProjectRepository, sample_project: Project):
    """Test deleting a project."""
    # Delete project
    result = await project_repository.delete(sample_project.id)
    assert result is True

    # Verify deletion
    deleted = await project_repository.find_by_id(sample_project.id)
    assert deleted is None

    # Verify with direct database query
    async with db.scoped_session(project_repository.session_maker) as session:
        query = select(Project).filter(Project.id == sample_project.id)
        result = await session.execute(query)
        assert result.scalar_one_or_none() is None


@pytest.mark.asyncio
async def test_delete_nonexistent_project(project_repository: ProjectRepository):
    """Test deleting a project that doesn't exist."""
    result = await project_repository.delete(999)  # Non-existent ID
    assert result is False


@pytest.mark.asyncio
async def test_update_path(project_repository: ProjectRepository, sample_project: Project):
    """Test updating a project's path."""
    new_path = "/new/project/path"

    # Update the project path
    updated_project = await project_repository.update_path(sample_project.id, new_path)

    # Verify returned object
    assert updated_project is not None
    assert updated_project.id == sample_project.id
    assert updated_project.path == new_path
    assert updated_project.name == sample_project.name  # Other fields unchanged

    # Verify in database
    found = await project_repository.find_by_id(sample_project.id)
    assert found is not None
    assert found.path == new_path
    assert found.name == sample_project.name


@pytest.mark.asyncio
async def test_update_path_nonexistent_project(project_repository: ProjectRepository):
    """Test updating path for a project that doesn't exist."""
    result = await project_repository.update_path(999, "/some/path")  # Non-existent ID
    assert result is None

```

--------------------------------------------------------------------------------
/src/basic_memory/cli/auth.py:
--------------------------------------------------------------------------------

```python
"""WorkOS OAuth Device Authorization for CLI."""

import base64
import hashlib
import json
import os
import secrets
import time
import webbrowser

import httpx
from rich.console import Console

from basic_memory.config import ConfigManager

console = Console()


class CLIAuth:
    """Handles WorkOS OAuth Device Authorization for CLI tools."""

    def __init__(self, client_id: str, authkit_domain: str):
        self.client_id = client_id
        self.authkit_domain = authkit_domain
        app_config = ConfigManager().config
        # Store tokens in data dir
        self.token_file = app_config.data_dir_path / "basic-memory-cloud.json"
        # PKCE parameters
        self.code_verifier = None
        self.code_challenge = None

    def generate_pkce_pair(self) -> tuple[str, str]:
        """Generate PKCE code verifier and challenge."""
        # Generate code verifier (43-128 characters)
        code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).decode("utf-8")
        code_verifier = code_verifier.rstrip("=")

        # Generate code challenge (SHA256 hash of verifier)
        challenge_bytes = hashlib.sha256(code_verifier.encode("utf-8")).digest()
        code_challenge = base64.urlsafe_b64encode(challenge_bytes).decode("utf-8")
        code_challenge = code_challenge.rstrip("=")

        return code_verifier, code_challenge

    async def request_device_authorization(self) -> dict | None:
        """Request device authorization from WorkOS with PKCE."""
        device_auth_url = f"{self.authkit_domain}/oauth2/device_authorization"

        # Generate PKCE pair
        self.code_verifier, self.code_challenge = self.generate_pkce_pair()

        data = {
            "client_id": self.client_id,
            "scope": "openid profile email offline_access",
            "code_challenge": self.code_challenge,
            "code_challenge_method": "S256",
        }

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(device_auth_url, data=data)

                if response.status_code == 200:
                    return response.json()
                else:
                    console.print(
                        f"[red]Device authorization failed: {response.status_code} - {response.text}[/red]"
                    )
                    return None
        except Exception as e:
            console.print(f"[red]Device authorization error: {e}[/red]")
            return None

    def display_user_instructions(self, device_response: dict) -> None:
        """Display user instructions for device authorization."""
        user_code = device_response["user_code"]
        verification_uri = device_response["verification_uri"]
        verification_uri_complete = device_response.get("verification_uri_complete")

        console.print("\n[bold blue]Authentication Required[/bold blue]")
        console.print("\nTo authenticate, please visit:")
        console.print(f"[bold cyan]{verification_uri}[/bold cyan]")
        console.print(f"\nAnd enter this code: [bold yellow]{user_code}[/bold yellow]")

        if verification_uri_complete:
            console.print("\nOr for one-click access, visit:")
            console.print(f"[bold green]{verification_uri_complete}[/bold green]")

            # Try to open browser automatically
            try:
                console.print("\n[dim]Opening browser automatically...[/dim]")
                webbrowser.open(verification_uri_complete)
            except Exception:
                pass  # Silently fail if browser can't be opened

        console.print("\n[dim]Waiting for you to complete authentication in your browser...[/dim]")

    async def poll_for_token(self, device_code: str, interval: int = 5) -> dict | None:
        """Poll the token endpoint until user completes authentication."""
        token_url = f"{self.authkit_domain}/oauth2/token"

        data = {
            "client_id": self.client_id,
            "device_code": device_code,
            "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
            "code_verifier": self.code_verifier,
        }

        max_attempts = 60  # 5 minutes with 5-second intervals
        current_interval = interval

        for _attempt in range(max_attempts):
            try:
                async with httpx.AsyncClient() as client:
                    response = await client.post(token_url, data=data)

                    if response.status_code == 200:
                        return response.json()

                    # Parse error response
                    try:
                        error_data = response.json()
                        error = error_data.get("error")
                    except Exception:
                        error = "unknown_error"

                    if error == "authorization_pending":
                        # User hasn't completed auth yet, keep polling
                        pass
                    elif error == "slow_down":
                        # Increase polling interval
                        current_interval += 5
                        console.print("[yellow]Slowing down polling rate...[/yellow]")
                    elif error == "access_denied":
                        console.print("[red]Authentication was denied by user[/red]")
                        return None
                    elif error == "expired_token":
                        console.print("[red]Device code has expired. Please try again.[/red]")
                        return None
                    else:
                        console.print(f"[red]Token polling error: {error}[/red]")
                        return None

            except Exception as e:
                console.print(f"[red]Token polling request error: {e}[/red]")

            # Wait before next poll
            await self._async_sleep(current_interval)

        console.print("[red]Authentication timeout. Please try again.[/red]")
        return None

    async def _async_sleep(self, seconds: int) -> None:
        """Async sleep utility."""
        import asyncio

        await asyncio.sleep(seconds)

    def save_tokens(self, tokens: dict) -> None:
        """Save tokens to project root as .bm-auth.json."""
        token_data = {
            "access_token": tokens["access_token"],
            "refresh_token": tokens.get("refresh_token"),
            "expires_at": int(time.time()) + tokens.get("expires_in", 3600),
            "token_type": tokens.get("token_type", "Bearer"),
        }

        with open(self.token_file, "w") as f:
            json.dump(token_data, f, indent=2)

        # Secure the token file
        os.chmod(self.token_file, 0o600)

        console.print(f"[green]Tokens saved to {self.token_file}[/green]")

    def load_tokens(self) -> dict | None:
        """Load tokens from .bm-auth.json file."""
        if not self.token_file.exists():
            return None

        try:
            with open(self.token_file) as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            return None

    def is_token_valid(self, tokens: dict) -> bool:
        """Check if stored token is still valid."""
        expires_at = tokens.get("expires_at", 0)
        # Add 60 second buffer for clock skew
        return time.time() < (expires_at - 60)

    async def refresh_token(self, refresh_token: str) -> dict | None:
        """Refresh access token using refresh token."""
        token_url = f"{self.authkit_domain}/oauth2/token"

        data = {
            "client_id": self.client_id,
            "grant_type": "refresh_token",
            "refresh_token": refresh_token,
        }

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(token_url, data=data)

                if response.status_code == 200:
                    return response.json()
                else:
                    console.print(
                        f"[red]Token refresh failed: {response.status_code} - {response.text}[/red]"
                    )
                    return None
        except Exception as e:
            console.print(f"[red]Token refresh error: {e}[/red]")
            return None

    async def get_valid_token(self) -> str | None:
        """Get valid access token, refresh if needed."""
        tokens = self.load_tokens()
        if not tokens:
            return None

        if self.is_token_valid(tokens):
            return tokens["access_token"]

        # Token expired - try to refresh if we have a refresh token
        refresh_token = tokens.get("refresh_token")
        if refresh_token:
            console.print("[yellow]Access token expired, refreshing...[/yellow]")

            new_tokens = await self.refresh_token(refresh_token)
            if new_tokens:
                # Save new tokens (may include rotated refresh token)
                self.save_tokens(new_tokens)
                console.print("[green]Token refreshed successfully[/green]")
                return new_tokens["access_token"]
            else:
                console.print("[yellow]Token refresh failed. Please run 'login' again.[/yellow]")
                return None
        else:
            console.print("[yellow]No refresh token available. Please run 'login' again.[/yellow]")
            return None

    async def login(self) -> bool:
        """Perform OAuth Device Authorization login flow."""
        console.print("[blue]Initiating authentication...[/blue]")

        # Step 1: Request device authorization
        device_response = await self.request_device_authorization()
        if not device_response:
            return False

        # Step 2: Display user instructions
        self.display_user_instructions(device_response)

        # Step 3: Poll for token
        device_code = device_response["device_code"]
        interval = device_response.get("interval", 5)

        tokens = await self.poll_for_token(device_code, interval)
        if not tokens:
            return False

        # Step 4: Save tokens
        self.save_tokens(tokens)

        console.print("\n[green]Successfully authenticated with Basic Memory Cloud![/green]")
        return True

    def logout(self) -> None:
        """Remove stored authentication tokens."""
        if self.token_file.exists():
            self.token_file.unlink()
            console.print("[green]Logged out successfully[/green]")
        else:
            console.print("[yellow]No stored authentication found[/yellow]")

```

--------------------------------------------------------------------------------
/v15-docs/gitignore-integration.md:
--------------------------------------------------------------------------------

```markdown
# .gitignore Integration

**Status**: New Feature
**PR**: #314
**Impact**: Improved security and reduced noise

## What's New

v0.15.0 integrates `.gitignore` support into the sync process. Files matching patterns in `.gitignore` are automatically skipped during synchronization, preventing sensitive files and build artifacts from being indexed.

## How It Works

### Ignore Pattern Sources

Basic Memory combines patterns from two sources:

1. **Global user patterns**: `~/.basic-memory/.bmignore`
   - User's personal ignore patterns
   - Applied to all projects
   - Useful for global exclusions (OS files, editor configs)

2. **Project-specific patterns**: `{project}/.gitignore`
   - Project's standard gitignore file
   - Applied to that project only
   - Follows standard gitignore syntax

### Automatic .gitignore Respect

When syncing, Basic Memory:
1. Loads patterns from `~/.basic-memory/.bmignore` (if exists)
2. Loads patterns from `.gitignore` in project root (if exists)
3. Combines both pattern sets
4. Skips files matching any pattern
5. Does not index ignored files

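For a concrete picture of the mechanics, here is a minimal sketch using the helpers from `basic_memory.ignore_utils` (the function names and signatures match the tests later on this page). How the global `~/.basic-memory/.bmignore` is merged in is not shown here, so treat that part as handled upstream.

```python
from pathlib import Path

from basic_memory.ignore_utils import load_gitignore_patterns, filter_files

project_root = Path.home() / "basic-memory"

# Built-in defaults plus the project's .gitignore (if present)
patterns = load_gitignore_patterns(project_root)

# Candidate files found by the scan (illustrative only)
candidates = [p for p in project_root.rglob("*") if p.is_file()]

# Files matching any pattern are dropped before indexing
files_to_index, ignored_count = filter_files(candidates, project_root, patterns)
print(f"indexing {len(files_to_index)} files, skipped {ignored_count}")
```
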
### Pattern Matching

Uses standard gitignore syntax:
```gitignore
# Comments are ignored
*.log                    # Ignore all .log files
build/                   # Ignore build directory
node_modules/           # Ignore node_modules
.env                    # Ignore .env files
!important.log          # Exception: don't ignore this file
```

## Benefits

### 1. Security

**Prevents indexing sensitive files:**
```gitignore
# Sensitive files automatically skipped
.env
.env.*
secrets.json
credentials/
*.key
*.pem
cloud-auth.json
```

**Result:** Secrets never indexed or synced

### 2. Performance

**Skips unnecessary files:**
```gitignore
# Build artifacts and caches
node_modules/
__pycache__/
.pytest_cache/
dist/
build/
*.pyc
```

**Result:** Faster sync, smaller database

### 3. Reduced Noise

**Ignores OS and editor files:**
```gitignore
# macOS
.DS_Store
.AppleDouble

# Linux
*~
.directory

# Windows
Thumbs.db
desktop.ini

# Editors
.vscode/
.idea/
*.swp
```

**Result:** Cleaner knowledge base

## Setup

### Default Behavior

If no `.gitignore` exists, Basic Memory uses built-in patterns:

```gitignore
# Default patterns
.git
.DS_Store
node_modules
__pycache__
.pytest_cache
.env
```

### Global .bmignore (Optional)

Create global ignore patterns for all projects:

```bash
# Create global ignore file
cat > ~/.basic-memory/.bmignore <<'EOF'
# OS files (apply to all projects)
.DS_Store
.AppleDouble
Thumbs.db
desktop.ini
*~

# Editor files (apply to all projects)
.vscode/
.idea/
*.swp
*.swo

# Always ignore these
.env
.env.*
*.secret
EOF
```

**Use cases:**
- Personal preferences (editor configs)
- OS-specific files
- Global security rules

### Project-Specific .gitignore

Create `.gitignore` in project root for project-specific patterns:

```bash
# Create .gitignore
cat > ~/basic-memory/.gitignore <<'EOF'
# Project-specific secrets
credentials.json
*.key

# Project build artifacts
dist/
build/
*.pyc
__pycache__/
node_modules/

# Project-specific temp files
*.tmp
*.cache
EOF
```

**Use cases:**
- Build artifacts
- Dependencies (node_modules, venv)
- Project-specific secrets

### Sync with .gitignore and .bmignore

```bash
# Sync respects both .bmignore and .gitignore
bm sync

# Ignored files are skipped
# → ".DS_Store skipped (global .bmignore)"
# → ".env skipped (gitignored)"
# → "node_modules/ skipped (gitignored)"
```

**Pattern precedence:**
1. Global `.bmignore` patterns checked first
2. Project `.gitignore` patterns checked second
3. If either matches, file is skipped

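That precedence can be expressed directly in code: a file is skipped as soon as either pattern set matches. The sketch below is illustrative rather than the shipped implementation; in particular, reading `~/.basic-memory/.bmignore` as a plain list of gitignore-style patterns is an assumption based on the description above.

```python
from pathlib import Path

from basic_memory.ignore_utils import load_gitignore_patterns, should_ignore_path


def load_global_bmignore() -> set[str]:
    """Read global patterns from ~/.basic-memory/.bmignore (assumed plain gitignore syntax)."""
    bmignore = Path.home() / ".basic-memory" / ".bmignore"
    if not bmignore.exists():
        return set()
    lines = (line.strip() for line in bmignore.read_text().splitlines())
    return {line for line in lines if line and not line.startswith("#")}


def is_skipped(path: Path, project_root: Path) -> bool:
    # 1. Global .bmignore patterns checked first
    if should_ignore_path(path, project_root, load_global_bmignore()):
        return True
    # 2. Project .gitignore (plus built-in defaults) checked second
    return should_ignore_path(path, project_root, load_gitignore_patterns(project_root))
```
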
## Use Cases

### Git Repository as Knowledge Base

Perfect synergy when using git for version control:

```bash
# Project structure
~/my-knowledge/
├── .git/              # ← git repo
├── .gitignore         # ← shared ignore rules
├── notes/
│   ├── public.md      # ← synced
│   └── private.md     # ← synced
├── .env               # ← ignored by git AND sync
└── build/             # ← ignored by git AND sync
```

**Benefits:**
- Same ignore rules for git and sync
- Consistent behavior
- No sensitive files in either system

### Sensitive Information

```gitignore
# .gitignore
*.key
*.pem
credentials.json
secrets/
.env*
```

**Result:**
```bash
$ bm sync
Syncing...
→ Skipped: api-key.pem (gitignored)
→ Skipped: .env (gitignored)
→ Skipped: secrets/passwords.txt (gitignored)
✓ Synced 15 files (3 skipped)
```

### Development Environment

```gitignore
# Project-specific
node_modules/
venv/
.venv/
__pycache__/
*.pyc
.pytest_cache/
.coverage
.tox/
dist/
build/
*.egg-info/
```

**Result:** Clean knowledge base without dev noise

## Pattern Examples

### Common Patterns

**Secrets:**
```gitignore
.env
.env.*
*.key
*.pem
*secret*
*password*
credentials.json
auth.json
```

**Build Artifacts:**
```gitignore
dist/
build/
*.o
*.pyc
*.class
*.jar
node_modules/
__pycache__/
```

**OS Files:**
```gitignore
.DS_Store
.AppleDouble
.LSOverride
Thumbs.db
desktop.ini
*~
```

**Editors:**
```gitignore
.vscode/
.idea/
*.swp
*.swo
*~
.project
.settings/
```

### Advanced Patterns

**Exceptions (!):**
```gitignore
# Ignore all logs
*.log

# EXCEPT this one
!important.log
```

**Directory-specific:**
```gitignore
# Ignore only in root
/.env

# Ignore everywhere
**/.env
```

**Wildcards:**
```gitignore
# Multiple extensions (list each one; gitignore does not support brace expansion)
*.log
*.tmp
*.cache

# Specific patterns
test_*.py
*_backup.*
```

## Integration with Cloud Sync

### .bmignore Files Overview

Basic Memory uses `.bmignore` in two contexts:

1. **Global user patterns**: `~/.basic-memory/.bmignore`
   - Used for **local sync**
   - Standard gitignore syntax
   - Applied to all projects

2. **Cloud bisync filters**: `.bmignore.rclone`
   - Used for **cloud sync**
   - rclone filter format
   - Auto-generated from .gitignore patterns

### Automatic Pattern Conversion

Cloud bisync converts .gitignore to rclone filter format:

```bash
# Source: .gitignore (standard gitignore syntax)
node_modules/
*.log
.env

# Generated: .bmignore.rclone (rclone filter format)
- node_modules/**
- *.log
- .env
```

**Automatic conversion:** Basic Memory handles conversion during cloud sync

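The mapping shown above is simple enough to sketch in a few lines of Python. This is only an illustration of the rule (directory patterns gain a `/**` suffix and every pattern becomes an rclone exclude filter), not the converter Basic Memory actually ships, and it ignores negation (`!pattern`) rules.

```python
from pathlib import Path


def gitignore_to_rclone_filters(gitignore: Path) -> list[str]:
    """Translate gitignore patterns into rclone exclude filters (sketch)."""
    filters = []
    for line in gitignore.read_text().splitlines():
        pattern = line.strip()
        if not pattern or pattern.startswith("#"):
            continue  # skip blanks and comments
        if pattern.endswith("/"):
            # Directory pattern: exclude everything underneath it
            filters.append(f"- {pattern}**")
        else:
            filters.append(f"- {pattern}")
    return filters


# node_modules/ -> "- node_modules/**", *.log -> "- *.log", .env -> "- .env"
print("\n".join(gitignore_to_rclone_filters(Path(".gitignore"))))
```
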
### Sync Workflow

1. **Local sync** (respects .bmignore + .gitignore)
   ```bash
   bm sync
   # → Loads ~/.basic-memory/.bmignore (global)
   # → Loads {project}/.gitignore (project-specific)
   # → Skips files matching either
   ```

2. **Cloud bisync** (respects .bmignore.rclone)
   ```bash
   bm cloud bisync
   # → Generates .bmignore.rclone from .gitignore
   # → Uses rclone filters for cloud sync
   # → Skips same files as local sync
   ```

**Result:** Consistent ignore behavior across local and cloud sync

## Verification

### Check What's Ignored

```bash
# Dry-run sync to see what's skipped
bm sync --dry-run

# Output shows:
# → Syncing: notes/ideas.md
# → Skipped: .env (gitignored)
# → Skipped: node_modules/package.json (gitignored)
```

### List Ignore Patterns

```bash
# View .gitignore
cat .gitignore

# View effective patterns
bm sync --show-patterns
```

### Test Pattern Matching

```bash
# Check if file matches pattern
git check-ignore -v path/to/file

# Example:
git check-ignore -v .env
# → .gitignore:5:.env    .env
```

## Migration

### From v0.14.x

**Before v0.15.0:**
- .gitignore patterns not respected
- All files synced, including ignored ones
- Manual exclude rules needed

**v0.15.0+:**
- .gitignore automatically respected
- Ignored files skipped
- No manual configuration needed

**Action:** Just add/update .gitignore - next sync uses it

### Cleaning Up Already-Indexed Files

If ignored files were previously synced:

```bash
# Option 1: Re-sync (re-indexes from scratch)
bm sync --force-resync

# Option 2: Delete and re-sync specific project
bm project remove old-project
bm project add clean-project ~/basic-memory
bm sync --project clean-project
```

## Troubleshooting

### File Not Being Ignored

**Problem:** File still synced despite being in .gitignore

**Check:**
1. Is .gitignore in project root?
   ```bash
   ls -la ~/basic-memory/.gitignore
   ```

2. Is pattern correct?
   ```bash
   # Test pattern
   git check-ignore -v path/to/file
   ```

3. Is file already indexed?
   ```bash
   # Force resync
   bm sync --force-resync
   ```

### Pattern Not Matching

**Problem:** Pattern doesn't match expected files

**Common issues:**
```gitignore
# ✗ Wrong: Won't match subdirectories
node_modules

# ✓ Correct: Matches recursively
node_modules/
**/node_modules/

# ✗ Wrong: Only matches in root
/.env

# ✓ Correct: Matches everywhere
.env
**/.env
```

### .gitignore Not Found

**Problem:** No .gitignore file exists

**Solution:**
```bash
# Create default .gitignore
cat > ~/basic-memory/.gitignore <<'EOF'
.git
.DS_Store
.env
node_modules/
__pycache__/
EOF

# Re-sync
bm sync
```

## Best Practices

### 1. Use Global .bmignore for Personal Preferences

Set global patterns once, apply to all projects:

```bash
# Create global ignore file
cat > ~/.basic-memory/.bmignore <<'EOF'
# Personal editor/OS preferences
.DS_Store
.vscode/
.idea/
*.swp

# Never sync these anywhere
.env
.env.*
EOF
```

### 2. Use .gitignore for Project-Specific Patterns

Even if you are not using git, create a `.gitignore` for project-specific sync patterns:

```bash
# Create project .gitignore
cat > .gitignore <<'EOF'
# Project build artifacts
dist/
node_modules/
__pycache__/

# Project secrets
credentials.json
*.key
EOF
```

### 3. Ignore Secrets First

Start with security (both global and project-specific):
```bash
# Global: ~/.basic-memory/.bmignore
.env*
*.key
*.pem

# Project: .gitignore
credentials.json
secrets/
api-keys.txt
```

### 4. Ignore Build Artifacts

Reduce noise in project .gitignore:
```gitignore
# Build outputs
dist/
build/
node_modules/
__pycache__/
*.pyc
```

### 5. Use Standard Templates

Start with community templates for .gitignore:
- [GitHub .gitignore templates](https://github.com/github/gitignore)
- Language-specific ignores (Python, Node, etc.)
- Framework-specific ignores

### 6. Test Your Patterns

```bash
# Verify pattern works
git check-ignore -v file.log

# Test sync
bm sync --dry-run
```

## See Also

- `cloud-bisync.md` - Cloud sync and .bmignore.rclone conversion
- `env-file-removal.md` - Why .env files should be ignored
- gitignore documentation: https://git-scm.com/docs/gitignore
- GitHub gitignore templates: https://github.com/github/gitignore

## Summary

Basic Memory provides flexible ignore patterns through:
- **Global**: `~/.basic-memory/.bmignore` - personal preferences across all projects
- **Project**: `.gitignore` - project-specific patterns
- **Cloud**: `.bmignore.rclone` - auto-generated for cloud sync

Use global .bmignore for OS/editor files, project .gitignore for build artifacts and secrets.

```

--------------------------------------------------------------------------------
/tests/cli/test_ignore_utils.py:
--------------------------------------------------------------------------------

```python
"""Tests for ignore_utils module."""

import tempfile
from pathlib import Path

from basic_memory.ignore_utils import (
    DEFAULT_IGNORE_PATTERNS,
    load_gitignore_patterns,
    should_ignore_path,
    filter_files,
)


def test_load_default_patterns_only():
    """Test loading default patterns when no .gitignore exists."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)
        patterns = load_gitignore_patterns(temp_path)

        # Should include all default patterns
        assert DEFAULT_IGNORE_PATTERNS.issubset(patterns)
        # Should only have default patterns (no custom ones)
        assert patterns == DEFAULT_IGNORE_PATTERNS


def test_load_patterns_with_gitignore():
    """Test loading patterns from .gitignore file."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create a .gitignore file
        gitignore_content = """
# Python
*.pyc
__pycache__/

# Node
node_modules/
*.log

# Custom
secrets/
temp_*
"""
        (temp_path / ".gitignore").write_text(gitignore_content)

        patterns = load_gitignore_patterns(temp_path)

        # Should include default patterns
        assert DEFAULT_IGNORE_PATTERNS.issubset(patterns)

        # Should include custom patterns from .gitignore
        assert "*.pyc" in patterns
        assert "__pycache__/" in patterns
        assert "node_modules/" in patterns
        assert "*.log" in patterns
        assert "secrets/" in patterns
        assert "temp_*" in patterns

        # Should skip comments and empty lines
        assert "# Python" not in patterns
        assert "# Node" not in patterns
        assert "# Custom" not in patterns


def test_load_patterns_empty_gitignore():
    """Test loading patterns with empty .gitignore file."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create empty .gitignore file
        (temp_path / ".gitignore").write_text("")

        patterns = load_gitignore_patterns(temp_path)

        # Should only have default patterns
        assert patterns == DEFAULT_IGNORE_PATTERNS


def test_load_patterns_unreadable_gitignore():
    """Test graceful handling of unreadable .gitignore file."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create .gitignore file with restricted permissions
        gitignore_file = temp_path / ".gitignore"
        gitignore_file.write_text("*.log")
        gitignore_file.chmod(0o000)  # No read permissions

        try:
            patterns = load_gitignore_patterns(temp_path)

            # On Windows, chmod might not work as expected, so we need to check
            # if the file is actually unreadable
            try:
                with gitignore_file.open("r"):
                    pass
                # If we can read it, the test environment doesn't support this scenario
                # In this case, the patterns should include *.log
                assert "*.log" in patterns
            except (PermissionError, OSError):
                # File is actually unreadable, should fallback to default patterns only
                assert patterns == DEFAULT_IGNORE_PATTERNS
                assert "*.log" not in patterns
        finally:
            # Restore permissions for cleanup
            gitignore_file.chmod(0o644)


def test_should_ignore_default_patterns():
    """Test ignoring files matching default patterns."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        patterns = DEFAULT_IGNORE_PATTERNS

        test_cases = [
            # Git directory
            (temp_path / ".git" / "config", True),
            # Python artifacts
            (temp_path / "main.pyc", True),
            (temp_path / "__pycache__" / "main.cpython-39.pyc", True),
            (temp_path / "src" / "__pycache__" / "module.pyc", True),
            # Virtual environments
            (temp_path / ".venv" / "lib" / "python.so", True),
            (temp_path / "venv" / "bin" / "python", True),
            (temp_path / "env" / "lib" / "site-packages", True),
            # Node.js
            (temp_path / "node_modules" / "package" / "index.js", True),
            # IDE files
            (temp_path / ".idea" / "workspace.xml", True),
            (temp_path / ".vscode" / "settings.json", True),
            # OS files
            (temp_path / ".DS_Store", True),
            (temp_path / "Thumbs.db", True),
            # Valid files that should NOT be ignored
            (temp_path / "main.py", False),
            (temp_path / "README.md", False),
            (temp_path / "src" / "module.py", False),
            (temp_path / "package.json", False),
        ]

        for file_path, should_be_ignored in test_cases:
            result = should_ignore_path(file_path, temp_path, patterns)
            assert result == should_be_ignored, (
                f"Failed for {file_path}: expected {should_be_ignored}, got {result}"
            )


def test_should_ignore_glob_patterns():
    """Test glob pattern matching."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        patterns = {"*.log", "temp_*", "test*.txt"}

        test_cases = [
            (temp_path / "debug.log", True),
            (temp_path / "app.log", True),
            (temp_path / "sub" / "error.log", True),
            (temp_path / "temp_file.txt", True),
            (temp_path / "temp_123", True),
            (temp_path / "test_data.txt", True),
            (temp_path / "testfile.txt", True),
            (temp_path / "app.txt", False),
            (temp_path / "file.py", False),
            (temp_path / "data.json", False),
        ]

        for file_path, should_be_ignored in test_cases:
            result = should_ignore_path(file_path, temp_path, patterns)
            assert result == should_be_ignored, f"Failed for {file_path}"


def test_should_ignore_directory_patterns():
    """Test directory pattern matching (ending with /)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        patterns = {"build/", "dist/", "logs/"}

        test_cases = [
            (temp_path / "build" / "output.js", True),
            (temp_path / "dist" / "main.css", True),
            (temp_path / "logs" / "app.log", True),
            (temp_path / "src" / "build" / "file.js", True),  # Nested
            (temp_path / "build.py", False),  # File with same name
            (temp_path / "build_script.sh", False),  # Similar name
            (temp_path / "src" / "main.py", False),  # Different directory
        ]

        for file_path, should_be_ignored in test_cases:
            result = should_ignore_path(file_path, temp_path, patterns)
            assert result == should_be_ignored, f"Failed for {file_path}"


def test_should_ignore_root_relative_patterns():
    """Test patterns starting with / (root relative)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        patterns = {"/config.txt", "/build/", "/tmp/*.log"}

        test_cases = [
            (temp_path / "config.txt", True),  # Root level
            (temp_path / "build" / "app.js", True),  # Root level directory
            (temp_path / "tmp" / "debug.log", True),  # Root level with glob
            (temp_path / "src" / "config.txt", False),  # Not at root
            (temp_path / "project" / "build" / "file.js", False),  # Not at root
            (temp_path / "data" / "tmp" / "app.log", False),  # Not at root
        ]

        for file_path, should_be_ignored in test_cases:
            result = should_ignore_path(file_path, temp_path, patterns)
            assert result == should_be_ignored, f"Failed for {file_path}"


def test_should_ignore_invalid_relative_path():
    """Test handling of paths that cannot be made relative to base."""
    patterns = {"*.pyc"}

    # File outside of base path should not be ignored
    base_path = Path("/tmp/project")
    file_path = Path("/home/user/file.pyc")

    result = should_ignore_path(file_path, base_path, patterns)
    assert result is False


def test_filter_files_with_patterns():
    """Test filtering files with given patterns."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create test files
        files = [
            temp_path / "main.py",
            temp_path / "main.pyc",
            temp_path / "__pycache__" / "module.pyc",
            temp_path / "README.md",
            temp_path / ".git" / "config",
            temp_path / "package.json",
        ]

        # Ensure parent directories exist
        for file_path in files:
            file_path.parent.mkdir(parents=True, exist_ok=True)
            file_path.write_text("test content")

        patterns = {"*.pyc", "__pycache__", ".git"}
        filtered_files, ignored_count = filter_files(files, temp_path, patterns)

        # Should keep valid files
        expected_kept = [
            temp_path / "main.py",
            temp_path / "README.md",
            temp_path / "package.json",
        ]

        assert len(filtered_files) == 3
        assert set(filtered_files) == set(expected_kept)
        assert ignored_count == 3  # main.pyc, module.pyc, config


def test_filter_files_no_patterns():
    """Test filtering with no patterns (should keep all files)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        files = [
            temp_path / "main.py",
            temp_path / "main.pyc",
            temp_path / "README.md",
        ]

        patterns = set()
        filtered_files, ignored_count = filter_files(files, temp_path, patterns)

        assert len(filtered_files) == 3
        assert set(filtered_files) == set(files)
        assert ignored_count == 0


def test_filter_files_with_gitignore_loading():
    """Test filtering with automatic .gitignore loading."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Create .gitignore
        gitignore_content = """
*.log
temp_*
"""
        (temp_path / ".gitignore").write_text(gitignore_content)

        # Create test files
        files = [
            temp_path / "app.py",
            temp_path / "debug.log",
            temp_path / "temp_file.txt",
            temp_path / "README.md",
        ]

        # Ensure files exist
        for file_path in files:
            file_path.write_text("test content")

        filtered_files, ignored_count = filter_files(files, temp_path)  # patterns=None

        # Should ignore .log files and temp_* files, plus default patterns
        expected_kept = [temp_path / "app.py", temp_path / "README.md"]

        assert len(filtered_files) == 2
        assert set(filtered_files) == set(expected_kept)
        assert ignored_count == 2  # debug.log, temp_file.txt

```

--------------------------------------------------------------------------------
/tests/test_rclone_commands.py:
--------------------------------------------------------------------------------

```python
"""Test project-scoped rclone commands."""

from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from basic_memory.cli.commands.cloud.rclone_commands import (
    RcloneError,
    SyncProject,
    bisync_initialized,
    get_project_bisync_state,
    get_project_remote,
    project_bisync,
    project_check,
    project_ls,
    project_sync,
)


def test_sync_project_dataclass():
    """Test SyncProject dataclass."""
    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/Users/test/research",
    )

    assert project.name == "research"
    assert project.path == "app/data/research"
    assert project.local_sync_path == "/Users/test/research"


def test_sync_project_optional_local_path():
    """Test SyncProject with optional local_sync_path."""
    project = SyncProject(
        name="research",
        path="app/data/research",
    )

    assert project.name == "research"
    assert project.path == "app/data/research"
    assert project.local_sync_path is None


def test_get_project_remote():
    """Test building rclone remote path with normalized path."""
    # Path comes from API already normalized (no /app/data/ prefix)
    project = SyncProject(name="research", path="/research")

    remote = get_project_remote(project, "my-bucket")

    assert remote == "basic-memory-cloud:my-bucket/research"


def test_get_project_remote_strips_app_data_prefix():
    """Test that /app/data/ prefix is stripped from cloud path."""
    # If API returns path with /app/data/, it should be stripped
    project = SyncProject(name="research", path="/app/data/research")

    remote = get_project_remote(project, "my-bucket")

    # Should strip /app/data/ prefix to get actual S3 path
    assert remote == "basic-memory-cloud:my-bucket/research"


def test_get_project_bisync_state():
    """Test getting bisync state directory path."""
    state_path = get_project_bisync_state("research")

    expected = Path.home() / ".basic-memory" / "bisync-state" / "research"
    assert state_path == expected


def test_bisync_initialized_false_when_not_exists(tmp_path, monkeypatch):
    """Test bisync_initialized returns False when state doesn't exist."""
    # Patch to use tmp directory
    monkeypatch.setattr(
        "basic_memory.cli.commands.cloud.rclone_commands.get_project_bisync_state",
        lambda project_name: tmp_path / project_name,
    )

    assert bisync_initialized("research") is False


def test_bisync_initialized_false_when_empty(tmp_path, monkeypatch):
    """Test bisync_initialized returns False when state directory is empty."""
    state_dir = tmp_path / "research"
    state_dir.mkdir()

    monkeypatch.setattr(
        "basic_memory.cli.commands.cloud.rclone_commands.get_project_bisync_state",
        lambda project_name: tmp_path / project_name,
    )

    assert bisync_initialized("research") is False


def test_bisync_initialized_true_when_has_files(tmp_path, monkeypatch):
    """Test bisync_initialized returns True when state has files."""
    state_dir = tmp_path / "research"
    state_dir.mkdir()
    (state_dir / "state.lst").touch()

    monkeypatch.setattr(
        "basic_memory.cli.commands.cloud.rclone_commands.get_project_bisync_state",
        lambda project_name: tmp_path / project_name,
    )

    assert bisync_initialized("research") is True


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_sync_success(mock_run):
    """Test successful project sync."""
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="/research",  # Normalized path from API
        local_sync_path="/tmp/research",
    )

    result = project_sync(project, "my-bucket", dry_run=True)

    assert result is True
    mock_run.assert_called_once()

    # Check command arguments
    cmd = mock_run.call_args[0][0]
    assert cmd[0] == "rclone"
    assert cmd[1] == "sync"
    # Use Path for cross-platform comparison (Windows uses backslashes)
    assert Path(cmd[2]) == Path("/tmp/research")
    assert cmd[3] == "basic-memory-cloud:my-bucket/research"
    assert "--dry-run" in cmd


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_sync_with_verbose(mock_run):
    """Test project sync with verbose flag."""
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    project_sync(project, "my-bucket", verbose=True)

    cmd = mock_run.call_args[0][0]
    assert "--verbose" in cmd
    assert "--progress" not in cmd


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_sync_with_progress(mock_run):
    """Test project sync with progress (default)."""
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    project_sync(project, "my-bucket")

    cmd = mock_run.call_args[0][0]
    assert "--progress" in cmd
    assert "--verbose" not in cmd


def test_project_sync_no_local_path():
    """Test project sync raises error when local_sync_path not configured."""
    project = SyncProject(name="research", path="app/data/research")

    with pytest.raises(RcloneError) as exc_info:
        project_sync(project, "my-bucket")

    assert "no local_sync_path configured" in str(exc_info.value)


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
@patch("basic_memory.cli.commands.cloud.rclone_commands.bisync_initialized")
def test_project_bisync_success(mock_bisync_init, mock_run):
    """Test successful project bisync."""
    mock_bisync_init.return_value = True  # Already initialized
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    result = project_bisync(project, "my-bucket")

    assert result is True
    mock_run.assert_called_once()

    # Check command arguments
    cmd = mock_run.call_args[0][0]
    assert cmd[0] == "rclone"
    assert cmd[1] == "bisync"
    assert "--conflict-resolve=newer" in cmd
    assert "--max-delete=25" in cmd
    assert "--resilient" in cmd


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
@patch("basic_memory.cli.commands.cloud.rclone_commands.bisync_initialized")
def test_project_bisync_requires_resync_first_time(mock_bisync_init, mock_run):
    """Test that first bisync requires --resync flag."""
    mock_bisync_init.return_value = False  # Not initialized

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    with pytest.raises(RcloneError) as exc_info:
        project_bisync(project, "my-bucket")

    assert "requires --resync" in str(exc_info.value)
    mock_run.assert_not_called()


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
@patch("basic_memory.cli.commands.cloud.rclone_commands.bisync_initialized")
def test_project_bisync_with_resync_flag(mock_bisync_init, mock_run):
    """Test bisync with --resync flag for first time."""
    mock_bisync_init.return_value = False  # Not initialized
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    result = project_bisync(project, "my-bucket", resync=True)

    assert result is True
    cmd = mock_run.call_args[0][0]
    assert "--resync" in cmd


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
@patch("basic_memory.cli.commands.cloud.rclone_commands.bisync_initialized")
def test_project_bisync_dry_run_skips_init_check(mock_bisync_init, mock_run):
    """Test that dry-run skips initialization check."""
    mock_bisync_init.return_value = False  # Not initialized
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    # Should not raise error even though not initialized
    result = project_bisync(project, "my-bucket", dry_run=True)

    assert result is True
    cmd = mock_run.call_args[0][0]
    assert "--dry-run" in cmd


def test_project_bisync_no_local_path():
    """Test project bisync raises error when local_sync_path not configured."""
    project = SyncProject(name="research", path="app/data/research")

    with pytest.raises(RcloneError) as exc_info:
        project_bisync(project, "my-bucket")

    assert "no local_sync_path configured" in str(exc_info.value)


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_check_success(mock_run):
    """Test successful project check."""
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    result = project_check(project, "my-bucket")

    assert result is True
    cmd = mock_run.call_args[0][0]
    assert cmd[0] == "rclone"
    assert cmd[1] == "check"


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_check_with_one_way(mock_run):
    """Test project check with one-way flag."""
    mock_run.return_value = MagicMock(returncode=0)

    project = SyncProject(
        name="research",
        path="app/data/research",
        local_sync_path="/tmp/research",
    )

    project_check(project, "my-bucket", one_way=True)

    cmd = mock_run.call_args[0][0]
    assert "--one-way" in cmd


def test_project_check_no_local_path():
    """Test project check raises error when local_sync_path not configured."""
    project = SyncProject(name="research", path="app/data/research")

    with pytest.raises(RcloneError) as exc_info:
        project_check(project, "my-bucket")

    assert "no local_sync_path configured" in str(exc_info.value)


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_ls_success(mock_run):
    """Test successful project ls."""
    mock_run.return_value = MagicMock(returncode=0, stdout="file1.md\nfile2.md\nsubdir/file3.md\n")

    project = SyncProject(name="research", path="app/data/research")

    files = project_ls(project, "my-bucket")

    assert len(files) == 3
    assert "file1.md" in files
    assert "file2.md" in files
    assert "subdir/file3.md" in files


@patch("basic_memory.cli.commands.cloud.rclone_commands.subprocess.run")
def test_project_ls_with_subpath(mock_run):
    """Test project ls with subdirectory."""
    mock_run.return_value = MagicMock(returncode=0, stdout="")

    project = SyncProject(name="research", path="/research")  # Normalized path

    project_ls(project, "my-bucket", path="subdir")

    cmd = mock_run.call_args[0][0]
    assert cmd[-1] == "basic-memory-cloud:my-bucket/research/subdir"

```
Page 6/17