# basicmachines-co/basic-memory
tokens: 49492/50000 36/416 files (page 3/27)
This is page 3 of 27. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── commands
│   │   ├── release
│   │   │   ├── beta.md
│   │   │   ├── changelog.md
│   │   │   ├── release-check.md
│   │   │   └── release.md
│   │   ├── spec.md
│   │   └── test-live.md
│   └── settings.json
├── .dockerignore
├── .env.example
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── documentation.md
│   │   └── feature_request.md
│   └── workflows
│       ├── claude-code-review.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── dev-release.yml
│       ├── docker.yml
│       ├── pr-title.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose-postgres.yml
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── ai-assistant-guide-extended.md
│   ├── ARCHITECTURE.md
│   ├── character-handling.md
│   ├── cloud-cli.md
│   ├── Docker.md
│   └── testing-coverage.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│   ├── SPEC-1 Specification-Driven Development Process.md
│   ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│   ├── SPEC-11 Basic Memory API Performance Optimization.md
│   ├── SPEC-12 OpenTelemetry Observability.md
│   ├── SPEC-13 CLI Authentication with Subscription Validation.md
│   ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│   ├── SPEC-16 MCP Cloud Service Consolidation.md
│   ├── SPEC-17 Semantic Search with ChromaDB.md
│   ├── SPEC-18 AI Memory Management Tool.md
│   ├── SPEC-19 Sync Performance and Memory Optimization.md
│   ├── SPEC-2 Slash Commands Reference.md
│   ├── SPEC-20 Simplified Project-Scoped Rclone Sync.md
│   ├── SPEC-3 Agent Definitions.md
│   ├── SPEC-4 Notes Web UI Component Architecture.md
│   ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│   ├── SPEC-6 Explicit Project Parameter Architecture.md
│   ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│   ├── SPEC-8 TigrisFS Integration.md
│   ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│   ├── SPEC-9 Signed Header Tenant Information.md
│   └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│   └── basic_memory
│       ├── __init__.py
│       ├── alembic
│       │   ├── alembic.ini
│       │   ├── env.py
│       │   ├── migrations.py
│       │   ├── script.py.mako
│       │   └── versions
│       │       ├── 314f1ea54dc4_add_postgres_full_text_search_support_.py
│       │       ├── 3dae7c7b1564_initial_schema.py
│       │       ├── 502b60eaa905_remove_required_from_entity_permalink.py
│       │       ├── 5fe1ab1ccebe_add_projects_table.py
│       │       ├── 647e7a75e2cd_project_constraint_fix.py
│       │       ├── 6830751f5fb6_merge_multiple_heads.py
│       │       ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│       │       ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│       │       ├── a2b3c4d5e6f7_add_search_index_entity_cascade.py
│       │       ├── b3c3938bacdb_relation_to_name_unique_index.py
│       │       ├── cc7172b46608_update_search_index_schema.py
│       │       ├── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│       │       ├── f8a9b2c3d4e5_add_pg_trgm_for_fuzzy_link_resolution.py
│       │       └── g9a0b3c4d5e6_add_external_id_to_project_and_entity.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── container.py
│       │   ├── routers
│       │   │   ├── __init__.py
│       │   │   ├── directory_router.py
│       │   │   ├── importer_router.py
│       │   │   ├── knowledge_router.py
│       │   │   ├── management_router.py
│       │   │   ├── memory_router.py
│       │   │   ├── project_router.py
│       │   │   ├── prompt_router.py
│       │   │   ├── resource_router.py
│       │   │   ├── search_router.py
│       │   │   └── utils.py
│       │   ├── template_loader.py
│       │   └── v2
│       │       ├── __init__.py
│       │       └── routers
│       │           ├── __init__.py
│       │           ├── directory_router.py
│       │           ├── importer_router.py
│       │           ├── knowledge_router.py
│       │           ├── memory_router.py
│       │           ├── project_router.py
│       │           ├── prompt_router.py
│       │           ├── resource_router.py
│       │           └── search_router.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── auth.py
│       │   ├── commands
│       │   │   ├── __init__.py
│       │   │   ├── cloud
│       │   │   │   ├── __init__.py
│       │   │   │   ├── api_client.py
│       │   │   │   ├── bisync_commands.py
│       │   │   │   ├── cloud_utils.py
│       │   │   │   ├── core_commands.py
│       │   │   │   ├── rclone_commands.py
│       │   │   │   ├── rclone_config.py
│       │   │   │   ├── rclone_installer.py
│       │   │   │   ├── upload_command.py
│       │   │   │   └── upload.py
│       │   │   ├── command_utils.py
│       │   │   ├── db.py
│       │   │   ├── format.py
│       │   │   ├── import_chatgpt.py
│       │   │   ├── import_claude_conversations.py
│       │   │   ├── import_claude_projects.py
│       │   │   ├── import_memory_json.py
│       │   │   ├── mcp.py
│       │   │   ├── project.py
│       │   │   ├── status.py
│       │   │   ├── telemetry.py
│       │   │   └── tool.py
│       │   ├── container.py
│       │   └── main.py
│       ├── config.py
│       ├── db.py
│       ├── deps
│       │   ├── __init__.py
│       │   ├── config.py
│       │   ├── db.py
│       │   ├── importers.py
│       │   ├── projects.py
│       │   ├── repositories.py
│       │   └── services.py
│       ├── deps.py
│       ├── file_utils.py
│       ├── ignore_utils.py
│       ├── importers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chatgpt_importer.py
│       │   ├── claude_conversations_importer.py
│       │   ├── claude_projects_importer.py
│       │   ├── memory_json_importer.py
│       │   └── utils.py
│       ├── markdown
│       │   ├── __init__.py
│       │   ├── entity_parser.py
│       │   ├── markdown_processor.py
│       │   ├── plugins.py
│       │   ├── schemas.py
│       │   └── utils.py
│       ├── mcp
│       │   ├── __init__.py
│       │   ├── async_client.py
│       │   ├── clients
│       │   │   ├── __init__.py
│       │   │   ├── directory.py
│       │   │   ├── knowledge.py
│       │   │   ├── memory.py
│       │   │   ├── project.py
│       │   │   ├── resource.py
│       │   │   └── search.py
│       │   ├── container.py
│       │   ├── project_context.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── ai_assistant_guide.py
│       │   │   ├── continue_conversation.py
│       │   │   ├── recent_activity.py
│       │   │   ├── search.py
│       │   │   └── utils.py
│       │   ├── resources
│       │   │   ├── ai_assistant_guide.md
│       │   │   └── project_info.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── build_context.py
│       │       ├── canvas.py
│       │       ├── chatgpt_tools.py
│       │       ├── delete_note.py
│       │       ├── edit_note.py
│       │       ├── list_directory.py
│       │       ├── move_note.py
│       │       ├── project_management.py
│       │       ├── read_content.py
│       │       ├── read_note.py
│       │       ├── recent_activity.py
│       │       ├── search.py
│       │       ├── utils.py
│       │       ├── view_note.py
│       │       └── write_note.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── knowledge.py
│       │   ├── project.py
│       │   └── search.py
│       ├── project_resolver.py
│       ├── repository
│       │   ├── __init__.py
│       │   ├── entity_repository.py
│       │   ├── observation_repository.py
│       │   ├── postgres_search_repository.py
│       │   ├── project_info_repository.py
│       │   ├── project_repository.py
│       │   ├── relation_repository.py
│       │   ├── repository.py
│       │   ├── search_index_row.py
│       │   ├── search_repository_base.py
│       │   ├── search_repository.py
│       │   └── sqlite_search_repository.py
│       ├── runtime.py
│       ├── schemas
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloud.py
│       │   ├── delete.py
│       │   ├── directory.py
│       │   ├── importer.py
│       │   ├── memory.py
│       │   ├── project_info.py
│       │   ├── prompt.py
│       │   ├── request.py
│       │   ├── response.py
│       │   ├── search.py
│       │   ├── sync_report.py
│       │   └── v2
│       │       ├── __init__.py
│       │       ├── entity.py
│       │       └── resource.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_service.py
│       │   ├── directory_service.py
│       │   ├── entity_service.py
│       │   ├── exceptions.py
│       │   ├── file_service.py
│       │   ├── initialization.py
│       │   ├── link_resolver.py
│       │   ├── project_service.py
│       │   ├── search_service.py
│       │   └── service.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── background_sync.py
│       │   ├── coordinator.py
│       │   ├── sync_service.py
│       │   └── watch_service.py
│       ├── telemetry.py
│       ├── templates
│       │   └── prompts
│       │       ├── continue_conversation.hbs
│       │       └── search.hbs
│       └── utils.py
├── test-int
│   ├── BENCHMARKS.md
│   ├── cli
│   │   ├── test_project_commands_integration.py
│   │   └── test_version_integration.py
│   ├── conftest.py
│   ├── mcp
│   │   ├── test_build_context_underscore.py
│   │   ├── test_build_context_validation.py
│   │   ├── test_chatgpt_tools_integration.py
│   │   ├── test_default_project_mode_integration.py
│   │   ├── test_delete_note_integration.py
│   │   ├── test_edit_note_integration.py
│   │   ├── test_lifespan_shutdown_sync_task_cancellation_integration.py
│   │   ├── test_list_directory_integration.py
│   │   ├── test_move_note_integration.py
│   │   ├── test_project_management_integration.py
│   │   ├── test_project_state_sync_integration.py
│   │   ├── test_read_content_integration.py
│   │   ├── test_read_note_integration.py
│   │   ├── test_search_integration.py
│   │   ├── test_single_project_mcp_integration.py
│   │   └── test_write_note_integration.py
│   ├── test_db_wal_mode.py
│   └── test_disable_permalinks_integration.py
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── conftest.py
│   │   ├── test_api_container.py
│   │   ├── test_async_client.py
│   │   ├── test_continue_conversation_template.py
│   │   ├── test_directory_router.py
│   │   ├── test_importer_router.py
│   │   ├── test_knowledge_router.py
│   │   ├── test_management_router.py
│   │   ├── test_memory_router.py
│   │   ├── test_project_router_operations.py
│   │   ├── test_project_router.py
│   │   ├── test_prompt_router.py
│   │   ├── test_relation_background_resolution.py
│   │   ├── test_resource_router.py
│   │   ├── test_search_router.py
│   │   ├── test_search_template.py
│   │   ├── test_template_loader_helpers.py
│   │   ├── test_template_loader.py
│   │   └── v2
│   │       ├── __init__.py
│   │       ├── conftest.py
│   │       ├── test_directory_router.py
│   │       ├── test_importer_router.py
│   │       ├── test_knowledge_router.py
│   │       ├── test_memory_router.py
│   │       ├── test_project_router.py
│   │       ├── test_prompt_router.py
│   │       ├── test_resource_router.py
│   │       └── test_search_router.py
│   ├── cli
│   │   ├── cloud
│   │   │   ├── test_cloud_api_client_and_utils.py
│   │   │   ├── test_rclone_config_and_bmignore_filters.py
│   │   │   └── test_upload_path.py
│   │   ├── conftest.py
│   │   ├── test_auth_cli_auth.py
│   │   ├── test_cli_container.py
│   │   ├── test_cli_exit.py
│   │   ├── test_cli_tool_exit.py
│   │   ├── test_cli_tools.py
│   │   ├── test_cloud_authentication.py
│   │   ├── test_ignore_utils.py
│   │   ├── test_import_chatgpt.py
│   │   ├── test_import_claude_conversations.py
│   │   ├── test_import_claude_projects.py
│   │   ├── test_import_memory_json.py
│   │   ├── test_project_add_with_local_path.py
│   │   └── test_upload.py
│   ├── conftest.py
│   ├── db
│   │   └── test_issue_254_foreign_key_constraints.py
│   ├── importers
│   │   ├── test_conversation_indexing.py
│   │   ├── test_importer_base.py
│   │   └── test_importer_utils.py
│   ├── markdown
│   │   ├── __init__.py
│   │   ├── test_date_frontmatter_parsing.py
│   │   ├── test_entity_parser_error_handling.py
│   │   ├── test_entity_parser.py
│   │   ├── test_markdown_plugins.py
│   │   ├── test_markdown_processor.py
│   │   ├── test_observation_edge_cases.py
│   │   ├── test_parser_edge_cases.py
│   │   ├── test_relation_edge_cases.py
│   │   └── test_task_detection.py
│   ├── mcp
│   │   ├── clients
│   │   │   ├── __init__.py
│   │   │   └── test_clients.py
│   │   ├── conftest.py
│   │   ├── test_async_client_modes.py
│   │   ├── test_mcp_container.py
│   │   ├── test_obsidian_yaml_formatting.py
│   │   ├── test_permalink_collision_file_overwrite.py
│   │   ├── test_project_context.py
│   │   ├── test_prompts.py
│   │   ├── test_recent_activity_prompt_modes.py
│   │   ├── test_resources.py
│   │   ├── test_server_lifespan_branches.py
│   │   ├── test_tool_build_context.py
│   │   ├── test_tool_canvas.py
│   │   ├── test_tool_delete_note.py
│   │   ├── test_tool_edit_note.py
│   │   ├── test_tool_list_directory.py
│   │   ├── test_tool_move_note.py
│   │   ├── test_tool_project_management.py
│   │   ├── test_tool_read_content.py
│   │   ├── test_tool_read_note.py
│   │   ├── test_tool_recent_activity.py
│   │   ├── test_tool_resource.py
│   │   ├── test_tool_search.py
│   │   ├── test_tool_utils.py
│   │   ├── test_tool_view_note.py
│   │   ├── test_tool_write_note_kebab_filenames.py
│   │   ├── test_tool_write_note.py
│   │   └── tools
│   │       └── test_chatgpt_tools.py
│   ├── Non-MarkdownFileSupport.pdf
│   ├── README.md
│   ├── repository
│   │   ├── test_entity_repository_upsert.py
│   │   ├── test_entity_repository.py
│   │   ├── test_entity_upsert_issue_187.py
│   │   ├── test_observation_repository.py
│   │   ├── test_postgres_search_repository.py
│   │   ├── test_project_info_repository.py
│   │   ├── test_project_repository.py
│   │   ├── test_relation_repository.py
│   │   ├── test_repository.py
│   │   ├── test_search_repository_edit_bug_fix.py
│   │   └── test_search_repository.py
│   ├── schemas
│   │   ├── test_base_timeframe_minimum.py
│   │   ├── test_memory_serialization.py
│   │   ├── test_memory_url_validation.py
│   │   ├── test_memory_url.py
│   │   ├── test_relation_response_reference_resolution.py
│   │   ├── test_schemas.py
│   │   └── test_search.py
│   ├── Screenshot.png
│   ├── services
│   │   ├── test_context_service.py
│   │   ├── test_directory_service.py
│   │   ├── test_entity_service_disable_permalinks.py
│   │   ├── test_entity_service.py
│   │   ├── test_file_service.py
│   │   ├── test_initialization_cloud_mode_branches.py
│   │   ├── test_initialization.py
│   │   ├── test_link_resolver.py
│   │   ├── test_project_removal_bug.py
│   │   ├── test_project_service_operations.py
│   │   ├── test_project_service.py
│   │   └── test_search_service.py
│   ├── sync
│   │   ├── test_character_conflicts.py
│   │   ├── test_coordinator.py
│   │   ├── test_sync_service_incremental.py
│   │   ├── test_sync_service.py
│   │   ├── test_sync_wikilink_issue.py
│   │   ├── test_tmp_files.py
│   │   ├── test_watch_service_atomic_adds.py
│   │   ├── test_watch_service_edge_cases.py
│   │   ├── test_watch_service_reload.py
│   │   └── test_watch_service.py
│   ├── test_config.py
│   ├── test_deps.py
│   ├── test_production_cascade_delete.py
│   ├── test_project_resolver.py
│   ├── test_rclone_commands.py
│   ├── test_runtime.py
│   ├── test_telemetry.py
│   └── utils
│       ├── test_file_utils.py
│       ├── test_frontmatter_obsidian_compatible.py
│       ├── test_parse_tags.py
│       ├── test_permalink_formatting.py
│       ├── test_timezone_utils.py
│       ├── test_utf8_handling.py
│       └── test_validate_project_path.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/command_utils.py:
--------------------------------------------------------------------------------

```python
  1 | """utility functions for commands"""
  2 | 
  3 | import asyncio
  4 | from typing import Optional, TypeVar, Coroutine, Any
  5 | 
  6 | from mcp.server.fastmcp.exceptions import ToolError
  7 | import typer
  8 | 
  9 | from rich.console import Console
 10 | 
 11 | from basic_memory import db
 12 | from basic_memory.mcp.async_client import get_client
 13 | 
 14 | from basic_memory.mcp.tools.utils import call_post, call_get
 15 | from basic_memory.mcp.project_context import get_active_project
 16 | from basic_memory.schemas import ProjectInfoResponse
 17 | from basic_memory.telemetry import shutdown_telemetry
 18 | 
 19 | console = Console()
 20 | 
 21 | T = TypeVar("T")
 22 | 
 23 | 
 24 | def run_with_cleanup(coro: Coroutine[Any, Any, T]) -> T:
 25 |     """Run an async coroutine with proper database cleanup.
 26 | 
 27 |     This helper ensures database connections and telemetry threads are cleaned up
 28 |     before the event loop closes, preventing process hangs in CLI commands.
 29 | 
 30 |     Args:
 31 |         coro: The coroutine to run
 32 | 
 33 |     Returns:
 34 |         The result of the coroutine
 35 |     """
 36 | 
 37 |     async def _with_cleanup() -> T:
 38 |         try:
 39 |             return await coro
 40 |         finally:
 41 |             await db.shutdown_db()
 42 |             # Shutdown telemetry to stop the OpenPanel background thread
 43 |             # This prevents hangs on Python 3.14+ during thread shutdown
 44 |             shutdown_telemetry()
 45 | 
 46 |     return asyncio.run(_with_cleanup())
 47 | 
 48 | 
 49 | async def run_sync(
 50 |     project: Optional[str] = None,
 51 |     force_full: bool = False,
 52 |     run_in_background: bool = True,
 53 | ):
 54 |     """Run sync operation via API endpoint.
 55 | 
 56 |     Args:
 57 |         project: Optional project name
 58 |         force_full: If True, force a full scan bypassing watermark optimization
 59 |         run_in_background: If True, return immediately; if False, wait for completion
 60 |     """
 61 | 
 62 |     try:
 63 |         async with get_client() as client:
 64 |             project_item = await get_active_project(client, project, None)
 65 |             url = f"{project_item.project_url}/project/sync"
 66 |             params = []
 67 |             if force_full:
 68 |                 params.append("force_full=true")
 69 |             if not run_in_background:
 70 |                 params.append("run_in_background=false")
 71 |             if params:
 72 |                 url += "?" + "&".join(params)
 73 |             response = await call_post(client, url)
 74 |             data = response.json()
 75 |             # Background mode returns {"message": "..."}, foreground returns SyncReportResponse
 76 |             if "message" in data:
 77 |                 console.print(f"[green]{data['message']}[/green]")
 78 |             else:
 79 |                 # Foreground mode - show summary of sync results
 80 |                 total = data.get("total", 0)
 81 |                 new_count = len(data.get("new", []))
 82 |                 modified_count = len(data.get("modified", []))
 83 |                 deleted_count = len(data.get("deleted", []))
 84 |                 console.print(
 85 |                     f"[green]Synced {total} files[/green] "
 86 |                     f"(new: {new_count}, modified: {modified_count}, deleted: {deleted_count})"
 87 |                 )
 88 |     except (ToolError, ValueError) as e:
 89 |         console.print(f"[red]Sync failed: {e}[/red]")
 90 |         raise typer.Exit(1)
 91 | 
 92 | 
 93 | async def get_project_info(project: str):
 94 |     """Get project information via API endpoint."""
 95 | 
 96 |     try:
 97 |         async with get_client() as client:
 98 |             project_item = await get_active_project(client, project, None)
 99 |             response = await call_get(client, f"{project_item.project_url}/project/info")
100 |             return ProjectInfoResponse.model_validate(response.json())
101 |     except (ToolError, ValueError) as e:
102 |         console.print(f"[red]Sync failed: {e}[/red]")
103 |         raise typer.Exit(1)
104 | 
```
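
For orientation, a minimal sketch of how a CLI command might compose these two helpers; the `sync` command below is hypothetical, not one of the real commands in this package:

```python
from typing import Optional

import typer

from basic_memory.cli.commands.command_utils import run_sync, run_with_cleanup

app = typer.Typer()


@app.command()
def sync(project: Optional[str] = typer.Option(None), force_full: bool = False) -> None:
    """Hypothetical command wiring, for illustration only."""
    # run_sync(...) builds the coroutine; run_with_cleanup drives it with
    # asyncio.run and guarantees db.shutdown_db() + shutdown_telemetry() run
    # in a finally block before the event loop closes.
    run_with_cleanup(run_sync(project=project, force_full=force_full, run_in_background=False))
```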

--------------------------------------------------------------------------------
/test-int/mcp/test_lifespan_shutdown_sync_task_cancellation_integration.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Integration test for FastAPI lifespan shutdown behavior.
 3 | 
 4 | This test verifies the asyncio cancellation pattern used by the API lifespan:
 5 | when the background sync task is cancelled during shutdown, it must be *awaited*
 6 | before database shutdown begins. This prevents "hang on exit" scenarios in
 7 | `asyncio.run(...)` callers (e.g. CLI/MCP clients using httpx ASGITransport).
 8 | """
 9 | 
10 | import asyncio
11 | 
12 | from httpx import ASGITransport, AsyncClient
13 | 
14 | 
15 | def test_lifespan_shutdown_awaits_sync_task_cancellation(app, monkeypatch):
16 |     """
17 |     Ensure lifespan shutdown awaits the cancelled background sync task.
18 | 
19 |     Why this is deterministic:
20 |     - Cancelling a task does not make it "done" immediately; it becomes done only
21 |       once the event loop schedules it and it processes the CancelledError.
22 |     - In the buggy version, shutdown proceeded directly to db.shutdown_db()
23 |       immediately after calling cancel(), so at *entry* to shutdown_db the task
24 |       is still not done.
25 |     - In the fixed version, SyncCoordinator.stop() awaits the task before returning,
26 |       so by the time shutdown_db is called, the task is done (cancelled).
27 |     """
28 | 
29 |     # Import the *module* (not the package-level FastAPI `basic_memory.api.app` export)
30 |     # so monkeypatching affects the exact symbols referenced inside lifespan().
31 |     #
32 |     # Note: `basic_memory/api/__init__.py` re-exports `app`, so `import basic_memory.api.app`
33 |     # can resolve to the FastAPI instance rather than the `basic_memory.api.app` module.
34 |     import importlib
35 | 
36 |     api_app_module = importlib.import_module("basic_memory.api.app")
37 |     container_module = importlib.import_module("basic_memory.api.container")
38 |     init_module = importlib.import_module("basic_memory.services.initialization")
39 | 
40 |     # Keep startup cheap: we don't need real DB init for this ordering test.
41 |     async def _noop_initialize_app(_app_config):
42 |         return None
43 | 
44 |     monkeypatch.setattr(api_app_module, "initialize_app", _noop_initialize_app)
45 | 
46 |     # Patch the container's init_database to return fake objects
47 |     async def _fake_init_database(self):
48 |         self.engine = object()
49 |         self.session_maker = object()
50 |         return self.engine, self.session_maker
51 | 
52 |     monkeypatch.setattr(container_module.ApiContainer, "init_database", _fake_init_database)
53 | 
54 |     # Make the sync task long-lived so it must be cancelled on shutdown.
55 |     # Patch at the source module where SyncCoordinator imports it.
56 |     async def _fake_initialize_file_sync(_app_config):
57 |         await asyncio.Event().wait()
58 | 
59 |     monkeypatch.setattr(init_module, "initialize_file_sync", _fake_initialize_file_sync)
60 | 
61 |     # Assert ordering: shutdown_db must be called only after the sync_task is done.
62 |     # SyncCoordinator stores the task in _sync_task attribute.
63 |     async def _assert_sync_task_done_before_db_shutdown(self):
64 |         sync_coordinator = api_app_module.app.state.sync_coordinator
65 |         assert sync_coordinator._sync_task is not None
66 |         assert sync_coordinator._sync_task.done()
67 | 
68 |     monkeypatch.setattr(
69 |         container_module.ApiContainer,
70 |         "shutdown_database",
71 |         _assert_sync_task_done_before_db_shutdown,
72 |     )
73 | 
74 |     async def _run_client_once():
75 |         async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
76 |             # Any request is sufficient to trigger lifespan startup/shutdown.
77 |             await client.get("/__nonexistent__")
78 | 
79 |     # Use asyncio.run to match the CLI/MCP execution model where loop teardown
80 |     # would hang if a background task is left running.
81 |     asyncio.run(_run_client_once())
82 | 
```
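
The pattern this test pins down, reduced to a standalone sketch (names here are illustrative, not the project's `SyncCoordinator` API): `cancel()` alone does not finish a task, so the task must be awaited before dependent resources are torn down.

```python
import asyncio


async def main() -> None:
    async def background_sync() -> None:
        await asyncio.Event().wait()  # long-lived, like the patched file sync

    task = asyncio.create_task(background_sync())
    await asyncio.sleep(0)  # let the task start and suspend

    task.cancel()
    assert not task.done()  # cancellation is requested, not yet processed
    try:
        await task  # the crucial await: lets the task process CancelledError
    except asyncio.CancelledError:
        pass
    assert task.done()  # only now is it safe to shut down the database


asyncio.run(main())
```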

--------------------------------------------------------------------------------
/tests/cli/test_cli_container.py:
--------------------------------------------------------------------------------

```python
 1 | """Tests for CLI container composition root."""
 2 | 
 3 | import pytest
 4 | 
 5 | from basic_memory.cli.container import (
 6 |     CliContainer,
 7 |     get_container,
 8 |     set_container,
 9 |     get_or_create_container,
10 | )
11 | from basic_memory.runtime import RuntimeMode
12 | 
13 | 
14 | class TestCliContainer:
15 |     """Tests for CliContainer."""
16 | 
17 |     def test_create_from_config(self, app_config):
18 |         """Container can be created from config."""
19 |         container = CliContainer(config=app_config, mode=RuntimeMode.LOCAL)
20 |         assert container.config == app_config
21 |         assert container.mode == RuntimeMode.LOCAL
22 | 
23 |     def test_is_cloud_mode_when_cloud(self, app_config):
24 |         """is_cloud_mode returns True in cloud mode."""
25 |         container = CliContainer(config=app_config, mode=RuntimeMode.CLOUD)
26 |         assert container.is_cloud_mode is True
27 | 
28 |     def test_is_cloud_mode_when_local(self, app_config):
29 |         """is_cloud_mode returns False in local mode."""
30 |         container = CliContainer(config=app_config, mode=RuntimeMode.LOCAL)
31 |         assert container.is_cloud_mode is False
32 | 
33 |     def test_is_cloud_mode_when_test(self, app_config):
34 |         """is_cloud_mode returns False in test mode."""
35 |         container = CliContainer(config=app_config, mode=RuntimeMode.TEST)
36 |         assert container.is_cloud_mode is False
37 | 
38 | 
39 | class TestContainerAccessors:
40 |     """Tests for container get/set functions."""
41 | 
42 |     def test_get_container_raises_when_not_set(self, monkeypatch):
43 |         """get_container raises RuntimeError when container not initialized."""
44 |         import basic_memory.cli.container as container_module
45 | 
46 |         monkeypatch.setattr(container_module, "_container", None)
47 | 
48 |         with pytest.raises(RuntimeError, match="CLI container not initialized"):
49 |             get_container()
50 | 
51 |     def test_set_and_get_container(self, app_config, monkeypatch):
52 |         """set_container allows get_container to return the container."""
53 |         import basic_memory.cli.container as container_module
54 | 
55 |         container = CliContainer(config=app_config, mode=RuntimeMode.LOCAL)
56 |         monkeypatch.setattr(container_module, "_container", None)
57 | 
58 |         set_container(container)
59 |         assert get_container() is container
60 | 
61 | 
62 | class TestGetOrCreateContainer:
63 |     """Tests for get_or_create_container - unique to CLI container."""
64 | 
65 |     def test_creates_new_when_none_exists(self, monkeypatch):
66 |         """get_or_create_container creates a new container when none exists."""
67 |         import basic_memory.cli.container as container_module
68 | 
69 |         monkeypatch.setattr(container_module, "_container", None)
70 | 
71 |         container = get_or_create_container()
72 |         assert container is not None
73 |         assert isinstance(container, CliContainer)
74 | 
75 |     def test_returns_existing_when_set(self, app_config, monkeypatch):
76 |         """get_or_create_container returns existing container if already set."""
77 |         import basic_memory.cli.container as container_module
78 | 
79 |         existing = CliContainer(config=app_config, mode=RuntimeMode.LOCAL)
80 |         monkeypatch.setattr(container_module, "_container", existing)
81 | 
82 |         result = get_or_create_container()
83 |         assert result is existing
84 | 
85 |     def test_sets_module_level_container(self, monkeypatch):
86 |         """get_or_create_container sets the module-level container."""
87 |         import basic_memory.cli.container as container_module
88 | 
89 |         monkeypatch.setattr(container_module, "_container", None)
90 | 
91 |         container = get_or_create_container()
92 | 
93 |         # Verify it was set at module level
94 |         assert container_module._container is container
95 |         # Verify get_container now works
96 |         assert get_container() is container
97 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_recent_activity_prompt_modes.py:
--------------------------------------------------------------------------------

```python
  1 | from datetime import UTC, datetime
  2 | 
  3 | import pytest
  4 | 
  5 | from basic_memory.mcp.prompts.recent_activity import recent_activity_prompt
  6 | from basic_memory.schemas.memory import (
  7 |     ActivityStats,
  8 |     ContextResult,
  9 |     GraphContext,
 10 |     MemoryMetadata,
 11 |     ProjectActivity,
 12 |     ProjectActivitySummary,
 13 |     EntitySummary,
 14 | )
 15 | from basic_memory.schemas.search import SearchItemType
 16 | 
 17 | 
 18 | def _entity(title: str, entity_id: int = 1) -> EntitySummary:
 19 |     return EntitySummary(
 20 |         entity_id=entity_id,
 21 |         permalink=title.lower().replace(" ", "-"),
 22 |         title=title,
 23 |         content=None,
 24 |         file_path=f"{title}.md",
 25 |         created_at=datetime.now(UTC),
 26 |     )
 27 | 
 28 | 
 29 | @pytest.mark.asyncio
 30 | async def test_recent_activity_prompt_discovery_mode(monkeypatch):
 31 |     recent = ProjectActivitySummary(
 32 |         projects={
 33 |             "p1": ProjectActivity(
 34 |                 project_name="p1",
 35 |                 project_path="/tmp/p1",
 36 |                 activity=GraphContext(
 37 |                     results=[
 38 |                         ContextResult(
 39 |                             primary_result=_entity("A"), observations=[], related_results=[]
 40 |                         )
 41 |                     ],
 42 |                     metadata=MemoryMetadata(
 43 |                         uri=None,
 44 |                         types=[SearchItemType.ENTITY],
 45 |                         depth=1,
 46 |                         timeframe="7d",
 47 |                         generated_at=datetime.now(UTC),
 48 |                     ),
 49 |                 ),
 50 |                 item_count=1,
 51 |             ),
 52 |             "p2": ProjectActivity(
 53 |                 project_name="p2",
 54 |                 project_path="/tmp/p2",
 55 |                 activity=GraphContext(
 56 |                     results=[
 57 |                         ContextResult(
 58 |                             primary_result=_entity("B", 2), observations=[], related_results=[]
 59 |                         )
 60 |                     ],
 61 |                     metadata=MemoryMetadata(
 62 |                         uri=None,
 63 |                         types=[SearchItemType.ENTITY],
 64 |                         depth=1,
 65 |                         timeframe="7d",
 66 |                         generated_at=datetime.now(UTC),
 67 |                     ),
 68 |                 ),
 69 |                 item_count=1,
 70 |             ),
 71 |         },
 72 |         summary=ActivityStats(
 73 |             total_projects=2, active_projects=2, most_active_project="p1", total_items=2
 74 |         ),
 75 |         timeframe="7d",
 76 |         generated_at=datetime.now(UTC),
 77 |     )
 78 | 
 79 |     async def fake_fn(**_kwargs):
 80 |         return recent
 81 | 
 82 |     monkeypatch.setattr("basic_memory.mcp.prompts.recent_activity.recent_activity.fn", fake_fn)
 83 | 
 84 |     out = await recent_activity_prompt.fn(timeframe="7d", project=None)  # pyright: ignore[reportGeneralTypeIssues]
 85 |     assert "Recent Activity Across All Projects" in out
 86 |     assert "Cross-Project Activity Discovery" in out
 87 | 
 88 | 
 89 | @pytest.mark.asyncio
 90 | async def test_recent_activity_prompt_project_mode(monkeypatch):
 91 |     recent = GraphContext(
 92 |         results=[
 93 |             ContextResult(primary_result=_entity("Only"), observations=[], related_results=[])
 94 |         ],
 95 |         metadata=MemoryMetadata(
 96 |             uri=None,
 97 |             types=[SearchItemType.ENTITY],
 98 |             depth=1,
 99 |             timeframe="1d",
100 |             generated_at=datetime.now(UTC),
101 |         ),
102 |     )
103 | 
104 |     async def fake_fn(**_kwargs):
105 |         return recent
106 | 
107 |     monkeypatch.setattr("basic_memory.mcp.prompts.recent_activity.recent_activity.fn", fake_fn)
108 | 
109 |     out = await recent_activity_prompt.fn(timeframe="1d", project="proj")  # pyright: ignore[reportGeneralTypeIssues]
110 |     assert "Recent Activity in proj" in out
111 |     assert "Opportunity to Capture Activity Summary" in out
112 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/schemas/prompt.py:
--------------------------------------------------------------------------------

```python
 1 | """Request and response schemas for prompt-related operations."""
 2 | 
 3 | from typing import Optional, List, Any, Dict
 4 | from pydantic import BaseModel, Field
 5 | 
 6 | from basic_memory.schemas.base import TimeFrame
 7 | from basic_memory.schemas.memory import EntitySummary, ObservationSummary, RelationSummary
 8 | 
 9 | 
10 | class PromptContextItem(BaseModel):
11 |     """Container for primary and related results to render in a prompt."""
12 | 
13 |     primary_results: List[EntitySummary]
14 |     related_results: List[EntitySummary | ObservationSummary | RelationSummary]
15 | 
16 | 
17 | class ContinueConversationRequest(BaseModel):
18 |     """Request for generating a continue conversation prompt.
19 | 
20 |     Used to provide context for continuing a conversation on a specific topic
21 |     or with recent activity from a given timeframe.
22 |     """
23 | 
24 |     topic: Optional[str] = Field(None, description="Topic or keyword to search for")
25 |     timeframe: Optional[TimeFrame] = Field(
26 |         None, description="How far back to look for activity (e.g. '1d', '1 week')"
27 |     )
 28 |     # Keep these limits small - larger result sets and deeper traversal slow prompt generation
29 |     search_items_limit: int = Field(
30 |         5,
31 |         description="Maximum number of search results to include in context (max 10)",
32 |         ge=1,
33 |         le=10,
34 |     )
35 |     depth: int = Field(
36 |         1,
37 |         description="How many relationship 'hops' to follow when building context (max 5)",
38 |         ge=1,
39 |         le=5,
40 |     )
41 |     # Limit related items to prevent overloading the context
42 |     related_items_limit: int = Field(
43 |         5, description="Maximum number of related items to include in context (max 10)", ge=1, le=10
44 |     )
45 | 
46 | 
47 | class SearchPromptRequest(BaseModel):
48 |     """Request for generating a search results prompt.
49 | 
50 |     Used to format search results into a prompt with context and suggestions.
51 |     """
52 | 
53 |     query: str = Field(..., description="The search query text")
54 |     timeframe: Optional[TimeFrame] = Field(
55 |         None, description="Optional timeframe to limit results (e.g. '1d', '1 week')"
56 |     )
57 | 
58 | 
59 | class PromptMetadata(BaseModel):
60 |     """Metadata about a prompt response.
61 | 
62 |     Contains statistical information about the prompt generation process
63 |     and results, useful for debugging and UI display.
64 |     """
65 | 
66 |     query: Optional[str] = Field(None, description="The original query or topic")
67 |     timeframe: Optional[str] = Field(None, description="The timeframe used for filtering")
68 |     search_count: int = Field(0, description="Number of search results found")
69 |     context_count: int = Field(0, description="Number of context items retrieved")
70 |     observation_count: int = Field(0, description="Total number of observations included")
71 |     relation_count: int = Field(0, description="Total number of relations included")
72 |     total_items: int = Field(0, description="Total number of all items included in the prompt")
73 |     search_limit: int = Field(0, description="Maximum search results requested")
74 |     context_depth: int = Field(0, description="Context depth used")
75 |     related_limit: int = Field(0, description="Maximum related items requested")
76 |     generated_at: str = Field(..., description="ISO timestamp when this prompt was generated")
77 | 
78 | 
79 | class PromptResponse(BaseModel):
80 |     """Response containing the rendered prompt.
81 | 
82 |     Includes both the rendered prompt text and the context that was used
83 |     to render it, for potential client-side use.
84 |     """
85 | 
86 |     prompt: str = Field(..., description="The rendered prompt text")
87 |     context: Dict[str, Any] = Field(..., description="The context used to render the prompt")
88 |     metadata: PromptMetadata = Field(
89 |         ..., description="Metadata about the prompt generation process"
90 |     )
91 | 
```
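
A quick illustration of how these `Field` bounds behave at validation time (assuming pydantic v2, consistent with the `BaseModel`/`Field` usage above):

```python
from pydantic import ValidationError

from basic_memory.schemas.prompt import ContinueConversationRequest

req = ContinueConversationRequest(topic="search design", depth=2)
print(req.search_items_limit)  # -> 5 (default)

try:
    ContinueConversationRequest(depth=6)  # exceeds le=5
except ValidationError as e:
    print(e.error_count(), "validation error")
```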

--------------------------------------------------------------------------------
/test-int/mcp/test_read_note_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for read_note MCP tool.
  3 | 
  4 | Tests the full flow: MCP client -> MCP server -> FastAPI -> database
  5 | """
  6 | 
  7 | import pytest
  8 | from fastmcp import Client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_read_note_after_write(mcp_server, app, test_project):
 13 |     """Test read_note after write_note using real database."""
 14 | 
 15 |     async with Client(mcp_server) as client:
 16 |         # First write a note
 17 |         write_result = await client.call_tool(
 18 |             "write_note",
 19 |             {
 20 |                 "project": test_project.name,
 21 |                 "title": "Test Note",
 22 |                 "folder": "test",
 23 |                 "content": "# Test Note\n\nThis is test content.",
 24 |                 "tags": "test,integration",
 25 |             },
 26 |         )
 27 | 
 28 |         assert len(write_result.content) == 1
 29 |         assert write_result.content[0].type == "text"
 30 |         assert "Test Note.md" in write_result.content[0].text
 31 | 
 32 |         # Then read it back
 33 |         read_result = await client.call_tool(
 34 |             "read_note",
 35 |             {
 36 |                 "project": test_project.name,
 37 |                 "identifier": "Test Note",
 38 |             },
 39 |         )
 40 | 
 41 |         assert len(read_result.content) == 1
 42 |         assert read_result.content[0].type == "text"
 43 |         result_text = read_result.content[0].text
 44 | 
 45 |         # Should contain the note content and metadata
 46 |         assert "# Test Note" in result_text
 47 |         assert "This is test content." in result_text
 48 |         assert "test/test-note" in result_text  # permalink
 49 | 
 50 | 
 51 | @pytest.mark.asyncio
 52 | async def test_read_note_underscored_folder_by_permalink(mcp_server, app, test_project):
 53 |     """Test read_note with permalink from underscored folder.
 54 | 
 55 |     Reproduces bug #416: read_note fails to find notes when given permalinks
 56 |     from underscored folder names (e.g., _archive/, _drafts/), even though
 57 |     the permalink is copied directly from the note's YAML frontmatter.
 58 |     """
 59 | 
 60 |     async with Client(mcp_server) as client:
 61 |         # Create a note in an underscored folder
 62 |         write_result = await client.call_tool(
 63 |             "write_note",
 64 |             {
 65 |                 "project": test_project.name,
 66 |                 "title": "Example Note",
 67 |                 "folder": "_archive/articles",
 68 |                 "content": "# Example Note\n\nThis is a test note in an underscored folder.",
 69 |                 "tags": "test,archive",
 70 |             },
 71 |         )
 72 | 
 73 |         assert len(write_result.content) == 1
 74 |         assert write_result.content[0].type == "text"
 75 |         write_text = write_result.content[0].text
 76 | 
 77 |         # Verify the file path includes the underscore
 78 |         assert "_archive/articles/Example Note.md" in write_text
 79 | 
 80 |         # Verify the permalink has underscores stripped (this is the expected behavior)
 81 |         assert "archive/articles/example-note" in write_text
 82 | 
 83 |         # Now try to read the note using the permalink (without underscores)
 84 |         # This is the exact scenario from the bug report - using the permalink
 85 |         # that was generated in the YAML frontmatter
 86 |         read_result = await client.call_tool(
 87 |             "read_note",
 88 |             {
 89 |                 "project": test_project.name,
 90 |                 "identifier": "archive/articles/example-note",  # permalink without underscores
 91 |             },
 92 |         )
 93 | 
 94 |         # This should succeed - the note should be found by its permalink
 95 |         assert len(read_result.content) == 1
 96 |         assert read_result.content[0].type == "text"
 97 |         result_text = read_result.content[0].text
 98 | 
 99 |         # Should contain the note content
100 |         assert "# Example Note" in result_text
101 |         assert "This is a test note in an underscored folder." in result_text
102 |         assert "archive/articles/example-note" in result_text  # permalink
103 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/alembic/alembic.ini:
--------------------------------------------------------------------------------

```
  1 | # A generic, single database configuration.
  2 | 
  3 | [alembic]
  4 | # path to migration scripts
  5 | # Use forward slashes (/) also on windows to provide an os agnostic path
  6 | script_location = .
  7 | 
  8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
  9 | # Uncomment the line below if you want the files to be prepended with date and time
 10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
 11 | # for all available tokens
 12 | # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
 13 | 
 14 | # sys.path path, will be prepended to sys.path if present.
 15 | # defaults to the current working directory.
 16 | prepend_sys_path = .
 17 | 
 18 | # timezone to use when rendering the date within the migration file
 19 | # as well as the filename.
 20 | # If specified, requires python>=3.9 or the backports.zoneinfo library, plus the tzdata library.
 21 | # Any required deps can be installed by adding `alembic[tz]` to the pip requirements
 22 | # string value is passed to ZoneInfo()
 23 | # leave blank for localtime
 24 | # timezone =
 25 | 
 26 | # max length of characters to apply to the "slug" field
 27 | # truncate_slug_length = 40
 28 | 
 29 | # set to 'true' to run the environment during
 30 | # the 'revision' command, regardless of autogenerate
 31 | # revision_environment = false
 32 | 
 33 | # set to 'true' to allow .pyc and .pyo files without
 34 | # a source .py file to be detected as revisions in the
 35 | # versions/ directory
 36 | # sourceless = false
 37 | 
 38 | # version location specification; This defaults
 39 | # to migrations/versions.  When using multiple version
 40 | # directories, initial revisions must be specified with --version-path.
 41 | # The path separator used here should be the separator specified by "version_path_separator" below.
 42 | # version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
 43 | 
 44 | # version path separator; As mentioned above, this is the character used to split
 45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
 46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
 47 | # Valid values for version_path_separator are:
 48 | #
 49 | # version_path_separator = :
 50 | # version_path_separator = ;
 51 | # version_path_separator = space
 52 | # version_path_separator = newline
 53 | #
 54 | # Use os.pathsep. Default configuration used for new projects.
 55 | version_path_separator = os
 56 | 
 57 | # set to 'true' to search source files recursively
 58 | # in each "version_locations" directory
 59 | # new in Alembic version 1.10
 60 | # recursive_version_locations = false
 61 | 
 62 | # the output encoding used when revision files
 63 | # are written from script.py.mako
 64 | # output_encoding = utf-8
 65 | 
 66 | sqlalchemy.url = driver://user:pass@localhost/dbname
 67 | 
 68 | 
 69 | [post_write_hooks]
 70 | # post_write_hooks defines scripts or Python functions that are run
 71 | # on newly generated revision scripts.  See the documentation for further
 72 | # detail and examples
 73 | 
 74 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
 75 | # hooks = black
 76 | # black.type = console_scripts
 77 | # black.entrypoint = black
 78 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
 79 | 
 80 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
 81 | # hooks = ruff
 82 | # ruff.type = exec
 83 | # ruff.executable = %(here)s/.venv/bin/ruff
 84 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
 85 | 
 86 | # Logging configuration
 87 | [loggers]
 88 | keys = root,sqlalchemy,alembic
 89 | 
 90 | [handlers]
 91 | keys = console
 92 | 
 93 | [formatters]
 94 | keys = generic
 95 | 
 96 | [logger_root]
 97 | level = WARNING
 98 | handlers = console
 99 | qualname =
100 | 
101 | [logger_sqlalchemy]
102 | level = WARNING
103 | handlers =
104 | qualname = sqlalchemy.engine
105 | 
106 | [logger_alembic]
107 | level = INFO
108 | handlers =
109 | qualname = alembic
110 | 
111 | [handler_console]
112 | class = StreamHandler
113 | args = (sys.stderr,)
114 | level = NOTSET
115 | formatter = generic
116 | 
117 | [formatter_generic]
118 | format = %(levelname)-5.5s [%(name)s] %(message)s
119 | datefmt = %H:%M:%S
120 | 
```
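
The `sqlalchemy.url` above is a placeholder, presumably overridden at runtime (e.g. via `env.py`/`migrations.py` alongside this file). A minimal sketch of driving these migrations through Alembic's standard Python API, swapping the URL in programmatically (the SQLite URL is just an example):

```python
from alembic import command
from alembic.config import Config

cfg = Config("src/basic_memory/alembic/alembic.ini")
cfg.set_main_option("sqlalchemy.url", "sqlite:///basic_memory.db")  # example URL
command.upgrade(cfg, "head")  # apply all migrations in versions/
```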

--------------------------------------------------------------------------------
/tests/mcp/test_project_context.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for project context utilities (no standard-library mock usage).
  2 | 
  3 | These functions are config/env driven, so we use the real ConfigManager-backed
  4 | test config file and pytest monkeypatch for environment variables.
  5 | """
  6 | 
  7 | from __future__ import annotations
  8 | 
  9 | import pytest
 10 | 
 11 | 
 12 | @pytest.mark.asyncio
 13 | async def test_cloud_mode_requires_project_by_default(config_manager, monkeypatch):
 14 |     from basic_memory.mcp.project_context import resolve_project_parameter
 15 | 
 16 |     cfg = config_manager.load_config()
 17 |     cfg.cloud_mode = True
 18 |     config_manager.save_config(cfg)
 19 | 
 20 |     with pytest.raises(ValueError) as exc_info:
 21 |         await resolve_project_parameter(project=None, allow_discovery=False)
 22 | 
 23 |     assert "No project specified" in str(exc_info.value)
 24 |     assert "Project is required for cloud mode" in str(exc_info.value)
 25 | 
 26 | 
 27 | @pytest.mark.asyncio
 28 | async def test_cloud_mode_allows_discovery_when_enabled(config_manager):
 29 |     from basic_memory.mcp.project_context import resolve_project_parameter
 30 | 
 31 |     cfg = config_manager.load_config()
 32 |     cfg.cloud_mode = True
 33 |     config_manager.save_config(cfg)
 34 | 
 35 |     assert await resolve_project_parameter(project=None, allow_discovery=True) is None
 36 | 
 37 | 
 38 | @pytest.mark.asyncio
 39 | async def test_cloud_mode_returns_project_when_specified(config_manager):
 40 |     from basic_memory.mcp.project_context import resolve_project_parameter
 41 | 
 42 |     cfg = config_manager.load_config()
 43 |     cfg.cloud_mode = True
 44 |     config_manager.save_config(cfg)
 45 | 
 46 |     assert await resolve_project_parameter(project="my-project") == "my-project"
 47 | 
 48 | 
 49 | @pytest.mark.asyncio
 50 | async def test_local_mode_uses_env_var_priority(config_manager, monkeypatch):
 51 |     from basic_memory.mcp.project_context import resolve_project_parameter
 52 | 
 53 |     cfg = config_manager.load_config()
 54 |     cfg.cloud_mode = False
 55 |     cfg.default_project_mode = False
 56 |     config_manager.save_config(cfg)
 57 | 
 58 |     monkeypatch.setenv("BASIC_MEMORY_MCP_PROJECT", "env-project")
 59 |     assert await resolve_project_parameter(project="explicit-project") == "env-project"
 60 | 
 61 | 
 62 | @pytest.mark.asyncio
 63 | async def test_local_mode_uses_explicit_project(config_manager, monkeypatch):
 64 |     from basic_memory.mcp.project_context import resolve_project_parameter
 65 | 
 66 |     cfg = config_manager.load_config()
 67 |     cfg.cloud_mode = False
 68 |     cfg.default_project_mode = False
 69 |     config_manager.save_config(cfg)
 70 | 
 71 |     monkeypatch.delenv("BASIC_MEMORY_MCP_PROJECT", raising=False)
 72 |     assert await resolve_project_parameter(project="explicit-project") == "explicit-project"
 73 | 
 74 | 
 75 | @pytest.mark.asyncio
 76 | async def test_local_mode_uses_default_project(config_manager, config_home, monkeypatch):
 77 |     from basic_memory.mcp.project_context import resolve_project_parameter
 78 | 
 79 |     cfg = config_manager.load_config()
 80 |     cfg.cloud_mode = False
 81 |     cfg.default_project_mode = True
 82 |     # default_project must exist in the config project list, otherwise config validation
 83 |     # will coerce it back to an existing default.
 84 |     (config_home / "default-project").mkdir(parents=True, exist_ok=True)
 85 |     cfg.projects["default-project"] = str(config_home / "default-project")
 86 |     cfg.default_project = "default-project"
 87 |     config_manager.save_config(cfg)
 88 | 
 89 |     monkeypatch.delenv("BASIC_MEMORY_MCP_PROJECT", raising=False)
 90 |     assert await resolve_project_parameter(project=None) == "default-project"
 91 | 
 92 | 
 93 | @pytest.mark.asyncio
 94 | async def test_local_mode_returns_none_when_no_resolution(config_manager, monkeypatch):
 95 |     from basic_memory.mcp.project_context import resolve_project_parameter
 96 | 
 97 |     cfg = config_manager.load_config()
 98 |     cfg.cloud_mode = False
 99 |     cfg.default_project_mode = False
100 |     config_manager.save_config(cfg)
101 | 
102 |     monkeypatch.delenv("BASIC_MEMORY_MCP_PROJECT", raising=False)
103 |     assert await resolve_project_parameter(project=None) is None
104 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/markdown/utils.py:
--------------------------------------------------------------------------------

```python
  1 | """Utilities for converting between markdown and entity models."""
  2 | 
  3 | from pathlib import Path
  4 | from typing import Any, Optional
  5 | 
  6 | 
  7 | from frontmatter import Post
  8 | 
  9 | from basic_memory.file_utils import has_frontmatter, remove_frontmatter, parse_frontmatter
 10 | from basic_memory.markdown import EntityMarkdown
 11 | from basic_memory.models import Entity
 12 | from basic_memory.models import Observation as ObservationModel
 13 | 
 14 | 
 15 | def entity_model_from_markdown(
 16 |     file_path: Path,
 17 |     markdown: EntityMarkdown,
 18 |     entity: Optional[Entity] = None,
 19 |     project_id: Optional[int] = None,
 20 | ) -> Entity:
 21 |     """
 22 |     Convert markdown entity to model. Does not include relations.
 23 | 
 24 |     Args:
 25 |         file_path: Path to the markdown file
 26 |         markdown: Parsed markdown entity
 27 |         entity: Optional existing entity to update
 28 |         project_id: Project ID for new observations (uses entity.project_id if not provided)
 29 | 
 30 |     Returns:
 31 |         Entity model populated from markdown
 32 | 
 33 |     Raises:
 34 |         ValueError: If required datetime fields are missing from markdown
 35 |     """
 36 | 
 37 |     if not markdown.created or not markdown.modified:  # pragma: no cover
 38 |         raise ValueError("Both created and modified dates are required in markdown")
 39 | 
 40 |     # Create or update entity
 41 |     model = entity or Entity()
 42 | 
 43 |     # Update basic fields
 44 |     model.title = markdown.frontmatter.title
 45 |     model.entity_type = markdown.frontmatter.type
 46 |     # Only update permalink if it exists in frontmatter, otherwise preserve existing
 47 |     if markdown.frontmatter.permalink is not None:
 48 |         model.permalink = markdown.frontmatter.permalink
 49 |     model.file_path = file_path.as_posix()
 50 |     model.content_type = "text/markdown"
 51 |     model.created_at = markdown.created
 52 |     model.updated_at = markdown.modified
 53 | 
 54 |     # Handle metadata - ensure all values are strings and filter None
 55 |     metadata = markdown.frontmatter.metadata or {}
 56 |     model.entity_metadata = {k: str(v) for k, v in metadata.items() if v is not None}
 57 | 
 58 |     # Get project_id from entity if not provided
 59 |     obs_project_id = project_id or (model.project_id if hasattr(model, "project_id") else None)
 60 | 
 61 |     # Convert observations
 62 |     model.observations = [
 63 |         ObservationModel(
 64 |             project_id=obs_project_id,
 65 |             content=obs.content,
 66 |             category=obs.category,
 67 |             context=obs.context,
 68 |             tags=obs.tags,
 69 |         )
 70 |         for obs in markdown.observations
 71 |     ]
 72 | 
 73 |     return model
 74 | 
 75 | 
 76 | async def schema_to_markdown(schema: Any) -> Post:
 77 |     """
 78 |     Convert schema to markdown Post object.
 79 | 
 80 |     Args:
 81 |         schema: Schema to convert (must have title, entity_type, and permalink attributes)
 82 | 
 83 |     Returns:
 84 |         Post object with frontmatter metadata
 85 |     """
 86 |     # Extract content and metadata
 87 |     content = schema.content or ""
 88 |     entity_metadata = dict(schema.entity_metadata or {})
 89 | 
 90 |     # if the content contains frontmatter, remove it and merge
 91 |     if has_frontmatter(content):
 92 |         content_frontmatter = parse_frontmatter(content)
 93 |         content = remove_frontmatter(content)
 94 | 
 95 |         # Merge content frontmatter with entity metadata
 96 |         # (entity_metadata takes precedence for conflicts)
 97 |         content_frontmatter.update(entity_metadata)
 98 |         entity_metadata = content_frontmatter
 99 | 
100 |     # Remove special fields for ordered frontmatter
101 |     for field in ["type", "title", "permalink"]:
102 |         entity_metadata.pop(field, None)
103 | 
104 |     # Create Post with fields ordered by insert order
105 |     post = Post(
106 |         content,
107 |         title=schema.title,
108 |         type=schema.entity_type,
109 |     )
110 |     # set the permalink if passed in
111 |     if schema.permalink:
112 |         post.metadata["permalink"] = schema.permalink
113 | 
114 |     if entity_metadata:
115 |         post.metadata.update(entity_metadata)
116 | 
117 |     return post
118 | 
```
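
A quick usage sketch for `schema_to_markdown` above (editor's illustration, not repo code): any object carrying the attributes named in the docstring works, so a `SimpleNamespace` stands in for the schema here, and `Post` is assumed to be python-frontmatter's class, which the constructor call above matches.

```python
# Illustrative sketch; assumes schema_to_markdown from above is in scope and
# that frontmatter.dumps (python-frontmatter) serializes the resulting Post.
import asyncio
from types import SimpleNamespace

import frontmatter

schema = SimpleNamespace(
    title="Search Design",
    entity_type="note",
    permalink="design/search-design",
    content="# Search Design\n\nNotes on FTS5.",
    entity_metadata={"tags": "search"},
)

post = asyncio.run(schema_to_markdown(schema))
print(frontmatter.dumps(post))  # YAML frontmatter (title, type, permalink, tags) + body
```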

--------------------------------------------------------------------------------
/src/basic_memory/schemas/request.py:
--------------------------------------------------------------------------------

```python
  1 | """Request schemas for interacting with the knowledge graph."""
  2 | 
  3 | from typing import List, Optional, Annotated, Literal
  4 | from annotated_types import MaxLen, MinLen
  5 | 
  6 | from pydantic import BaseModel, field_validator
  7 | 
  8 | from basic_memory.schemas.base import (
  9 |     Relation,
 10 |     Permalink,
 11 | )
 12 | 
 13 | 
 14 | class SearchNodesRequest(BaseModel):
 15 |     """Search for entities in the knowledge graph.
 16 | 
 17 |     The search looks across multiple fields:
 18 |     - Entity title
 19 |     - Entity types
 20 |     - summary
 21 |     - file content
 22 |     - Observations
 23 | 
 24 |     Features:
 25 |     - Case-insensitive matching
 26 |     - Partial word matches
 27 |     - Returns full entity objects with relations
 28 |     - Includes all matching entities
 29 |     - If a category is specified, only entities with that category are returned
 30 | 
 31 |     Example Queries:
 32 |     - "memory" - Find entities related to memory systems
 33 |     - "SQLite" - Find database-related components
 34 |     - "test" - Find test-related entities
 35 |     - "implementation" - Find concrete implementations
 36 |     - "service" - Find service components
 37 | 
 38 |     Note: Currently uses SQL ILIKE for matching. Wildcard (*) searches
 39 |     and full-text search capabilities are planned for future versions.
 40 |     """
 41 | 
 42 |     query: Annotated[str, MinLen(1), MaxLen(200)]
 43 |     category: Optional[str] = None
 44 | 
 45 | 
 46 | class GetEntitiesRequest(BaseModel):
 47 |     """Retrieve specific entities by their IDs.
 48 | 
 49 |     Used to load complete entity details including all observations
 50 |     and relations. Particularly useful for following relations
 51 |     discovered through search.
 52 |     """
 53 | 
 54 |     permalinks: Annotated[List[Permalink], MinLen(1), MaxLen(10)]
 55 | 
 56 | 
 57 | class CreateRelationsRequest(BaseModel):
 58 |     relations: List[Relation]
 59 | 
 60 | 
 61 | class EditEntityRequest(BaseModel):
 62 |     """Request schema for editing an existing entity's content.
 63 | 
 64 |     This allows for targeted edits without requiring the full entity content.
 65 |     Supports various operation types for different editing scenarios.
 66 |     """
 67 | 
 68 |     operation: Literal["append", "prepend", "find_replace", "replace_section"]
 69 |     content: str
 70 |     section: Optional[str] = None
 71 |     find_text: Optional[str] = None
 72 |     expected_replacements: int = 1
 73 | 
 74 |     @field_validator("section")
 75 |     @classmethod
 76 |     def validate_section_for_replace_section(cls, v, info):
 77 |         """Ensure section is provided for replace_section operation."""
 78 |         if info.data.get("operation") == "replace_section" and not v:
 79 |             raise ValueError("section parameter is required for replace_section operation")
 80 |         return v
 81 | 
 82 |     @field_validator("find_text")
 83 |     @classmethod
 84 |     def validate_find_text_for_find_replace(cls, v, info):
 85 |         """Ensure find_text is provided for find_replace operation."""
 86 |         if info.data.get("operation") == "find_replace" and not v:
 87 |             raise ValueError("find_text parameter is required for find_replace operation")
 88 |         return v
 89 | 
 90 | 
 91 | class MoveEntityRequest(BaseModel):
 92 |     """Request schema for moving an entity to a new file location.
 93 | 
 94 |     This allows moving notes to different paths while maintaining project
 95 |     consistency and optionally updating permalinks based on configuration.
 96 |     """
 97 | 
 98 |     identifier: Annotated[str, MinLen(1), MaxLen(200)]
 99 |     destination_path: Annotated[str, MinLen(1), MaxLen(500)]
100 |     project: Optional[str] = None
101 | 
102 |     @field_validator("destination_path")
103 |     @classmethod
104 |     def validate_destination_path(cls, v):
105 |         """Ensure destination path is relative and valid."""
106 |         if v.startswith("/"):
107 |             raise ValueError("destination_path must be relative, not absolute")
108 |         if ".." in v:
109 |             raise ValueError("destination_path cannot contain '..' path components")
110 |         if not v.strip():
111 |             raise ValueError("destination_path cannot be empty or whitespace only")
112 |         return v.strip()
113 | 
```
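
For readers skimming the validators: a minimal sketch of how they surface errors (editor's illustration; pydantic v2 skips field validators for unprovided defaults, so `section=None` is passed explicitly to trigger the check).

```python
from pydantic import ValidationError

from basic_memory.schemas.request import EditEntityRequest, MoveEntityRequest

EditEntityRequest(operation="append", content="more text")  # valid

try:
    # section=None is provided explicitly; pydantic v2 does not run
    # field validators on unprovided default values.
    EditEntityRequest(operation="replace_section", content="new body", section=None)
except ValidationError as err:
    print(err)  # section parameter is required for replace_section operation

try:
    MoveEntityRequest(identifier="notes/old", destination_path="/abs/new.md")
except ValidationError as err:
    print(err)  # destination_path must be relative, not absolute
```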

--------------------------------------------------------------------------------
/tests/cli/test_cli_tool_exit.py:
--------------------------------------------------------------------------------

```python
  1 | """Test that CLI tool commands exit cleanly without hanging.
  2 | 
  3 | This test ensures that CLI commands properly clean up database connections
  4 | on exit, preventing process hangs. See GitHub issue for details.
  5 | 
  6 | The issue occurs when:
  7 | 1. ensure_initialization() calls asyncio.run(initialize_app())
  8 | 2. initialize_app() creates global database connections via db.get_or_create_db()
  9 | 3. When asyncio.run() completes, the event loop closes
 10 | 4. But the global database engine holds async connections that prevent clean exit
 11 | 5. Process hangs indefinitely
 12 | 
 13 | The fix ensures db.shutdown_db() is called before asyncio.run() returns.
 14 | """
 15 | 
 16 | import os
 17 | import platform
 18 | import subprocess
 19 | import sys
 20 | 
 21 | import pytest
 22 | 
 23 | # Windows has different process cleanup behavior that makes these tests unreliable
 24 | IS_WINDOWS = platform.system() == "Windows"
 25 | SUBPROCESS_TIMEOUT = 10.0
 26 | skip_on_windows = pytest.mark.skipif(
 27 |     IS_WINDOWS, reason="Subprocess cleanup tests unreliable on Windows CI"
 28 | )
 29 | 
 30 | 
 31 | @skip_on_windows
 32 | class TestCLIToolExit:
 33 |     """Test that CLI tool commands exit cleanly."""
 34 | 
 35 |     @pytest.mark.parametrize(
 36 |         "command",
 37 |         [
 38 |             ["tool", "--help"],
 39 |             ["tool", "write-note", "--help"],
 40 |             ["tool", "read-note", "--help"],
 41 |             ["tool", "search-notes", "--help"],
 42 |             ["tool", "build-context", "--help"],
 43 |         ],
 44 |     )
 45 |     def test_cli_command_exits_cleanly(self, command: list[str]):
 46 |         """Test that CLI commands exit without hanging.
 47 | 
 48 |         Each command should complete within the timeout without requiring
 49 |         manual termination (Ctrl+C).
 50 |         """
 51 |         full_command = [sys.executable, "-m", "basic_memory.cli.main"] + command
 52 | 
 53 |         try:
 54 |             result = subprocess.run(
 55 |                 full_command,
 56 |                 capture_output=True,
 57 |                 text=True,
 58 |                 timeout=SUBPROCESS_TIMEOUT,
 59 |             )
 60 |             # Command should exit with code 0 for --help
 61 |             assert result.returncode == 0, f"Command failed: {result.stderr}"
 62 |         except subprocess.TimeoutExpired:
 63 |             pytest.fail(
 64 |                 f"Command '{' '.join(command)}' hung and did not exit within timeout. "
 65 |                 "This indicates database connections are not being cleaned up properly."
 66 |             )
 67 | 
 68 |     def test_ensure_initialization_exits_cleanly(self, tmp_path):
 69 |         """Test that ensure_initialization doesn't cause process hang.
 70 | 
 71 |         This test directly tests the initialization function that's called
 72 |         by CLI commands, ensuring it cleans up database connections properly.
 73 |         """
 74 |         code = """
 75 | import asyncio
 76 | from basic_memory.config import ConfigManager
 77 | from basic_memory.services.initialization import ensure_initialization
 78 | 
 79 | app_config = ConfigManager().config
 80 | ensure_initialization(app_config)
 81 | print("OK")
 82 | """
 83 |         try:
 84 |             # Ensure the subprocess uses an isolated home directory so ConfigManager doesn't
 85 |             # touch the real user profile/AppData (which can be slow/flaky on CI Windows).
 86 |             env = dict(os.environ)
 87 |             bm_home = tmp_path / "basic-memory-home"
 88 |             env["BASIC_MEMORY_HOME"] = str(bm_home)
 89 |             env["HOME"] = str(tmp_path)
 90 |             env["USERPROFILE"] = str(tmp_path)
 91 | 
 92 |             result = subprocess.run(
 93 |                 [sys.executable, "-c", code],
 94 |                 capture_output=True,
 95 |                 text=True,
 96 |                 timeout=SUBPROCESS_TIMEOUT,
 97 |                 env=env,
 98 |             )
 99 |             assert "OK" in result.stdout, f"Unexpected output: {result.stdout}"
100 |         except subprocess.TimeoutExpired:
101 |             pytest.fail(
102 |                 "ensure_initialization() caused process hang. "
103 |                 "Database connections are not being cleaned up before event loop closes."
104 |             )
105 | 
```
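
The module docstring lays out the hang mechanics; here is a runnable sketch of the shutdown-before-loop-close pattern these tests guard. The function bodies are stand-ins, not the repo's actual initialization code.

```python
# Editor's sketch: initialize_app/shutdown_db below are stand-ins.
import asyncio

async def initialize_app():
    print("run migrations, set up projects, open global async DB engine ...")

async def shutdown_db():
    print("dispose the engine while the event loop is still alive")

async def _init_then_cleanup():
    try:
        await initialize_app()
    finally:
        await shutdown_db()  # must run before asyncio.run() closes the loop

def ensure_initialization_sketch():
    # If connections outlived this call, the process could hang on exit.
    asyncio.run(_init_then_cleanup())

ensure_initialization_sketch()
```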

--------------------------------------------------------------------------------
/tests/api/test_management_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for management router API endpoints (minimal mocking).
  2 | 
  3 | These endpoints are mostly simple state checks and wiring; we use stub objects
  4 | and pytest monkeypatch instead of standard-library mocks.
  5 | """
  6 | 
  7 | from __future__ import annotations
  8 | 
  9 | import pytest
 10 | from fastapi import FastAPI
 11 | 
 12 | from basic_memory.api.routers.management_router import (
 13 |     WatchStatusResponse,
 14 |     get_watch_status,
 15 |     start_watch_service,
 16 |     stop_watch_service,
 17 | )
 18 | 
 19 | 
 20 | class _Request:
 21 |     def __init__(self, app: FastAPI):
 22 |         self.app = app
 23 | 
 24 | 
 25 | class _Task:
 26 |     def __init__(self, *, done: bool):
 27 |         self._done = done
 28 |         self.cancel_called = False
 29 | 
 30 |     def done(self) -> bool:
 31 |         return self._done
 32 | 
 33 |     def cancel(self) -> None:
 34 |         self.cancel_called = True
 35 | 
 36 | 
 37 | @pytest.fixture
 38 | def app_with_state() -> FastAPI:
 39 |     app = FastAPI()
 40 |     app.state.watch_task = None
 41 |     return app
 42 | 
 43 | 
 44 | @pytest.mark.asyncio
 45 | async def test_get_watch_status_not_running(app_with_state: FastAPI):
 46 |     app_with_state.state.watch_task = None
 47 |     resp = await get_watch_status(_Request(app_with_state))
 48 |     assert isinstance(resp, WatchStatusResponse)
 49 |     assert resp.running is False
 50 | 
 51 | 
 52 | @pytest.mark.asyncio
 53 | async def test_get_watch_status_running(app_with_state: FastAPI):
 54 |     app_with_state.state.watch_task = _Task(done=False)
 55 |     resp = await get_watch_status(_Request(app_with_state))
 56 |     assert resp.running is True
 57 | 
 58 | 
 59 | @pytest.mark.asyncio
 60 | async def test_start_watch_service_when_not_running(monkeypatch, app_with_state: FastAPI):
 61 |     app_with_state.state.watch_task = None
 62 | 
 63 |     created = {"watch_service": None, "task": None}
 64 | 
 65 |     class _StubWatchService:
 66 |         def __init__(self, *, app_config, project_repository):
 67 |             self.app_config = app_config
 68 |             self.project_repository = project_repository
 69 |             created["watch_service"] = self
 70 | 
 71 |     def _create_background_sync_task(sync_service, watch_service):
 72 |         created["task"] = _Task(done=False)
 73 |         return created["task"]
 74 | 
 75 |     # start_watch_service imports these inside the function, so patch at the source modules.
 76 |     monkeypatch.setattr("basic_memory.sync.WatchService", _StubWatchService)
 77 |     monkeypatch.setattr(
 78 |         "basic_memory.sync.background_sync.create_background_sync_task",
 79 |         _create_background_sync_task,
 80 |     )
 81 | 
 82 |     project_repository = object()
 83 |     sync_service = object()
 84 | 
 85 |     resp = await start_watch_service(_Request(app_with_state), project_repository, sync_service)
 86 |     assert resp.running is True
 87 |     assert app_with_state.state.watch_task is created["task"]
 88 |     assert created["watch_service"] is not None
 89 |     assert created["watch_service"].project_repository is project_repository
 90 | 
 91 | 
 92 | @pytest.mark.asyncio
 93 | async def test_start_watch_service_already_running(monkeypatch, app_with_state: FastAPI):
 94 |     existing = _Task(done=False)
 95 |     app_with_state.state.watch_task = existing
 96 | 
 97 |     def _should_not_be_called(*_args, **_kwargs):
 98 |         raise AssertionError("create_background_sync_task should not be called if already running")
 99 | 
100 |     monkeypatch.setattr(
101 |         "basic_memory.sync.background_sync.create_background_sync_task",
102 |         _should_not_be_called,
103 |     )
104 | 
105 |     resp = await start_watch_service(_Request(app_with_state), object(), object())
106 |     assert resp.running is True
107 |     assert app_with_state.state.watch_task is existing
108 | 
109 | 
110 | @pytest.mark.asyncio
111 | async def test_stop_watch_service_not_running(app_with_state: FastAPI):
112 |     app_with_state.state.watch_task = None
113 |     resp = await stop_watch_service(_Request(app_with_state))
114 |     assert resp.running is False
115 | 
116 | 
117 | @pytest.mark.asyncio
118 | async def test_stop_watch_service_already_done(app_with_state: FastAPI):
119 |     app_with_state.state.watch_task = _Task(done=True)
120 |     resp = await stop_watch_service(_Request(app_with_state))
121 |     assert resp.running is False
122 | 
```
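
The patch targets above follow a general rule: a function-local `from x import y` re-resolves `y` on every call, so stubs must be installed on the defining module, not on the caller. A self-contained illustration (the module name is made up):

```python
# Editor's sketch: builds a hypothetical module in memory to show why
# monkeypatching must target the source module for function-local imports.
import sys
import types

helpers = types.ModuleType("pkg_helpers")
helpers.make_task = lambda: "real"
sys.modules["pkg_helpers"] = helpers

def start():
    from pkg_helpers import make_task  # resolved at call time, not import time
    return make_task()

assert start() == "real"
helpers.make_task = lambda: "stub"  # rebind on the source module...
assert start() == "stub"            # ...and the next call picks up the stub
```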

--------------------------------------------------------------------------------
/test-int/mcp/test_project_state_sync_integration.py:
--------------------------------------------------------------------------------

```python
 1 | """Integration test for project state synchronization between MCP session and CLI config.
 2 | 
 3 | This test validates the fix for GitHub issue #148 where MCP session and CLI commands
 4 | had inconsistent project state, causing "Project not found" errors and edit failures.
 5 | 
 6 | The test simulates the exact workflow reported in the issue:
 7 | 1. MCP server starts with a default project
 8 | 2. Default project is changed via CLI/API
 9 | 3. MCP tools should immediately use the new project (no restart needed)
10 | 4. All operations should work consistently in the new project context
11 | """
12 | 
13 | import pytest
14 | from fastmcp import Client
15 | 
16 | 
17 | @pytest.mark.asyncio
18 | async def test_project_state_sync_after_default_change(
19 |     mcp_server, app, config_manager, test_project, tmp_path
20 | ):
21 |     """Test that MCP session stays in sync when default project is changed."""
22 | 
23 |     async with Client(mcp_server) as client:
24 |         # Step 1: Create a second project that we can switch to
25 |         create_result = await client.call_tool(
26 |             "create_memory_project",
27 |             {
28 |                 "project_name": "minerva",
29 |                 "project_path": str(tmp_path.parent / (tmp_path.name + "-projects") / "minerva"),
30 |                 "set_default": False,  # Don't set as default yet
31 |             },
32 |         )
33 |         assert len(create_result.content) == 1
34 |         assert "✓" in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
35 |         assert "minerva" in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
36 | 
37 |         # Step 2: Test that note operations work in the new project context
38 |         # This validates that the identifier resolution works correctly
39 |         write_result = await client.call_tool(
40 |             "write_note",
41 |             {
42 |                 "project": "minerva",
43 |                 "title": "Test Consistency Note",
44 |                 "folder": "test",
45 |                 "content": "# Test Note\n\nThis note tests project state consistency.\n\n- [test] Project state sync working",
46 |                 "tags": "test,consistency",
47 |             },
48 |         )
49 |         assert len(write_result.content) == 1
50 |         assert "Test Consistency Note" in write_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
51 | 
52 |         # Step 3: Test that we can read the note we just created
53 |         read_result = await client.call_tool(
54 |             "read_note", {"project": "minerva", "identifier": "Test Consistency Note"}
55 |         )
56 |         assert len(read_result.content) == 1
57 |         assert "Test Consistency Note" in read_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
58 |         assert "project state sync working" in read_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
59 | 
60 |         # Step 4: Test that edit operations work (this was failing in the original issue)
61 |         edit_result = await client.call_tool(
62 |             "edit_note",
63 |             {
64 |                 "project": "minerva",
65 |                 "identifier": "Test Consistency Note",
66 |                 "operation": "append",
67 |                 "content": "\n\n## Update\n\nEdit operation successful after project switch!",
68 |             },
69 |         )
70 |         assert len(edit_result.content) == 1
71 |         assert (
72 |             "added" in edit_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
73 |             and "lines" in edit_result.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]
74 |         )
75 | 
76 |         # Step 5: Verify the edit was applied
77 |         final_read_result = await client.call_tool(
78 |             "read_note", {"project": "minerva", "identifier": "Test Consistency Note"}
79 |         )
80 |         assert len(final_read_result.content) == 1
81 |         final_content = final_read_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
82 |         assert "Edit operation successful" in final_content
83 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_mcp_container.py:
--------------------------------------------------------------------------------

```python
 1 | """Tests for MCP container composition root."""
 2 | 
 3 | import pytest
 4 | 
 5 | from basic_memory.mcp.container import (
 6 |     McpContainer,
 7 |     get_container,
 8 |     set_container,
 9 | )
10 | from basic_memory.runtime import RuntimeMode
11 | 
12 | 
13 | class TestMcpContainer:
14 |     """Tests for McpContainer."""
15 | 
16 |     def test_create_from_config(self, app_config):
17 |         """Container can be created from config manager."""
18 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
19 |         assert container.config == app_config
20 |         assert container.mode == RuntimeMode.LOCAL
21 | 
22 |     def test_should_sync_files_when_enabled_local_mode(self, app_config):
23 |         """Sync should be enabled in local mode when config says so."""
24 |         app_config.sync_changes = True
25 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
26 |         assert container.should_sync_files is True
27 | 
28 |     def test_should_not_sync_files_when_disabled(self, app_config):
29 |         """Sync should be disabled when config says so."""
30 |         app_config.sync_changes = False
31 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
32 |         assert container.should_sync_files is False
33 | 
34 |     def test_should_not_sync_files_in_test_mode(self, app_config):
35 |         """Sync should be disabled in test mode regardless of config."""
36 |         app_config.sync_changes = True
37 |         container = McpContainer(config=app_config, mode=RuntimeMode.TEST)
38 |         assert container.should_sync_files is False
39 | 
40 |     def test_should_not_sync_files_in_cloud_mode(self, app_config):
41 |         """Sync should be disabled in cloud mode (cloud handles sync differently)."""
42 |         app_config.sync_changes = True
43 |         container = McpContainer(config=app_config, mode=RuntimeMode.CLOUD)
44 |         assert container.should_sync_files is False
45 | 
46 | 
47 | class TestSyncSkipReason:
48 |     """Tests for sync_skip_reason property."""
49 | 
50 |     def test_skip_reason_in_test_mode(self, app_config):
51 |         """Returns test message when in test mode."""
52 |         container = McpContainer(config=app_config, mode=RuntimeMode.TEST)
53 |         assert container.sync_skip_reason == "Test environment detected"
54 | 
55 |     def test_skip_reason_in_cloud_mode(self, app_config):
56 |         """Returns cloud message when in cloud mode."""
57 |         container = McpContainer(config=app_config, mode=RuntimeMode.CLOUD)
58 |         assert container.sync_skip_reason == "Cloud mode enabled"
59 | 
60 |     def test_skip_reason_when_sync_disabled(self, app_config):
61 |         """Returns disabled message when sync is disabled."""
62 |         app_config.sync_changes = False
63 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
64 |         assert container.sync_skip_reason == "Sync changes disabled"
65 | 
66 |     def test_no_skip_reason_when_should_sync(self, app_config):
67 |         """Returns None when sync should run."""
68 |         app_config.sync_changes = True
69 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
70 |         assert container.sync_skip_reason is None
71 | 
72 | 
73 | class TestContainerAccessors:
74 |     """Tests for container get/set functions."""
75 | 
76 |     def test_get_container_raises_when_not_set(self, monkeypatch):
77 |         """get_container raises RuntimeError when container not initialized."""
78 |         import basic_memory.mcp.container as container_module
79 | 
80 |         monkeypatch.setattr(container_module, "_container", None)
81 | 
82 |         with pytest.raises(RuntimeError, match="MCP container not initialized"):
83 |             get_container()
84 | 
85 |     def test_set_and_get_container(self, app_config, monkeypatch):
86 |         """set_container allows get_container to return the container."""
87 |         import basic_memory.mcp.container as container_module
88 | 
89 |         container = McpContainer(config=app_config, mode=RuntimeMode.LOCAL)
90 |         monkeypatch.setattr(container_module, "_container", None)
91 | 
92 |         set_container(container)
93 |         assert get_container() is container
94 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/bisync_commands.py:
--------------------------------------------------------------------------------

```python
  1 | """Cloud bisync utility functions for Basic Memory CLI."""
  2 | 
  3 | from pathlib import Path
  4 | 
  5 | from basic_memory.cli.commands.cloud.api_client import make_api_request
  6 | from basic_memory.config import ConfigManager
  7 | from basic_memory.ignore_utils import create_default_bmignore, get_bmignore_path
  8 | from basic_memory.schemas.cloud import MountCredentials, TenantMountInfo
  9 | 
 10 | 
 11 | class BisyncError(Exception):
 12 |     """Exception raised for bisync-related errors."""
 13 | 
 14 |     pass
 15 | 
 16 | 
 17 | async def get_mount_info() -> TenantMountInfo:
 18 |     """Get current tenant information from cloud API."""
 19 |     try:
 20 |         config_manager = ConfigManager()
 21 |         config = config_manager.config
 22 |         host_url = config.cloud_host.rstrip("/")
 23 | 
 24 |         response = await make_api_request(method="GET", url=f"{host_url}/tenant/mount/info")
 25 | 
 26 |         return TenantMountInfo.model_validate(response.json())
 27 |     except Exception as e:
 28 |         raise BisyncError(f"Failed to get tenant info: {e}") from e
 29 | 
 30 | 
 31 | async def generate_mount_credentials(tenant_id: str) -> MountCredentials:
 32 |     """Generate scoped credentials for syncing."""
 33 |     try:
 34 |         config_manager = ConfigManager()
 35 |         config = config_manager.config
 36 |         host_url = config.cloud_host.rstrip("/")
 37 | 
 38 |         response = await make_api_request(method="POST", url=f"{host_url}/tenant/mount/credentials")
 39 | 
 40 |         return MountCredentials.model_validate(response.json())
 41 |     except Exception as e:
 42 |         raise BisyncError(f"Failed to generate credentials: {e}") from e
 43 | 
 44 | 
 45 | def convert_bmignore_to_rclone_filters() -> Path:
 46 |     """Convert .bmignore patterns to rclone filter format.
 47 | 
 48 |     Reads ~/.basic-memory/.bmignore (gitignore-style) and converts to
 49 |     ~/.basic-memory/.bmignore.rclone (rclone filter format).
 50 | 
 51 |     Only regenerates if .bmignore has been modified since last conversion.
 52 | 
 53 |     Returns:
 54 |         Path to converted rclone filter file
 55 |     """
 56 |     # Ensure .bmignore exists
 57 |     create_default_bmignore()
 58 | 
 59 |     bmignore_path = get_bmignore_path()
 60 |     # Create rclone filter path: ~/.basic-memory/.bmignore -> ~/.basic-memory/.bmignore.rclone
 61 |     rclone_filter_path = bmignore_path.parent / f"{bmignore_path.name}.rclone"
 62 | 
 63 |     # Skip regeneration if rclone file is newer than bmignore
 64 |     if rclone_filter_path.exists():
 65 |         bmignore_mtime = bmignore_path.stat().st_mtime
 66 |         rclone_mtime = rclone_filter_path.stat().st_mtime
 67 |         if rclone_mtime >= bmignore_mtime:
 68 |             return rclone_filter_path
 69 | 
 70 |     # Read .bmignore patterns
 71 |     patterns = []
 72 |     try:
 73 |         with bmignore_path.open("r", encoding="utf-8") as f:
 74 |             for line in f:
 75 |                 line = line.strip()
 76 |                 # Keep comments and empty lines
 77 |                 if not line or line.startswith("#"):
 78 |                     patterns.append(line)
 79 |                     continue
 80 | 
 81 |                 # Convert gitignore pattern to rclone filter syntax
 82 |                 # gitignore: node_modules  → rclone: - node_modules/**
 83 |                 # gitignore: *.pyc        → rclone: - *.pyc
 84 |                 if "*" in line:
 85 |                     # Pattern already has wildcard, just add exclude prefix
 86 |                     patterns.append(f"- {line}")
 87 |                 else:
 88 |                     # Directory pattern - add /** for recursive exclude
 89 |                     patterns.append(f"- {line}/**")
 90 | 
 91 |     except Exception:
 92 |         # If we can't read the file, create a minimal filter
 93 |         patterns = ["# Error reading .bmignore, using minimal filters", "- .git/**"]
 94 | 
 95 |     # Write rclone filter file
 96 |     rclone_filter_path.write_text("\n".join(patterns) + "\n")
 97 | 
 98 |     return rclone_filter_path
 99 | 
100 | 
101 | def get_bisync_filter_path() -> Path:
102 |     """Get path to bisync filter file.
103 | 
104 |     Uses ~/.basic-memory/.bmignore (converted to rclone format).
105 |     The file is automatically created with default patterns on first use.
106 | 
107 |     Returns:
108 |         Path to rclone filter file
109 |     """
110 |     return convert_bmignore_to_rclone_filters()
111 | 
```
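
The gitignore-to-rclone mapping in `convert_bmignore_to_rclone_filters` reduces to three cases; a pure-function restatement for illustration (editor's sketch, no files touched):

```python
def to_rclone_filter(line: str) -> str:
    """Mirror the conversion rules used above (illustrative only)."""
    line = line.strip()
    if not line or line.startswith("#"):
        return line          # comments and blank lines pass through
    if "*" in line:
        return f"- {line}"   # wildcard pattern: exclude as-is
    return f"- {line}/**"    # bare name: exclude recursively

for raw in ["# deps", "node_modules", "*.pyc", ".git"]:
    print(to_rclone_filter(raw))
# -> '# deps', '- node_modules/**', '- *.pyc', '- .git/**'
```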

--------------------------------------------------------------------------------
/tests/utils/test_timezone_utils.py:
--------------------------------------------------------------------------------

```python
 1 | """Tests for timezone utilities."""
 2 | 
 3 | from datetime import datetime, timezone
 4 | 
 5 | 
 6 | from basic_memory.utils import ensure_timezone_aware
 7 | 
 8 | 
 9 | class TestEnsureTimezoneAware:
10 |     """Tests for ensure_timezone_aware function."""
11 | 
12 |     def test_already_timezone_aware_returns_unchanged(self):
13 |         """Timezone-aware datetime should be returned unchanged."""
14 |         dt = datetime(2024, 1, 15, 12, 30, 0, tzinfo=timezone.utc)
15 |         result = ensure_timezone_aware(dt)
16 |         assert result == dt
17 |         assert result.tzinfo == timezone.utc
18 | 
19 |     def test_naive_datetime_cloud_mode_true_interprets_as_utc(self):
20 |         """In cloud mode, naive datetimes should be interpreted as UTC."""
21 |         naive_dt = datetime(2024, 1, 15, 12, 30, 0)
22 |         result = ensure_timezone_aware(naive_dt, cloud_mode=True)
23 | 
24 |         # Should have UTC timezone
25 |         assert result.tzinfo == timezone.utc
26 |         # Time values should be unchanged (just tagged as UTC)
27 |         assert result.year == 2024
28 |         assert result.month == 1
29 |         assert result.day == 15
30 |         assert result.hour == 12
31 |         assert result.minute == 30
32 | 
33 |     def test_naive_datetime_cloud_mode_false_interprets_as_local(self):
34 |         """In local mode, naive datetimes should be interpreted as local time."""
35 |         naive_dt = datetime(2024, 1, 15, 12, 30, 0)
36 |         result = ensure_timezone_aware(naive_dt, cloud_mode=False)
37 | 
38 |         # Should have some timezone info (local)
39 |         assert result.tzinfo is not None
40 |         # The datetime should be converted to local timezone
41 |         # We can't assert exact timezone as it depends on system
42 | 
43 |     def test_cloud_mode_true_does_not_shift_time(self):
44 |         """Cloud mode should use replace() not astimezone() - time values unchanged."""
45 |         naive_dt = datetime(2024, 6, 15, 18, 0, 0)  # Summer time
46 |         result = ensure_timezone_aware(naive_dt, cloud_mode=True)
47 | 
48 |         # Hour should remain 18, not be shifted by timezone offset
49 |         assert result.hour == 18
50 |         assert result.tzinfo == timezone.utc
51 | 
52 |     def test_explicit_cloud_mode_skips_config_loading(self):
53 |         """When cloud_mode is explicitly passed, config should not be loaded."""
54 |         # This test verifies we can call ensure_timezone_aware without
55 |         # triggering ConfigManager import when cloud_mode is explicit
56 |         naive_dt = datetime(2024, 1, 15, 12, 30, 0)
57 | 
58 |         # Should work without any config setup
59 |         result_cloud = ensure_timezone_aware(naive_dt, cloud_mode=True)
60 |         assert result_cloud.tzinfo == timezone.utc
61 | 
62 |         result_local = ensure_timezone_aware(naive_dt, cloud_mode=False)
63 |         assert result_local.tzinfo is not None
64 | 
65 |     def test_none_cloud_mode_falls_back_to_config(self, config_manager):
66 |         """When cloud_mode is None, should load from config."""
67 |         naive_dt = datetime(2024, 1, 15, 12, 30, 0)
68 |         # Use the real config file (via test fixtures) rather than mocking.
69 |         cfg = config_manager.config
70 |         cfg.cloud_mode = True
71 |         config_manager.save_config(cfg)
72 | 
73 |         result = ensure_timezone_aware(naive_dt, cloud_mode=None)
74 | 
75 |         # Should have used cloud mode (UTC)
76 |         assert result.tzinfo == timezone.utc
77 | 
78 |     def test_asyncpg_naive_utc_scenario(self):
79 |         """Simulate asyncpg returning naive datetime that's actually UTC.
80 | 
81 |         asyncpg binary protocol returns timestamps in UTC but as naive datetimes.
82 |         In cloud mode, we interpret these as UTC rather than local time.
83 |         """
84 |         # Simulate what asyncpg returns: a naive datetime that's actually UTC
85 |         asyncpg_result = datetime(2024, 1, 15, 18, 30, 0)  # 6:30 PM UTC
86 | 
87 |         # In cloud mode, interpret as UTC
88 |         cloud_result = ensure_timezone_aware(asyncpg_result, cloud_mode=True)
89 |         assert cloud_result == datetime(2024, 1, 15, 18, 30, 0, tzinfo=timezone.utc)
90 | 
91 |         # The hour should remain 18, not shifted
92 |         assert cloud_result.hour == 18
93 | 
```
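
Taken together, these tests pin down a small contract; a minimal sketch consistent with them (editor's illustration; the real function also falls back to config when `cloud_mode` is None):

```python
from datetime import datetime, timezone

def ensure_timezone_aware_sketch(dt: datetime, cloud_mode: bool) -> datetime:
    if dt.tzinfo is not None:
        return dt                               # already aware: unchanged
    if cloud_mode:
        return dt.replace(tzinfo=timezone.utc)  # tag as UTC, no time shift
    return dt.astimezone()                      # interpret as local time

naive = datetime(2024, 6, 15, 18, 0, 0)
assert ensure_timezone_aware_sketch(naive, cloud_mode=True).hour == 18
```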

--------------------------------------------------------------------------------
/tests/markdown/test_relation_edge_cases.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for edge cases in relation parsing."""
  2 | 
  3 | from markdown_it import MarkdownIt
  4 | 
  5 | from basic_memory.markdown.plugins import relation_plugin, parse_relation, parse_inline_relations
  6 | from basic_memory.markdown.schemas import Relation
  7 | 
  8 | 
  9 | def test_empty_targets():
 10 |     """Test handling of empty targets."""
 11 |     md = MarkdownIt().use(relation_plugin)
 12 | 
 13 |     # Empty brackets
 14 |     tokens = md.parse("- type [[]]")
 15 |     token = next(t for t in tokens if t.type == "inline")
 16 |     assert parse_relation(token) is None
 17 | 
 18 |     # Only spaces
 19 |     tokens = md.parse("- type [[ ]]")
 20 |     token = next(t for t in tokens if t.type == "inline")
 21 |     assert parse_relation(token) is None
 22 | 
 23 |     # Whitespace in brackets
 24 |     tokens = md.parse("- type [[   ]]")
 25 |     token = next(t for t in tokens if t.type == "inline")
 26 |     assert parse_relation(token) is None
 27 | 
 28 | 
 29 | def test_malformed_links():
 30 |     """Test handling of malformed wiki links."""
 31 |     md = MarkdownIt().use(relation_plugin)
 32 | 
 33 |     # Missing close brackets
 34 |     tokens = md.parse("- type [[Target")
 35 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 36 | 
 37 |     # Missing open brackets
 38 |     tokens = md.parse("- type Target]]")
 39 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 40 | 
 41 |     # Backwards brackets
 42 |     tokens = md.parse("- type ]]Target[[")
 43 |     assert not any(t.meta and "relations" in t.meta for t in tokens)
 44 | 
 45 |     # Nested brackets
 46 |     tokens = md.parse("- type [[Outer [[Inner]] ]]")
 47 |     token = next(t for t in tokens if t.type == "inline")
 48 |     rel = parse_relation(token)
 49 |     assert rel is not None
 50 |     assert "Outer" in rel["target"]
 51 | 
 52 | 
 53 | def test_context_handling():
 54 |     """Test handling of contexts."""
 55 |     md = MarkdownIt().use(relation_plugin)
 56 | 
 57 |     # Unclosed context
 58 |     tokens = md.parse("- type [[Target]] (unclosed")
 59 |     token = next(t for t in tokens if t.type == "inline")
 60 |     rel = parse_relation(token)
 61 |     assert rel["context"] is None
 62 | 
 63 |     # Multiple parens
 64 |     tokens = md.parse("- type [[Target]] (with (nested) parens)")
 65 |     token = next(t for t in tokens if t.type == "inline")
 66 |     rel = parse_relation(token)
 67 |     assert rel["context"] == "with (nested) parens"
 68 | 
 69 |     # Empty context
 70 |     tokens = md.parse("- type [[Target]] ()")
 71 |     token = next(t for t in tokens if t.type == "inline")
 72 |     rel = parse_relation(token)
 73 |     assert rel["context"] is None
 74 | 
 75 | 
 76 | def test_inline_relations():
 77 |     """Test inline relation detection."""
 78 |     md = MarkdownIt().use(relation_plugin)
 79 | 
 80 |     # Multiple links in text
 81 |     text = "Text with [[Link1]] and [[Link2]] and [[Link3]]"
 82 |     rels = parse_inline_relations(text)
 83 |     assert len(rels) == 3
 84 |     assert {r["target"] for r in rels} == {"Link1", "Link2", "Link3"}
 85 | 
 86 |     # Links with surrounding text
 87 |     text = "Before [[Target]] After"
 88 |     rels = parse_inline_relations(text)
 89 |     assert len(rels) == 1
 90 |     assert rels[0]["target"] == "Target"
 91 | 
 92 |     # Multiple links on same line
 93 |     tokens = md.parse("[[One]] [[Two]] [[Three]]")
 94 |     token = next(t for t in tokens if t.type == "inline")
 95 |     assert len(token.meta["relations"]) == 3
 96 | 
 97 | 
 98 | def test_unicode_targets():
 99 |     """Test handling of Unicode in targets."""
100 |     md = MarkdownIt().use(relation_plugin)
101 | 
102 |     # Unicode in target
103 |     tokens = md.parse("- type [[测试]]")
104 |     token = next(t for t in tokens if t.type == "inline")
105 |     rel = parse_relation(token)
106 |     assert rel["target"] == "测试"
107 | 
108 |     # Unicode in type
109 |     tokens = md.parse("- 使用 [[Target]]")
110 |     token = next(t for t in tokens if t.type == "inline")
111 |     rel = parse_relation(token)
112 |     assert rel["type"] == "使用"
113 | 
114 |     # Unicode in context
115 |     tokens = md.parse("- type [[Target]] (测试)")
116 |     token = next(t for t in tokens if t.type == "inline")
117 |     rel = parse_relation(token)
118 |     assert rel["context"] == "测试"
119 | 
120 |     # Model validation with Unicode
121 |     relation = Relation.model_validate(rel)
122 |     assert relation.type == "type"
123 |     assert relation.target == "Target"
124 |     assert relation.context == "测试"
125 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/schemas/search.py:
--------------------------------------------------------------------------------

```python
  1 | """Search schemas for Basic Memory.
  2 | 
  3 | The search system supports three primary modes:
  4 | 1. Exact permalink lookup
  5 | 2. Pattern matching with *
  6 | 3. Full-text search across content
  7 | """
  8 | 
  9 | from typing import Optional, List, Union
 10 | from datetime import datetime
 11 | from enum import Enum
 12 | from pydantic import BaseModel, field_validator
 13 | 
 14 | from basic_memory.schemas.base import Permalink
 15 | 
 16 | 
 17 | class SearchItemType(str, Enum):
 18 |     """Types of searchable items."""
 19 | 
 20 |     ENTITY = "entity"
 21 |     OBSERVATION = "observation"
 22 |     RELATION = "relation"
 23 | 
 24 | 
 25 | class SearchQuery(BaseModel):
 26 |     """Search query parameters.
 27 | 
 28 |     Use ONE of these primary search modes:
 29 |     - permalink: Exact permalink match
 30 |     - permalink_match: Path pattern with *
 31 |     - text: Full-text search of title/content (supports boolean operators: AND, OR, NOT)
 32 | 
 33 |     Optionally filter results by:
 34 |     - types: Limit to specific item types
 35 |     - entity_types: Limit to specific entity types
 36 |     - after_date: Only items after date
 37 | 
 38 |     Boolean search examples:
 39 |     - "python AND flask" - Find items with both terms
 40 |     - "python OR django" - Find items with either term
 41 |     - "python NOT django" - Find items with python but not django
 42 |     - "(python OR flask) AND web" - Use parentheses for grouping
 43 |     """
 44 | 
 45 |     # Primary search modes (use ONE of these)
 46 |     permalink: Optional[str] = None  # Exact permalink match
 47 |     permalink_match: Optional[str] = None  # Glob permalink match
 48 |     text: Optional[str] = None  # Full-text search (now supports boolean operators)
 49 |     title: Optional[str] = None  # title only search
 50 | 
 51 |     # Optional filters
 52 |     types: Optional[List[str]] = None  # Filter by type
 53 |     entity_types: Optional[List[SearchItemType]] = None  # Filter by entity type
 54 |     after_date: Optional[Union[datetime, str]] = None  # Time-based filter
 55 | 
 56 |     @field_validator("after_date")
 57 |     @classmethod
 58 |     def validate_date(cls, v: Optional[Union[datetime, str]]) -> Optional[str]:
 59 |         """Convert datetime to ISO format if needed."""
 60 |         if isinstance(v, datetime):
 61 |             return v.isoformat()
 62 |         return v
 63 | 
 64 |     def no_criteria(self) -> bool:
 65 |         return (
 66 |             self.permalink is None
 67 |             and self.permalink_match is None
 68 |             and self.title is None
 69 |             and self.text is None
 70 |             and self.after_date is None
 71 |             and self.types is None
 72 |             and self.entity_types is None
 73 |         )
 74 | 
 75 |     def has_boolean_operators(self) -> bool:
 76 |         """Check if the text query contains boolean operators (AND, OR, NOT)."""
 77 |         if not self.text:  # pragma: no cover
 78 |             return False
 79 | 
 80 |         # Check for common boolean operators with correct word boundaries
 81 |         # to avoid matching substrings like "GRAND" containing "AND"
 82 |         boolean_patterns = [" AND ", " OR ", " NOT ", "(", ")"]
 83 |         text = f" {self.text} "  # Add spaces to ensure we match word boundaries
 84 |         return any(pattern in text for pattern in boolean_patterns)
 85 | 
 86 | 
 87 | class SearchResult(BaseModel):
 88 |     """Search result with score and metadata."""
 89 | 
 90 |     title: str
 91 |     type: SearchItemType
 92 |     score: float
 93 |     entity: Optional[Permalink] = None
 94 |     permalink: Optional[str]
 95 |     content: Optional[str] = None
 96 |     file_path: str
 97 | 
 98 |     metadata: Optional[dict] = None
 99 | 
100 |     # IDs for v2 API consistency
101 |     entity_id: Optional[int] = None  # Entity ID (always present for entities)
102 |     observation_id: Optional[int] = None  # Observation ID (for observation results)
103 |     relation_id: Optional[int] = None  # Relation ID (for relation results)
104 | 
105 |     # Type-specific fields
106 |     category: Optional[str] = None  # For observations
107 |     from_entity: Optional[Permalink] = None  # For relations
108 |     to_entity: Optional[Permalink] = None  # For relations
109 |     relation_type: Optional[str] = None  # For relations
110 | 
111 | 
112 | class SearchResponse(BaseModel):
113 |     """Wrapper for search results."""
114 | 
115 |     results: List[SearchResult]
116 |     current_page: int
117 |     page_size: int
118 | 
```
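
A short sketch of `SearchQuery` in use (illustrative):

```python
from basic_memory.schemas.search import SearchItemType, SearchQuery

q = SearchQuery(text="(python OR flask) AND web", entity_types=[SearchItemType.ENTITY])
assert not q.no_criteria()        # at least one criterion is set
assert q.has_boolean_operators()  # parentheses and " AND " both trip detection

plain = SearchQuery(text="python flask")
assert not plain.has_boolean_operators()  # treated as plain full-text search
```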

--------------------------------------------------------------------------------
/tests/api/v2/test_directory_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for V2 directory API routes (ID-based endpoints)."""
  2 | 
  3 | import pytest
  4 | from httpx import AsyncClient
  5 | 
  6 | from basic_memory.models import Project
  7 | from basic_memory.schemas.directory import DirectoryNode
  8 | 
  9 | 
 10 | @pytest.mark.asyncio
 11 | async def test_get_directory_tree(
 12 |     client: AsyncClient,
 13 |     test_project: Project,
 14 |     v2_project_url: str,
 15 | ):
 16 |     """Test getting directory tree via v2 endpoint."""
 17 |     response = await client.get(f"{v2_project_url}/directory/tree")
 18 | 
 19 |     assert response.status_code == 200
 20 |     tree = DirectoryNode.model_validate(response.json())
 21 |     assert tree.type == "directory"
 22 | 
 23 | 
 24 | @pytest.mark.asyncio
 25 | async def test_get_directory_structure(
 26 |     client: AsyncClient,
 27 |     test_project: Project,
 28 |     v2_project_url: str,
 29 | ):
 30 |     """Test getting directory structure (folders only) via v2 endpoint."""
 31 |     response = await client.get(f"{v2_project_url}/directory/structure")
 32 | 
 33 |     assert response.status_code == 200
 34 |     structure = DirectoryNode.model_validate(response.json())
 35 |     assert structure.type == "directory"
 36 |     # Structure should only contain directories, not files
 37 |     if structure.children:
 38 |         for child in structure.children:
 39 |             assert child.type == "directory"
 40 | 
 41 | 
 42 | @pytest.mark.asyncio
 43 | async def test_list_directory_default(
 44 |     client: AsyncClient,
 45 |     test_project: Project,
 46 |     v2_project_url: str,
 47 | ):
 48 |     """Test listing directory contents with default parameters via v2 endpoint."""
 49 |     response = await client.get(f"{v2_project_url}/directory/list")
 50 | 
 51 |     assert response.status_code == 200
 52 |     nodes = response.json()
 53 |     assert isinstance(nodes, list)
 54 | 
 55 | 
 56 | @pytest.mark.asyncio
 57 | async def test_list_directory_with_depth(
 58 |     client: AsyncClient,
 59 |     test_project: Project,
 60 |     v2_project_url: str,
 61 | ):
 62 |     """Test listing directory with custom depth via v2 endpoint."""
 63 |     response = await client.get(f"{v2_project_url}/directory/list?depth=2")
 64 | 
 65 |     assert response.status_code == 200
 66 |     nodes = response.json()
 67 |     assert isinstance(nodes, list)
 68 | 
 69 | 
 70 | @pytest.mark.asyncio
 71 | async def test_list_directory_with_glob(
 72 |     client: AsyncClient,
 73 |     test_project: Project,
 74 |     v2_project_url: str,
 75 | ):
 76 |     """Test listing directory with file name glob filter via v2 endpoint."""
 77 |     response = await client.get(f"{v2_project_url}/directory/list?file_name_glob=*.md")
 78 | 
 79 |     assert response.status_code == 200
 80 |     nodes = response.json()
 81 |     assert isinstance(nodes, list)
 82 |     # All file nodes should have .md extension
 83 |     for node in nodes:
 84 |         if node.get("type") == "file":
 85 |             assert node.get("path", "").endswith(".md")
 86 | 
 87 | 
 88 | @pytest.mark.asyncio
 89 | async def test_list_directory_with_custom_path(
 90 |     client: AsyncClient,
 91 |     test_project: Project,
 92 |     v2_project_url: str,
 93 | ):
 94 |     """Test listing a specific directory path via v2 endpoint."""
 95 |     response = await client.get(f"{v2_project_url}/directory/list?dir_name=/")
 96 | 
 97 |     assert response.status_code == 200
 98 |     nodes = response.json()
 99 |     assert isinstance(nodes, list)
100 | 
101 | 
102 | @pytest.mark.asyncio
103 | async def test_directory_invalid_project_id(
104 |     client: AsyncClient,
105 | ):
106 |     """Test directory endpoints with invalid project ID return 404."""
107 |     # Test tree endpoint
108 |     response = await client.get("/v2/projects/999999/directory/tree")
109 |     assert response.status_code == 404
110 | 
111 |     # Test structure endpoint
112 |     response = await client.get("/v2/projects/999999/directory/structure")
113 |     assert response.status_code == 404
114 | 
115 |     # Test list endpoint
116 |     response = await client.get("/v2/projects/999999/directory/list")
117 |     assert response.status_code == 404
118 | 
119 | 
120 | @pytest.mark.asyncio
121 | async def test_v2_directory_endpoints_use_project_id_not_name(
122 |     client: AsyncClient, test_project: Project
123 | ):
124 |     """Verify v2 directory endpoints require project ID, not name."""
125 |     # Try using project name instead of ID - should fail
126 |     response = await client.get(f"/v2/projects/{test_project.name}/directory/tree")
127 | 
128 |     # Should get validation error or 404 because name is not a valid integer
129 |     assert response.status_code in [404, 422]
130 | 
```

--------------------------------------------------------------------------------
/test-int/BENCHMARKS.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Performance Benchmarks
  2 | 
  3 | This directory contains performance benchmark tests for Basic Memory's sync/indexing operations.
  4 | 
  5 | ## Purpose
  6 | 
  7 | These benchmarks measure baseline performance to track improvements from optimizations. They are particularly important for:
  8 | - Cloud deployments with ephemeral databases that need fast re-indexing
  9 | - Large repositories (100s to 1000s of files)
 10 | - Validating optimization efforts
 11 | 
 12 | ## Running Benchmarks
 13 | 
 14 | ### Run all benchmarks (excluding slow ones)
 15 | ```bash
 16 | pytest test-int/test_sync_performance_benchmark.py -v -m "benchmark and not slow"
 17 | ```
 18 | 
 19 | ### Run specific benchmark
 20 | ```bash
 21 | # 100 files (fast, ~10-30 seconds)
 22 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_100_files -v
 23 | 
 24 | # 500 files (medium, ~1-3 minutes)
 25 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_500_files -v
 26 | 
 27 | # 1000 files (slow, ~3-10 minutes)
 28 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_sync_1000_files -v
 29 | 
 30 | # Re-sync with no changes (tests scan performance)
 31 | pytest test-int/test_sync_performance_benchmark.py::test_benchmark_resync_no_changes -v
 32 | ```
 33 | 
 34 | ### Run all benchmarks including slow ones
 35 | ```bash
 36 | pytest test-int/test_sync_performance_benchmark.py -v -m benchmark
 37 | ```
 38 | 
 39 | ### Skip benchmarks in regular test runs
 40 | ```bash
 41 | pytest -m "not benchmark"
 42 | ```
 43 | 
 44 | ## Benchmark Output
 45 | 
 46 | Each benchmark provides detailed metrics including:
 47 | 
 48 | - **Performance Metrics**:
 49 |   - Total sync time
 50 |   - Files processed per second
 51 |   - Milliseconds per file
 52 | 
 53 | - **Database Metrics**:
 54 |   - Initial database size
 55 |   - Final database size
 56 |   - Database growth (total and per file)
 57 | 
 58 | - **Operation Counts**:
 59 |   - New files indexed
 60 |   - Modified files processed
 61 |   - Deleted files handled
 62 |   - Moved files tracked
 63 | 
 64 | ## Example Output
 65 | 
 66 | ```
 67 | ======================================================================
 68 | BENCHMARK: Sync 100 files (small repository)
 69 | ======================================================================
 70 | 
 71 | Generating 100 test files...
 72 |   Created files 0-100 (100/100)
 73 |   File generation completed in 0.15s (666.7 files/sec)
 74 | 
 75 | Initial database size: 120.00 KB
 76 | 
 77 | Starting sync of 100 files...
 78 | 
 79 | ----------------------------------------------------------------------
 80 | RESULTS:
 81 | ----------------------------------------------------------------------
 82 | Files processed:      100
 83 |   New:                100
 84 |   Modified:           0
 85 |   Deleted:            0
 86 |   Moved:              0
 87 | 
 88 | Performance:
 89 |   Total time:         12.34s
 90 |   Files/sec:          8.1
 91 |   ms/file:            123.4
 92 | 
 93 | Database:
 94 |   Initial size:       120.00 KB
 95 |   Final size:         5.23 MB
 96 |   Growth:             5.11 MB
 97 |   Growth per file:    52.31 KB
 98 | ======================================================================
 99 | ```
100 | 
101 | ## Interpreting Results
102 | 
103 | ### Good Performance Indicators
104 | - **Files/sec > 10**: Good indexing speed for small-medium repos
105 | - **Files/sec > 5**: Acceptable for large repos with complex relations
106 | - **DB growth < 100KB per file**: Reasonable index size
107 | 
108 | ### Areas for Improvement
109 | - **Files/sec < 5**: May benefit from batch operations
110 | - **ms/file > 200**: High latency per file, check for N+1 queries
111 | - **DB growth > 200KB per file**: Search index may be bloated (trigrams?)
112 | 
113 | ## Tracking Improvements
114 | 
115 | Before making optimizations:
116 | 1. Run benchmarks to establish baseline
117 | 2. Save output for comparison
118 | 3. Note any particular pain points (e.g., slow search indexing)
119 | 
120 | After optimizations:
121 | 1. Run the same benchmarks
122 | 2. Compare metrics:
123 |    - Files/sec should increase
124 |    - ms/file should decrease
125 |    - DB growth per file may decrease (with search optimizations)
126 | 3. Document improvements in PR
127 | 
128 | ## Related Issues
129 | 
130 | - [#351: Performance: Optimize sync/indexing for cloud deployments](https://github.com/basicmachines-co/basic-memory/issues/351)
131 | 
132 | ## Test File Generation
133 | 
134 | Benchmarks generate realistic markdown files with:
135 | - YAML frontmatter with tags
136 | - 3-10 observations per file with categories
137 | - 1-3 relations per file (including forward references)
138 | - Varying content to simulate real usage
139 | - Files organized in category subdirectories
140 | 
```
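
The headline numbers in the example output above are simple ratios of the reported totals; a quick arithmetic check:

```python
files, total_seconds = 100, 12.34
print(f"{files / total_seconds:.1f} files/sec")       # 8.1
print(f"{total_seconds * 1000 / files:.1f} ms/file")  # 123.4
```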

--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/cc7172b46608_update_search_index_schema.py:
--------------------------------------------------------------------------------

```python
  1 | """Update search index schema
  2 | 
  3 | Revision ID: cc7172b46608
  4 | Revises: 502b60eaa905
  5 | Create Date: 2025-02-28 18:48:23.244941
  6 | 
  7 | """
  8 | 
  9 | from typing import Sequence, Union
 10 | 
 11 | from alembic import op
 12 | 
 13 | 
 14 | # revision identifiers, used by Alembic.
 15 | revision: str = "cc7172b46608"
 16 | down_revision: Union[str, None] = "502b60eaa905"
 17 | branch_labels: Union[str, Sequence[str], None] = None
 18 | depends_on: Union[str, Sequence[str], None] = None
 19 | 
 20 | 
 21 | def upgrade() -> None:
 22 |     """Upgrade database schema to use new search index with content_stems and content_snippet."""
 23 | 
 24 |     # This migration is SQLite-specific (FTS5 virtual tables)
 25 |     # For Postgres, the search_index table is created via ORM models
 26 |     connection = op.get_bind()
 27 |     if connection.dialect.name != "sqlite":
 28 |         return
 29 | 
 30 |     # First, drop the existing search_index table
 31 |     op.execute("DROP TABLE IF EXISTS search_index")
 32 | 
 33 |     # Create new search_index with updated schema
 34 |     op.execute("""
 35 |     CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
 36 |         -- Core entity fields
 37 |         id UNINDEXED,          -- Row ID
 38 |         title,                 -- Title for searching
 39 |         content_stems,         -- Main searchable content split into stems
 40 |         content_snippet,       -- File content snippet for display
 41 |         permalink,             -- Stable identifier (now indexed for path search)
 42 |         file_path UNINDEXED,   -- Physical location
 43 |         type UNINDEXED,        -- entity/relation/observation
 44 |         
 45 |         -- Relation fields 
 46 |         from_id UNINDEXED,     -- Source entity
 47 |         to_id UNINDEXED,       -- Target entity
 48 |         relation_type UNINDEXED, -- Type of relation
 49 |         
 50 |         -- Observation fields
 51 |         entity_id UNINDEXED,   -- Parent entity
 52 |         category UNINDEXED,    -- Observation category
 53 |         
 54 |         -- Common fields
 55 |         metadata UNINDEXED,    -- JSON metadata
 56 |         created_at UNINDEXED,  -- Creation timestamp
 57 |         updated_at UNINDEXED,  -- Last update
 58 |         
 59 |         -- Configuration
 60 |         tokenize='unicode61 tokenchars 0x2F',  -- Hex code for /
 61 |         prefix='1,2,3,4'                    -- Support longer prefixes for paths
 62 |     );
 63 |     """)
 64 | 
 65 | 
 66 | def downgrade() -> None:
 67 |     """Downgrade database schema to use old search index."""
 68 | 
 69 |     # This migration is SQLite-specific (FTS5 virtual tables)
 70 |     # For Postgres, the search_index table is managed via ORM models
 71 |     connection = op.get_bind()
 72 |     if connection.dialect.name != "sqlite":
 73 |         return
 74 | 
 75 |     # Drop the updated search_index table
 76 |     op.execute("DROP TABLE IF EXISTS search_index")
 77 | 
 78 |     # Recreate the original search_index schema
 79 |     op.execute("""
 80 |     CREATE VIRTUAL TABLE IF NOT EXISTS search_index USING fts5(
 81 |         -- Core entity fields
 82 |         id UNINDEXED,          -- Row ID
 83 |         title,                 -- Title for searching
 84 |         content,               -- Main searchable content
 85 |         permalink,             -- Stable identifier (now indexed for path search)
 86 |         file_path UNINDEXED,   -- Physical location
 87 |         type UNINDEXED,        -- entity/relation/observation
 88 |         
 89 |         -- Relation fields 
 90 |         from_id UNINDEXED,     -- Source entity
 91 |         to_id UNINDEXED,       -- Target entity
 92 |         relation_type UNINDEXED, -- Type of relation
 93 |         
 94 |         -- Observation fields
 95 |         entity_id UNINDEXED,   -- Parent entity
 96 |         category UNINDEXED,    -- Observation category
 97 |         
 98 |         -- Common fields
 99 |         metadata UNINDEXED,    -- JSON metadata
100 |         created_at UNINDEXED,  -- Creation timestamp
101 |         updated_at UNINDEXED,  -- Last update
102 |         
103 |         -- Configuration
104 |         tokenize='unicode61 tokenchars 0x2F',  -- Hex code for /
105 |         prefix='1,2,3,4'                    -- Support longer prefixes for paths
106 |     );
107 |     """)
108 | 
109 |     # Print instruction to manually reindex after migration
110 |     print("\n------------------------------------------------------------------")
111 |     print("IMPORTANT: After downgrade completes, manually run the reindex command:")
112 |     print("basic-memory sync")
113 |     print("------------------------------------------------------------------\n")
114 | 
```
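
Two FTS5 settings above do the heavy lifting for path search: keeping `/` as a token character so permalinks tokenize as whole path segments, and a prefix index to speed prefix queries. A minimal standalone sketch of that behavior (assuming a Python build whose bundled SQLite includes FTS5; the tokenchars quoting here uses the simpler documented form rather than the migration's hex spelling):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE VIRTUAL TABLE search_demo USING fts5(
        permalink,
        tokenize = "unicode61 tokenchars '/'",
        prefix = '1,2,3,4'
    )
    """
)
conn.executemany(
    "INSERT INTO search_demo (permalink) VALUES (?)",
    [("design/model-refactor",), ("design/api-notes",), ("notes/daily-log",)],
)

# Because '/' is a token character, "design/model" is a single token, so the
# prefix query below matches only design/model-refactor.
rows = conn.execute(
    "SELECT permalink FROM search_demo WHERE search_demo MATCH ?",
    ('"design/model"*',),
)
print([r[0] for r in rows])  # ['design/model-refactor']
```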

--------------------------------------------------------------------------------
/tests/utils/test_permalink_formatting.py:
--------------------------------------------------------------------------------

```python
  1 | """Test permalink formatting during sync."""
  2 | 
  3 | from pathlib import Path
  4 | 
  5 | import pytest
  6 | 
  7 | from basic_memory.config import ProjectConfig
  8 | from basic_memory.services import EntityService
  9 | from basic_memory.sync.sync_service import SyncService
 10 | from basic_memory.utils import generate_permalink
 11 | 
 12 | 
 13 | async def create_test_file(path: Path, content: str = "test content") -> None:
 14 |     """Create a test file with given content."""
 15 |     path.parent.mkdir(parents=True, exist_ok=True)
 16 |     path.write_text(content)
 17 | 
 18 | 
 19 | @pytest.mark.asyncio
 20 | async def test_permalink_formatting(
 21 |     sync_service: SyncService, project_config: ProjectConfig, entity_service: EntityService
 22 | ):
 23 |     """Test that permalinks are properly formatted during sync.
 24 | 
 25 |     This ensures:
 26 |     - Underscores are converted to hyphens
 27 |     - Spaces are converted to hyphens
 28 |     - Mixed case is lowercased
 29 |     - Directory structure is preserved
 30 |     - Multiple directories work correctly
 31 |     """
 32 |     project_dir = project_config.home
 33 | 
 34 |     # Test cases with different filename formats
 35 |     test_cases = [
 36 |         # filename -> expected permalink
 37 |         ("my_awesome_feature.md", "my-awesome-feature"),
 38 |         ("MIXED_CASE_NAME.md", "mixed-case-name"),
 39 |         ("spaces and_underscores.md", "spaces-and-underscores"),
 40 |         ("design/model_refactor.md", "design/model-refactor"),
 41 |         (
 42 |             "test/multiple_word_directory/feature_name.md",
 43 |             "test/multiple-word-directory/feature-name",
 44 |         ),
 45 |     ]
 46 | 
 47 |     # Create test files
 48 |     for filename, _ in test_cases:
 49 |         content = """
 50 | ---
 51 | type: knowledge
 52 | created: 2024-01-01
 53 | modified: 2024-01-01
 54 | ---
 55 | # Test File
 56 | 
 57 | Testing permalink generation.
 58 | """
 59 |         await create_test_file(project_dir / filename, content)
 60 | 
 61 |     # Run sync
 62 |     await sync_service.sync(project_config.home)
 63 | 
 64 |     # Verify permalinks
 65 |     for filename, expected_permalink in test_cases:
 66 |         entity = await entity_service.repository.get_by_file_path(filename)
 67 |         assert entity.permalink == expected_permalink, (
 68 |             f"File {filename} should have permalink {expected_permalink}"
 69 |         )
 70 | 
 71 | 
 72 | @pytest.mark.parametrize(
 73 |     "input_path, expected",
 74 |     [
 75 |         ("test/Über File.md", "test/uber-file"),
 76 |         ("docs/résumé.md", "docs/resume"),
 77 |         ("notes/Déjà vu.md", "notes/deja-vu"),
 78 |         ("papers/Jürgen's Findings.md", "papers/jurgens-findings"),
 79 |         ("archive/François Müller.md", "archive/francois-muller"),
 80 |         ("research/Søren Kierkegård.md", "research/soren-kierkegard"),
 81 |         ("articles/El Niño.md", "articles/el-nino"),
 82 |         ("ArticlesElNiño.md", "articles-el-nino"),
 83 |         ("articleselniño.md", "articleselnino"),
 84 |         ("articles-El-Niño.md", "articles-el-nino"),
 85 |     ],
 86 | )
 87 | def test_latin_accents_transliteration(input_path, expected):
 88 |     """Test that Latin letters with accents are properly transliterated."""
 89 |     assert generate_permalink(input_path) == expected
 90 | 
 91 | 
 92 | @pytest.mark.parametrize(
 93 |     "input_path, expected",
 94 |     [
 95 |         ("中文/测试文档.md", "中文/测试文档"),
 96 |         ("notes/北京市.md", "notes/北京市"),
 97 |         ("research/上海简介.md", "research/上海简介"),
 98 |         ("docs/中文 English Mixed.md", "docs/中文-english-mixed"),
 99 |         ("articles/东京Tokyo混合.md", "articles/东京-tokyo-混合"),
100 |         ("papers/汉字_underscore_test.md", "papers/汉字-underscore-test"),
101 |         ("projects/中文CamelCase测试.md", "projects/中文-camel-case-测试"),
102 |     ],
103 | )
104 | def test_chinese_character_preservation(input_path, expected):
105 |     """Test that Chinese characters are preserved in permalinks."""
106 |     assert generate_permalink(input_path) == expected
107 | 
108 | 
109 | @pytest.mark.parametrize(
110 |     "input_path, expected",
111 |     [
112 |         ("mixed/北京Café.md", "mixed/北京-cafe"),
113 |         ("notes/东京Tōkyō.md", "notes/东京-tokyo"),
114 |         ("research/München中文.md", "research/munchen-中文"),
115 |         ("docs/Über测试.md", "docs/uber-测试"),
116 |         ("complex/北京Beijing上海Shanghai.md", "complex/北京-beijing-上海-shanghai"),
117 |         ("special/中文!@#$%^&*()_+.md", "special/中文"),
118 |         ("punctuation/你好,世界!.md", "punctuation/你好世界"),
119 |     ],
120 | )
121 | def test_mixed_character_sets(input_path, expected):
122 |     """Test handling of mixed character sets and edge cases."""
123 |     assert generate_permalink(input_path) == expected
124 | 
```
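
The rules these tests pin down can be approximated in a few lines. A rough standalone sketch for illustration, not the repo's generate_permalink; it deliberately omits the CamelCase splitting and CJK/Latin boundary hyphens the parametrized cases above also cover:

```python
import re
import unicodedata


def naive_permalink(path: str) -> str:
    """Approximate permalink rules: strip Latin accents, lowercase, hyphenate."""
    stem = path.removesuffix(".md")
    # Decompose accented Latin characters, then drop the combining marks.
    # CJK characters have no decomposition, so they pass through unchanged.
    decomposed = unicodedata.normalize("NFKD", stem)
    stripped = "".join(c for c in decomposed if not unicodedata.combining(c))
    lowered = stripped.lower()
    # Collapse runs of spaces and underscores into single hyphens, per segment.
    segments = [re.sub(r"[\s_]+", "-", seg) for seg in lowered.split("/")]
    return "/".join(segments)


assert naive_permalink("design/model_refactor.md") == "design/model-refactor"
assert naive_permalink("test/Über File.md") == "test/uber-file"
```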

--------------------------------------------------------------------------------
/tests/importers/test_importer_base.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for the base importer class."""
  2 | 
  3 | import pytest
  4 | 
  5 | from basic_memory.importers.base import Importer
  6 | from basic_memory.markdown.entity_parser import EntityParser
  7 | from basic_memory.markdown.markdown_processor import MarkdownProcessor
  8 | from basic_memory.markdown.schemas import EntityFrontmatter, EntityMarkdown
  9 | from basic_memory.schemas.importer import ImportResult
 10 | from basic_memory.services.file_service import FileService
 11 | 
 12 | 
 13 | # Create a concrete implementation of the abstract class for testing
 14 | class ConcreteTestImporter(Importer[ImportResult]):
 15 |     """Test implementation of Importer base class."""
 16 | 
 17 |     async def import_data(self, source_data, destination_folder: str, **kwargs):
 18 |         """Implement the abstract method for testing."""
 19 |         try:
 20 |             # Test implementation that returns success
 21 |             await self.ensure_folder_exists(destination_folder)
 22 |             return ImportResult(
 23 |                 import_count={"files": 1},
 24 |                 success=True,
 25 |                 error_message=None,
 26 |             )
 27 |         except Exception as e:
 28 |             return self.handle_error("Test import failed", e)
 29 | 
 30 |     def handle_error(self, message: str, error=None) -> ImportResult:
 31 |         """Implement the abstract handle_error method."""
 32 |         import logging
 33 | 
 34 |         logger = logging.getLogger(__name__)
 35 | 
 36 |         error_message = f"{message}"
 37 |         if error:
 38 |             error_message += f": {str(error)}"
 39 | 
 40 |         logger.error(error_message)
 41 |         return ImportResult(
 42 |             import_count={},
 43 |             success=False,
 44 |             error_message=error_message,
 45 |         )
 46 | 
 47 | 
 48 | @pytest.fixture
 49 | def test_importer(tmp_path):
 50 |     """Create a ConcreteTestImporter instance for testing."""
 51 |     entity_parser = EntityParser(base_path=tmp_path)
 52 |     markdown_processor = MarkdownProcessor(entity_parser=entity_parser)
 53 |     file_service = FileService(base_path=tmp_path, markdown_processor=markdown_processor)
 54 |     return ConcreteTestImporter(tmp_path, markdown_processor, file_service)
 55 | 
 56 | 
 57 | @pytest.mark.asyncio
 58 | async def test_import_data_success(test_importer):
 59 |     """Test successful import_data implementation."""
 60 |     result = await test_importer.import_data({}, "test_folder")
 61 |     assert result.success
 62 |     assert result.import_count == {"files": 1}
 63 |     assert result.error_message is None
 64 | 
 65 |     assert (test_importer.base_path / "test_folder").exists()
 66 | 
 67 | 
 68 | @pytest.mark.asyncio
 69 | async def test_write_entity(test_importer, tmp_path):
 70 |     """Test write_entity method."""
 71 |     # Create test entity
 72 |     entity = EntityMarkdown(
 73 |         frontmatter=EntityFrontmatter(metadata={"title": "Test Entity", "type": "note"}),
 74 |         content="Test content",
 75 |         observations=[],
 76 |         relations=[],
 77 |     )
 78 | 
 79 |     # Call write_entity
 80 |     file_path = tmp_path / "test_entity.md"
 81 |     checksum = await test_importer.write_entity(entity, file_path)
 82 | 
 83 |     assert file_path.exists()
 84 |     assert len(checksum) == 64  # sha256 hex digest
 85 |     assert file_path.read_text(encoding="utf-8").strip() != ""
 86 | 
 87 | 
 88 | @pytest.mark.asyncio
 89 | async def test_ensure_folder_exists(test_importer):
 90 |     """Test ensure_folder_exists method."""
 91 |     # Test with simple folder - now passes relative path to FileService
 92 |     await test_importer.ensure_folder_exists("test_folder")
 93 |     assert (test_importer.base_path / "test_folder").exists()
 94 | 
 95 |     # Test with nested folder - FileService handles base_path resolution
 96 |     await test_importer.ensure_folder_exists("nested/folder/path")
 97 |     assert (test_importer.base_path / "nested/folder/path").exists()
 98 | 
 99 | 
100 | @pytest.mark.asyncio
101 | async def test_handle_error(test_importer):
102 |     """Test handle_error method."""
103 |     # Test with message only
104 |     result = test_importer.handle_error("Test error message")
105 |     assert not result.success
106 |     assert result.error_message == "Test error message"
107 |     assert result.import_count == {}
108 | 
109 |     # Test with message and exception
110 |     test_exception = ValueError("Test exception")
111 |     result = test_importer.handle_error("Error occurred", test_exception)
112 |     assert not result.success
113 |     assert "Error occurred" in result.error_message
114 |     assert "Test exception" in result.error_message
115 |     assert result.import_count == {}
116 | 
```

--------------------------------------------------------------------------------
/tests/services/test_initialization.py:
--------------------------------------------------------------------------------

```python
  1 | """Integration-style tests for the initialization service.
  2 | 
  3 | Goal: avoid brittle deep mocking; assert real behavior using the existing
  4 | test config + dual-backend fixtures.
  5 | """
  6 | 
  7 | from __future__ import annotations
  8 | 
  9 | import pytest
 10 | 
 11 | from basic_memory import db
 12 | from basic_memory.config import BasicMemoryConfig, DatabaseBackend
 13 | from basic_memory.repository.project_repository import ProjectRepository
 14 | from basic_memory.services.initialization import (
 15 |     ensure_initialization,
 16 |     initialize_database,
 17 |     reconcile_projects_with_config,
 18 | )
 19 | 
 20 | 
 21 | @pytest.mark.asyncio
 22 | async def test_initialize_database_creates_engine_and_allows_queries(app_config: BasicMemoryConfig):
 23 |     await db.shutdown_db()
 24 |     try:
 25 |         await initialize_database(app_config)
 26 | 
 27 |         engine, session_maker = await db.get_or_create_db(app_config.database_path)
 28 |         assert engine is not None
 29 |         assert session_maker is not None
 30 | 
 31 |         # Smoke query on the initialized DB
 32 |         async with db.scoped_session(session_maker) as session:
 33 |             result = await session.execute(db.text("SELECT 1"))
 34 |             assert result.scalar() == 1
 35 |     finally:
 36 |         await db.shutdown_db()
 37 | 
 38 | 
 39 | @pytest.mark.asyncio
 40 | async def test_initialize_database_raises_on_invalid_postgres_config(
 41 |     app_config: BasicMemoryConfig, config_manager
 42 | ):
 43 |     """If config selects Postgres but has no DATABASE_URL, initialization should fail."""
 44 |     await db.shutdown_db()
 45 |     try:
 46 |         bad_config = app_config.model_copy(
 47 |             update={"database_backend": DatabaseBackend.POSTGRES, "database_url": None}
 48 |         )
 49 |         config_manager.save_config(bad_config)
 50 | 
 51 |         with pytest.raises(ValueError):
 52 |             await initialize_database(bad_config)
 53 |     finally:
 54 |         await db.shutdown_db()
 55 | 
 56 | 
 57 | @pytest.mark.asyncio
 58 | async def test_reconcile_projects_with_config_creates_projects_and_default(
 59 |     app_config: BasicMemoryConfig, config_manager, config_home
 60 | ):
 61 |     await db.shutdown_db()
 62 |     try:
 63 |         # Ensure the configured paths exist
 64 |         proj_a = config_home / "proj-a"
 65 |         proj_b = config_home / "proj-b"
 66 |         proj_a.mkdir(parents=True, exist_ok=True)
 67 |         proj_b.mkdir(parents=True, exist_ok=True)
 68 | 
 69 |         updated = app_config.model_copy(
 70 |             update={
 71 |                 "projects": {"proj-a": str(proj_a), "proj-b": str(proj_b)},
 72 |                 "default_project": "proj-b",
 73 |             }
 74 |         )
 75 |         config_manager.save_config(updated)
 76 | 
 77 |         # Real DB init + reconcile
 78 |         await initialize_database(updated)
 79 |         await reconcile_projects_with_config(updated)
 80 | 
 81 |         _, session_maker = await db.get_or_create_db(
 82 |             updated.database_path, db_type=db.DatabaseType.FILESYSTEM
 83 |         )
 84 |         repo = ProjectRepository(session_maker)
 85 | 
 86 |         active = await repo.get_active_projects()
 87 |         names = {p.name for p in active}
 88 |         assert names.issuperset({"proj-a", "proj-b"})
 89 | 
 90 |         default = await repo.get_default_project()
 91 |         assert default is not None
 92 |         assert default.name == "proj-b"
 93 |     finally:
 94 |         await db.shutdown_db()
 95 | 
 96 | 
 97 | @pytest.mark.asyncio
 98 | async def test_reconcile_projects_with_config_swallow_errors(
 99 |     monkeypatch, app_config: BasicMemoryConfig
100 | ):
101 |     """reconcile_projects_with_config should not raise if ProjectService sync fails."""
102 |     await db.shutdown_db()
103 |     try:
104 |         await initialize_database(app_config)
105 | 
106 |         async def boom(self):  # noqa: ANN001
107 |             raise ValueError("Project synchronization error")
108 | 
109 |         monkeypatch.setattr(
110 |             "basic_memory.services.project_service.ProjectService.synchronize_projects",
111 |             boom,
112 |         )
113 | 
114 |         # Should not raise
115 |         await reconcile_projects_with_config(app_config)
116 |     finally:
117 |         await db.shutdown_db()
118 | 
119 | 
120 | def test_ensure_initialization_runs_and_cleans_up(app_config: BasicMemoryConfig, config_manager):
121 |     # ensure_initialization uses asyncio.run; keep this test synchronous.
122 |     ensure_initialization(app_config)
123 | 
124 |     # Must be cleaned up to avoid hanging processes.
125 |     assert db._engine is None  # pyright: ignore [reportPrivateUsage]
126 |     assert db._session_maker is None  # pyright: ignore [reportPrivateUsage]
127 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/api/container.py:
--------------------------------------------------------------------------------

```python
  1 | """API composition root for Basic Memory.
  2 | 
  3 | This container owns reading ConfigManager and environment variables for the
  4 | API entrypoint. Downstream modules receive config/dependencies explicitly
  5 | rather than reading globals.
  6 | 
  7 | Design principles:
  8 | - Only this module reads ConfigManager directly
  9 | - Runtime mode (cloud/local/test) is resolved here
 10 | - Factories for services are provided, not singletons
 11 | """
 12 | 
 13 | from dataclasses import dataclass
 14 | from typing import TYPE_CHECKING
 15 | 
 16 | from sqlalchemy.ext.asyncio import AsyncEngine, async_sessionmaker, AsyncSession
 17 | 
 18 | from basic_memory import db
 19 | from basic_memory.config import BasicMemoryConfig, ConfigManager
 20 | from basic_memory.runtime import RuntimeMode, resolve_runtime_mode
 21 | 
 22 | if TYPE_CHECKING:  # pragma: no cover
 23 |     from basic_memory.sync import SyncCoordinator
 24 | 
 25 | 
 26 | @dataclass
 27 | class ApiContainer:
 28 |     """Composition root for the API entrypoint.
 29 | 
 30 |     Holds resolved configuration and runtime context.
 31 |     Created once at app startup, then used to wire dependencies.
 32 |     """
 33 | 
 34 |     config: BasicMemoryConfig
 35 |     mode: RuntimeMode
 36 | 
 37 |     # --- Database ---
 38 |     # Cached database connections (set during lifespan startup)
 39 |     engine: AsyncEngine | None = None
 40 |     session_maker: async_sessionmaker[AsyncSession] | None = None
 41 | 
 42 |     @classmethod
 43 |     def create(cls) -> "ApiContainer":  # pragma: no cover
 44 |         """Create container by reading ConfigManager.
 45 | 
 46 |         This is the single point where API reads global config.
 47 |         """
 48 |         config = ConfigManager().config
 49 |         mode = resolve_runtime_mode(
 50 |             cloud_mode_enabled=config.cloud_mode_enabled,
 51 |             is_test_env=config.is_test_env,
 52 |         )
 53 |         return cls(config=config, mode=mode)
 54 | 
 55 |     # --- Runtime Mode Properties ---
 56 | 
 57 |     @property
 58 |     def should_sync_files(self) -> bool:
 59 |         """Whether file sync should be started.
 60 | 
 61 |         Sync is enabled when:
 62 |         - sync_changes is True in config
 63 |         - Not in test mode (tests manage their own sync)
 64 |         """
 65 |         return self.config.sync_changes and not self.mode.is_test
 66 | 
 67 |     @property
 68 |     def sync_skip_reason(self) -> str | None:  # pragma: no cover
 69 |         """Reason why sync is skipped, or None if sync should run.
 70 | 
 71 |         Useful for logging why sync was disabled.
 72 |         """
 73 |         if self.mode.is_test:
 74 |             return "Test environment detected"
 75 |         if not self.config.sync_changes:
 76 |             return "Sync changes disabled"
 77 |         return None
 78 | 
 79 |     def create_sync_coordinator(self) -> "SyncCoordinator":  # pragma: no cover
 80 |         """Create a SyncCoordinator with this container's settings.
 81 | 
 82 |         Returns:
 83 |             SyncCoordinator configured for this runtime environment
 84 |         """
 85 |         # Deferred import to avoid circular dependency
 86 |         from basic_memory.sync import SyncCoordinator
 87 | 
 88 |         return SyncCoordinator(
 89 |             config=self.config,
 90 |             should_sync=self.should_sync_files,
 91 |             skip_reason=self.sync_skip_reason,
 92 |         )
 93 | 
 94 |     # --- Database Factory ---
 95 | 
 96 |     async def init_database(  # pragma: no cover
 97 |         self,
 98 |     ) -> tuple[AsyncEngine, async_sessionmaker[AsyncSession]]:
 99 |         """Initialize and cache database connections.
100 | 
101 |         Returns:
102 |             Tuple of (engine, session_maker)
103 |         """
104 |         engine, session_maker = await db.get_or_create_db(self.config.database_path)
105 |         self.engine = engine
106 |         self.session_maker = session_maker
107 |         return engine, session_maker
108 | 
109 |     async def shutdown_database(self) -> None:  # pragma: no cover
110 |         """Clean up database connections."""
111 |         await db.shutdown_db()
112 | 
113 | 
114 | # Module-level container instance (set by lifespan)
115 | # This allows deps.py to access the container without reading ConfigManager
116 | _container: ApiContainer | None = None
117 | 
118 | 
119 | def get_container() -> ApiContainer:
120 |     """Get the current API container.
121 | 
122 |     Raises:
123 |         RuntimeError: If container hasn't been initialized
124 |     """
125 |     if _container is None:
126 |         raise RuntimeError("API container not initialized. Call set_container() first.")
127 |     return _container
128 | 
129 | 
130 | def set_container(container: ApiContainer) -> None:
131 |     """Set the API container (called by lifespan)."""
132 |     global _container
133 |     _container = container
134 | 
```
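
A short sketch of how downstream request code can consume the container. The helper name here is hypothetical; per the module comment above, the real wiring lives in deps.py:

```python
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker

from basic_memory.api.container import get_container


def get_session_maker() -> async_sessionmaker[AsyncSession]:
    """Resolve the cached session maker from the composition root."""
    container = get_container()
    if container.session_maker is None:
        # init_database() runs during lifespan; before that, fail loudly.
        raise RuntimeError("Database not initialized; lifespan must run first.")
    return container.session_maker
```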

--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/cloud/upload_command.py:
--------------------------------------------------------------------------------

```python
  1 | """Upload CLI commands for basic-memory projects."""
  2 | 
  3 | from pathlib import Path
  4 | 
  5 | import typer
  6 | from rich.console import Console
  7 | 
  8 | from basic_memory.cli.app import cloud_app
  9 | from basic_memory.cli.commands.command_utils import run_with_cleanup
 10 | from basic_memory.cli.commands.cloud.cloud_utils import (
 11 |     create_cloud_project,
 12 |     project_exists,
 13 |     sync_project,
 14 | )
 15 | from basic_memory.cli.commands.cloud.upload import upload_path
 16 | 
 17 | console = Console()
 18 | 
 19 | 
 20 | @cloud_app.command("upload")
 21 | def upload(
 22 |     path: Path = typer.Argument(
 23 |         ...,
 24 |         help="Path to local file or directory to upload",
 25 |         exists=True,
 26 |         readable=True,
 27 |         resolve_path=True,
 28 |     ),
 29 |     project: str = typer.Option(
 30 |         ...,
 31 |         "--project",
 32 |         "-p",
 33 |         help="Cloud project name (destination)",
 34 |     ),
 35 |     create_project: bool = typer.Option(
 36 |         False,
 37 |         "--create-project",
 38 |         "-c",
 39 |         help="Create project if it doesn't exist",
 40 |     ),
 41 |     sync: bool = typer.Option(
 42 |         True,
 43 |         "--sync/--no-sync",
 44 |         help="Sync project after upload (default: true)",
 45 |     ),
 46 |     verbose: bool = typer.Option(
 47 |         False,
 48 |         "--verbose",
 49 |         "-v",
 50 |         help="Show detailed information about file filtering and upload",
 51 |     ),
 52 |     no_gitignore: bool = typer.Option(
 53 |         False,
 54 |         "--no-gitignore",
 55 |         help="Skip .gitignore patterns (still respects .bmignore)",
 56 |     ),
 57 |     dry_run: bool = typer.Option(
 58 |         False,
 59 |         "--dry-run",
 60 |         help="Show what would be uploaded without actually uploading",
 61 |     ),
 62 | ) -> None:
 63 |     """Upload local files or directories to cloud project via WebDAV.
 64 | 
 65 |     Examples:
 66 |       bm cloud upload ~/my-notes --project research
 67 |       bm cloud upload notes.md --project research --create-project
 68 |       bm cloud upload ~/docs --project work --no-sync
 69 |       bm cloud upload ./history --project proto --verbose
 70 |       bm cloud upload ./notes --project work --no-gitignore
 71 |       bm cloud upload ./files --project test --dry-run
 72 |     """
 73 | 
 74 |     async def _upload():
 75 |         # Check if project exists
 76 |         if not await project_exists(project):
 77 |             if create_project:
 78 |                 console.print(f"[blue]Creating cloud project '{project}'...[/blue]")
 79 |                 try:
 80 |                     await create_cloud_project(project)
 81 |                     console.print(f"[green]Created project '{project}'[/green]")
 82 |                 except Exception as e:
 83 |                     console.print(f"[red]Failed to create project: {e}[/red]")
 84 |                     raise typer.Exit(1)
 85 |             else:
 86 |                 console.print(
 87 |                     f"[red]Project '{project}' does not exist.[/red]\n"
 88 |                     f"[yellow]Options:[/yellow]\n"
 89 |                     f"  1. Create it first: bm project add {project}\n"
 90 |                     f"  2. Use --create-project flag to create automatically"
 91 |                 )
 92 |                 raise typer.Exit(1)
 93 | 
 94 |         # Perform upload (or dry run)
 95 |         if dry_run:
 96 |             console.print(
 97 |                 f"[yellow]DRY RUN: Showing what would be uploaded to '{project}'[/yellow]"
 98 |             )
 99 |         else:
100 |             console.print(f"[blue]Uploading {path} to project '{project}'...[/blue]")
101 | 
102 |         success = await upload_path(
103 |             path, project, verbose=verbose, use_gitignore=not no_gitignore, dry_run=dry_run
104 |         )
105 |         if not success:
106 |             console.print("[red]Upload failed[/red]")
107 |             raise typer.Exit(1)
108 | 
109 |         if dry_run:
110 |             console.print("[yellow]DRY RUN complete - no files were uploaded[/yellow]")
111 |         else:
112 |             console.print(f"[green]Successfully uploaded to '{project}'[/green]")
113 | 
114 |         # Sync project if requested (skip on dry run)
115 |         # Force full scan after upload to ensure database is up-to-date with uploaded files
116 |         if sync and not dry_run:
117 |             console.print(f"[blue]Syncing project '{project}'...[/blue]")
118 |             try:
119 |                 await sync_project(project, force_full=True)
120 |             except Exception as e:
121 |                 console.print(f"[yellow]Warning: Sync failed: {e}[/yellow]")
122 |                 console.print("[dim]Files uploaded but may not be indexed yet[/dim]")
123 | 
124 |     run_with_cleanup(_upload())
125 | 
```

--------------------------------------------------------------------------------
/tests/markdown/test_observation_edge_cases.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for edge cases in observation parsing."""
  2 | 
  3 | from markdown_it import MarkdownIt
  4 | 
  5 | from basic_memory.markdown.plugins import observation_plugin, parse_observation
  6 | from basic_memory.markdown.schemas import Observation
  7 | 
  8 | 
  9 | def test_empty_input():
 10 |     """Test handling of empty input."""
 11 |     md = MarkdownIt().use(observation_plugin)
 12 | 
 13 |     tokens = md.parse("")
 14 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 15 | 
 16 |     tokens = md.parse("   ")
 17 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 18 | 
 19 |     tokens = md.parse("\n")
 20 |     assert not any(t.meta and "observation" in t.meta for t in tokens)
 21 | 
 22 | 
 23 | def test_invalid_context():
 24 |     """Test handling of invalid context format."""
 25 |     md = MarkdownIt().use(observation_plugin)
 26 | 
 27 |     # Unclosed context
 28 |     tokens = md.parse("- [test] Content (unclosed")
 29 |     token = next(t for t in tokens if t.type == "inline")
 30 |     obs = parse_observation(token)
 31 |     assert obs["content"] == "Content (unclosed"
 32 |     assert obs["context"] is None
 33 | 
 34 |     # Multiple parens
 35 |     tokens = md.parse("- [test] Content (with) extra) parens)")
 36 |     token = next(t for t in tokens if t.type == "inline")
 37 |     obs = parse_observation(token)
 38 |     assert obs["content"] == "Content"
 39 |     assert obs["context"] == "with) extra) parens"
 40 | 
 41 | 
 42 | def test_complex_format():
 43 |     """Test parsing complex observation formats."""
 44 |     md = MarkdownIt().use(observation_plugin)
 45 | 
 46 |     # Multiple hashtags together
 47 |     tokens = md.parse("- [complex test] This is #tag1#tag2 with #tag3 content")
 48 |     token = next(t for t in tokens if t.type == "inline")
 49 | 
 50 |     obs = parse_observation(token)
 51 |     assert obs["category"] == "complex test"
 52 |     assert set(obs["tags"]) == {"tag1", "tag2", "tag3"}
 53 |     assert obs["content"] == "This is #tag1#tag2 with #tag3 content"
 54 | 
 55 |     # Pydantic model validation
 56 |     observation = Observation.model_validate(obs)
 57 |     assert observation.category == "complex test"
 58 |     assert set(observation.tags) == {"tag1", "tag2", "tag3"}
 59 |     assert observation.content == "This is #tag1#tag2 with #tag3 content"
 60 | 
 61 | 
 62 | def test_malformed_category():
 63 |     """Test handling of malformed category brackets."""
 64 |     md = MarkdownIt().use(observation_plugin)
 65 | 
 66 |     # Empty category
 67 |     tokens = md.parse("- [] Empty category")
 68 |     token = next(t for t in tokens if t.type == "inline")
 69 |     observation = Observation.model_validate(parse_observation(token))
 70 |     assert observation.category is None
 71 |     assert observation.content == "Empty category"
 72 | 
 73 |     # Missing close bracket
 74 |     tokens = md.parse("- [test Content")
 75 |     token = next(t for t in tokens if t.type == "inline")
 76 |     observation = Observation.model_validate(parse_observation(token))
 77 |     # Should treat whole thing as content
 78 |     assert observation.category is None
 79 |     assert "test Content" in observation.content
 80 | 
 81 | 
 82 | def test_no_category():
 83 |     """Test handling of malformed category brackets."""
 84 |     md = MarkdownIt().use(observation_plugin)
 85 | 
 86 |     # No category bracket at all
 87 |     tokens = md.parse("- No category")
 88 |     token = next(t for t in tokens if t.type == "inline")
 89 |     observation = Observation.model_validate(parse_observation(token))
 90 |     assert observation.category is None
 91 |     assert observation.content == "No category"
 92 | 
 93 | 
 94 | def test_unicode_content():
 95 |     """Test handling of Unicode content."""
 96 |     md = MarkdownIt().use(observation_plugin)
 97 | 
 98 |     # Emoji
 99 |     tokens = md.parse("- [test] Emoji test 👍 #emoji #test (Testing emoji)")
100 |     token = next(t for t in tokens if t.type == "inline")
101 |     obs = parse_observation(token)
102 |     assert "👍" in obs["content"]
103 |     assert "emoji" in obs["tags"]
104 | 
105 |     # Non-Latin scripts
106 |     tokens = md.parse("- [中文] Chinese text 测试 #language (Script test)")
107 |     token = next(t for t in tokens if t.type == "inline")
108 |     obs = parse_observation(token)
109 |     assert obs["category"] == "中文"
110 |     assert "测试" in obs["content"]
111 | 
112 |     # Mixed scripts and emoji
113 |     tokens = md.parse("- [test] Mixed 中文 and 👍 #mixed")
114 |     token = next(t for t in tokens if t.type == "inline")
115 |     obs = parse_observation(token)
116 |     assert "中文" in obs["content"]
117 |     assert "👍" in obs["content"]
118 | 
119 |     # Model validation with Unicode
120 |     observation = Observation.model_validate(obs)
121 |     assert "中文" in observation.content
122 |     assert "👍" in observation.content
123 | 
```
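
The grammar these edge cases exercise is `- [category] content #tags (optional trailing context)`. A simplified standalone parse for illustration (an assumption; the repo's parser is a markdown-it plugin, so behavior can differ at the margins):

```python
import re


def naive_parse_observation(line: str) -> dict:
    """Parse '- [category] content #tags (context)' without markdown-it."""
    text = line.removeprefix("-").strip()

    # Optional [category] prefix; empty brackets mean no category.
    category = None
    if text.startswith("[") and "]" in text:
        close = text.index("]")
        category = text[1:close].strip() or None
        text = text[close + 1 :].strip()

    # Optional trailing (context): from the first '(' through the final ')'.
    context = None
    if text.endswith(")") and "(" in text:
        start = text.index("(")
        context = text[start + 1 : -1]
        text = text[:start].strip()

    # Tags stay in the content; '#a#b' yields two tags.
    tags = re.findall(r"#([^\s#]+)", text)
    return {"category": category, "content": text, "tags": tags, "context": context}


assert naive_parse_observation("- [test] Content (with) extra) parens)") == {
    "category": "test",
    "content": "Content",
    "tags": [],
    "context": "with) extra) parens",
}
```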

--------------------------------------------------------------------------------
/src/basic_memory/api/app.py:
--------------------------------------------------------------------------------

```python
  1 | """FastAPI application for basic-memory knowledge graph API."""
  2 | 
  3 | from contextlib import asynccontextmanager
  4 | 
  5 | from fastapi import FastAPI, HTTPException
  6 | from fastapi.exception_handlers import http_exception_handler
  7 | from loguru import logger
  8 | 
  9 | from basic_memory import __version__ as version
 10 | from basic_memory.api.container import ApiContainer, set_container
 11 | from basic_memory.api.routers import (
 12 |     directory_router,
 13 |     importer_router,
 14 |     knowledge,
 15 |     management,
 16 |     memory,
 17 |     project,
 18 |     resource,
 19 |     search,
 20 |     prompt_router,
 21 | )
 22 | from basic_memory.api.v2.routers import (
 23 |     knowledge_router as v2_knowledge,
 24 |     project_router as v2_project,
 25 |     memory_router as v2_memory,
 26 |     search_router as v2_search,
 27 |     resource_router as v2_resource,
 28 |     directory_router as v2_directory,
 29 |     prompt_router as v2_prompt,
 30 |     importer_router as v2_importer,
 31 | )
 32 | from basic_memory.config import init_api_logging
 33 | from basic_memory.services.initialization import initialize_app
 34 | 
 35 | 
 36 | @asynccontextmanager
 37 | async def lifespan(app: FastAPI):  # pragma: no cover
 38 |     """Lifecycle manager for the FastAPI app. Not called in stdio mcp mode"""
 39 | 
 40 |     # Initialize logging for API (stdout in cloud mode, file otherwise)
 41 |     init_api_logging()
 42 | 
 43 |     # --- Composition Root ---
 44 |     # Create container and read config (single point of config access)
 45 |     container = ApiContainer.create()
 46 |     set_container(container)
 47 |     app.state.container = container
 48 | 
 49 |     logger.info(f"Starting Basic Memory API (mode={container.mode.name})")
 50 | 
 51 |     await initialize_app(container.config)
 52 | 
 53 |     # Cache database connections in app state for performance
 54 |     logger.info("Initializing database and caching connections...")
 55 |     engine, session_maker = await container.init_database()
 56 |     app.state.engine = engine
 57 |     app.state.session_maker = session_maker
 58 |     logger.info("Database connections cached in app state")
 59 | 
 60 |     # Create and start sync coordinator (lifecycle centralized in coordinator)
 61 |     sync_coordinator = container.create_sync_coordinator()
 62 |     await sync_coordinator.start()
 63 |     app.state.sync_coordinator = sync_coordinator
 64 | 
 65 |     # Proceed with startup
 66 |     yield
 67 | 
 68 |     # Shutdown - coordinator handles clean task cancellation
 69 |     logger.info("Shutting down Basic Memory API")
 70 |     await sync_coordinator.stop()
 71 | 
 72 |     await container.shutdown_database()
 73 | 
 74 | 
 75 | # Initialize FastAPI app
 76 | app = FastAPI(
 77 |     title="Basic Memory API",
 78 |     description="Knowledge graph API for basic-memory",
 79 |     version=version,
 80 |     lifespan=lifespan,
 81 | )
 82 | 
 83 | # Include v2 routers FIRST (more specific paths must match before /{project} catch-all)
 84 | app.include_router(v2_knowledge, prefix="/v2/projects/{project_id}")
 85 | app.include_router(v2_memory, prefix="/v2/projects/{project_id}")
 86 | app.include_router(v2_search, prefix="/v2/projects/{project_id}")
 87 | app.include_router(v2_resource, prefix="/v2/projects/{project_id}")
 88 | app.include_router(v2_directory, prefix="/v2/projects/{project_id}")
 89 | app.include_router(v2_prompt, prefix="/v2/projects/{project_id}")
 90 | app.include_router(v2_importer, prefix="/v2/projects/{project_id}")
 91 | app.include_router(v2_project, prefix="/v2")
 92 | 
 93 | # Include v1 routers (/{project} is a catch-all, must come after specific prefixes)
 94 | app.include_router(knowledge.router, prefix="/{project}")
 95 | app.include_router(memory.router, prefix="/{project}")
 96 | app.include_router(resource.router, prefix="/{project}")
 97 | app.include_router(search.router, prefix="/{project}")
 98 | app.include_router(project.project_router, prefix="/{project}")
 99 | app.include_router(directory_router.router, prefix="/{project}")
100 | app.include_router(prompt_router.router, prefix="/{project}")
101 | app.include_router(importer_router.router, prefix="/{project}")
102 | 
103 | # Project resource router works across projects
104 | app.include_router(project.project_resource_router)
105 | app.include_router(management.router)
106 | 
107 | 
108 | @app.exception_handler(Exception)
109 | async def exception_handler(request, exc):  # pragma: no cover
110 |     logger.exception(
111 |         "API unhandled exception",
112 |         url=str(request.url),
113 |         method=request.method,
114 |         client=request.client.host if request.client else None,
115 |         path=request.url.path,
116 |         error_type=type(exc).__name__,
117 |         error=str(exc),
118 |     )
119 |     return await http_exception_handler(request, HTTPException(status_code=500, detail=str(exc)))
120 | 
```
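
The registration order above is load-bearing: Starlette tries routes in the order they were added, so a `/{project}` catch-all registered first would shadow literal prefixes like `/v2`. A minimal standalone demonstration:

```python
from fastapi import FastAPI

demo = FastAPI()


@demo.get("/v2/status")  # specific route, registered first
def v2_status():
    return {"api": "v2"}


@demo.get("/{project}/status")  # catch-all, registered second
def project_status(project: str):
    return {"project": project}


# GET /v2/status -> {"api": "v2"}. If the catch-all were registered first,
# it would capture the request with project == "v2".
```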

--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Tests
  2 | 
  3 | on:
  4 |   push:
  5 |     branches: [ "main" ]
  6 |   pull_request:
  7 |     branches: [ "main" ]
  8 |   # pull_request_target runs on the BASE of the PR, not the merge result.
  9 |   # It has write permissions and access to secrets.
 10 |   # It's useful for PRs from forks or automated PRs but requires careful use for security reasons.
 11 |   # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
 12 |   pull_request_target:
 13 |     branches: [ "main" ]
 14 | 
 15 | jobs:
 16 |   test-sqlite:
 17 |     name: Test SQLite (${{ matrix.os }}, Python ${{ matrix.python-version }})
 18 |     strategy:
 19 |       fail-fast: false
 20 |       matrix:
 21 |         os: [ubuntu-latest, windows-latest]
 22 |         python-version: [ "3.12", "3.13", "3.14" ]
 23 |     runs-on: ${{ matrix.os }}
 24 | 
 25 |     steps:
 26 |       - uses: actions/checkout@v4
 27 |         with:
 28 |           submodules: true
 29 | 
 30 |       - name: Set up Python ${{ matrix.python-version }}
 31 |         uses: actions/setup-python@v4
 32 |         with:
 33 |           python-version: ${{ matrix.python-version }}
 34 |           cache: 'pip'
 35 | 
 36 |       - name: Install uv
 37 |         run: |
 38 |           pip install uv
 39 | 
 40 |       - name: Install just (Linux/macOS)
 41 |         if: runner.os != 'Windows'
 42 |         run: |
 43 |           curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin
 44 | 
 45 |       - name: Install just (Windows)
 46 |         if: runner.os == 'Windows'
 47 |         run: |
 48 |           # Install just using Chocolatey (pre-installed on GitHub Actions Windows runners)
 49 |           choco install just --yes
 50 |         shell: pwsh
 51 | 
 52 |       - name: Create virtual env
 53 |         run: |
 54 |           uv venv
 55 | 
 56 |       - name: Install dependencies
 57 |         run: |
 58 |           uv pip install -e .[dev]
 59 | 
 60 |       - name: Run type checks
 61 |         run: |
 62 |           just typecheck
 63 | 
 64 |       - name: Run linting
 65 |         run: |
 66 |           just lint
 67 | 
 68 |       - name: Run tests (SQLite)
 69 |         run: |
 70 |           uv pip install pytest pytest-cov
 71 |           just test-sqlite
 72 | 
 73 |   test-postgres:
 74 |     name: Test Postgres (Python ${{ matrix.python-version }})
 75 |     strategy:
 76 |       fail-fast: false
 77 |       matrix:
 78 |         python-version: [ "3.12", "3.13", "3.14" ]
 79 |     runs-on: ubuntu-latest
 80 | 
 81 |     # Note: No services section needed - testcontainers handles Postgres in Docker
 82 | 
 83 |     steps:
 84 |       - uses: actions/checkout@v4
 85 |         with:
 86 |           submodules: true
 87 | 
 88 |       - name: Set up Python ${{ matrix.python-version }}
 89 |         uses: actions/setup-python@v4
 90 |         with:
 91 |           python-version: ${{ matrix.python-version }}
 92 |           cache: 'pip'
 93 | 
 94 |       - name: Install uv
 95 |         run: |
 96 |           pip install uv
 97 | 
 98 |       - name: Install just
 99 |         run: |
100 |           curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin
101 | 
102 |       - name: Create virtual env
103 |         run: |
104 |           uv venv
105 | 
106 |       - name: Install dependencies
107 |         run: |
108 |           uv pip install -e .[dev]
109 | 
110 |       - name: Run tests (Postgres via testcontainers)
111 |         run: |
112 |           uv pip install pytest pytest-cov
113 |           just test-postgres
114 | 
115 |   coverage:
116 |     name: Coverage Summary (combined, Python 3.12)
117 |     runs-on: ubuntu-latest
118 | 
119 |     steps:
120 |       - uses: actions/checkout@v4
121 |         with:
122 |           submodules: true
123 | 
124 |       - name: Set up Python 3.12
125 |         uses: actions/setup-python@v4
126 |         with:
127 |           python-version: "3.12"
128 |           cache: "pip"
129 | 
130 |       - name: Install uv
131 |         run: |
132 |           pip install uv
133 | 
134 |       - name: Install just
135 |         run: |
136 |           curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --to /usr/local/bin
137 | 
138 |       - name: Create virtual env
139 |         run: |
140 |           uv venv
141 | 
142 |       - name: Install dependencies
143 |         run: |
144 |           uv pip install -e .[dev]
145 | 
146 |       - name: Run combined coverage (SQLite + Postgres)
147 |         run: |
148 |           uv pip install pytest pytest-cov
149 |           just coverage
150 | 
151 |       - name: Add coverage report to job summary
152 |         if: always()
153 |         run: |
154 |           {
155 |             echo "## Coverage"
156 |             echo ""
157 |             echo '```'
158 |             uv run coverage report -m
159 |             echo '```'
160 |           } >> "$GITHUB_STEP_SUMMARY"
161 | 
162 |       - name: Upload HTML coverage report
163 |         if: always()
164 |         uses: actions/upload-artifact@v4
165 |         with:
166 |           name: htmlcov
167 |           path: htmlcov/
168 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/3dae7c7b1564_initial_schema.py:
--------------------------------------------------------------------------------

```python
 1 | """initial schema
 2 | 
 3 | Revision ID: 3dae7c7b1564
 4 | Revises:
 5 | Create Date: 2025-02-12 21:23:00.336344
 6 | 
 7 | """
 8 | 
 9 | from typing import Sequence, Union
10 | 
11 | from alembic import op
12 | import sqlalchemy as sa
13 | 
14 | 
15 | # revision identifiers, used by Alembic.
16 | revision: str = "3dae7c7b1564"
17 | down_revision: Union[str, None] = None
18 | branch_labels: Union[str, Sequence[str], None] = None
19 | depends_on: Union[str, Sequence[str], None] = None
20 | 
21 | 
22 | def upgrade() -> None:
23 |     # ### commands auto generated by Alembic - please adjust! ###
24 |     op.create_table(
25 |         "entity",
26 |         sa.Column("id", sa.Integer(), nullable=False),
27 |         sa.Column("title", sa.String(), nullable=False),
28 |         sa.Column("entity_type", sa.String(), nullable=False),
29 |         sa.Column("entity_metadata", sa.JSON(), nullable=True),
30 |         sa.Column("content_type", sa.String(), nullable=False),
31 |         sa.Column("permalink", sa.String(), nullable=False),
32 |         sa.Column("file_path", sa.String(), nullable=False),
33 |         sa.Column("checksum", sa.String(), nullable=True),
34 |         sa.Column("created_at", sa.DateTime(), nullable=False),
35 |         sa.Column("updated_at", sa.DateTime(), nullable=False),
36 |         sa.PrimaryKeyConstraint("id"),
37 |         sa.UniqueConstraint("permalink", name="uix_entity_permalink"),
38 |     )
39 |     op.create_index("ix_entity_created_at", "entity", ["created_at"], unique=False)
40 |     op.create_index(op.f("ix_entity_file_path"), "entity", ["file_path"], unique=True)
41 |     op.create_index(op.f("ix_entity_permalink"), "entity", ["permalink"], unique=True)
42 |     op.create_index("ix_entity_title", "entity", ["title"], unique=False)
43 |     op.create_index("ix_entity_type", "entity", ["entity_type"], unique=False)
44 |     op.create_index("ix_entity_updated_at", "entity", ["updated_at"], unique=False)
45 |     op.create_table(
46 |         "observation",
47 |         sa.Column("id", sa.Integer(), nullable=False),
48 |         sa.Column("entity_id", sa.Integer(), nullable=False),
49 |         sa.Column("content", sa.Text(), nullable=False),
50 |         sa.Column("category", sa.String(), nullable=False),
51 |         sa.Column("context", sa.Text(), nullable=True),
52 |         sa.Column("tags", sa.JSON(), server_default="[]", nullable=True),
53 |         sa.ForeignKeyConstraint(["entity_id"], ["entity.id"], ondelete="CASCADE"),
54 |         sa.PrimaryKeyConstraint("id"),
55 |     )
56 |     op.create_index("ix_observation_category", "observation", ["category"], unique=False)
57 |     op.create_index("ix_observation_entity_id", "observation", ["entity_id"], unique=False)
58 |     op.create_table(
59 |         "relation",
60 |         sa.Column("id", sa.Integer(), nullable=False),
61 |         sa.Column("from_id", sa.Integer(), nullable=False),
62 |         sa.Column("to_id", sa.Integer(), nullable=True),
63 |         sa.Column("to_name", sa.String(), nullable=False),
64 |         sa.Column("relation_type", sa.String(), nullable=False),
65 |         sa.Column("context", sa.Text(), nullable=True),
66 |         sa.ForeignKeyConstraint(["from_id"], ["entity.id"], ondelete="CASCADE"),
67 |         sa.ForeignKeyConstraint(["to_id"], ["entity.id"], ondelete="CASCADE"),
68 |         sa.PrimaryKeyConstraint("id"),
69 |         sa.UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation"),
70 |     )
71 |     op.create_index("ix_relation_from_id", "relation", ["from_id"], unique=False)
72 |     op.create_index("ix_relation_to_id", "relation", ["to_id"], unique=False)
73 |     op.create_index("ix_relation_type", "relation", ["relation_type"], unique=False)
74 |     # ### end Alembic commands ###
75 | 
76 | 
77 | def downgrade() -> None:
78 |     # ### commands auto generated by Alembic - please adjust! ###
79 |     op.drop_index("ix_relation_type", table_name="relation")
80 |     op.drop_index("ix_relation_to_id", table_name="relation")
81 |     op.drop_index("ix_relation_from_id", table_name="relation")
82 |     op.drop_table("relation")
83 |     op.drop_index("ix_observation_entity_id", table_name="observation")
84 |     op.drop_index("ix_observation_category", table_name="observation")
85 |     op.drop_table("observation")
86 |     op.drop_index("ix_entity_updated_at", table_name="entity")
87 |     op.drop_index("ix_entity_type", table_name="entity")
88 |     op.drop_index("ix_entity_title", table_name="entity")
89 |     op.drop_index(op.f("ix_entity_permalink"), table_name="entity")
90 |     op.drop_index(op.f("ix_entity_file_path"), table_name="entity")
91 |     op.drop_index("ix_entity_created_at", table_name="entity")
92 |     op.drop_table("entity")
93 |     # ### end Alembic commands ###
94 | 
```
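
The schema encodes the knowledge graph: observations and relations cascade-delete with their parent entity, and (from_id, to_id, relation_type) is unique. A quick standalone check of the cascade behavior with plain sqlite3 (a simplified table mirroring the columns above; note SQLite only enforces cascades with foreign keys enabled):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON")  # required for ON DELETE CASCADE
conn.execute("CREATE TABLE entity (id INTEGER PRIMARY KEY, title TEXT NOT NULL)")
conn.execute(
    """
    CREATE TABLE observation (
        id INTEGER PRIMARY KEY,
        entity_id INTEGER NOT NULL REFERENCES entity(id) ON DELETE CASCADE,
        content TEXT NOT NULL
    )
    """
)
conn.execute("INSERT INTO entity (id, title) VALUES (1, 'note')")
conn.execute("INSERT INTO observation (entity_id, content) VALUES (1, 'obs')")

# Deleting the entity removes its observations automatically.
conn.execute("DELETE FROM entity WHERE id = 1")
print(conn.execute("SELECT COUNT(*) FROM observation").fetchone())  # (0,)
```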

--------------------------------------------------------------------------------
/src/basic_memory/cli/commands/db.py:
--------------------------------------------------------------------------------

```python
  1 | """Database management commands."""
  2 | 
  3 | from pathlib import Path
  4 | 
  5 | import typer
  6 | from loguru import logger
  7 | from rich.console import Console
  8 | from sqlalchemy.exc import OperationalError
  9 | 
 10 | from basic_memory import db
 11 | from basic_memory.cli.app import app
 12 | from basic_memory.cli.commands.command_utils import run_with_cleanup
 13 | from basic_memory.config import ConfigManager
 14 | from basic_memory.repository import ProjectRepository
 15 | from basic_memory.services.initialization import reconcile_projects_with_config
 16 | from basic_memory.sync.sync_service import get_sync_service
 17 | 
 18 | console = Console()
 19 | 
 20 | 
 21 | async def _reindex_projects(app_config):
 22 |     """Reindex all projects in a single async context.
 23 | 
 24 |     This ensures all database operations use the same event loop,
 25 |     and proper cleanup happens when the function completes.
 26 |     """
 27 |     try:
 28 |         await reconcile_projects_with_config(app_config)
 29 | 
 30 |         # Get database session (migrations already run if needed)
 31 |         _, session_maker = await db.get_or_create_db(
 32 |             db_path=app_config.database_path,
 33 |             db_type=db.DatabaseType.FILESYSTEM,
 34 |         )
 35 |         project_repository = ProjectRepository(session_maker)
 36 |         projects = await project_repository.get_active_projects()
 37 | 
 38 |         for project in projects:
 39 |             console.print(f"  Indexing [cyan]{project.name}[/cyan]...")
 40 |             logger.info(f"Starting sync for project: {project.name}")
 41 |             sync_service = await get_sync_service(project)
 42 |             sync_dir = Path(project.path)
 43 |             await sync_service.sync(sync_dir, project_name=project.name)
 44 |             logger.info(f"Sync completed for project: {project.name}")
 45 |     finally:
 46 |         # Clean up database connections before event loop closes
 47 |         await db.shutdown_db()
 48 | 
 49 | 
 50 | @app.command()
 51 | def reset(
 52 |     reindex: bool = typer.Option(False, "--reindex", help="Rebuild db index from filesystem"),
 53 | ):  # pragma: no cover
 54 |     """Reset database (drop all tables and recreate)."""
 55 |     console.print(
 56 |         "[yellow]Note:[/yellow] This only deletes the index database. "
 57 |         "Your markdown note files will not be affected.\n"
 58 |         "Use [green]bm reset --reindex[/green] to automatically rebuild the index afterward."
 59 |     )
 60 |     if typer.confirm("Reset the database index?"):
 61 |         logger.info("Resetting database...")
 62 |         config_manager = ConfigManager()
 63 |         app_config = config_manager.config
 64 |         # Get database path
 65 |         db_path = app_config.app_database_path
 66 | 
 67 |         # Delete the database file and WAL files if they exist
 68 |         for suffix in ["", "-shm", "-wal"]:
 69 |             path = db_path.parent / f"{db_path.name}{suffix}"
 70 |             if path.exists():
 71 |                 try:
 72 |                     path.unlink()
 73 |                     logger.info(f"Deleted: {path}")
 74 |                 except OSError as e:
 75 |                     console.print(
 76 |                         f"[red]Error:[/red] Cannot delete {path.name}: {e}\n"
 77 |                         "The database may be in use by another process (e.g., MCP server).\n"
 78 |                         "Please close Claude Desktop or any other Basic Memory clients and try again."
 79 |                     )
 80 |                     raise typer.Exit(1)
 81 | 
 82 |         # Create a new empty database (preserves project configuration)
 83 |         try:
 84 |             run_with_cleanup(db.run_migrations(app_config))
 85 |         except OperationalError as e:
 86 |             if "disk I/O error" in str(e) or "database is locked" in str(e):
 87 |                 console.print(
 88 |                     "[red]Error:[/red] Cannot access database. "
 89 |                     "It may be in use by another process (e.g., MCP server).\n"
 90 |                     "Please close Claude Desktop or any other Basic Memory clients and try again."
 91 |                 )
 92 |                 raise typer.Exit(1)
 93 |             raise
 94 |         console.print("[green]Database reset complete[/green]")
 95 | 
 96 |         if reindex:
 97 |             projects = list(app_config.projects)
 98 |             if not projects:
 99 |                 console.print("[yellow]No projects configured. Skipping reindex.[/yellow]")
100 |             else:
101 |                 console.print(f"Rebuilding search index for {len(projects)} project(s)...")
102 |                 # Note: _reindex_projects has its own cleanup, but run_with_cleanup
103 |                 # ensures db.shutdown_db() is called even if _reindex_projects changes
104 |                 run_with_cleanup(_reindex_projects(app_config))
105 |                 console.print("[green]Reindex complete[/green]")
106 | 
```
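
The comments above lean on run_with_cleanup guaranteeing db.shutdown_db() even when the wrapped coroutine fails. A minimal sketch of that pattern (an assumption; the real helper lives in basic_memory.cli.commands.command_utils and may differ):

```python
import asyncio
from typing import Any, Coroutine

from basic_memory import db


def run_with_cleanup_sketch(coro: Coroutine[Any, Any, Any]) -> None:
    """Run a coroutine to completion, always closing DB connections."""

    async def _wrapped() -> None:
        try:
            await coro
        finally:
            # Runs on success, error, or cancellation, before the loop closes.
            await db.shutdown_db()

    asyncio.run(_wrapped())
```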

--------------------------------------------------------------------------------
/tests/sync/test_coordinator.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for SyncCoordinator - centralized sync/watch lifecycle."""
  2 | 
  3 | import pytest
  4 | from unittest.mock import AsyncMock, patch
  5 | 
  6 | from basic_memory.config import BasicMemoryConfig
  7 | from basic_memory.sync.coordinator import SyncCoordinator, SyncStatus
  8 | 
  9 | 
 10 | class TestSyncCoordinator:
 11 |     """Test SyncCoordinator class."""
 12 | 
 13 |     @pytest.fixture
 14 |     def mock_config(self):
 15 |         """Create a mock config for testing."""
 16 |         return BasicMemoryConfig()
 17 | 
 18 |     def test_initial_status(self, mock_config):
 19 |         """Coordinator starts in NOT_STARTED state."""
 20 |         coordinator = SyncCoordinator(config=mock_config)
 21 |         assert coordinator.status == SyncStatus.NOT_STARTED
 22 |         assert coordinator.is_running is False
 23 | 
 24 |     @pytest.mark.asyncio
 25 |     async def test_start_when_sync_disabled(self, mock_config):
 26 |         """When should_sync is False, start() sets status to STOPPED."""
 27 |         coordinator = SyncCoordinator(
 28 |             config=mock_config,
 29 |             should_sync=False,
 30 |             skip_reason="Test skip",
 31 |         )
 32 | 
 33 |         await coordinator.start()
 34 | 
 35 |         assert coordinator.status == SyncStatus.STOPPED
 36 |         assert coordinator.is_running is False
 37 | 
 38 |     @pytest.mark.asyncio
 39 |     async def test_stop_when_not_started(self, mock_config):
 40 |         """Stop is safe to call when not started."""
 41 |         coordinator = SyncCoordinator(config=mock_config)
 42 | 
 43 |         await coordinator.stop()  # Should not raise
 44 | 
 45 |         assert coordinator.status == SyncStatus.NOT_STARTED
 46 | 
 47 |     @pytest.mark.asyncio
 48 |     async def test_stop_when_stopped(self, mock_config):
 49 |         """Stop is idempotent when already stopped."""
 50 |         coordinator = SyncCoordinator(
 51 |             config=mock_config,
 52 |             should_sync=False,
 53 |         )
 54 |         await coordinator.start()  # Sets to STOPPED
 55 | 
 56 |         await coordinator.stop()  # Should not raise
 57 | 
 58 |         assert coordinator.status == SyncStatus.STOPPED
 59 | 
 60 |     def test_get_status_info(self, mock_config):
 61 |         """get_status_info returns diagnostic info."""
 62 |         coordinator = SyncCoordinator(
 63 |             config=mock_config,
 64 |             should_sync=True,
 65 |             skip_reason=None,
 66 |         )
 67 | 
 68 |         info = coordinator.get_status_info()
 69 | 
 70 |         assert info["status"] == "NOT_STARTED"
 71 |         assert info["should_sync"] is True
 72 |         assert info["skip_reason"] is None
 73 |         assert info["has_task"] is False
 74 | 
 75 |     def test_get_status_info_with_skip_reason(self, mock_config):
 76 |         """get_status_info includes skip reason."""
 77 |         coordinator = SyncCoordinator(
 78 |             config=mock_config,
 79 |             should_sync=False,
 80 |             skip_reason="Test environment detected",
 81 |         )
 82 | 
 83 |         info = coordinator.get_status_info()
 84 | 
 85 |         assert info["should_sync"] is False
 86 |         assert info["skip_reason"] == "Test environment detected"
 87 | 
 88 |     @pytest.mark.asyncio
 89 |     async def test_start_creates_task(self, mock_config):
 90 |         """When should_sync is True, start() creates a background task."""
 91 |         coordinator = SyncCoordinator(
 92 |             config=mock_config,
 93 |             should_sync=True,
 94 |         )
 95 | 
 96 |         # Mock initialize_file_sync to avoid actually starting sync
 97 |         # The import happens inside start(), so patch at the source module
 98 |         with patch(
 99 |             "basic_memory.services.initialization.initialize_file_sync",
100 |             new_callable=AsyncMock,
101 |         ):
102 |             # Start coordinator
103 |             await coordinator.start()
104 | 
105 |             # Should be running with a task
106 |             assert coordinator.status == SyncStatus.RUNNING
107 |             assert coordinator.is_running is True
108 |             assert coordinator._sync_task is not None
109 | 
110 |             # Stop to clean up
111 |             await coordinator.stop()
112 | 
113 |             assert coordinator.status == SyncStatus.STOPPED
114 |             assert coordinator._sync_task is None
115 | 
116 |     @pytest.mark.asyncio
117 |     async def test_start_already_running(self, mock_config):
118 |         """Starting when already running is a no-op."""
119 |         coordinator = SyncCoordinator(
120 |             config=mock_config,
121 |             should_sync=True,
122 |         )
123 | 
124 |         with patch(
125 |             "basic_memory.services.initialization.initialize_file_sync",
126 |             new_callable=AsyncMock,
127 |         ):
128 |             await coordinator.start()
129 |             first_task = coordinator._sync_task
130 | 
131 |             # Start again - should not create new task
132 |             await coordinator.start()
133 |             assert coordinator._sync_task is first_task
134 | 
135 |             await coordinator.stop()
136 | 
```
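
Taken together, these tests pin down the coordinator's lifecycle contract: construct, `start()`, inspect, `stop()`, with `should_sync=False` (plus a `skip_reason`) short-circuiting straight to `STOPPED`. A minimal wiring sketch under those assumptions — the module path for `SyncCoordinator` is not shown on this page and is guessed here:

```python
# Hedged usage sketch based only on the behavior the tests above exercise.
# The SyncCoordinator import path is an assumption, not confirmed here.
import asyncio

from basic_memory.config import BasicMemoryConfig
from basic_memory.services.sync_coordinator import SyncCoordinator  # assumed path


async def main() -> None:
    coordinator = SyncCoordinator(
        config=BasicMemoryConfig(),
        should_sync=True,  # False (+ skip_reason) makes start() mark STOPPED instead
        skip_reason=None,
    )

    await coordinator.start()  # spawns the background file-sync task
    print(coordinator.get_status_info())
    # e.g. {"status": "RUNNING", "should_sync": True, "skip_reason": None, "has_task": True}

    await coordinator.stop()  # idempotent: safe before start() and after stop()
    assert not coordinator.is_running


asyncio.run(main())
```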

--------------------------------------------------------------------------------
/tests/schemas/test_base_timeframe_minimum.py:
--------------------------------------------------------------------------------

```python
  1 | """Test minimum 1-day timeframe enforcement for timezone handling."""
  2 | 
  3 | from datetime import datetime, timedelta
  4 | import pytest
  5 | from freezegun import freeze_time
  6 | 
  7 | from basic_memory.schemas.base import parse_timeframe
  8 | 
  9 | 
 10 | class TestTimeframeMinimum:
 11 |     """Test that parse_timeframe enforces a minimum 1-day lookback."""
 12 | 
 13 |     @freeze_time("2025-01-15 15:00:00")
 14 |     def test_today_returns_one_day_ago(self):
 15 |         """Test that 'today' returns 1 day ago instead of start of today."""
 16 |         result = parse_timeframe("today")
 17 |         now = datetime.now()
 18 |         one_day_ago = now - timedelta(days=1)
 19 | 
 20 |         # Should be approximately 1 day ago (within a second for test tolerance)
 21 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 22 |         assert diff < 1, f"Expected ~1 day ago, got {result}"
 23 | 
 24 |     @freeze_time("2025-01-15 15:00:00")
 25 |     def test_one_hour_returns_one_day_minimum(self):
 26 |         """Test that '1h' returns 1 day ago due to minimum enforcement."""
 27 |         result = parse_timeframe("1h")
 28 |         now = datetime.now()
 29 |         one_day_ago = now - timedelta(days=1)
 30 | 
 31 |         # Should be approximately 1 day ago, not 1 hour ago
 32 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 33 |         assert diff < 1, f"Expected ~1 day ago for '1h', got {result}"
 34 | 
 35 |     @freeze_time("2025-01-15 15:00:00")
 36 |     def test_six_hours_returns_one_day_minimum(self):
 37 |         """Test that '6h' returns 1 day ago due to minimum enforcement."""
 38 |         result = parse_timeframe("6h")
 39 |         now = datetime.now()
 40 |         one_day_ago = now - timedelta(days=1)
 41 | 
 42 |         # Should be approximately 1 day ago, not 6 hours ago
 43 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 44 |         assert diff < 1, f"Expected ~1 day ago for '6h', got {result}"
 45 | 
 46 |     @freeze_time("2025-01-15 15:00:00")
 47 |     def test_one_day_returns_one_day(self):
 48 |         """Test that '1d' correctly returns approximately 1 day ago."""
 49 |         result = parse_timeframe("1d")
 50 |         now = datetime.now()
 51 |         one_day_ago = now - timedelta(days=1)
 52 | 
 53 |         # Should be approximately 1 day ago (within 24 hours)
 54 |         diff_hours = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds()) / 3600
 55 |         assert diff_hours < 24, (
 56 |             f"Expected ~1 day ago for '1d', got {result} (diff: {diff_hours} hours)"
 57 |         )
 58 | 
 59 |     @freeze_time("2025-01-15 15:00:00")
 60 |     def test_two_days_returns_two_days(self):
 61 |         """Test that '2d' correctly returns approximately 2 days ago (not affected by minimum)."""
 62 |         result = parse_timeframe("2d")
 63 |         now = datetime.now()
 64 |         two_days_ago = now - timedelta(days=2)
 65 | 
 66 |         # Should be approximately 2 days ago (within 24 hours)
 67 |         diff_hours = abs((result.replace(tzinfo=None) - two_days_ago).total_seconds()) / 3600
 68 |         assert diff_hours < 24, (
 69 |             f"Expected ~2 days ago for '2d', got {result} (diff: {diff_hours} hours)"
 70 |         )
 71 | 
 72 |     @freeze_time("2025-01-15 15:00:00")
 73 |     def test_one_week_returns_one_week(self):
 74 |         """Test that '1 week' correctly returns approximately 1 week ago (not affected by minimum)."""
 75 |         result = parse_timeframe("1 week")
 76 |         now = datetime.now()
 77 |         one_week_ago = now - timedelta(weeks=1)
 78 | 
 79 |         # Should be approximately 1 week ago (within 24 hours)
 80 |         diff_hours = abs((result.replace(tzinfo=None) - one_week_ago).total_seconds()) / 3600
 81 |         assert diff_hours < 24, (
 82 |             f"Expected ~1 week ago for '1 week', got {result} (diff: {diff_hours} hours)"
 83 |         )
 84 | 
 85 |     @freeze_time("2025-01-15 15:00:00")
 86 |     def test_zero_days_returns_one_day_minimum(self):
 87 |         """Test that '0d' returns 1 day ago due to minimum enforcement."""
 88 |         result = parse_timeframe("0d")
 89 |         now = datetime.now()
 90 |         one_day_ago = now - timedelta(days=1)
 91 | 
 92 |         # Should be approximately 1 day ago, not now
 93 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 94 |         assert diff < 1, f"Expected ~1 day ago for '0d', got {result}"
 95 | 
 96 |     def test_timezone_awareness(self):
 97 |         """Test that returned datetime is timezone-aware."""
 98 |         result = parse_timeframe("1d")
 99 |         assert result.tzinfo is not None, "Expected timezone-aware datetime"
100 | 
101 |     def test_invalid_timeframe_raises_error(self):
102 |         """Test that invalid timeframe strings raise ValueError."""
103 |         with pytest.raises(ValueError, match="Could not parse timeframe"):
104 |             parse_timeframe("invalid_timeframe")
105 | 
```
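
The contract these tests encode — parse a human-friendly lookback, clamp it to at least one day in the past, return a timezone-aware datetime, and raise `ValueError` otherwise — can be sketched compactly. This is an illustrative reimplementation, not the actual `parse_timeframe` source; it only handles the compact `Nh`/`Nd`/`Nw` forms plus `today`, while the real function accepts richer formats such as `"1 week"`:

```python
# Illustrative sketch of the tested contract; the real parse_timeframe
# accepts more formats than this toy parser does.
import re
from datetime import datetime, timedelta, timezone
from typing import Optional

_UNITS = {"h": "hours", "d": "days", "w": "weeks"}


def parse_timeframe_sketch(timeframe: str, now: Optional[datetime] = None) -> datetime:
    now = now or datetime.now(timezone.utc)

    if timeframe == "today":
        delta = timedelta(0)  # would be "now"; clamped to 1 day below
    else:
        m = re.fullmatch(r"(\d+)([hdw])", timeframe.strip())
        if not m:
            raise ValueError(f"Could not parse timeframe: {timeframe}")
        delta = timedelta(**{_UNITS[m.group(2)]: int(m.group(1))})

    # The 1-day minimum: '1h', '6h', '0d', and 'today' all become 1 day ago,
    # which sidesteps "start of today" ambiguity across timezones.
    return now - max(delta, timedelta(days=1))


fixed = datetime(2025, 1, 15, 15, 0, tzinfo=timezone.utc)
assert parse_timeframe_sketch("6h", now=fixed) == fixed - timedelta(days=1)
assert parse_timeframe_sketch("2d", now=fixed) == fixed - timedelta(days=2)
```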

--------------------------------------------------------------------------------
/src/basic_memory/api/routers/importer_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Import router for Basic Memory API."""
  2 | 
  3 | import json
  4 | import logging
  5 | 
  6 | from fastapi import APIRouter, Form, HTTPException, UploadFile, status
  7 | 
  8 | from basic_memory.deps import (
  9 |     ChatGPTImporterDep,
 10 |     ClaudeConversationsImporterDep,
 11 |     ClaudeProjectsImporterDep,
 12 |     MemoryJsonImporterDep,
 13 | )
 14 | from basic_memory.importers import Importer
 15 | from basic_memory.schemas.importer import (
 16 |     ChatImportResult,
 17 |     EntityImportResult,
 18 |     ProjectImportResult,
 19 | )
 20 | 
 21 | logger = logging.getLogger(__name__)
 22 | 
 23 | router = APIRouter(prefix="/import", tags=["import"])
 24 | 
 25 | 
 26 | @router.post("/chatgpt", response_model=ChatImportResult)
 27 | async def import_chatgpt(
 28 |     importer: ChatGPTImporterDep,
 29 |     file: UploadFile,
 30 |     folder: str = Form("conversations"),
 31 | ) -> ChatImportResult:
 32 |     """Import conversations from ChatGPT JSON export.
 33 | 
 34 |     Args:
 35 |         importer: Injected ChatGPT importer dependency.
 36 |         file: The ChatGPT conversations.json file.
 37 |         folder: The folder to place the files in.
 38 | 
 39 |     Returns:
 40 |         ChatImportResult with import statistics.
 41 | 
 42 |     Raises:
 43 |         HTTPException: If import fails.
 44 |     """
 45 |     return await import_file(importer, file, folder)
 46 | 
 47 | 
 48 | @router.post("/claude/conversations", response_model=ChatImportResult)
 49 | async def import_claude_conversations(
 50 |     importer: ClaudeConversationsImporterDep,
 51 |     file: UploadFile,
 52 |     folder: str = Form("conversations"),
 53 | ) -> ChatImportResult:
 54 |     """Import conversations from Claude conversations.json export.
 55 | 
 56 |     Args:
 57 |         importer: Injected Claude conversations importer dependency.
 58 |         file: The Claude conversations.json file.
 59 |         folder: The folder to place the files in.
 60 | 
 61 |     Returns:
 62 |         ChatImportResult with import statistics.
 63 | 
 64 |     Raises:
 65 |         HTTPException: If import fails.
 66 |     """
 67 |     return await import_file(importer, file, folder)
 68 | 
 69 | 
 70 | @router.post("/claude/projects", response_model=ProjectImportResult)
 71 | async def import_claude_projects(
 72 |     importer: ClaudeProjectsImporterDep,
 73 |     file: UploadFile,
 74 |     folder: str = Form("projects"),
 75 | ) -> ProjectImportResult:
 76 |     """Import projects from Claude projects.json export.
 77 | 
 78 |     Args:
 79 |         importer: Injected Claude projects importer dependency.
 80 |         file: The Claude projects.json file.
 81 |         folder: The folder to place the files in.
 82 | 
 83 |     Returns:
 84 |         ProjectImportResult with import statistics.
 85 | 
 86 |     Raises:
 87 |         HTTPException: If import fails.
 88 |     """
 89 |     return await import_file(importer, file, folder)
 90 | 
 91 | 
 92 | @router.post("/memory-json", response_model=EntityImportResult)
 93 | async def import_memory_json(
 94 |     importer: MemoryJsonImporterDep,
 95 |     file: UploadFile,
 96 |     folder: str = Form("conversations"),
 97 | ) -> EntityImportResult:
 98 |     """Import entities and relations from a memory.json file.
 99 | 
100 |     Args:
101 |         importer: Injected memory.json importer dependency.
102 |         file: The memory.json file, with one JSON object per line (JSONL).
103 |         folder: Destination folder within the project.
104 | 
105 |     Returns:
106 |         EntityImportResult with import statistics.
107 | 
108 |     Raises:
109 |         HTTPException: If import fails.
110 |     """
111 |     try:
112 |         file_data = []
113 |         file_bytes = await file.read()
114 |         file_str = file_bytes.decode("utf-8")
115 |         # memory.json is JSONL: one JSON object per line
116 |         for line in file_str.splitlines():
117 |             json_data = json.loads(line)
118 |             file_data.append(json_data)
119 | 
120 |         result = await importer.import_data(file_data, folder)
121 |         if not result.success:  # pragma: no cover
122 |             raise HTTPException(
123 |                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
124 |                 detail=result.error_message or "Import failed",
125 |             )
126 |     except HTTPException:  # pragma: no cover
127 |         raise  # preserve the failure raised above instead of re-wrapping it
128 |     except Exception as e:
129 |         logger.exception("Import failed")
130 |         raise HTTPException(
131 |             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
132 |             detail=f"Import failed: {str(e)}",
133 |         ) from e
134 |     return result
135 | 
136 | 
137 | async def import_file(importer: Importer, file: UploadFile, destination_folder: str):
138 |     """Parse an uploaded file as a single JSON document and run the importer."""
139 |     try:
140 |         json_data = json.load(file.file)
141 |         result = await importer.import_data(json_data, destination_folder)
142 |         if not result.success:  # pragma: no cover
143 |             raise HTTPException(
144 |                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
145 |                 detail=result.error_message or "Import failed",
146 |             )
147 | 
148 |         return result
149 | 
150 |     except HTTPException:  # pragma: no cover
151 |         raise  # preserve the failure raised above instead of re-wrapping it
152 |     except Exception as e:
153 |         logger.exception("Import failed")
154 |         raise HTTPException(
155 |             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
156 |             detail=f"Import failed: {str(e)}",
157 |         ) from e
158 | 
```
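
Note the payload asymmetry the handlers encode: the shared `import_file` helper parses the upload as one JSON document, while `/import/memory-json` splits the upload into lines and parses each line separately (JSONL). A hedged client-side sketch; the routes and the `folder` form field come from the router above, but the base URL, the use of `httpx`, and the memory.json line shapes are illustrative assumptions:

```python
# Illustrative client calls; URL, httpx, and line shapes are assumed.
import json

import httpx

BASE = "http://localhost:8000"  # hypothetical API address

# /import/chatgpt: the whole upload is one JSON document.
with open("conversations.json", "rb") as f:
    r = httpx.post(
        f"{BASE}/import/chatgpt",
        files={"file": ("conversations.json", f, "application/json")},
        data={"folder": "conversations"},
    )
r.raise_for_status()
print(r.json())  # ChatImportResult statistics

# /import/memory-json: one JSON object per line (JSONL). The entity/relation
# shapes below follow the common memory.json export format and are assumed.
lines = [
    {"type": "entity", "name": "coffee-roasting", "entityType": "note", "observations": []},
    {"type": "relation", "from": "coffee-roasting", "to": "brewing", "relationType": "relates_to"},
]
payload = "\n".join(json.dumps(obj) for obj in lines).encode("utf-8")
r = httpx.post(
    f"{BASE}/import/memory-json",
    files={"file": ("memory.json", payload, "application/json")},
    data={"folder": "conversations"},
)
r.raise_for_status()
print(r.json())  # EntityImportResult statistics
```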

--------------------------------------------------------------------------------
/tests/cli/test_import_claude_projects.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for import_claude_projects command."""
  2 | 
  3 | import json
  4 | 
  5 | import pytest
  6 | from typer.testing import CliRunner
  7 | 
  8 | from basic_memory.cli.app import app
  9 | from basic_memory.cli.commands.import_claude_projects import import_projects  # noqa
 10 | from basic_memory.config import get_project_config
 11 | 
 12 | # Set up CLI runner
 13 | runner = CliRunner()
 14 | 
 15 | 
 16 | @pytest.fixture
 17 | def sample_project():
 18 |     """Sample project data for testing."""
 19 |     return {
 20 |         "uuid": "test-uuid",
 21 |         "name": "Test Project",
 22 |         "created_at": "2025-01-05T20:55:32.499880+00:00",
 23 |         "updated_at": "2025-01-05T20:56:39.477600+00:00",
 24 |         "prompt_template": "# Test Prompt\n\nThis is a test prompt.",
 25 |         "docs": [
 26 |             {
 27 |                 "uuid": "doc-uuid-1",
 28 |                 "filename": "Test Document",
 29 |                 "content": "# Test Document\n\nThis is test content.",
 30 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
 31 |             },
 32 |             {
 33 |                 "uuid": "doc-uuid-2",
 34 |                 "filename": "Another Document",
 35 |                 "content": "# Another Document\n\nMore test content.",
 36 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
 37 |             },
 38 |         ],
 39 |     }
 40 | 
 41 | 
 42 | @pytest.fixture
 43 | def sample_projects_json(tmp_path, sample_project):
 44 |     """Create a sample projects.json file."""
 45 |     json_file = tmp_path / "projects.json"
 46 |     with open(json_file, "w", encoding="utf-8") as f:
 47 |         json.dump([sample_project], f)
 48 |     return json_file
 49 | 
 50 | 
 51 | def test_import_projects_command_file_not_found(tmp_path):
 52 |     """Test error handling for nonexistent file."""
 53 |     nonexistent = tmp_path / "nonexistent.json"
 54 |     result = runner.invoke(app, ["import", "claude", "projects", str(nonexistent)])
 55 |     assert result.exit_code == 1
 56 |     assert "File not found" in result.output
 57 | 
 58 | 
 59 | def test_import_projects_command_success(tmp_path, sample_projects_json, monkeypatch):
 60 |     """Test successful project import via command."""
 61 |     # Set up test environment
 62 |     config = get_project_config()
 63 |     config.home = tmp_path
 64 | 
 65 |     # Run import
 66 |     result = runner.invoke(app, ["import", "claude", "projects", str(sample_projects_json)])
 67 |     assert result.exit_code == 0
 68 |     assert "Import complete" in result.output
 69 |     assert "Imported 2 project documents" in result.output
 70 |     assert "Imported 1 prompt templates" in result.output
 71 | 
 72 | 
 73 | def test_import_projects_command_invalid_json(tmp_path):
 74 |     """Test error handling for invalid JSON."""
 75 |     # Create invalid JSON file
 76 |     invalid_file = tmp_path / "invalid.json"
 77 |     invalid_file.write_text("not json")
 78 | 
 79 |     result = runner.invoke(app, ["import", "claude", "projects", str(invalid_file)])
 80 |     assert result.exit_code == 1
 81 |     assert "Error during import" in result.output
 82 | 
 83 | 
 84 | def test_import_projects_with_base_folder(tmp_path, sample_projects_json, monkeypatch):
 85 |     """Test import with custom base folder."""
 86 |     # Set up test environment
 87 |     config = get_project_config()
 88 |     config.home = tmp_path
 89 |     base_folder = "claude-exports"
 90 | 
 91 |     # Run import
 92 |     result = runner.invoke(
 93 |         app,
 94 |         [
 95 |             "import",
 96 |             "claude",
 97 |             "projects",
 98 |             str(sample_projects_json),
 99 |             "--base-folder",
100 |             base_folder,
101 |         ],
102 |     )
103 |     assert result.exit_code == 0
104 | 
105 |     # Check files in base folder
106 |     project_dir = tmp_path / base_folder / "Test_Project"
107 |     assert project_dir.exists()
108 |     assert (project_dir / "docs").exists()
109 |     assert (project_dir / "prompt-template.md").exists()
110 | 
111 | 
112 | def test_import_project_without_prompt(tmp_path):
113 |     """Test importing project without prompt template."""
114 |     # Create project without prompt
115 |     project = {
116 |         "uuid": "test-uuid",
117 |         "name": "No Prompt Project",
118 |         "created_at": "2025-01-05T20:55:32.499880+00:00",
119 |         "updated_at": "2025-01-05T20:56:39.477600+00:00",
120 |         "docs": [
121 |             {
122 |                 "uuid": "doc-uuid-1",
123 |                 "filename": "Test Document",
124 |                 "content": "# Test Document\n\nContent.",
125 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
126 |             }
127 |         ],
128 |     }
129 | 
130 |     json_file = tmp_path / "no_prompt.json"
131 |     with open(json_file, "w", encoding="utf-8") as f:
132 |         json.dump([project], f)
133 | 
134 |     # Set up environment
135 |     config = get_project_config()
136 |     config.home = tmp_path
137 | 
138 |     # Run import
139 |     result = runner.invoke(app, ["import", "claude", "projects", str(json_file)])
140 |     assert result.exit_code == 0
141 |     assert "Imported 1 project documents" in result.output
142 |     assert "Imported 0 prompt templates" in result.output
143 | 
```
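
For reference, the export shape these tests feed the importer can be written down as typed records. This is inferred from the fixtures alone; only `prompt_template` is demonstrably optional (see `test_import_project_without_prompt`), and the real importer may tolerate more fields:

```python
# Shape of a Claude projects.json export as the fixtures above use it.
# Inferred from test data only, not from the importer's own validation.
from typing import List, TypedDict


class ProjectDoc(TypedDict):
    uuid: str
    filename: str  # becomes a markdown file under <project>/docs/
    content: str
    created_at: str  # ISO-8601 timestamp


class _ProjectRequired(TypedDict):
    uuid: str
    name: str  # sanitized into a directory name: "Test Project" -> "Test_Project"
    created_at: str
    updated_at: str
    docs: List[ProjectDoc]


class ClaudeProject(_ProjectRequired, total=False):
    prompt_template: str  # optional; written to prompt-template.md when present


# The file itself is a JSON array of projects: List[ClaudeProject]
```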

--------------------------------------------------------------------------------
/src/basic_memory/services/link_resolver.py:
--------------------------------------------------------------------------------

```python
  1 | """Service for resolving markdown links to permalinks."""
  2 | 
  3 | from typing import Optional, Tuple
  4 | 
  5 | 
  6 | from loguru import logger
  7 | 
  8 | from basic_memory.models import Entity
  9 | from basic_memory.repository.entity_repository import EntityRepository
 10 | from basic_memory.schemas.search import SearchQuery, SearchItemType
 11 | from basic_memory.services.search_service import SearchService
 12 | 
 13 | 
 14 | class LinkResolver:
 15 |     """Service for resolving markdown links to permalinks.
 16 | 
 17 |     Uses a combination of exact matching and search-based resolution:
 18 |     1. Try exact permalink match (fastest)
 19 |     2. Try exact title match
 20 |     3. Try exact file path match
 21 |     4. Try file path with .md extension (for folder/title patterns)
 22 |     5. Fall back to search for fuzzy matching
 23 |     """
 24 | 
 25 |     def __init__(self, entity_repository: EntityRepository, search_service: SearchService):
 26 |         """Initialize with repositories."""
 27 |         self.entity_repository = entity_repository
 28 |         self.search_service = search_service
 29 | 
 30 |     async def resolve_link(
 31 |         self, link_text: str, use_search: bool = True, strict: bool = False
 32 |     ) -> Optional[Entity]:
 33 |         """Resolve a markdown link to its target entity.
 34 | 
 35 |         Args:
 36 |             link_text: The link text to resolve
 37 |             use_search: Whether to use search-based fuzzy matching as fallback
 38 |             strict: If True, only exact matches are allowed (no fuzzy search fallback)
 39 |         """
 40 |         logger.trace(f"Resolving link: {link_text}")
 41 | 
 42 |         # Clean link text and extract any alias
 43 |         clean_text, alias = self._normalize_link_text(link_text)
 44 | 
 45 |         # 1. Try exact permalink match first (most efficient)
 46 |         entity = await self.entity_repository.get_by_permalink(clean_text)
 47 |         if entity:
 48 |             logger.debug(f"Found exact permalink match: {entity.permalink}")
 49 |             return entity
 50 | 
 51 |         # 2. Try exact title match
 52 |         found = await self.entity_repository.get_by_title(clean_text)
 53 |         if found:
 54 |             # Return first match if there are duplicates (consistent behavior)
 55 |             entity = found[0]
 56 |             logger.debug(f"Found title match: {entity.title}")
 57 |             return entity
 58 | 
 59 |         # 3. Try file path
 60 |         found_path = await self.entity_repository.get_by_file_path(clean_text)
 61 |         if found_path:
 62 |             logger.debug(f"Found entity with path: {found_path.file_path}")
 63 |             return found_path
 64 | 
 65 |         # 4. Try file path with .md extension if not already present
 66 |         if not clean_text.endswith(".md") and "/" in clean_text:
 67 |             file_path_with_md = f"{clean_text}.md"
 68 |             found_path_md = await self.entity_repository.get_by_file_path(file_path_with_md)
 69 |             if found_path_md:
 70 |                 logger.debug(f"Found entity with path (with .md): {found_path_md.file_path}")
 71 |                 return found_path_md
 72 | 
 73 |         # In strict mode, don't try fuzzy search - return None if no exact match found
 74 |         if strict:
 75 |             return None
 76 | 
 77 |         # 5. Fall back to search for fuzzy matching (only if not in strict mode)
 78 |         if use_search and "*" not in clean_text:
 79 |             results = await self.search_service.search(
 80 |                 query=SearchQuery(text=clean_text, entity_types=[SearchItemType.ENTITY]),
 81 |             )
 82 | 
 83 |             if results:
 84 |                 # Look for best match
 85 |                 best_match = min(results, key=lambda x: x.score)  # pyright: ignore
 86 |                 logger.trace(
 87 |                     f"Selected best match from {len(results)} results: {best_match.permalink}"
 88 |                 )
 89 |                 if best_match.permalink:
 90 |                     return await self.entity_repository.get_by_permalink(best_match.permalink)
 91 | 
 92 |         # if we couldn't find anything then return None
 93 |         return None
 94 | 
 95 |     def _normalize_link_text(self, link_text: str) -> Tuple[str, Optional[str]]:
 96 |         """Normalize link text and extract alias if present.
 97 | 
 98 |         Args:
 99 |             link_text: Raw link text from markdown
100 | 
101 |         Returns:
102 |             Tuple of (normalized_text, alias or None)
103 |         """
104 |         # Strip whitespace
105 |         text = link_text.strip()
106 | 
107 |         # Remove enclosing brackets if present
108 |         if text.startswith("[[") and text.endswith("]]"):
109 |             text = text[2:-2]
110 | 
111 |         # Handle Obsidian-style aliases (format: [[actual|alias]])
112 |         alias = None
113 |         if "|" in text:
114 |             text, alias = text.split("|", 1)
115 |             text = text.strip()
116 |             alias = alias.strip()
117 |         else:
118 |             # Strip whitespace from text even if no alias
119 |             text = text.strip()
120 | 
121 |         return text, alias
122 | 
```
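
A runnable sketch of the resolution ladder with stubbed dependencies. The stand-in entity and the `AsyncMock` stubbing are illustrative; the calls and outcomes follow directly from the class above (the alias after `|` is stripped before matching, and fuzzy search is only consulted outside strict mode):

```python
# Exercises resolve_link with AsyncMock stand-ins for the repository and
# search service; only behavior shown in the class above is relied on.
import asyncio
from types import SimpleNamespace
from unittest.mock import AsyncMock

from basic_memory.services.link_resolver import LinkResolver


async def main() -> None:
    repo, search = AsyncMock(), AsyncMock()
    fake = SimpleNamespace(title="Coffee Roasting", permalink="coffee-roasting")

    # Steps 1 and 3 miss; step 2 (exact title) hits.
    repo.get_by_permalink.return_value = None
    repo.get_by_title.return_value = [fake]
    repo.get_by_file_path.return_value = None

    resolver = LinkResolver(entity_repository=repo, search_service=search)

    # The alias is dropped during normalization, so this resolves the
    # "Coffee Roasting" part; search is never consulted on an exact hit.
    entity = await resolver.resolve_link("[[Coffee Roasting|my notes]]")
    print(entity.permalink)  # coffee-roasting
    search.search.assert_not_called()

    # strict=True skips the fuzzy-search fallback and returns None.
    repo.get_by_title.return_value = []
    print(await resolver.resolve_link("[[Unknown]]", strict=True))  # None


asyncio.run(main())
```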