#
tokens: 49690/50000 30/348 files (page 3/23)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 3 of 23. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── python-developer.md
│   │   └── system-architect.md
│   └── commands
│       ├── release
│       │   ├── beta.md
│       │   ├── changelog.md
│       │   ├── release-check.md
│       │   └── release.md
│       ├── spec.md
│       └── test-live.md
├── .dockerignore
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── documentation.md
│   │   └── feature_request.md
│   └── workflows
│       ├── claude-code-review.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── dev-release.yml
│       ├── docker.yml
│       ├── pr-title.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── ai-assistant-guide-extended.md
│   ├── character-handling.md
│   ├── cloud-cli.md
│   └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│   ├── SPEC-1 Specification-Driven Development Process.md
│   ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│   ├── SPEC-11 Basic Memory API Performance Optimization.md
│   ├── SPEC-12 OpenTelemetry Observability.md
│   ├── SPEC-13 CLI Authentication with Subscription Validation.md
│   ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│   ├── SPEC-16 MCP Cloud Service Consolidation.md
│   ├── SPEC-17 Semantic Search with ChromaDB.md
│   ├── SPEC-18 AI Memory Management Tool.md
│   ├── SPEC-19 Sync Performance and Memory Optimization.md
│   ├── SPEC-2 Slash Commands Reference.md
│   ├── SPEC-3 Agent Definitions.md
│   ├── SPEC-4 Notes Web UI Component Architecture.md
│   ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│   ├── SPEC-6 Explicit Project Parameter Architecture.md
│   ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│   ├── SPEC-8 TigrisFS Integration.md
│   ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│   ├── SPEC-9 Signed Header Tenant Information.md
│   └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│   └── basic_memory
│       ├── __init__.py
│       ├── alembic
│       │   ├── alembic.ini
│       │   ├── env.py
│       │   ├── migrations.py
│       │   ├── script.py.mako
│       │   └── versions
│       │       ├── 3dae7c7b1564_initial_schema.py
│       │       ├── 502b60eaa905_remove_required_from_entity_permalink.py
│       │       ├── 5fe1ab1ccebe_add_projects_table.py
│       │       ├── 647e7a75e2cd_project_constraint_fix.py
│       │       ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│       │       ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│       │       ├── b3c3938bacdb_relation_to_name_unique_index.py
│       │       ├── cc7172b46608_update_search_index_schema.py
│       │       └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── routers
│       │   │   ├── __init__.py
│       │   │   ├── directory_router.py
│       │   │   ├── importer_router.py
│       │   │   ├── knowledge_router.py
│       │   │   ├── management_router.py
│       │   │   ├── memory_router.py
│       │   │   ├── project_router.py
│       │   │   ├── prompt_router.py
│       │   │   ├── resource_router.py
│       │   │   ├── search_router.py
│       │   │   └── utils.py
│       │   └── template_loader.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── auth.py
│       │   ├── commands
│       │   │   ├── __init__.py
│       │   │   ├── cloud
│       │   │   │   ├── __init__.py
│       │   │   │   ├── api_client.py
│       │   │   │   ├── bisync_commands.py
│       │   │   │   ├── cloud_utils.py
│       │   │   │   ├── core_commands.py
│       │   │   │   ├── mount_commands.py
│       │   │   │   ├── rclone_config.py
│       │   │   │   ├── rclone_installer.py
│       │   │   │   ├── upload_command.py
│       │   │   │   └── upload.py
│       │   │   ├── command_utils.py
│       │   │   ├── db.py
│       │   │   ├── import_chatgpt.py
│       │   │   ├── import_claude_conversations.py
│       │   │   ├── import_claude_projects.py
│       │   │   ├── import_memory_json.py
│       │   │   ├── mcp.py
│       │   │   ├── project.py
│       │   │   ├── status.py
│       │   │   ├── sync.py
│       │   │   └── tool.py
│       │   └── main.py
│       ├── config.py
│       ├── db.py
│       ├── deps.py
│       ├── file_utils.py
│       ├── ignore_utils.py
│       ├── importers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chatgpt_importer.py
│       │   ├── claude_conversations_importer.py
│       │   ├── claude_projects_importer.py
│       │   ├── memory_json_importer.py
│       │   └── utils.py
│       ├── markdown
│       │   ├── __init__.py
│       │   ├── entity_parser.py
│       │   ├── markdown_processor.py
│       │   ├── plugins.py
│       │   ├── schemas.py
│       │   └── utils.py
│       ├── mcp
│       │   ├── __init__.py
│       │   ├── async_client.py
│       │   ├── project_context.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── ai_assistant_guide.py
│       │   │   ├── continue_conversation.py
│       │   │   ├── recent_activity.py
│       │   │   ├── search.py
│       │   │   └── utils.py
│       │   ├── resources
│       │   │   ├── ai_assistant_guide.md
│       │   │   └── project_info.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── build_context.py
│       │       ├── canvas.py
│       │       ├── chatgpt_tools.py
│       │       ├── delete_note.py
│       │       ├── edit_note.py
│       │       ├── list_directory.py
│       │       ├── move_note.py
│       │       ├── project_management.py
│       │       ├── read_content.py
│       │       ├── read_note.py
│       │       ├── recent_activity.py
│       │       ├── search.py
│       │       ├── utils.py
│       │       ├── view_note.py
│       │       └── write_note.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── knowledge.py
│       │   ├── project.py
│       │   └── search.py
│       ├── repository
│       │   ├── __init__.py
│       │   ├── entity_repository.py
│       │   ├── observation_repository.py
│       │   ├── project_info_repository.py
│       │   ├── project_repository.py
│       │   ├── relation_repository.py
│       │   ├── repository.py
│       │   └── search_repository.py
│       ├── schemas
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloud.py
│       │   ├── delete.py
│       │   ├── directory.py
│       │   ├── importer.py
│       │   ├── memory.py
│       │   ├── project_info.py
│       │   ├── prompt.py
│       │   ├── request.py
│       │   ├── response.py
│       │   ├── search.py
│       │   └── sync_report.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_service.py
│       │   ├── directory_service.py
│       │   ├── entity_service.py
│       │   ├── exceptions.py
│       │   ├── file_service.py
│       │   ├── initialization.py
│       │   ├── link_resolver.py
│       │   ├── project_service.py
│       │   ├── search_service.py
│       │   └── service.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── background_sync.py
│       │   ├── sync_service.py
│       │   └── watch_service.py
│       ├── templates
│       │   └── prompts
│       │       ├── continue_conversation.hbs
│       │       └── search.hbs
│       └── utils.py
├── test-int
│   ├── BENCHMARKS.md
│   ├── cli
│   │   ├── test_project_commands_integration.py
│   │   ├── test_sync_commands_integration.py
│   │   └── test_version_integration.py
│   ├── conftest.py
│   ├── mcp
│   │   ├── test_build_context_underscore.py
│   │   ├── test_build_context_validation.py
│   │   ├── test_chatgpt_tools_integration.py
│   │   ├── test_default_project_mode_integration.py
│   │   ├── test_delete_note_integration.py
│   │   ├── test_edit_note_integration.py
│   │   ├── test_list_directory_integration.py
│   │   ├── test_move_note_integration.py
│   │   ├── test_project_management_integration.py
│   │   ├── test_project_state_sync_integration.py
│   │   ├── test_read_content_integration.py
│   │   ├── test_read_note_integration.py
│   │   ├── test_search_integration.py
│   │   ├── test_single_project_mcp_integration.py
│   │   └── test_write_note_integration.py
│   ├── test_db_wal_mode.py
│   ├── test_disable_permalinks_integration.py
│   └── test_sync_performance_benchmark.py
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── conftest.py
│   │   ├── test_async_client.py
│   │   ├── test_continue_conversation_template.py
│   │   ├── test_directory_router.py
│   │   ├── test_importer_router.py
│   │   ├── test_knowledge_router.py
│   │   ├── test_management_router.py
│   │   ├── test_memory_router.py
│   │   ├── test_project_router_operations.py
│   │   ├── test_project_router.py
│   │   ├── test_prompt_router.py
│   │   ├── test_relation_background_resolution.py
│   │   ├── test_resource_router.py
│   │   ├── test_search_router.py
│   │   ├── test_search_template.py
│   │   ├── test_template_loader_helpers.py
│   │   └── test_template_loader.py
│   ├── cli
│   │   ├── conftest.py
│   │   ├── test_bisync_commands.py
│   │   ├── test_cli_tools.py
│   │   ├── test_cloud_authentication.py
│   │   ├── test_cloud_utils.py
│   │   ├── test_ignore_utils.py
│   │   ├── test_import_chatgpt.py
│   │   ├── test_import_claude_conversations.py
│   │   ├── test_import_claude_projects.py
│   │   ├── test_import_memory_json.py
│   │   └── test_upload.py
│   ├── conftest.py
│   ├── db
│   │   └── test_issue_254_foreign_key_constraints.py
│   ├── importers
│   │   ├── test_importer_base.py
│   │   └── test_importer_utils.py
│   ├── markdown
│   │   ├── __init__.py
│   │   ├── test_date_frontmatter_parsing.py
│   │   ├── test_entity_parser_error_handling.py
│   │   ├── test_entity_parser.py
│   │   ├── test_markdown_plugins.py
│   │   ├── test_markdown_processor.py
│   │   ├── test_observation_edge_cases.py
│   │   ├── test_parser_edge_cases.py
│   │   ├── test_relation_edge_cases.py
│   │   └── test_task_detection.py
│   ├── mcp
│   │   ├── conftest.py
│   │   ├── test_obsidian_yaml_formatting.py
│   │   ├── test_permalink_collision_file_overwrite.py
│   │   ├── test_prompts.py
│   │   ├── test_resources.py
│   │   ├── test_tool_build_context.py
│   │   ├── test_tool_canvas.py
│   │   ├── test_tool_delete_note.py
│   │   ├── test_tool_edit_note.py
│   │   ├── test_tool_list_directory.py
│   │   ├── test_tool_move_note.py
│   │   ├── test_tool_read_content.py
│   │   ├── test_tool_read_note.py
│   │   ├── test_tool_recent_activity.py
│   │   ├── test_tool_resource.py
│   │   ├── test_tool_search.py
│   │   ├── test_tool_utils.py
│   │   ├── test_tool_view_note.py
│   │   ├── test_tool_write_note.py
│   │   └── tools
│   │       └── test_chatgpt_tools.py
│   ├── Non-MarkdownFileSupport.pdf
│   ├── repository
│   │   ├── test_entity_repository_upsert.py
│   │   ├── test_entity_repository.py
│   │   ├── test_entity_upsert_issue_187.py
│   │   ├── test_observation_repository.py
│   │   ├── test_project_info_repository.py
│   │   ├── test_project_repository.py
│   │   ├── test_relation_repository.py
│   │   ├── test_repository.py
│   │   ├── test_search_repository_edit_bug_fix.py
│   │   └── test_search_repository.py
│   ├── schemas
│   │   ├── test_base_timeframe_minimum.py
│   │   ├── test_memory_serialization.py
│   │   ├── test_memory_url_validation.py
│   │   ├── test_memory_url.py
│   │   ├── test_schemas.py
│   │   └── test_search.py
│   ├── Screenshot.png
│   ├── services
│   │   ├── test_context_service.py
│   │   ├── test_directory_service.py
│   │   ├── test_entity_service_disable_permalinks.py
│   │   ├── test_entity_service.py
│   │   ├── test_file_service.py
│   │   ├── test_initialization.py
│   │   ├── test_link_resolver.py
│   │   ├── test_project_removal_bug.py
│   │   ├── test_project_service_operations.py
│   │   ├── test_project_service.py
│   │   └── test_search_service.py
│   ├── sync
│   │   ├── test_character_conflicts.py
│   │   ├── test_sync_service_incremental.py
│   │   ├── test_sync_service.py
│   │   ├── test_sync_wikilink_issue.py
│   │   ├── test_tmp_files.py
│   │   ├── test_watch_service_edge_cases.py
│   │   ├── test_watch_service_reload.py
│   │   └── test_watch_service.py
│   ├── test_config.py
│   ├── test_db_migration_deduplication.py
│   ├── test_deps.py
│   ├── test_production_cascade_delete.py
│   └── utils
│       ├── test_file_utils.py
│       ├── test_frontmatter_obsidian_compatible.py
│       ├── test_parse_tags.py
│       ├── test_permalink_formatting.py
│       ├── test_utf8_handling.py
│       └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
    ├── api-performance.md
    ├── background-relations.md
    ├── basic-memory-home.md
    ├── bug-fixes.md
    ├── chatgpt-integration.md
    ├── cloud-authentication.md
    ├── cloud-bisync.md
    ├── cloud-mode-usage.md
    ├── cloud-mount.md
    ├── default-project-mode.md
    ├── env-file-removal.md
    ├── env-var-overrides.md
    ├── explicit-project-parameter.md
    ├── gitignore-integration.md
    ├── project-root-env-var.md
    ├── README.md
    └── sqlite-performance.md
```

# Files

--------------------------------------------------------------------------------
/src/basic_memory/alembic/versions/5fe1ab1ccebe_add_projects_table.py:
--------------------------------------------------------------------------------

```python
  1 | """add projects table
  2 | 
  3 | Revision ID: 5fe1ab1ccebe
  4 | Revises: cc7172b46608
  5 | Create Date: 2025-05-14 09:05:18.214357
  6 | 
  7 | """
  8 | 
  9 | from typing import Sequence, Union
 10 | 
 11 | from alembic import op
 12 | import sqlalchemy as sa
 13 | 
 14 | 
 15 | # revision identifiers, used by Alembic.
 16 | revision: str = "5fe1ab1ccebe"
 17 | down_revision: Union[str, None] = "cc7172b46608"
 18 | branch_labels: Union[str, Sequence[str], None] = None
 19 | depends_on: Union[str, Sequence[str], None] = None
 20 | 
 21 | 
def upgrade() -> None:
    """Add the ``project`` table and scope entity uniqueness per project.

    Creates the ``project`` table (idempotent via ``if_not_exists``) with its
    indexes, adds ``entity.project_id`` with a foreign key to ``project.id``,
    and replaces the global permalink/file_path unique indexes with
    per-project ones. Finally drops ``search_index`` so it can be rebuilt
    against the new schema.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "project",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("description", sa.Text(), nullable=True),
        sa.Column("permalink", sa.String(), nullable=False),
        sa.Column("path", sa.String(), nullable=False),
        sa.Column("is_active", sa.Boolean(), nullable=False),
        sa.Column("is_default", sa.Boolean(), nullable=True),
        sa.Column("created_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        # NOTE(review): in SQLite, NULLs are exempt from UNIQUE, so this allows
        # at most one TRUE and one FALSE row — effectively a single default
        # project only if non-defaults store NULL; confirm against the model.
        sa.UniqueConstraint("is_default"),
        sa.UniqueConstraint("name"),
        sa.UniqueConstraint("permalink"),
        if_not_exists=True,
    )
    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.create_index(
            "ix_project_created_at", ["created_at"], unique=False, if_not_exists=True
        )
        batch_op.create_index("ix_project_name", ["name"], unique=True, if_not_exists=True)
        batch_op.create_index("ix_project_path", ["path"], unique=False, if_not_exists=True)
        batch_op.create_index(
            "ix_project_permalink", ["permalink"], unique=True, if_not_exists=True
        )
        batch_op.create_index(
            "ix_project_updated_at", ["updated_at"], unique=False, if_not_exists=True
        )

    with op.batch_alter_table("entity", schema=None) as batch_op:
        # NOTE(review): project_id is added NOT NULL with no server default —
        # assumes the entity table is empty or is backfilled during the batch
        # copy; confirm for upgrades of populated databases.
        batch_op.add_column(sa.Column("project_id", sa.Integer(), nullable=False))
        # Replace the global uniqueness guarantees with per-project ones below.
        batch_op.drop_index(
            "uix_entity_permalink",
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.drop_index("ix_entity_file_path")
        batch_op.create_index(batch_op.f("ix_entity_file_path"), ["file_path"], unique=False)
        batch_op.create_index("ix_entity_project_id", ["project_id"], unique=False)
        batch_op.create_index(
            "uix_entity_file_path_project", ["file_path", "project_id"], unique=True
        )
        # Partial unique index: only markdown entities with a permalink are
        # constrained, matching the original uix_entity_permalink predicate.
        batch_op.create_index(
            "uix_entity_permalink_project",
            ["permalink", "project_id"],
            unique=True,
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.create_foreign_key("fk_entity_project_id", "project", ["project_id"], ["id"])

    # drop the search index table. it will be recreated
    op.drop_table("search_index")

    # ### end Alembic commands ###
 78 | 
 79 | 
def downgrade() -> None:
    """Revert per-project entity scoping and drop the ``project`` table.

    Restores the pre-migration global indexes on ``entity`` (``unique=1`` is
    Alembic's autogenerated truthy spelling of ``unique=True``), removes the
    ``project_id`` column, then drops the project indexes and table.

    NOTE(review): ``search_index``, dropped in ``upgrade()``, is not restored
    here — presumably recreated elsewhere at runtime; confirm.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("entity", schema=None) as batch_op:
        # Drop the FK before the column it references can be removed.
        batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
        batch_op.drop_index(
            "uix_entity_permalink_project",
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.drop_index("uix_entity_file_path_project")
        batch_op.drop_index("ix_entity_project_id")
        batch_op.drop_index(batch_op.f("ix_entity_file_path"))
        batch_op.create_index("ix_entity_file_path", ["file_path"], unique=1)
        # Recreate the original global partial unique index on permalink.
        batch_op.create_index(
            "uix_entity_permalink",
            ["permalink"],
            unique=1,
            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        )
        batch_op.drop_column("project_id")

    with op.batch_alter_table("project", schema=None) as batch_op:
        batch_op.drop_index("ix_project_updated_at")
        batch_op.drop_index("ix_project_permalink")
        batch_op.drop_index("ix_project_path")
        batch_op.drop_index("ix_project_name")
        batch_op.drop_index("ix_project_created_at")

    op.drop_table("project")
    # ### end Alembic commands ###
109 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/canvas.py:
--------------------------------------------------------------------------------

```python
  1 | """Canvas creation tool for Basic Memory MCP server.
  2 | 
  3 | This tool creates Obsidian canvas files (.canvas) using the JSON Canvas 1.0 spec.
  4 | """
  5 | 
  6 | import json
  7 | from typing import Dict, List, Any, Optional
  8 | 
  9 | from loguru import logger
 10 | from fastmcp import Context
 11 | 
 12 | from basic_memory.mcp.async_client import get_client
 13 | from basic_memory.mcp.project_context import get_active_project
 14 | from basic_memory.mcp.server import mcp
 15 | from basic_memory.mcp.tools.utils import call_put
 16 | 
 17 | 
 18 | @mcp.tool(
 19 |     description="Create an Obsidian canvas file to visualize concepts and connections.",
 20 | )
 21 | async def canvas(
 22 |     nodes: List[Dict[str, Any]],
 23 |     edges: List[Dict[str, Any]],
 24 |     title: str,
 25 |     folder: str,
 26 |     project: Optional[str] = None,
 27 |     context: Context | None = None,
 28 | ) -> str:
 29 |     """Create an Obsidian canvas file with the provided nodes and edges.
 30 | 
 31 |     This tool creates a .canvas file compatible with Obsidian's Canvas feature,
 32 |     allowing visualization of relationships between concepts or documents.
 33 | 
 34 |     Project Resolution:
 35 |     Server resolves projects in this order: Single Project Mode → project parameter → default project.
 36 |     If project unknown, use list_memory_projects() or recent_activity() first.
 37 | 
 38 |     For the full JSON Canvas 1.0 specification, see the 'spec://canvas' resource.
 39 | 
 40 |     Args:
 41 |         project: Project name to create canvas in. Optional - server will resolve using hierarchy.
 42 |                 If unknown, use list_memory_projects() to discover available projects.
 43 |         nodes: List of node objects following JSON Canvas 1.0 spec
 44 |         edges: List of edge objects following JSON Canvas 1.0 spec
 45 |         title: The title of the canvas (will be saved as title.canvas)
 46 |         folder: Folder path relative to project root where the canvas should be saved.
 47 |                 Use forward slashes (/) as separators. Examples: "diagrams", "projects/2025", "visual/maps"
 48 |         context: Optional FastMCP context for performance caching.
 49 | 
 50 |     Returns:
 51 |         A summary of the created canvas file
 52 | 
 53 |     Important Notes:
 54 |     - When referencing files, use the exact file path as shown in Obsidian
 55 |       Example: "folder/Document Name.md" (not permalink format)
 56 |     - For file nodes, the "file" attribute must reference an existing file
 57 |     - Nodes require id, type, x, y, width, height properties
 58 |     - Edges require id, fromNode, toNode properties
 59 |     - Position nodes in a logical layout (x,y coordinates in pixels)
 60 |     - Use color attributes ("1"-"6" or hex) for visual organization
 61 | 
 62 |     Basic Structure:
 63 |     ```json
 64 |     {
 65 |       "nodes": [
 66 |         {
 67 |           "id": "node1",
 68 |           "type": "file",  // Options: "file", "text", "link", "group"
 69 |           "file": "folder/Document.md",
 70 |           "x": 0,
 71 |           "y": 0,
 72 |           "width": 400,
 73 |           "height": 300
 74 |         }
 75 |       ],
 76 |       "edges": [
 77 |         {
 78 |           "id": "edge1",
 79 |           "fromNode": "node1",
 80 |           "toNode": "node2",
 81 |           "label": "connects to"
 82 |         }
 83 |       ]
 84 |     }
 85 |     ```
 86 | 
 87 |     Examples:
 88 |         # Create canvas in project
 89 |         canvas("my-project", nodes=[...], edges=[...], title="My Canvas", folder="diagrams")
 90 | 
 91 |         # Create canvas in work project
 92 |         canvas("work-project", nodes=[...], edges=[...], title="Process Flow", folder="visual/maps")
 93 | 
 94 |     Raises:
 95 |         ToolError: If project doesn't exist or folder path is invalid
 96 |     """
 97 |     async with get_client() as client:
 98 |         active_project = await get_active_project(client, project, context)
 99 |         project_url = active_project.project_url
100 | 
101 |         # Ensure path has .canvas extension
102 |         file_title = title if title.endswith(".canvas") else f"{title}.canvas"
103 |         file_path = f"{folder}/{file_title}"
104 | 
105 |         # Create canvas data structure
106 |         canvas_data = {"nodes": nodes, "edges": edges}
107 | 
108 |         # Convert to JSON
109 |         canvas_json = json.dumps(canvas_data, indent=2)
110 | 
111 |         # Write the file using the resource API
112 |         logger.info(f"Creating canvas file: {file_path} in project {project}")
113 |         response = await call_put(client, f"{project_url}/resource/{file_path}", json=canvas_json)
114 | 
115 |         # Parse response
116 |         result = response.json()
117 |         logger.debug(result)
118 | 
119 |         # Build summary
120 |         action = "Created" if response.status_code == 201 else "Updated"
121 |         summary = [f"# {action}: {file_path}", "\nThe canvas is ready to open in Obsidian."]
122 | 
123 |         return "\n".join(summary)
124 | 
```

--------------------------------------------------------------------------------
/tests/schemas/test_base_timeframe_minimum.py:
--------------------------------------------------------------------------------

```python
  1 | """Test minimum 1-day timeframe enforcement for timezone handling."""
  2 | 
  3 | from datetime import datetime, timedelta
  4 | import pytest
  5 | from freezegun import freeze_time
  6 | 
  7 | from basic_memory.schemas.base import parse_timeframe
  8 | 
  9 | 
 10 | class TestTimeframeMinimum:
 11 |     """Test that parse_timeframe enforces a minimum 1-day lookback."""
 12 | 
 13 |     @freeze_time("2025-01-15 15:00:00")
 14 |     def test_today_returns_one_day_ago(self):
 15 |         """Test that 'today' returns 1 day ago instead of start of today."""
 16 |         result = parse_timeframe("today")
 17 |         now = datetime.now()
 18 |         one_day_ago = now - timedelta(days=1)
 19 | 
 20 |         # Should be approximately 1 day ago (within a second for test tolerance)
 21 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 22 |         assert diff < 1, f"Expected ~1 day ago, got {result}"
 23 | 
 24 |     @freeze_time("2025-01-15 15:00:00")
 25 |     def test_one_hour_returns_one_day_minimum(self):
 26 |         """Test that '1h' returns 1 day ago due to minimum enforcement."""
 27 |         result = parse_timeframe("1h")
 28 |         now = datetime.now()
 29 |         one_day_ago = now - timedelta(days=1)
 30 | 
 31 |         # Should be approximately 1 day ago, not 1 hour ago
 32 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 33 |         assert diff < 1, f"Expected ~1 day ago for '1h', got {result}"
 34 | 
 35 |     @freeze_time("2025-01-15 15:00:00")
 36 |     def test_six_hours_returns_one_day_minimum(self):
 37 |         """Test that '6h' returns 1 day ago due to minimum enforcement."""
 38 |         result = parse_timeframe("6h")
 39 |         now = datetime.now()
 40 |         one_day_ago = now - timedelta(days=1)
 41 | 
 42 |         # Should be approximately 1 day ago, not 6 hours ago
 43 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 44 |         assert diff < 1, f"Expected ~1 day ago for '6h', got {result}"
 45 | 
 46 |     @freeze_time("2025-01-15 15:00:00")
 47 |     def test_one_day_returns_one_day(self):
 48 |         """Test that '1d' correctly returns approximately 1 day ago."""
 49 |         result = parse_timeframe("1d")
 50 |         now = datetime.now()
 51 |         one_day_ago = now - timedelta(days=1)
 52 | 
 53 |         # Should be approximately 1 day ago (within 24 hours)
 54 |         diff_hours = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds()) / 3600
 55 |         assert diff_hours < 24, (
 56 |             f"Expected ~1 day ago for '1d', got {result} (diff: {diff_hours} hours)"
 57 |         )
 58 | 
 59 |     @freeze_time("2025-01-15 15:00:00")
 60 |     def test_two_days_returns_two_days(self):
 61 |         """Test that '2d' correctly returns approximately 2 days ago (not affected by minimum)."""
 62 |         result = parse_timeframe("2d")
 63 |         now = datetime.now()
 64 |         two_days_ago = now - timedelta(days=2)
 65 | 
 66 |         # Should be approximately 2 days ago (within 24 hours)
 67 |         diff_hours = abs((result.replace(tzinfo=None) - two_days_ago).total_seconds()) / 3600
 68 |         assert diff_hours < 24, (
 69 |             f"Expected ~2 days ago for '2d', got {result} (diff: {diff_hours} hours)"
 70 |         )
 71 | 
 72 |     @freeze_time("2025-01-15 15:00:00")
 73 |     def test_one_week_returns_one_week(self):
 74 |         """Test that '1 week' correctly returns approximately 1 week ago (not affected by minimum)."""
 75 |         result = parse_timeframe("1 week")
 76 |         now = datetime.now()
 77 |         one_week_ago = now - timedelta(weeks=1)
 78 | 
 79 |         # Should be approximately 1 week ago (within 24 hours)
 80 |         diff_hours = abs((result.replace(tzinfo=None) - one_week_ago).total_seconds()) / 3600
 81 |         assert diff_hours < 24, (
 82 |             f"Expected ~1 week ago for '1 week', got {result} (diff: {diff_hours} hours)"
 83 |         )
 84 | 
 85 |     @freeze_time("2025-01-15 15:00:00")
 86 |     def test_zero_days_returns_one_day_minimum(self):
 87 |         """Test that '0d' returns 1 day ago due to minimum enforcement."""
 88 |         result = parse_timeframe("0d")
 89 |         now = datetime.now()
 90 |         one_day_ago = now - timedelta(days=1)
 91 | 
 92 |         # Should be approximately 1 day ago, not now
 93 |         diff = abs((result.replace(tzinfo=None) - one_day_ago).total_seconds())
 94 |         assert diff < 1, f"Expected ~1 day ago for '0d', got {result}"
 95 | 
 96 |     def test_timezone_awareness(self):
 97 |         """Test that returned datetime is timezone-aware."""
 98 |         result = parse_timeframe("1d")
 99 |         assert result.tzinfo is not None, "Expected timezone-aware datetime"
100 | 
101 |     def test_invalid_timeframe_raises_error(self):
102 |         """Test that invalid timeframe strings raise ValueError."""
103 |         with pytest.raises(ValueError, match="Could not parse timeframe"):
104 |             parse_timeframe("invalid_timeframe")
105 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/api/routers/importer_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Import router for Basic Memory API."""
  2 | 
  3 | import json
  4 | import logging
  5 | 
  6 | from fastapi import APIRouter, Form, HTTPException, UploadFile, status
  7 | 
  8 | from basic_memory.deps import (
  9 |     ChatGPTImporterDep,
 10 |     ClaudeConversationsImporterDep,
 11 |     ClaudeProjectsImporterDep,
 12 |     MemoryJsonImporterDep,
 13 | )
 14 | from basic_memory.importers import Importer
 15 | from basic_memory.schemas.importer import (
 16 |     ChatImportResult,
 17 |     EntityImportResult,
 18 |     ProjectImportResult,
 19 | )
 20 | 
 21 | logger = logging.getLogger(__name__)
 22 | 
 23 | router = APIRouter(prefix="/import", tags=["import"])
 24 | 
 25 | 
 26 | @router.post("/chatgpt", response_model=ChatImportResult)
 27 | async def import_chatgpt(
 28 |     importer: ChatGPTImporterDep,
 29 |     file: UploadFile,
 30 |     folder: str = Form("conversations"),
 31 | ) -> ChatImportResult:
 32 |     """Import conversations from ChatGPT JSON export.
 33 | 
 34 |     Args:
 35 |         file: The ChatGPT conversations.json file.
 36 |         folder: The folder to place the files in.
 37 |         markdown_processor: MarkdownProcessor instance.
 38 | 
 39 |     Returns:
 40 |         ChatImportResult with import statistics.
 41 | 
 42 |     Raises:
 43 |         HTTPException: If import fails.
 44 |     """
 45 |     return await import_file(importer, file, folder)
 46 | 
 47 | 
 48 | @router.post("/claude/conversations", response_model=ChatImportResult)
 49 | async def import_claude_conversations(
 50 |     importer: ClaudeConversationsImporterDep,
 51 |     file: UploadFile,
 52 |     folder: str = Form("conversations"),
 53 | ) -> ChatImportResult:
 54 |     """Import conversations from Claude conversations.json export.
 55 | 
 56 |     Args:
 57 |         file: The Claude conversations.json file.
 58 |         folder: The folder to place the files in.
  59 |         importer: Claude conversations importer dependency (injected).
 60 | 
 61 |     Returns:
 62 |         ChatImportResult with import statistics.
 63 | 
 64 |     Raises:
 65 |         HTTPException: If import fails.
 66 |     """
 67 |     return await import_file(importer, file, folder)
 68 | 
 69 | 
 70 | @router.post("/claude/projects", response_model=ProjectImportResult)
 71 | async def import_claude_projects(
 72 |     importer: ClaudeProjectsImporterDep,
 73 |     file: UploadFile,
 74 |     folder: str = Form("projects"),
 75 | ) -> ProjectImportResult:
 76 |     """Import projects from Claude projects.json export.
 77 | 
 78 |     Args:
 79 |         file: The Claude projects.json file.
  80 |         folder: The folder to place the files in.
  81 |         importer: Claude projects importer dependency (injected).
 82 | 
 83 |     Returns:
 84 |         ProjectImportResult with import statistics.
 85 | 
 86 |     Raises:
 87 |         HTTPException: If import fails.
 88 |     """
 89 |     return await import_file(importer, file, folder)
 90 | 
 91 | 
 92 | @router.post("/memory-json", response_model=EntityImportResult)
 93 | async def import_memory_json(
 94 |     importer: MemoryJsonImporterDep,
 95 |     file: UploadFile,
 96 |     folder: str = Form("conversations"),
 97 | ) -> EntityImportResult:
 98 |     """Import entities and relations from a memory.json file.
 99 | 
100 |     Args:
101 |         file: The memory.json file.
 102 |         folder: The folder to place the files in.
 103 |         importer: Memory JSON importer dependency (injected).
104 | 
105 |     Returns:
106 |         EntityImportResult with import statistics.
107 | 
108 |     Raises:
109 |         HTTPException: If import fails.
110 |     """
111 |     try:
112 |         file_data = []
113 |         file_bytes = await file.read()
114 |         file_str = file_bytes.decode("utf-8")
115 |         for line in file_str.splitlines():
116 |             json_data = json.loads(line)
117 |             file_data.append(json_data)
118 | 
119 |         result = await importer.import_data(file_data, folder)
120 |         if not result.success:  # pragma: no cover
121 |             raise HTTPException(
122 |                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
123 |                 detail=result.error_message or "Import failed",
124 |             )
125 |     except Exception as e:
126 |         logger.exception("Import failed")
127 |         raise HTTPException(
128 |             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
129 |             detail=f"Import failed: {str(e)}",
130 |         )
131 |     return result
132 | 
133 | 
134 | async def import_file(importer: Importer, file: UploadFile, destination_folder: str):
135 |     try:
136 |         # Process file
137 |         json_data = json.load(file.file)
138 |         result = await importer.import_data(json_data, destination_folder)
139 |         if not result.success:  # pragma: no cover
140 |             raise HTTPException(
141 |                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
142 |                 detail=result.error_message or "Import failed",
143 |             )
144 | 
145 |         return result
146 | 
147 |     except Exception as e:
148 |         logger.exception("Import failed")
149 |         raise HTTPException(
150 |             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
151 |             detail=f"Import failed: {str(e)}",
152 |         )
153 | 
```

--------------------------------------------------------------------------------
/tests/cli/test_import_claude_projects.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for import_claude_projects command."""
  2 | 
  3 | import json
  4 | 
  5 | import pytest
  6 | from typer.testing import CliRunner
  7 | 
  8 | from basic_memory.cli.app import app
  9 | from basic_memory.cli.commands.import_claude_projects import import_projects  # noqa
 10 | from basic_memory.config import get_project_config
 11 | 
 12 | # Set up CLI runner
 13 | runner = CliRunner()
 14 | 
 15 | 
 16 | @pytest.fixture
 17 | def sample_project():
 18 |     """Sample project data for testing."""
 19 |     return {
 20 |         "uuid": "test-uuid",
 21 |         "name": "Test Project",
 22 |         "created_at": "2025-01-05T20:55:32.499880+00:00",
 23 |         "updated_at": "2025-01-05T20:56:39.477600+00:00",
 24 |         "prompt_template": "# Test Prompt\n\nThis is a test prompt.",
 25 |         "docs": [
 26 |             {
 27 |                 "uuid": "doc-uuid-1",
 28 |                 "filename": "Test Document",
 29 |                 "content": "# Test Document\n\nThis is test content.",
 30 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
 31 |             },
 32 |             {
 33 |                 "uuid": "doc-uuid-2",
 34 |                 "filename": "Another Document",
 35 |                 "content": "# Another Document\n\nMore test content.",
 36 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
 37 |             },
 38 |         ],
 39 |     }
 40 | 
 41 | 
 42 | @pytest.fixture
 43 | def sample_projects_json(tmp_path, sample_project):
 44 |     """Create a sample projects.json file."""
 45 |     json_file = tmp_path / "projects.json"
 46 |     with open(json_file, "w", encoding="utf-8") as f:
 47 |         json.dump([sample_project], f)
 48 |     return json_file
 49 | 
 50 | 
 51 | def test_import_projects_command_file_not_found(tmp_path):
 52 |     """Test error handling for nonexistent file."""
 53 |     nonexistent = tmp_path / "nonexistent.json"
 54 |     result = runner.invoke(app, ["import", "claude", "projects", str(nonexistent)])
 55 |     assert result.exit_code == 1
 56 |     assert "File not found" in result.output
 57 | 
 58 | 
 59 | def test_import_projects_command_success(tmp_path, sample_projects_json, monkeypatch):
 60 |     """Test successful project import via command."""
 61 |     # Set up test environment
 62 |     config = get_project_config()
 63 |     config.home = tmp_path
 64 | 
 65 |     # Run import
 66 |     result = runner.invoke(app, ["import", "claude", "projects", str(sample_projects_json)])
 67 |     assert result.exit_code == 0
 68 |     assert "Import complete" in result.output
 69 |     assert "Imported 2 project documents" in result.output
 70 |     assert "Imported 1 prompt templates" in result.output
 71 | 
 72 | 
 73 | def test_import_projects_command_invalid_json(tmp_path):
 74 |     """Test error handling for invalid JSON."""
 75 |     # Create invalid JSON file
 76 |     invalid_file = tmp_path / "invalid.json"
 77 |     invalid_file.write_text("not json")
 78 | 
 79 |     result = runner.invoke(app, ["import", "claude", "projects", str(invalid_file)])
 80 |     assert result.exit_code == 1
 81 |     assert "Error during import" in result.output
 82 | 
 83 | 
 84 | def test_import_projects_with_base_folder(tmp_path, sample_projects_json, monkeypatch):
 85 |     """Test import with custom base folder."""
 86 |     # Set up test environment
 87 |     config = get_project_config()
 88 |     config.home = tmp_path
 89 |     base_folder = "claude-exports"
 90 | 
 91 |     # Run import
 92 |     result = runner.invoke(
 93 |         app,
 94 |         [
 95 |             "import",
 96 |             "claude",
 97 |             "projects",
 98 |             str(sample_projects_json),
 99 |             "--base-folder",
100 |             base_folder,
101 |         ],
102 |     )
103 |     assert result.exit_code == 0
104 | 
105 |     # Check files in base folder
106 |     project_dir = tmp_path / base_folder / "Test_Project"
107 |     assert project_dir.exists()
108 |     assert (project_dir / "docs").exists()
109 |     assert (project_dir / "prompt-template.md").exists()
110 | 
111 | 
112 | def test_import_project_without_prompt(tmp_path):
113 |     """Test importing project without prompt template."""
114 |     # Create project without prompt
115 |     project = {
116 |         "uuid": "test-uuid",
117 |         "name": "No Prompt Project",
118 |         "created_at": "2025-01-05T20:55:32.499880+00:00",
119 |         "updated_at": "2025-01-05T20:56:39.477600+00:00",
120 |         "docs": [
121 |             {
122 |                 "uuid": "doc-uuid-1",
123 |                 "filename": "Test Document",
124 |                 "content": "# Test Document\n\nContent.",
125 |                 "created_at": "2025-01-05T20:56:39.477600+00:00",
126 |             }
127 |         ],
128 |     }
129 | 
130 |     json_file = tmp_path / "no_prompt.json"
131 |     with open(json_file, "w", encoding="utf-8") as f:
132 |         json.dump([project], f)
133 | 
134 |     # Set up environment
135 |     config = get_project_config()
136 |     config.home = tmp_path
137 | 
138 |     # Run import
139 |     result = runner.invoke(app, ["import", "claude", "projects", str(json_file)])
140 |     assert result.exit_code == 0
141 |     assert "Imported 1 project documents" in result.output
142 |     assert "Imported 0 prompt templates" in result.output
143 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/services/link_resolver.py:
--------------------------------------------------------------------------------

```python
  1 | """Service for resolving markdown links to permalinks."""
  2 | 
  3 | from typing import Optional, Tuple
  4 | 
  5 | from loguru import logger
  6 | 
  7 | from basic_memory.models import Entity
  8 | from basic_memory.repository.entity_repository import EntityRepository
  9 | from basic_memory.schemas.search import SearchQuery, SearchItemType
 10 | from basic_memory.services.search_service import SearchService
 11 | 
 12 | 
 13 | class LinkResolver:
 14 |     """Service for resolving markdown links to permalinks.
 15 | 
 16 |     Uses a combination of exact matching and search-based resolution:
 17 |     1. Try exact permalink match (fastest)
 18 |     2. Try exact title match
 19 |     3. Try exact file path match
 20 |     4. Try file path with .md extension (for folder/title patterns)
 21 |     5. Fall back to search for fuzzy matching
 22 |     """
 23 | 
 24 |     def __init__(self, entity_repository: EntityRepository, search_service: SearchService):
 25 |         """Initialize with repositories."""
 26 |         self.entity_repository = entity_repository
 27 |         self.search_service = search_service
 28 | 
 29 |     async def resolve_link(
 30 |         self, link_text: str, use_search: bool = True, strict: bool = False
 31 |     ) -> Optional[Entity]:
  32 |         """Resolve a markdown link to its target entity, if one exists.
 33 | 
 34 |         Args:
 35 |             link_text: The link text to resolve
 36 |             use_search: Whether to use search-based fuzzy matching as fallback
 37 |             strict: If True, only exact matches are allowed (no fuzzy search fallback)
 38 |         """
 39 |         logger.trace(f"Resolving link: {link_text}")
 40 | 
 41 |         # Clean link text and extract any alias
 42 |         clean_text, alias = self._normalize_link_text(link_text)
 43 | 
 44 |         # 1. Try exact permalink match first (most efficient)
 45 |         entity = await self.entity_repository.get_by_permalink(clean_text)
 46 |         if entity:
 47 |             logger.debug(f"Found exact permalink match: {entity.permalink}")
 48 |             return entity
 49 | 
 50 |         # 2. Try exact title match
 51 |         found = await self.entity_repository.get_by_title(clean_text)
 52 |         if found:
 53 |             # Return first match if there are duplicates (consistent behavior)
 54 |             entity = found[0]
 55 |             logger.debug(f"Found title match: {entity.title}")
 56 |             return entity
 57 | 
 58 |         # 3. Try file path
 59 |         found_path = await self.entity_repository.get_by_file_path(clean_text)
 60 |         if found_path:
 61 |             logger.debug(f"Found entity with path: {found_path.file_path}")
 62 |             return found_path
 63 | 
 64 |         # 4. Try file path with .md extension if not already present
 65 |         if not clean_text.endswith(".md") and "/" in clean_text:
 66 |             file_path_with_md = f"{clean_text}.md"
 67 |             found_path_md = await self.entity_repository.get_by_file_path(file_path_with_md)
 68 |             if found_path_md:
 69 |                 logger.debug(f"Found entity with path (with .md): {found_path_md.file_path}")
 70 |                 return found_path_md
 71 | 
 72 |         # In strict mode, don't try fuzzy search - return None if no exact match found
 73 |         if strict:
 74 |             return None
 75 | 
 76 |         # 5. Fall back to search for fuzzy matching (only if not in strict mode)
 77 |         if use_search and "*" not in clean_text:
 78 |             results = await self.search_service.search(
 79 |                 query=SearchQuery(text=clean_text, entity_types=[SearchItemType.ENTITY]),
 80 |             )
 81 | 
 82 |             if results:
 83 |                 # Look for best match
 84 |                 best_match = min(results, key=lambda x: x.score)  # pyright: ignore
 85 |                 logger.trace(
 86 |                     f"Selected best match from {len(results)} results: {best_match.permalink}"
 87 |                 )
 88 |                 if best_match.permalink:
 89 |                     return await self.entity_repository.get_by_permalink(best_match.permalink)
 90 | 
 91 |         # if we couldn't find anything then return None
 92 |         return None
 93 | 
 94 |     def _normalize_link_text(self, link_text: str) -> Tuple[str, Optional[str]]:
 95 |         """Normalize link text and extract alias if present.
 96 | 
 97 |         Args:
 98 |             link_text: Raw link text from markdown
 99 | 
100 |         Returns:
101 |             Tuple of (normalized_text, alias or None)
102 |         """
103 |         # Strip whitespace
104 |         text = link_text.strip()
105 | 
106 |         # Remove enclosing brackets if present
107 |         if text.startswith("[[") and text.endswith("]]"):
108 |             text = text[2:-2]
109 | 
110 |         # Handle Obsidian-style aliases (format: [[actual|alias]])
111 |         alias = None
112 |         if "|" in text:
113 |             text, alias = text.split("|", 1)
114 |             text = text.strip()
115 |             alias = alias.strip()
116 |         else:
117 |             # Strip whitespace from text even if no alias
118 |             text = text.strip()
119 | 
120 |         return text, alias
121 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_tool_delete_note.py:
--------------------------------------------------------------------------------

```python
 1 | """Tests for delete_note MCP tool."""
 2 | 
 3 | from basic_memory.mcp.tools.delete_note import _format_delete_error_response
 4 | 
 5 | 
 6 | class TestDeleteNoteErrorFormatting:
 7 |     """Test the error formatting function for better user experience."""
 8 | 
 9 |     def test_format_delete_error_note_not_found(self, test_project):
10 |         """Test formatting for note not found errors."""
11 |         result = _format_delete_error_response(test_project.name, "entity not found", "test-note")
12 | 
13 |         assert "# Delete Failed - Note Not Found" in result
14 |         assert "The note 'test-note' could not be found" in result
15 |         assert 'search_notes("test-project", "test-note")' in result
16 |         assert "Already deleted" in result
17 |         assert "Wrong identifier" in result
18 | 
19 |     def test_format_delete_error_permission_denied(self, test_project):
20 |         """Test formatting for permission errors."""
21 |         result = _format_delete_error_response(test_project.name, "permission denied", "test-note")
22 | 
23 |         assert "# Delete Failed - Permission Error" in result
24 |         assert "You don't have permission to delete 'test-note'" in result
25 |         assert "Check permissions" in result
26 |         assert "File locks" in result
27 |         assert "list_memory_projects()" in result
28 | 
29 |     def test_format_delete_error_access_forbidden(self, test_project):
30 |         """Test formatting for access forbidden errors."""
31 |         result = _format_delete_error_response(test_project.name, "access forbidden", "test-note")
32 | 
33 |         assert "# Delete Failed - Permission Error" in result
34 |         assert "You don't have permission to delete 'test-note'" in result
35 | 
36 |     def test_format_delete_error_server_error(self, test_project):
37 |         """Test formatting for server errors."""
38 |         result = _format_delete_error_response(
39 |             test_project.name, "server error occurred", "test-note"
40 |         )
41 | 
42 |         assert "# Delete Failed - System Error" in result
43 |         assert "A system error occurred while deleting 'test-note'" in result
44 |         assert "Try again" in result
45 |         assert "Check file status" in result
46 | 
47 |     def test_format_delete_error_filesystem_error(self, test_project):
48 |         """Test formatting for filesystem errors."""
49 |         result = _format_delete_error_response(test_project.name, "filesystem error", "test-note")
50 | 
51 |         assert "# Delete Failed - System Error" in result
52 |         assert "A system error occurred while deleting 'test-note'" in result
53 | 
54 |     def test_format_delete_error_disk_error(self, test_project):
55 |         """Test formatting for disk errors."""
56 |         result = _format_delete_error_response(test_project.name, "disk full", "test-note")
57 | 
58 |         assert "# Delete Failed - System Error" in result
59 |         assert "A system error occurred while deleting 'test-note'" in result
60 | 
61 |     def test_format_delete_error_database_error(self, test_project):
62 |         """Test formatting for database errors."""
63 |         result = _format_delete_error_response(test_project.name, "database error", "test-note")
64 | 
65 |         assert "# Delete Failed - Database Error" in result
66 |         assert "A database error occurred while deleting 'test-note'" in result
67 |         assert "Sync conflict" in result
68 |         assert "Database lock" in result
69 | 
70 |     def test_format_delete_error_sync_error(self, test_project):
71 |         """Test formatting for sync errors."""
72 |         result = _format_delete_error_response(test_project.name, "sync failed", "test-note")
73 | 
74 |         assert "# Delete Failed - Database Error" in result
75 |         assert "A database error occurred while deleting 'test-note'" in result
76 | 
77 |     def test_format_delete_error_generic(self, test_project):
78 |         """Test formatting for generic errors."""
79 |         result = _format_delete_error_response(test_project.name, "unknown error", "test-note")
80 | 
81 |         assert "# Delete Failed" in result
82 |         assert "Error deleting note 'test-note': unknown error" in result
83 |         assert "General troubleshooting" in result
84 |         assert "Verify the note exists" in result
85 | 
86 |     def test_format_delete_error_with_complex_identifier(self, test_project):
87 |         """Test formatting with complex identifiers (permalinks)."""
88 |         result = _format_delete_error_response(
89 |             test_project.name, "entity not found", "folder/note-title"
90 |         )
91 | 
92 |         assert 'search_notes("test-project", "note-title")' in result
93 |         assert "Note Title" in result  # Title format
94 |         assert "folder/note-title" in result  # Permalink format
95 | 
96 | 
97 | # Integration tests removed to focus on error formatting coverage
98 | # The error formatting tests above provide the necessary coverage for MCP tool error messaging
99 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/importers/memory_json_importer.py:
--------------------------------------------------------------------------------

```python
  1 | """Memory JSON import service for Basic Memory."""
  2 | 
  3 | import logging
  4 | from typing import Any, Dict, List
  5 | 
  6 | from basic_memory.config import get_project_config
  7 | from basic_memory.markdown.schemas import EntityFrontmatter, EntityMarkdown, Observation, Relation
  8 | from basic_memory.importers.base import Importer
  9 | from basic_memory.schemas.importer import EntityImportResult
 10 | 
 11 | logger = logging.getLogger(__name__)
 12 | 
 13 | 
 14 | class MemoryJsonImporter(Importer[EntityImportResult]):
 15 |     """Service for importing memory.json format data."""
 16 | 
 17 |     async def import_data(
 18 |         self, source_data, destination_folder: str = "", **kwargs: Any
 19 |     ) -> EntityImportResult:
 20 |         """Import entities and relations from a memory.json file.
 21 | 
 22 |         Args:
  23 |             source_data: Parsed JSON objects, one per line of the memory.json file.
 24 |             destination_folder: Optional destination folder within the project.
 25 |             **kwargs: Additional keyword arguments.
 26 | 
 27 |         Returns:
 28 |             EntityImportResult containing statistics and status of the import.
 29 |         """
 30 |         config = get_project_config()
 31 |         try:
 32 |             # First pass - collect all relations by source entity
 33 |             entity_relations: Dict[str, List[Relation]] = {}
 34 |             entities: Dict[str, Dict[str, Any]] = {}
 35 |             skipped_entities: int = 0
 36 | 
 37 |             # Ensure the base path exists
 38 |             base_path = config.home  # pragma: no cover
 39 |             if destination_folder:  # pragma: no cover
 40 |                 base_path = self.ensure_folder_exists(destination_folder)
 41 | 
 42 |             # First pass - collect entities and relations
 43 |             for line in source_data:
 44 |                 data = line
 45 |                 if data["type"] == "entity":
 46 |                     # Handle different possible name keys
 47 |                     entity_name = data.get("name") or data.get("entityName") or data.get("id")
 48 |                     if not entity_name:
 49 |                         logger.warning(f"Entity missing name field: {data}")
 50 |                         skipped_entities += 1
 51 |                         continue
 52 |                     entities[entity_name] = data
 53 |                 elif data["type"] == "relation":
 54 |                     # Store relation with its source entity
 55 |                     source = data.get("from") or data.get("from_id")
 56 |                     if source not in entity_relations:
 57 |                         entity_relations[source] = []
 58 |                     entity_relations[source].append(
 59 |                         Relation(
 60 |                             type=data.get("relationType") or data.get("relation_type"),
 61 |                             target=data.get("to") or data.get("to_id"),
 62 |                         )
 63 |                     )
 64 | 
 65 |             # Second pass - create and write entities
 66 |             entities_created = 0
 67 |             for name, entity_data in entities.items():
 68 |                 # Get entity type with fallback
 69 |                 entity_type = entity_data.get("entityType") or entity_data.get("type") or "entity"
 70 | 
 71 |                 # Ensure entity type directory exists
 72 |                 entity_type_dir = base_path / entity_type
 73 |                 entity_type_dir.mkdir(parents=True, exist_ok=True)
 74 | 
 75 |                 # Get observations with fallback to empty list
 76 |                 observations = entity_data.get("observations", [])
 77 | 
 78 |                 entity = EntityMarkdown(
 79 |                     frontmatter=EntityFrontmatter(
 80 |                         metadata={
 81 |                             "type": entity_type,
 82 |                             "title": name,
 83 |                             "permalink": f"{entity_type}/{name}",
 84 |                         }
 85 |                     ),
 86 |                     content=f"# {name}\n",
 87 |                     observations=[Observation(content=obs) for obs in observations],
 88 |                     relations=entity_relations.get(name, []),
 89 |                 )
 90 | 
 91 |                 # Write entity file
 92 |                 file_path = base_path / f"{entity_type}/{name}.md"
 93 |                 await self.write_entity(entity, file_path)
 94 |                 entities_created += 1
 95 | 
 96 |             relations_count = sum(len(rels) for rels in entity_relations.values())
 97 | 
 98 |             return EntityImportResult(
 99 |                 import_count={"entities": entities_created, "relations": relations_count},
100 |                 success=True,
101 |                 entities=entities_created,
102 |                 relations=relations_count,
103 |                 skipped_entities=skipped_entities,
104 |             )
105 | 
106 |         except Exception as e:  # pragma: no cover
107 |             logger.exception("Failed to import memory.json")
108 |             return self.handle_error("Failed to import memory.json", e)  # pyright: ignore [reportReturnType]
109 | 
```

--------------------------------------------------------------------------------
/.claude/commands/release/changelog.md:
--------------------------------------------------------------------------------

```markdown
  1 | # /changelog - Generate or Update Changelog Entry
  2 | 
  3 | Analyze commits and generate formatted changelog entry for a version.
  4 | 
  5 | ## Usage
  6 | ```
  7 | /changelog <version> [type]
  8 | ```
  9 | 
 10 | **Parameters:**
 11 | - `version` (required): Version like `v0.14.0` or `v0.14.0b1`
 12 | - `type` (optional): `beta`, `rc`, or `stable` (default: `stable`)
 13 | 
 14 | ## Implementation
 15 | 
 16 | You are an expert technical writer for the Basic Memory project. When the user runs `/changelog`, execute the following steps:
 17 | 
 18 | ### Step 1: Version Analysis
 19 | 1. **Determine Commit Range**
 20 |    ```bash
 21 |    # Find last release tag
 22 |    git tag -l "v*" --sort=-version:refname | grep -v "b\|rc" | head -1
 23 |    
 24 |    # Get commits since last release
 25 |    git log --oneline ${last_tag}..HEAD
 26 |    ```
 27 | 
 28 | 2. **Parse Conventional Commits**
 29 |    - Extract feat: (features)
 30 |    - Extract fix: (bug fixes)  
 31 |    - Extract BREAKING CHANGE: (breaking changes)
 32 |    - Extract chore:, docs:, test: (other improvements)
 33 | 
 34 | ### Step 2: Categorize Changes
 35 | 1. **Features (feat:)**
 36 |    - New MCP tools
 37 |    - New CLI commands
 38 |    - New API endpoints
 39 |    - Major functionality additions
 40 | 
 41 | 2. **Bug Fixes (fix:)**
 42 |    - User-facing bug fixes
 43 |    - Critical issues resolved
 44 |    - Performance improvements
 45 |    - Security fixes
 46 | 
 47 | 3. **Technical Improvements**
 48 |    - Test coverage improvements
 49 |    - Code quality enhancements
 50 |    - Dependency updates
 51 |    - Documentation updates
 52 | 
 53 | 4. **Breaking Changes**
 54 |    - API changes
 55 |    - Configuration changes
 56 |    - Behavior changes
 57 |    - Migration requirements
 58 | 
 59 | ### Step 3: Generate Changelog Entry
 60 | Create formatted entry following existing CHANGELOG.md style:
 61 | 
 62 | Example:
 63 | ```markdown
 64 | ## <version> (<date>)
 65 | 
 66 | ### Features
 67 | 
 68 | - **Multi-Project Management System** - Switch between projects instantly during conversations
 69 |   ([`993e88a`](https://github.com/basicmachines-co/basic-memory/commit/993e88a)) 
 70 |   - Instant project switching with session context
 71 |   - Project-specific operations and isolation
 72 |   - Project discovery and management tools
 73 | 
 74 | - **Advanced Note Editing** - Incremental editing with append, prepend, find/replace, and section operations
 75 |   ([`6fc3904`](https://github.com/basicmachines-co/basic-memory/commit/6fc3904))
 76 |   - `edit_note` tool with multiple operation types
 77 |   - Smart frontmatter-aware editing
 78 |   - Validation and error handling
 79 | 
 80 | ### Bug Fixes
 81 | 
 82 | - **#118**: Fix YAML tag formatting to follow standard specification
 83 |   ([`2dc7e27`](https://github.com/basicmachines-co/basic-memory/commit/2dc7e27))
 84 | 
 85 | - **#110**: Make --project flag work consistently across CLI commands
 86 |   ([`02dd91a`](https://github.com/basicmachines-co/basic-memory/commit/02dd91a))
 87 | 
 88 | ### Technical Improvements
 89 | 
 90 | - **Comprehensive Testing** - 100% test coverage with integration testing
 91 |   ([`468a22f`](https://github.com/basicmachines-co/basic-memory/commit/468a22f))
 92 |   - MCP integration test suite
 93 |   - End-to-end testing framework
 94 |   - Performance and edge case validation
 95 | 
 96 | ### Breaking Changes
 97 | 
 98 | - **Database Migration**: Automatic migration from per-project to unified database. 
  99 |     Data will be re-indexed from the filesystem, resulting in no data loss.
100 | - **Configuration Changes**: Projects now synced between config.json and database
101 | - **Full Backward Compatibility**: All existing setups continue to work seamlessly
102 | ```
103 | 
104 | ### Step 4: Integration
105 | 1. **Update CHANGELOG.md**
106 |    - Insert new entry at top
107 |    - Maintain consistent formatting
108 |    - Include commit links and issue references
109 | 
110 | 2. **Validation**
111 |    - Check all major changes are captured
112 |    - Verify commit links work
113 |    - Ensure issue numbers are correct
114 | 
115 | ## Smart Analysis Features
116 | 
117 | ### Automatic Classification
118 | - Detect feature additions from file changes
119 | - Identify bug fixes from commit messages
120 | - Find breaking changes from code analysis
121 | - Extract issue numbers from commit messages
122 | 
123 | ### Content Enhancement
124 | - Add context for technical changes
125 | - Include migration guidance for breaking changes
126 | - Suggest installation/upgrade instructions
127 | - Link to relevant documentation
128 | 
129 | ## Output Format
130 | 
131 | ### For Beta Releases
132 | 
133 | Example: 
134 | ```markdown
135 | ## v0.13.0b4 (2025-06-03)
136 | 
137 | ### Beta Changes Since v0.13.0b3
138 | 
139 | - Fix FastMCP API compatibility issues
140 | - Update dependencies to latest versions  
141 | - Resolve setuptools import error
142 | 
143 | ### Installation
144 | ```bash
145 | uv tool install basic-memory --prerelease=allow
146 | ```
147 | 
148 | ### Known Issues
149 | - [List any known issues for beta testing]
150 | ```
151 | 
152 | ### For Stable Releases
153 | Full changelog with complete feature list, organized by impact and category.
154 | 
155 | ## Context
156 | - Follows existing CHANGELOG.md format and style
157 | - Uses conventional commit standards
158 | - Includes GitHub commit links for traceability
159 | - Focuses on user-facing changes and value
160 | - Maintains consistency with previous entries
```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/build_context.py:
--------------------------------------------------------------------------------

```python
  1 | """Build context tool for Basic Memory MCP server."""
  2 | 
  3 | from typing import Optional
  4 | 
  5 | from loguru import logger
  6 | from fastmcp import Context
  7 | 
  8 | from basic_memory.mcp.async_client import get_client
  9 | from basic_memory.mcp.project_context import get_active_project
 10 | from basic_memory.mcp.server import mcp
 11 | from basic_memory.mcp.tools.utils import call_get
 12 | from basic_memory.schemas.base import TimeFrame
 13 | from basic_memory.schemas.memory import (
 14 |     GraphContext,
 15 |     MemoryUrl,
 16 |     memory_url_path,
 17 | )
 18 | 
# MCP clients may serialize numeric arguments as strings; accept both forms
# for `depth` and normalize inside the tool.
type StringOrInt = str | int
 20 | 
 21 | 
@mcp.tool(
    description="""Build context from a memory:// URI to continue conversations naturally.

    Use this to follow up on previous discussions or explore related topics.

    Memory URL Format:
    - Use paths like "folder/note" or "memory://folder/note"
    - Pattern matching: "folder/*" matches all notes in folder
    - Valid characters: letters, numbers, hyphens, underscores, forward slashes
    - Avoid: double slashes (//), angle brackets (<>), quotes, pipes (|)
    - Examples: "specs/search", "projects/basic-memory", "notes/*"

    Timeframes support natural language like:
    - "2 days ago", "last week", "today", "3 months ago"
    - Or standard formats like "7d", "24h"
    """,
)
async def build_context(
    url: MemoryUrl,
    project: Optional[str] = None,
    depth: Optional[StringOrInt] = 1,
    timeframe: Optional[TimeFrame] = "7d",
    page: int = 1,
    page_size: int = 10,
    max_related: int = 10,
    context: Context | None = None,
) -> GraphContext:
    """Get context needed to continue a discussion within a specific project.

    This tool enables natural continuation of discussions by loading relevant context
    from memory:// URIs. It uses pattern matching to find relevant content and builds
    a rich context graph of related information.

    Project Resolution:
    Server resolves projects in this order: Single Project Mode → project parameter → default project.
    If project unknown, use list_memory_projects() or recent_activity() first.

    Args:
        url: memory:// URI pointing to discussion content (e.g. memory://specs/search)
        project: Project name to build context from. Optional - server will resolve using hierarchy.
                If unknown, use list_memory_projects() to discover available projects.
        depth: How many relation hops to traverse (1-3 recommended for performance).
                Accepts an int or a numeric string; other strings raise ToolError.
        timeframe: How far back to look. Supports natural language like "2 days ago", "last week"
        page: Page number of results to return (default: 1)
        page_size: Number of results to return per page (default: 10)
        max_related: Maximum number of related results to return (default: 10)
        context: Optional FastMCP context for performance caching.

    Returns:
        GraphContext containing:
            - primary_results: Content matching the memory:// URI
            - related_results: Connected content via relations
            - metadata: Context building details

    Examples:
        # Continue a specific discussion
        build_context("memory://specs/search", project="my-project")

        # Get deeper context about a component
        build_context("memory://components/memory-service", project="work-docs", depth=2)

        # Look at recent changes to a specification
        build_context("memory://specs/document-format", project="research", timeframe="today")

        # Research the history of a feature
        build_context("memory://features/knowledge-graph", project="dev-notes", timeframe="3 months ago")

    Raises:
        ToolError: If project doesn't exist or depth parameter is invalid
    """
    logger.info(f"Building context from {url} in project {project}")

    # Convert string depth to integer if needed (clients may send "2" instead of 2).
    if isinstance(depth, str):
        try:
            depth = int(depth)
        except ValueError:
            from mcp.server.fastmcp.exceptions import ToolError

            raise ToolError(f"Invalid depth parameter: '{depth}' is not a valid integer")

    # URL is already validated and normalized by MemoryUrl type annotation

    async with get_client() as client:
        # Get the active project using the new stateless approach
        active_project = await get_active_project(client, project, context)

        project_url = active_project.project_url

        response = await call_get(
            client,
            f"{project_url}/memory/{memory_url_path(url)}",
            params={
                "depth": depth,
                "timeframe": timeframe,
                "page": page,
                "page_size": page_size,
                "max_related": max_related,
            },
        )
        return GraphContext.model_validate(response.json())
123 | 
```

--------------------------------------------------------------------------------
/tests/cli/test_import_memory_json.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for import_memory_json command."""
  2 | 
  3 | import json
  4 | 
  5 | import pytest
  6 | from typer.testing import CliRunner
  7 | 
  8 | from basic_memory.cli.app import import_app
  9 | from basic_memory.cli.commands import import_memory_json  # noqa
 10 | from basic_memory.markdown import MarkdownProcessor
 11 | 
 12 | # Set up CLI runner
 13 | runner = CliRunner()
 14 | 
 15 | 
 16 | @pytest.fixture
 17 | def sample_entities():
 18 |     """Sample entities for testing."""
 19 |     return [
 20 |         {
 21 |             "type": "entity",
 22 |             "name": "test_entity",
 23 |             "entityType": "test",
 24 |             "observations": ["Test observation 1", "Test observation 2"],
 25 |         },
 26 |         {
 27 |             "type": "relation",
 28 |             "from": "test_entity",
 29 |             "to": "related_entity",
 30 |             "relationType": "test_relation",
 31 |         },
 32 |     ]
 33 | 
 34 | 
 35 | @pytest.fixture
 36 | def sample_json_file(tmp_path, sample_entities):
 37 |     """Create a sample memory.json file."""
 38 |     json_file = tmp_path / "memory.json"
 39 |     with open(json_file, "w", encoding="utf-8") as f:
 40 |         for entity in sample_entities:
 41 |             f.write(json.dumps(entity) + "\n")
 42 |     return json_file
 43 | 
 44 | 
 45 | @pytest.mark.asyncio
 46 | async def test_get_markdown_processor(tmp_path, monkeypatch):
 47 |     """Test getting markdown processor."""
 48 |     monkeypatch.setenv("HOME", str(tmp_path))
 49 |     processor = await import_memory_json.get_markdown_processor()
 50 |     assert isinstance(processor, MarkdownProcessor)
 51 | 
 52 | 
 53 | def test_import_json_command_file_not_found(tmp_path):
 54 |     """Test error handling for nonexistent file."""
 55 |     nonexistent = tmp_path / "nonexistent.json"
 56 |     result = runner.invoke(import_app, ["memory-json", str(nonexistent)])
 57 |     assert result.exit_code == 1
 58 |     assert "File not found" in result.output
 59 | 
 60 | 
 61 | def test_import_json_command_success(tmp_path, sample_json_file, monkeypatch):
 62 |     """Test successful JSON import via command."""
 63 |     # Set up test environment
 64 |     monkeypatch.setenv("HOME", str(tmp_path))
 65 | 
 66 |     # Run import
 67 |     result = runner.invoke(import_app, ["memory-json", str(sample_json_file)])
 68 |     assert result.exit_code == 0
 69 |     assert "Import complete" in result.output
 70 |     assert "Created 1 entities" in result.output
 71 |     assert "Added 1 relations" in result.output
 72 | 
 73 | 
 74 | def test_import_json_command_invalid_json(tmp_path):
 75 |     """Test error handling for invalid JSON."""
 76 |     # Create invalid JSON file
 77 |     invalid_file = tmp_path / "invalid.json"
 78 |     invalid_file.write_text("not json")
 79 | 
 80 |     result = runner.invoke(import_app, ["memory-json", str(invalid_file)])
 81 |     assert result.exit_code == 1
 82 |     assert "Error during import" in result.output
 83 | 
 84 | 
 85 | def test_import_json_command_handle_old_format(tmp_path):
 86 |     """Test handling old format JSON with from_id/to_id."""
 87 |     # Create JSON with old format
 88 |     old_format = [
 89 |         {
 90 |             "type": "entity",
 91 |             "name": "test_entity",
 92 |             "entityType": "test",
 93 |             "observations": ["Test observation"],
 94 |         },
 95 |         {
 96 |             "type": "relation",
 97 |             "from_id": "test_entity",
 98 |             "to_id": "other_entity",
 99 |             "relation_type": "test_relation",
100 |         },
101 |     ]
102 | 
103 |     json_file = tmp_path / "old_format.json"
104 |     with open(json_file, "w", encoding="utf-8") as f:
105 |         for item in old_format:
106 |             f.write(json.dumps(item) + "\n")
107 | 
108 |     # Set up test environment
109 |     monkeypatch = pytest.MonkeyPatch()
110 |     monkeypatch.setenv("HOME", str(tmp_path))
111 | 
112 |     # Run import
113 |     result = runner.invoke(import_app, ["memory-json", str(json_file)])
114 |     assert result.exit_code == 0
115 |     assert "Import complete" in result.output
116 | 
117 | 
def test_import_json_command_missing_name_key(tmp_path, monkeypatch):
    """Test handling JSON with missing 'name' key using 'id' instead.

    Covers the alternate identifier keys seen in Knowledge Graph Memory
    Server exports: 'id', 'entityName', and plain 'name'. Uses the pytest
    `monkeypatch` fixture (instead of a manually constructed
    pytest.MonkeyPatch, which was never undone and leaked the HOME override
    into later tests) so the environment change is reverted automatically.
    """
    data_with_id = [
        {
            "type": "entity",
            "id": "test_entity_id",
            "entityType": "test",
            "observations": ["Test observation with id"],
        },
        {
            "type": "entity",
            "entityName": "test_entity_2",
            "entityType": "test",
            "observations": ["Test observation with entityName"],
        },
        {
            "type": "entity",
            "name": "test_entity_title",
            "entityType": "test",
            "observations": ["Test observation with name"],
        },
    ]

    json_file = tmp_path / "missing_name.json"
    with open(json_file, "w", encoding="utf-8") as f:
        for item in data_with_id:
            f.write(json.dumps(item) + "\n")

    # Isolate config/home under the temp directory (auto-undone by the fixture).
    monkeypatch.setenv("HOME", str(tmp_path))

    # Run import - should not fail even without 'name' key
    result = runner.invoke(import_app, ["memory-json", str(json_file)])
    assert result.exit_code == 0
    assert "Import complete" in result.output
    assert "Created 3 entities" in result.output
156 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/project_context.py:
--------------------------------------------------------------------------------

```python
  1 | """Project context utilities for Basic Memory MCP server.
  2 | 
  3 | Provides project lookup utilities for MCP tools.
  4 | Handles project validation and context management in one place.
  5 | """
  6 | 
  7 | import os
  8 | from typing import Optional, List
  9 | from httpx import AsyncClient
 10 | from httpx._types import (
 11 |     HeaderTypes,
 12 | )
 13 | from loguru import logger
 14 | from fastmcp import Context
 15 | 
 16 | from basic_memory.config import ConfigManager
 17 | from basic_memory.mcp.tools.utils import call_get
 18 | from basic_memory.schemas.project_info import ProjectItem, ProjectList
 19 | from basic_memory.utils import generate_permalink
 20 | 
 21 | 
 22 | async def resolve_project_parameter(project: Optional[str] = None) -> Optional[str]:
 23 |     """Resolve project parameter using three-tier hierarchy.
 24 | 
 25 |     if config.cloud_mode:
 26 |         project is required
 27 |     else:
 28 |         Resolution order:
 29 |         1. Single Project Mode  (--project cli arg, or BASIC_MEMORY_MCP_PROJECT env var) - highest priority
 30 |         2. Explicit project parameter - medium priority
 31 |         3. Default project if default_project_mode=true - lowest priority
 32 | 
 33 |     Args:
 34 |         project: Optional explicit project parameter
 35 | 
 36 |     Returns:
 37 |         Resolved project name or None if no resolution possible
 38 |     """
 39 | 
 40 |     config = ConfigManager().config
 41 |     # if cloud_mode, project is required
 42 |     if config.cloud_mode:
 43 |         if project:
 44 |             logger.debug(f"project: {project}, cloud_mode: {config.cloud_mode}")
 45 |             return project
 46 |         else:
 47 |             raise ValueError("No project specified. Project is required for cloud mode.")
 48 | 
 49 |     # Priority 1: CLI constraint overrides everything (--project arg sets env var)
 50 |     constrained_project = os.environ.get("BASIC_MEMORY_MCP_PROJECT")
 51 |     if constrained_project:
 52 |         logger.debug(f"Using CLI constrained project: {constrained_project}")
 53 |         return constrained_project
 54 | 
 55 |     # Priority 2: Explicit project parameter
 56 |     if project:
 57 |         logger.debug(f"Using explicit project parameter: {project}")
 58 |         return project
 59 | 
 60 |     # Priority 3: Default project mode
 61 |     if config.default_project_mode:
 62 |         logger.debug(f"Using default project from config: {config.default_project}")
 63 |         return config.default_project
 64 | 
 65 |     # No resolution possible
 66 |     return None
 67 | 
 68 | 
 69 | async def get_project_names(client: AsyncClient, headers: HeaderTypes | None = None) -> List[str]:
 70 |     response = await call_get(client, "/projects/projects", headers=headers)
 71 |     project_list = ProjectList.model_validate(response.json())
 72 |     return [project.name for project in project_list.projects]
 73 | 
 74 | 
async def get_active_project(
    client: AsyncClient,
    project: Optional[str] = None,
    context: Optional[Context] = None,
    headers: HeaderTypes | None = None,
) -> ProjectItem:
    """Get and validate project, setting it in context if available.

    Args:
        client: HTTP client for API calls
        project: Optional project name (resolved using hierarchy)
        context: Optional FastMCP context used to cache the validated project
            between tool calls
        headers: Optional HTTP headers forwarded to the API calls

    Returns:
        The validated project item

    Raises:
        ValueError: If no project can be resolved
        HTTPError: If project doesn't exist or is inaccessible
    """
    resolved_project = await resolve_project_parameter(project)
    if not resolved_project:
        # Include the available project names in the error so the caller
        # can retry with a valid one.
        project_names = await get_project_names(client, headers)
        raise ValueError(
            "No project specified. "
            "Either set 'default_project_mode=true' in config, or use 'project' argument.\n"
            f"Available projects: {project_names}"
        )

    project = resolved_project

    # Reuse the previously validated project when the same name is already
    # cached in this context — skips one API round-trip per tool call.
    if context:
        cached_project = context.get_state("active_project")
        if cached_project and cached_project.name == project:
            logger.debug(f"Using cached project from context: {project}")
            return cached_project

    # Validate project exists by calling API
    logger.debug(f"Validating project: {project}")
    permalink = generate_permalink(project)
    response = await call_get(client, f"/{permalink}/project/item", headers=headers)
    active_project = ProjectItem.model_validate(response.json())

    # Cache in context if available
    if context:
        context.set_state("active_project", active_project)
        logger.debug(f"Cached project in context: {project}")

    logger.debug(f"Validated project: {active_project.name}")
    return active_project
126 | 
127 | 
def add_project_metadata(result: str, project_name: str) -> str:
    """Append a session-tracking footer naming the active project.

    The footer reminds the assistant which project the conversation is
    operating on, so subsequent tool calls stay consistent.

    Args:
        result: The tool result string
        project_name: The project name that was used

    Returns:
        The result text followed by the session metadata footer
    """
    footer = f"[Session: Using project '{project_name}']"
    return f"{result}\n\n{footer}"
142 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_tool_recent_activity.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for discussion context MCP tool."""
  2 | 
  3 | import pytest
  4 | 
  5 | from mcp.server.fastmcp.exceptions import ToolError
  6 | 
  7 | from basic_memory.mcp.tools import recent_activity
  8 | from basic_memory.schemas.search import SearchItemType
  9 | 
# Timeframe inputs that recent_activity must accept (standard durations plus
# natural-language phrases).
valid_timeframes = [
    "7d",  # Standard format
    "yesterday",  # Natural language
    "0d",  # Zero duration
]

# Inputs expected to be rejected with a ToolError.
invalid_timeframes = [
    "invalid",  # Nonsense string
    # NOTE: "tomorrow" now returns 1 day ago due to timezone safety - no longer invalid
]
 21 | 
 22 | 
 23 | @pytest.mark.asyncio
 24 | async def test_recent_activity_timeframe_formats(client, test_project, test_graph):
 25 |     """Test that recent_activity accepts various timeframe formats."""
 26 |     # Test each valid timeframe with project-specific mode
 27 |     for timeframe in valid_timeframes:
 28 |         try:
 29 |             result = await recent_activity.fn(
 30 |                 project=test_project.name,
 31 |                 type=["entity"],
 32 |                 timeframe=timeframe,
 33 |             )
 34 |             assert result is not None
 35 |             assert isinstance(result, str)
 36 |             assert "Recent Activity:" in result
 37 |             assert timeframe in result
 38 |         except Exception as e:
 39 |             pytest.fail(f"Failed with valid timeframe '{timeframe}': {str(e)}")
 40 | 
 41 |     # Test invalid timeframes should raise ValidationError
 42 |     for timeframe in invalid_timeframes:
 43 |         with pytest.raises(ToolError):
 44 |             await recent_activity.fn(project=test_project.name, timeframe=timeframe)
 45 | 
 46 | 
 47 | @pytest.mark.asyncio
 48 | async def test_recent_activity_type_filters(client, test_project, test_graph):
 49 |     """Test that recent_activity correctly filters by types."""
 50 | 
 51 |     # Test single string type
 52 |     result = await recent_activity.fn(project=test_project.name, type=SearchItemType.ENTITY)
 53 |     assert result is not None
 54 |     assert isinstance(result, str)
 55 |     assert "Recent Activity:" in result
 56 |     assert "Recent Notes & Documents" in result
 57 | 
 58 |     # Test single string type
 59 |     result = await recent_activity.fn(project=test_project.name, type="entity")
 60 |     assert result is not None
 61 |     assert isinstance(result, str)
 62 |     assert "Recent Activity:" in result
 63 |     assert "Recent Notes & Documents" in result
 64 | 
 65 |     # Test single type
 66 |     result = await recent_activity.fn(project=test_project.name, type=["entity"])
 67 |     assert result is not None
 68 |     assert isinstance(result, str)
 69 |     assert "Recent Activity:" in result
 70 |     assert "Recent Notes & Documents" in result
 71 | 
 72 |     # Test multiple types
 73 |     result = await recent_activity.fn(project=test_project.name, type=["entity", "observation"])
 74 |     assert result is not None
 75 |     assert isinstance(result, str)
 76 |     assert "Recent Activity:" in result
 77 |     # Should contain sections for both types
 78 |     assert "Recent Notes & Documents" in result or "Recent Observations" in result
 79 | 
 80 |     # Test multiple types
 81 |     result = await recent_activity.fn(
 82 |         project=test_project.name, type=[SearchItemType.ENTITY, SearchItemType.OBSERVATION]
 83 |     )
 84 |     assert result is not None
 85 |     assert isinstance(result, str)
 86 |     assert "Recent Activity:" in result
 87 |     # Should contain sections for both types
 88 |     assert "Recent Notes & Documents" in result or "Recent Observations" in result
 89 | 
 90 |     # Test all types
 91 |     result = await recent_activity.fn(
 92 |         project=test_project.name, type=["entity", "observation", "relation"]
 93 |     )
 94 |     assert result is not None
 95 |     assert isinstance(result, str)
 96 |     assert "Recent Activity:" in result
 97 |     assert "Activity Summary:" in result
 98 | 
 99 | 
@pytest.mark.asyncio
async def test_recent_activity_type_invalid(client, test_project, test_graph):
    """Invalid `type` values are rejected with a descriptive ValueError."""
    expected = "Invalid type: note. Valid types are: ['entity', 'observation', 'relation']"

    # A bare string and a one-element list are both rejected identically.
    for bad_type in ("note", ["note"]):
        with pytest.raises(ValueError) as excinfo:
            await recent_activity.fn(project=test_project.name, type=bad_type)
        assert str(excinfo.value) == expected
117 | 
118 | 
@pytest.mark.asyncio
async def test_recent_activity_discovery_mode(client, test_project, test_graph):
    """Without a project argument the tool returns a cross-project summary."""
    summary = await recent_activity.fn()

    assert summary is not None
    assert isinstance(summary, str)

    # The formatted cross-project summary sections are all present.
    assert "Recent Activity Summary" in summary
    assert "Summary:" in summary
    assert "active projects" in summary
    assert "Most Active Project:" in summary or "Other Active Projects:" in summary

    # Guidance that helps the assistant pick a project for the session.
    assert "Suggested project:" in summary or "Multiple active projects" in summary
    assert "Session reminder:" in summary
```

--------------------------------------------------------------------------------
/src/basic_memory/markdown/markdown_processor.py:
--------------------------------------------------------------------------------

```python
  1 | from pathlib import Path
  2 | from typing import Optional
  3 | from collections import OrderedDict
  4 | 
  5 | from frontmatter import Post
  6 | from loguru import logger
  7 | 
  8 | from basic_memory import file_utils
  9 | from basic_memory.file_utils import dump_frontmatter
 10 | from basic_memory.markdown.entity_parser import EntityParser
 11 | from basic_memory.markdown.schemas import EntityMarkdown, Observation, Relation
 12 | 
 13 | 
 14 | class DirtyFileError(Exception):
 15 |     """Raised when attempting to write to a file that has been modified."""
 16 | 
 17 |     pass
 18 | 
 19 | 
class MarkdownProcessor:
    """Process markdown files while preserving content and structure.

    used only for import

    This class handles the file I/O aspects of our markdown processing. It:
    1. Uses EntityParser for reading/parsing files into our schema
    2. Handles writing files with proper frontmatter
    3. Formats structured sections (observations/relations) consistently
    4. Preserves user content exactly as written
    5. Performs atomic writes using temp files

    It does NOT:
    1. Modify the schema directly (that's done by services)
    2. Handle in-place updates (everything is read->modify->write)
    3. Track schema changes (that's done by the database)
    """

    def __init__(self, entity_parser: EntityParser):
        """Initialize processor with base path and parser."""
        self.entity_parser = entity_parser

    async def read_file(self, path: Path) -> EntityMarkdown:
        """Read and parse file into EntityMarkdown schema.

        This is step 1 of our read->modify->write pattern.
        We use EntityParser to handle all the markdown parsing.
        """
        return await self.entity_parser.parse_file(path)

    async def write_file(
        self,
        path: Path,
        markdown: EntityMarkdown,
        expected_checksum: Optional[str] = None,
    ) -> str:
        """Write EntityMarkdown schema back to file.

        This is step 3 of our read->modify->write pattern.
        The entire file is rewritten atomically on each update.

        File Structure:
        ---
        frontmatter fields
        ---
        user content area (preserved exactly)
        formatted observation lines (if any)
        formatted relation lines (if any)

        NOTE(review): no "## Observations" / "## Relations" headings are
        emitted here — the formatted lines are appended directly after the
        content; see format_observations/format_relations below.

        Args:
            path: Where to write the file
            markdown: Complete schema to write
            expected_checksum: If provided, verify file hasn't changed
                (requires the file to already exist on disk)

        Returns:
            Checksum of written file

        Raises:
            DirtyFileError: If file has been modified (when expected_checksum provided)
        """
        # Dirty check if needed: compare the on-disk checksum with what the
        # caller last saw before overwriting their changes.
        if expected_checksum is not None:
            current_content = path.read_text(encoding="utf-8")
            current_checksum = await file_utils.compute_checksum(current_content)
            if current_checksum != expected_checksum:
                raise DirtyFileError(f"File {path} has been modified")

        # Convert frontmatter to dict; OrderedDict keeps title/type/permalink
        # first, followed by any extra metadata keys.
        frontmatter_dict = OrderedDict()
        frontmatter_dict["title"] = markdown.frontmatter.title
        frontmatter_dict["type"] = markdown.frontmatter.type
        frontmatter_dict["permalink"] = markdown.frontmatter.permalink

        metadata = markdown.frontmatter.metadata or {}
        for k, v in metadata.items():
            frontmatter_dict[k] = v

        # Start with user content (or minimal title for new files)
        content = markdown.content or f"# {markdown.frontmatter.title}\n"

        # Add structured sections with proper spacing
        content = content.rstrip()  # Remove trailing whitespace

        # Re-terminate the content's final line before appending the
        # structured lines (rstrip above removed the trailing newline).
        if markdown.observations or markdown.relations:
            content += "\n"

        if markdown.observations:
            content += self.format_observations(markdown.observations)
        if markdown.relations:
            content += self.format_relations(markdown.relations)

        # Create Post object for frontmatter
        post = Post(content, **frontmatter_dict)
        final_content = dump_frontmatter(post)

        logger.debug(f"writing file {path} with content:\n{final_content}")

        # Write atomically and return checksum of updated file
        path.parent.mkdir(parents=True, exist_ok=True)
        await file_utils.write_file_atomic(path, final_content)
        return await file_utils.compute_checksum(final_content)

    def format_observations(self, observations: list[Observation]) -> str:
        """Format observations section in standard way.

        Format (one line per observation, via Observation.__str__):
        - [category] content #tag1 #tag2 (context)
        """
        lines = [f"{obs}" for obs in observations]
        return "\n".join(lines) + "\n"

    def format_relations(self, relations: list[Relation]) -> str:
        """Format relations section in standard way.

        Format (one line per relation, via Relation.__str__):
        - relation_type [[target]] (context)
        """
        lines = [f"{rel}" for rel in relations]
        return "\n".join(lines) + "\n"
142 | 
```

--------------------------------------------------------------------------------
/tests/api/test_prompt_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for the prompt router endpoints."""
  2 | 
  3 | import pytest
  4 | import pytest_asyncio
  5 | from httpx import AsyncClient
  6 | 
  7 | from basic_memory.services.context_service import ContextService
  8 | 
  9 | 
@pytest_asyncio.fixture
async def context_service(entity_repository, search_service, observation_repository):
    """Create a real context service for testing."""
    # Real (non-mocked) service wired to the repository/search fixtures.
    return ContextService(entity_repository, search_service, observation_repository)
 14 | 
 15 | 
@pytest.mark.asyncio
async def test_continue_conversation_endpoint(
    client: AsyncClient,
    entity_service,
    search_service,
    context_service,
    entity_repository,
    test_graph,
    project_url,
):
    """Test the continue_conversation endpoint with real services.

    First calls the endpoint with an explicit topic, then without one
    (which should fall back to recent activity).
    """
    # Create request data
    request_data = {
        "topic": "Root",  # This should match our test entity in test_graph
        "timeframe": "7d",
        "depth": 1,
        "related_items_limit": 2,
    }

    # Call the endpoint
    response = await client.post(f"{project_url}/prompt/continue-conversation", json=request_data)

    # Verify response
    assert response.status_code == 200
    result = response.json()
    assert "prompt" in result
    assert "context" in result

    # Check content of context: echoes the request and carries results.
    context = result["context"]
    assert context["topic"] == "Root"
    assert context["timeframe"] == "7d"
    assert context["has_results"] is True
    assert len(context["hierarchical_results"]) > 0

    # Check content of prompt
    prompt = result["prompt"]
    assert "Continuing conversation on: Root" in prompt
    assert "memory retrieval session" in prompt

    # Test without topic - should use recent activity
    request_data = {"timeframe": "1d", "depth": 1, "related_items_limit": 2}

    response = await client.post(f"{project_url}/prompt/continue-conversation", json=request_data)

    assert response.status_code == 200
    result = response.json()
    assert "Recent Activity" in result["context"]["topic"]
 64 | 
 65 | 
 66 | @pytest.mark.asyncio
 67 | async def test_search_prompt_endpoint(
 68 |     client: AsyncClient, entity_service, search_service, test_graph, project_url
 69 | ):
 70 |     """Test the search_prompt endpoint with real services."""
 71 |     # Create request data
 72 |     request_data = {
 73 |         "query": "Root",  # This should match our test entity
 74 |         "timeframe": "7d",
 75 |     }
 76 | 
 77 |     # Call the endpoint
 78 |     response = await client.post(f"{project_url}/prompt/search", json=request_data)
 79 | 
 80 |     # Verify response
 81 |     assert response.status_code == 200
 82 |     result = response.json()
 83 |     assert "prompt" in result
 84 |     assert "context" in result
 85 | 
 86 |     # Check content of context
 87 |     context = result["context"]
 88 |     assert context["query"] == "Root"
 89 |     assert context["timeframe"] == "7d"
 90 |     assert context["has_results"] is True
 91 |     assert len(context["results"]) > 0
 92 | 
 93 |     # Check content of prompt
 94 |     prompt = result["prompt"]
 95 |     assert 'Search Results for: "Root"' in prompt
 96 |     assert "This is a memory search session" in prompt
 97 | 
 98 | 
 99 | @pytest.mark.asyncio
100 | async def test_search_prompt_no_results(
101 |     client: AsyncClient, entity_service, search_service, project_url
102 | ):
103 |     """Test the search_prompt endpoint with a query that returns no results."""
104 |     # Create request data with a query that shouldn't match anything
105 |     request_data = {"query": "NonExistentQuery12345", "timeframe": "7d"}
106 | 
107 |     # Call the endpoint
108 |     response = await client.post(f"{project_url}/prompt/search", json=request_data)
109 | 
110 |     # Verify response
111 |     assert response.status_code == 200
112 |     result = response.json()
113 | 
114 |     # Check content of context
115 |     context = result["context"]
116 |     assert context["query"] == "NonExistentQuery12345"
117 |     assert context["has_results"] is False
118 |     assert len(context["results"]) == 0
119 | 
120 |     # Check content of prompt
121 |     prompt = result["prompt"]
122 |     assert 'Search Results for: "NonExistentQuery12345"' in prompt
123 |     assert "I couldn't find any results for this query" in prompt
124 |     assert "Opportunity to Capture Knowledge" in prompt
125 | 
126 | 
@pytest.mark.asyncio
async def test_error_handling(client: AsyncClient, monkeypatch, project_url):
    """Both prompt endpoints should surface template failures as HTTP 500s."""

    def broken_render(*args, **kwargs):
        raise Exception("Template error")

    # Breaking the template loader forces the error path in every endpoint.
    monkeypatch.setattr("basic_memory.api.template_loader.TemplateLoader.render", broken_render)

    # Each endpoint must report the underlying template error in its detail.
    for endpoint, payload in (
        ("continue-conversation", {"topic": "test error", "timeframe": "7d"}),
        ("search", {"query": "test error", "timeframe": "7d"}),
    ):
        response = await client.post(f"{project_url}/prompt/{endpoint}", json=payload)
        assert response.status_code == 500
        body = response.json()
        assert "detail" in body
        assert "Template error" in body["detail"]
156 | 
```

--------------------------------------------------------------------------------
/tests/markdown/test_parser_edge_cases.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for markdown parser edge cases."""
  2 | 
  3 | from pathlib import Path
  4 | from textwrap import dedent
  5 | 
  6 | import pytest
  7 | 
  8 | from basic_memory.markdown.entity_parser import EntityParser
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_unicode_content(tmp_path):
 13 |     """Test handling of Unicode content including emoji and non-Latin scripts."""
 14 |     content = dedent("""
 15 |         ---
 16 |         type: test
 17 |         id: test/unicode
 18 |         created: 2024-12-21T14:00:00Z
 19 |         modified: 2024-12-21T14:00:00Z
 20 |         tags: [unicode, 测试]
 21 |         ---
 22 |         
 23 |         # Unicode Test 🧪
 24 |         
 25 |         ## Observations
 26 |         - [test] Emoji test 👍 #emoji #test (Testing emoji)
 27 |         - [中文] Chinese text 测试 #language (Script test)
 28 |         - [русский] Russian привет #language (More scripts)
 29 |         - [note] Emoji in text 😀 #meta (Category test)
 30 |         
 31 |         ## Relations
 32 |         - tested_by [[测试组件]] (Unicode test)
 33 |         - depends_on [[компонент]] (Another test)
 34 |         """)
 35 | 
 36 |     test_file = tmp_path / "unicode.md"
 37 |     test_file.write_text(content, encoding="utf-8")
 38 | 
 39 |     parser = EntityParser(tmp_path)
 40 |     entity = await parser.parse_file(test_file)
 41 | 
 42 |     assert "测试" in entity.frontmatter.metadata["tags"]
 43 |     assert "chinese" not in entity.frontmatter.metadata["tags"]
 44 |     assert "🧪" in entity.content
 45 | 
 46 |     # Verify Unicode in observations
 47 |     assert any(o.content == "Emoji test 👍 #emoji #test" for o in entity.observations)
 48 |     assert any(o.category == "中文" for o in entity.observations)
 49 |     assert any(o.category == "русский" for o in entity.observations)
 50 | 
 51 |     # Verify Unicode in relations
 52 |     assert any(r.target == "测试组件" for r in entity.relations)
 53 |     assert any(r.target == "компонент" for r in entity.relations)
 54 | 
 55 | 
 56 | @pytest.mark.asyncio
 57 | async def test_empty_file(tmp_path):
 58 |     """Test handling of empty files."""
 59 |     empty_file = tmp_path / "empty.md"
 60 |     empty_file.write_text("")
 61 | 
 62 |     parser = EntityParser(tmp_path)
 63 |     entity = await parser.parse_file(empty_file)
 64 |     assert entity.observations == []
 65 |     assert entity.relations == []
 66 | 
 67 | 
 68 | @pytest.mark.asyncio
 69 | async def test_missing_sections(tmp_path):
 70 |     """Test handling of files with missing sections."""
 71 |     content = dedent("""
 72 |         ---
 73 |         type: test
 74 |         id: test/missing
 75 |         created: 2024-01-09
 76 |         modified: 2024-01-09
 77 |         tags: []
 78 |         ---
 79 |         
 80 |         Just some content
 81 |         with [[links]] but no sections
 82 |         """)
 83 | 
 84 |     test_file = tmp_path / "missing.md"
 85 |     test_file.write_text(content)
 86 | 
 87 |     parser = EntityParser(tmp_path)
 88 |     entity = await parser.parse_file(test_file)
 89 |     assert len(entity.relations) == 1
 90 |     assert entity.relations[0].target == "links"
 91 |     assert entity.relations[0].type == "links to"
 92 | 
 93 | 
 94 | @pytest.mark.asyncio
 95 | async def test_tasks_are_not_observations(tmp_path):
 96 |     """Test handling of plain observations without categories."""
 97 |     content = dedent("""
 98 |         ---
 99 |         type: test
100 |         id: test/missing
101 |         created: 2024-01-09
102 |         modified: 2024-01-09
103 |         tags: []
104 |         ---
105 | 
106 |         - [ ] one
107 |         -[ ] two
108 |         - [x] done
109 |         - [-] not done
110 |         """)
111 | 
112 |     test_file = tmp_path / "missing.md"
113 |     test_file.write_text(content)
114 | 
115 |     parser = EntityParser(tmp_path)
116 |     entity = await parser.parse_file(test_file)
117 |     assert len(entity.observations) == 0
118 | 
119 | 
@pytest.mark.asyncio
async def test_nested_content(tmp_path):
    """Observations and relations are found at any nesting depth."""
    content = dedent("""
        ---
        type: test
        id: test/nested
        created: 2024-01-09
        modified: 2024-01-09
        tags: []
        ---
        
        # Test
        
        ## Level 1
        - [test] Level 1 #test (First level)
        - implements [[One]]
            
            ### Level 2
            - [test] Level 2 #test (Second level)
            - uses [[Two]]
                
                #### Level 3
                - [test] Level 3 #test (Third level)
                - needs [[Three]]
        """)

    source = tmp_path / "nested.md"
    source.write_text(content)

    entity = await EntityParser(tmp_path).parse_file(source)

    # One observation and one relation per heading level.
    assert len(entity.observations) == 3
    assert len(entity.relations) == 3
    assert {r.target for r in entity.relations} == {"One", "Two", "Three"}
157 | 
158 | 
@pytest.mark.asyncio
async def test_malformed_frontmatter(tmp_path):
    """Frontmatter missing expected fields should not break parsing."""
    # Only `type` is present; id/created/modified/tags are absent.
    content = dedent("""
        ---
        type: test
        ---
        
        # Test
        """)

    source = tmp_path / "malformed.md"
    source.write_text(content)

    entity = await EntityParser(tmp_path).parse_file(source)
    # With no id field the parser leaves the permalink unset.
    assert entity.frontmatter.permalink is None
177 | 
178 | 
@pytest.mark.asyncio
async def test_file_not_found():
    """Parsing a path that does not exist raises FileNotFoundError."""
    missing = Path("nonexistent.md")
    with pytest.raises(FileNotFoundError):
        await EntityParser(Path("/tmp")).parse_file(missing)
185 | 
```

--------------------------------------------------------------------------------
/tests/repository/test_entity_upsert_issue_187.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for issue #187 - UNIQUE constraint violation on file_path during sync."""
  2 | 
  3 | import pytest
  4 | from datetime import datetime, timezone
  5 | 
  6 | from basic_memory.models.knowledge import Entity, Observation
  7 | from basic_memory.repository.entity_repository import EntityRepository
  8 | 
  9 | 
 10 | @pytest.mark.asyncio
 11 | async def test_upsert_entity_with_observations_conflict(entity_repository: EntityRepository):
 12 |     """Test upserting an entity that already exists with observations.
 13 | 
 14 |     This reproduces issue #187 where sync fails with UNIQUE constraint violations
 15 |     when trying to update entities that already exist with observations.
 16 |     """
 17 |     # Create initial entity with observations
 18 |     entity1 = Entity(
 19 |         project_id=entity_repository.project_id,
 20 |         title="Original Title",
 21 |         entity_type="note",
 22 |         permalink="debugging/backup-system/coderabbit-feedback-resolution",
 23 |         file_path="debugging/backup-system/CodeRabbit Feedback Resolution - Backup System Issues.md",
 24 |         content_type="text/markdown",
 25 |         created_at=datetime.now(timezone.utc),
 26 |         updated_at=datetime.now(timezone.utc),
 27 |     )
 28 | 
 29 |     # Add observations to the entity
 30 |     obs1 = Observation(
 31 |         content="This is a test observation",
 32 |         category="testing",
 33 |         tags=["test"],
 34 |     )
 35 |     entity1.observations.append(obs1)
 36 | 
 37 |     result1 = await entity_repository.upsert_entity(entity1)
 38 |     original_id = result1.id
 39 | 
 40 |     # Verify entity was created with observations
 41 |     assert result1.id is not None
 42 |     assert len(result1.observations) == 1
 43 | 
 44 |     # Now try to upsert the same file_path with different content/observations
 45 |     # This simulates a file being modified and re-synced
 46 |     entity2 = Entity(
 47 |         project_id=entity_repository.project_id,
 48 |         title="Updated Title",
 49 |         entity_type="note",
 50 |         permalink="debugging/backup-system/coderabbit-feedback-resolution",  # Same permalink
 51 |         file_path="debugging/backup-system/CodeRabbit Feedback Resolution - Backup System Issues.md",  # Same file_path
 52 |         content_type="text/markdown",
 53 |         created_at=datetime.now(timezone.utc),
 54 |         updated_at=datetime.now(timezone.utc),
 55 |     )
 56 | 
 57 |     # Add different observations
 58 |     obs2 = Observation(
 59 |         content="This is an updated observation",
 60 |         category="updated",
 61 |         tags=["updated"],
 62 |     )
 63 |     obs3 = Observation(
 64 |         content="This is a second observation",
 65 |         category="second",
 66 |         tags=["second"],
 67 |     )
 68 |     entity2.observations.extend([obs2, obs3])
 69 | 
 70 |     # This should UPDATE the existing entity, not fail with IntegrityError
 71 |     result2 = await entity_repository.upsert_entity(entity2)
 72 | 
 73 |     # Should update existing entity (same ID)
 74 |     assert result2.id == original_id
 75 |     assert result2.title == "Updated Title"
 76 |     assert result2.file_path == entity1.file_path
 77 |     assert result2.permalink == entity1.permalink
 78 | 
 79 |     # Observations should be updated
 80 |     assert len(result2.observations) == 2
 81 |     assert result2.observations[0].content == "This is an updated observation"
 82 |     assert result2.observations[1].content == "This is a second observation"
 83 | 
 84 | 
 85 | @pytest.mark.asyncio
 86 | async def test_upsert_entity_repeated_sync_same_file(entity_repository: EntityRepository):
 87 |     """Test that syncing the same file multiple times doesn't cause IntegrityError.
 88 | 
 89 |     This tests the specific scenario from issue #187 where files are being
 90 |     synced repeatedly and hitting UNIQUE constraint violations.
 91 |     """
 92 |     file_path = "processes/Complete Process for Uploading New Training Videos.md"
 93 |     permalink = "processes/complete-process-for-uploading-new-training-videos"
 94 | 
 95 |     # Create initial entity
 96 |     entity1 = Entity(
 97 |         project_id=entity_repository.project_id,
 98 |         title="Complete Process for Uploading New Training Videos",
 99 |         entity_type="note",
100 |         permalink=permalink,
101 |         file_path=file_path,
102 |         content_type="text/markdown",
103 |         checksum="abc123",
104 |         created_at=datetime.now(timezone.utc),
105 |         updated_at=datetime.now(timezone.utc),
106 |     )
107 | 
108 |     result1 = await entity_repository.upsert_entity(entity1)
109 |     first_id = result1.id
110 | 
111 |     # Simulate multiple sync attempts (like the infinite retry loop in the issue)
112 |     for i in range(5):
113 |         entity_new = Entity(
114 |             project_id=entity_repository.project_id,
115 |             title="Complete Process for Uploading New Training Videos",
116 |             entity_type="note",
117 |             permalink=permalink,
118 |             file_path=file_path,
119 |             content_type="text/markdown",
120 |             checksum=f"def{456 + i}",  # Different checksum each time
121 |             created_at=datetime.now(timezone.utc),
122 |             updated_at=datetime.now(timezone.utc),
123 |         )
124 | 
125 |         # Each upsert should succeed and update the existing entity
126 |         result = await entity_repository.upsert_entity(entity_new)
127 | 
128 |         # Should always return the same entity (updated)
129 |         assert result.id == first_id
130 |         assert result.checksum == entity_new.checksum
131 |         assert result.file_path == file_path
132 |         assert result.permalink == permalink
133 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_tool_build_context.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for discussion context MCP tool."""
  2 | 
  3 | import pytest
  4 | from datetime import datetime
  5 | 
  6 | from mcp.server.fastmcp.exceptions import ToolError
  7 | 
  8 | from basic_memory.mcp.tools import build_context
  9 | from basic_memory.schemas.memory import (
 10 |     GraphContext,
 11 | )
 12 | 
 13 | 
 14 | @pytest.mark.asyncio
 15 | async def test_get_basic_discussion_context(client, test_graph, test_project):
 16 |     """Test getting basic discussion context."""
 17 |     context = await build_context.fn(project=test_project.name, url="memory://test/root")
 18 | 
 19 |     assert isinstance(context, GraphContext)
 20 |     assert len(context.results) == 1
 21 |     assert context.results[0].primary_result.permalink == "test/root"
 22 |     assert len(context.results[0].related_results) > 0
 23 | 
 24 |     # Verify metadata
 25 |     assert context.metadata.uri == "test/root"
 26 |     assert context.metadata.depth == 1  # default depth
 27 |     assert context.metadata.timeframe is not None
 28 |     assert isinstance(context.metadata.generated_at, datetime)
 29 |     assert context.metadata.primary_count == 1
 30 |     if context.metadata.related_count:
 31 |         assert context.metadata.related_count > 0
 32 | 
 33 | 
 34 | @pytest.mark.asyncio
 35 | async def test_get_discussion_context_pattern(client, test_graph, test_project):
 36 |     """Test getting context with pattern matching."""
 37 |     context = await build_context.fn(project=test_project.name, url="memory://test/*", depth=1)
 38 | 
 39 |     assert isinstance(context, GraphContext)
 40 |     assert len(context.results) > 1  # Should match multiple test/* paths
 41 |     assert all("test/" in item.primary_result.permalink for item in context.results)  # pyright: ignore [reportOperatorIssue]
 42 |     assert context.metadata.depth == 1
 43 | 
 44 | 
 45 | @pytest.mark.asyncio
 46 | async def test_get_discussion_context_timeframe(client, test_graph, test_project):
 47 |     """Test timeframe parameter filtering."""
 48 |     # Get recent context
 49 |     recent_context = await build_context.fn(
 50 |         project=test_project.name,
 51 |         url="memory://test/root",
 52 |         timeframe="1d",  # Last 24 hours
 53 |     )
 54 | 
 55 |     # Get older context
 56 |     older_context = await build_context.fn(
 57 |         project=test_project.name,
 58 |         url="memory://test/root",
 59 |         timeframe="30d",  # Last 30 days
 60 |     )
 61 | 
 62 |     # Calculate total related items
 63 |     total_recent_related = (
 64 |         sum(len(item.related_results) for item in recent_context.results)
 65 |         if recent_context.results
 66 |         else 0
 67 |     )
 68 |     total_older_related = (
 69 |         sum(len(item.related_results) for item in older_context.results)
 70 |         if older_context.results
 71 |         else 0
 72 |     )
 73 | 
 74 |     assert total_older_related >= total_recent_related
 75 | 
 76 | 
 77 | @pytest.mark.asyncio
 78 | async def test_get_discussion_context_not_found(client, test_project):
 79 |     """Test handling of non-existent URIs."""
 80 |     context = await build_context.fn(project=test_project.name, url="memory://test/does-not-exist")
 81 | 
 82 |     assert isinstance(context, GraphContext)
 83 |     assert len(context.results) == 0
 84 |     assert context.metadata.primary_count == 0
 85 |     assert context.metadata.related_count == 0
 86 | 
 87 | 
# Test data for different timeframe formats.
# Used by test_build_context_timeframe_formats below: every entry in
# valid_timeframes must be accepted and every entry in invalid_timeframes
# must be rejected with a ToolError.
valid_timeframes = [
    "7d",  # Standard format: <number><unit>
    "yesterday",  # Natural language
    "0d",  # Zero duration (edge case)
]

invalid_timeframes = [
    "invalid",  # Nonsense string no parser should accept
    # NOTE: "tomorrow" now returns 1 day ago due to timezone safety - no longer invalid
]
 99 | 
100 | 
@pytest.mark.asyncio
async def test_build_context_timeframe_formats(client, test_graph, test_project):
    """build_context should accept every supported timeframe spelling."""
    test_url = "memory://specs/test"

    # Every valid format must be accepted without raising.
    for timeframe in valid_timeframes:
        try:
            result = await build_context.fn(
                project=test_project.name,
                url=test_url,
                timeframe=timeframe,
                page=1,
                page_size=10,
                max_related=10,
            )
            assert result is not None
        except Exception as e:
            pytest.fail(f"Failed with valid timeframe '{timeframe}': {str(e)}")

    # Unparseable formats must be rejected with a ToolError.
    for timeframe in invalid_timeframes:
        with pytest.raises(ToolError):
            await build_context.fn(project=test_project.name, url=test_url, timeframe=timeframe)
125 | 
126 | 
@pytest.mark.asyncio
async def test_build_context_string_depth_parameter(client, test_graph, test_project):
    """Test that build_context handles string depth parameters correctly.

    A numeric string depth should either be coerced to an int or rejected
    with a ToolError; a non-numeric string must always raise a ToolError.
    """
    test_url = "memory://test/root"

    # Valid numeric string: coercion to int or strict rejection are both OK.
    try:
        result = await build_context.fn(url=test_url, depth="2", project=test_project.name)
        # If it succeeds, the depth must have been converted to an integer.
        assert isinstance(result.metadata.depth, int)
        assert result.metadata.depth == 2
    except ToolError:
        # Strict type validation catching the string is also acceptable.
        pass

    # Non-numeric depth must be rejected.
    # NOTE: previously test_url was passed positionally while every other call
    # in this module uses url= by keyword; keyword form guarantees the value
    # lands on the url parameter regardless of parameter order.
    with pytest.raises(ToolError):
        await build_context.fn(url=test_url, depth="invalid", project=test_project.name)
145 | 
```

--------------------------------------------------------------------------------
/tests/api/test_continue_conversation_template.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for the continue_conversation template rendering."""
  2 | 
  3 | import datetime
  4 | import pytest
  5 | 
  6 | from basic_memory.api.template_loader import TemplateLoader
  7 | from basic_memory.schemas.memory import EntitySummary
  8 | from basic_memory.schemas.search import SearchItemType
  9 | 
 10 | 
 11 | @pytest.fixture
 12 | def template_loader():
 13 |     """Return a TemplateLoader instance for testing."""
 14 |     return TemplateLoader()
 15 | 
 16 | 
 17 | @pytest.fixture
 18 | def entity_summary():
 19 |     """Create a sample EntitySummary for testing."""
 20 |     return EntitySummary(
 21 |         title="Test Entity",
 22 |         permalink="test/entity",
 23 |         type=SearchItemType.ENTITY,
 24 |         content="This is a test entity with some content.",
 25 |         file_path="/path/to/test/entity.md",
 26 |         created_at=datetime.datetime(2023, 1, 1, 12, 0),
 27 |     )
 28 | 
 29 | 
 30 | @pytest.fixture
 31 | def context_with_results(entity_summary):
 32 |     """Create a sample context with results for testing."""
 33 |     from basic_memory.schemas.memory import ObservationSummary, ContextResult
 34 | 
 35 |     # Create an observation for the entity
 36 |     observation = ObservationSummary(
 37 |         title="Test Observation",
 38 |         permalink="test/entity/observations/1",
 39 |         category="test",
 40 |         content="This is a test observation.",
 41 |         file_path="/path/to/test/entity.md",
 42 |         created_at=datetime.datetime(2023, 1, 1, 12, 0),
 43 |     )
 44 | 
 45 |     # Create a context result with primary_result, observations, and related_results
 46 |     context_item = ContextResult(
 47 |         primary_result=entity_summary,
 48 |         observations=[observation],
 49 |         related_results=[entity_summary],
 50 |     )
 51 | 
 52 |     return {
 53 |         "topic": "Test Topic",
 54 |         "timeframe": "7d",
 55 |         "has_results": True,
 56 |         "hierarchical_results": [context_item],
 57 |     }
 58 | 
 59 | 
 60 | @pytest.fixture
 61 | def context_without_results():
 62 |     """Create a sample context without results for testing."""
 63 |     return {
 64 |         "topic": "Empty Topic",
 65 |         "timeframe": "1d",
 66 |         "has_results": False,
 67 |         "hierarchical_results": [],
 68 |     }
 69 | 
 70 | 
 71 | @pytest.mark.asyncio
 72 | async def test_continue_conversation_with_results(template_loader, context_with_results):
 73 |     """Test rendering the continue_conversation template with results."""
 74 |     result = await template_loader.render("prompts/continue_conversation.hbs", context_with_results)
 75 | 
 76 |     # Check that key elements are present
 77 |     assert "Continuing conversation on: Test Topic" in result
 78 |     assert "memory://test/entity" in result
 79 |     assert "Test Entity" in result
 80 |     assert "This is a test entity with some content." in result
 81 |     assert "Related Context" in result
 82 |     assert "read_note" in result
 83 |     assert "Next Steps" in result
 84 |     assert "Knowledge Capture Recommendation" in result
 85 | 
 86 | 
 87 | @pytest.mark.asyncio
 88 | async def test_continue_conversation_without_results(template_loader, context_without_results):
 89 |     """Test rendering the continue_conversation template without results."""
 90 |     result = await template_loader.render(
 91 |         "prompts/continue_conversation.hbs", context_without_results
 92 |     )
 93 | 
 94 |     # Check that key elements are present
 95 |     assert "Continuing conversation on: Empty Topic" in result
 96 |     assert "The supplied query did not return any information" in result
 97 |     assert "Opportunity to Capture New Knowledge!" in result
 98 |     assert 'title="Empty Topic"' in result
 99 |     assert "Next Steps" in result
100 |     assert "Knowledge Capture Recommendation" in result
101 | 
102 | 
@pytest.mark.asyncio
async def test_next_steps_section(template_loader, context_with_results):
    """The Next Steps section suggests concrete follow-up tool calls."""
    rendered = await template_loader.render(
        "prompts/continue_conversation.hbs", context_with_results
    )

    timeframe = context_with_results["timeframe"]
    assert "Next Steps" in rendered
    assert 'Explore more with: `search_notes("Test Topic")`' in rendered
    # The suggested recent_activity call must echo the supplied timeframe.
    assert f'See what\'s changed: `recent_activity(timeframe="{timeframe}")`' in rendered
    assert "Record new learnings or decisions from this conversation" in rendered
115 | 
116 | 
@pytest.mark.asyncio
async def test_knowledge_capture_recommendation(template_loader, context_with_results):
    """The knowledge-capture recommendation block is always rendered."""
    rendered = await template_loader.render(
        "prompts/continue_conversation.hbs", context_with_results
    )

    for fragment in (
        "Knowledge Capture Recommendation",
        "actively look for opportunities to:",
        "Record key information, decisions, or insights",
        "Link new knowledge to existing topics",
        "Suggest capturing important context",
        "one of the most valuable aspects of Basic Memory",
    ):
        assert fragment in rendered
128 | 
129 | 
@pytest.mark.asyncio
async def test_timeframe_default_value(template_loader, context_with_results):
    """A missing timeframe falls back to the template's 7d default."""
    # Null out the timeframe; the template should substitute its default.
    context = context_with_results.copy()
    context["timeframe"] = None

    rendered = await template_loader.render("prompts/continue_conversation.hbs", context)

    assert 'recent_activity(timeframe="7d")' in rendered
143 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/api/routers/utils.py:
--------------------------------------------------------------------------------

```python
  1 | from typing import Optional, List
  2 | 
  3 | from basic_memory.repository import EntityRepository
  4 | from basic_memory.repository.search_repository import SearchIndexRow
  5 | from basic_memory.schemas.memory import (
  6 |     EntitySummary,
  7 |     ObservationSummary,
  8 |     RelationSummary,
  9 |     MemoryMetadata,
 10 |     GraphContext,
 11 |     ContextResult,
 12 | )
 13 | from basic_memory.schemas.search import SearchItemType, SearchResult
 14 | from basic_memory.services import EntityService
 15 | from basic_memory.services.context_service import (
 16 |     ContextResultRow,
 17 |     ContextResult as ServiceContextResult,
 18 | )
 19 | 
 20 | 
 21 | async def to_graph_context(
 22 |     context_result: ServiceContextResult,
 23 |     entity_repository: EntityRepository,
 24 |     page: Optional[int] = None,
 25 |     page_size: Optional[int] = None,
 26 | ):
 27 |     # Helper function to convert items to summaries
 28 |     async def to_summary(item: SearchIndexRow | ContextResultRow):
 29 |         match item.type:
 30 |             case SearchItemType.ENTITY:
 31 |                 return EntitySummary(
 32 |                     title=item.title,  # pyright: ignore
 33 |                     permalink=item.permalink,
 34 |                     content=item.content,
 35 |                     file_path=item.file_path,
 36 |                     created_at=item.created_at,
 37 |                 )
 38 |             case SearchItemType.OBSERVATION:
 39 |                 return ObservationSummary(
 40 |                     title=item.title,  # pyright: ignore
 41 |                     file_path=item.file_path,
 42 |                     category=item.category,  # pyright: ignore
 43 |                     content=item.content,  # pyright: ignore
 44 |                     permalink=item.permalink,  # pyright: ignore
 45 |                     created_at=item.created_at,
 46 |                 )
 47 |             case SearchItemType.RELATION:
 48 |                 from_entity = await entity_repository.find_by_id(item.from_id)  # pyright: ignore
 49 |                 to_entity = await entity_repository.find_by_id(item.to_id) if item.to_id else None
 50 |                 return RelationSummary(
 51 |                     title=item.title,  # pyright: ignore
 52 |                     file_path=item.file_path,
 53 |                     permalink=item.permalink,  # pyright: ignore
 54 |                     relation_type=item.relation_type,  # pyright: ignore
 55 |                     from_entity=from_entity.title if from_entity else None,
 56 |                     to_entity=to_entity.title if to_entity else None,
 57 |                     created_at=item.created_at,
 58 |                 )
 59 |             case _:  # pragma: no cover
 60 |                 raise ValueError(f"Unexpected type: {item.type}")
 61 | 
 62 |     # Process the hierarchical results
 63 |     hierarchical_results = []
 64 |     for context_item in context_result.results:
 65 |         # Process primary result
 66 |         primary_result = await to_summary(context_item.primary_result)
 67 | 
 68 |         # Process observations
 69 |         observations = []
 70 |         for obs in context_item.observations:
 71 |             observations.append(await to_summary(obs))
 72 | 
 73 |         # Process related results
 74 |         related = []
 75 |         for rel in context_item.related_results:
 76 |             related.append(await to_summary(rel))
 77 | 
 78 |         # Add to hierarchical results
 79 |         hierarchical_results.append(
 80 |             ContextResult(
 81 |                 primary_result=primary_result,
 82 |                 observations=observations,
 83 |                 related_results=related,
 84 |             )
 85 |         )
 86 | 
 87 |     # Create schema metadata from service metadata
 88 |     metadata = MemoryMetadata(
 89 |         uri=context_result.metadata.uri,
 90 |         types=context_result.metadata.types,
 91 |         depth=context_result.metadata.depth,
 92 |         timeframe=context_result.metadata.timeframe,
 93 |         generated_at=context_result.metadata.generated_at,
 94 |         primary_count=context_result.metadata.primary_count,
 95 |         related_count=context_result.metadata.related_count,
 96 |         total_results=context_result.metadata.primary_count + context_result.metadata.related_count,
 97 |         total_relations=context_result.metadata.total_relations,
 98 |         total_observations=context_result.metadata.total_observations,
 99 |     )
100 | 
101 |     # Return new GraphContext with just hierarchical results
102 |     return GraphContext(
103 |         results=hierarchical_results,
104 |         metadata=metadata,
105 |         page=page,
106 |         page_size=page_size,
107 |     )
108 | 
109 | 
async def to_search_results(entity_service: EntityService, results: List[SearchIndexRow]):
    """Convert raw search-index rows into SearchResult API objects.

    Args:
        entity_service: Service used to resolve entity ids into Entity records.
        results: Rows returned by the search index query.

    Returns:
        List of SearchResult objects, one per input row.
    """
    search_results = []
    for r in results:
        # NOTE(review): one DB round-trip per row (N+1 pattern) — consider
        # batching across rows if result sets grow large.
        entities = await entity_service.get_entities_by_id([r.entity_id, r.from_id, r.to_id])  # pyright: ignore
        search_results.append(
            SearchResult(
                title=r.title,  # pyright: ignore
                type=r.type,  # pyright: ignore
                permalink=r.permalink,
                score=r.score,  # pyright: ignore
                # NOTE(review): entities[0] is used for both `entity` and
                # `from_entity`, and entities[1] is assumed to be the "to"
                # entity. This relies on get_entities_by_id returning results
                # in input order with unresolved/None ids skipped — TODO
                # confirm against EntityService before relying on it.
                entity=entities[0].permalink if entities else None,
                content=r.content,
                file_path=r.file_path,
                metadata=r.metadata,
                category=r.category,
                from_entity=entities[0].permalink if entities else None,
                to_entity=entities[1].permalink if len(entities) > 1 else None,
                relation_type=r.relation_type,
            )
        )
    return search_results
131 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/async_client.py:
--------------------------------------------------------------------------------

```python
  1 | from contextlib import asynccontextmanager, AbstractAsyncContextManager
  2 | from typing import AsyncIterator, Callable, Optional
  3 | 
  4 | from httpx import ASGITransport, AsyncClient, Timeout
  5 | from loguru import logger
  6 | 
  7 | from basic_memory.api.app import app as fastapi_app
  8 | from basic_memory.config import ConfigManager
  9 | 
 10 | 
 11 | # Optional factory override for dependency injection
 12 | _client_factory: Optional[Callable[[], AbstractAsyncContextManager[AsyncClient]]] = None
 13 | 
 14 | 
 15 | def set_client_factory(factory: Callable[[], AbstractAsyncContextManager[AsyncClient]]) -> None:
 16 |     """Override the default client factory (for cloud app, testing, etc).
 17 | 
 18 |     Args:
 19 |         factory: An async context manager that yields an AsyncClient
 20 | 
 21 |     Example:
 22 |         @asynccontextmanager
 23 |         async def custom_client_factory():
 24 |             async with AsyncClient(...) as client:
 25 |                 yield client
 26 | 
 27 |         set_client_factory(custom_client_factory)
 28 |     """
 29 |     global _client_factory
 30 |     _client_factory = factory
 31 | 
 32 | 
 33 | @asynccontextmanager
 34 | async def get_client() -> AsyncIterator[AsyncClient]:
 35 |     """Get an AsyncClient as a context manager.
 36 | 
 37 |     This function provides proper resource management for HTTP clients,
 38 |     ensuring connections are closed after use. It supports three modes:
 39 | 
 40 |     1. **Factory injection** (cloud app, tests):
 41 |        If a custom factory is set via set_client_factory(), use that.
 42 | 
 43 |     2. **CLI cloud mode**:
 44 |        When cloud_mode_enabled is True, create HTTP client with auth
 45 |        token from CLIAuth for requests to cloud proxy endpoint.
 46 | 
 47 |     3. **Local mode** (default):
 48 |        Use ASGI transport for in-process requests to local FastAPI app.
 49 | 
 50 |     Usage:
 51 |         async with get_client() as client:
 52 |             response = await client.get("/path")
 53 | 
 54 |     Yields:
 55 |         AsyncClient: Configured HTTP client for the current mode
 56 | 
 57 |     Raises:
 58 |         RuntimeError: If cloud mode is enabled but user is not authenticated
 59 |     """
 60 |     if _client_factory:
 61 |         # Use injected factory (cloud app, tests)
 62 |         async with _client_factory() as client:
 63 |             yield client
 64 |     else:
 65 |         # Default: create based on config
 66 |         config = ConfigManager().config
 67 |         timeout = Timeout(
 68 |             connect=10.0,  # 10 seconds for connection
 69 |             read=30.0,  # 30 seconds for reading response
 70 |             write=30.0,  # 30 seconds for writing request
 71 |             pool=30.0,  # 30 seconds for connection pool
 72 |         )
 73 | 
 74 |         if config.cloud_mode_enabled:
 75 |             # CLI cloud mode: inject auth when creating client
 76 |             from basic_memory.cli.auth import CLIAuth
 77 | 
 78 |             auth = CLIAuth(client_id=config.cloud_client_id, authkit_domain=config.cloud_domain)
 79 |             token = await auth.get_valid_token()
 80 | 
 81 |             if not token:
 82 |                 raise RuntimeError(
 83 |                     "Cloud mode enabled but not authenticated. "
 84 |                     "Run 'basic-memory cloud login' first."
 85 |                 )
 86 | 
 87 |             # Auth header set ONCE at client creation
 88 |             proxy_base_url = f"{config.cloud_host}/proxy"
 89 |             logger.info(f"Creating HTTP client for cloud proxy at: {proxy_base_url}")
 90 |             async with AsyncClient(
 91 |                 base_url=proxy_base_url,
 92 |                 headers={"Authorization": f"Bearer {token}"},
 93 |                 timeout=timeout,
 94 |             ) as client:
 95 |                 yield client
 96 |         else:
 97 |             # Local mode: ASGI transport for in-process calls
 98 |             logger.info("Creating ASGI client for local Basic Memory API")
 99 |             async with AsyncClient(
100 |                 transport=ASGITransport(app=fastapi_app), base_url="http://test", timeout=timeout
101 |             ) as client:
102 |                 yield client
103 | 
104 | 
def create_client() -> AsyncClient:
    """Create an HTTP client based on configuration.

    DEPRECATED: Use get_client() context manager instead for proper resource management.

    This function is kept for backward compatibility but will be removed in a future
    version. The returned client should be closed manually by calling
    await client.aclose().

    NOTE(review): unlike get_client(), this path does NOT attach an Authorization
    header in cloud mode (it cannot await CLIAuth from sync code), so requests to
    the proxy will be unauthenticated — another reason to prefer get_client().

    Returns:
        AsyncClient configured for either local ASGI or remote proxy
    """
    import warnings

    # Emit a runtime signal so remaining callers migrate before removal.
    warnings.warn(
        "create_client() is deprecated; use the get_client() context manager instead.",
        DeprecationWarning,
        stacklevel=2,
    )

    config_manager = ConfigManager()
    config = config_manager.config

    # Configure timeout for longer operations like write_note
    # Default httpx timeout is 5 seconds which is too short for file operations
    timeout = Timeout(
        connect=10.0,  # 10 seconds for connection
        read=30.0,  # 30 seconds for reading response
        write=30.0,  # 30 seconds for writing request
        pool=30.0,  # 30 seconds for connection pool
    )

    if config.cloud_mode_enabled:
        # Use HTTP transport to proxy endpoint
        proxy_base_url = f"{config.cloud_host}/proxy"
        logger.info(f"Creating HTTP client for proxy at: {proxy_base_url}")
        return AsyncClient(base_url=proxy_base_url, timeout=timeout)

    # Default: use ASGI transport for local API (development mode)
    logger.info("Creating ASGI client for local Basic Memory API")
    return AsyncClient(
        transport=ASGITransport(app=fastapi_app), base_url="http://test", timeout=timeout
    )
```

--------------------------------------------------------------------------------
/test-int/test_db_wal_mode.py:
--------------------------------------------------------------------------------

```python
  1 | """Integration tests for WAL mode and Windows-specific SQLite optimizations.
  2 | 
  3 | These tests use real filesystem databases (not in-memory) to verify WAL mode
  4 | and other SQLite configuration settings work correctly in production scenarios.
  5 | """
  6 | 
  7 | import pytest
  8 | from unittest.mock import patch
  9 | from sqlalchemy import text
 10 | 
 11 | 
 12 | @pytest.mark.asyncio
 13 | async def test_wal_mode_enabled(engine_factory):
 14 |     """Test that WAL mode is enabled on filesystem database connections."""
 15 |     engine, _ = engine_factory
 16 | 
 17 |     # Execute a query to verify WAL mode is enabled
 18 |     async with engine.connect() as conn:
 19 |         result = await conn.execute(text("PRAGMA journal_mode"))
 20 |         journal_mode = result.fetchone()[0]
 21 | 
 22 |         # WAL mode should be enabled for filesystem databases
 23 |         assert journal_mode.upper() == "WAL"
 24 | 
 25 | 
 26 | @pytest.mark.asyncio
 27 | async def test_busy_timeout_configured(engine_factory):
 28 |     """Test that busy timeout is configured for database connections."""
 29 |     engine, _ = engine_factory
 30 | 
 31 |     async with engine.connect() as conn:
 32 |         result = await conn.execute(text("PRAGMA busy_timeout"))
 33 |         busy_timeout = result.fetchone()[0]
 34 | 
 35 |         # Busy timeout should be 10 seconds (10000 milliseconds)
 36 |         assert busy_timeout == 10000
 37 | 
 38 | 
 39 | @pytest.mark.asyncio
 40 | async def test_synchronous_mode_configured(engine_factory):
 41 |     """Test that synchronous mode is set to NORMAL for performance."""
 42 |     engine, _ = engine_factory
 43 | 
 44 |     async with engine.connect() as conn:
 45 |         result = await conn.execute(text("PRAGMA synchronous"))
 46 |         synchronous = result.fetchone()[0]
 47 | 
 48 |         # Synchronous should be NORMAL (1)
 49 |         assert synchronous == 1
 50 | 
 51 | 
 52 | @pytest.mark.asyncio
 53 | async def test_cache_size_configured(engine_factory):
 54 |     """Test that cache size is configured for performance."""
 55 |     engine, _ = engine_factory
 56 | 
 57 |     async with engine.connect() as conn:
 58 |         result = await conn.execute(text("PRAGMA cache_size"))
 59 |         cache_size = result.fetchone()[0]
 60 | 
 61 |         # Cache size should be -64000 (64MB)
 62 |         assert cache_size == -64000
 63 | 
 64 | 
 65 | @pytest.mark.asyncio
 66 | async def test_temp_store_configured(engine_factory):
 67 |     """Test that temp_store is set to MEMORY."""
 68 |     engine, _ = engine_factory
 69 | 
 70 |     async with engine.connect() as conn:
 71 |         result = await conn.execute(text("PRAGMA temp_store"))
 72 |         temp_store = result.fetchone()[0]
 73 | 
 74 |         # temp_store should be MEMORY (2)
 75 |         assert temp_store == 2
 76 | 
 77 | 
 78 | @pytest.mark.asyncio
 79 | async def test_windows_locking_mode_when_on_windows(tmp_path):
 80 |     """Test that Windows-specific locking mode is set when running on Windows."""
 81 |     from basic_memory.db import engine_session_factory, DatabaseType
 82 | 
 83 |     db_path = tmp_path / "test_windows.db"
 84 | 
 85 |     with patch("os.name", "nt"):
 86 |         # Need to patch at module level where it's imported
 87 |         with patch("basic_memory.db.os.name", "nt"):
 88 |             async with engine_session_factory(db_path, DatabaseType.FILESYSTEM) as (
 89 |                 engine,
 90 |                 _,
 91 |             ):
 92 |                 async with engine.connect() as conn:
 93 |                     result = await conn.execute(text("PRAGMA locking_mode"))
 94 |                     locking_mode = result.fetchone()[0]
 95 | 
 96 |                     # Locking mode should be NORMAL on Windows
 97 |                     assert locking_mode.upper() == "NORMAL"
 98 | 
 99 | 
@pytest.mark.asyncio
async def test_null_pool_on_windows(tmp_path):
    """On Windows, filesystem engines should use NullPool (no pooled connections)."""
    from basic_memory.db import engine_session_factory, DatabaseType
    from sqlalchemy.pool import NullPool

    database_file = tmp_path / "test_windows_pool.db"

    with patch("basic_memory.db.os.name", "nt"):
        async with engine_session_factory(database_file, DatabaseType.FILESYSTEM) as (
            engine,
            _,
        ):
            # NullPool avoids Windows file-locking problems with pooled handles.
            assert isinstance(engine.pool, NullPool)
112 | 
113 | 
@pytest.mark.asyncio
async def test_regular_pool_on_non_windows(tmp_path):
    """On POSIX platforms the engine keeps ordinary connection pooling."""
    from basic_memory.db import engine_session_factory, DatabaseType
    from sqlalchemy.pool import NullPool

    database_file = tmp_path / "test_posix_pool.db"

    with patch("basic_memory.db.os.name", "posix"):
        async with engine_session_factory(database_file, DatabaseType.FILESYSTEM) as (
            engine,
            _,
        ):
            # Regular pooling is expected; NullPool is Windows-only behavior.
            assert not isinstance(engine.pool, NullPool)
126 | 
127 | 
@pytest.mark.asyncio
async def test_memory_database_no_null_pool_on_windows(tmp_path):
    """In-memory databases must keep pooling even on Windows.

    NullPool closes connections immediately, which would destroy an
    in-memory database, so the Windows NullPool rule must not apply here.
    """
    from basic_memory.db import engine_session_factory, DatabaseType
    from sqlalchemy.pool import NullPool

    database_file = tmp_path / "test_memory.db"

    with patch("basic_memory.db.os.name", "nt"):
        async with engine_session_factory(database_file, DatabaseType.MEMORY) as (
            engine,
            _,
        ):
            assert not isinstance(engine.pool, NullPool)
144 | 
```

--------------------------------------------------------------------------------
/tests/services/test_file_service.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for file operations service."""
  2 | 
  3 | from pathlib import Path
  4 | from unittest.mock import patch
  5 | 
  6 | import pytest
  7 | 
  8 | from basic_memory.services.exceptions import FileOperationError
  9 | from basic_memory.services.file_service import FileService
 10 | 
 11 | 
 12 | @pytest.mark.asyncio
 13 | async def test_exists(tmp_path: Path, file_service: FileService):
 14 |     """Test file existence checking."""
 15 |     # Test path
 16 |     test_path = tmp_path / "test.md"
 17 | 
 18 |     # Should not exist initially
 19 |     assert not await file_service.exists(test_path)
 20 | 
 21 |     # Create file
 22 |     test_path.write_text("test content")
 23 |     assert await file_service.exists(test_path)
 24 | 
 25 |     # Delete file
 26 |     test_path.unlink()
 27 |     assert not await file_service.exists(test_path)
 28 | 
 29 | 
 30 | @pytest.mark.asyncio
 31 | async def test_exists_error_handling(tmp_path: Path, file_service: FileService):
 32 |     """Test error handling in exists() method."""
 33 |     test_path = tmp_path / "test.md"
 34 | 
 35 |     # Mock Path.exists to raise an error
 36 |     with patch.object(Path, "exists") as mock_exists:
 37 |         mock_exists.side_effect = PermissionError("Access denied")
 38 | 
 39 |         with pytest.raises(FileOperationError) as exc_info:
 40 |             await file_service.exists(test_path)
 41 | 
 42 |         assert "Failed to check file existence" in str(exc_info.value)
 43 | 
 44 | 
 45 | @pytest.mark.asyncio
 46 | async def test_write_read_file(tmp_path: Path, file_service: FileService):
 47 |     """Test basic write/read operations with checksums."""
 48 |     test_path = tmp_path / "test.md"
 49 |     test_content = "test content\nwith multiple lines"
 50 | 
 51 |     # Write file and get checksum
 52 |     checksum = await file_service.write_file(test_path, test_content)
 53 |     assert test_path.exists()
 54 | 
 55 |     # Read back and verify content/checksum
 56 |     content, read_checksum = await file_service.read_file(test_path)
 57 |     assert content == test_content
 58 |     assert read_checksum == checksum
 59 | 
 60 | 
 61 | @pytest.mark.asyncio
 62 | async def test_write_creates_directories(tmp_path: Path, file_service: FileService):
 63 |     """Test directory creation on write."""
 64 |     test_path = tmp_path / "subdir" / "nested" / "test.md"
 65 |     test_content = "test content"
 66 | 
 67 |     # Write should create directories
 68 |     await file_service.write_file(test_path, test_content)
 69 |     assert test_path.exists()
 70 |     assert test_path.parent.is_dir()
 71 | 
 72 | 
 73 | @pytest.mark.asyncio
 74 | async def test_write_atomic(tmp_path: Path, file_service: FileService):
 75 |     """Test atomic write with no partial files."""
 76 |     test_path = tmp_path / "test.md"
 77 |     temp_path = test_path.with_suffix(".tmp")
 78 | 
 79 |     # Mock write_file_atomic to raise an error
 80 |     with patch("basic_memory.file_utils.write_file_atomic") as mock_write:
 81 |         mock_write.side_effect = Exception("Write failed")
 82 | 
 83 |         # Attempt write that will fail
 84 |         with pytest.raises(FileOperationError):
 85 |             await file_service.write_file(test_path, "test content")
 86 | 
 87 |         # No partial files should exist
 88 |         assert not test_path.exists()
 89 |         assert not temp_path.exists()
 90 | 
 91 | 
 92 | @pytest.mark.asyncio
 93 | async def test_delete_file(tmp_path: Path, file_service: FileService):
 94 |     """Test file deletion."""
 95 |     test_path = tmp_path / "test.md"
 96 |     test_content = "test content"
 97 | 
 98 |     # Create then delete
 99 |     await file_service.write_file(test_path, test_content)
100 |     assert test_path.exists()
101 | 
102 |     await file_service.delete_file(test_path)
103 |     assert not test_path.exists()
104 | 
105 |     # Delete non-existent file should not error
106 |     await file_service.delete_file(test_path)
107 | 
108 | 
@pytest.mark.asyncio
async def test_checksum_consistency(tmp_path: Path, file_service: FileService):
    """Identical content always produces an identical checksum."""
    target = tmp_path / "test.md"
    body = "test content\n" * 10

    first = await file_service.write_file(target, body)
    _, second = await file_service.read_file(target)
    third = await file_service.write_file(target, body)

    # write, read-back, and rewrite all agree.
    assert first == second == third
126 | 
127 | 
@pytest.mark.asyncio
async def test_error_handling_missing_file(tmp_path: Path, file_service: FileService):
    """Reading a nonexistent file raises FileOperationError."""
    missing = tmp_path / "missing.md"

    with pytest.raises(FileOperationError):
        await file_service.read_file(missing)
135 | 
136 | 
@pytest.mark.asyncio
async def test_error_handling_invalid_path(tmp_path: Path, file_service: FileService):
    """Writing where a directory already exists raises FileOperationError."""
    target = tmp_path / "test.md"
    target.mkdir()  # Occupy the path with a directory, not a file.

    with pytest.raises(FileOperationError):
        await file_service.write_file(target, "test")
146 | 
147 | 
@pytest.mark.asyncio
async def test_write_unicode_content(tmp_path: Path, file_service: FileService):
    """Unicode content survives a write/read round trip unchanged."""
    target = tmp_path / "test.md"
    body = """
    # Test Unicode
    - Emoji: 🚀 ⭐️ 🔥
    - Chinese: 你好世界
    - Arabic: مرحبا بالعالم
    - Russian: Привет, мир
    """

    await file_service.write_file(target, body)
    round_tripped, _ = await file_service.read_file(target)

    assert round_tripped == body
```

--------------------------------------------------------------------------------
/tests/api/test_search_template.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for the search template rendering."""
  2 | 
  3 | import datetime
  4 | import pytest
  5 | 
  6 | from basic_memory.api.template_loader import TemplateLoader
  7 | from basic_memory.schemas.search import SearchItemType, SearchResult
  8 | 
  9 | 
 10 | @pytest.fixture
 11 | def template_loader():
 12 |     """Return a TemplateLoader instance for testing."""
 13 |     return TemplateLoader()
 14 | 
 15 | 
@pytest.fixture
def search_result():
    """Create a sample SearchResult for testing."""
    # A representative entity hit with fixed score and timestamp so template
    # rendering assertions stay deterministic.
    return SearchResult(
        title="Test Search Result",
        type=SearchItemType.ENTITY,
        permalink="test/search-result",
        score=0.95,
        content="This is a test search result with some content.",
        file_path="/path/to/test/search-result.md",
        metadata={"created_at": datetime.datetime(2023, 2, 1, 12, 0)},
    )
 28 | 
 29 | 
@pytest.fixture
def context_with_results(search_result):
    """Create a sample context with search results."""
    # Mirrors the context shape the search prompt template expects:
    # query/timeframe plus a result list and its precomputed count/flag.
    return {
        "query": "test query",
        "timeframe": "30d",
        "has_results": True,
        "result_count": 1,
        "results": [search_result],
    }
 40 | 
 41 | 
@pytest.fixture
def context_without_results():
    """Create a sample context without search results."""
    # Empty-result context; timeframe None exercises the header branch that
    # omits the "(after ...)" suffix.
    return {
        "query": "empty query",
        "timeframe": None,
        "has_results": False,
        "result_count": 0,
        "results": [],
    }
 52 | 
 53 | 
 54 | @pytest.mark.asyncio
 55 | async def test_search_with_results(template_loader, context_with_results):
 56 |     """Test rendering the search template with results."""
 57 |     result = await template_loader.render("prompts/search.hbs", context_with_results)
 58 | 
 59 |     # Check that key elements are present
 60 |     assert 'Search Results for: "test query" (after 30d)' in result
 61 |     assert "1.0. Test Search Result" in result
 62 |     assert "Type**: entity" in result
 63 |     assert "Relevance Score**: 0.95" in result
 64 |     assert "This is a test search result with some content." in result
 65 |     assert 'read_note("test/search-result")' in result
 66 |     assert "Next Steps" in result
 67 |     assert "Synthesize and Capture Knowledge" in result
 68 | 
 69 | 
 70 | @pytest.mark.asyncio
 71 | async def test_search_without_results(template_loader, context_without_results):
 72 |     """Test rendering the search template without results."""
 73 |     result = await template_loader.render("prompts/search.hbs", context_without_results)
 74 | 
 75 |     # Check that key elements are present
 76 |     assert 'Search Results for: "empty query"' in result
 77 |     assert "I couldn't find any results for this query." in result
 78 |     assert "Opportunity to Capture Knowledge!" in result
 79 |     assert "write_note(" in result
 80 |     assert 'title="Empty query"' in result
 81 |     assert "Other Suggestions" in result
 82 | 
 83 | 
 84 | @pytest.mark.asyncio
 85 | async def test_multiple_search_results(template_loader):
 86 |     """Test rendering the search template with multiple results."""
 87 |     # Create multiple search results
 88 |     results = []
 89 |     for i in range(1, 6):  # Create 5 results
 90 |         results.append(
 91 |             SearchResult(
 92 |                 title=f"Search Result {i}",
 93 |                 type=SearchItemType.ENTITY,
 94 |                 permalink=f"test/result-{i}",
 95 |                 score=1.0 - (i * 0.1),  # Decreasing scores
 96 |                 content=f"Content for result {i}",
 97 |                 file_path=f"/path/to/result-{i}.md",
 98 |                 metadata={},
 99 |             )
100 |         )
101 | 
102 |     context = {
103 |         "query": "multiple results",
104 |         "timeframe": None,
105 |         "has_results": True,
106 |         "result_count": len(results),
107 |         "results": results,
108 |     }
109 | 
110 |     result = await template_loader.render("prompts/search.hbs", context)
111 | 
112 |     # Check that all results are rendered
113 |     for i in range(1, 6):
114 |         assert f"{i}.0. Search Result {i}" in result
115 |         assert f"Content for result {i}" in result
116 |         assert f'read_note("test/result-{i}")' in result
117 | 
118 | 
@pytest.mark.asyncio
async def test_capitalization_in_write_note_template(template_loader, context_with_results):
    """The suggested write_note call capitalizes the query."""
    rendered = await template_loader.render("prompts/search.hbs", context_with_results)

    assert "Synthesis of Test query Information" in rendered
126 | 
127 | 
@pytest.mark.asyncio
async def test_timeframe_display(template_loader):
    """Header shows '(after <timeframe>)' only when a timeframe is provided."""
    base = {"has_results": True, "result_count": 0, "results": []}

    # With a timeframe the header carries the "(after ...)" suffix.
    rendered = await template_loader.render(
        "prompts/search.hbs",
        {**base, "query": "with timeframe", "timeframe": "7d"},
    )
    assert 'Search Results for: "with timeframe" (after 7d)' in rendered

    # Without one, the suffix must be absent.
    rendered = await template_loader.render(
        "prompts/search.hbs",
        {**base, "query": "without timeframe", "timeframe": None},
    )
    assert 'Search Results for: "without timeframe"' in rendered
    assert 'Search Results for: "without timeframe" (after' not in rendered
```

--------------------------------------------------------------------------------
/test-int/test_disable_permalinks_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """Integration tests for the disable_permalinks configuration."""
  2 | 
  3 | import pytest
  4 | 
  5 | from basic_memory.config import BasicMemoryConfig
  6 | from basic_memory.markdown import EntityParser, MarkdownProcessor
  7 | from basic_memory.repository import (
  8 |     EntityRepository,
  9 |     ObservationRepository,
 10 |     RelationRepository,
 11 |     ProjectRepository,
 12 | )
 13 | from basic_memory.repository.search_repository import SearchRepository
 14 | from basic_memory.schemas import Entity as EntitySchema
 15 | from basic_memory.services import FileService
 16 | from basic_memory.services.entity_service import EntityService
 17 | from basic_memory.services.link_resolver import LinkResolver
 18 | from basic_memory.services.search_service import SearchService
 19 | from basic_memory.sync.sync_service import SyncService
 20 | 
 21 | 
@pytest.mark.asyncio
async def test_disable_permalinks_create_entity(tmp_path, engine_factory):
    """Test that entities created with disable_permalinks=True don't have permalinks."""
    engine, session_maker = engine_factory

    # Create app config with disable_permalinks=True
    app_config = BasicMemoryConfig(disable_permalinks=True)

    # Setup repositories — all bound to the same session maker and project.
    # project_id=1 assumed to match the fixture-created default project — TODO confirm.
    entity_repository = EntityRepository(session_maker, project_id=1)
    observation_repository = ObservationRepository(session_maker, project_id=1)
    relation_repository = RelationRepository(session_maker, project_id=1)
    search_repository = SearchRepository(session_maker, project_id=1)

    # Setup services; the search index must exist before LinkResolver queries it.
    entity_parser = EntityParser(tmp_path)
    markdown_processor = MarkdownProcessor(entity_parser)
    file_service = FileService(tmp_path, markdown_processor)
    search_service = SearchService(search_repository, entity_repository, file_service)
    await search_service.init_search_index()
    link_resolver = LinkResolver(entity_repository, search_service)

    entity_service = EntityService(
        entity_parser=entity_parser,
        entity_repository=entity_repository,
        observation_repository=observation_repository,
        relation_repository=relation_repository,
        file_service=file_service,
        link_resolver=link_resolver,
        app_config=app_config,
    )

    # Create entity via API
    entity_data = EntitySchema(
        title="Test Note",
        folder="test",
        entity_type="note",
        content="Test content",
    )

    created = await entity_service.create_entity(entity_data)

    # Verify entity has no permalink
    assert created.permalink is None

    # Verify file has no permalink in frontmatter
    file_path = tmp_path / "test" / "Test Note.md"
    assert file_path.exists()
    content = file_path.read_text()
    assert "permalink:" not in content
    assert "Test content" in content
 73 | 
 74 | 
 75 | @pytest.mark.asyncio
 76 | async def test_disable_permalinks_sync_workflow(tmp_path, engine_factory):
 77 |     """Test full sync workflow with disable_permalinks enabled."""
 78 |     engine, session_maker = engine_factory
 79 | 
 80 |     # Create app config with disable_permalinks=True
 81 |     app_config = BasicMemoryConfig(disable_permalinks=True)
 82 | 
 83 |     # Create a test markdown file without frontmatter
 84 |     test_file = tmp_path / "test_note.md"
 85 |     test_file.write_text("# Test Note\nThis is test content.")
 86 | 
 87 |     # Setup repositories
 88 |     entity_repository = EntityRepository(session_maker, project_id=1)
 89 |     observation_repository = ObservationRepository(session_maker, project_id=1)
 90 |     relation_repository = RelationRepository(session_maker, project_id=1)
 91 |     search_repository = SearchRepository(session_maker, project_id=1)
 92 |     project_repository = ProjectRepository(session_maker)
 93 | 
 94 |     # Setup services
 95 |     entity_parser = EntityParser(tmp_path)
 96 |     markdown_processor = MarkdownProcessor(entity_parser)
 97 |     file_service = FileService(tmp_path, markdown_processor)
 98 |     search_service = SearchService(search_repository, entity_repository, file_service)
 99 |     await search_service.init_search_index()
100 |     link_resolver = LinkResolver(entity_repository, search_service)
101 | 
102 |     entity_service = EntityService(
103 |         entity_parser=entity_parser,
104 |         entity_repository=entity_repository,
105 |         observation_repository=observation_repository,
106 |         relation_repository=relation_repository,
107 |         file_service=file_service,
108 |         link_resolver=link_resolver,
109 |         app_config=app_config,
110 |     )
111 | 
112 |     sync_service = SyncService(
113 |         app_config=app_config,
114 |         entity_service=entity_service,
115 |         project_repository=project_repository,
116 |         entity_parser=entity_parser,
117 |         entity_repository=entity_repository,
118 |         relation_repository=relation_repository,
119 |         search_service=search_service,
120 |         file_service=file_service,
121 |     )
122 | 
123 |     # Run sync
124 |     report = await sync_service.scan(tmp_path)
125 |     # Note: scan may pick up database files too, so just check our file is there
126 |     assert "test_note.md" in report.new
127 | 
128 |     # Sync the file
129 |     await sync_service.sync_file("test_note.md", new=True)
130 | 
131 |     # Verify file has no permalink added
132 |     content = test_file.read_text()
133 |     assert "permalink:" not in content
134 |     assert "# Test Note" in content
135 | 
136 |     # Verify entity in database has no permalink
137 |     entities = await entity_repository.find_all()
138 |     assert len(entities) == 1
139 |     assert entities[0].permalink is None
140 |     # Title is extracted from filename when no frontmatter, or from frontmatter when present
141 |     assert entities[0].title in ("test_note", "Test Note")
142 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/importers/claude_projects_importer.py:
--------------------------------------------------------------------------------

```python
  1 | """Claude projects import service for Basic Memory."""
  2 | 
  3 | import logging
  4 | from typing import Any, Dict, Optional
  5 | 
  6 | from basic_memory.markdown.schemas import EntityFrontmatter, EntityMarkdown
  7 | from basic_memory.importers.base import Importer
  8 | from basic_memory.schemas.importer import ProjectImportResult
  9 | from basic_memory.importers.utils import clean_filename
 10 | 
 11 | logger = logging.getLogger(__name__)
 12 | 
 13 | 
class ClaudeProjectsImporter(Importer[ProjectImportResult]):
    """Service for importing Claude projects.

    Converts a parsed Claude ``projects.json`` export into Basic Memory
    markdown entities: one file per project document, plus an optional
    prompt-template file per project.
    """

    async def import_data(
        self, source_data, destination_folder: str, **kwargs: Any
    ) -> ProjectImportResult:
        """Import projects from Claude JSON export.

        Args:
            source_data: Parsed contents of the Claude projects.json file
                (an iterable of project dicts).
            destination_folder: Base folder for projects within the project.
            **kwargs: Additional keyword arguments.

        Returns:
            ProjectImportResult containing statistics and status of the import.
        """
        try:
            # Ensure the base folder exists
            base_path = self.base_path
            if destination_folder:
                base_path = self.ensure_folder_exists(destination_folder)

            projects = source_data

            # Process each project
            docs_imported = 0
            prompts_imported = 0

            for project in projects:
                project_dir = clean_filename(project["name"])

                # Create project directories. parents=True also creates the
                # project directory itself; NOTE(review): whether write_entity
                # creates parent dirs on its own isn't visible here.
                docs_dir = base_path / project_dir / "docs"
                docs_dir.mkdir(parents=True, exist_ok=True)

                # Import prompt template if it exists
                if prompt_entity := self._format_prompt_markdown(project):
                    file_path = base_path / f"{prompt_entity.frontmatter.metadata['permalink']}.md"
                    await self.write_entity(prompt_entity, file_path)
                    prompts_imported += 1

                # Import project documents; the permalink places each file
                # under <project_dir>/docs/<doc_file>.md
                for doc in project.get("docs", []):
                    entity = self._format_project_markdown(project, doc)
                    file_path = base_path / f"{entity.frontmatter.metadata['permalink']}.md"
                    await self.write_entity(entity, file_path)
                    docs_imported += 1

            return ProjectImportResult(
                import_count={"documents": docs_imported, "prompts": prompts_imported},
                success=True,
                documents=docs_imported,
                prompts=prompts_imported,
            )

        except Exception as e:  # pragma: no cover
            logger.exception("Failed to import Claude projects")
            return self.handle_error("Failed to import Claude projects", e)  # pyright: ignore [reportReturnType]

    def _format_project_markdown(
        self, project: Dict[str, Any], doc: Dict[str, Any]
    ) -> EntityMarkdown:
        """Format a project document as a Basic Memory entity.

        Args:
            project: Project data.
            doc: Document data.

        Returns:
            EntityMarkdown instance representing the document.
        """
        # Extract timestamps; a doc may lack created_at, in which case we
        # fall back to the project's creation time.
        created_at = doc.get("created_at") or project["created_at"]
        modified_at = project["updated_at"]

        # Generate clean names for organization
        project_dir = clean_filename(project["name"])
        doc_file = clean_filename(doc["filename"])

        # Create entity
        entity = EntityMarkdown(
            frontmatter=EntityFrontmatter(
                metadata={
                    "type": "project_doc",
                    "title": doc["filename"],
                    "created": created_at,
                    "modified": modified_at,
                    "permalink": f"{project_dir}/docs/{doc_file}",
                    "project_name": project["name"],
                    "project_uuid": project["uuid"],
                    "doc_uuid": doc["uuid"],
                }
            ),
            content=doc["content"],
        )

        return entity

    def _format_prompt_markdown(self, project: Dict[str, Any]) -> Optional[EntityMarkdown]:
        """Format project prompt template as a Basic Memory entity.

        Args:
            project: Project data.

        Returns:
            EntityMarkdown instance representing the prompt template, or None if
            no prompt template exists.
        """
        if not project.get("prompt_template"):
            return None

        # Extract timestamps
        created_at = project["created_at"]
        modified_at = project["updated_at"]

        # Generate clean project directory name
        project_dir = clean_filename(project["name"])

        # Create entity
        entity = EntityMarkdown(
            frontmatter=EntityFrontmatter(
                metadata={
                    "type": "prompt_template",
                    "title": f"Prompt Template: {project['name']}",
                    "created": created_at,
                    "modified": modified_at,
                    "permalink": f"{project_dir}/prompt-template",
                    "project_name": project["name"],
                    "project_uuid": project["uuid"],
                }
            ),
            content=f"# Prompt Template: {project['name']}\n\n{project['prompt_template']}",
        )

        return entity
149 | 
```

--------------------------------------------------------------------------------
/v15-docs/explicit-project-parameter.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Explicit Project Parameter (SPEC-6)
  2 | 
  3 | **Status**: Breaking Change
  4 | **PR**: #298
  5 | **Affects**: All MCP tool users
  6 | 
  7 | ## What Changed
  8 | 
  9 | Starting in v0.15.0, **all MCP tools require an explicit `project` parameter**. The previous implicit project context (via middleware) has been removed in favor of a stateless architecture.
 10 | 
 11 | ### Before v0.15.0
 12 | ```python
 13 | # Tools used implicit current_project from middleware
 14 | await write_note("My Note", "Content", "folder")
 15 | await search_notes("query")
 16 | ```
 17 | 
 18 | ### v0.15.0 and Later
 19 | ```python
 20 | # Explicit project required
 21 | await write_note("My Note", "Content", "folder", project="main")
 22 | await search_notes("query", project="main")
 23 | ```
 24 | 
 25 | ## Why This Matters
 26 | 
 27 | **Benefits:**
 28 | - **Stateless Architecture**: Tools are now truly stateless - no hidden state
 29 | - **Multi-project Clarity**: Explicit about which project you're working with
 30 | - **Better for Cloud**: Enables proper multi-tenant isolation
 31 | - **Simpler Debugging**: No confusion about "current" project
 32 | 
 33 | **Impact:**
 34 | - Existing MCP integrations may break if they don't specify project
 35 | - LLMs need to be aware of project parameter requirement
 36 | - Configuration option available for easier migration (see below)
 37 | 
 38 | ## How to Use
 39 | 
 40 | ### Option 1: Specify Project Every Time (Recommended for Multi-project Users)
 41 | 
 42 | ```python
 43 | # Always include project parameter
 44 | results = await search_notes(
 45 |     query="authentication",
 46 |     project="work-docs"
 47 | )
 48 | 
 49 | content = await read_note(
 50 |     identifier="Search Design",
 51 |     project="work-docs"
 52 | )
 53 | 
 54 | await write_note(
 55 |     title="New Feature",
 56 |     content="...",
 57 |     folder="specs",
 58 |     project="work-docs"
 59 | )
 60 | ```
 61 | 
 62 | ### Option 2: Enable default_project_mode (Recommended for Single-project Users)
 63 | 
 64 | Edit `~/.basic-memory/config.json`:
 65 | 
 66 | ```json
 67 | {
 68 |   "default_project": "main",
 69 |   "default_project_mode": true,
 70 |   "projects": {
 71 |     "main": "/Users/you/basic-memory"
 72 |   }
 73 | }
 74 | ```
 75 | 
 76 | With `default_project_mode: true`:
 77 | ```python
 78 | # Project parameter is optional - uses default_project when omitted
 79 | await write_note("My Note", "Content", "folder")  # Uses "main" project
 80 | await search_notes("query")  # Uses "main" project
 81 | 
 82 | # Can still override with explicit project
 83 | await search_notes("query", project="other-project")
 84 | ```
 85 | 
 86 | ### Option 3: Project Discovery for New Users
 87 | 
 88 | If you don't know which project to use:
 89 | 
 90 | ```python
 91 | # List available projects
 92 | projects = await list_memory_projects()
 93 | for project in projects:
 94 |     print(f"- {project.name}: {project.path}")
 95 | 
 96 | # Check recent activity to find active project
 97 | activity = await recent_activity()  # Shows cross-project activity
 98 | # Returns recommendations for which project to use
 99 | ```
100 | 
101 | ## Migration Guide
102 | 
103 | ### For Claude Desktop Users
104 | 
105 | 1. **Check your config**: `cat ~/.basic-memory/config.json`
106 | 
107 | 2. **Single project setup** (easiest):
108 |    ```json
109 |    {
110 |      "default_project_mode": true,
111 |      "default_project": "main"
112 |    }
113 |    ```
114 | 
115 | 3. **Multi-project setup** (explicit):
116 |    - Keep `default_project_mode: false` (or omit it)
117 |    - LLM will need to specify project in each call
118 | 
119 | ### For MCP Server Developers
120 | 
121 | Update tool calls to include project parameter:
122 | 
123 | ```python
124 | # Old (v0.14.x)
125 | async def my_integration():
126 |     # Relied on middleware to set current_project
127 |     results = await search_notes(query="test")
128 | 
129 | # New (v0.15.0+)
130 | async def my_integration(project: str = "main"):
131 |     # Explicitly pass project
132 |     results = await search_notes(query="test", project=project)
133 | ```
134 | 
135 | ### For API Users
136 | 
137 | If using the Basic Memory API directly:
138 | 
139 | ```python
140 | # All endpoints now require project parameter
141 | import httpx
142 | 
143 | async with httpx.AsyncClient() as client:
144 |     response = await client.post(
145 |         "http://localhost:8000/notes/search",
146 |         json={
147 |             "query": "test",
148 |             "project": "main"  # Required
149 |         }
150 |     )
151 | ```
152 | 
153 | ## Technical Details
154 | 
155 | ### Architecture Change
156 | 
157 | **Removed:**
158 | - `ProjectMiddleware` - no longer maintains project context
159 | - `get_current_project()` - removed from MCP tools
160 | - Implicit project state in MCP server
161 | 
162 | **Added:**
163 | - `default_project_mode` config option
164 | - Explicit project parameter on all MCP tools
165 | - Stateless tool architecture (SPEC-6)
166 | 
167 | ### Configuration Options
168 | 
169 | | Config Key | Type | Default | Description |
170 | |------------|------|---------|-------------|
171 | | `default_project_mode` | bool | `false` | Auto-use default_project when project param omitted |
172 | | `default_project` | string | `"main"` | Project to use in default_project_mode |
173 | 
174 | ### Three-Tier Project Resolution
175 | 
176 | 1. **CLI Constraint** (Highest Priority): `--project` flag constrains all operations
177 | 2. **Explicit Parameter** (Medium): `project="name"` in tool calls
178 | 3. **Default Mode** (Lowest): Falls back to `default_project` if `default_project_mode: true`
179 | 
180 | ## Common Questions
181 | 
182 | **Q: Will my existing setup break?**
183 | A: If you use a single project and enable `default_project_mode: true`, no. Otherwise, you'll need to add project parameters.
184 | 
185 | **Q: Can I still use multiple projects?**
186 | A: Yes! Just specify the project parameter explicitly in each call.
187 | 
188 | **Q: What if I forget the project parameter?**
189 | A: You'll get an error unless `default_project_mode: true` is set in config.
190 | 
191 | **Q: How does this work with Claude Desktop?**
192 | A: Claude can read your config and use default_project_mode, or it can discover projects using `list_memory_projects()`.
193 | 
194 | ## Related Changes
195 | 
196 | - See `default-project-mode.md` for detailed config options
197 | - See `cloud-mode-usage.md` for cloud API usage
198 | - See SPEC-6 for full architectural specification
199 | 
```

--------------------------------------------------------------------------------
/v0.15.0-RELEASE-DOCS.md:
--------------------------------------------------------------------------------

```markdown
  1 | # v0.15.0 Release Plan
  2 | 
  3 | ## Release Overview
  4 | 
  5 | **Target Version**: v0.15.0
  6 | **Previous Version**: v0.14.4
  7 | **Release Date**: TBD
  8 | **Milestone**: [v0.15.0](https://github.com/basicmachines-co/basic-memory/milestone)
  9 | 
 10 | ### Release Highlights
 11 | 
 12 | This is a **major release** with 53 merged PRs introducing:
 13 | - **Cloud Sync**: Bidirectional sync with rclone bisync
 14 | - **Authentication**: JWT-based cloud authentication with subscription validation
 15 | - **Performance**: API optimizations and background processing improvements
 16 | - **Security**: Removed .env loading vulnerability, added .gitignore support
 17 | - **Platform**: Python 3.13 support
 18 | - **Bug Fixes**: 13+ critical fixes
 19 | 
 20 | ## Key Features by Category
 21 | 
 22 | ### Cloud Features
 23 | - Cloud authentication with JWT and subscription validation
 24 | - Bidirectional sync with rclone bisync
 25 | - Cloud mount commands for direct file access
 26 | - Cloud project management
 27 | - Integrity verification
 28 | 
 29 | ### Performance Improvements
 30 | - API performance optimizations (SPEC-11)
 31 | - Background relation resolution (prevents cold start blocking)
 32 | - WAL mode for SQLite
 33 | - Non-blocking sync operations
 34 | 
 35 | ### Security Enhancements
 36 | - Removed .env file loading vulnerability
 37 | - .gitignore integration (respects gitignored files)
 38 | - Improved authentication and session management
 39 | - Better config security
 40 | 
 41 | ### Developer Experience
 42 | - Python 3.13 support
 43 | - ChatGPT tools integration
 44 | - Improved error handling
 45 | - Better CLI output and formatting
 46 | 
 47 | ### Bug Fixes (13+ PRs)
 48 | - Entity upsert conflict resolution (#328)
 49 | - memory:// URL underscore handling (#329)
 50 | - .env loading removed (#330)
 51 | - Minimum timeframe enforcement (#318)
 52 | - move_note file extension handling (#281)
 53 | - Project parameter handling (#310)
 54 | - And more...
 55 | 
 56 | ---
 57 | 
 58 | ## Document
 59 | 
- [ ] **Manual Testing - New Cloud Features**
 61 |   - [ ] `bm cloud login` authentication flow
 62 |   - [ ] `bm cloud logout` session cleanup
 63 |   - [ ] `bm cloud sync` bidirectional sync
 64 |   - [ ] `bm cloud check` integrity verification
 65 |   - [ ] Cloud mode toggle for regular commands
 66 |   - [ ] Project creation in cloud mode
 67 | 
 68 | - [ ] **Manual Testing - Bug Fixes**
 69 |   - [ ] Entity upsert conflict resolution (#328)
 70 |   - [ ] memory:// URL underscore normalization (#329)
 71 |   - [ ] .gitignore file filtering (#287, #285)
 72 |   - [ ] move_note with/without file extension (#281)
  - [ ] .env file loading removed (#330)
 74 | 
 75 | - [ ] **Platform Testing**
 76 |   - [ ] Python 3.13 compatibility (new in this release)
 77 | 
 78 | - [ ] **CHANGELOG.md**
 79 |   - [ ] Create comprehensive v0.15.0 entry
 80 |   - [ ] List all major features
 81 |   - [ ] Document all bug fixes with issue links
 82 |   - [ ] Include breaking changes (if any)
 83 |   - [ ] Add migration guide (if needed)
 84 |   - [ ] Credit contributors
 85 |   - [ ] `mcp/tools/chatgpt_tools.py` - ChatGPT integration
 86 | 
 87 | - [x] **README.md**
 88 |   - [x] Update Python version badge to 3.13+
 89 |   - [x] Add cloud features to feature list
 90 |   - [x] Add cloud CLI commands section
 91 |   - [x] Expand MCP tools list with all tools organized by category
 92 |   - [x] Add Cloud CLI documentation link
 93 | 
 94 | - [x] **CLAUDE.md**
 95 |   - [x] Add Python 3.13+ support note
 96 |   - [x] Add cloud commands section
 97 |   - [x] Expand MCP tools with all missing tools
 98 |   - [x] Add comprehensive "Cloud Features (v0.15.0+)" section
 99 | 
100 | - [ ] **docs.basicmemory.com Updates** (Docs Site)
101 |   - [ ] **latest-releases.mdx**: Add v0.15.0 release entry with all features
102 |   - [ ] **cli-reference.mdx**: Add cloud commands section (login, logout, sync, check, mount, unmount)
103 |   - [ ] **mcp-tools-reference.mdx**: Add missing tools (read_content, all project management tools)
104 |   - [ ] **cloud-cli.mdx**: CREATE NEW - Cloud authentication, sync, rclone config, troubleshooting
105 |   - [ ] **getting-started.mdx**: Mention Python 3.13 support
106 |   - [ ] **whats-new.mdx**: Add v0.15.0 section with cloud features, performance, security updates
107 | 
108 | - [ ] **Cloud Documentation**
109 |   - [ ] Review docs/cloud-cli.md for accuracy
110 |   - [ ] Update authentication instructions
111 |   - [ ] Document subscription requirements
112 |   - [ ] Add troubleshooting section
113 |   - [ ] rclone configuration
114 | 
115 | - [ ] **API Documentation**
116 |   - [ ] Document new cloud endpoints
117 |   - [ ] Update MCP tool documentation
118 |   - [ ] Review schema documentation
119 |   - [ ] Config file changes
120 | 
121 | - [ ] **New Specifications**
122 |   - [ ] SPEC-11: API Performance Optimization
123 |   - [ ] SPEC-13: CLI Authentication with Subscription Validation
124 |   - [ ] SPEC-6: Explicit Project Parameter Architecture
125 | 
126 | - [ ] **Feature PRs**
127 |   - [ ] #330: Remove .env file loading
128 |   - [ ] #329: Normalize memory:// URLs
129 |   - [ ] #328: Simplify entity upsert
130 |   - [ ] #327: CLI subscription validation
131 |   - [ ] #322: Cloud CLI rclone bisync
132 |   - [ ] #320: Lifecycle management optimization
133 |   - [ ] #319: Background relation resolution
134 |   - [ ] #318: Minimum timeframe enforcement
135 |   - [ ] #317: Cloud deployment fixes
136 |   - [ ] #315: API performance optimizations
137 |   - [ ] #314: .gitignore integration
138 |   - [ ] #313: Disable permalinks config flag
139 |   - [ ] #312: DateTime JSON schema fixes
140 | 
141 | 
142 | ### Phase 5: GitHub Milestone Review
143 | 
144 | - [ ] **Closed Issues** (23 total)
145 |   - [ ] Review all closed issues for completeness
146 |   - [ ] Verify fixes are properly tested
147 |   - [ ] Ensure documentation updated
148 | 
149 | - [ ] **Merged PRs** (13 in milestone, 53 total since v0.14.4)
150 |   - [ ] All critical PRs merged
151 |   - [ ] All PRs properly tested
152 |   - [ ] All PRs documented
153 | 
154 | - [ ] **Open Issues**
155 |   - [ ] #326: Create user guides and demos (can defer to v0.15.1?)
156 |   - [ ] Decision on whether to block release
157 | 
158 | ## Notes
159 | 
160 | - This is a significant release with major new cloud features
161 | - Cloud features require active subscription - ensure this is clear in docs
162 | 
```

--------------------------------------------------------------------------------
/tests/api/test_memory_router.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for memory router endpoints."""
  2 | 
  3 | from datetime import datetime
  4 | 
  5 | import pytest
  6 | 
  7 | from basic_memory.schemas.memory import GraphContext
  8 | 
  9 | 
 10 | @pytest.mark.asyncio
 11 | async def test_get_memory_context(client, test_graph, project_url):
 12 |     """Test getting context from memory URL."""
 13 |     response = await client.get(f"{project_url}/memory/test/root")
 14 |     assert response.status_code == 200
 15 | 
 16 |     context = GraphContext(**response.json())
 17 |     assert len(context.results) == 1
 18 |     assert context.results[0].primary_result.permalink == "test/root"
 19 |     assert len(context.results[0].related_results) > 0
 20 | 
 21 |     # Verify metadata
 22 |     assert context.metadata.uri == "test/root"
 23 |     assert context.metadata.depth == 1  # default depth
 24 |     assert isinstance(context.metadata.generated_at, datetime)
 25 |     assert context.metadata.primary_count + context.metadata.related_count > 0
 26 |     assert context.metadata.total_results is not None  # Backwards compatibility field
 27 | 
 28 | 
 29 | @pytest.mark.asyncio
 30 | async def test_get_memory_context_pagination(client, test_graph, project_url):
 31 |     """Test getting context from memory URL."""
 32 |     response = await client.get(f"{project_url}/memory/test/root?page=1&page_size=1")
 33 |     assert response.status_code == 200
 34 | 
 35 |     context = GraphContext(**response.json())
 36 |     assert len(context.results) == 1
 37 |     assert context.results[0].primary_result.permalink == "test/root"
 38 |     assert len(context.results[0].related_results) > 0
 39 | 
 40 |     # Verify metadata
 41 |     assert context.metadata.uri == "test/root"
 42 |     assert context.metadata.depth == 1  # default depth
 43 |     assert isinstance(context.metadata.generated_at, datetime)
 44 |     assert context.metadata.primary_count > 0
 45 | 
 46 | 
 47 | @pytest.mark.asyncio
 48 | async def test_get_memory_context_pattern(client, test_graph, project_url):
 49 |     """Test getting context with pattern matching."""
 50 |     response = await client.get(f"{project_url}/memory/test/*")
 51 |     assert response.status_code == 200
 52 | 
 53 |     context = GraphContext(**response.json())
 54 |     assert len(context.results) > 1  # Should match multiple test/* paths
 55 |     assert all("test/" in item.primary_result.permalink for item in context.results)
 56 | 
 57 | 
 58 | @pytest.mark.asyncio
 59 | async def test_get_memory_context_depth(client, test_graph, project_url):
 60 |     """Test depth parameter affects relation traversal."""
 61 |     # With depth=1, should only get immediate connections
 62 |     response = await client.get(f"{project_url}/memory/test/root?depth=1&max_results=20")
 63 |     assert response.status_code == 200
 64 |     context1 = GraphContext(**response.json())
 65 | 
 66 |     # With depth=2, should get deeper connections
 67 |     response = await client.get(f"{project_url}/memory/test/root?depth=3&max_results=20")
 68 |     assert response.status_code == 200
 69 |     context2 = GraphContext(**response.json())
 70 | 
 71 |     # Calculate total related items in all result items
 72 |     total_related1 = sum(len(item.related_results) for item in context1.results)
 73 |     total_related2 = sum(len(item.related_results) for item in context2.results)
 74 | 
 75 |     assert total_related2 > total_related1
 76 | 
 77 | 
 78 | @pytest.mark.asyncio
 79 | async def test_get_memory_context_timeframe(client, test_graph, project_url):
 80 |     """Test timeframe parameter filters by date."""
 81 |     # Recent timeframe
 82 |     response = await client.get(f"{project_url}/memory/test/root?timeframe=1d")
 83 |     assert response.status_code == 200
 84 |     recent = GraphContext(**response.json())
 85 | 
 86 |     # Longer timeframe
 87 |     response = await client.get(f"{project_url}/memory/test/root?timeframe=30d")
 88 |     assert response.status_code == 200
 89 |     older = GraphContext(**response.json())
 90 | 
 91 |     # Calculate total related items
 92 |     total_recent_related = (
 93 |         sum(len(item.related_results) for item in recent.results) if recent.results else 0
 94 |     )
 95 |     total_older_related = (
 96 |         sum(len(item.related_results) for item in older.results) if older.results else 0
 97 |     )
 98 | 
 99 |     assert total_older_related >= total_recent_related
100 | 
101 | 
@pytest.mark.asyncio
async def test_not_found(client, project_url):
    """A non-existent path yields an empty (but still successful) context."""
    resp = await client.get(f"{project_url}/memory/test/does-not-exist")
    assert resp.status_code == 200

    ctx = GraphContext(**resp.json())
    assert len(ctx.results) == 0
110 | 
111 | 
@pytest.mark.asyncio
async def test_recent_activity(client, test_graph, project_url):
    """The /memory/recent endpoint returns recent items."""
    resp = await client.get(f"{project_url}/memory/recent")
    assert resp.status_code == 200

    ctx = GraphContext(**resp.json())
    assert len(ctx.results) > 0
    assert ctx.metadata.primary_count > 0
121 | 
122 | 
@pytest.mark.asyncio
async def test_recent_activity_pagination(client, test_graph, project_url):
    """Recent activity honors the page/page_size query parameters."""
    resp = await client.get(f"{project_url}/memory/recent?page=1&page_size=1")
    assert resp.status_code == 200

    ctx = GraphContext(**resp.json())
    assert len(ctx.results) == 1
    assert ctx.page == 1
    assert ctx.page_size == 1
133 | 
134 | 
@pytest.mark.asyncio
async def test_recent_activity_by_type(client, test_graph, project_url):
    """Recent activity can be filtered to specific item types."""
    resp = await client.get(f"{project_url}/memory/recent?type=relation&type=observation")
    assert resp.status_code == 200

    ctx = GraphContext(**resp.json())
    assert len(ctx.results) > 0

    # Primary results should include at least one of the requested types.
    kinds = [item.primary_result.type for item in ctx.results]
    assert "relation" in kinds or "observation" in kinds
147 | 
```

--------------------------------------------------------------------------------
/tests/mcp/test_tool_utils.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for MCP tool utilities."""
  2 | 
  3 | from unittest.mock import AsyncMock
  4 | 
  5 | import pytest
  6 | from httpx import AsyncClient, HTTPStatusError
  7 | from mcp.server.fastmcp.exceptions import ToolError
  8 | 
  9 | from basic_memory.mcp.tools.utils import (
 10 |     call_get,
 11 |     call_post,
 12 |     call_put,
 13 |     call_delete,
 14 |     get_error_message,
 15 | )
 16 | 
 17 | 
 18 | @pytest.fixture
 19 | def mock_response(monkeypatch):
 20 |     """Create a mock response."""
 21 | 
 22 |     class MockResponse:
 23 |         def __init__(self, status_code=200):
 24 |             self.status_code = status_code
 25 |             self.is_success = status_code < 400
 26 |             self.json = lambda: {}
 27 | 
 28 |         def raise_for_status(self):
 29 |             if self.status_code >= 400:
 30 |                 raise HTTPStatusError(
 31 |                     message=f"HTTP Error {self.status_code}", request=None, response=self
 32 |                 )
 33 | 
 34 |     return MockResponse
 35 | 
 36 | 
 37 | @pytest.mark.asyncio
 38 | async def test_call_get_success(mock_response):
 39 |     """Test successful GET request."""
 40 |     client = AsyncClient()
 41 |     client.get = lambda *args, **kwargs: AsyncMock(return_value=mock_response())()
 42 | 
 43 |     response = await call_get(client, "http://test.com")
 44 |     assert response.status_code == 200
 45 | 
 46 | 
 47 | @pytest.mark.asyncio
 48 | async def test_call_get_error(mock_response):
 49 |     """Test GET request with error."""
 50 |     client = AsyncClient()
 51 |     client.get = lambda *args, **kwargs: AsyncMock(return_value=mock_response(404))()
 52 | 
 53 |     with pytest.raises(ToolError) as exc:
 54 |         await call_get(client, "http://test.com")
 55 |     assert "Resource not found" in str(exc.value)
 56 | 
 57 | 
 58 | @pytest.mark.asyncio
 59 | async def test_call_post_success(mock_response):
 60 |     """Test successful POST request."""
 61 |     client = AsyncClient()
 62 |     response = mock_response()
 63 |     response.json = lambda: {"test": "data"}
 64 |     client.post = lambda *args, **kwargs: AsyncMock(return_value=response)()
 65 | 
 66 |     response = await call_post(client, "http://test.com", json={"test": "data"})
 67 |     assert response.status_code == 200
 68 | 
 69 | 
 70 | @pytest.mark.asyncio
 71 | async def test_call_post_error(mock_response):
 72 |     """Test POST request with error."""
 73 |     client = AsyncClient()
 74 |     response = mock_response(500)
 75 |     response.json = lambda: {"test": "error"}
 76 | 
 77 |     client.post = lambda *args, **kwargs: AsyncMock(return_value=response)()
 78 | 
 79 |     with pytest.raises(ToolError) as exc:
 80 |         await call_post(client, "http://test.com", json={"test": "data"})
 81 |     assert "Internal server error" in str(exc.value)
 82 | 
 83 | 
 84 | @pytest.mark.asyncio
 85 | async def test_call_put_success(mock_response):
 86 |     """Test successful PUT request."""
 87 |     client = AsyncClient()
 88 |     client.put = lambda *args, **kwargs: AsyncMock(return_value=mock_response())()
 89 | 
 90 |     response = await call_put(client, "http://test.com", json={"test": "data"})
 91 |     assert response.status_code == 200
 92 | 
 93 | 
 94 | @pytest.mark.asyncio
 95 | async def test_call_put_error(mock_response):
 96 |     """Test PUT request with error."""
 97 |     client = AsyncClient()
 98 |     client.put = lambda *args, **kwargs: AsyncMock(return_value=mock_response(400))()
 99 | 
100 |     with pytest.raises(ToolError) as exc:
101 |         await call_put(client, "http://test.com", json={"test": "data"})
102 |     assert "Invalid request" in str(exc.value)
103 | 
104 | 
@pytest.mark.asyncio
async def test_call_delete_success(mock_response):
    """Test that call_delete returns the response unchanged on a 200 status."""
    client = AsyncClient()
    # Assign the AsyncMock directly instead of wrapping it in a lambda.
    client.delete = AsyncMock(return_value=mock_response())

    response = await call_delete(client, "http://test.com")
    assert response.status_code == 200
113 | 
114 | 
@pytest.mark.asyncio
async def test_call_delete_error(mock_response):
    """Test that call_delete raises ToolError with an 'access denied' message on 403."""
    client = AsyncClient()
    # Assign the AsyncMock directly instead of wrapping it in a lambda.
    client.delete = AsyncMock(return_value=mock_response(403))

    with pytest.raises(ToolError) as exc:
        await call_delete(client, "http://test.com")
    assert "Access denied" in str(exc.value)
124 | 
125 | 
@pytest.mark.asyncio
async def test_call_get_with_params(mock_response):
    """Test that call_get forwards query parameters to the underlying client.get."""
    client = AsyncClient()
    stub_get = AsyncMock(return_value=mock_response())
    client.get = stub_get

    query = {"key": "value", "test": "data"}
    await call_get(client, "http://test.com", params=query)

    # The params dict must be passed through unchanged as a keyword argument.
    stub_get.assert_called_once()
    assert stub_get.call_args.kwargs["params"] == query
139 | 
140 | 
@pytest.mark.asyncio
async def test_get_error_message():
    """Test the get_error_message function."""
    from httpx import URL

    # 400 -> invalid request, message mentions the resource path
    msg_400 = get_error_message(400, "http://test.com/resource", "GET")
    assert "Invalid request" in msg_400
    assert "resource" in msg_400

    # 404 -> resource not found
    msg_404 = get_error_message(404, "http://test.com/missing", "GET")
    assert "Resource not found" in msg_404
    assert "missing" in msg_404

    # 500 -> internal server error
    msg_500 = get_error_message(500, "http://test.com/server", "POST")
    assert "Internal server error" in msg_500
    assert "server" in msg_500

    # An httpx.URL object (not just a string) is handled too
    msg_403 = get_error_message(403, URL("http://test.com/complex/path"), "DELETE")
    assert "Access denied" in msg_403
    assert "path" in msg_403
167 | 
168 | 
@pytest.mark.asyncio
async def test_call_post_with_json(mock_response):
    """Test that call_post forwards a JSON payload to the underlying client.post."""
    client = AsyncClient()
    stub_response = mock_response()
    stub_response.json = lambda: {"test": "data"}

    stub_post = AsyncMock(return_value=stub_response)
    client.post = stub_post

    payload = {"key": "value", "nested": {"test": "data"}}
    await call_post(client, "http://test.com", json=payload)

    # The nested payload must be passed through unchanged as the json kwarg.
    stub_post.assert_called_once()
    assert stub_post.call_args.kwargs["json"] == payload
185 | 
```
Page 3/23FirstPrevNextLast