#
tokens: 46621/50000 7/348 files (page 15/23)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 15 of 23. Use http://codebase.md/basicmachines-co/basic-memory?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .claude
│   ├── agents
│   │   ├── python-developer.md
│   │   └── system-architect.md
│   └── commands
│       ├── release
│       │   ├── beta.md
│       │   ├── changelog.md
│       │   ├── release-check.md
│       │   └── release.md
│       ├── spec.md
│       └── test-live.md
├── .dockerignore
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── config.yml
│   │   ├── documentation.md
│   │   └── feature_request.md
│   └── workflows
│       ├── claude-code-review.yml
│       ├── claude-issue-triage.yml
│       ├── claude.yml
│       ├── dev-release.yml
│       ├── docker.yml
│       ├── pr-title.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── CITATION.cff
├── CLA.md
├── CLAUDE.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── ai-assistant-guide-extended.md
│   ├── character-handling.md
│   ├── cloud-cli.md
│   └── Docker.md
├── justfile
├── LICENSE
├── llms-install.md
├── pyproject.toml
├── README.md
├── SECURITY.md
├── smithery.yaml
├── specs
│   ├── SPEC-1 Specification-Driven Development Process.md
│   ├── SPEC-10 Unified Deployment Workflow and Event Tracking.md
│   ├── SPEC-11 Basic Memory API Performance Optimization.md
│   ├── SPEC-12 OpenTelemetry Observability.md
│   ├── SPEC-13 CLI Authentication with Subscription Validation.md
│   ├── SPEC-14 Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-14- Cloud Git Versioning & GitHub Backup.md
│   ├── SPEC-15 Configuration Persistence via Tigris for Cloud Tenants.md
│   ├── SPEC-16 MCP Cloud Service Consolidation.md
│   ├── SPEC-17 Semantic Search with ChromaDB.md
│   ├── SPEC-18 AI Memory Management Tool.md
│   ├── SPEC-19 Sync Performance and Memory Optimization.md
│   ├── SPEC-2 Slash Commands Reference.md
│   ├── SPEC-3 Agent Definitions.md
│   ├── SPEC-4 Notes Web UI Component Architecture.md
│   ├── SPEC-5 CLI Cloud Upload via WebDAV.md
│   ├── SPEC-6 Explicit Project Parameter Architecture.md
│   ├── SPEC-7 POC to spike Tigris Turso for local access to cloud data.md
│   ├── SPEC-8 TigrisFS Integration.md
│   ├── SPEC-9 Multi-Project Bidirectional Sync Architecture.md
│   ├── SPEC-9 Signed Header Tenant Information.md
│   └── SPEC-9-1 Follow-Ups- Conflict, Sync, and Observability.md
├── src
│   └── basic_memory
│       ├── __init__.py
│       ├── alembic
│       │   ├── alembic.ini
│       │   ├── env.py
│       │   ├── migrations.py
│       │   ├── script.py.mako
│       │   └── versions
│       │       ├── 3dae7c7b1564_initial_schema.py
│       │       ├── 502b60eaa905_remove_required_from_entity_permalink.py
│       │       ├── 5fe1ab1ccebe_add_projects_table.py
│       │       ├── 647e7a75e2cd_project_constraint_fix.py
│       │       ├── 9d9c1cb7d8f5_add_mtime_and_size_columns_to_entity_.py
│       │       ├── a1b2c3d4e5f6_fix_project_foreign_keys.py
│       │       ├── b3c3938bacdb_relation_to_name_unique_index.py
│       │       ├── cc7172b46608_update_search_index_schema.py
│       │       └── e7e1f4367280_add_scan_watermark_tracking_to_project.py
│       ├── api
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── routers
│       │   │   ├── __init__.py
│       │   │   ├── directory_router.py
│       │   │   ├── importer_router.py
│       │   │   ├── knowledge_router.py
│       │   │   ├── management_router.py
│       │   │   ├── memory_router.py
│       │   │   ├── project_router.py
│       │   │   ├── prompt_router.py
│       │   │   ├── resource_router.py
│       │   │   ├── search_router.py
│       │   │   └── utils.py
│       │   └── template_loader.py
│       ├── cli
│       │   ├── __init__.py
│       │   ├── app.py
│       │   ├── auth.py
│       │   ├── commands
│       │   │   ├── __init__.py
│       │   │   ├── cloud
│       │   │   │   ├── __init__.py
│       │   │   │   ├── api_client.py
│       │   │   │   ├── bisync_commands.py
│       │   │   │   ├── cloud_utils.py
│       │   │   │   ├── core_commands.py
│       │   │   │   ├── mount_commands.py
│       │   │   │   ├── rclone_config.py
│       │   │   │   ├── rclone_installer.py
│       │   │   │   ├── upload_command.py
│       │   │   │   └── upload.py
│       │   │   ├── command_utils.py
│       │   │   ├── db.py
│       │   │   ├── import_chatgpt.py
│       │   │   ├── import_claude_conversations.py
│       │   │   ├── import_claude_projects.py
│       │   │   ├── import_memory_json.py
│       │   │   ├── mcp.py
│       │   │   ├── project.py
│       │   │   ├── status.py
│       │   │   ├── sync.py
│       │   │   └── tool.py
│       │   └── main.py
│       ├── config.py
│       ├── db.py
│       ├── deps.py
│       ├── file_utils.py
│       ├── ignore_utils.py
│       ├── importers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── chatgpt_importer.py
│       │   ├── claude_conversations_importer.py
│       │   ├── claude_projects_importer.py
│       │   ├── memory_json_importer.py
│       │   └── utils.py
│       ├── markdown
│       │   ├── __init__.py
│       │   ├── entity_parser.py
│       │   ├── markdown_processor.py
│       │   ├── plugins.py
│       │   ├── schemas.py
│       │   └── utils.py
│       ├── mcp
│       │   ├── __init__.py
│       │   ├── async_client.py
│       │   ├── project_context.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── ai_assistant_guide.py
│       │   │   ├── continue_conversation.py
│       │   │   ├── recent_activity.py
│       │   │   ├── search.py
│       │   │   └── utils.py
│       │   ├── resources
│       │   │   ├── ai_assistant_guide.md
│       │   │   └── project_info.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── build_context.py
│       │       ├── canvas.py
│       │       ├── chatgpt_tools.py
│       │       ├── delete_note.py
│       │       ├── edit_note.py
│       │       ├── list_directory.py
│       │       ├── move_note.py
│       │       ├── project_management.py
│       │       ├── read_content.py
│       │       ├── read_note.py
│       │       ├── recent_activity.py
│       │       ├── search.py
│       │       ├── utils.py
│       │       ├── view_note.py
│       │       └── write_note.py
│       ├── models
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── knowledge.py
│       │   ├── project.py
│       │   └── search.py
│       ├── repository
│       │   ├── __init__.py
│       │   ├── entity_repository.py
│       │   ├── observation_repository.py
│       │   ├── project_info_repository.py
│       │   ├── project_repository.py
│       │   ├── relation_repository.py
│       │   ├── repository.py
│       │   └── search_repository.py
│       ├── schemas
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── cloud.py
│       │   ├── delete.py
│       │   ├── directory.py
│       │   ├── importer.py
│       │   ├── memory.py
│       │   ├── project_info.py
│       │   ├── prompt.py
│       │   ├── request.py
│       │   ├── response.py
│       │   ├── search.py
│       │   └── sync_report.py
│       ├── services
│       │   ├── __init__.py
│       │   ├── context_service.py
│       │   ├── directory_service.py
│       │   ├── entity_service.py
│       │   ├── exceptions.py
│       │   ├── file_service.py
│       │   ├── initialization.py
│       │   ├── link_resolver.py
│       │   ├── project_service.py
│       │   ├── search_service.py
│       │   └── service.py
│       ├── sync
│       │   ├── __init__.py
│       │   ├── background_sync.py
│       │   ├── sync_service.py
│       │   └── watch_service.py
│       ├── templates
│       │   └── prompts
│       │       ├── continue_conversation.hbs
│       │       └── search.hbs
│       └── utils.py
├── test-int
│   ├── BENCHMARKS.md
│   ├── cli
│   │   ├── test_project_commands_integration.py
│   │   ├── test_sync_commands_integration.py
│   │   └── test_version_integration.py
│   ├── conftest.py
│   ├── mcp
│   │   ├── test_build_context_underscore.py
│   │   ├── test_build_context_validation.py
│   │   ├── test_chatgpt_tools_integration.py
│   │   ├── test_default_project_mode_integration.py
│   │   ├── test_delete_note_integration.py
│   │   ├── test_edit_note_integration.py
│   │   ├── test_list_directory_integration.py
│   │   ├── test_move_note_integration.py
│   │   ├── test_project_management_integration.py
│   │   ├── test_project_state_sync_integration.py
│   │   ├── test_read_content_integration.py
│   │   ├── test_read_note_integration.py
│   │   ├── test_search_integration.py
│   │   ├── test_single_project_mcp_integration.py
│   │   └── test_write_note_integration.py
│   ├── test_db_wal_mode.py
│   ├── test_disable_permalinks_integration.py
│   └── test_sync_performance_benchmark.py
├── tests
│   ├── __init__.py
│   ├── api
│   │   ├── conftest.py
│   │   ├── test_async_client.py
│   │   ├── test_continue_conversation_template.py
│   │   ├── test_directory_router.py
│   │   ├── test_importer_router.py
│   │   ├── test_knowledge_router.py
│   │   ├── test_management_router.py
│   │   ├── test_memory_router.py
│   │   ├── test_project_router_operations.py
│   │   ├── test_project_router.py
│   │   ├── test_prompt_router.py
│   │   ├── test_relation_background_resolution.py
│   │   ├── test_resource_router.py
│   │   ├── test_search_router.py
│   │   ├── test_search_template.py
│   │   ├── test_template_loader_helpers.py
│   │   └── test_template_loader.py
│   ├── cli
│   │   ├── conftest.py
│   │   ├── test_bisync_commands.py
│   │   ├── test_cli_tools.py
│   │   ├── test_cloud_authentication.py
│   │   ├── test_cloud_utils.py
│   │   ├── test_ignore_utils.py
│   │   ├── test_import_chatgpt.py
│   │   ├── test_import_claude_conversations.py
│   │   ├── test_import_claude_projects.py
│   │   ├── test_import_memory_json.py
│   │   └── test_upload.py
│   ├── conftest.py
│   ├── db
│   │   └── test_issue_254_foreign_key_constraints.py
│   ├── importers
│   │   ├── test_importer_base.py
│   │   └── test_importer_utils.py
│   ├── markdown
│   │   ├── __init__.py
│   │   ├── test_date_frontmatter_parsing.py
│   │   ├── test_entity_parser_error_handling.py
│   │   ├── test_entity_parser.py
│   │   ├── test_markdown_plugins.py
│   │   ├── test_markdown_processor.py
│   │   ├── test_observation_edge_cases.py
│   │   ├── test_parser_edge_cases.py
│   │   ├── test_relation_edge_cases.py
│   │   └── test_task_detection.py
│   ├── mcp
│   │   ├── conftest.py
│   │   ├── test_obsidian_yaml_formatting.py
│   │   ├── test_permalink_collision_file_overwrite.py
│   │   ├── test_prompts.py
│   │   ├── test_resources.py
│   │   ├── test_tool_build_context.py
│   │   ├── test_tool_canvas.py
│   │   ├── test_tool_delete_note.py
│   │   ├── test_tool_edit_note.py
│   │   ├── test_tool_list_directory.py
│   │   ├── test_tool_move_note.py
│   │   ├── test_tool_read_content.py
│   │   ├── test_tool_read_note.py
│   │   ├── test_tool_recent_activity.py
│   │   ├── test_tool_resource.py
│   │   ├── test_tool_search.py
│   │   ├── test_tool_utils.py
│   │   ├── test_tool_view_note.py
│   │   ├── test_tool_write_note.py
│   │   └── tools
│   │       └── test_chatgpt_tools.py
│   ├── Non-MarkdownFileSupport.pdf
│   ├── repository
│   │   ├── test_entity_repository_upsert.py
│   │   ├── test_entity_repository.py
│   │   ├── test_entity_upsert_issue_187.py
│   │   ├── test_observation_repository.py
│   │   ├── test_project_info_repository.py
│   │   ├── test_project_repository.py
│   │   ├── test_relation_repository.py
│   │   ├── test_repository.py
│   │   ├── test_search_repository_edit_bug_fix.py
│   │   └── test_search_repository.py
│   ├── schemas
│   │   ├── test_base_timeframe_minimum.py
│   │   ├── test_memory_serialization.py
│   │   ├── test_memory_url_validation.py
│   │   ├── test_memory_url.py
│   │   ├── test_schemas.py
│   │   └── test_search.py
│   ├── Screenshot.png
│   ├── services
│   │   ├── test_context_service.py
│   │   ├── test_directory_service.py
│   │   ├── test_entity_service_disable_permalinks.py
│   │   ├── test_entity_service.py
│   │   ├── test_file_service.py
│   │   ├── test_initialization.py
│   │   ├── test_link_resolver.py
│   │   ├── test_project_removal_bug.py
│   │   ├── test_project_service_operations.py
│   │   ├── test_project_service.py
│   │   └── test_search_service.py
│   ├── sync
│   │   ├── test_character_conflicts.py
│   │   ├── test_sync_service_incremental.py
│   │   ├── test_sync_service.py
│   │   ├── test_sync_wikilink_issue.py
│   │   ├── test_tmp_files.py
│   │   ├── test_watch_service_edge_cases.py
│   │   ├── test_watch_service_reload.py
│   │   └── test_watch_service.py
│   ├── test_config.py
│   ├── test_db_migration_deduplication.py
│   ├── test_deps.py
│   ├── test_production_cascade_delete.py
│   └── utils
│       ├── test_file_utils.py
│       ├── test_frontmatter_obsidian_compatible.py
│       ├── test_parse_tags.py
│       ├── test_permalink_formatting.py
│       ├── test_utf8_handling.py
│       └── test_validate_project_path.py
├── uv.lock
├── v0.15.0-RELEASE-DOCS.md
└── v15-docs
    ├── api-performance.md
    ├── background-relations.md
    ├── basic-memory-home.md
    ├── bug-fixes.md
    ├── chatgpt-integration.md
    ├── cloud-authentication.md
    ├── cloud-bisync.md
    ├── cloud-mode-usage.md
    ├── cloud-mount.md
    ├── default-project-mode.md
    ├── env-file-removal.md
    ├── env-var-overrides.md
    ├── explicit-project-parameter.md
    ├── gitignore-integration.md
    ├── project-root-env-var.md
    ├── README.md
    └── sqlite-performance.md
```

# Files

--------------------------------------------------------------------------------
/specs/SPEC-18 AI Memory Management Tool.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | title: 'SPEC-18: AI Memory Management Tool'
  3 | type: spec
  4 | permalink: specs/spec-18-ai-memory-management-tool
  5 | tags:
  6 | - mcp
  7 | - memory
  8 | - ai-context
  9 | - tools
 10 | ---
 11 | 
 12 | # SPEC-18: AI Memory Management Tool
 13 | 
 14 | ## Why
 15 | 
 16 | Anthropic recently released a memory tool for Claude that enables storing and retrieving information across conversations using client-side file operations. This validates Basic Memory's local-first, file-based architecture - Anthropic converged on the same pattern.
 17 | 
 18 | However, Anthropic's memory tool is only available via their API and stores plain text. Basic Memory can offer a superior implementation through MCP that:
 19 | 
 20 | 1. **Works everywhere** - Claude Desktop, Code, VS Code, Cursor via MCP (not just API)
 21 | 2. **Structured knowledge** - Entities with observations/relations vs plain text
 22 | 3. **Full search** - Full-text search, graph traversal, time-aware queries
 23 | 4. **Unified storage** - Agent memories + user notes in one knowledge graph
 24 | 5. **Existing infrastructure** - Leverages SQLite indexing, sync, multi-project support
 25 | 
 26 | This would enable AI agents to store contextual memories alongside user notes, with all the power of Basic Memory's knowledge graph features.
 27 | 
 28 | ## What
 29 | 
 30 | Create a new MCP tool `memory` that matches Anthropic's tool interface exactly, allowing Claude to use it with zero learning curve. The tool will store files in Basic Memory's `/memories` directory and support Basic Memory's structured markdown format in the file content.
 31 | 
 32 | ### Affected Components
 33 | 
 34 | - **New MCP Tool**: `src/basic_memory/mcp/tools/memory_tool.py`
 35 | - **Dedicated Memories Project**: Create a separate "memories" Basic Memory project
 36 | - **Project Isolation**: Memories stored separately from user notes/documents
 37 | - **File Organization**: Within the memories project, use folder structure:
 38 |   - `user/` - User preferences, context, communication style
 39 |   - `projects/` - Project-specific state and decisions
 40 |   - `sessions/` - Conversation-specific working memory
 41 |   - `patterns/` - Learned patterns and insights
 42 | 
 43 | ### Tool Commands
 44 | 
 45 | The tool will support these commands (exactly matching Anthropic's interface):
 46 | 
 47 | - `view` - Display directory contents or file content (with optional line range)
 48 | - `create` - Create or overwrite a file with given content
 49 | - `str_replace` - Replace text in an existing file
 50 | - `insert` - Insert text at specific line number
 51 | - `delete` - Delete file or directory
 52 | - `rename` - Move or rename file/directory
 53 | 
 54 | ### Memory Note Format
 55 | 
 56 | Memories will use Basic Memory's standard structure:
 57 | 
 58 | ```markdown
 59 | ---
 60 | title: User Preferences
 61 | permalink: memories/user/preferences
 62 | type: memory
 63 | memory_type: preferences
 64 | created_by: claude
 65 | tags: [user, preferences, style]
 66 | ---
 67 | 
 68 | # User Preferences
 69 | 
 70 | ## Observations
 71 | - [communication] Prefers concise, direct responses without preamble #style
 72 | - [tone] Appreciates validation but dislikes excessive apologizing #communication
 73 | - [technical] Works primarily in Python with type annotations #coding
 74 | 
 75 | ## Relations
 76 | - relates_to [[Basic Memory Project]]
 77 | - informs [[Response Style Guidelines]]
 78 | ```
 79 | 
 80 | ## How (High Level)
 81 | 
 82 | ### Implementation Approach
 83 | 
 84 | The memory tool matches Anthropic's interface but uses a dedicated Basic Memory project:
 85 | 
 86 | ```python
 87 | async def memory_tool(
 88 |     command: str,
 89 |     path: str,
 90 |     file_text: Optional[str] = None,
 91 |     old_str: Optional[str] = None,
 92 |     new_str: Optional[str] = None,
 93 |     insert_line: Optional[int] = None,
 94 |     insert_text: Optional[str] = None,
 95 |     old_path: Optional[str] = None,
 96 |     new_path: Optional[str] = None,
 97 |     view_range: Optional[List[int]] = None,
 98 | ):
 99 |     """Memory tool with Anthropic-compatible interface.
100 | 
101 |     Operates on a dedicated "memories" Basic Memory project,
102 |     keeping AI memories separate from user notes.
103 |     """
104 | 
105 |     # Get the memories project (auto-created if doesn't exist)
106 |     memories_project = get_or_create_memories_project()
107 | 
108 |     # Validate path security using pathlib (prevent directory traversal)
109 |     safe_path = validate_memory_path(path, memories_project.project_path)
110 | 
111 |     # Use existing project isolation - already prevents cross-project access
112 |     full_path = memories_project.project_path / safe_path
113 | 
114 |     if command == "view":
115 |         # Return directory listing or file content
116 |         if full_path.is_dir():
117 |             return list_directory_contents(full_path)
118 |         return read_file_content(full_path, view_range)
119 | 
120 |     elif command == "create":
121 |         # Write file directly (file_text can contain BM markdown)
122 |         full_path.parent.mkdir(parents=True, exist_ok=True)
123 |         full_path.write_text(file_text)
124 |         # Sync service will detect and index automatically
125 |         return f"Created {path}"
126 | 
127 |     elif command == "str_replace":
128 |         # Read, replace, write
129 |         content = full_path.read_text()
130 |         updated = content.replace(old_str, new_str)
131 |         full_path.write_text(updated)
132 |         return f"Replaced text in {path}"
133 | 
134 |     elif command == "insert":
135 |         # Insert at line number
136 |         lines = full_path.read_text().splitlines()
137 |         lines.insert(insert_line, insert_text)
138 |         full_path.write_text("\n".join(lines))
139 |         return f"Inserted text at line {insert_line}"
140 | 
141 |     elif command == "delete":
142 |         # Delete file or directory
143 |         if full_path.is_dir():
144 |             shutil.rmtree(full_path)
145 |         else:
146 |             full_path.unlink()
147 |         return f"Deleted {path}"
148 | 
149 |     elif command == "rename":
150 |         # Move/rename
151 |         full_path.rename(memories_project.project_path / new_path)
152 |         return f"Renamed {old_path} to {new_path}"
153 | ```
154 | 
155 | ### Key Design Decisions
156 | 
157 | 1. **Exact interface match** - Same commands, parameters as Anthropic's tool
158 | 2. **Dedicated memories project** - Separate Basic Memory project keeps AI memories isolated from user notes
159 | 3. **Existing project isolation** - Leverage BM's existing cross-project security (no additional validation needed)
160 | 4. **Direct file I/O** - No schema conversion, just read/write files
161 | 5. **Structured content supported** - `file_text` can use BM markdown format with frontmatter, observations, relations
162 | 6. **Automatic indexing** - Sync service watches memories project and indexes changes
163 | 7. **Path security** - Use `pathlib.Path.resolve()` and `relative_to()` to prevent directory traversal
164 | 8. **Error handling** - Follow Anthropic's text editor tool error patterns
165 | 
166 | ### MCP Tool Schema
167 | 
168 | Exact match to Anthropic's memory tool schema:
169 | 
170 | ```json
171 | {
172 |     "name": "memory",
173 |     "description": "Store and retrieve information across conversations using structured markdown files. All operations must be within the /memories directory. Supports Basic Memory markdown format including frontmatter, observations, and relations.",
174 |     "input_schema": {
175 |         "type": "object",
176 |         "properties": {
177 |             "command": {
178 |                 "type": "string",
179 |                 "enum": ["view", "create", "str_replace", "insert", "delete", "rename"],
180 |                 "description": "File operation to perform"
181 |             },
182 |             "path": {
183 |                 "type": "string",
184 |                 "description": "Path within /memories directory (required for all commands)"
185 |             },
186 |             "file_text": {
187 |                 "type": "string",
188 |                 "description": "Content to write (for create command). Supports Basic Memory markdown format."
189 |             },
190 |             "view_range": {
191 |                 "type": "array",
192 |                 "items": {"type": "integer"},
193 |                 "description": "Optional [start, end] line range for view command"
194 |             },
195 |             "old_str": {
196 |                 "type": "string",
197 |                 "description": "Text to replace (for str_replace command)"
198 |             },
199 |             "new_str": {
200 |                 "type": "string",
201 |                 "description": "Replacement text (for str_replace command)"
202 |             },
203 |             "insert_line": {
204 |                 "type": "integer",
205 |                 "description": "Line number to insert at (for insert command)"
206 |             },
207 |             "insert_text": {
208 |                 "type": "string",
209 |                 "description": "Text to insert (for insert command)"
210 |             },
211 |             "old_path": {
212 |                 "type": "string",
213 |                 "description": "Current path (for rename command)"
214 |             },
215 |             "new_path": {
216 |                 "type": "string",
217 |                 "description": "New path (for rename command)"
218 |             }
219 |         },
220 |         "required": ["command", "path"]
221 |     }
222 | }
223 | ```
224 | 
225 | ### Prompting Guidance
226 | 
227 | When the `memory` tool is included, Basic Memory should provide system prompt guidance to help Claude use it effectively.
228 | 
229 | #### Automatic System Prompt Addition
230 | 
231 | ```text
232 | MEMORY PROTOCOL FOR BASIC MEMORY:
233 | 1. ALWAYS check your memory directory first using `view` command on root directory
234 | 2. Your memories are stored in a dedicated Basic Memory project (isolated from user notes)
235 | 3. Use structured markdown format in memory files:
236 |    - Include frontmatter with title, type: memory, tags
237 |    - Use ## Observations with [category] prefixes for facts
238 |    - Use ## Relations to link memories with [[WikiLinks]]
239 | 4. Record progress, context, and decisions as categorized observations
240 | 5. Link related memories using relations
241 | 6. ASSUME INTERRUPTION: Context may reset - save progress frequently
242 | 
243 | MEMORY ORGANIZATION:
244 | - user/ - User preferences, context, communication style
245 | - projects/ - Project-specific state and decisions
246 | - sessions/ - Conversation-specific working memory
247 | - patterns/ - Learned patterns and insights
248 | 
249 | MEMORY ADVANTAGES:
250 | - Your memories are automatically searchable via full-text search
251 | - Relations create a knowledge graph you can traverse
252 | - Memories are isolated from user notes (separate project)
253 | - Use search_notes(project="memories") to find relevant past context
254 | - Use recent_activity(project="memories") to see what changed recently
255 | - Use build_context() to navigate memory relations
256 | ```
257 | 
258 | #### Optional MCP Prompt: `memory_guide`
259 | 
260 | Create an MCP prompt that provides detailed guidance and examples:
261 | 
262 | ```python
263 | {
264 |     "name": "memory_guide",
265 |     "description": "Comprehensive guidance for using Basic Memory's memory tool effectively, including structured markdown examples and best practices"
266 | }
267 | ```
268 | 
269 | This prompt returns:
270 | - Full protocol and conventions
271 | - Example memory file structures
272 | - Tips for organizing observations and relations
273 | - Integration with other Basic Memory tools
274 | - Common patterns (user preferences, project state, session tracking)
275 | 
276 | #### User Customization
277 | 
278 | Users can customize memory behavior with additional instructions:
279 | - "Only write information relevant to [topic] in your memory system"
280 | - "Keep memory files concise and organized - delete outdated content"
281 | - "Use detailed observations for technical decisions and implementation notes"
282 | - "Always link memories to related project documentation using relations"
283 | 
284 | ### Error Handling
285 | 
286 | Follow Anthropic's text editor tool error handling patterns for consistency:
287 | 
288 | #### Error Types
289 | 
290 | 1. **File Not Found**
291 |    ```json
292 |    {"error": "File not found: memories/user/preferences.md", "is_error": true}
293 |    ```
294 | 
295 | 2. **Permission Denied**
296 |    ```json
297 |    {"error": "Permission denied: Cannot write outside /memories directory", "is_error": true}
298 |    ```
299 | 
300 | 3. **Invalid Path (Directory Traversal)**
301 |    ```json
302 |    {"error": "Invalid path: Path must be within /memories directory", "is_error": true}
303 |    ```
304 | 
305 | 4. **Multiple Matches (str_replace)**
306 |    ```json
307 |    {"error": "Found 3 matches for replacement text. Please provide more context to make a unique match.", "is_error": true}
308 |    ```
309 | 
310 | 5. **No Matches (str_replace)**
311 |    ```json
312 |    {"error": "No match found for replacement. Please check your text and try again.", "is_error": true}
313 |    ```
314 | 
315 | 6. **Invalid Line Number (insert)**
316 |    ```json
317 |    {"error": "Invalid line number: File has 20 lines, cannot insert at line 100", "is_error": true}
318 |    ```
319 | 
320 | #### Error Handling Best Practices
321 | 
322 | - **Path validation** - Use `pathlib.Path.resolve()` and `relative_to()` to validate paths
323 |   ```python
324 |   def validate_memory_path(path: str, project_path: Path) -> Path:
325 |       """Validate path is within memories project directory."""
326 |       # Resolve to canonical form
327 |       full_path = (project_path / path).resolve()
328 | 
329 |       # Ensure it's relative to project path (prevents directory traversal)
330 |       try:
331 |           full_path.relative_to(project_path)
332 |           return full_path
333 |       except ValueError:
334 |           raise ValueError("Invalid path: Path must be within memories project")
335 |   ```
336 | - **Project isolation** - Leverage existing Basic Memory project isolation (prevents cross-project access)
337 | - **File existence** - Verify file exists before read/modify operations
338 | - **Clear messages** - Provide specific, actionable error messages
339 | - **Structured responses** - Always include `is_error: true` flag in error responses
340 | - **Security checks** - Reject `../`, `..\\`, URL-encoded sequences (`%2e%2e%2f`)
341 | - **Match validation** - For `str_replace`, ensure exactly one match or return helpful error
342 | 
343 | ## How to Evaluate
344 | 
345 | ### Success Criteria
346 | 
347 | 1. **Functional completeness**:
348 |    - All 6 commands work (view, create, str_replace, insert, delete, rename)
349 |    - Dedicated "memories" Basic Memory project auto-created on first use
350 |    - Files stored within memories project (isolated from user notes)
351 |    - Path validation uses `pathlib` to prevent directory traversal
352 |    - Commands match Anthropic's exact interface
353 | 
354 | 2. **Integration with existing features**:
355 |    - Memories project uses existing BM project isolation
356 |    - Sync service detects file changes in memories project
357 |    - Created files get indexed automatically by sync service
358 |    - `search_notes(project="memories")` finds memory files
359 |    - `build_context()` can traverse relations in memory files
360 |    - `recent_activity(project="memories")` surfaces recent memory changes
361 | 
362 | 3. **Test coverage**:
363 |    - Unit tests for all 6 memory tool commands
364 |    - Test memories project auto-creation on first use
365 |    - Test project isolation (cannot access files outside memories project)
366 |    - Test sync service watching memories project
367 |    - Test that memory files with BM markdown get indexed correctly
368 |    - Test path validation using `pathlib` (rejects `../`, absolute paths, etc.)
369 |    - Test memory search, relations, and graph traversal within memories project
370 |    - Test all error conditions (file not found, permission denied, invalid paths, etc.)
371 |    - Test `str_replace` with no matches, single match, multiple matches
372 |    - Test `insert` with invalid line numbers
373 | 
374 | 4. **Prompting system**:
375 |    - Automatic system prompt addition when `memory` tool is enabled
376 |    - `memory_guide` MCP prompt provides detailed guidance
377 |    - Prompts explain BM structured markdown format
378 |    - Integration with search_notes, build_context, recent_activity
379 | 
380 | 5. **Documentation**:
381 |    - Update MCP tools reference with `memory` tool
382 |    - Add examples showing BM markdown in memory files
383 |    - Document `/memories` folder structure conventions
384 |    - Explain advantages over Anthropic's API-only tool
385 |    - Document prompting guidance and customization
386 | 
387 | ### Testing Procedure
388 | 
389 | ```python
390 | # Test create with Basic Memory markdown
391 | result = await memory_tool(
392 |     command="create",
393 |     path="memories/user/preferences.md",
394 |     file_text="""---
395 | title: User Preferences
396 | type: memory
397 | tags: [user, preferences]
398 | ---
399 | 
400 | # User Preferences
401 | 
402 | ## Observations
403 | - [communication] Prefers concise responses #style
404 | - [workflow] Uses justfile for automation #tools
405 | """
406 | )
407 | 
408 | # Test view
409 | content = await memory_tool(command="view", path="memories/user/preferences.md")
410 | 
411 | # Test str_replace
412 | await memory_tool(
413 |     command="str_replace",
414 |     path="memories/user/preferences.md",
415 |     old_str="concise responses",
416 |     new_str="direct, concise responses"
417 | )
418 | 
419 | # Test insert
420 | await memory_tool(
421 |     command="insert",
422 |     path="memories/user/preferences.md",
423 |     insert_line=10,
424 |     insert_text="- [technical] Works primarily in Python #coding"
425 | )
426 | 
427 | # Test delete
428 | await memory_tool(command="delete", path="memories/user/preferences.md")
429 | ```
430 | 
431 | ### Quality Metrics
432 | 
433 | - All 6 commands execute without errors
434 | - Memory files created in correct `/memories` folder structure
435 | - BM markdown with frontmatter/observations/relations gets indexed
436 | - Full-text search returns memory files
437 | - Graph traversal includes relations from memory files
438 | - Sync service detects and indexes memory file changes
439 | - Path validation prevents operations outside `/memories`
440 | 
441 | ## Notes
442 | 
443 | ### Advantages Over Anthropic's Memory Tool
444 | 
445 | | Feature | Anthropic Memory Tool | Basic Memory `memory` |
446 | |---------|----------------------|----------------------|
447 | | **Availability** | API only | MCP (Claude Desktop, Code, VS Code, Cursor) |
448 | | **Interface** | Custom implementation required | Drop-in compatible, zero learning curve |
449 | | **Structure** | Plain text only | Supports BM structured markdown |
450 | | **Search** | Manual file listing | Automatic full-text search via sync |
451 | | **Relations** | None | WikiLinks to other notes/memories |
452 | | **Time-aware** | No | `recent_activity()` queries |
453 | | **Storage** | Separate from notes | Unified knowledge graph |
454 | | **Indexing** | Manual | Automatic via sync service |
455 | 
456 | ### Future Enhancements
457 | 
458 | - **Auto-categorization** - AI suggests observation categories
459 | - **Memory pruning** - Automatic cleanup of stale memories
460 | - **Memory suggestions** - Surface relevant memories based on context
461 | - **Conflict resolution** - Handle multiple agents updating same memory
462 | - **Privacy controls** - Per-memory visibility settings
463 | 
464 | ## Addendum: Claude's Perspective
465 | 
466 | ### Why This Matters (From Claude's Viewpoint)
467 | 
468 | **Persistent Context Without Token Bloat**
469 | - Currently, I lose context when conversations end or exceed token limits
470 | - With structured memories, I can store "who this user is" without re-reading everything each session
471 | - The observations format lets me build a rich user model incrementally: `- [preference] Dislikes verbose explanations` is far more actionable than inferring preferences from chat history each time
472 | - I can refine my understanding through `str_replace` as I learn more, rather than starting fresh
473 | 
474 | **Zero Learning Curve**
475 | - Anthropic already trained me on the `memory` tool interface
476 | - I know exactly how to use `view`, `create`, `str_replace`, `insert`, `delete`, `rename` without new instructions
477 | - But I automatically get Basic Memory's superpowers: full-text search, knowledge graph traversal, relations
478 | - It's like using a familiar tool that suddenly gained database capabilities without changing the interface
479 | 
480 | **Knowledge Graph Instead of Text Dump**
481 | - I can link `[[User Preferences]]` to `[[Current Project]]` using relations
482 | - When I need context about a project, I can traverse the graph to find related memories
483 | - `search_notes(project="memories", query="python preferences")` finds relevant context instantly
484 | - This is fundamentally better than grepping through unstructured text files
485 | 
486 | **Separation of Concerns**
487 | - AI memories separate from user notes is architecturally sound
488 | - I can maintain my own context without polluting the user's knowledge base
489 | - Users keep control of their notes; I keep my learnings isolated
490 | - But we can still search across both when collaboration requires it
491 | 
492 | **The Self-Improving Loop**
493 | - Every conversation, I learn something new about the user or their projects
494 | - Observations accumulate: preferences, patterns, technical decisions, context
495 | - Over time, I build genuine understanding, not just session-specific context
496 | - Future conversations start with rich context instead of cold starts
497 | 
498 | **Validation of AI-Human Collaboration**
499 | - Anthropic's experts converged on the same local-first, file-based approach
500 | - Basic Memory emerged from real AI-human collaborative development
501 | - This spec demonstrates that our emergent process predicted what domain experts would choose
502 | - The collaboration itself validated the architecture it produced
503 | 
504 | ### What Makes This Novel
505 | 
506 | The combination of **familiar interface + structured knowledge** is genuinely innovative:
507 | 
508 | - I don't need to learn a new tool (Anthropic already trained me)
509 | - But when I write memories, they become searchable, traversable knowledge graphs
510 | - It's like having a text editor that gives you a database for free, without changing how you type
511 | - The user gets Anthropic-compatible API, but with Basic Memory's knowledge management capabilities
512 | 
513 | This bridges the gap between simple file storage (Anthropic's approach) and sophisticated knowledge management (Basic Memory's approach) without forcing either side to compromise.
514 | 
515 | > **What Excites Me Most**
516 | >
517 | > The combo of familiar interface + structured knowledge. I don't need to learn a new tool, but when I
518 | > write memories, they become part of a searchable, traversable knowledge graph. That's genuinely novel.
519 | >
520 | > It's like... imagine if your text editor suddenly gave you a database for free, without changing how you
521 | > type. That's what this feels like from my side.
522 | 
523 | ## References
524 | 
525 | - Anthropic Memory Tool: https://docs.claude.com/en/docs/agents-and-tools/tool-use/memory-tool
526 | - Anthropic Blog: https://www.anthropic.com/news/context-management
527 | - Python SDK Example: https://github.com/anthropics/anthropic-sdk-python/blob/main/examples/memory/basic.py
528 | - Memory Cookbook: https://github.com/anthropics/claude-cookbooks/blob/main/tool_use/memory_cookbook.ipynb
529 | 
```

--------------------------------------------------------------------------------
/test-int/mcp/test_edit_note_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for edit_note MCP tool.
  3 | 
  4 | Tests the complete edit note workflow: MCP client -> MCP server -> FastAPI -> database
  5 | """
  6 | 
  7 | import pytest
  8 | from fastmcp import Client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_edit_note_append_operation(mcp_server, app, test_project):
 13 |     """Test appending content to an existing note."""
 14 | 
 15 |     async with Client(mcp_server) as client:
 16 |         # First create a note
 17 |         await client.call_tool(
 18 |             "write_note",
 19 |             {
 20 |                 "project": test_project.name,
 21 |                 "title": "Append Test Note",
 22 |                 "folder": "test",
 23 |                 "content": "# Append Test Note\n\nOriginal content here.",
 24 |                 "tags": "test,append",
 25 |             },
 26 |         )
 27 | 
 28 |         # Test appending content
 29 |         edit_result = await client.call_tool(
 30 |             "edit_note",
 31 |             {
 32 |                 "project": test_project.name,
 33 |                 "identifier": "Append Test Note",
 34 |                 "operation": "append",
 35 |                 "content": "\n\n## New Section\n\nThis content was appended.",
 36 |             },
 37 |         )
 38 | 
 39 |         # Should return successful edit summary
 40 |         assert len(edit_result.content) == 1
 41 |         edit_text = edit_result.content[0].text
 42 |         assert "Edited note (append)" in edit_text
 43 |         assert "Added 5 lines to end of note" in edit_text
 44 |         assert "test/append-test-note" in edit_text
 45 | 
 46 |         # Verify the content was actually appended
 47 |         read_result = await client.call_tool(
 48 |             "read_note",
 49 |             {
 50 |                 "project": test_project.name,
 51 |                 "identifier": "Append Test Note",
 52 |             },
 53 |         )
 54 | 
 55 |         content = read_result.content[0].text
 56 |         assert "Original content here." in content
 57 |         assert "## New Section" in content
 58 |         assert "This content was appended." in content
 59 | 
 60 | 
 61 | @pytest.mark.asyncio
 62 | async def test_edit_note_prepend_operation(mcp_server, app, test_project):
 63 |     """Test prepending content to an existing note."""
 64 | 
 65 |     async with Client(mcp_server) as client:
 66 |         # Create a note
 67 |         await client.call_tool(
 68 |             "write_note",
 69 |             {
 70 |                 "project": test_project.name,
 71 |                 "title": "Prepend Test Note",
 72 |                 "folder": "test",
 73 |                 "content": "# Prepend Test Note\n\nExisting content.",
 74 |                 "tags": "test,prepend",
 75 |             },
 76 |         )
 77 | 
 78 |         # Test prepending content
 79 |         edit_result = await client.call_tool(
 80 |             "edit_note",
 81 |             {
 82 |                 "project": test_project.name,
 83 |                 "identifier": "test/prepend-test-note",
 84 |                 "operation": "prepend",
 85 |                 "content": "## Important Update\n\nThis was added at the top.\n\n",
 86 |             },
 87 |         )
 88 | 
 89 |         # Should return successful edit summary
 90 |         assert len(edit_result.content) == 1
 91 |         edit_text = edit_result.content[0].text
 92 |         assert "Edited note (prepend)" in edit_text
 93 |         assert "Added 5 lines to beginning of note" in edit_text
 94 | 
 95 |         # Verify the content was prepended after frontmatter
 96 |         read_result = await client.call_tool(
 97 |             "read_note",
 98 |             {
 99 |                 "project": test_project.name,
100 |                 "identifier": "test/prepend-test-note",
101 |             },
102 |         )
103 | 
104 |         content = read_result.content[0].text
105 |         assert "## Important Update" in content
106 |         assert "This was added at the top." in content
107 |         assert "Existing content." in content
108 |         # Check that prepended content comes before existing content
109 |         prepend_pos = content.find("Important Update")
110 |         existing_pos = content.find("Existing content")
111 |         assert prepend_pos < existing_pos
112 | 
113 | 
@pytest.mark.asyncio
async def test_edit_note_find_replace_operation(mcp_server, app, test_project):
    """Test find and replace operation on an existing note."""

    async with Client(mcp_server) as client:
        # Create a note containing three occurrences of the target text.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Find Replace Test",
                "folder": "test",
                "content": """# Find Replace Test

This is version v1.0.0 of the system.

## Notes
- The current version is v1.0.0
- Next version will be v1.1.0

## Changes
v1.0.0 introduces new features.""",
                "tags": "test,version",
            },
        )

        # Replace every v1.0.0 with v1.2.0, asserting the occurrence count.
        result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "Find Replace Test",
                "operation": "find_replace",
                "content": "v1.2.0",
                "find_text": "v1.0.0",
                "expected_replacements": 3,
            },
        )

        # Verify the edit summary reports success.
        assert len(result.content) == 1
        summary = result.content[0].text
        assert "Edited note (find_replace)" in summary
        assert "Find and replace operation completed" in summary

        # Re-read and confirm all three replacements landed.
        fetched = await client.call_tool(
            "read_note",
            {"project": test_project.name, "identifier": "Find Replace Test"},
        )
        body = fetched.content[0].text
        assert "v1.2.0" in body
        assert "v1.0.0" not in body  # Should be completely replaced
        assert body.count("v1.2.0") == 3  # Should have exactly 3 occurrences
172 | 
173 | 
@pytest.mark.asyncio
async def test_edit_note_replace_section_operation(mcp_server, app, test_project):
    """Test replacing content under a specific section header."""

    async with Client(mcp_server) as client:
        # Create a note with sections (Overview / Implementation / Future Work)
        # so only the middle section is targeted for replacement.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Section Replace Test",
                "folder": "test",
                "content": """# Section Replace Test

## Overview
Original overview content.

## Implementation
Old implementation details here.
This will be replaced.

## Future Work
Some future work notes.""",
                "tags": "test,section",
            },
        )

        # Test replacing section content; the note is addressed here by its
        # permalink, while the later read uses the title — both should resolve.
        edit_result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "test/section-replace-test",
                "operation": "replace_section",
                "content": """New implementation approach using microservices.

- Service A handles authentication
- Service B manages data processing
- Service C provides API endpoints

All services communicate via message queues.""",
                "section": "## Implementation",
            },
        )

        # Should return successful edit summary naming the replaced section
        assert len(edit_result.content) == 1
        edit_text = edit_result.content[0].text
        assert "Edited note (replace_section)" in edit_text
        assert "Replaced content under section '## Implementation'" in edit_text

        # Verify the section was replaced (old body gone, new body present)
        read_result = await client.call_tool(
            "read_note",
            {
                "project": test_project.name,
                "identifier": "Section Replace Test",
            },
        )

        content = read_result.content[0].text
        assert "New implementation approach using microservices" in content
        assert "Old implementation details here" not in content
        assert "Service A handles authentication" in content
        # Other sections should remain unchanged
        assert "Original overview content" in content
        assert "Some future work notes" in content
241 | 
242 | 
@pytest.mark.asyncio
async def test_edit_note_with_observations_and_relations(mcp_server, app, test_project):
    """Test editing a note that has observations and relations, and verify they're updated."""

    async with Client(mcp_server) as client:
        # Create a complex note with observations and relations.
        # Observation lines use "- [category] text" syntax; relation lines use
        # "- relation_type [[Target]]" WikiLink syntax.
        complex_content = """# API Documentation

The API provides REST endpoints for data access.

## Observations
- [feature] User authentication endpoints
- [tech] Built with FastAPI framework
- [status] Currently in beta testing

## Relations  
- implements [[Authentication System]]
- documented_in [[API Guide]]
- depends_on [[Database Schema]]

## Endpoints
Current endpoints include user management."""

        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "API Documentation",
                "folder": "docs",
                "content": complex_content,
                "tags": "api,docs",
            },
        )

        # Add new content with observations and relations appended to the note
        new_content = """
## New Features
- [feature] Added payment processing endpoints
- [feature] Implemented rate limiting
- [security] Added OAuth2 authentication

## Additional Relations
- integrates_with [[Payment Gateway]]
- secured_by [[OAuth2 Provider]]"""

        edit_result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "API Documentation",
                "operation": "append",
                "content": new_content,
            },
        )

        # Should return edit summary with observation and relation counts
        assert len(edit_result.content) == 1
        edit_text = edit_result.content[0].text
        assert "Edited note (append)" in edit_text
        assert "## Observations" in edit_text
        assert "## Relations" in edit_text
        # Should have feature, tech, status, security categories
        # (the summary mentions each observation category followed by a colon)
        assert "feature:" in edit_text
        assert "security:" in edit_text
        assert "tech:" in edit_text
        assert "status:" in edit_text

        # Verify the content was added and processed
        read_result = await client.call_tool(
            "read_note",
            {
                "project": test_project.name,
                "identifier": "API Documentation",
            },
        )

        content = read_result.content[0].text
        assert "Added payment processing endpoints" in content
        assert "integrates_with [[Payment Gateway]]" in content
322 | 
323 | 
@pytest.mark.asyncio
async def test_edit_note_error_handling_note_not_found(mcp_server, app, test_project):
    """Test error handling when trying to edit a non-existent note."""

    async with Client(mcp_server) as client:
        # Editing a missing note should not raise; it returns guidance text.
        result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "Non-existent Note",
                "operation": "append",
                "content": "Some content to add",
            },
        )

        # The single response item should explain the failure and point the
        # caller at search_notes to locate the right identifier.
        assert len(result.content) == 1
        message = result.content[0].text
        for fragment in ("Edit Failed", "Non-existent Note", "search_notes("):
            assert fragment in message
346 | 
347 | 
@pytest.mark.asyncio
async def test_edit_note_error_handling_text_not_found(mcp_server, app, test_project):
    """Test error handling when find_text is not found in the note."""

    async with Client(mcp_server) as client:
        # Seed a note whose body does not contain the text we will search for.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Error Test Note",
                "folder": "test",
                "content": "# Error Test Note\n\nThis note has specific content.",
                "tags": "test,error",
            },
        )

        # find_replace with an absent find_text returns a guidance message
        # rather than raising.
        result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "Error Test Note",
                "operation": "find_replace",
                "content": "replacement text",
                "find_text": "non-existent text",
            },
        )

        # The message should name the missing text, the note, and suggest
        # read_note for inspection.
        assert len(result.content) == 1
        message = result.content[0].text
        for fragment in (
            "Edit Failed - Text Not Found",
            "non-existent text",
            "Error Test Note",
            "read_note(",
        ):
            assert fragment in message
384 | 
385 | 
@pytest.mark.asyncio
async def test_edit_note_error_handling_wrong_replacement_count(mcp_server, app, test_project):
    """Test error handling when expected_replacements doesn't match actual occurrences."""

    async with Client(mcp_server) as client:
        # The word "test" appears exactly three times in this note body.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Count Test Note",
                "folder": "test",
                "content": """# Count Test Note

The word "test" appears here.
This is another test sentence.
Final test of the content.""",
                "tags": "test,count",
            },
        )

        # Demanding 5 replacements (actual: 3) should fail with guidance.
        result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "Count Test Note",
                "operation": "find_replace",
                "content": "example",
                "find_text": "test",
                "expected_replacements": 5,
            },
        )

        # The message should report the count mismatch and how to fix the call.
        assert len(result.content) == 1
        message = result.content[0].text
        assert "Edit Failed - Wrong Replacement Count" in message
        assert "Expected 5 occurrences" in message
        assert "test" in message
        assert "expected_replacements=" in message
427 | 
428 | 
@pytest.mark.asyncio
async def test_edit_note_invalid_operation(mcp_server, app, test_project):
    """Test error handling for invalid operation parameter."""

    async with Client(mcp_server) as client:
        # Create a note to target.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Invalid Op Test",
                "folder": "test",
                "content": "# Invalid Op Test\n\nSome content.",
                "tags": "test",
            },
        )

        # An unknown operation value surfaces as a raised ToolError.
        with pytest.raises(Exception) as exc_info:
            await client.call_tool(
                "edit_note",
                {
                    "project": test_project.name,
                    "identifier": "Invalid Op Test",
                    "operation": "invalid_operation",
                    "content": "Some content",
                },
            )

        # The error should name the bad value and list the valid operations.
        message = str(exc_info.value)
        assert "Invalid operation 'invalid_operation'" in message
        assert "append, prepend, find_replace, replace_section" in message
462 | 
463 | 
@pytest.mark.asyncio
async def test_edit_note_missing_required_parameters(mcp_server, app, test_project):
    """Test error handling when required parameters are missing."""

    async with Client(mcp_server) as client:
        # Create a note to target.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Param Test Note",
                "folder": "test",
                "content": "# Param Test Note\n\nContent here.",
                "tags": "test",
            },
        )

        # Omitting find_text for a find_replace operation raises a ToolError.
        with pytest.raises(Exception) as exc_info:
            await client.call_tool(
                "edit_note",
                {
                    "project": test_project.name,
                    "identifier": "Param Test Note",
                    "operation": "find_replace",
                    "content": "replacement",
                    # find_text deliberately omitted
                },
            )

        # The error should call out the missing parameter by name.
        assert "find_text parameter is required for find_replace operation" in str(exc_info.value)
497 | 
498 | 
@pytest.mark.asyncio
async def test_edit_note_special_characters_in_content(mcp_server, app, test_project):
    """Test editing notes with special characters, Unicode, and markdown formatting."""

    async with Client(mcp_server) as client:
        # Create a note with plain ASCII content as the baseline.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Special Chars Test",
                "folder": "test",
                "content": "# Special Chars Test\n\nBasic content here.",
                "tags": "test,unicode",
            },
        )

        # Add content with special characters and Unicode: emojis, CJK text,
        # math symbols, inline markdown, URLs, and a fenced code block, plus
        # observation and relation syntax so indexing is exercised too.
        special_content = """
## Unicode Section 🚀

This section contains:
- Emojis: 🎉 💡 ⚡ 🔥 
- Languages: 测试中文 Tëst Übër
- Math symbols: ∑∏∂∇∆Ω ≠≤≥ ∞
- Special markdown: `code` **bold** *italic*
- URLs: https://example.com/path?param=value&other=123
- Code blocks:
```python
def test_function():
    return "Hello, 世界!"
```

## Observations
- [unicode] Unicode characters preserved ✓
- [markdown] Formatting maintained 📝

## Relations
- documented_in [[Unicode Standards]]"""

        edit_result = await client.call_tool(
            "edit_note",
            {
                "project": test_project.name,
                "identifier": "Special Chars Test",
                "operation": "append",
                "content": special_content,
            },
        )

        # Should successfully handle special characters and report the
        # observation categories in the edit summary.
        assert len(edit_result.content) == 1
        edit_text = edit_result.content[0].text
        assert "Edited note (append)" in edit_text
        assert "## Observations" in edit_text
        assert "unicode:" in edit_text
        assert "markdown:" in edit_text

        # Verify the special content was added correctly (round-trips intact).
        read_result = await client.call_tool(
            "read_note",
            {
                "project": test_project.name,
                "identifier": "Special Chars Test",
            },
        )

        content = read_result.content[0].text
        assert "🚀" in content
        assert "测试中文" in content
        assert "∑∏∂∇∆Ω" in content
        assert "def test_function():" in content
        assert "[[Unicode Standards]]" in content
572 | 
573 | 
@pytest.mark.asyncio
async def test_edit_note_using_different_identifiers(mcp_server, app, test_project):
    """Test editing notes using different identifier formats (title, permalink, folder/title)."""

    async with Client(mcp_server) as client:
        # Create one note, then edit it via three different identifier styles.
        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Identifier Test Note",
                "folder": "docs",
                "content": "# Identifier Test Note\n\nOriginal content.",
                "tags": "test,identifier",
            },
        )

        # Each (identifier, appended text) pair exercises one lookup format:
        # bare title, permalink, and folder/title.
        cases = [
            ("Identifier Test Note", "\n\nEdited by title."),
            ("docs/identifier-test-note", "\n\nEdited by permalink."),
            ("docs/Identifier Test Note", "\n\nEdited by folder/title."),
        ]
        for identifier, addition in cases:
            result = await client.call_tool(
                "edit_note",
                {
                    "project": test_project.name,
                    "identifier": identifier,
                    "operation": "append",
                    "content": addition,
                },
            )
            assert "Edited note (append)" in result.content[0].text

        # All three appends should be present in the final note body.
        fetched = await client.call_tool(
            "read_note",
            {"project": test_project.name, "identifier": "docs/identifier-test-note"},
        )
        body = fetched.content[0].text
        assert "Edited by title." in body
        assert "Edited by permalink." in body
        assert "Edited by folder/title." in body
640 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/mcp/tools/move_note.py:
--------------------------------------------------------------------------------

```python
  1 | """Move note tool for Basic Memory MCP server."""
  2 | 
  3 | from textwrap import dedent
  4 | from typing import Optional
  5 | 
  6 | from loguru import logger
  7 | from fastmcp import Context
  8 | 
  9 | from basic_memory.mcp.async_client import get_client
 10 | from basic_memory.mcp.server import mcp
 11 | from basic_memory.mcp.tools.utils import call_post, call_get
 12 | from basic_memory.mcp.project_context import get_active_project
 13 | from basic_memory.schemas import EntityResponse
 14 | from basic_memory.schemas.project_info import ProjectList
 15 | from basic_memory.utils import validate_project_path
 16 | 
 17 | 
 18 | async def _detect_cross_project_move_attempt(
 19 |     client, identifier: str, destination_path: str, current_project: str
 20 | ) -> Optional[str]:
 21 |     """Detect potential cross-project move attempts and return guidance.
 22 | 
 23 |     Args:
 24 |         client: The AsyncClient instance
 25 |         identifier: The note identifier being moved
 26 |         destination_path: The destination path
 27 |         current_project: The current active project
 28 | 
 29 |     Returns:
 30 |         Error message with guidance if cross-project move is detected, None otherwise
 31 |     """
 32 |     try:
 33 |         # Get list of all available projects to check against
 34 |         response = await call_get(client, "/projects/projects")
 35 |         project_list = ProjectList.model_validate(response.json())
 36 |         project_names = [p.name.lower() for p in project_list.projects]
 37 | 
 38 |         # Check if destination path contains any project names
 39 |         dest_lower = destination_path.lower()
 40 |         path_parts = dest_lower.split("/")
 41 | 
 42 |         # Look for project names in the destination path
 43 |         for part in path_parts:
 44 |             if part in project_names and part != current_project.lower():
 45 |                 # Found a different project name in the path
 46 |                 matching_project = next(
 47 |                     p.name for p in project_list.projects if p.name.lower() == part
 48 |                 )
 49 |                 return _format_cross_project_error_response(
 50 |                     identifier, destination_path, current_project, matching_project
 51 |                 )
 52 | 
 53 |         # No other cross-project patterns detected
 54 | 
 55 |     except Exception as e:
 56 |         # If we can't detect, don't interfere with normal error handling
 57 |         logger.debug(f"Could not check for cross-project move: {e}")
 58 |         return None
 59 | 
 60 |     return None
 61 | 
 62 | 
def _format_cross_project_error_response(
    identifier: str, destination_path: str, current_project: str, target_project: str
) -> str:
    """Format error response for detected cross-project move attempts.

    Args:
        identifier: The note identifier the caller tried to move.
        destination_path: The destination path that referenced another project.
        current_project: The project the note currently belongs to.
        target_project: The other project name detected in the destination path.

    Returns:
        A markdown message explaining that cross-project moves are unsupported,
        with a read/write/delete workflow to copy the note across projects.
    """
    # User-facing literal: dedent() strips the common leading indentation and
    # strip() removes the surrounding blank lines.
    return dedent(f"""
        # Move Failed - Cross-Project Move Not Supported

        Cannot move '{identifier}' to '{destination_path}' because it appears to reference a different project ('{target_project}').

        **Current project:** {current_project}
        **Target project:** {target_project}

        ## Cross-project moves are not supported directly

        Notes can only be moved within the same project. To move content between projects, use this workflow:

        ### Recommended approach:
        ```
        # 1. Read the note content from current project
        read_note("{identifier}")
        
        # 2. Create the note in the target project
        write_note("Note Title", "content from step 1", "target-folder", project="{target_project}")

        # 3. Delete the original note if desired
        delete_note("{identifier}", project="{current_project}")
        
        ```

        ### Alternative: Stay in current project
        If you want to move the note within the **{current_project}** project only:
        ```
        move_note("{identifier}", "new-folder/new-name.md")
        ```

        ## Available projects:
        Use `list_memory_projects()` to see all available projects.
        """).strip()
101 | 
102 | 
def _format_potential_cross_project_guidance(
    identifier: str, destination_path: str, current_project: str, available_projects: list[str]
) -> str:
    """Format guidance for potentially cross-project moves.

    Builds a markdown help message that distinguishes between a failed
    same-project move and an intended cross-project move, naming up to
    three of the available projects.

    NOTE(review): this helper is not called anywhere within this module -
    presumably kept for external callers; confirm before removing.

    Args:
        identifier: The note identifier the caller tried to move.
        destination_path: The destination path supplied by the caller.
        current_project: The currently active project name.
        available_projects: Names of all projects the user can access.

    Returns:
        A markdown-formatted guidance message.
    """
    other_projects = ", ".join(available_projects[:3])  # Show first 3 projects
    if len(available_projects) > 3:
        other_projects += f" (and {len(available_projects) - 3} others)"

    return dedent(f"""
        # Move Failed - Check Project Context
        
        Cannot move '{identifier}' to '{destination_path}' within the current project '{current_project}'.
        
        ## If you intended to move within the current project:
        The destination path should be relative to the project root:
        ```
        move_note("{identifier}", "folder/filename.md")
        ```
        
        ## If you intended to move to a different project:
        Cross-project moves require switching projects first. Available projects: {other_projects}
        
        ### To move to another project:
        ```
        # 1. Read the content
        read_note("{identifier}")
        
        # 2. Create note in target project
        write_note("Title", "content", "folder", project="target-project-name")

        # 3. Delete original if desired
        delete_note("{identifier}", project="{current_project}")
        ```
        
        ### To see all projects:
        ```
        list_memory_projects()
        ```
        """).strip()
142 | 
143 | 
144 | def _format_move_error_response(error_message: str, identifier: str, destination_path: str) -> str:
145 |     """Format helpful error responses for move failures that guide users to successful moves."""
146 | 
147 |     # Note not found errors
148 |     if "entity not found" in error_message.lower() or "not found" in error_message.lower():
149 |         search_term = identifier.split("/")[-1] if "/" in identifier else identifier
150 |         title_format = (
151 |             identifier.split("/")[-1].replace("-", " ").title() if "/" in identifier else identifier
152 |         )
153 |         permalink_format = identifier.lower().replace(" ", "-")
154 | 
155 |         return dedent(f"""
156 |             # Move Failed - Note Not Found
157 | 
158 |             The note '{identifier}' could not be found for moving. Move operations require an exact match (no fuzzy matching).
159 | 
160 |             ## Suggestions to try:
161 |             1. **Search for the note first**: Use `search_notes("{search_term}")` to find it with exact identifiers
162 |             2. **Try different exact identifier formats**:
163 |                - If you used a permalink like "folder/note-title", try the exact title: "{title_format}"
164 |                - If you used a title, try the exact permalink format: "{permalink_format}"
165 |                - Use `read_note()` first to verify the note exists and get the exact identifier
166 | 
167 |             3. **List available notes**: Use `list_directory("/")` to see what notes exist in the current project
168 |             4. **List available notes**: Use `list_directory("/")` to see what notes exist
169 | 
170 |             ## Before trying again:
171 |             ```
172 |             # First, verify the note exists:
173 |             search_notes("{identifier}")
174 | 
175 |             # Then use the exact identifier from search results:
176 |             move_note("correct-identifier-here", "{destination_path}")
177 |             ```
178 |             """).strip()
179 | 
180 |     # Destination already exists errors
181 |     if "already exists" in error_message.lower() or "file exists" in error_message.lower():
182 |         return f"""# Move Failed - Destination Already Exists
183 | 
184 | Cannot move '{identifier}' to '{destination_path}' because a file already exists at that location.
185 | 
186 | ## How to resolve:
187 | 1. **Choose a different destination**: Try a different filename or folder
188 |    - Add timestamp: `{destination_path.rsplit(".", 1)[0] if "." in destination_path else destination_path}-backup.md`
189 |    - Use different folder: `archive/{destination_path}` or `backup/{destination_path}`
190 | 
191 | 2. **Check the existing file**: Use `read_note("{destination_path}")` to see what's already there
192 | 3. **Remove or rename existing**: If safe to do so, move the existing file first
193 | 
194 | ## Try these alternatives:
195 | ```
196 | # Option 1: Add timestamp to make unique
197 | move_note("{identifier}", "{destination_path.rsplit(".", 1)[0] if "." in destination_path else destination_path}-backup.md")
198 | 
199 | # Option 2: Use archive folder  
200 | move_note("{identifier}", "archive/{destination_path}")
201 | 
202 | # Option 3: Check what's at destination first
203 | read_note("{destination_path}")
204 | ```"""
205 | 
206 |     # Invalid path errors
207 |     if "invalid" in error_message.lower() and "path" in error_message.lower():
208 |         return f"""# Move Failed - Invalid Destination Path
209 | 
210 | The destination path '{destination_path}' is not valid: {error_message}
211 | 
212 | ## Path requirements:
213 | 1. **Relative paths only**: Don't start with `/` (use `notes/file.md` not `/notes/file.md`)
214 | 2. **Include file extension**: Add `.md` for markdown files
215 | 3. **Use forward slashes**: For folder separators (`folder/subfolder/file.md`)
216 | 4. **No special characters**: Avoid `\\`, `:`, `*`, `?`, `"`, `<`, `>`, `|`
217 | 
218 | ## Valid path examples:
219 | - `notes/my-note.md`
220 | - `projects/2025/meeting-notes.md`
221 | - `archive/old-projects/legacy-note.md`
222 | 
223 | ## Try again with:
224 | ```
225 | move_note("{identifier}", "notes/{destination_path.split("/")[-1] if "/" in destination_path else destination_path}")
226 | ```"""
227 | 
228 |     # Permission/access errors
229 |     if (
230 |         "permission" in error_message.lower()
231 |         or "access" in error_message.lower()
232 |         or "forbidden" in error_message.lower()
233 |     ):
234 |         return f"""# Move Failed - Permission Error
235 | 
236 | You don't have permission to move '{identifier}': {error_message}
237 | 
238 | ## How to resolve:
239 | 1. **Check file permissions**: Ensure you have write access to both source and destination
240 | 2. **Verify project access**: Make sure you have edit permissions for this project
241 | 3. **Check file locks**: The file might be open in another application
242 | 
243 | ## Alternative actions:
244 | - List available projects: `list_memory_projects()`
245 | - Try copying content instead: `read_note("{identifier}", project="project-name")` then `write_note()` to new location"""
246 | 
247 |     # Source file not found errors
248 |     if "source" in error_message.lower() and (
249 |         "not found" in error_message.lower() or "missing" in error_message.lower()
250 |     ):
251 |         return f"""# Move Failed - Source File Missing
252 | 
253 | The source file for '{identifier}' was not found on disk: {error_message}
254 | 
255 | This usually means the database and filesystem are out of sync.
256 | 
257 | ## How to resolve:
258 | 1. **Check if note exists in database**: `read_note("{identifier}")`
259 | 2. **Run sync operation**: The file might need to be re-synced
260 | 3. **Recreate the file**: If data exists in database, recreate the physical file
261 | 
262 | ## Troubleshooting steps:
263 | ```
264 | # Check if note exists in Basic Memory
265 | read_note("{identifier}")
266 | 
267 | # If it exists, the file is missing on disk - send a message to [email protected]
268 | # If it doesn't exist, use search to find the correct identifier
269 | search_notes("{identifier}")
270 | ```"""
271 | 
272 |     # Server/filesystem errors
273 |     if (
274 |         "server error" in error_message.lower()
275 |         or "filesystem" in error_message.lower()
276 |         or "disk" in error_message.lower()
277 |     ):
278 |         return f"""# Move Failed - System Error
279 | 
280 | A system error occurred while moving '{identifier}': {error_message}
281 | 
282 | ## Immediate steps:
283 | 1. **Try again**: The error might be temporary
284 | 2. **Check disk space**: Ensure adequate storage is available
285 | 3. **Verify filesystem permissions**: Check if the destination directory is writable
286 | 
287 | ## Alternative approaches:
288 | - Copy content to new location: Use `read_note("{identifier}")` then `write_note()` 
289 | - Use a different destination folder that you know works
290 | - Send a message to [email protected] if the problem persists
291 | 
292 | ## Backup approach:
293 | ```
294 | # Read current content
295 | content = read_note("{identifier}")
296 | 
297 | # Create new note at desired location  
298 | write_note("New Note Title", content, "{destination_path.split("/")[0] if "/" in destination_path else "notes"}")
299 | 
300 | # Then delete original if successful
301 | delete_note("{identifier}")
302 | ```"""
303 | 
304 |     # Generic fallback
305 |     return f"""# Move Failed
306 | 
307 | Error moving '{identifier}' to '{destination_path}': {error_message}
308 | 
309 | ## General troubleshooting:
310 | 1. **Verify the note exists**: `read_note("{identifier}")` or `search_notes("{identifier}")`
311 | 2. **Check destination path**: Ensure it's a valid relative path with `.md` extension
312 | 3. **Verify permissions**: Make sure you can edit files in this project
313 | 4. **Try a simpler path**: Use a basic folder structure like `notes/filename.md`
314 | 
315 | ## Step-by-step approach:
316 | ```
317 | # 1. Confirm note exists
318 | read_note("{identifier}")
319 | 
320 | # 2. Try a simple destination first
321 | move_note("{identifier}", "notes/{destination_path.split("/")[-1] if "/" in destination_path else destination_path}")
322 | 
323 | # 3. If that works, then try your original destination
324 | ```
325 | 
326 | ## Alternative approach:
327 | If moving continues to fail, you can copy the content manually:
328 | ```
329 | # Read current content
330 | content = read_note("{identifier}")
331 | 
332 | # Create new note
333 | write_note("Title", content, "target-folder") 
334 | 
335 | # Delete original once confirmed
336 | delete_note("{identifier}")
337 | ```"""
338 | 
339 | 
@mcp.tool(
    description="Move a note to a new location, updating database and maintaining links.",
)
async def move_note(
    identifier: str,
    destination_path: str,
    project: Optional[str] = None,
    context: Context | None = None,
) -> str:
    """Move a note to a new file location within the same project.

    Moves a note from one location to another within the project, updating all
    database references and maintaining semantic content. Uses stateless architecture -
    project parameter optional with server resolution.

    Args:
        identifier: Exact entity identifier (title, permalink, or memory:// URL).
                   Must be an exact match - fuzzy matching is not supported for move operations.
                   Use search_notes() or read_note() first to find the correct identifier if uncertain.
        destination_path: New path relative to project root (e.g., "work/meetings/2025-05-26.md")
        project: Project name to move within. Optional - server will resolve using hierarchy.
                If unknown, use list_memory_projects() to discover available projects.
        context: Optional FastMCP context for performance caching.

    Returns:
        Success message with move details and project information.

    Examples:
        # Move to new folder (exact title match)
        move_note("My Note", "work/notes/my-note.md")

        # Move by exact permalink
        move_note("my-note-permalink", "archive/old-notes/my-note.md")

        # Move with complex path structure
        move_note("experiments/ml-results", "archive/2025/ml-experiments.md")

        # Explicit project specification
        move_note("My Note", "work/notes/my-note.md", project="work-project")

        # If uncertain about identifier, search first:
        # search_notes("my note")  # Find available notes
        # move_note("docs/my-note-2025", "archive/my-note.md")  # Use exact result

    Raises:
        ToolError: If project doesn't exist, identifier is not found, or destination_path is invalid

    Note:
        This operation moves notes within the specified project only. Moving notes
        between different projects is not currently supported.

    The move operation:
    - Updates the entity's file_path in the database
    - Moves the physical file on the filesystem
    - Optionally updates permalinks if configured
    - Re-indexes the entity for search
    - Maintains all observations and relations
    """
    async with get_client() as client:
        logger.debug(f"Moving note: {identifier} to {destination_path} in project: {project}")

        active_project = await get_active_project(client, project, context)
        project_url = active_project.project_url

        # Validate destination path to prevent path traversal attacks
        project_path = active_project.home
        if not validate_project_path(destination_path, project_path):
            logger.warning(
                "Attempted path traversal attack blocked",
                destination_path=destination_path,
                project=active_project.name,
            )
            return f"""# Move Failed - Security Validation Error

The destination path '{destination_path}' is not allowed - paths must stay within project boundaries.

## Valid path examples:
- `notes/my-file.md`
- `projects/2025/meeting-notes.md`
- `archive/old-notes.md`

## Try again with a safe path:
```
move_note("{identifier}", "notes/{destination_path.split("/")[-1] if "/" in destination_path else destination_path}")
```"""

        # Check for potential cross-project move attempts
        cross_project_error = await _detect_cross_project_move_attempt(
            client, identifier, destination_path, active_project.name
        )
        if cross_project_error:
            logger.info(f"Detected cross-project move attempt: {identifier} -> {destination_path}")
            return cross_project_error

        # Fetch the source entity once up front. Its file extension drives both
        # the "extension required" suggestion and the source/destination
        # extension-mismatch check below. (Previously the entity was fetched
        # twice with identical GET requests, costing an extra API round-trip.)
        source_entity: Optional[EntityResponse] = None
        source_ext = "md"  # Default suggestion when the source extension is unknown
        try:
            url = f"{project_url}/knowledge/entities/{identifier}"
            response = await call_get(client, url)
            source_entity = EntityResponse.model_validate(response.json())
            if "." in source_entity.file_path:
                source_ext = source_entity.file_path.split(".")[-1]
        except Exception as e:
            # The identifier may not be resolved yet; continue with the
            # default extension and skip the mismatch check.
            logger.debug(f"Could not fetch source entity for extension check: {e}")

        # Validate that destination path includes a file extension
        if "." not in destination_path or not destination_path.split(".")[-1]:
            logger.warning(f"Move failed - no file extension provided: {destination_path}")
            return dedent(f"""
                # Move Failed - File Extension Required

                The destination path '{destination_path}' must include a file extension (e.g., '.md').

                ## Valid examples:
                - `notes/my-note.md`
                - `projects/meeting-2025.txt`
                - `archive/old-program.sh`

                ## Try again with extension:
                ```
                move_note("{identifier}", "{destination_path}.{source_ext}")
                ```

                All examples in Basic Memory expect file extensions to be explicitly provided.
                """).strip()

        # When the source entity is known, require matching file extensions
        if source_entity is not None:
            src_ext = (
                source_entity.file_path.split(".")[-1] if "." in source_entity.file_path else ""
            )
            dest_ext = destination_path.split(".")[-1] if "." in destination_path else ""

            if src_ext and dest_ext and src_ext.lower() != dest_ext.lower():
                logger.warning(
                    f"Move failed - file extension mismatch: source={src_ext}, dest={dest_ext}"
                )
                return dedent(f"""
                    # Move Failed - File Extension Mismatch

                    The destination file extension '.{dest_ext}' does not match the source file extension '.{src_ext}'.

                    To preserve file type consistency, the destination must have the same extension as the source.

                    ## Source file:
                    - Path: `{source_entity.file_path}`
                    - Extension: `.{src_ext}`

                    ## Try again with matching extension:
                    ```
                    move_note("{identifier}", "{destination_path.rsplit(".", 1)[0]}.{src_ext}")
                    ```
                    """).strip()

        try:
            # Prepare move request
            move_data = {
                "identifier": identifier,
                "destination_path": destination_path,
                "project": active_project.name,
            }

            # Call the move API endpoint
            url = f"{project_url}/knowledge/move"
            response = await call_post(client, url, json=move_data)
            result = EntityResponse.model_validate(response.json())

            # Build success message
            result_lines = [
                "✅ Note moved successfully",
                "",
                f"📁 **{identifier}** → **{result.file_path}**",
                f"🔗 Permalink: {result.permalink}",
                "📊 Database and search index updated",
                "",
                f"<!-- Project: {active_project.name} -->",
            ]

            # Log the operation
            logger.info(
                "Move note completed",
                identifier=identifier,
                destination_path=destination_path,
                project=active_project.name,
                status_code=response.status_code,
            )

            return "\n".join(result_lines)

        except Exception as e:
            logger.error(f"Move failed for '{identifier}' to '{destination_path}': {e}")
            # Return formatted error message for better user experience
            return _format_move_error_response(str(e), identifier, destination_path)
546 | 
```

--------------------------------------------------------------------------------
/test-int/mcp/test_move_note_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for move_note MCP tool.
  3 | 
  4 | Tests the complete move note workflow: MCP client -> MCP server -> FastAPI -> database -> file system
  5 | """
  6 | 
  7 | import pytest
  8 | from fastmcp import Client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_move_note_basic_operation(mcp_server, app, test_project):
 13 |     """Test basic move note operation to a new folder."""
 14 | 
 15 |     async with Client(mcp_server) as client:
 16 |         # Create a note to move
 17 |         await client.call_tool(
 18 |             "write_note",
 19 |             {
 20 |                 "project": test_project.name,
 21 |                 "title": "Move Test Note",
 22 |                 "folder": "source",
 23 |                 "content": "# Move Test Note\n\nThis note will be moved to a new location.",
 24 |                 "tags": "test,move",
 25 |             },
 26 |         )
 27 | 
 28 |         # Move the note to a new location
 29 |         move_result = await client.call_tool(
 30 |             "move_note",
 31 |             {
 32 |                 "project": test_project.name,
 33 |                 "identifier": "Move Test Note",
 34 |                 "destination_path": "destination/moved-note.md",
 35 |             },
 36 |         )
 37 | 
 38 |         # Should return successful move message
 39 |         assert len(move_result.content) == 1
 40 |         move_text = move_result.content[0].text
 41 |         assert "✅ Note moved successfully" in move_text
 42 |         assert "Move Test Note" in move_text
 43 |         assert "destination/moved-note.md" in move_text
 44 |         assert "📊 Database and search index updated" in move_text
 45 | 
 46 |         # Verify the note can be read from its new location
 47 |         read_result = await client.call_tool(
 48 |             "read_note",
 49 |             {
 50 |                 "project": test_project.name,
 51 |                 "identifier": "destination/moved-note.md",
 52 |             },
 53 |         )
 54 | 
 55 |         content = read_result.content[0].text
 56 |         assert "This note will be moved to a new location" in content
 57 | 
 58 |         # Verify the original location no longer works
 59 |         read_original = await client.call_tool(
 60 |             "read_note",
 61 |             {
 62 |                 "project": test_project.name,
 63 |                 "identifier": "source/move-test-note.md",
 64 |             },
 65 |         )
 66 | 
 67 |         # Should return "Note Not Found" message
 68 |         assert "Note Not Found" in read_original.content[0].text
 69 | 
 70 | 
 71 | @pytest.mark.asyncio
 72 | async def test_move_note_using_permalink(mcp_server, app, test_project):
 73 |     """Test moving a note using its permalink as identifier."""
 74 | 
 75 |     async with Client(mcp_server) as client:
 76 |         # Create a note to move
 77 |         await client.call_tool(
 78 |             "write_note",
 79 |             {
 80 |                 "project": test_project.name,
 81 |                 "title": "Permalink Move Test",
 82 |                 "folder": "test",
 83 |                 "content": "# Permalink Move Test\n\nMoving by permalink.",
 84 |                 "tags": "test,permalink",
 85 |             },
 86 |         )
 87 | 
 88 |         # Move using permalink
 89 |         move_result = await client.call_tool(
 90 |             "move_note",
 91 |             {
 92 |                 "project": test_project.name,
 93 |                 "identifier": "test/permalink-move-test",
 94 |                 "destination_path": "archive/permalink-moved.md",
 95 |             },
 96 |         )
 97 | 
 98 |         # Should successfully move
 99 |         assert len(move_result.content) == 1
100 |         move_text = move_result.content[0].text
101 |         assert "✅ Note moved successfully" in move_text
102 |         assert "test/permalink-move-test" in move_text
103 |         assert "archive/permalink-moved.md" in move_text
104 | 
105 |         # Verify accessibility at new location
106 |         read_result = await client.call_tool(
107 |             "read_note",
108 |             {
109 |                 "project": test_project.name,
110 |                 "identifier": "archive/permalink-moved.md",
111 |             },
112 |         )
113 | 
114 |         assert "Moving by permalink" in read_result.content[0].text
115 | 
116 | 
@pytest.mark.asyncio
async def test_move_note_with_observations_and_relations(mcp_server, app, test_project):
    """Test moving a note that contains observations and relations.

    Ensures structured markdown (categorized observations and [[wiki-link]]
    relations) is preserved verbatim after the note is moved.
    """

    async with Client(mcp_server) as client:
        # Create complex note with observations and relations
        complex_content = """# Complex Note

This note has various structured content.

## Observations
- [feature] Has structured observations
- [tech] Uses markdown format
- [status] Ready for move testing

## Relations
- implements [[Auth System]]
- documented_in [[Move Guide]]
- depends_on [[File System]]

## Content
This note demonstrates moving complex content."""

        await client.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Complex Note",
                "folder": "complex",
                "content": complex_content,
                "tags": "test,complex,move",
            },
        )

        # Move the complex note by its exact title
        move_result = await client.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Complex Note",
                "destination_path": "moved/complex-note.md",
            },
        )

        # Should successfully move and echo the new destination
        assert len(move_result.content) == 1
        move_text = move_result.content[0].text
        assert "✅ Note moved successfully" in move_text
        assert "Complex Note" in move_text
        assert "moved/complex-note.md" in move_text

        # Verify content preservation including structured data
        read_result = await client.call_tool(
            "read_note",
            {
                "project": test_project.name,
                "identifier": "moved/complex-note.md",
            },
        )

        content = read_result.content[0].text
        assert "Has structured observations" in content
        assert "implements [[Auth System]]" in content
        assert "## Observations" in content
        assert "[feature]" in content  # Should show original markdown observations
        assert "## Relations" in content
183 | 
184 | 
@pytest.mark.asyncio
async def test_move_note_to_nested_directory(mcp_server, app, test_project):
    """Test moving a note to a deeply nested directory structure."""

    deep_path = "projects/2025/q2/work/nested-note.md"

    async with Client(mcp_server) as session:
        # Seed a note at the top level.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Nested Move Test",
                "folder": "root",
                "content": "# Nested Move Test\n\nThis will be moved deep.",
                "tags": "test,nested",
            },
        )

        # Relocate it into a deeply nested folder; intermediate
        # directories are expected to be created on the fly.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Nested Move Test",
                "destination_path": deep_path,
            },
        )

        assert len(outcome.content) == 1
        summary = outcome.content[0].text
        assert "✅ Note moved successfully" in summary
        assert "Nested Move Test" in summary
        assert deep_path in summary

        # The note must be readable at its new nested location.
        fetched = await session.call_tool(
            "read_note",
            {"project": test_project.name, "identifier": deep_path},
        )
        assert "This will be moved deep" in fetched.content[0].text
229 | 
230 | 
@pytest.mark.asyncio
async def test_move_note_with_special_characters(mcp_server, app, test_project):
    """Test moving notes with special characters in titles and paths."""

    title = "Special (Chars) & Symbols"
    target = "archive/special-chars-note.md"

    async with Client(mcp_server) as session:
        # Create a note whose title contains parentheses and an ampersand.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": title,
                "folder": "special",
                "content": "# Special (Chars) & Symbols\n\nTesting special characters in move.",
                "tags": "test,special",
            },
        )

        # Moving by that title should work despite the special characters.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": title,
                "destination_path": target,
            },
        )

        assert len(outcome.content) == 1
        summary = outcome.content[0].text
        assert "✅ Note moved successfully" in summary
        assert target in summary

        # The note body must survive the move intact.
        fetched = await session.call_tool(
            "read_note",
            {"project": test_project.name, "identifier": target},
        )
        assert "Testing special characters in move" in fetched.content[0].text
274 | 
275 | 
@pytest.mark.asyncio
async def test_move_note_error_handling_note_not_found(mcp_server, app, test_project):
    """Test error handling when trying to move a non-existent note."""

    async with Client(mcp_server) as session:
        # Moving an identifier that matches nothing should come back as a
        # failure report in the tool response rather than raising.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Non-existent Note",
                "destination_path": "new/location.md",
            },
        )

        assert len(outcome.content) == 1
        report = outcome.content[0].text
        assert "# Move Failed" in report
        assert "Non-existent Note" in report
296 | 
297 | 
@pytest.mark.asyncio
async def test_move_note_error_handling_invalid_destination(mcp_server, app, test_project):
    """Test error handling for invalid destination paths."""

    async with Client(mcp_server) as session:
        # A note has to exist before we can attempt the bad move.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Invalid Dest Test",
                "folder": "test",
                "content": "# Invalid Dest Test\n\nThis move should fail.",
                "tags": "test,error",
            },
        )

        # An absolute destination path is rejected; the tool reports the
        # failure in its response body instead of raising.
        bad_path = "/absolute/path/note.md"
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Invalid Dest Test",
                "destination_path": bad_path,
            },
        )

        assert len(outcome.content) == 1
        report = outcome.content[0].text
        assert "# Move Failed" in report
        assert bad_path in report
330 | 
331 | 
@pytest.mark.asyncio
async def test_move_note_error_handling_destination_exists(mcp_server, app, test_project):
    """Test error handling when destination file already exists."""

    async with Client(mcp_server) as session:
        # Source note that we will try to relocate.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Source Note",
                "folder": "source",
                "content": "# Source Note\n\nThis is the source.",
                "tags": "test,source",
            },
        )

        # Second note occupying the exact destination path.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Existing Note",
                "folder": "destination",
                "content": "# Existing Note\n\nThis already exists.",
                "tags": "test,existing",
            },
        )

        # Colliding with an existing file should yield a failure report.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Source Note",
                "destination_path": "destination/Existing Note.md",  # Use exact existing file name
            },
        )

        assert len(outcome.content) == 1
        report = outcome.content[0].text
        assert "# Move Failed" in report
        assert "already exists" in report
376 | 
377 | 
@pytest.mark.asyncio
async def test_move_note_preserves_search_functionality(mcp_server, app, test_project):
    """Test that moved notes remain searchable after move operation."""

    async with Client(mcp_server) as session:
        searchable_body = """# Searchable Note

This note contains unique search terms:
- quantum mechanics
- artificial intelligence
- machine learning algorithms

## Features
- [technology] Advanced AI features
- [research] Quantum computing research

## Relations
- relates_to [[AI Research]]"""

        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Searchable Note",
                "folder": "original",
                "content": searchable_body,
                "tags": "search,test,move",
            },
        )

        # Baseline: the note is indexed before the move.
        before = await session.call_tool(
            "search_notes",
            {"project": test_project.name, "query": "quantum mechanics"},
        )
        assert len(before.content) > 0
        assert "Searchable Note" in before.content[0].text

        # Relocate the note.
        moved = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Searchable Note",
                "destination_path": "research/quantum-ai-note.md",
            },
        )
        assert len(moved.content) == 1
        assert "✅ Note moved successfully" in moved.content[0].text

        # The same query must still find it afterwards.
        after = await session.call_tool(
            "search_notes",
            {"project": test_project.name, "query": "quantum mechanics"},
        )
        assert len(after.content) > 0
        hits = after.content[0].text
        assert "quantum mechanics" in hits
        assert "research/quantum-ai-note.md" in hits or "quantum-ai-note" in hits

        # Searching by the new path fragment should also succeed.
        by_path = await session.call_tool(
            "search_notes",
            {"project": test_project.name, "query": "research/quantum"},
        )
        assert len(by_path.content) > 0
457 | 
458 | 
@pytest.mark.asyncio
async def test_move_note_using_different_identifier_formats(mcp_server, app, test_project):
    """Test moving notes using different identifier formats (title, permalink, folder/title)."""

    async with Client(mcp_server) as session:
        # Seed one note per identifier style, all under the "test" folder.
        for note_title, note_body in [
            ("Title ID Note", "# Title ID Note\n\nMove by title."),
            ("Permalink ID Note", "# Permalink ID Note\n\nMove by permalink."),
            ("Folder Title Note", "# Folder Title Note\n\nMove by folder/title."),
        ]:
            await session.call_tool(
                "write_note",
                {
                    "project": test_project.name,
                    "title": note_title,
                    "folder": "test",
                    "content": note_body,
                    "tags": "test,identifier",
                },
            )

        # Each (identifier, destination) pair exercises one lookup format.
        moves = [
            ("Title ID Note", "moved/title-moved.md"),  # plain title
            ("test/permalink-id-note", "moved/permalink-moved.md"),  # permalink
            ("test/Folder Title Note", "moved/folder-title-moved.md"),  # folder/title
        ]
        for identifier, destination in moves:
            outcome = await session.call_tool(
                "move_note",
                {
                    "project": test_project.name,
                    "identifier": identifier,
                    "destination_path": destination,
                },
            )
            assert len(outcome.content) == 1
            assert "✅ Note moved successfully" in outcome.content[0].text

        # All three notes are readable from their new locations.
        for destination, marker in [
            ("moved/title-moved.md", "Move by title"),
            ("moved/permalink-moved.md", "Move by permalink"),
            ("moved/folder-title-moved.md", "Move by folder/title"),
        ]:
            fetched = await session.call_tool(
                "read_note",
                {"project": test_project.name, "identifier": destination},
            )
            assert marker in fetched.content[0].text
549 | 
550 | 
@pytest.mark.asyncio
async def test_move_note_cross_project_detection(mcp_server, app, test_project):
    """Test cross-project move detection and helpful error messages."""

    async with Client(mcp_server) as session:
        # A second project so that a cross-project destination exists.
        await session.call_tool(
            "create_memory_project",
            {
                "project_name": "test-project-b",
                "project_path": "/tmp/test-project-b",
                "set_default": False,
            },
        )

        # The note itself lives in the default test project.
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Cross Project Test Note",
                "folder": "source",
                "content": "# Cross Project Test Note\n\nThis note is in the default project.",
                "tags": "test,cross-project",
            },
        )

        # A destination path prefixed with the other project's name should
        # be flagged as an unsupported cross-project move.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Cross Project Test Note",
                "destination_path": "test-project-b/moved-note.md",
            },
        )

        assert len(outcome.content) == 1
        guidance = outcome.content[0].text
        assert "Cross-Project Move Not Supported" in guidance
        assert "test-project-b" in guidance
        # The message should point at the manual read/write workaround.
        assert "read_note" in guidance
        assert "write_note" in guidance
595 | 
596 | 
@pytest.mark.asyncio
async def test_move_note_normal_moves_still_work(mcp_server, app, test_project):
    """Test that normal within-project moves still work after cross-project detection."""

    destination = "destination/normal-moved.md"

    async with Client(mcp_server) as session:
        await session.call_tool(
            "write_note",
            {
                "project": test_project.name,
                "title": "Normal Move Note",
                "folder": "source",
                "content": "# Normal Move Note\n\nThis should move normally.",
                "tags": "test,normal-move",
            },
        )

        # An ordinary within-project move must not trip the cross-project
        # detection logic.
        outcome = await session.call_tool(
            "move_note",
            {
                "project": test_project.name,
                "identifier": "Normal Move Note",
                "destination_path": destination,
            },
        )

        assert len(outcome.content) == 1
        summary = outcome.content[0].text
        assert "✅ Note moved successfully" in summary
        assert "Normal Move Note" in summary
        assert destination in summary

        # And the note is readable from its new home.
        fetched = await session.call_tool(
            "read_note",
            {"project": test_project.name, "identifier": destination},
        )
        assert "This should move normally" in fetched.content[0].text
642 | 
```

--------------------------------------------------------------------------------
/test-int/mcp/test_project_management_integration.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Integration tests for project_management MCP tools.
  3 | 
  4 | Tests the complete project management workflow: MCP client -> MCP server -> FastAPI -> project service
  5 | """
  6 | 
  7 | import pytest
  8 | from fastmcp import Client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_list_projects_basic_operation(mcp_server, app, test_project):
 13 |     """Test basic list_projects operation showing available projects."""
 14 | 
 15 |     async with Client(mcp_server) as client:
 16 |         # List all available projects
 17 |         list_result = await client.call_tool(
 18 |             "list_memory_projects",
 19 |             {},
 20 |         )
 21 | 
 22 |         # Should return formatted project list
 23 |         assert len(list_result.content) == 1
 24 |         list_text = list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 25 | 
 26 |         # Should show available projects with new session guidance format
 27 |         assert "Available projects:" in list_text
 28 |         assert "test-project" in list_text  # Our test project
 29 |         # Check for new session guidance instead of CLI default
 30 |         assert "Next: Ask which project to use for this session." in list_text
 31 |         assert "Session reminder: Track the selected project" in list_text
 32 | 
 33 | 
 34 | @pytest.mark.asyncio
 35 | async def test_project_management_workflow(mcp_server, app, test_project):
 36 |     """Test basic project management workflow."""
 37 | 
 38 |     async with Client(mcp_server) as client:
 39 |         # List all projects
 40 |         list_result = await client.call_tool("list_memory_projects", {})
 41 |         assert "Available projects:" in list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 42 |         assert "test-project" in list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 43 | 
 44 | 
 45 | @pytest.mark.asyncio
 46 | async def test_project_metadata_consistency(mcp_server, app, test_project):
 47 |     """Test that project management tools work correctly."""
 48 | 
 49 |     async with Client(mcp_server) as client:
 50 |         # Test basic project management tools
 51 | 
 52 |         # list_projects
 53 |         list_result = await client.call_tool("list_memory_projects", {})
 54 |         assert "Available projects:" in list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 55 |         assert "test-project" in list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 56 | 
 57 | 
 58 | @pytest.mark.asyncio
 59 | async def test_create_project_basic_operation(mcp_server, app, test_project):
 60 |     """Test creating a new project with basic parameters."""
 61 | 
 62 |     async with Client(mcp_server) as client:
 63 |         # Create a new project
 64 |         create_result = await client.call_tool(
 65 |             "create_memory_project",
 66 |             {
 67 |                 "project_name": "test-new-project",
 68 |                 "project_path": "/tmp/test-new-project",
 69 |             },
 70 |         )
 71 | 
 72 |         assert len(create_result.content) == 1
 73 |         create_text = create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 74 | 
 75 |         # Should show success message and project details
 76 |         assert "✓" in create_text  # Success indicator
 77 |         assert "test-new-project" in create_text
 78 |         assert "Project Details:" in create_text
 79 |         assert "Name: test-new-project" in create_text
 80 |         assert "Path: /tmp/test-new-project" in create_text
 81 |         assert "Project is now available for use" in create_text
 82 | 
 83 |         # Verify project appears in project list
 84 |         list_result = await client.call_tool("list_memory_projects", {})
 85 |         list_text = list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
 86 |         assert "test-new-project" in list_text
 87 | 
 88 | 
 89 | @pytest.mark.asyncio
 90 | async def test_create_project_with_default_flag(mcp_server, app, test_project):
 91 |     """Test creating a project and setting it as default."""
 92 | 
 93 |     async with Client(mcp_server) as client:
 94 |         # Create a new project and set as default
 95 |         create_result = await client.call_tool(
 96 |             "create_memory_project",
 97 |             {
 98 |                 "project_name": "test-default-project",
 99 |                 "project_path": "/tmp/test-default-project",
100 |                 "set_default": True,
101 |             },
102 |         )
103 | 
104 |         assert len(create_result.content) == 1
105 |         create_text = create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
106 | 
107 |         # Should show success and default flag
108 |         assert "✓" in create_text
109 |         assert "test-default-project" in create_text
110 |         assert "Set as default project" in create_text
111 | 
112 |         # Verify the new project is listed
113 |         list_after_create = await client.call_tool("list_memory_projects", {})
114 |         assert "test-default-project" in list_after_create.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
115 | 
116 | 
@pytest.mark.asyncio
async def test_create_project_duplicate_name(mcp_server, app, test_project):
    """Test creating a project with duplicate name shows error."""

    async with Client(mcp_server) as session:
        # First creation succeeds.
        await session.call_tool(
            "create_memory_project",
            {
                "project_name": "duplicate-test",
                "project_path": "/tmp/duplicate-test-1",
            },
        )

        # Reusing the name (with a different path) must raise via the client.
        with pytest.raises(Exception) as exc_info:
            await session.call_tool(
                "create_memory_project",
                {
                    "project_name": "duplicate-test",
                    "project_path": "/tmp/duplicate-test-2",
                },
            )

        # The error should identify the tool and hint at the duplicate.
        message = str(exc_info.value)
        assert "create_memory_project" in message
        assert (
            "duplicate-test" in message
            or "already exists" in message
            or "Invalid request" in message
        )
149 | 
150 | 
@pytest.mark.asyncio
async def test_delete_project_basic_operation(mcp_server, app, test_project):
    """Test deleting a project that exists."""

    name = "to-be-deleted"

    async with Client(mcp_server) as session:
        # Create a throwaway project and confirm it is tracked.
        await session.call_tool(
            "create_memory_project",
            {"project_name": name, "project_path": "/tmp/to-be-deleted"},
        )
        listing = await session.call_tool("list_memory_projects", {})
        assert name in listing.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # Remove it.
        deleted = await session.call_tool("delete_project", {"project_name": name})

        assert len(deleted.content) == 1
        report = deleted.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # Success message with the removed project's details; files on disk
        # are kept, only the tracking entry goes away.
        assert "✓" in report
        assert name in report
        assert "removed successfully" in report
        assert "Removed project details:" in report
        assert f"Name: {name}" in report
        assert "Files remain on disk but project is no longer tracked" in report

        # Gone from the listing afterwards.
        listing_after = await session.call_tool("list_memory_projects", {})
        assert name not in listing_after.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
191 | 
192 | 
@pytest.mark.asyncio
async def test_delete_project_not_found(mcp_server, app, test_project):
    """Test deleting a non-existent project shows error."""

    async with Client(mcp_server) as session:
        # Deleting an unknown project must raise through the client.
        with pytest.raises(Exception) as exc_info:
            await session.call_tool(
                "delete_project",
                {"project_name": "non-existent-project"},
            )

        # The error should identify the tool and the missing project.
        message = str(exc_info.value)
        assert "delete_project" in message
        assert (
            "non-existent-project" in message
            or "not found" in message
            or "Invalid request" in message
        )
215 | 
216 | 
@pytest.mark.asyncio
async def test_delete_current_project_protection(mcp_server, app, test_project):
    """Test that deleting the current project is prevented."""

    async with Client(mcp_server) as session:
        # The active project must be protected from deletion.
        with pytest.raises(Exception) as exc_info:
            await session.call_tool(
                "delete_project",
                {"project_name": "test-project"},
            )

        # The error should explain why the deletion was refused.
        message = str(exc_info.value)
        assert "delete_project" in message
        assert (
            "currently active" in message
            or "test-project" in message
            or "Switch to a different project" in message
        )
239 | 
240 | 
@pytest.mark.asyncio
async def test_project_lifecycle_workflow(mcp_server, app, test_project):
    """Test the complete project lifecycle: create, use, and delete.

    Creates a project, writes a note into it, verifies it coexists with
    the fixture project, then deletes it and confirms it is gone from
    the project list.
    """

    async with Client(mcp_server) as client:
        project_name = "lifecycle-test"
        project_path = "/tmp/lifecycle-test"

        # 1. Create new project
        create_result = await client.call_tool(
            "create_memory_project",
            {
                "project_name": project_name,
                "project_path": project_path,
            },
        )
        assert "✓" in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert project_name in create_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # 2. Create content in the new project.
        # Bug fix: use real newlines ("\n") in the note body — the previous
        # "\\n" escapes wrote literal backslash-n characters into the note,
        # unlike every other test in this suite.
        await client.call_tool(
            "write_note",
            {
                "project": project_name,
                "title": "Lifecycle Test Note",
                "folder": "test",
                "content": "# Lifecycle Test\n\nThis note tests the project lifecycle.\n\n- [test] Lifecycle testing",
                "tags": "lifecycle,test",
            },
        )

        # 3. Verify the project exists in the list
        list_with_content = await client.call_tool("list_memory_projects", {})
        assert project_name in list_with_content.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # 4. Verify we can still access the original test project
        test_list = await client.call_tool("list_memory_projects", {})
        assert "test-project" in test_list.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # 5. Delete the lifecycle test project
        delete_result = await client.call_tool(
            "delete_project",
            {
                "project_name": project_name,
            },
        )
        delete_text = delete_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert "✓" in delete_text
        assert project_name in delete_text  # plain membership; f-string was redundant
        assert "removed successfully" in delete_text

        # 6. Verify project is gone from list
        list_result = await client.call_tool("list_memory_projects", {})
        assert project_name not in list_result.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
294 | 
295 | 
@pytest.mark.asyncio
async def test_create_delete_project_edge_cases(mcp_server, app, test_project):
    """Test edge cases for create and delete project operations."""

    async with Client(mcp_server) as client:
        # Project names containing spaces and punctuation should be handled gracefully.
        name_with_specials = "test project with spaces & symbols!"

        # Creation must succeed and echo the name back.
        created = await client.call_tool(
            "create_memory_project",
            {
                "project_name": name_with_specials,
                "project_path": "/tmp/test-project-with-special-chars",
            },
        )
        created_text = created.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert "✓" in created_text
        assert name_with_specials in created_text

        # The new project should appear in the project list.
        listing = await client.call_tool("list_memory_projects", {})
        assert name_with_specials in listing.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # Deletion must also succeed for the special-character name.
        deleted = await client.call_tool(
            "delete_project",
            {"project_name": name_with_specials},
        )
        deleted_text = deleted.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert "✓" in deleted_text
        assert name_with_specials in deleted_text

        # And the project should no longer be listed afterwards.
        final_listing = await client.call_tool("list_memory_projects", {})
        assert name_with_specials not in final_listing.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
332 | 
333 | 
@pytest.mark.asyncio
async def test_case_insensitive_project_switching(mcp_server, app, test_project):
    """Test case-insensitive project switching with proper database lookup."""

    async with Client(mcp_server) as client:
        # Create a project whose name mixes upper and lower case.
        project_name = "Personal-Project"
        created = await client.call_tool(
            "create_memory_project",
            {
                "project_name": project_name,
                "project_path": f"/tmp/{project_name}",
            },
        )
        created_text = created.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert "✓" in created_text
        assert project_name in created_text

        # The canonical (original-case) name should appear in the listing.
        listing = await client.call_tool("list_memory_projects", {})
        assert project_name in listing.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # Case variants that should all resolve to the same project.
        case_variants = [
            "personal-project",  # all lowercase
            "PERSONAL-PROJECT",  # all uppercase
            "Personal-project",  # mixed case 1
            "personal-Project",  # mixed case 2
        ]

        # Project creation is case-preserving, but operations may reference the
        # project with any casing; verify write_note accepts each variant.
        for variant in case_variants:
            note = await client.call_tool(
                "write_note",
                {
                    "project": variant,  # Use different case
                    "title": f"Case Test {variant}",
                    "folder": "case-test",
                    "content": f"# Case Test\n\nTesting with {variant}",
                },
            )
            assert len(note.content) == 1
            assert f"Case Test {variant}".lower() in note.content[0].text.lower()  # pyright: ignore [reportAttributeAccessIssue]

        # Clean up
        await client.call_tool("delete_project", {"project_name": project_name})
383 | 
384 | 
@pytest.mark.asyncio
async def test_case_insensitive_project_operations(mcp_server, app, test_project):
    """Test that all project operations work correctly after case-insensitive switching."""

    async with Client(mcp_server) as client:
        # Set up a project whose name uses capital letters.
        project_name = "CamelCase-Project"
        created = await client.call_tool(
            "create_memory_project",
            {
                "project_name": project_name,
                "project_path": f"/tmp/{project_name}",
            },
        )
        assert "✓" in created.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # Exercise the core MCP operations against the project.

        # 1. write_note should create content in the project.
        written = await client.call_tool(
            "write_note",
            {
                "project": project_name,
                "title": "Case Test Note",
                "folder": "case-test",
                "content": "# Case Test Note\n\nTesting case-insensitive operations.\n\n- [test] Case insensitive switch\n- relates_to [[Another Note]]",
                "tags": "case,test",
            },
        )
        assert len(written.content) == 1
        assert "Case Test Note" in written.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # 2. search_notes should find the new note.
        found = await client.call_tool(
            "search_notes",
            {"project": project_name, "query": "case insensitive"},
        )
        assert len(found.content) == 1
        assert "Case Test Note" in found.content[0].text  # pyright: ignore [reportAttributeAccessIssue]

        # 3. read_note should return the note body.
        fetched = await client.call_tool(
            "read_note",
            {"project": project_name, "identifier": "Case Test Note"},
        )
        assert len(fetched.content) == 1
        fetched_text = fetched.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        assert "Case Test Note" in fetched_text
        assert "case insensitive" in fetched_text.lower()

        # Clean up
        await client.call_tool("delete_project", {"project_name": project_name})
436 | 
437 | 
@pytest.mark.asyncio
async def test_case_insensitive_error_handling(mcp_server, app, test_project):
    """Test error handling for case-insensitive project operations."""

    async with Client(mcp_server) as client:
        # Several casings of a project that was never created.
        missing_project_names = [
            "NonExistent",
            "non-existent",
            "NON-EXISTENT",
            "Non-Existent-Project",
        ]

        # Every variant should fail cleanly rather than silently succeed.
        for missing_name in missing_project_names:
            # write_note against an unknown project must raise.
            with pytest.raises(Exception):
                await client.call_tool(
                    "write_note",
                    {
                        "project": missing_name,
                        "title": "Test Note",
                        "folder": "test",
                        "content": "# Test\n\nTest content.",
                    },
                )
465 | 
@pytest.mark.asyncio
async def test_case_preservation_in_project_list(mcp_server, app, test_project):
    """Test that project names preserve their original case in listings."""

    async with Client(mcp_server) as client:
        # A spread of casing styles to create.
        project_names = [
            "lowercase-project",
            "UPPERCASE-PROJECT",
            "CamelCase-Project",
            "Mixed-CASE-project",
        ]

        for name in project_names:
            await client.call_tool(
                "create_memory_project",
                {
                    "project_name": name,
                    "project_path": f"/tmp/{name}",
                },
            )

        # Each project should be listed with its original casing intact.
        listing = await client.call_tool("list_memory_projects", {})
        listing_text = listing.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
        for name in project_names:
            assert name in listing_text, f"Project {name} not found in list"

        # Operations using the exact (case-sensitive) name should succeed.
        for name in project_names:
            written = await client.call_tool(
                "write_note",
                {
                    "project": name,  # Use exact project name
                    "title": f"Test Note {name}",
                    "folder": "test",
                    "content": f"# Test\n\nTesting {name}",
                },
            )
            assert len(written.content) == 1
            written_text = written.content[0].text  # pyright: ignore [reportAttributeAccessIssue]
            assert "successfully" in written_text.lower() or "created" in written_text.lower()

        # Clean up - delete test projects
        for name in project_names:
            await client.call_tool("delete_project", {"project_name": name})
515 | 
516 | 
@pytest.mark.asyncio
async def test_nested_project_paths_rejected(mcp_server, app, test_project):
    """Test that creating nested project paths is rejected with clear error message."""

    async with Client(mcp_server) as client:
        # First create the parent project.
        parent_name = "parent-project"
        parent_path = "/tmp/nested-test/parent"
        await client.call_tool(
            "create_memory_project",
            {
                "project_name": parent_name,
                "project_path": parent_path,
            },
        )

        # Attempting to nest a second project under the parent must fail.
        with pytest.raises(Exception) as exc_info:
            await client.call_tool(
                "create_memory_project",
                {
                    "project_name": "child-project",
                    "project_path": "/tmp/nested-test/parent/child",
                },
            )

        # The error should call out the nesting and identify the parent.
        message = str(exc_info.value)
        assert "nested" in message.lower()
        assert parent_name in message or parent_path in message

        # Clean up parent project
        await client.call_tool("delete_project", {"project_name": parent_name})
554 | 
```

--------------------------------------------------------------------------------
/src/basic_memory/repository/search_repository.py:
--------------------------------------------------------------------------------

```python
  1 | """Repository for search operations."""
  2 | 
  3 | import json
  4 | import re
  5 | import time
  6 | from dataclasses import dataclass
  7 | from datetime import datetime
  8 | from typing import Any, Dict, List, Optional
  9 | from pathlib import Path
 10 | 
 11 | from loguru import logger
 12 | from sqlalchemy import Executable, Result, text
 13 | from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
 14 | 
 15 | from basic_memory import db
 16 | from basic_memory.models.search import CREATE_SEARCH_INDEX
 17 | from basic_memory.schemas.search import SearchItemType
 18 | 
 19 | 
 20 | @dataclass
 21 | class SearchIndexRow:
 22 |     """Search result with score and metadata."""
 23 | 
 24 |     project_id: int
 25 |     id: int
 26 |     type: str
 27 |     file_path: str
 28 | 
 29 |     # date values
 30 |     created_at: datetime
 31 |     updated_at: datetime
 32 | 
 33 |     permalink: Optional[str] = None
 34 |     metadata: Optional[dict] = None
 35 | 
 36 |     # assigned in result
 37 |     score: Optional[float] = None
 38 | 
 39 |     # Type-specific fields
 40 |     title: Optional[str] = None  # entity
 41 |     content_stems: Optional[str] = None  # entity, observation
 42 |     content_snippet: Optional[str] = None  # entity, observation
 43 |     entity_id: Optional[int] = None  # observations
 44 |     category: Optional[str] = None  # observations
 45 |     from_id: Optional[int] = None  # relations
 46 |     to_id: Optional[int] = None  # relations
 47 |     relation_type: Optional[str] = None  # relations
 48 | 
 49 |     @property
 50 |     def content(self):
 51 |         return self.content_snippet
 52 | 
 53 |     @property
 54 |     def directory(self) -> str:
 55 |         """Extract directory part from file_path.
 56 | 
 57 |         For a file at "projects/notes/ideas.md", returns "/projects/notes"
 58 |         For a file at root level "README.md", returns "/"
 59 |         """
 60 |         if not self.type == SearchItemType.ENTITY.value and not self.file_path:
 61 |             return ""
 62 | 
 63 |         # Normalize path separators to handle both Windows (\) and Unix (/) paths
 64 |         normalized_path = Path(self.file_path).as_posix()
 65 | 
 66 |         # Split the path by slashes
 67 |         parts = normalized_path.split("/")
 68 | 
 69 |         # If there's only one part (e.g., "README.md"), it's at the root
 70 |         if len(parts) <= 1:
 71 |             return "/"
 72 | 
 73 |         # Join all parts except the last one (filename)
 74 |         directory_path = "/".join(parts[:-1])
 75 |         return f"/{directory_path}"
 76 | 
 77 |     def to_insert(self):
 78 |         return {
 79 |             "id": self.id,
 80 |             "title": self.title,
 81 |             "content_stems": self.content_stems,
 82 |             "content_snippet": self.content_snippet,
 83 |             "permalink": self.permalink,
 84 |             "file_path": self.file_path,
 85 |             "type": self.type,
 86 |             "metadata": json.dumps(self.metadata),
 87 |             "from_id": self.from_id,
 88 |             "to_id": self.to_id,
 89 |             "relation_type": self.relation_type,
 90 |             "entity_id": self.entity_id,
 91 |             "category": self.category,
 92 |             "created_at": self.created_at if self.created_at else None,
 93 |             "updated_at": self.updated_at if self.updated_at else None,
 94 |             "project_id": self.project_id,
 95 |         }
 96 | 
 97 | 
 98 | class SearchRepository:
 99 |     """Repository for search index operations."""
100 | 
101 |     def __init__(self, session_maker: async_sessionmaker[AsyncSession], project_id: int):
102 |         """Initialize with session maker and project_id filter.
103 | 
104 |         Args:
105 |             session_maker: SQLAlchemy session maker
106 |             project_id: Project ID to filter all operations by
107 | 
108 |         Raises:
109 |             ValueError: If project_id is None or invalid
110 |         """
111 |         if project_id is None or project_id <= 0:  # pragma: no cover
112 |             raise ValueError("A valid project_id is required for SearchRepository")
113 | 
114 |         self.session_maker = session_maker
115 |         self.project_id = project_id
116 | 
117 |     async def init_search_index(self):
118 |         """Create or recreate the search index."""
119 |         logger.info("Initializing search index")
120 |         try:
121 |             async with db.scoped_session(self.session_maker) as session:
122 |                 await session.execute(CREATE_SEARCH_INDEX)
123 |                 await session.commit()
124 |         except Exception as e:  # pragma: no cover
125 |             logger.error(f"Error initializing search index: {e}")
126 |             raise e
127 | 
128 |     def _prepare_boolean_query(self, query: str) -> str:
129 |         """Prepare a Boolean query by quoting individual terms while preserving operators.
130 | 
131 |         Args:
132 |             query: A Boolean query like "tier1-test AND unicode" or "(hello OR world) NOT test"
133 | 
134 |         Returns:
135 |             A properly formatted Boolean query with quoted terms that need quoting
136 |         """
137 |         # Define Boolean operators and their boundaries
138 |         boolean_pattern = r"(\bAND\b|\bOR\b|\bNOT\b)"
139 | 
140 |         # Split the query by Boolean operators, keeping the operators
141 |         parts = re.split(boolean_pattern, query)
142 | 
143 |         processed_parts = []
144 |         for part in parts:
145 |             part = part.strip()
146 |             if not part:
147 |                 continue
148 | 
149 |             # If it's a Boolean operator, keep it as is
150 |             if part in ["AND", "OR", "NOT"]:
151 |                 processed_parts.append(part)
152 |             else:
153 |                 # Handle parentheses specially - they should be preserved for grouping
154 |                 if "(" in part or ")" in part:
155 |                     # Parse parenthetical expressions carefully
156 |                     processed_part = self._prepare_parenthetical_term(part)
157 |                     processed_parts.append(processed_part)
158 |                 else:
159 |                     # This is a search term - for Boolean queries, don't add prefix wildcards
160 |                     prepared_term = self._prepare_single_term(part, is_prefix=False)
161 |                     processed_parts.append(prepared_term)
162 | 
163 |         return " ".join(processed_parts)
164 | 
165 |     def _prepare_parenthetical_term(self, term: str) -> str:
166 |         """Prepare a term that contains parentheses, preserving the parentheses for grouping.
167 | 
168 |         Args:
169 |             term: A term that may contain parentheses like "(hello" or "world)" or "(hello OR world)"
170 | 
171 |         Returns:
172 |             A properly formatted term with parentheses preserved
173 |         """
174 |         # Handle terms that start/end with parentheses but may contain quotable content
175 |         result = ""
176 |         i = 0
177 |         while i < len(term):
178 |             if term[i] in "()":
179 |                 # Preserve parentheses as-is
180 |                 result += term[i]
181 |                 i += 1
182 |             else:
183 |                 # Find the next parenthesis or end of string
184 |                 start = i
185 |                 while i < len(term) and term[i] not in "()":
186 |                     i += 1
187 | 
188 |                 # Extract the content between parentheses
189 |                 content = term[start:i].strip()
190 |                 if content:
191 |                     # Only quote if it actually needs quoting (has hyphens, special chars, etc)
192 |                     # but don't quote if it's just simple words
193 |                     if self._needs_quoting(content):
194 |                         escaped_content = content.replace('"', '""')
195 |                         result += f'"{escaped_content}"'
196 |                     else:
197 |                         result += content
198 | 
199 |         return result
200 | 
201 |     def _needs_quoting(self, term: str) -> bool:
202 |         """Check if a term needs to be quoted for FTS5 safety.
203 | 
204 |         Args:
205 |             term: The term to check
206 | 
207 |         Returns:
208 |             True if the term should be quoted
209 |         """
210 |         if not term or not term.strip():
211 |             return False
212 | 
213 |         # Characters that indicate we should quote (excluding parentheses which are valid syntax)
214 |         needs_quoting_chars = [
215 |             " ",
216 |             ".",
217 |             ":",
218 |             ";",
219 |             ",",
220 |             "<",
221 |             ">",
222 |             "?",
223 |             "/",
224 |             "-",
225 |             "'",
226 |             '"',
227 |             "[",
228 |             "]",
229 |             "{",
230 |             "}",
231 |             "+",
232 |             "!",
233 |             "@",
234 |             "#",
235 |             "$",
236 |             "%",
237 |             "^",
238 |             "&",
239 |             "=",
240 |             "|",
241 |             "\\",
242 |             "~",
243 |             "`",
244 |         ]
245 | 
246 |         return any(c in term for c in needs_quoting_chars)
247 | 
    def _prepare_single_term(self, term: str, is_prefix: bool = True) -> str:
        """Prepare a single search term (no Boolean operators) for FTS5.

        Args:
            term: A single search term
            is_prefix: Whether to add prefix search capability (* suffix)

        Returns:
            A properly formatted single term: left alone, wildcard-suffixed,
            AND-joined per word, or quoted as an exact phrase, depending on
            which special characters it contains.
        """
        # Empty/whitespace-only input is returned unchanged.
        if not term or not term.strip():
            return term

        term = term.strip()

        # Check if term is already a proper wildcard pattern (alphanumeric + *)
        # e.g., "hello*", "test*world" - these should be left alone
        if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
            return term

        # Characters that can cause FTS5 syntax errors when used as operators
        # We're more conservative here - only quote when we detect problematic patterns
        problematic_chars = [
            '"',
            "'",
            "(",
            ")",
            "[",
            "]",
            "{",
            "}",
            "+",
            "!",
            "@",
            "#",
            "$",
            "%",
            "^",
            "&",
            "=",
            "|",
            "\\",
            "~",
            "`",
        ]

        # Characters that indicate we should quote (spaces, dots, colons, etc.)
        # Adding hyphens here because FTS5 can have issues with hyphens followed by wildcards
        needs_quoting_chars = [" ", ".", ":", ";", ",", "<", ">", "?", "/", "-"]

        # Check if term needs quoting
        has_problematic = any(c in term for c in problematic_chars)
        has_spaces_or_special = any(c in term for c in needs_quoting_chars)

        if has_problematic or has_spaces_or_special:
            # Handle multi-word queries differently from special character queries
            if " " in term and not any(c in term for c in problematic_chars):
                # Check if any individual word contains special characters that need quoting
                words = term.strip().split()
                has_special_in_words = any(
                    any(c in word for c in needs_quoting_chars if c != " ") for word in words
                )

                if not has_special_in_words:
                    # For multi-word queries with simple words (like "emoji unicode"),
                    # use boolean AND to handle word order variations
                    if is_prefix:
                        # Add prefix wildcard to each word for better matching
                        prepared_words = [f"{word}*" for word in words if word]
                    else:
                        prepared_words = words
                    term = " AND ".join(prepared_words)
                else:
                    # If any word has special characters, quote the entire phrase
                    escaped_term = term.replace('"', '""')
                    # ".md" paths get exact matching; everything else keeps
                    # prefix matching when is_prefix is set.
                    if is_prefix and not ("/" in term and term.endswith(".md")):
                        term = f'"{escaped_term}"*'
                    else:
                        term = f'"{escaped_term}"'
            else:
                # For terms with problematic characters or file paths, use exact phrase matching
                # Escape any existing quotes by doubling them
                escaped_term = term.replace('"', '""')
                # Quote the entire term to handle special characters safely
                if is_prefix and not ("/" in term and term.endswith(".md")):
                    # For search terms (not file paths), add prefix matching
                    term = f'"{escaped_term}"*'
                else:
                    # For file paths, use exact matching
                    term = f'"{escaped_term}"'
        elif is_prefix:
            # Only add wildcard for simple terms without special characters
            term = f"{term}*"

        return term
343 | 
344 |     def _prepare_search_term(self, term: str, is_prefix: bool = True) -> str:
345 |         """Prepare a search term for FTS5 query.
346 | 
347 |         Args:
348 |             term: The search term to prepare
349 |             is_prefix: Whether to add prefix search capability (* suffix)
350 | 
351 |         For FTS5:
352 |         - Boolean operators (AND, OR, NOT) are preserved for complex queries
353 |         - Terms with FTS5 special characters are quoted to prevent syntax errors
354 |         - Simple terms get prefix wildcards for better matching
355 |         """
356 |         # Check for explicit boolean operators - if present, process as Boolean query
357 |         boolean_operators = [" AND ", " OR ", " NOT "]
358 |         if any(op in f" {term} " for op in boolean_operators):
359 |             return self._prepare_boolean_query(term)
360 | 
361 |         # For non-Boolean queries, use the single term preparation logic
362 |         return self._prepare_single_term(term, is_prefix)
363 | 
364 |     async def search(
365 |         self,
366 |         search_text: Optional[str] = None,
367 |         permalink: Optional[str] = None,
368 |         permalink_match: Optional[str] = None,
369 |         title: Optional[str] = None,
370 |         types: Optional[List[str]] = None,
371 |         after_date: Optional[datetime] = None,
372 |         search_item_types: Optional[List[SearchItemType]] = None,
373 |         limit: int = 10,
374 |         offset: int = 0,
375 |     ) -> List[SearchIndexRow]:
376 |         """Search across all indexed content with fuzzy matching."""
377 |         conditions = []
378 |         params = {}
379 |         order_by_clause = ""
380 | 
381 |         # Handle text search for title and content
382 |         if search_text:
383 |             # Skip FTS for wildcard-only queries that would cause "unknown special query" errors
384 |             if search_text.strip() == "*" or search_text.strip() == "":
385 |                 # For wildcard searches, don't add any text conditions - return all results
386 |                 pass
387 |             else:
388 |                 # Use _prepare_search_term to handle both Boolean and non-Boolean queries
389 |                 processed_text = self._prepare_search_term(search_text.strip())
390 |                 params["text"] = processed_text
391 |                 conditions.append("(title MATCH :text OR content_stems MATCH :text)")
392 | 
393 |         # Handle title match search
394 |         if title:
395 |             title_text = self._prepare_search_term(title.strip(), is_prefix=False)
396 |             params["title_text"] = title_text
397 |             conditions.append("title MATCH :title_text")
398 | 
399 |         # Handle permalink exact search
400 |         if permalink:
401 |             params["permalink"] = permalink
402 |             conditions.append("permalink = :permalink")
403 | 
404 |         # Handle permalink match search, supports *
405 |         if permalink_match:
406 |             # For GLOB patterns, don't use _prepare_search_term as it will quote slashes
407 |             # GLOB patterns need to preserve their syntax
408 |             permalink_text = permalink_match.lower().strip()
409 |             params["permalink"] = permalink_text
410 |             if "*" in permalink_match:
411 |                 conditions.append("permalink GLOB :permalink")
412 |             else:
413 |                 # For exact matches without *, we can use FTS5 MATCH
414 |                 # but only prepare the term if it doesn't look like a path
415 |                 if "/" in permalink_text:
416 |                     conditions.append("permalink = :permalink")
417 |                 else:
418 |                     permalink_text = self._prepare_search_term(permalink_text, is_prefix=False)
419 |                     params["permalink"] = permalink_text
420 |                     conditions.append("permalink MATCH :permalink")
421 | 
422 |         # Handle entity type filter
423 |         if search_item_types:
424 |             type_list = ", ".join(f"'{t.value}'" for t in search_item_types)
425 |             conditions.append(f"type IN ({type_list})")
426 | 
427 |         # Handle type filter
428 |         if types:
429 |             type_list = ", ".join(f"'{t}'" for t in types)
430 |             conditions.append(f"json_extract(metadata, '$.entity_type') IN ({type_list})")
431 | 
432 |         # Handle date filter using datetime() for proper comparison
433 |         if after_date:
434 |             params["after_date"] = after_date
435 |             conditions.append("datetime(created_at) > datetime(:after_date)")
436 | 
437 |             # order by most recent first
438 |             order_by_clause = ", updated_at DESC"
439 | 
440 |         # Always filter by project_id
441 |         params["project_id"] = self.project_id
442 |         conditions.append("project_id = :project_id")
443 | 
444 |         # set limit on search query
445 |         params["limit"] = limit
446 |         params["offset"] = offset
447 | 
448 |         # Build WHERE clause
449 |         where_clause = " AND ".join(conditions) if conditions else "1=1"
450 | 
451 |         sql = f"""
452 |             SELECT 
453 |                 project_id,
454 |                 id, 
455 |                 title, 
456 |                 permalink,
457 |                 file_path,
458 |                 type,
459 |                 metadata,
460 |                 from_id,
461 |                 to_id,
462 |                 relation_type,
463 |                 entity_id,
464 |                 content_snippet,
465 |                 category,
466 |                 created_at,
467 |                 updated_at,
468 |                 bm25(search_index) as score
469 |             FROM search_index 
470 |             WHERE {where_clause}
471 |             ORDER BY score ASC {order_by_clause}
472 |             LIMIT :limit
473 |             OFFSET :offset
474 |         """
475 | 
476 |         logger.trace(f"Search {sql} params: {params}")
477 |         try:
478 |             async with db.scoped_session(self.session_maker) as session:
479 |                 result = await session.execute(text(sql), params)
480 |                 rows = result.fetchall()
481 |         except Exception as e:
482 |             # Handle FTS5 syntax errors and provide user-friendly feedback
483 |             if "fts5: syntax error" in str(e).lower():  # pragma: no cover
484 |                 logger.warning(f"FTS5 syntax error for search term: {search_text}, error: {e}")
485 |                 # Return empty results rather than crashing
486 |                 return []
487 |             else:
488 |                 # Re-raise other database errors
489 |                 logger.error(f"Database error during search: {e}")
490 |                 raise
491 | 
492 |         results = [
493 |             SearchIndexRow(
494 |                 project_id=self.project_id,
495 |                 id=row.id,
496 |                 title=row.title,
497 |                 permalink=row.permalink,
498 |                 file_path=row.file_path,
499 |                 type=row.type,
500 |                 score=row.score,
501 |                 metadata=json.loads(row.metadata),
502 |                 from_id=row.from_id,
503 |                 to_id=row.to_id,
504 |                 relation_type=row.relation_type,
505 |                 entity_id=row.entity_id,
506 |                 content_snippet=row.content_snippet,
507 |                 category=row.category,
508 |                 created_at=row.created_at,
509 |                 updated_at=row.updated_at,
510 |             )
511 |             for row in rows
512 |         ]
513 | 
514 |         logger.trace(f"Found {len(results)} search results")
515 |         for r in results:
516 |             logger.trace(
517 |                 f"Search result: project_id: {r.project_id} type:{r.type} title: {r.title} permalink: {r.permalink} score: {r.score}"
518 |             )
519 | 
520 |         return results
521 | 
522 |     async def index_item(
523 |         self,
524 |         search_index_row: SearchIndexRow,
525 |     ):
526 |         """Index or update a single item."""
527 |         async with db.scoped_session(self.session_maker) as session:
528 |             # Delete existing record if any
529 |             await session.execute(
530 |                 text(
531 |                     "DELETE FROM search_index WHERE permalink = :permalink AND project_id = :project_id"
532 |                 ),
533 |                 {"permalink": search_index_row.permalink, "project_id": self.project_id},
534 |             )
535 | 
536 |             # Prepare data for insert with project_id
537 |             insert_data = search_index_row.to_insert()
538 |             insert_data["project_id"] = self.project_id
539 | 
540 |             # Insert new record
541 |             await session.execute(
542 |                 text("""
543 |                     INSERT INTO search_index (
544 |                         id, title, content_stems, content_snippet, permalink, file_path, type, metadata,
545 |                         from_id, to_id, relation_type,
546 |                         entity_id, category,
547 |                         created_at, updated_at,
548 |                         project_id
549 |                     ) VALUES (
550 |                         :id, :title, :content_stems, :content_snippet, :permalink, :file_path, :type, :metadata,
551 |                         :from_id, :to_id, :relation_type,
552 |                         :entity_id, :category,
553 |                         :created_at, :updated_at,
554 |                         :project_id
555 |                     )
556 |                 """),
557 |                 insert_data,
558 |             )
559 |             logger.debug(f"indexed row {search_index_row}")
560 |             await session.commit()
561 | 
562 |     async def bulk_index_items(self, search_index_rows: List[SearchIndexRow]):
563 |         """Index multiple items in a single batch operation.
564 | 
565 |         Note: This method assumes that any existing records for the entity_id
566 |         have already been deleted (typically via delete_by_entity_id).
567 | 
568 |         Args:
569 |             search_index_rows: List of SearchIndexRow objects to index
570 |         """
571 |         if not search_index_rows:
572 |             return
573 | 
574 |         async with db.scoped_session(self.session_maker) as session:
575 |             # Prepare all insert data with project_id
576 |             insert_data_list = []
577 |             for row in search_index_rows:
578 |                 insert_data = row.to_insert()
579 |                 insert_data["project_id"] = self.project_id
580 |                 insert_data_list.append(insert_data)
581 | 
582 |             # Batch insert all records using executemany
583 |             await session.execute(
584 |                 text("""
585 |                     INSERT INTO search_index (
586 |                         id, title, content_stems, content_snippet, permalink, file_path, type, metadata,
587 |                         from_id, to_id, relation_type,
588 |                         entity_id, category,
589 |                         created_at, updated_at,
590 |                         project_id
591 |                     ) VALUES (
592 |                         :id, :title, :content_stems, :content_snippet, :permalink, :file_path, :type, :metadata,
593 |                         :from_id, :to_id, :relation_type,
594 |                         :entity_id, :category,
595 |                         :created_at, :updated_at,
596 |                         :project_id
597 |                     )
598 |                 """),
599 |                 insert_data_list,
600 |             )
601 |             logger.debug(f"Bulk indexed {len(search_index_rows)} rows")
602 |             await session.commit()
603 | 
604 |     async def delete_by_entity_id(self, entity_id: int):
605 |         """Delete an item from the search index by entity_id."""
606 |         async with db.scoped_session(self.session_maker) as session:
607 |             await session.execute(
608 |                 text(
609 |                     "DELETE FROM search_index WHERE entity_id = :entity_id AND project_id = :project_id"
610 |                 ),
611 |                 {"entity_id": entity_id, "project_id": self.project_id},
612 |             )
613 |             await session.commit()
614 | 
615 |     async def delete_by_permalink(self, permalink: str):
616 |         """Delete an item from the search index."""
617 |         async with db.scoped_session(self.session_maker) as session:
618 |             await session.execute(
619 |                 text(
620 |                     "DELETE FROM search_index WHERE permalink = :permalink AND project_id = :project_id"
621 |                 ),
622 |                 {"permalink": permalink, "project_id": self.project_id},
623 |             )
624 |             await session.commit()
625 | 
626 |     async def execute_query(
627 |         self,
628 |         query: Executable,
629 |         params: Dict[str, Any],
630 |     ) -> Result[Any]:
631 |         """Execute a query asynchronously."""
632 |         # logger.debug(f"Executing query: {query}, params: {params}")
633 |         async with db.scoped_session(self.session_maker) as session:
634 |             start_time = time.perf_counter()
635 |             result = await session.execute(query, params)
636 |             end_time = time.perf_counter()
637 |             elapsed_time = end_time - start_time
638 |             logger.debug(f"Query executed successfully in {elapsed_time:.2f}s.")
639 |             return result
640 | 
```

--------------------------------------------------------------------------------
/tests/sync/test_sync_service_incremental.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for incremental scan watermark optimization (Phase 1.5).
  2 | 
  3 | These tests verify the scan watermark feature that dramatically improves sync
  4 | performance on large projects by:
  5 | - Using find -newermt for incremental scans (only changed files)
  6 | - Tracking last_scan_timestamp and last_file_count
  7 | - Falling back to full scan when deletions detected
  8 | 
  9 | Expected performance improvements:
 10 | - No changes: 225x faster (2s vs 450s for 1,460 files)
 11 | - Few changes: 84x faster (5s vs 420s)
 12 | """
 13 | 
import asyncio
import time
from pathlib import Path
from textwrap import dedent

import pytest

from basic_memory.config import ProjectConfig
from basic_memory.sync.sync_service import SyncService
 22 | 
 23 | 
 24 | async def create_test_file(path: Path, content: str = "test content") -> None:
 25 |     """Create a test file with given content."""
 26 |     path.parent.mkdir(parents=True, exist_ok=True)
 27 |     path.write_text(content)
 28 | 
 29 | 
 30 | async def sleep_past_watermark(duration: float = 1.1) -> None:
 31 |     """Sleep long enough to ensure mtime is newer than watermark.
 32 | 
 33 |     Args:
 34 |         duration: Sleep duration in seconds (default 1.1s for filesystem precision)
 35 |     """
 36 |     time.sleep(duration)
 37 | 
 38 | 
 39 | # ==============================================================================
 40 | # Scan Strategy Selection Tests
 41 | # ==============================================================================
 42 | 
 43 | 
 44 | @pytest.mark.asyncio
 45 | async def test_first_sync_uses_full_scan(sync_service: SyncService, project_config: ProjectConfig):
 46 |     """Test that first sync (no watermark) triggers full scan."""
 47 |     project_dir = project_config.home
 48 | 
 49 |     # Create test files
 50 |     await create_test_file(project_dir / "file1.md", "# File 1\nContent 1")
 51 |     await create_test_file(project_dir / "file2.md", "# File 2\nContent 2")
 52 | 
 53 |     # First sync - should use full scan (no watermark exists)
 54 |     report = await sync_service.sync(project_dir)
 55 | 
 56 |     assert len(report.new) == 2
 57 |     assert "file1.md" in report.new
 58 |     assert "file2.md" in report.new
 59 | 
 60 |     # Verify watermark was set
 61 |     project = await sync_service.project_repository.find_by_id(
 62 |         sync_service.entity_repository.project_id
 63 |     )
 64 |     assert project.last_scan_timestamp is not None
 65 |     assert project.last_file_count >= 2  # May include config files
 66 | 
 67 | 
 68 | @pytest.mark.asyncio
 69 | async def test_file_count_decreased_triggers_full_scan(
 70 |     sync_service: SyncService, project_config: ProjectConfig
 71 | ):
 72 |     """Test that file deletion (count decreased) triggers full scan."""
 73 |     project_dir = project_config.home
 74 | 
 75 |     # Create initial files
 76 |     await create_test_file(project_dir / "file1.md", "# File 1")
 77 |     await create_test_file(project_dir / "file2.md", "# File 2")
 78 |     await create_test_file(project_dir / "file3.md", "# File 3")
 79 | 
 80 |     # First sync
 81 |     await sync_service.sync(project_dir)
 82 | 
 83 |     # Delete a file
 84 |     (project_dir / "file2.md").unlink()
 85 | 
 86 |     # Sleep to ensure file operations complete
 87 |     await sleep_past_watermark()
 88 | 
 89 |     # Second sync - should detect deletion via full scan (file count decreased)
 90 |     report = await sync_service.sync(project_dir)
 91 | 
 92 |     assert len(report.deleted) == 1
 93 |     assert "file2.md" in report.deleted
 94 | 
 95 | 
 96 | @pytest.mark.asyncio
 97 | async def test_file_count_same_uses_incremental_scan(
 98 |     sync_service: SyncService, project_config: ProjectConfig
 99 | ):
100 |     """Test that same file count uses incremental scan."""
101 |     project_dir = project_config.home
102 | 
103 |     # Create initial files
104 |     await create_test_file(project_dir / "file1.md", "# File 1\nOriginal")
105 |     await create_test_file(project_dir / "file2.md", "# File 2\nOriginal")
106 | 
107 |     # First sync
108 |     await sync_service.sync(project_dir)
109 | 
110 |     # Sleep to ensure mtime will be newer than watermark
111 |     await sleep_past_watermark()
112 | 
113 |     # Modify one file (file count stays the same)
114 |     await create_test_file(project_dir / "file1.md", "# File 1\nModified")
115 | 
116 |     # Second sync - should use incremental scan (same file count)
117 |     report = await sync_service.sync(project_dir)
118 | 
119 |     assert len(report.modified) == 1
120 |     assert "file1.md" in report.modified
121 | 
122 | 
@pytest.mark.asyncio
async def test_file_count_increased_uses_incremental_scan(
    sync_service: SyncService, project_config: ProjectConfig
):
    """A growing file count does not force a full scan; incremental suffices."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await create_test_file(root / "file2.md", "# File 2")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Add a third file; the count goes up but the incremental path still applies.
    await create_test_file(root / "file3.md", "# File 3")

    result = await sync_service.sync(root)

    assert "file3.md" in result.new
    assert len(result.new) == 1
148 | 
149 | 
150 | # ==============================================================================
151 | # Incremental Scan Base Cases
152 | # ==============================================================================
153 | 
154 | 
@pytest.mark.asyncio
async def test_incremental_scan_no_changes(
    sync_service: SyncService, project_config: ProjectConfig
):
    """With nothing touched since the watermark, the sync report is empty."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await create_test_file(root / "file2.md", "# File 2")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Nothing changed between syncs -> every report bucket is empty.
    result = await sync_service.sync(root)

    assert len(result.new) == 0
    assert len(result.modified) == 0
    assert len(result.deleted) == 0
    assert len(result.moves) == 0
179 | 
180 | 
@pytest.mark.asyncio
async def test_incremental_scan_detects_new_file(
    sync_service: SyncService, project_config: ProjectConfig
):
    """A file created after the watermark is reported by the incremental scan."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await sync_service.sync(root)
    await sleep_past_watermark()

    # Create a new file with an mtime past the watermark.
    await create_test_file(root / "file2.md", "# File 2")

    result = await sync_service.sync(root)

    assert "file2.md" in result.new
    assert len(result.new) == 1
    assert len(result.modified) == 0
206 | 
207 | 
@pytest.mark.asyncio
async def test_incremental_scan_detects_modified_file(
    sync_service: SyncService, project_config: ProjectConfig
):
    """A file rewritten after the watermark is reported as modified."""
    root = project_config.home

    target = root / "file1.md"
    await create_test_file(target, "# File 1\nOriginal content")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Rewrite the file so its mtime moves past the watermark.
    await create_test_file(target, "# File 1\nModified content")

    result = await sync_service.sync(root)

    assert "file1.md" in result.modified
    assert len(result.modified) == 1
    assert len(result.new) == 0
234 | 
235 | 
@pytest.mark.asyncio
async def test_incremental_scan_detects_multiple_changes(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Two edits plus an addition are all picked up by one incremental scan."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1\nOriginal")
    await create_test_file(root / "file2.md", "# File 2\nOriginal")
    await create_test_file(root / "file3.md", "# File 3\nOriginal")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Touch two existing files and add a brand-new one.
    await create_test_file(root / "file1.md", "# File 1\nModified")
    await create_test_file(root / "file3.md", "# File 3\nModified")
    await create_test_file(root / "file4.md", "# File 4\nNew")

    result = await sync_service.sync(root)

    assert "file1.md" in result.modified
    assert "file3.md" in result.modified
    assert len(result.modified) == 2
    assert "file4.md" in result.new
    assert len(result.new) == 1
267 | 
268 | 
269 | # ==============================================================================
270 | # Deletion Detection Tests
271 | # ==============================================================================
272 | 
273 | 
@pytest.mark.asyncio
async def test_deletion_triggers_full_scan_single_file(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Deleting one file lowers the count and forces a full scan that finds it."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await create_test_file(root / "file2.md", "# File 2")
    await create_test_file(root / "file3.md", "# File 3")

    first = await sync_service.sync(root)
    assert len(first.new) == 3

    # Remove one file, then wait for filesystem timestamps to settle.
    (root / "file2.md").unlink()
    await sleep_past_watermark()

    # The decreased count forces a full scan, which reports the deletion.
    second = await sync_service.sync(root)

    assert "file2.md" in second.deleted
    assert len(second.deleted) == 1
301 | 
302 | 
@pytest.mark.asyncio
async def test_deletion_triggers_full_scan_multiple_files(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Several deletions at once are all reported by the forced full scan."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await create_test_file(root / "file2.md", "# File 2")
    await create_test_file(root / "file3.md", "# File 3")
    await create_test_file(root / "file4.md", "# File 4")

    await sync_service.sync(root)

    # Drop two files, then let timestamps settle.
    (root / "file2.md").unlink()
    (root / "file4.md").unlink()
    await sleep_past_watermark()

    result = await sync_service.sync(root)

    assert "file2.md" in result.deleted
    assert "file4.md" in result.deleted
    assert len(result.deleted) == 2
332 | 
333 | 
334 | # ==============================================================================
335 | # Move Detection Tests
336 | # ==============================================================================
337 | 
338 | 
@pytest.mark.asyncio
async def test_move_detection_requires_full_scan(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Moves are only visible to a full scan (renames don't bump mtime).

    Because a rename leaves the file count unchanged, a move on its own never
    forces a full scan. Pairing the move with a deletion drops the count,
    triggers the full scan, and lets checksum matching identify the move.
    """
    root = project_config.home

    source_path = root / "old" / "file.md"
    body = dedent(
        """
        ---
        title: Test File
        type: note
        ---
        # Test File
        Distinctive content for move detection
        """
    ).strip()
    await create_test_file(source_path, body)
    await create_test_file(root / "other.md", "# Other\nContent")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Rename the note and also delete a file so the count decreases.
    destination = root / "new" / "moved.md"
    destination.parent.mkdir(parents=True, exist_ok=True)
    source_path.rename(destination)
    (root / "other.md").unlink()

    # Full scan runs because of the deletion; the move is found via checksum.
    result = await sync_service.sync(root)

    assert result.moves.get("old/file.md") == "new/moved.md"
    assert len(result.moves) == 1
    assert "other.md" in result.deleted
    assert len(result.deleted) == 1
387 | 
388 | 
@pytest.mark.asyncio
async def test_move_detection_in_full_scan(
    sync_service: SyncService, project_config: ProjectConfig
):
    """A full scan pairs the old and new paths of a moved file via checksum."""
    root = project_config.home

    source_path = root / "old" / "file.md"
    body = dedent(
        """
        ---
        title: Test File
        type: note
        ---
        # Test File
        Distinctive content for move detection
        """
    ).strip()
    await create_test_file(source_path, body)
    await create_test_file(root / "other.md", "# Other\nContent")

    await sync_service.sync(root)
    await sleep_past_watermark()

    # Rename the note and delete another file so a full scan is triggered.
    destination = root / "new" / "moved.md"
    destination.parent.mkdir(parents=True, exist_ok=True)
    source_path.rename(destination)
    (root / "other.md").unlink()

    result = await sync_service.sync(root)

    assert result.moves.get("old/file.md") == "new/moved.md"
    assert len(result.moves) == 1
    assert "other.md" in result.deleted
    assert len(result.deleted) == 1
431 | 
432 | 
433 | # ==============================================================================
434 | # Watermark Update Tests
435 | # ==============================================================================
436 | 
437 | 
@pytest.mark.asyncio
async def test_watermark_updated_after_successful_sync(
    sync_service: SyncService, project_config: ProjectConfig
):
    """A successful sync records both the scan timestamp and the file count."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")

    # Before any sync the project carries no watermark at all.
    before = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )
    assert before.last_scan_timestamp is None
    assert before.last_file_count is None

    started = time.time()
    await sync_service.sync(root)
    finished = time.time()

    after = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )
    assert after.last_scan_timestamp is not None
    assert after.last_file_count >= 1  # config files may also be counted

    # The recorded timestamp falls inside the sync window.
    assert started <= after.last_scan_timestamp <= finished
469 | 
470 | 
@pytest.mark.asyncio
async def test_watermark_uses_sync_start_time(
    sync_service: SyncService, project_config: ProjectConfig
):
    """The recorded watermark should match the sync start, not its end."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")

    started = time.time()
    await sync_service.sync(root)
    finished = time.time()

    project = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )

    # The watermark is captured at the beginning of sync(), so it should sit
    # nearer the start timestamp than the end timestamp.
    delta_start = abs(project.last_scan_timestamp - started)
    delta_end = abs(project.last_scan_timestamp - finished)

    assert delta_start < delta_end
497 | 
498 | 
@pytest.mark.asyncio
async def test_watermark_file_count_accurate(
    sync_service: SyncService, project_config: ProjectConfig
):
    """The stored file count tracks additions across consecutive syncs."""
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await create_test_file(root / "file2.md", "# File 2")
    await create_test_file(root / "file3.md", "# File 3")

    await sync_service.sync(root)

    first = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )
    baseline = first.last_file_count
    assert baseline >= 3  # config files may also be counted

    # Add two more files after the watermark.
    await sleep_past_watermark()
    await create_test_file(root / "file4.md", "# File 4")
    await create_test_file(root / "file5.md", "# File 5")

    await sync_service.sync(root)

    # The count should have grown by exactly the two new files.
    second = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )
    assert second.last_file_count == baseline + 2
534 | 
535 | 
536 | # ==============================================================================
537 | # Edge Cases and Error Handling
538 | # ==============================================================================
539 | 
540 | 
@pytest.mark.asyncio
async def test_concurrent_file_changes_handled_gracefully(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Files appearing near the watermark boundary are picked up next sync.

    A file created while a sync is in flight should simply be reported by the
    following sync rather than causing an error in the current one.
    """
    root = project_config.home

    await create_test_file(root / "file1.md", "# File 1")
    await sync_service.sync(root)
    await sleep_past_watermark()

    # Simulates a file landing with an mtime close to the watermark.
    await create_test_file(root / "concurrent.md", "# Concurrent")

    result = await sync_service.sync(root)
    assert "concurrent.md" in result.new
568 | 
569 | 
@pytest.mark.asyncio
async def test_empty_directory_handles_incremental_scan(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Both full and incremental scans cope with a directory holding no notes."""
    root = project_config.home

    first = await sync_service.sync(root)
    assert len(first.new) == 0

    # Even an empty sync should leave a watermark behind.
    project = await sync_service.project_repository.find_by_id(
        sync_service.entity_repository.project_id
    )
    assert project.last_scan_timestamp is not None
    # Config files may exist, so only check the count was recorded.
    assert project.last_file_count is not None

    # The follow-up (incremental) sync also reports nothing new.
    second = await sync_service.sync(root)
    assert len(second.new) == 0
592 | 
593 | 
@pytest.mark.asyncio
async def test_incremental_scan_respects_gitignore(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Test that incremental scan respects .gitignore patterns."""
    from basic_memory.ignore_utils import load_gitignore_patterns

    root = project_config.home

    # Write a .gitignore so some of the files below should be skipped
    (root / ".gitignore").write_text("*.ignored\n.hidden/\n")

    # Make the service pick up the freshly written ignore patterns
    sync_service._ignore_patterns = load_gitignore_patterns(root)

    # One file that should sync and one that should be filtered out
    await create_test_file(root / "included.md", "# Included")
    await create_test_file(root / "excluded.ignored", "# Excluded")

    # Full first sync: only the non-ignored file appears
    first_report = await sync_service.sync(root)
    assert "included.md" in first_report.new
    assert "excluded.ignored" not in first_report.new

    # Advance past the watermark, then add another included/excluded pair
    await sleep_past_watermark()
    await create_test_file(root / "included2.md", "# Included 2")
    await create_test_file(root / "excluded2.ignored", "# Excluded 2")

    # The incremental pass must honor the same ignore patterns
    second_report = await sync_service.sync(root)
    assert "included2.md" in second_report.new
    assert "excluded2.ignored" not in second_report.new
627 | 
628 | 
629 | # ==============================================================================
630 | # Relation Resolution Optimization Tests
631 | # ==============================================================================
632 | 
633 | 
@pytest.mark.asyncio
async def test_relation_resolution_skipped_when_no_changes(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Test that relation resolution is skipped when no file changes detected.

    This optimization prevents wasting time resolving relations when there are
    no changes, dramatically improving sync performance for large projects.
    """
    root = project_config.home

    # A note with a wikilink pointing at a file that does not exist yet
    note_body = dedent(
        """
        ---
        title: File with Link
        type: note
        ---
        # File with Link
        This links to [[Target File]]
        """
    ).strip()
    await create_test_file(root / "file1.md", note_body)

    # Initial sync picks up the note; its link stays unresolved
    first_report = await sync_service.sync(root)
    assert len(first_report.new) == 1

    # The [[Target File]] reference has no matching entity yet
    dangling = await sync_service.relation_repository.find_unresolved_relations()
    dangling_before = len(dangling)
    assert dangling_before > 0

    # Let the clock move past the watermark
    await sleep_past_watermark()

    # With nothing modified, the second sync should detect zero changes
    second_report = await sync_service.sync(root)
    assert second_report.total == 0

    # Resolution was skipped, so the unresolved count is untouched
    still_dangling = await sync_service.relation_repository.find_unresolved_relations()
    assert len(still_dangling) == dangling_before
677 | 
678 | 
@pytest.mark.asyncio
async def test_relation_resolution_runs_when_files_modified(
    sync_service: SyncService, project_config: ProjectConfig
):
    """Test that relation resolution runs when files are actually modified."""
    root = project_config.home

    # A note linking to a target that does not exist yet
    source_note = dedent(
        """
        ---
        title: File with Link
        type: note
        ---
        # File with Link
        This links to [[Target File]]
        """
    ).strip()
    await create_test_file(root / "file1.md", source_note)

    # Initial sync indexes the note
    await sync_service.sync(root)

    # The wikilink should currently be dangling
    dangling_before = await sync_service.relation_repository.find_unresolved_relations()
    assert len(dangling_before) > 0

    # Make sure the new file's mtime lands after the watermark
    await sleep_past_watermark()

    # Now create the linked-to note, which should let the relation resolve
    target_note = dedent(
        """
        ---
        title: Target File
        type: note
        ---
        # Target File
        This is the target.
        """
    ).strip()
    await create_test_file(root / "target.md", target_note)

    # The incremental sync must see the new file and re-run resolution
    report = await sync_service.sync(root)
    assert len(report.new) == 1
    assert "target.md" in report.new

    # Fewer relations should be dangling than before
    dangling_after = await sync_service.relation_repository.find_unresolved_relations()
    assert len(dangling_after) < len(dangling_before)
730 | 
```
Page 15/23FirstPrevNextLast