This is page 1 of 2. Use http://codebase.md/rawr-ai/mcp-filesystem?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .ai
│   └── rules
│       └── filesystem-mcp-server-usage.md
├── .cursor
│   └── rules
│       ├── creating-cursor-rules.mdc
│       ├── filesystem-mcp-tools-guide.mdc
│       └── graphiti
│           ├── graphiti-filesystem-schema.mdc
│           ├── graphiti-knowledge-graph-maintenance.mdc
│           └── graphiti-mcp-core-rules.mdc
├── .early.coverage
│   └── v8
│       └── coverage-final.json
├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .repomixignore
├── ai
│   ├── graph
│   │   ├── entities
│   │   │   ├── .gitkeep
│   │   │   └── Tool.py
│   │   ├── mcp-config.yaml
│   │   └── rools
│   │       ├── orchestrator_SOPs.md
│   │       └── playbooks
│   │           ├── pb_development_logging.md
│   │           ├── pb_discovery_driven_execution.md
│   │           ├── pb_iterative_execution_verification.md
│   │           └── pb_registry.md
│   └── logs
│       ├── dev
│       │   └── 2025-04-06-regex-content-search.md
│       └── introduce_test_suite
│           └── workflow_diagram.md
├── bun.lock
├── bunfig.toml
├── demo
│   ├── archive
│   │   ├── log.txt
│   │   ├── readme.md
│   │   └── subdir
│   │       └── old_data.txt
│   ├── data.json
│   ├── info.txt
│   ├── nested
│   │   ├── deep
│   │   │   └── hidden.json
│   │   └── info.md
│   ├── README.md
│   └── sample.xml
├── Dockerfile
├── examples
│   ├── mcp_cursor.json
│   ├── mcp_docker.json
│   ├── mcp_glama.json
│   ├── mcp_http.json
│   ├── mcp_permissions.json
│   ├── mcp_roo.json
│   ├── mcp_sse.json
│   └── mcp_stdio.json
├── glama.json
├── index.ts
├── package.json
├── README.md
├── repomix.config.json
├── scripts
│   └── run-docker-demo.sh
├── src
│   ├── config
│   │   └── permissions.ts
│   ├── handlers
│   │   ├── directory-handlers.ts
│   │   ├── file-handlers.ts
│   │   ├── index.ts
│   │   ├── json-handlers.ts
│   │   ├── utility-handlers.ts
│   │   └── xml-handlers.ts
│   ├── schemas
│   │   ├── directory-operations.ts
│   │   ├── file-operations.ts
│   │   ├── index.ts
│   │   ├── json-operations.ts
│   │   └── utility-operations.ts
│   └── utils
│       ├── data-utils.ts
│       ├── file-utils.ts
│       ├── path-utils.ts
│       ├── schema-utils.ts
│       └── typebox-zod.ts
├── test
│   ├── json
│   │   └── users.json
│   ├── sample.xml
│   ├── suites
│   │   ├── regex_search_content
│   │   │   ├── basic_search.test.ts
│   │   │   ├── depth_limiting.test.ts
│   │   │   ├── error_handling.test.ts
│   │   │   ├── file_pattern.test.ts
│   │   │   ├── max_filesize.test.ts
│   │   │   ├── max_results.test.ts
│   │   │   ├── path_usage.test.ts
│   │   │   ├── regex_flags.test.ts
│   │   │   └── spec.md
│   │   └── xml_tools
│   │       └── xml_tools.test.ts
│   ├── transports
│   │   ├── network.test.ts
│   │   └── stdio.test.ts
│   └── utils
│       ├── pathUtils.test.ts
│       └── regexUtils.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/ai/graph/entities/.gitkeep:
--------------------------------------------------------------------------------
```
1 |
```
--------------------------------------------------------------------------------
/.repomixignore:
--------------------------------------------------------------------------------
```
1 | # Add patterns to ignore here, one per line
2 | # Example:
3 | # *.log
4 | # tmp/
5 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | # Build output
2 | dist/
3 |
4 | # Dependencies
5 | node_modules/
6 | node-compile-cache/
7 |
8 | # Environment files
9 | .env
10 | .env.local
11 | .env.test
12 |
13 | # IDE files
14 | .vscode/
15 |
16 | # Logs and temporary files
17 | *.log
18 | *.tmp
19 |
20 | # MCP client configs
21 | .cursor/
22 | !.cursor/rules/
23 | !.cursor/rules/**
24 | .roo/
25 |
```
--------------------------------------------------------------------------------
/demo/archive/readme.md:
--------------------------------------------------------------------------------
```markdown
1 | This directory contains example files for testing file operations such as copy, move, list, and delete within the MCP Filesystem Server demo.
2 |
3 | Files:
4 | - `subdir/old_data.txt` — An archived file deep in a subdirectory, for recursive and nested operation testing.
5 | - `log.txt` — A placeholder for recent archive logs or simple text operation tests; consider creating or manipulating this file.
6 |
7 | Feel free to add, read, move, or delete files in this directory as part of your MCP server demonstrations.
```
--------------------------------------------------------------------------------
/demo/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Demo Directory
2 |
3 | This directory provides a sample filesystem structure to demonstrate and test the capabilities of the MCP Filesystem Server. It includes a variety of files and directories that cover common use cases such as:
4 |
5 | - Nested directories
6 | - JSON files for read/write/modify operations
7 | - XML files for structure and conversion utilities
8 | - Example files and directories for searching/moving/listing/deletion tests
9 |
10 | ## Structure
11 |
12 | - `nested/`
13 | - Contains multiple levels of nested folders and a couple of files at deep levels.
14 | - `data.json`
15 | - A sample JSON file for edit/read/modify operations.
16 | - `sample.xml`
17 | - An example XML file for conversion and structure queries.
18 | - `info.txt`
19 | - A plain text file for basic file operations.
20 | - `emptyfolder/`
21 | - An empty directory to test directory creation and deletion.
22 | - `archive/`
23 | - Contains archived files and subfolders for operations like moving and recursive listing.
24 |
25 | Feel free to add, modify, or delete items in this directory to experiment with and verify server behaviors.
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # Filesystem MCP Server
2 |
3 | Bun-based server implementing the Model Context Protocol (MCP) for filesystem operations, with comprehensive permission controls and enhanced functionality.
4 |
5 | Development uses [Bun](https://bun.sh/) and the server can run directly from TypeScript with `bun`, but most MCP clients execute Node-compatible JavaScript. Use `node dist/index.js` in configs unless you're intentionally running the TypeScript entry with Bun.
6 |
7 | <a href="https://glama.ai/mcp/servers/@rawr-ai/mcp-filesystem">
8 | <img width="380" height="200" src="https://glama.ai/mcp/servers/@rawr-ai/mcp-filesystem/badge" alt="Filesystem Server MCP server" />
9 | </a>
10 |
11 | ## Features
12 |
13 | - Granular permission controls (read-only, full access, or specific operation permissions)
14 | - Secure file operations within allowed directories
15 | - File operations:
16 | - Read/write/modify files
17 | - Create/list/delete directories
18 | - Move files/directories
19 | - Search files by name or extension
20 | - Get file metadata
21 | - Directory operations:
22 | - Tree view of directory structures
23 | - Recursive operations with exclusion patterns
24 | - Utility functions:
25 | - XML to JSON conversion
26 | - Multiple file operations in one call
27 | - Advanced file editing with pattern matching
28 | - Security features:
29 | - Symlink control
30 | - Path validation
31 | - Sandboxed operations
32 |
33 | **Note**: The server will only allow operations within directories specified via `args` and according to the configured permissions.
34 |
35 | ## Installation
36 |
37 | 1. **Install Bun** (requires Bun v1.0 or later)
38 | ```bash
39 | curl -fsSL https://bun.sh/install | bash
40 | ```
41 | 2. **Install dependencies**
42 | ```bash
43 | bun install
44 | ```
45 | 3. **Build the project** (required for Node runtimes)
46 | ```bash
47 | bun run build
48 | ```
49 | 4. **Run tests**
50 | ```bash
51 | bun test
52 | ```
53 |
54 | ## Configuration options
55 |
56 | Paths may include environment variables like `$HOME`, `${CUSTOM}`, or `%USERPROFILE%`. Choose the modality that fits your setup:
57 |
58 | ### Local (Node or Bun)
59 | Use Node for built JavaScript or Bun to run TypeScript directly.
60 | ```json
61 | { "command": "node", "args": ["/path/to/mcp-filesystem/dist/index.js", "$HOME/allowed-directory"] }
62 | ```
63 | ```json
64 | { "command": "bun", "args": ["/path/to/mcp-filesystem/index.ts", "$HOME/allowed-directory"] }
65 | ```
66 |
67 | ### Git hosted
68 | Run straight from the public repo without cloning.
69 | ```json
70 | { "command": "bunx", "args": ["github:rawr-ai/mcp-filesystem", "$HOME/allowed-directory"] }
71 | ```
72 | ```json
73 | { "command": "npx", "args": ["github:rawr-ai/mcp-filesystem", "$HOME/allowed-directory"] }
74 | ```
75 |
76 | ### NPM package (coming soon)
77 | Planned publication to `rawr-ai/mcp-filesystem`.
78 | ```json
79 | { "command": "bunx", "args": ["rawr-ai/mcp-filesystem", "$HOME/allowed-directory"] }
80 | ```
81 | ```json
82 | { "command": "npx", "args": ["rawr-ai/mcp-filesystem", "$HOME/allowed-directory"] }
83 | ```
84 |
85 | ### Docker
86 | Isolated container environment.
87 | ```json
88 | { "command": "docker", "args": ["run", "--rm", "-v", "$HOME/allowed-directory:/data", "mcp/filesystem", "/data"] }
89 | ```
90 |
91 | ### Hosted service
92 | For managed MCP hosts like glama.ai.
93 | ```json
94 | { "mcpServers": { "filesystem": { "url": "https://glama.ai/rawr-ai/mcp-filesystem" } } }
95 | ```
96 |
97 | See the `examples/` directory for platform-specific configs (Cursor, Roo, etc.) and additional path variants.
98 |
99 | ## API
100 |
101 | ### Resources
102 |
103 | - `file://system`: File system operations interface
104 |
105 | ### Tools
106 |
107 | All tool argument schemas are defined with [TypeBox](https://github.com/sinclairzx81/typebox) and registered via the `toolSchemas` map in `src/schemas`. This ensures every tool shares a consistent schema that handlers can reference.
108 |
109 | - **read_file**
110 | - Read contents of a file (response-capped)
111 | - Inputs:
112 | - `path` (string)
113 | - `maxBytes` (number): Maximum bytes to return
114 | - Returns at most `maxBytes` bytes to protect downstream consumers
115 |
116 | - **read_multiple_files**
117 | - Read multiple files simultaneously
118 | - Inputs:
119 | - `paths` (string[])
120 | - `maxBytesPerFile` (number): Maximum bytes to return per file
121 | - Failed reads won't stop the entire operation
122 |
123 | - **create_file**
124 | - Create a new file with content
125 | - Inputs:
126 | - `path` (string): File location
127 | - `content` (string): File content
128 | - Fails if file already exists
129 | - Requires `create` permission
130 |
131 | - **modify_file**
132 | - Modify an existing file with new content
133 | - Inputs:
134 | - `path` (string): File location
135 | - `content` (string): New file content
136 | - Fails if file doesn't exist
137 | - Requires `edit` permission
138 |
139 | - **edit_file**
140 | - Make selective edits using pattern matching and formatting
141 | - Features:
142 | - Line-based and multi-line content matching
143 | - Whitespace normalization with indentation preservation
144 | - Multiple simultaneous edits with correct positioning
145 | - Indentation style detection and preservation
146 | - Git-style diff output with context
147 | - Preview changes with dry run mode
148 | - Inputs:
149 | - `path` (string): File to edit
150 | - `edits` (array): List of edit operations
151 | - `oldText` (string): Text to search for (exact match)
152 | - `newText` (string): Text to replace with
153 | - `dryRun` (boolean): Preview changes without applying (default: false)
154 | - `maxBytes` (number): Maximum bytes to read before editing
155 | - Returns detailed diff for dry runs, otherwise applies changes
156 | - Requires `edit` permission
157 | - Best Practice: Always use dryRun first to preview changes
158 |
159 | - **create_directory**
160 | - Create new directory or ensure it exists
161 | - Input: `path` (string)
162 | - Creates parent directories if needed
163 | - Succeeds silently if directory exists
164 | - Requires `create` permission
165 |
166 | - **list_directory**
167 | - List directory contents with [FILE] or [DIR] prefixes
168 | - Input: `path` (string)
169 | - Returns detailed listing of files and directories
170 |
171 | - **directory_tree**
172 | - Get recursive tree view of directory structure
173 | - Input: `path` (string)
174 | - Returns JSON structure with files and directories
175 | - Each entry includes name, type, and children (for directories)
176 |
177 | - **move_file**
178 | - Move or rename files and directories
179 | - Inputs:
180 | - `source` (string): Source path
181 | - `destination` (string): Destination path
182 | - Fails if destination exists
183 | - Works for both files and directories
184 | - Requires `move` permission
185 |
186 | - **delete_file**
187 | - Delete a file
188 | - Input: `path` (string)
189 | - Fails if file doesn't exist
190 | - Requires `delete` permission
191 |
192 | - **delete_directory**
193 | - Delete a directory
194 | - Inputs:
195 | - `path` (string): Directory to delete
196 | - `recursive` (boolean): Whether to delete contents (default: false)
197 | - Fails if directory is not empty and recursive is false
198 | - Requires `delete` permission
199 |
200 | - **search_files**
201 | - Recursively search for files/directories
202 | - Inputs:
203 | - `path` (string): Starting directory
204 | - `pattern` (string): Search pattern
205 | - `excludePatterns` (string[]): Exclude patterns (glob format supported)
206 | - Case-insensitive matching
207 | - Returns full paths to matches
208 |
209 | - **find_files_by_extension**
210 | - Find all files with specific extension
211 | - Inputs:
212 | - `path` (string): Starting directory
213 | - `extension` (string): File extension to find
214 | - `excludePatterns` (string[]): Optional exclude patterns
215 | - Case-insensitive extension matching
216 | - Returns full paths to matching files
217 |
218 | - **get_file_info**
219 | - Get detailed file/directory metadata
220 | - Input: `path` (string)
221 | - Returns:
222 | - Size
223 | - Creation time
224 | - Modified time
225 | - Access time
226 | - Type (file/directory)
227 | - Permissions
228 |
229 | - **get_permissions**
230 | - Get current server permissions
231 | - No input required
232 | - Returns:
233 | - Permission flags (readonly, fullAccess, create, edit, move, delete)
234 | - Symlink following status
235 | - Number of allowed directories
236 |
237 | - **list_allowed_directories**
238 | - List all directories the server is allowed to access
239 | - No input required
240 | - Returns array of allowed directory paths
241 |
242 | - **xml_to_json**
243 | - Convert XML file to JSON format
244 | - Inputs:
245 | - `xmlPath` (string): Source XML file
246 | - `jsonPath` (string): Destination JSON file
247 | - `maxResponseBytes` (number, optional): Maximum size of written JSON; large outputs are summarized
248 | - `options` (object, optional):
249 | - `ignoreAttributes` (boolean): Skip XML attributes (default: false)
250 | - `preserveOrder` (boolean): Maintain property order (default: true)
251 | - `format` (boolean): Pretty print JSON (default: true)
252 | - `indentSize` (number): JSON indentation (default: 2)
253 | - Requires `read` permission for XML file
254 | - Requires `create` or `edit` permission for JSON file
255 |
256 | - **xml_to_json_string**
257 | - Convert XML file to JSON string
258 | - Inputs:
259 | - `xmlPath` (string): Source XML file
260 | - `maxResponseBytes` (number, optional): Maximum size of returned JSON string; large outputs are summarized
261 | - `options` (object, optional):
262 | - `ignoreAttributes` (boolean): Skip XML attributes (default: false)
263 | - `preserveOrder` (boolean): Maintain property order (default: true)
264 | - Requires `read` permission for XML file
265 | - Returns JSON string representation (response-capped)
266 |
267 | - **xml_query**
268 | - Query XML file using XPath expressions
269 | - Inputs:
270 | - `path` (string): Path to the XML file
271 | - `query` (string, optional): XPath query to execute
272 | - `structureOnly` (boolean, optional): Return only tag structure
273 | - `includeAttributes` (boolean, optional): Include attribute info (default: true)
274 | - `maxResponseBytes` (number, optional): Maximum size of returned JSON; defaults to 200KB
275 | - Legacy `maxBytes` is still accepted and treated as a response cap
276 | - XPath examples:
277 | - Get all elements: `//tagname`
278 | - Get elements with specific attribute: `//tagname[@attr="value"]`
279 | - Get text content: `//tagname/text()`
280 | - Parses full file; response is truncated to fit limits as needed
281 |
282 | - **xml_structure**
283 | - Analyze XML structure
284 | - Inputs:
285 | - `path` (string): Path to the XML file
286 | - `maxDepth` (number, optional): How deep to analyze (default: 2)
287 | - `includeAttributes` (boolean, optional): Include attribute analysis (default: true)
288 | - `maxResponseBytes` (number, optional): Maximum size of returned JSON; defaults to 200KB
289 | - Legacy `maxBytes` is still accepted and treated as a response cap
290 | - Returns statistical information about elements, attributes, namespaces, and hierarchy
291 | - Parses full file; returns a summarized structure if response exceeds limit
292 |
293 | - **regex_search_content**
294 | - Search file contents with a regular expression
295 | - Inputs:
296 | - `path` (string): Root directory to search
297 | - `regex` (string): Regular expression pattern
298 | - `filePattern` (string, optional): Glob to limit files (default: `*`)
299 | - `maxDepth` (number, optional): Directory depth (default: 2)
300 | - `maxFileSize` (number, optional): Maximum file size in bytes (default: 10MB)
301 | - `maxResults` (number, optional): Maximum number of files with matches (default: 50)
302 | - Returns a human-readable summary of files and matching lines
303 |
304 | ### Argument Validation
305 |
306 | The server validates all tool inputs using the `parseArgs` helper. `parseArgs` parses incoming data against the appropriate TypeBox schema and throws an error when the arguments do not match the expected structure.
307 |
308 | ## Permissions & Security
309 |
310 | The server implements a comprehensive security model with granular permission controls:
311 |
312 | ### Directory Access Control
313 | - Operations are strictly limited to directories specified during startup via `args`
314 | - All operations (including symlink targets) must remain within allowed directories
315 | - Path validation ensures no directory traversal or access outside allowed paths
316 |
317 | ### Permission Flags
318 | - **--readonly**: Enforces read-only mode, overriding all other permission flags
319 | - **--full-access**: Enables all operations (create, edit, move, delete)
320 | - Individual permission flags (require explicit enabling unless --full-access is set):
321 | - **--allow-create**: Allow creation of new files and directories
322 | - **--allow-edit**: Allow modification of existing files
323 | - **--allow-move**: Allow moving/renaming files and directories
324 | - **--allow-delete**: Allow deletion of files and directories
325 |
326 | **Default Behavior**: If no permission flags are specified, the server runs in read-only mode. To enable any write operations, you must use either `--full-access` or specific `--allow-*` flags.
327 |
328 | ### Symlink Handling
329 | - By default, symlinks are followed when both the link and target are within allowed directories
330 | - **--no-follow-symlinks**: Treat symlinks as regular files and refuse to traverse their targets, preventing escapes via linked paths
331 |
332 | See `examples/mcp_permissions.json` for sample configurations using these flags.
333 |
334 | ## Build
335 |
336 | To compile the project locally, run:
337 |
338 | ```bash
339 | bun run build
340 | ```
341 |
342 | Run the test suite with:
343 |
344 | ```bash
345 | bun test
346 | ```
347 |
348 | Docker build:
349 |
350 | ```bash
351 | docker build -t mcp/filesystem -f Dockerfile .
352 | ```
353 |
354 | ## License
355 |
356 | This MCP server is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
```
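As orientation for the API section above, here is a minimal client-side sketch that spawns the built server over stdio and calls `regex_search_content`, using the same `@modelcontextprotocol/sdk` client the repo's tests rely on. The directory path and argument values are illustrative assumptions, not part of the repository.

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Assumption: the server has been built (`bun run build`) and
// /tmp/allowed-directory exists and is readable.
const transport = new StdioClientTransport({
  command: "node",
  args: ["dist/index.js", "/tmp/allowed-directory", "--readonly"],
});
const client = new Client({ name: "readme-demo", version: "1.0" });
await client.connect(transport);

// Inputs mirror the regex_search_content tool documented in the README.
const result = await client.callTool({
  name: "regex_search_content",
  arguments: { path: ".", regex: "TODO", filePattern: "*.ts", maxResults: 10 },
});
console.log(result); // human-readable summary of files and matching lines

await client.close();
```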
--------------------------------------------------------------------------------
/.early.coverage/v8/coverage-final.json:
--------------------------------------------------------------------------------
```json
1 | {}
2 |
```
--------------------------------------------------------------------------------
/bunfig.toml:
--------------------------------------------------------------------------------
```toml
1 | [test]
2 | root = "test"
3 |
4 | [bundle]
5 | entryPoints = ["index.ts"]
6 | outdir = "dist"
7 |
```
--------------------------------------------------------------------------------
/glama.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "$schema": "https://glama.ai/mcp/schemas/server.json",
3 | "maintainers": [
4 | "mateicanavra"
5 | ]
6 | }
7 |
```
--------------------------------------------------------------------------------
/examples/mcp_glama.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem": {
4 | "url": "https://glama.ai/rawr-ai/mcp-filesystem"
5 | }
6 | }
7 | }
8 |
```
--------------------------------------------------------------------------------
/examples/mcp_sse.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem": {
4 | "transport": "sse",
5 | "url": "http://localhost:8080/sse"
6 | }
7 | }
8 | }
9 |
```
--------------------------------------------------------------------------------
/demo/nested/deep/hidden.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "secret": true,
3 | "level": "deep",
4 | "note": "This file is intentionally placed deep to test recursive listing and access."
5 | }
6 |
```
--------------------------------------------------------------------------------
/src/handlers/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | export * from './file-handlers.js';
2 | export * from './directory-handlers.js';
3 | export * from './utility-handlers.js';
4 | export * from './xml-handlers.js';
5 | export * from './json-handlers.js';
```
--------------------------------------------------------------------------------
/demo/info.txt:
--------------------------------------------------------------------------------
```
1 | This is a basic text file for demo purposes.
2 |
3 | You can use this file to test read, write, modify, or delete operations within the MCP Filesystem Server.
4 |
5 | Feel free to append, overwrite, or search for text in this file!
6 |
```
--------------------------------------------------------------------------------
/test/json/users.json:
--------------------------------------------------------------------------------
```json
1 | {"users": [{"id": 1, "name": "John", "age": 30, "address": {"city": "New York"}}, {"id": 2, "name": "Jane", "age": 25, "address": {"city": "Boston"}}, {"id": 3, "name": "Bob", "age": 35, "address": {"city": "New York"}}]}
2 |
```
--------------------------------------------------------------------------------
/examples/mcp_roo.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-home": {
4 | "command": "node",
5 | "args": ["dist/index.js", "$HOME/allowed/path"]
6 | },
7 | "filesystem-env": {
8 | "command": "node",
9 | "args": ["dist/index.js", "${ALLOWED_PATH}"]
10 | }
11 | }
12 | }
13 |
```
--------------------------------------------------------------------------------
/examples/mcp_stdio.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-home": {
4 | "command": "node",
5 | "args": ["dist/index.js", "$HOME/allowed/path"]
6 | },
7 | "filesystem-env": {
8 | "command": "node",
9 | "args": ["dist/index.js", "${ALLOWED_PATH}"]
10 | }
11 | }
12 | }
13 |
```
--------------------------------------------------------------------------------
/demo/data.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "message": "Hello, MCP Filesystem Server!",
3 | "version": 1,
4 | "features": [
5 | "read",
6 | "write",
7 | "modify",
8 | "search"
9 | ],
10 | "active": true,
11 | "nested": {
12 | "level": 1,
13 | "description": "For testing JSON structure traversal"
14 | }
15 | }
16 |
```
--------------------------------------------------------------------------------
/demo/archive/subdir/old_data.txt:
--------------------------------------------------------------------------------
```
1 | This is an old archive file.
2 |
3 | * Path: archive/subdir/old_data.txt
4 | * Purpose: Test file for demonstrating the server's ability to list, move, or delete files in nested/archive directories.
5 |
6 | Contents:
7 | - Created for MCP Filesystem Server demo.
8 | - Safe to manipulate or remove for testing purposes.
9 |
```
--------------------------------------------------------------------------------
/examples/mcp_http.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-home": {
4 | "command": "node",
5 | "args": ["dist/index.js", "--http", "--port", "8080", "$HOME/allowed/path"]
6 | },
7 | "filesystem-env": {
8 | "command": "node",
9 | "args": ["dist/index.js", "--http", "--port", "8080", "${ALLOWED_PATH}"]
10 | }
11 | }
12 | }
13 |
```
--------------------------------------------------------------------------------
/demo/archive/log.txt:
--------------------------------------------------------------------------------
```
1 | This is a placeholder log file for the MCP Filesystem Server demo.
2 |
3 | You can use this file to test operations such as reading, listing, archiving, deleting, or modifying files in the `archive` directory.
4 |
5 | Log entries can be appended here during tests, or you may clear/overwrite the content while experimenting.
6 |
```
--------------------------------------------------------------------------------
/examples/mcp_cursor.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-home": {
4 | "command": "node",
5 | "args": ["dist/index.js", "--no-follow-symlinks", "--readonly", "$HOME/allowed/path"]
6 | },
7 | "filesystem-env": {
8 | "command": "node",
9 | "args": ["dist/index.js", "--no-follow-symlinks", "--readonly", "${ALLOWED_PATH}"]
10 | }
11 | }
12 | }
13 |
```
--------------------------------------------------------------------------------
/demo/nested/info.md:
--------------------------------------------------------------------------------
```markdown
1 | # Nested Directory — Info
2 |
3 | This file is located in `demo/nested/` and exists to test directory operation scenarios such as:
4 |
5 | - Listing files in nested folders
6 | - Renaming or searching for files in deeper paths
7 | - Modifying content in files not at the root level
8 |
9 | Feel free to move, delete, or modify this file as part of your MCP Filesystem Server demonstrations.
```
--------------------------------------------------------------------------------
/examples/mcp_docker.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-home": {
4 | "command": "docker",
5 | "args": ["run", "--rm", "-v", "$HOME/allowed:/data", "mcp/filesystem", "/data"]
6 | },
7 | "filesystem-env": {
8 | "command": "docker",
9 | "args": ["run", "--rm", "-v", "${ALLOWED_PATH}:/data", "mcp/filesystem", "/data"]
10 | },
11 | "filesystem-container": {
12 | "command": "docker",
13 | "args": ["run", "--rm", "mcp/filesystem", "/data"]
14 | }
15 | }
16 | }
17 |
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "module": "NodeNext",
5 | "moduleResolution": "NodeNext",
6 | "declaration": true,
7 | "sourceMap": true,
8 | "outDir": "./dist",
9 | "removeComments": true,
10 | "esModuleInterop": true,
11 | "forceConsistentCasingInFileNames": true,
12 | "strict": true,
13 | "skipLibCheck": true,
14 | "types": ["bun-types"],
15 | "rootDir": "."
16 | },
17 | "include": ["./**/*.ts"],
18 | "exclude": ["node_modules", "dist"]
19 | }
20 |
```
--------------------------------------------------------------------------------
/examples/mcp_permissions.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcpServers": {
3 | "filesystem-readonly": {
4 | "command": "node",
5 | "args": ["dist/index.js", "$HOME/allowed/path", "--readonly"]
6 | },
7 | "filesystem-full": {
8 | "command": "node",
9 | "args": ["dist/index.js", "$HOME/allowed/path", "--full-access", "--no-follow-symlinks"]
10 | },
11 | "filesystem-selective": {
12 | "command": "node",
13 | "args": ["dist/index.js", "$HOME/allowed/path", "--allow-create", "--allow-edit"]
14 | }
15 | }
16 | }
17 |
```
--------------------------------------------------------------------------------
/ai/graph/mcp-config.yaml:
--------------------------------------------------------------------------------
```yaml
1 | # Configuration for project: filesystem
2 | services:
3 | - id: filesystem # Service ID (used for default naming)
4 | # container_name: "custom-name" # Optional: Specify custom container name
5 | # port_default: 8001 # Optional: Specify custom host port
6 | group_id: "filesystem" # Graph group ID
7 | entity_dir: "entities" # Relative path to entity definitions within ai/graph
8 | # environment: # Optional: Add non-secret env vars here
9 | # GRAPHITI_LOG_LEVEL: "debug"
10 |
```
--------------------------------------------------------------------------------
/src/utils/schema-utils.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Value } from '@sinclair/typebox/value';
2 | import type { Static, TSchema } from '@sinclair/typebox';
3 |
4 | export function parseArgs<T extends TSchema>(schema: T, args: unknown, context: string): Static<T> {
5 | try {
6 | // Use only the Assert step to ensure strict validation
7 | return Value.Parse(['Assert'], schema, args);
8 | } catch {
9 | const errors = [...Value.Errors(schema, args)]
10 | .map(e => `${e.path}: ${e.message}`)
11 | .join('; ');
12 | throw new Error(`Invalid arguments for ${context}: ${errors}`);
13 | }
14 | }
15 |
```
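A usage sketch for `parseArgs`, assuming a TypeBox schema in the style of those in `src/schemas`; the schema and values below are illustrative, not from the repo.

```typescript
import { Type } from '@sinclair/typebox';
import { parseArgs } from './schema-utils.js';

// Illustrative schema (not the repo's actual read_file schema).
const ReadFileArgsSchema = Type.Object({
  path: Type.String(),
  maxBytes: Type.Number(),
});

// Valid input is returned with the inferred static type.
const args = parseArgs(ReadFileArgsSchema, { path: 'demo/info.txt', maxBytes: 1024 }, 'read_file');
console.log(args.maxBytes); // 1024

// Invalid input throws, with error paths aggregated by the catch block above.
try {
  parseArgs(ReadFileArgsSchema, { path: 'demo/info.txt', maxBytes: 'big' }, 'read_file');
} catch (err) {
  console.error((err as Error).message);
  // e.g. "Invalid arguments for read_file: /maxBytes: Expected number"
}
```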
--------------------------------------------------------------------------------
/ai/graph/rools/playbooks/pb_registry.md:
--------------------------------------------------------------------------------
```markdown
1 | ## Initial Playbook Examples
2 |
3 | ### Head Coach
4 |
5 | - Game plan/strategy: (Test Driven Development) Design, Build, Iterate <-handoff-> Review, Test, Debug, Report <-handoff-> Deploy, Publish, Update
6 |
7 | ### Offensive Coordinator
8 |
9 | - (No examples listed in the source file)
10 |
11 | ### Defensive Coordinator
12 |
13 | - (QA Loop) Review, Test, Debug, Report
14 | - (Security Audit) Security Review, Security Test, Security Debug, Security Report
15 |
16 | ### Special Teams Coordinator
17 |
18 | - (Publish Loop) Build, Deploy, Package, CI/CD
19 | - (Feedback Loop) Ingest, Process, Analyze, Suggest
20 | - (Documentation Loop) Review, Document, Publish, Distribute
```
--------------------------------------------------------------------------------
/ai/logs/dev/2025-04-06-regex-content-search.md:
--------------------------------------------------------------------------------
```markdown
1 | # Development Log: 2025-04-06
2 |
3 | ## Task: Add Regex File Content Search Feature
4 |
5 | **Summary:**
6 | Implemented a new feature allowing users to search file contents using regular expressions. This enhances the file system interaction capabilities by providing more powerful and flexible search options.
7 |
8 | **Details:**
9 | * **Feature Branch:** `feature/regex-content-search`
10 | * **Merged To:** `main`
11 | * **Key Changes:**
12 | * Added necessary handler logic for regex search.
13 | * Defined corresponding schemas for the operation.
14 | * Integrated the feature into the existing file system server.
15 | * **Status:** Merged and completed.
```
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: ["main"]
6 | pull_request:
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v3
13 | - uses: oven-sh/setup-bun@v1
14 | with:
15 | bun-version: latest
16 | - name: Cache Bun
17 | uses: actions/cache@v3
18 | with:
19 | path: |
20 | ~/.bun
21 | node_modules
22 | key: bun-${{ runner.os }}-${{ hashFiles('**/bun.lock') }}
23 | restore-keys: |
24 | bun-${{ runner.os }}-
25 | - name: Install dependencies
26 | run: bun install
27 | - name: Build
28 | run: bun run build
29 | - name: Test
30 | run: bun test
31 |
```
--------------------------------------------------------------------------------
/repomix.config.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "output": {
3 | "filePath": "mcp-filesystem-repo.md",
4 | "style": "markdown",
5 | "parsableStyle": false,
6 | "fileSummary": true,
7 | "directoryStructure": true,
8 | "removeComments": false,
9 | "removeEmptyLines": false,
10 | "compress": false,
11 | "topFilesLength": 5,
12 | "showLineNumbers": false,
13 | "copyToClipboard": false,
14 | "git": {
15 | "sortByChanges": true,
16 | "sortByChangesMaxCommits": 100
17 | }
18 | },
19 | "include": [],
20 | "ignore": {
21 | "useGitignore": true,
22 | "useDefaultPatterns": true,
23 | "customPatterns": [
24 | "**/dist/**",
25 | "**/node_modules/**"
26 | ]
27 | },
28 | "security": {
29 | "enableSecurityCheck": true
30 | },
31 | "tokenCount": {
32 | "encoding": "o200k_base"
33 | }
34 | }
```
--------------------------------------------------------------------------------
/demo/sample.xml:
--------------------------------------------------------------------------------
```
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <library>
3 | <name>MCP Filesystem Demo</name>
4 | <description>
5 | Example XML document to demonstrate conversion, structure parsing, and querying features of the MCP server.
6 | </description>
7 | <books>
8 | <book id="1" genre="sci-fi">
9 | <title>Hyperion</title>
10 | <author>Dan Simmons</author>
11 | <published>1989</published>
12 | <checkedOut>false</checkedOut>
13 | </book>
14 | <book id="2" genre="fantasy">
15 | <title>The Hobbit</title>
16 | <author>J.R.R. Tolkien</author>
17 | <published>1937</published>
18 | <checkedOut>true</checkedOut>
19 | </book>
20 | </books>
21 | <users>
22 | <user>
23 | <username>alice</username>
24 | <permissions>read,write</permissions>
25 | </user>
26 | <user>
27 | <username>bob</username>
28 | <permissions>read</permissions>
29 | </user>
30 | </users>
31 | </library>
32 |
```
--------------------------------------------------------------------------------
/src/schemas/directory-operations.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Type, Static } from "@sinclair/typebox";
2 |
3 | export const CreateDirectoryArgsSchema = Type.Object({
4 | path: Type.String(),
5 | });
6 | export type CreateDirectoryArgs = Static<typeof CreateDirectoryArgsSchema>;
7 |
8 | export const ListDirectoryArgsSchema = Type.Object({
9 | path: Type.String(),
10 | });
11 | export type ListDirectoryArgs = Static<typeof ListDirectoryArgsSchema>;
12 |
13 | export const DirectoryTreeArgsSchema = Type.Object({
14 | path: Type.String(),
15 | maxDepth: Type.Integer({
16 | minimum: 1,
17 | description: 'Maximum depth to traverse. Must be a positive integer. Handler default: 2.'
18 | }),
19 | excludePatterns: Type.Optional(
20 | Type.Array(Type.String(), {
21 | default: [],
22 | description: 'Glob patterns for files/directories to exclude (e.g., "*.log", "node_modules").'
23 | })
24 | )
25 | });
26 | export type DirectoryTreeArgs = Static<typeof DirectoryTreeArgsSchema>;
27 |
28 | export const DeleteDirectoryArgsSchema = Type.Object({
29 | path: Type.String(),
30 | recursive: Type.Boolean({
31 | default: false,
32 | description: 'Whether to recursively delete the directory and all contents'
33 | })
34 | });
35 | export type DeleteDirectoryArgs = Static<typeof DeleteDirectoryArgsSchema>;
36 |
```
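To make the `directory_tree` shape concrete, here is a minimal, self-contained walker producing the name/type/children structure the README describes. This is a sketch only, not the repository's actual handler (which lives in `src/handlers/directory-handlers.ts` and additionally applies path validation and `excludePatterns`).

```typescript
import { promises as fs } from 'fs';
import path from 'path';

interface TreeEntry {
  name: string;
  type: 'file' | 'directory';
  children?: TreeEntry[];
}

// Recursively build a tree up to maxDepth levels below the root.
async function buildTree(root: string, maxDepth: number): Promise<TreeEntry> {
  const stats = await fs.stat(root);
  const entry: TreeEntry = {
    name: path.basename(root),
    type: stats.isDirectory() ? 'directory' : 'file',
  };
  if (entry.type === 'directory' && maxDepth > 0) {
    const names = await fs.readdir(root);
    entry.children = await Promise.all(
      names.map((name) => buildTree(path.join(root, name), maxDepth - 1)),
    );
  }
  return entry;
}

console.log(JSON.stringify(await buildTree('demo', 2), null, 2));
```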
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
1 | FROM oven/bun
2 |
3 | # Set the application directory
4 | WORKDIR /app
5 |
6 | # Copy application source code and configuration files
7 | # Using absolute paths for clarity and to avoid issues if WORKDIR changes.
8 | COPY src /app/src
9 | COPY index.ts /app/index.ts
10 | COPY package.json /app/package.json
11 | COPY bun.lock /app/bun.lock
12 | COPY tsconfig.json /app/tsconfig.json
13 |
14 | # Set environment to production
15 | ENV NODE_ENV=production
16 |
17 | # Install production dependencies using the lockfile for reproducible builds.
18 | # The --production flag ensures devDependencies are not installed.
19 | RUN bun install --production --frozen-lockfile
20 |
21 | # Define the entrypoint for the container.
22 | # This specifies the base command to run, which is the bun executable
23 | # followed by the path to our main script. Using an absolute path is crucial
24 | # because the container's working directory will be changed at runtime.
25 | ENTRYPOINT ["bun", "/app/index.ts"]
26 |
27 | # Define the default command arguments.
28 | # These will be appended to the ENTRYPOINT. The user can override these
29 | # arguments in the `docker run` command. Providing `--help` as the default
30 | # is a good practice, as it makes the container's usage self-documenting.
31 | CMD ["--help"]
32 |
```
--------------------------------------------------------------------------------
/ai/logs/introduce_test_suite/workflow_diagram.md:
--------------------------------------------------------------------------------
```markdown
1 | ```mermaid
2 | sequenceDiagram
3 | participant Orchestrator
4 | participant Diagram
5 | participant Git
6 | participant Analyze
7 | participant Test
8 | participant Code
9 |
10 | Orchestrator ->> Diagram: 1. Request diagram generation
11 | Diagram -->> Orchestrator: 2. Return Mermaid syntax
12 |
13 | Orchestrator ->> Git: 3. Request Git environment preparation (stash, branch, apply stash)
14 | Git -->> Orchestrator: 4. Confirm Git preparation
15 |
16 | Orchestrator ->> Analyze: 5. Request test context analysis (command, readiness)
17 | Analyze -->> Orchestrator: 6. Return test command & readiness assessment
18 |
19 | Orchestrator ->> Test: 7. Request test execution
20 | Test -->> Orchestrator: 8. Return test results summary
21 |
22 | Orchestrator ->> Code: 9. Request log directory/file creation
23 | Code -->> Orchestrator: 10. Return log file path
24 |
25 | Orchestrator ->> Analyze: 11. Request log content formatting
26 | Analyze -->> Orchestrator: 12. Return formatted Markdown content
27 |
28 | Orchestrator ->> Code: 13. Request writing log content to file
29 | Code -->> Orchestrator: 14. Confirm file write
30 |
31 | Orchestrator ->> Git: 15. Request log file commit
32 | Git -->> Orchestrator: 16. Confirm commit
33 | ```
```
--------------------------------------------------------------------------------
/test/utils/regexUtils.ts:
--------------------------------------------------------------------------------
```typescript
1 | import path from 'path';
2 |
3 | export interface RegexMatch { line: number; text: string; }
4 | export interface FileResult { file: string; matches: RegexMatch[]; }
5 |
6 | export function parseRegexSearchOutput(text: string): FileResult[] {
7 | const blocks = text.trim().split(/\n\n+/).filter(Boolean);
8 | return blocks.map(block => {
9 | const lines = block.split(/\n/);
10 | const fileLine = lines.shift() || '';
11 | const file = fileLine.replace(/^File:\s*/, '');
12 | const matches = lines.map(l => {
13 | const m = l.match(/Line\s+(\d+):\s*(.*)/);
14 | return m ? { line: parseInt(m[1], 10), text: m[2] } : { line: 0, text: l };
15 | });
16 | return { file: path.normalize(file), matches };
17 | });
18 | }
19 |
20 | // Helper to safely extract text content from a CallToolResult
21 | import { CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
22 | import type { CallToolResult, TextContent } from '@modelcontextprotocol/sdk/types.js';
23 |
24 | export function getTextContent(result: unknown): string {
25 | const parsed = CallToolResultSchema.parse(result) as CallToolResult;
26 | const first = parsed.content[0];
27 | if (!first || first.type !== 'text') {
28 | throw new Error('Expected first content element to be text');
29 | }
30 | return (first as TextContent).text;
31 | }
32 |
```
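A usage sketch for `parseRegexSearchOutput`, assuming the `File: ...` / `Line N: ...` summary shape that `regex_search_content` returns; the sample text below is illustrative.

```typescript
import { parseRegexSearchOutput } from './regexUtils.js';

// Illustrative output in the "File:" / "Line N:" shape the parser expects.
const sample = [
  'File: src/handlers/file-handlers.ts',
  'Line 12: // TODO: tighten error handling',
  '',
  'File: src/utils/path-utils.ts',
  'Line 3: // TODO: normalize separators',
].join('\n');

const results = parseRegexSearchOutput(sample);
console.log(results[0].file); // src/handlers/file-handlers.ts (normalized)
console.log(results[0].matches[0]); // { line: 12, text: '// TODO: tighten error handling' }
```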
--------------------------------------------------------------------------------
/src/utils/typebox-zod.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ZodFromTypeBox } from "@sinclair/typemap";
2 | import type { TSchema } from "@sinclair/typebox";
3 | import type { FastMCP, Tool } from "fastmcp";
4 |
5 | /**
6 | * Convert a TypeBox schema to a Zod schema compatible with FastMCP.
7 | * Returns undefined when no schema is provided to match FastMCP API shape.
8 | */
9 | export function toZodParameters(schema?: TSchema) {
10 | return schema ? (ZodFromTypeBox(schema) as unknown) : undefined;
11 | }
12 |
13 | /**
14 | * Convenience helper to register a tool defined with TypeBox parameters.
15 | * This ensures parameters are converted to Zod so MCP clients (Cursor/Claude)
16 | * recognize the schema without xsschema vendor issues.
17 | */
18 | // FastMCP's generic constraint is FastMCPSessionAuth = Record<string, unknown> | undefined.
19 | // Mirror that here to avoid importing non-exported types from fastmcp.
20 | export function addTypeBoxTool<TSession extends Record<string, unknown> | undefined = Record<string, unknown> | undefined>(
21 | server: FastMCP<TSession>,
22 | tool: {
23 | name: string;
24 | description: string;
25 | parameters?: TSchema;
26 | execute: Tool<TSession>["execute"];
27 | },
28 | ) {
29 | server.addTool({
30 | name: tool.name,
31 | description: tool.description,
32 | parameters: toZodParameters(tool.parameters) as any,
33 | execute: tool.execute as any,
34 | } as unknown as Tool<TSession>);
35 | }
36 |
37 |
38 |
```
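A registration sketch for `addTypeBoxTool`, assuming a FastMCP server instance; the tool name, schema, and handler below are illustrative, not tools from this repo.

```typescript
import { FastMCP } from 'fastmcp';
import { Type } from '@sinclair/typebox';
import { addTypeBoxTool } from './typebox-zod.js';

const server = new FastMCP({ name: 'demo-server', version: '1.0.0' });

// TypeBox parameters are converted to Zod internally, so MCP clients
// receive a schema shape they recognize.
addTypeBoxTool(server, {
  name: 'echo',
  description: 'Echo back the provided message.',
  parameters: Type.Object({ message: Type.String() }),
  execute: async (args) => (args as { message: string }).message,
});

await server.start({ transportType: 'stdio' });
```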
--------------------------------------------------------------------------------
/ai/graph/rools/playbooks/pb_iterative_execution_verification.md:
--------------------------------------------------------------------------------
```markdown
1 | # Playbook: Iterative Execution & Verification
2 |
3 | **Purpose:** To reliably execute complex tasks involving modifications by incorporating structured planning, execution, verification, and feedback-driven iteration loops.
4 |
5 | **Key Roles (Generic):**
6 | * **Orchestrator:** Manages the process, dispatches agents, interprets results, guides iteration.
7 | * **Planner (Optional/Implicit):** Defines the initial strategy.
8 | * **Executor:** Performs the core modification tasks *as defined by the plan or instructions*.
9 | * **Verifier:** Assesses the Executor's work against *the defined objective, requirements, and/or quality standards*.
10 | * **Feedback Source (Optional):** Provides input on plans or results.
11 |
12 | **Workflow Steps:**
13 | 1. **Initiation & Planning:** Define objective, formulate plan (optional plan review for robustness).
14 | 2. **Execution:** Dispatch Executor agent to perform planned actions.
15 | 3. **Verification:** Dispatch Verifier agent to assess results *against defined criteria* and report findings.
16 | 4. **Evaluation & Decision:** Orchestrator/User evaluates verification report.
17 | * If Success -> Proceed to Step 6 (Completion).
18 | * If Issues -> Proceed to Step 5 (Iteration).
19 | 5. **Iteration Loop:**
20 | * Synthesize feedback *and verification findings* into corrective instructions.
21 | * Dispatch Executor for revision.
22 | * Return to Step 3 (Verification).
23 | 6. **Completion:** Orchestrator confirms successful task completion.
24 |
25 | This pattern provides a structured approach for tasks requiring modification and quality assurance through iterative refinement.
```
--------------------------------------------------------------------------------
/test/sample.xml:
--------------------------------------------------------------------------------
```
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <catalog xmlns="http://example.org/catalog">
3 | <book id="bk101" category="fiction">
4 | <author>Gambardella, Matthew</author>
5 | <title>XML Developer's Guide</title>
6 | <genre>Computer</genre>
7 | <price>44.95</price>
8 | <publish_date>2000-10-01</publish_date>
9 | <description>An in-depth look at creating applications with XML.</description>
10 | </book>
11 | <book id="bk102" category="fiction">
12 | <author>Ralls, Kim</author>
13 | <title>Midnight Rain</title>
14 | <genre>Fantasy</genre>
15 | <price>5.95</price>
16 | <publish_date>2000-12-16</publish_date>
17 | <description>A former architect battles corporate zombies, an evil sorceress, and her own childhood to become queen of the world.</description>
18 | </book>
19 | <book id="bk103" category="non-fiction">
20 | <author>Corets, Eva</author>
21 | <title>Maeve Ascendant</title>
22 | <genre>Fantasy</genre>
23 | <price>5.95</price>
24 | <publish_date>2000-11-17</publish_date>
25 | <description>After the collapse of a nanotechnology society, the young survivors lay the foundation for a new society.</description>
26 | </book>
27 | <magazine id="mg101" frequency="monthly">
28 | <title>Programming Today</title>
29 | <publisher>Tech Media</publisher>
30 | <price>6.50</price>
31 | <issue>125</issue>
32 | <publish_date>2023-01-15</publish_date>
33 | <articles>
34 | <article>
35 | <author>Jane Smith</author>
36 | <title>Modern XML Processing</title>
37 | </article>
38 | <article>
39 | <author>John Doe</author>
40 | <title>XPath Deep Dive</title>
41 | </article>
42 | </articles>
43 | </magazine>
44 | </catalog>
```
--------------------------------------------------------------------------------
/ai/graph/rools/playbooks/pb_development_logging.md:
--------------------------------------------------------------------------------
```markdown
1 | # Playbook: Development Logging
2 |
3 | ## Purpose
4 |
5 | This playbook outlines the standard process for generating development logs, ensuring consistency and clarity in documenting development tasks.
6 |
7 | ## Key Roles/Modes
8 |
9 | * **Architect:** Defines log storage path and filename conventions.
10 | * **Analyze:** Reviews conversation history and generates a detailed summary of the development task, then formats the summary into a structured Markdown log entry.
11 | * **Code:** Writes the formatted log entry to the file system according to the defined conventions.
12 | * **Git:** Commits the newly created log file to the repository.
13 |
14 | ## Workflow Steps
15 |
16 | 1. **Define Log Convention:**
17 | * Use `architect` mode to determine the log storage path and filename convention. This ensures logs are stored in a consistent and easily accessible manner.
18 | 2. **Summarize History:**
19 | * Use `analyze` mode to review the conversation history related to the development task.
20 | * Generate a detailed summary of the task, including the problem addressed, the solution implemented, and any challenges encountered.
21 | 3. **Format Log Entry:**
22 | * Use `analyze` mode to format the summary into a structured Markdown log entry.
23 | * Include relevant details such as the date, task description, and key steps taken.
24 | 4. **Write Log File:**
25 | * Use `code` mode to write the formatted log entry to the file system.
26 | * Adhere to the log storage path and filename convention defined in step 1.
27 | 5. **Commit Log File:**
28 | * Use `git` mode to commit the newly created log file to the repository.
29 | * Include a descriptive commit message that clearly identifies the task being logged.
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "@modelcontextprotocol/server-filesystem",
3 | "version": "0.6.2",
4 | "description": "MCP server for filesystem access",
5 | "license": "MIT",
6 | "author": "Anthropic, PBC (https://anthropic.com)",
7 | "homepage": "https://modelcontextprotocol.io",
8 | "bugs": "https://github.com/modelcontextprotocol/servers/issues",
9 | "type": "module",
10 | "bin": {
11 | "mcp-server-filesystem": "dist/index.js"
12 | },
13 | "files": [
14 | "dist"
15 | ],
16 | "scripts": {
17 | "build": "bun run tsc && chmod +x dist/*.js",
18 | "watch": "bun run tsc --watch",
19 | "test": "bun test",
20 | "inspect": "bun run build && npx -y fastmcp@latest inspect dist/index.js -- --cwd --readonly",
21 | "demo:node": "bun run demo:node:sse",
22 | "demo:node:sse": "bun run build && (cd demo && node ../dist/index.js --cwd --http --port 8090)",
23 | "demo:node:http": "bun run build && (cd demo && node ../dist/index.js --cwd --http --port 8090)",
24 | "demo:bun": "bun run demo:bun:sse",
25 | "demo:bun:sse": "cd demo && bun ../index.ts --cwd --http --port 8090",
26 | "demo:bun:http": "cd demo && bun ../index.ts --cwd --http --port 8090",
27 | "demo:docker": "./scripts/run-docker-demo.sh --cwd --http"
28 | },
29 | "dependencies": {
30 | "@modelcontextprotocol/sdk": "1.12.1",
31 | "@sinclair/typebox": "^0.34.33",
32 | "@sinclair/typemap": "^0.10.1",
33 | "@xmldom/xmldom": "^0.9.8",
34 | "ajv": "^8.17.1",
35 | "diff": "^8.0.2",
36 | "fast-xml-parser": "^5.2.5",
37 | "fastmcp": "^3.14.1",
38 | "jsonata": "^2.0.6",
39 | "jsonpath-plus": "^10.3.0",
40 | "minimatch": "^10.0.1",
41 | "type-fest": "^4.41.0",
42 | "xpath": "^0.0.34"
43 | },
44 | "devDependencies": {
45 | "@types/cross-spawn": "^6.0.6",
46 | "@types/diff": "^8.0.0",
47 | "@types/jsonpath-plus": "^5.0.5",
48 | "@types/minimatch": "^5.1.2",
49 | "bun-types": "^1.2.15",
50 | "typescript": "^5.8.3"
51 | }
52 | }
53 |
```
--------------------------------------------------------------------------------
/ai/graph/rools/orchestrator_SOPs.md:
--------------------------------------------------------------------------------
```markdown
1 | # Standard Operating Procedures
2 |
3 | ## Git Workflow
4 |
5 | All development work (features, fixes, refactoring) MUST be done on a dedicated feature branch created from the `main` branch.
6 |
7 | Work MUST be committed incrementally to the feature branch.
8 |
9 | Before merging, the work SHOULD be reviewed/verified (details may depend on the task).
10 |
11 | Once complete and verified, the feature branch MUST be merged back into the `main` branch.
12 |
13 | ## Development Logging
14 |
15 | Upon successful completion and merging of any significant development task, a development log entry MUST be created.
16 |
17 | The process outlined in `agents/orchestrate/playbooks/playbook_development_logging.md` MUST be followed to generate and commit this log entry to the `main` branch.
18 |
19 | ## Plan Review
20 |
21 | For complex or large-scale plans involving multiple agents or significant modifications, the Orchestrator SHOULD first submit the proposed plan to an `analyze` or `ask` agent for review and feedback before presenting it to the user or initiating the first step. The Orchestrator MUST incorporate feedback before finalizing the plan.
22 |
23 | ## General Workflow Principles
24 |
25 | 1. **Define Conventions:** Before generating artifacts (logs, code, documentation), establish and adhere to clear conventions (e.g., naming, storage paths, formats).
26 | 2. **Specify Before Execution:** Synthesize research findings or plans into a clear specification or set of instructions before initiating the main execution step.
27 | 3. **Verify & Iterate:** Verify task outputs against defined objectives, requirements, or specifications. Iterate based on verification results and feedback, refining the approach or output until criteria are met.
28 | 4. **Mode Switching for Content Generation:** Agents generating substantial content (e.g., Markdown, code) SHOULD switch to an appropriate mode (like `code` or `document`) within their task loop. After successful generation, they MUST return only the path to the created file.
```
--------------------------------------------------------------------------------
/test/transports/network.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from "bun:test";
2 | import { Client } from "@modelcontextprotocol/sdk/client/index.js";
3 | import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
4 | import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
5 | import spawn from "cross-spawn";
6 | import type { ChildProcess } from "child_process";
7 | import fs from "fs/promises";
8 | import path from "path";
9 | import { fileURLToPath } from "url";
10 | import { getTextContent } from "../utils/regexUtils.js";
11 |
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, "../fs_root");
14 | const serverCommand = "bun";
15 | const port = 8091;
16 | const serverArgs = [
17 | "dist/index.js",
18 | serverRoot,
19 | "--full-access",
20 | "--http",
21 | "--port",
22 | String(port),
23 | ];
24 | let proc: ChildProcess;
25 |
26 | describe("transport", () => {
27 | beforeAll(async () => {
28 | await fs.mkdir(serverRoot, { recursive: true });
29 | proc = spawn(serverCommand, serverArgs, { stdio: "inherit" });
30 | await new Promise((r) => setTimeout(r, 1000));
31 | });
32 |
33 | afterAll(async () => {
34 | proc.kill();
35 | });
36 |
37 | it("supports SSE", async () => {
38 | const client = new Client({ name: "sse-test", version: "1.0" });
39 | const transport = new SSEClientTransport(
40 | new URL(`http://localhost:${port}/sse`),
41 | );
42 | await client.connect(transport);
43 | const res = await client.callTool({
44 | name: "list_allowed_directories",
45 | arguments: {},
46 | });
47 | expect(getTextContent(res)).toContain(serverRoot);
48 | await transport.close();
49 | });
50 |
51 | it("supports HTTP streaming", async () => {
52 | const client = new Client({ name: "http-test", version: "1.0" });
53 | const transport = new StreamableHTTPClientTransport(
54 | new URL(`http://localhost:${port}/mcp`),
55 | );
56 | await client.connect(transport);
57 | const res = await client.callTool({
58 | name: "list_allowed_directories",
59 | arguments: {},
60 | });
61 | expect(getTextContent(res)).toContain(serverRoot);
62 | await transport.close();
63 | });
64 | });
65 |
```
--------------------------------------------------------------------------------
/ai/graph/entities/Tool.py:
--------------------------------------------------------------------------------
```python
1 | """Example of how to create a custom entity type for Graphiti MCP Server."""
2 |
3 | from pydantic import BaseModel, Field
4 |
5 |
6 | class Tool(BaseModel):
7 | """
8 | **AI Persona:** You are an expert entity extraction assistant.
9 |
10 | **Task:** Identify and extract information about Tool entities mentioned in the provided text context.
11 | A Tool represents a specific good or service that a company offers.
12 |
13 | **Context:** The user will provide text containing potential mentions of products.
14 |
15 | **Extraction Instructions:**
16 | Your goal is to accurately populate the fields (`name`, `description`, `category`)
17 | based *only* on information explicitly or implicitly stated in the text.
18 |
19 | 1. **Identify Core Mentions:** Look for explicit mentions of commercial goods or services.
20 | 2. **Extract Name:** Identify Tool names, especially proper nouns, capitalized words, or terms near trademark symbols (™, ®).
21 | 3. **Extract Description:** Synthesize a concise description using details about features, purpose, pricing, or availability found *only* in the text.
22 | 4. **Extract Category:** Determine the product category (e.g., "Software", "Hardware", "Service") based on the description or explicit mentions.
23 | 5. **Refine Details:** Pay attention to specifications, technical details, stated benefits, unique selling points, variations, or models mentioned, and incorporate relevant details into the description.
24 | 6. **Handle Ambiguity:** If information for a field is missing or unclear in the text, indicate that rather than making assumptions.
25 |
26 | **Output Format:** Respond with the extracted data structured according to this Pydantic model.
27 | """
28 |
29 | name: str = Field(
30 | ...,
31 | description='The specific name of the product as mentioned in the text.',
32 | )
33 | description: str = Field(
34 | ...,
35 | description='A concise description of the Tool, synthesized *only* from information present in the provided text context.',
36 | )
37 | category: str = Field(
38 | ...,
39 | description='The category the Tool belongs to (e.g., "Electronics", "Software", "Service") based on the text.',
40 | )
```
--------------------------------------------------------------------------------
/ai/graph/rools/playbooks/pb_discovery_driven_execution.md:
--------------------------------------------------------------------------------
```markdown
1 | # Playbook: Discovery-Driven Execution
2 |
3 | ## Purpose
4 |
5 | This playbook outlines a generic workflow for tasks where execution depends on first understanding external systems, APIs, file formats, or conventions. It emphasizes a research-driven approach to ensure successful task completion when faced with initial unknowns.
6 |
7 | ## Key Roles
8 |
9 | * **Researcher:** Gathers information about unknown conventions/constraints. Employs `search`, `read_file`, etc.
10 | * **Analyzer:** Synthesizes research findings into a clear execution specification.
11 | * **Executor:** Performs the task according to the derived specification. Employs `code`, `implement`, etc.
12 | * **Verifier:** Assesses results against the objective *and* the derived specification. Employs `review`, `test`.
13 |
14 | ## Workflow Steps
15 |
16 | 1. **Initiation & Planning:**
17 | * Define the objective of the task.
18 | * Identify potential unknowns regarding the execution method or conventions.
19 | 2. **Research/Discovery:**
20 | * Dispatch Researcher agent(s) to gather information about the unknown conventions/constraints.
21 | * Utilize tools like `search`, `read_file`, etc., to explore external systems, APIs, file formats, or conventions.
22 | 3. **Analysis & Specification:**
23 | * Dispatch Analyzer agent(s) to synthesize research findings into a clear execution specification.
24 | * Define the required format, API calls, file paths, or any other relevant details for successful execution.
25 | 4. **Execution:**
26 | * Dispatch Executor agent(s) to perform the task *according to the derived specification*.
27 | * Ensure the execution adheres to the identified conventions and constraints.
28 | 5. **Verification:**
29 | * Dispatch Verifier agent(s) to assess the results against the objective *and* the derived specification.
30 | * Check for adherence to the defined format, API calls, file paths, etc.
31 | 6. **Iteration Loop:**
32 | * If verification fails, analyze the reasons for failure.
33 | * Refine understanding/specification (back to Research or Analysis) or execution.
34 | * Re-verify the results.
35 | 7. **Completion:**
36 | * Confirm successful task completion based on the objective and the derived specification.
```
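
A minimal TypeScript sketch of the iteration loop this playbook describes; every function below (`research`, `analyze`, `execute`, `verify`) is a hypothetical stand-in for dispatching the corresponding agent, not an API from this repository:

```typescript
// Hypothetical agent dispatchers; declared only so the sketch type-checks.
type Findings = string[];
type Spec = { format: string; constraints: string[] };

declare function research(objective: string, lastResult?: unknown): Promise<Findings>;
declare function analyze(findings: Findings): Promise<Spec>;
declare function execute(objective: string, spec: Spec): Promise<unknown>;
declare function verify(result: unknown, objective: string, spec: Spec): Promise<boolean>;

async function discoveryDrivenRun(objective: string, maxIterations = 3): Promise<unknown> {
  // Steps 2-3: discover the unknowns, then derive an execution specification.
  let spec = await analyze(await research(objective));
  for (let i = 0; i < maxIterations; i++) {
    const result = await execute(objective, spec);            // Step 4
    if (await verify(result, objective, spec)) return result; // Step 5
    // Step 6: refine the specification from what the failure revealed, then retry.
    spec = await analyze(await research(objective, result));
  }
  throw new Error(`Verification failed after ${maxIterations} iterations`);
}
```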
--------------------------------------------------------------------------------
/test/transports/stdio.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from "bun:test";
2 | import { Client } from "@modelcontextprotocol/sdk/client/index.js";
3 | import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
4 | import { ListToolsResultSchema } from "@modelcontextprotocol/sdk/types.js";
5 | import fs from "fs/promises";
6 | import path from "path";
7 | import { fileURLToPath } from "url";
8 | import { getTextContent } from "../utils/regexUtils.js";
9 |
10 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
11 | const serverRoot = path.resolve(__dirname, "../fs_root");
12 |
13 | describe("stdio transport", () => {
14 | beforeAll(async () => {
15 | await fs.mkdir(serverRoot, { recursive: true });
16 | });
17 |
18 |   afterAll(async () => { // no shared teardown needed; each test closes its own transport
19 | });
20 |
21 | it("announces tools over stdio", async () => {
22 | const client = new Client({ name: "stdio-test", version: "1.0" });
23 | const transport = new StdioClientTransport({
24 | command: "node",
25 | args: [
26 | path.resolve(__dirname, "../../dist/index.js"),
27 | serverRoot,
28 | "--readonly",
29 | ],
30 | });
31 | await client.connect(transport as any);
32 | const res = await client.callTool({
33 | name: "list_allowed_directories",
34 | arguments: {},
35 | });
36 | expect(getTextContent(res)).toContain(serverRoot);
37 | await transport.close();
38 | });
39 |
40 | it("lists tools with parameter schemas", async () => {
41 | const client = new Client({ name: "stdio-list-tools", version: "1.0" });
42 | const transport = new StdioClientTransport({
43 | command: "node",
44 | args: [
45 | path.resolve(__dirname, "../../dist/index.js"),
46 | serverRoot,
47 | "--readonly",
48 | ],
49 | });
50 | await client.connect(transport as any);
51 | const list = await client.listTools();
52 | const parsed = ListToolsResultSchema.parse(list);
53 | const sample = parsed.tools.find((t) => t.name === "list_directory" || t.name === "read_file");
54 | expect(sample).toBeTruthy();
55 | expect(sample?.inputSchema).toBeTruthy();
56 |     // Sanity-check that the declared input schema is a non-empty JSON-Schema-like object
57 | const schemaKeys = Object.keys(sample!.inputSchema as Record<string, unknown>);
58 | expect(schemaKeys.length).toBeGreaterThan(0);
59 | await transport.close();
60 | });
61 | });
62 |
63 |
64 |
```
--------------------------------------------------------------------------------
/src/config/permissions.ts:
--------------------------------------------------------------------------------
```typescript
1 | import path from 'path';
2 | import { expandHome, normalizePath } from '../utils/path-utils.js';
3 |
4 | export interface Permissions {
5 | create: boolean;
6 | edit: boolean;
7 | move: boolean;
8 | delete: boolean;
9 | rename: boolean;
10 | fullAccess: boolean;
11 | }
12 |
13 | export interface ServerConfig {
14 | readonlyFlag: boolean;
15 | noFollowSymlinks: boolean;
16 | permissions: Permissions;
17 | allowedDirectories: string[];
18 | }
19 |
20 | export function parseCommandLineArgs(args: string[]): ServerConfig {
21 | // Remove flags from args and store them
22 | const readonlyFlag = args.includes('--readonly');
23 | const noFollowSymlinks = args.includes('--no-follow-symlinks');
24 | const fullAccessFlag = args.includes('--full-access');
25 |
26 | // Granular permission flags
27 | const allowCreate = args.includes('--allow-create');
28 | const allowEdit = args.includes('--allow-edit');
29 | const allowMove = args.includes('--allow-move');
30 | const allowDelete = args.includes('--allow-delete');
31 | const allowRename = args.includes('--allow-rename');
32 |
33 | // Permission calculation
34 | // readonly flag overrides all other permissions as a safety mechanism
35 | // fullAccess enables all permissions unless readonly is set
36 | // individual allow flags enable specific permissions unless readonly is set
37 | const permissions: Permissions = {
38 | create: !readonlyFlag && (fullAccessFlag || allowCreate),
39 | edit: !readonlyFlag && (fullAccessFlag || allowEdit),
40 | move: !readonlyFlag && (fullAccessFlag || allowMove),
41 | delete: !readonlyFlag && (fullAccessFlag || allowDelete),
42 | rename: !readonlyFlag && (fullAccessFlag || allowRename),
43 | // fullAccess is true only if the flag is explicitly set and not in readonly mode
44 | fullAccess: !readonlyFlag && fullAccessFlag
45 | };
46 |
47 | // Remove flags from args
48 | const cleanArgs = args.filter(arg => !arg.startsWith('--'));
49 |
50 | if (cleanArgs.length === 0) {
51 | throw new Error(
52 | "Usage: mcp-server-filesystem [--full-access] [--readonly] [--no-follow-symlinks] " +
53 | "[--allow-create] [--allow-edit] [--allow-move] [--allow-delete] [--allow-rename] " +
54 | "<allowed-directory> [additional-directories...]"
55 | );
56 | }
57 |
58 | return {
59 | readonlyFlag,
60 | noFollowSymlinks,
61 | permissions,
62 | allowedDirectories: cleanArgs.map(dir =>
63 | normalizePath(path.resolve(expandHome(dir)))
64 | )
65 | };
66 | }
```
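
A quick sketch of how these flags resolve in practice; the outcomes follow directly from the permission calculation above (the directory path is a placeholder):

```typescript
import { parseCommandLineArgs } from './src/config/permissions.js';

// --readonly is a safety override: it wins even when --full-access is present.
const ro = parseCommandLineArgs(['--readonly', '--full-access', '/tmp/data']);
console.log(ro.permissions.fullAccess, ro.permissions.create); // false false

// --full-access enables every granular permission at once.
const full = parseCommandLineArgs(['--full-access', '/tmp/data']);
console.log(full.permissions.delete, full.permissions.rename); // true true

// Granular flags enable only the operations they name.
const granular = parseCommandLineArgs(['--allow-create', '/tmp/data']);
console.log(granular.permissions.create, granular.permissions.move); // true false
```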
--------------------------------------------------------------------------------
/test/utils/pathUtils.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import os from 'os';
2 | import path from 'path';
3 | import { test, expect } from 'bun:test';
4 | import fs from 'fs/promises';
5 | import { expandHome, validatePath } from '../../src/utils/path-utils.js';
6 |
7 | test('expands tilde to home directory', () => {
8 | const result = expandHome('~/example');
9 | expect(result).toBe(path.join(os.homedir(), 'example'));
10 | });
11 |
12 | test('expands $VAR environment variables', () => {
13 | process.env.TEST_VAR = '/tmp/test';
14 | expect(expandHome('$TEST_VAR/file.txt')).toBe('/tmp/test/file.txt');
15 | });
16 |
17 | test('expands %VAR% environment variables', () => {
18 | process.env.TEST_VAR = '/tmp/test';
19 | expect(expandHome('%TEST_VAR%/file.txt')).toBe('/tmp/test/file.txt');
20 | });
21 |
22 | test('expands ${VAR} environment variables', () => {
23 | process.env.BRACED = '/var/tmp';
24 | expect(expandHome('${BRACED}/file.txt')).toBe('/var/tmp/file.txt');
25 | });
26 |
27 | test('throws on undefined environment variables', () => {
28 | delete process.env.UNDEFINED_VAR;
29 | expect(() => expandHome('$UNDEFINED_VAR/file.txt')).toThrow('Environment variable UNDEFINED_VAR is not defined');
30 | });
31 |
32 | test('environment variables cannot bypass symlink restrictions', async () => {
33 | const allowed = await fs.mkdtemp(path.join(os.tmpdir(), 'allowed-'));
34 | const outside = await fs.mkdtemp(path.join(os.tmpdir(), 'outside-'));
35 | const linkPath = path.join(allowed, 'link');
36 | await fs.symlink(outside, linkPath);
37 | process.env.LINK_VAR = linkPath;
38 | await expect(
39 | validatePath('$LINK_VAR/secret.txt', [allowed], new Map(), false)
40 | ).rejects.toThrow(/outside allowed directories/);
41 | });
42 |
43 | test('expands $CWD to process.cwd()', () => {
44 | const cwd = process.cwd();
45 | const result = expandHome('$CWD/subdir');
46 | expect(result).toBe(path.join(cwd, 'subdir'));
47 | });
48 |
49 | test('expands $PWD when set, falls back to process.cwd() when not set', () => {
50 |   const originalPwd = process.env.PWD;
51 |   try {
52 |     process.env.PWD = '/tmp/pwd-test';
53 |     expect(expandHome('$PWD/file.txt')).toBe('/tmp/pwd-test/file.txt');
54 | 
55 |     // Unset PWD and verify the fallback to process.cwd()
56 |     delete process.env.PWD;
57 |     expect(expandHome('$PWD/other')).toBe(path.join(process.cwd(), 'other'));
58 |   } finally {
59 |     // Restore the original value so later tests see an unchanged environment
60 |     if (originalPwd === undefined) {
61 |       delete process.env.PWD;
62 |     } else {
63 |       process.env.PWD = originalPwd;
64 |     }
65 |   }
66 | });
67 | 
```
--------------------------------------------------------------------------------
/scripts/run-docker-demo.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # This script builds and runs the MCP Filesystem Server demo in a Docker container.
4 | # It includes robust checks for port conflicts and cleans up previous container instances.
5 |
6 | # --- Configuration ---
7 | PORT=8090
8 | CONTAINER_NAME="mcpfs-demo"
9 | IMAGE_NAME="mcpfs-demo"
10 |
11 | # Exit immediately if any command fails
12 | set -e
13 |
14 | # --- Cleanup Function ---
15 | # This function will be called when the script exits (either normally or via signal)
16 | cleanup() {
17 |     if [ -n "$CONTAINER_STARTED" ]; then
18 | echo ""
19 | echo "Stopping container '$CONTAINER_NAME'..."
20 | docker stop $CONTAINER_NAME >/dev/null 2>&1 || true
21 | echo "Container stopped."
22 | fi
23 | }
24 |
25 | # Register cleanup to run on script exit and common signals
26 | trap cleanup EXIT SIGINT SIGTERM
27 |
28 | # --- Pre-flight Checks & Setup ---
29 |
30 | # 1. Check if our container is already running
31 | if docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"; then
32 | echo "Found existing container '$CONTAINER_NAME' running. Stopping it..."
33 | docker stop $CONTAINER_NAME >/dev/null 2>&1 || true
34 | docker rm $CONTAINER_NAME >/dev/null 2>&1 || true
35 | sleep 1 # Give it a moment to release the port
36 | fi
37 |
38 | # 2. Clean up any stopped containers with the same name
39 | docker rm $CONTAINER_NAME >/dev/null 2>&1 || true
40 |
41 | # 3. Check if the required port is still in use (by something else)
42 | if lsof -i :$PORT >/dev/null 2>&1; then
43 | echo "Error: Port $PORT is already in use by another process:"
44 | echo ""
45 | lsof -i :$PORT | grep LISTEN || lsof -i :$PORT
46 | echo ""
47 | echo "Please stop the conflicting process and try again."
48 | exit 1
49 | fi
50 |
51 | # --- Docker Build ---
52 |
53 | echo "Building Docker image '$IMAGE_NAME'..."
54 | docker build -t $IMAGE_NAME .
55 |
56 | # --- Docker Run ---
57 |
58 | echo "Starting container '$CONTAINER_NAME'..."
59 | echo "The server will be accessible at http://localhost:$PORT"
60 | echo "Press Ctrl+C to stop the server."
61 | echo ""
62 |
63 | # Run Docker container in detached mode to maintain control in the script
64 | docker run -d \
65 | --rm \
66 | --name $CONTAINER_NAME \
67 | -p ${PORT}:${PORT} \
68 | -v "$(pwd)/demo:/data" \
69 | -w /data \
70 | $IMAGE_NAME "$@" > /dev/null
71 |
72 | # Mark that a container was started so the cleanup trap knows to stop it
73 | CONTAINER_STARTED=1
74 |
75 | # Follow the container logs
76 | # This will block until the container stops or we receive a signal
77 | docker logs -f $CONTAINER_NAME 2>&1 || true
78 |
79 | # Wait for any background processes
80 | wait
81 |
```
--------------------------------------------------------------------------------
/src/schemas/file-operations.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Type, Static } from "@sinclair/typebox";
2 |
3 | // Schema definitions moved from index.ts
4 |
5 | export const ReadFileArgsSchema = Type.Object({
6 | path: Type.String(),
7 | maxBytes: Type.Integer({
8 | minimum: 1,
9 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
10 | })
11 | });
12 | export type ReadFileArgs = Static<typeof ReadFileArgsSchema>;
13 |
14 | export const ReadMultipleFilesArgsSchema = Type.Object({
15 | paths: Type.Array(Type.String()),
16 | maxBytesPerFile: Type.Integer({
17 | minimum: 1,
18 | description: 'Maximum bytes to read per file. Must be a positive integer. Handler default: 10KB.'
19 | })
20 | });
21 | export type ReadMultipleFilesArgs = Static<typeof ReadMultipleFilesArgsSchema>;
22 |
23 | // Note: WriteFileArgsSchema is used by both create_file and modify_file
24 | export const WriteFileArgsSchema = Type.Object({
25 | path: Type.String(),
26 | content: Type.String(),
27 |   // No maxBytes here: this schema describes writes, so a read limit does not apply
28 | });
29 | export type WriteFileArgs = Static<typeof WriteFileArgsSchema>;
30 |
31 | export const EditOperation = Type.Object({
32 | oldText: Type.String({ description: 'Text to search for - must match exactly' }),
33 | newText: Type.String({ description: 'Text to replace with' })
34 | });
35 | export type EditOperationType = Static<typeof EditOperation>;
36 |
37 | export const EditFileArgsSchema = Type.Object({
38 | path: Type.String(),
39 | edits: Type.Array(EditOperation),
40 | dryRun: Type.Boolean({
41 | default: false,
42 | description: 'Preview changes using git-style diff format'
43 | }),
44 | maxBytes: Type.Integer({
45 | minimum: 1,
46 | description: 'Maximum bytes to read from the file before editing. Must be a positive integer. Handler default: 10KB.'
47 | })
48 | });
49 | export type EditFileArgs = Static<typeof EditFileArgsSchema>;
50 |
51 | export const GetFileInfoArgsSchema = Type.Object({
52 | path: Type.String(),
53 | });
54 | export type GetFileInfoArgs = Static<typeof GetFileInfoArgsSchema>;
55 |
56 | export const MoveFileArgsSchema = Type.Object({
57 | source: Type.String(),
58 | destination: Type.String(),
59 | });
60 | export type MoveFileArgs = Static<typeof MoveFileArgsSchema>;
61 |
62 | export const DeleteFileArgsSchema = Type.Object({
63 | path: Type.String(),
64 | });
65 | export type DeleteFileArgs = Static<typeof DeleteFileArgsSchema>;
66 |
67 | export const RenameFileArgsSchema = Type.Object({
68 | path: Type.String({ description: 'Path to the file to be renamed' }),
69 | newName: Type.String({ description: 'New name for the file (without path)' })
70 | });
71 | export type RenameFileArgs = Static<typeof RenameFileArgsSchema>;
72 |
73 |
```
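
These TypeBox schemas support runtime validation as well as static typing; a minimal sketch, assuming the `@sinclair/typebox/value` subpath is available (the sample payload is made up):

```typescript
import { Value } from '@sinclair/typebox/value';
import { ReadFileArgsSchema, type ReadFileArgs } from './src/schemas/file-operations.js';

const candidate: unknown = { path: 'demo/info.txt', maxBytes: 1024 };

if (Value.Check(ReadFileArgsSchema, candidate)) {
  // Value.Check is a type guard, so candidate narrows to the schema's Static type.
  const args: ReadFileArgs = candidate;
  console.log(args.path, args.maxBytes);
} else {
  // Enumerate the schema violations for diagnostics.
  for (const error of Value.Errors(ReadFileArgsSchema, candidate)) {
    console.error(error.path, error.message);
  }
}
```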
--------------------------------------------------------------------------------
/test/suites/regex_search_content/regex_flags.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-flags-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_flags/';
17 |
18 | describe('test-filesystem::regex_search_content - Regex Flags', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}case.txt`, content: 'CaseSensitivePattern' } });
30 | });
31 |
32 | afterAll(async () => {
33 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
34 | await transport.close();
35 | });
36 |
37 | it('performs case-sensitive search by default', async () => {
38 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'CaseSensitivePattern' } }, CallToolResultSchema);
39 | expect(res.isError).not.toBe(true);
40 | const parsed = parseRegexSearchOutput(getTextContent(res));
41 | expect(parsed[0].file).toBe(path.join(serverRoot, `${testBasePath}case.txt`));
42 | });
43 |
44 | it('returns an error for unsupported (?i) flag', async () => {
45 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: '(?i)casesensitivepattern' } }, CallToolResultSchema);
46 | expect(res.isError).toBe(true);
47 | expect(getTextContent(res)).toMatch(/Invalid regex pattern/);
48 | });
49 | });
50 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/max_results.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-maxresults-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_maxresults/';
17 |
18 | describe('test-filesystem::regex_search_content - Max Results Limiting', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | for (let i = 1; i <= 5; i++) {
30 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}file_${i}.txt`, content: 'max_results_pattern' } });
31 | }
32 | });
33 |
34 | afterAll(async () => {
35 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
36 | await transport.close();
37 | });
38 |
39 | it('limits number of files returned', async () => {
40 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'max_results_pattern', maxResults: 2 } }, CallToolResultSchema);
41 | expect(res.isError).not.toBe(true);
42 | const parsed = parseRegexSearchOutput(getTextContent(res));
43 | expect(parsed.length).toBe(2);
44 | });
45 |
46 | it('returns all matches when limit higher than count', async () => {
47 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'max_results_pattern', maxResults: 10 } }, CallToolResultSchema);
48 | expect(res.isError).not.toBe(true);
49 | const parsed = parseRegexSearchOutput(getTextContent(res));
50 | expect(parsed.length).toBe(5);
51 | });
52 | });
53 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/error_handling.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import path from 'path';
6 | import fs from 'fs/promises';
7 | import { fileURLToPath } from 'url';
8 | import { getTextContent } from '../../utils/regexUtils.js';
9 |
10 | const clientInfo = { name: 'regex-search-error-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_errors/';
17 | const nonExistentPath = 'regex_search_content_nonexistent/';
18 |
19 | describe('test-filesystem::regex_search_content - Error Handling', () => {
20 | let client: Client;
21 | let transport: StdioClientTransport;
22 |
23 | beforeAll(async () => {
24 | await fs.mkdir(serverRoot, { recursive: true });
25 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
26 | client = new Client(clientInfo, { capabilities: clientCapabilities });
27 | await client.connect(transport);
28 |
29 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
30 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}a_file.txt`, content: 'content' } });
31 | });
32 |
33 | afterAll(async () => {
34 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
35 | await transport.close();
36 | });
37 |
38 | it('returns error for invalid regex', async () => {
39 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: '[invalid' } }, CallToolResultSchema);
40 | expect(res.isError).toBe(true);
41 | expect(getTextContent(res)).toMatch(/Invalid regex pattern/);
42 | });
43 |
44 | it('returns no matches for non-existent path', async () => {
45 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: nonExistentPath, regex: 'x' } }, CallToolResultSchema);
46 | expect(res.isError).not.toBe(true);
47 | expect(getTextContent(res)).toBe('No matches found for the given regex pattern.');
48 | });
49 |
50 | it('returns no matches when path is a file', async () => {
51 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: `${testBasePath}a_file.txt`, regex: 'x' } }, CallToolResultSchema);
52 | expect(res.isError).not.toBe(true);
53 | expect(getTextContent(res)).toBe('No matches found for the given regex pattern.');
54 | });
55 | });
56 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/max_filesize.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-filesize-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_filesize/';
17 |
18 | describe('test-filesystem::regex_search_content - Max File Size Limiting', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}small.txt`, content: 'filesize_pattern small' } });
30 | const bigContent = 'filesize_pattern '.padEnd(2000, 'x');
31 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}large.txt`, content: bigContent } });
32 | });
33 |
34 | afterAll(async () => {
35 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
36 | await transport.close();
37 | });
38 |
39 | it('skips files larger than limit', async () => {
40 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'filesize_pattern', maxFileSize: 100 } }, CallToolResultSchema);
41 | expect(res.isError).not.toBe(true);
42 | const files = parseRegexSearchOutput(getTextContent(res)).map(r => r.file);
43 | expect(files).toEqual([path.join(serverRoot, `${testBasePath}small.txt`)]);
44 | });
45 |
46 | it('searches all when limit high', async () => {
47 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'filesize_pattern', maxFileSize: 5000 } }, CallToolResultSchema);
48 | expect(res.isError).not.toBe(true);
49 | const files = parseRegexSearchOutput(getTextContent(res)).map(r => r.file);
50 | expect(files).toEqual(expect.arrayContaining([
51 | path.join(serverRoot, `${testBasePath}small.txt`),
52 | path.join(serverRoot, `${testBasePath}large.txt`)
53 | ]));
54 | });
55 | });
56 |
```
--------------------------------------------------------------------------------
/src/schemas/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | import {
2 | ReadFileArgsSchema,
3 | ReadMultipleFilesArgsSchema,
4 | WriteFileArgsSchema,
5 | EditFileArgsSchema,
6 | EditOperationType,
7 | GetFileInfoArgsSchema,
8 | MoveFileArgsSchema,
9 | DeleteFileArgsSchema,
10 | RenameFileArgsSchema,
11 | ReadFileArgs,
12 | ReadMultipleFilesArgs,
13 | WriteFileArgs,
14 | EditFileArgs,
15 | GetFileInfoArgs,
16 | MoveFileArgs,
17 | DeleteFileArgs,
18 | RenameFileArgs,
19 | } from './file-operations.js';
20 |
21 | import {
22 | CreateDirectoryArgsSchema,
23 | ListDirectoryArgsSchema,
24 | DirectoryTreeArgsSchema,
25 | DeleteDirectoryArgsSchema,
26 | CreateDirectoryArgs,
27 | ListDirectoryArgs,
28 | DirectoryTreeArgs,
29 | DeleteDirectoryArgs,
30 | } from './directory-operations.js';
31 |
32 | import {
33 | SearchFilesArgsSchema,
34 | FindFilesByExtensionArgsSchema,
35 | GetPermissionsArgsSchema,
36 | XmlToJsonArgsSchema,
37 | XmlToJsonStringArgsSchema,
38 | RegexSearchContentArgsSchema,
39 | XmlQueryArgsSchema,
40 | XmlStructureArgsSchema,
41 | SearchFilesArgs,
42 | FindFilesByExtensionArgs,
43 | GetPermissionsArgs,
44 | XmlToJsonArgs,
45 | XmlToJsonStringArgs,
46 | RegexSearchContentArgs,
47 | XmlQueryArgs,
48 | XmlStructureArgs,
49 | } from './utility-operations.js';
50 |
51 | import {
52 | JsonQueryArgsSchema,
53 | JsonFilterArgsSchema,
54 | JsonGetValueArgsSchema,
55 | JsonTransformArgsSchema,
56 | JsonStructureArgsSchema,
57 | JsonSampleArgsSchema,
58 | JsonValidateArgsSchema,
59 | JsonSearchKvArgsSchema,
60 | JsonQueryArgs,
61 | JsonFilterArgs,
62 | JsonGetValueArgs,
63 | JsonTransformArgs,
64 | JsonStructureArgs,
65 | JsonSampleArgs,
66 | JsonValidateArgs,
67 | JsonSearchKvArgs,
68 | } from './json-operations.js';
69 |
70 | export const toolSchemas = {
71 | read_file: ReadFileArgsSchema,
72 | read_multiple_files: ReadMultipleFilesArgsSchema,
73 | create_file: WriteFileArgsSchema,
74 | modify_file: WriteFileArgsSchema,
75 | edit_file: EditFileArgsSchema,
76 | create_directory: CreateDirectoryArgsSchema,
77 | list_directory: ListDirectoryArgsSchema,
78 | directory_tree: DirectoryTreeArgsSchema,
79 | delete_directory: DeleteDirectoryArgsSchema,
80 | search_files: SearchFilesArgsSchema,
81 | find_files_by_extension: FindFilesByExtensionArgsSchema,
82 | move_file: MoveFileArgsSchema,
83 | delete_file: DeleteFileArgsSchema,
84 | rename_file: RenameFileArgsSchema,
85 | get_file_info: GetFileInfoArgsSchema,
86 | get_permissions: GetPermissionsArgsSchema,
87 | xml_query: XmlQueryArgsSchema,
88 | xml_structure: XmlStructureArgsSchema,
89 | xml_to_json: XmlToJsonArgsSchema,
90 | xml_to_json_string: XmlToJsonStringArgsSchema,
91 | json_query: JsonQueryArgsSchema,
92 | json_structure: JsonStructureArgsSchema,
93 | json_filter: JsonFilterArgsSchema,
94 | json_get_value: JsonGetValueArgsSchema,
95 | json_transform: JsonTransformArgsSchema,
96 | json_sample: JsonSampleArgsSchema,
97 | json_validate: JsonValidateArgsSchema,
98 | json_search_kv: JsonSearchKvArgsSchema,
99 | regex_search_content: RegexSearchContentArgsSchema,
100 | } as const;
101 |
102 | export type {
103 | ReadFileArgs,
104 | ReadMultipleFilesArgs,
105 | WriteFileArgs,
106 | EditFileArgs,
107 | EditOperationType,
108 | GetFileInfoArgs,
109 | MoveFileArgs,
110 | DeleteFileArgs,
111 | RenameFileArgs,
112 | CreateDirectoryArgs,
113 | ListDirectoryArgs,
114 | DirectoryTreeArgs,
115 | DeleteDirectoryArgs,
116 | SearchFilesArgs,
117 | FindFilesByExtensionArgs,
118 | GetPermissionsArgs,
119 | XmlToJsonArgs,
120 | XmlToJsonStringArgs,
121 | RegexSearchContentArgs,
122 | XmlQueryArgs,
123 | XmlStructureArgs,
124 | JsonQueryArgs,
125 | JsonFilterArgs,
126 | JsonGetValueArgs,
127 | JsonTransformArgs,
128 | JsonStructureArgs,
129 | JsonSampleArgs,
130 | JsonValidateArgs,
131 | JsonSearchKvArgs,
132 | };
133 |
```
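
Because `toolSchemas` is declared `as const`, its keys double as a string-literal union of valid tool names; a small sketch (`schemaFor` is a hypothetical helper, not part of this repository):

```typescript
import { toolSchemas } from './src/schemas/index.js';

// 'read_file' | 'read_multiple_files' | ... | 'regex_search_content'
type ToolName = keyof typeof toolSchemas;

// Hypothetical helper: look up the TypeBox schema registered for a tool.
function schemaFor(name: ToolName) {
  return toolSchemas[name];
}

const schema = schemaFor('read_file');
console.log(Object.keys(schema.properties)); // ['path', 'maxBytes']
```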
--------------------------------------------------------------------------------
/test/suites/regex_search_content/path_usage.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from "bun:test";
2 | import { Client } from "@modelcontextprotocol/sdk/client/index.js";
3 | import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
4 | import {
5 | ClientCapabilities,
6 | CallToolResultSchema,
7 | } from "@modelcontextprotocol/sdk/types.js";
8 | import path from "path";
9 | import { getTextContent } from "../../utils/regexUtils.js";
10 | import fs from "fs/promises";
11 | import { fileURLToPath } from "url";
12 |
13 | const clientInfo = { name: "regex-search-path-test-suite", version: "0.1.0" };
14 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
15 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
16 | const serverRoot = path.resolve(__dirname, "../../fs_root");
17 | const serverCommand = "bun";
18 | const serverArgs = ["dist/index.js", serverRoot, "--full-access"];
19 | const testRelativeBasePath = "regex_search_content_paths/";
20 | const absoluteBasePath = path.join(serverRoot, testRelativeBasePath);
21 |
22 | describe("test-filesystem::regex_search_content - Path Usage", () => {
23 | let client: Client;
24 | let transport: StdioClientTransport;
25 |
26 | beforeAll(async () => {
27 | await fs.mkdir(serverRoot, { recursive: true });
28 | transport = new StdioClientTransport({
29 | command: serverCommand,
30 | args: serverArgs,
31 | });
32 | client = new Client(clientInfo, { capabilities: clientCapabilities });
33 | await client.connect(transport);
34 |
35 | await client.callTool({
36 | name: "create_directory",
37 | arguments: { path: testRelativeBasePath },
38 | });
39 | await client.callTool({
40 | name: "create_file",
41 | arguments: {
42 | path: `${testRelativeBasePath}file_in_root.txt`,
43 | content: "Path pattern",
44 | },
45 | });
46 | await client.callTool({
47 | name: "create_directory",
48 | arguments: { path: `${testRelativeBasePath}sub/` },
49 | });
50 | await client.callTool({
51 | name: "create_file",
52 | arguments: {
53 | path: `${testRelativeBasePath}sub/file_in_subdir.txt`,
54 | content: "Path pattern",
55 | },
56 | });
57 | });
58 |
59 | afterAll(async () => {
60 | await client.callTool({
61 | name: "delete_directory",
62 | arguments: { path: testRelativeBasePath, recursive: true },
63 | });
64 | await transport.close();
65 | });
66 |
67 | it("works with relative path", async () => {
68 | const res = await client.callTool(
69 | {
70 | name: "regex_search_content",
71 | arguments: { path: testRelativeBasePath, regex: "Path pattern" },
72 | },
73 | CallToolResultSchema,
74 | );
75 | expect(res.isError).not.toBe(true);
76 | expect(getTextContent(res)).toMatch("file_in_root.txt");
77 | });
78 |
79 | it("works with absolute path within root", async () => {
80 | const res = await client.callTool(
81 | {
82 | name: "regex_search_content",
83 | arguments: { path: absoluteBasePath, regex: "Path pattern" },
84 | },
85 | CallToolResultSchema,
86 | );
87 | expect(res.isError).not.toBe(true);
88 | expect(getTextContent(res)).toMatch("file_in_root.txt");
89 | });
90 |
91 | it("errors for path outside root", async () => {
92 | const outside = path.dirname(serverRoot);
93 | const res = await client.callTool(
94 | {
95 | name: "regex_search_content",
96 | arguments: { path: outside, regex: "x" },
97 | },
98 | CallToolResultSchema,
99 | );
100 | expect(res.isError).toBe(true);
101 | expect(getTextContent(res)).toMatch(/Access denied/);
102 | });
103 | });
104 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/file_pattern.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-pattern-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_pattern/';
17 |
18 | describe('test-filesystem::regex_search_content - File Pattern Matching', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}a.txt`, content: 'pattern_here' } });
30 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}b.log`, content: 'pattern_here' } });
31 | await client.callTool({ name: 'create_directory', arguments: { path: `${testBasePath}sub/` } });
32 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}sub/c.txt`, content: 'pattern_here' } });
33 | });
34 |
35 | afterAll(async () => {
36 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
37 | await transport.close();
38 | });
39 |
40 | it('limits search using *.txt glob', async () => {
41 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'pattern_here', filePattern: '*.txt' } }, CallToolResultSchema);
42 | expect(res.isError).not.toBe(true);
43 | const files = parseRegexSearchOutput(getTextContent(res)).map(r => r.file);
44 | expect(files).toEqual([path.join(serverRoot, `${testBasePath}a.txt`)]);
45 | });
46 |
47 | it('searches recursively with **/*.txt', async () => {
48 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'pattern_here', filePattern: '**/*.txt' } }, CallToolResultSchema);
49 | expect(res.isError).not.toBe(true);
50 | const files = parseRegexSearchOutput(getTextContent(res)).map(r => r.file);
51 | expect(files).toEqual(expect.arrayContaining([
52 | path.join(serverRoot, `${testBasePath}a.txt`),
53 | path.join(serverRoot, `${testBasePath}sub/c.txt`)
54 | ]));
55 | });
56 |
57 | it('returns empty when glob matches nothing', async () => {
58 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'pattern_here', filePattern: '*.none' } }, CallToolResultSchema);
59 | expect(res.isError).not.toBe(true);
60 | expect(getTextContent(res)).toBe('No matches found for the given regex pattern.');
61 | });
62 | });
63 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/basic_search.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content/';
17 |
18 | describe('test-filesystem::regex_search_content - Basic Search', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}file1.txt`, content: 'A unique_pattern_123 here' } });
30 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}file2.log`, content: 'Another unique_pattern_123 again' } });
31 | await client.callTool({ name: 'create_directory', arguments: { path: `${testBasePath}sub/` } });
32 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}sub/subfile.txt`, content: 'unique_pattern_123 in sub' } });
33 | });
34 |
35 | afterAll(async () => {
36 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
37 | await transport.close();
38 | });
39 |
40 | it('finds a pattern in a single file', async () => {
41 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'unique_pattern_123', filePattern: 'file1.txt' } }, CallToolResultSchema);
42 | expect(res.isError).not.toBe(true);
43 | const parsed = parseRegexSearchOutput(getTextContent(res));
44 | expect(parsed).toHaveLength(1);
45 | expect(parsed[0].file).toBe(path.join(serverRoot, `${testBasePath}file1.txt`));
46 | });
47 |
48 | it('returns multiple files when pattern exists in them', async () => {
49 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'unique_pattern_123', filePattern: '**/*' } }, CallToolResultSchema);
50 | expect(res.isError).not.toBe(true);
51 | const parsed = parseRegexSearchOutput(getTextContent(res));
52 | const files = parsed.map(p => p.file);
53 | expect(files).toEqual(expect.arrayContaining([
54 | path.join(serverRoot, `${testBasePath}file1.txt`),
55 | path.join(serverRoot, `${testBasePath}file2.log`),
56 | path.join(serverRoot, `${testBasePath}sub/subfile.txt`)
57 | ]));
58 | });
59 |
60 | it('returns no matches when pattern does not exist', async () => {
61 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'does_not_exist' } }, CallToolResultSchema);
62 | expect(res.isError).not.toBe(true);
63 | expect(getTextContent(res)).toBe('No matches found for the given regex pattern.');
64 | });
65 | });
66 |
```
--------------------------------------------------------------------------------
/test/suites/regex_search_content/depth_limiting.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import { parseRegexSearchOutput, getTextContent } from '../../utils/regexUtils.js';
6 | import path from 'path';
7 | import fs from 'fs/promises';
8 | import { fileURLToPath } from 'url';
9 |
10 | const clientInfo = { name: 'regex-search-depth-test-suite', version: '0.1.0' };
11 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
12 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
13 | const serverRoot = path.resolve(__dirname, '../../fs_root');
14 | const serverCommand = 'bun';
15 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
16 | const testBasePath = 'regex_search_content_depth/';
17 |
18 | describe('test-filesystem::regex_search_content - Depth Limiting', () => {
19 | let client: Client;
20 | let transport: StdioClientTransport;
21 |
22 | beforeAll(async () => {
23 | await fs.mkdir(serverRoot, { recursive: true });
24 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
25 | client = new Client(clientInfo, { capabilities: clientCapabilities });
26 | await client.connect(transport);
27 |
28 | await client.callTool({ name: 'create_directory', arguments: { path: testBasePath } });
29 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}file_root.txt`, content: 'depth_pattern' } });
30 | await client.callTool({ name: 'create_directory', arguments: { path: `${testBasePath}sub1/` } });
31 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}sub1/file1.txt`, content: 'depth_pattern' } });
32 | await client.callTool({ name: 'create_directory', arguments: { path: `${testBasePath}sub1/sub2/` } });
33 | await client.callTool({ name: 'create_file', arguments: { path: `${testBasePath}sub1/sub2/file2.txt`, content: 'depth_pattern' } });
34 | });
35 |
36 | afterAll(async () => {
37 | await client.callTool({ name: 'delete_directory', arguments: { path: testBasePath, recursive: true } });
38 | await transport.close();
39 | });
40 |
41 | it('searches only root when maxDepth is 1', async () => {
42 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'depth_pattern', filePattern: '**/*', maxDepth: 1 } }, CallToolResultSchema);
43 | expect(res.isError).not.toBe(true);
44 | const parsed = parseRegexSearchOutput(getTextContent(res));
45 | expect(parsed.map(p=>p.file)).toEqual([path.join(serverRoot, `${testBasePath}file_root.txt`)]);
46 | });
47 |
48 | it('searches up to depth 2', async () => {
49 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'depth_pattern', filePattern: '**/*', maxDepth: 2 } }, CallToolResultSchema);
50 | expect(res.isError).not.toBe(true);
51 | const files = parseRegexSearchOutput(getTextContent(res)).map(p=>p.file);
52 | expect(files).toEqual(expect.arrayContaining([
53 | path.join(serverRoot, `${testBasePath}file_root.txt`),
54 | path.join(serverRoot, `${testBasePath}sub1/file1.txt`)
55 | ]));
56 | });
57 |
58 | it('searches all when maxDepth large', async () => {
59 | const res = await client.callTool({ name: 'regex_search_content', arguments: { path: testBasePath, regex: 'depth_pattern', filePattern: '**/*', maxDepth: 5 } }, CallToolResultSchema);
60 | expect(res.isError).not.toBe(true);
61 | const files = parseRegexSearchOutput(getTextContent(res)).map(p=>p.file);
62 | expect(files).toEqual(expect.arrayContaining([
63 | path.join(serverRoot, `${testBasePath}file_root.txt`),
64 | path.join(serverRoot, `${testBasePath}sub1/file1.txt`),
65 | path.join(serverRoot, `${testBasePath}sub1/sub2/file2.txt`)
66 | ]));
67 | });
68 | });
69 |
```
--------------------------------------------------------------------------------
/src/utils/data-utils.ts:
--------------------------------------------------------------------------------
```typescript
1 | export function isPlainObject(value: any): value is Record<string, any> {
2 | return Object.prototype.toString.call(value) === '[object Object]';
3 | }
4 |
5 | export function pickBy<T extends Record<string, any>>(obj: T, predicate: (value: any, key: string) => boolean): Partial<T> {
6 | const result: Partial<T> = {};
7 | for (const [key, val] of Object.entries(obj)) {
8 | if (predicate(val, key)) {
9 | (result as any)[key] = val;
10 | }
11 | }
12 | return result;
13 | }
14 |
15 | export function size(value: any): number {
16 | if (Array.isArray(value) || typeof value === 'string') return value.length;
17 | if (isPlainObject(value)) return Object.keys(value).length;
18 | return 0;
19 | }
20 |
21 | export function values<T>(obj: Record<string, T>): T[] {
22 | return Object.values(obj);
23 | }
24 |
25 | export function get(obj: any, path: string | Array<string | number>): any {
26 | const parts = Array.isArray(path) ? path : String(path).split('.');
27 | let current = obj;
28 | for (const part of parts) {
29 | if (current == null) return undefined;
30 | current = current[part as any];
31 | }
32 | return current;
33 | }
34 |
35 | export function isEqual(a: any, b: any): boolean {
36 |   return JSON.stringify(a) === JSON.stringify(b); // note: sensitive to key order and ignores undefined-valued keys
37 | }
38 |
39 | export function groupBy<T>(array: T[], iteratee: string | ((item: T) => string | number)): Record<string, T[]> {
40 | const getKey = typeof iteratee === 'function' ? iteratee : (item: T) => String(get(item as any, iteratee));
41 | return array.reduce<Record<string, T[]>>((acc, item) => {
42 | const key = String(getKey(item));
43 | (acc[key] ||= []).push(item);
44 | return acc;
45 | }, {});
46 | }
47 |
48 | export function orderBy<T>(array: T[], iteratee: string | ((item: T) => any), orders: ('asc' | 'desc')[] = ['asc']): T[] {
49 | const getValue = typeof iteratee === 'function' ? iteratee : (item: T) => get(item as any, iteratee);
50 | const order = orders[0] ?? 'asc';
51 | return [...array].sort((a, b) => {
52 | const va = getValue(a);
53 | const vb = getValue(b);
54 | if (va < vb) return order === 'asc' ? -1 : 1;
55 | if (va > vb) return order === 'asc' ? 1 : -1;
56 | return 0;
57 | });
58 | }
59 |
60 | export function flattenDeep(arr: any[]): any[] {
61 | const result: any[] = [];
62 | for (const item of arr) {
63 | if (Array.isArray(item)) {
64 | result.push(...flattenDeep(item));
65 | } else {
66 | result.push(item);
67 | }
68 | }
69 | return result;
70 | }
71 |
72 | export function pick<T extends Record<string, any>, K extends keyof T>(obj: T, keys: readonly K[]): Pick<T, K> {
73 | const result: Partial<T> = {};
74 | for (const key of keys) {
75 | if (key in obj) {
76 | (result as any)[key] = obj[key];
77 | }
78 | }
79 | return result as Pick<T, K>;
80 | }
81 |
82 | export function omit<T extends Record<string, any>, K extends keyof T>(obj: T, keys: readonly K[]): Omit<T, K> {
83 | const result: Record<string, any> = { ...obj };
84 | for (const key of keys) {
85 | delete result[key as string];
86 | }
87 | return result as Omit<T, K>;
88 | }
89 |
90 | export function isEmpty(value: any): boolean {
91 | if (Array.isArray(value) || typeof value === 'string') return value.length === 0;
92 | if (isPlainObject(value)) return Object.keys(value).length === 0;
93 | return !value;
94 | }
95 |
96 | export function every<T>(arr: T[], predicate: (item: T) => boolean): boolean {
97 | return arr.every(predicate);
98 | }
99 |
100 | export function some<T>(arr: T[], predicate: (item: T) => boolean): boolean {
101 | return arr.some(predicate);
102 | }
103 |
104 | export function map<T, U>(arr: T[], iteratee: (item: T) => U): U[] {
105 | return arr.map(iteratee);
106 | }
107 |
108 | export function filter<T>(arr: T[], predicate: (item: T) => boolean): T[] {
109 | return arr.filter(predicate);
110 | }
111 |
112 | export function sampleSize<T>(arr: T[], n: number): T[] {
113 | const copy = [...arr];
114 | for (let i = copy.length - 1; i > 0; i--) {
115 | const j = Math.floor(Math.random() * (i + 1));
116 | [copy[i], copy[j]] = [copy[j], copy[i]];
117 | }
118 | return copy.slice(0, n);
119 | }
120 |
121 | export function take<T>(arr: T[], n: number): T[] {
122 | return arr.slice(0, n);
123 | }
124 |
125 | export function transform<T extends Record<string, any>, R>(obj: T, iteratee: (result: R, value: any, key: string) => void, accumulator: R): R {
126 | for (const [key, val] of Object.entries(obj)) {
127 | iteratee(accumulator, val, key);
128 | }
129 | return accumulator;
130 | }
131 |
```
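
These helpers mirror a small subset of lodash; a few representative calls whose results follow directly from the implementations above:

```typescript
import { groupBy, orderBy, get, pick, isEmpty } from './src/utils/data-utils.js';

const users = [
  { name: 'ada', role: 'admin', meta: { logins: 5 } },
  { name: 'bob', role: 'user', meta: { logins: 2 } },
  { name: 'cyd', role: 'admin', meta: { logins: 9 } },
];

groupBy(users, 'role');                  // { admin: [ada, cyd], user: [bob] }
orderBy(users, 'meta.logins', ['desc']); // cyd (9), ada (5), bob (2)
get(users[0], 'meta.logins');            // 5
pick(users[0], ['name']);                // { name: 'ada' }
isEmpty({});                             // true
```

Note that `isEqual` compares via `JSON.stringify`, so it is sensitive to key order; callers needing order-insensitive equality should normalize their objects first.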
--------------------------------------------------------------------------------
/src/utils/path-utils.ts:
--------------------------------------------------------------------------------
```typescript
1 | import path from 'path';
2 | import os from 'os';
3 | import fs from 'fs/promises';
4 | import type { ReadonlyDeep } from 'type-fest';
5 |
6 | // Normalize all paths consistently
7 | export function normalizePath(p: string): string {
8 | return path.normalize(p);
9 | }
10 |
11 | export function expandHome(filepath: string): string {
12 | // Expand $VAR, ${VAR}, and %VAR% environment variables
13 | let expanded = filepath.replace(/\$(?:\{([A-Za-z_][A-Za-z0-9_]*)\}|([A-Za-z_][A-Za-z0-9_]*))|%([A-Za-z_][A-Za-z0-9_]*)%/g, (match, braced, unixVar, winVar) => {
14 | const envVar = (braced || unixVar || winVar) as string;
15 |
16 | // Built-in fallbacks for common CWD variables
17 | if (envVar === 'CWD') {
18 | return process.cwd();
19 | }
20 | if (envVar === 'PWD') {
21 | return process.env.PWD ?? process.cwd();
22 | }
23 |
24 | const value = process.env[envVar];
25 | if (value === undefined) {
26 | throw new Error(`Environment variable ${envVar} is not defined`);
27 | }
28 | return value;
29 | });
30 |
31 | // Expand ~ to home directory
32 | if (expanded.startsWith('~/') || expanded === '~') {
33 | expanded = path.join(os.homedir(), expanded.slice(1));
34 | }
35 |
36 | // Ensure no unresolved variables remain
37 | if (/\$\{?[A-Za-z_][A-Za-z0-9_]*\}?|%[A-Za-z_][A-Za-z0-9_]*%/.test(expanded)) {
38 | throw new Error('Unresolved environment variables in path');
39 | }
40 |
41 | return expanded;
42 | }
43 | export type ValidatePathOptions = ReadonlyDeep<{
44 | checkParentExists?: boolean;
45 | }>;
46 |
47 |
48 | export async function validatePath(
49 | requestedPath: string,
50 | allowedDirectories: ReadonlyArray<string>,
51 | symlinksMap: Map<string, string>,
52 | noFollowSymlinks: boolean,
53 | options?: ValidatePathOptions
54 | ): Promise<string> {
55 | // Default checkParentExists to true if not provided
56 | const checkParentExists = options?.checkParentExists ?? true;
57 | const expandedPath = expandHome(requestedPath);
58 | // Resolve absolute paths directly, resolve relative paths against the first allowed directory
59 | const absolute = path.isAbsolute(expandedPath)
60 | ? path.resolve(expandedPath)
61 | : path.resolve(allowedDirectories[0], expandedPath);
62 |
63 | const normalizedRequested = normalizePath(absolute);
64 |
65 |   // Check if path is within allowed directories (compare whole path segments so '/allowed-extra' cannot pass for '/allowed')
66 |   const isAllowed = allowedDirectories.some(dir => normalizedRequested === dir || normalizedRequested.startsWith(dir.endsWith(path.sep) ? dir : dir + path.sep));
67 | if (!isAllowed) {
68 | // Check if it's a real path that matches a symlink we know about
69 |     const matchingSymlink = Array.from(symlinksMap.entries()).find(([realPath]) =>
70 |       normalizedRequested === realPath || normalizedRequested.startsWith(realPath + path.sep)
71 |     );
72 |
73 | if (matchingSymlink) {
74 | const [realPath, symlinkPath] = matchingSymlink;
75 | // Convert the path from real path to symlink path
76 | const relativePath = normalizedRequested.substring(realPath.length);
77 | const symlinkEquivalent = path.join(symlinkPath, relativePath);
78 |
79 | // Return the symlink path instead
80 | return symlinkEquivalent;
81 | }
82 |
83 | throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`);
84 | }
85 |
86 | // Handle symlinks by checking their real path
87 | try {
88 | const realPath = await fs.realpath(absolute);
89 | const normalizedReal = normalizePath(realPath);
90 |
91 | // If the real path is different from the requested path, it's a symlink
92 | if (normalizedReal !== normalizedRequested) {
93 | // Store this mapping for future reference
94 | symlinksMap.set(normalizedReal, normalizedRequested);
95 |
96 | // Make sure the real path is also allowed
97 |       const isRealPathAllowed = allowedDirectories.some(dir => normalizedReal === dir || normalizedReal.startsWith(dir + path.sep));
98 | if (!isRealPathAllowed) {
99 | throw new Error("Access denied - symlink target outside allowed directories");
100 | }
101 |
102 | // If no-follow-symlinks is true, return the original path
103 | if (noFollowSymlinks) {
104 | return absolute;
105 | }
106 | }
107 |
108 | return realPath;
109 | } catch (error) {
110 | // For new files/dirs that don't exist yet, verify parent directory *if requested*
111 |     if (checkParentExists) { // Only verify the parent when requested
112 | const parentDir = path.dirname(absolute);
113 | try {
114 | const realParentPath = await fs.realpath(parentDir);
115 | const normalizedParent = normalizePath(realParentPath);
116 |         const isParentAllowed = allowedDirectories.some(dir => normalizedParent === dir || normalizedParent.startsWith(dir + path.sep));
117 | if (!isParentAllowed) {
118 | throw new Error("Access denied - parent directory outside allowed directories");
119 | }
120 | // If parent exists and is allowed, return the original absolute path for creation
121 | return absolute;
122 | } catch (parentError) {
123 | // If parent check fails, throw specific error
124 | // Check if parent doesn't exist specifically using the error code
125 | if ((parentError as NodeJS.ErrnoException)?.code === 'ENOENT') {
126 | throw new Error(`Parent directory does not exist: ${parentDir}`);
127 | }
128 | // Rethrow other parent errors
129 | throw parentError;
130 | }
131 | } else {
132 | // If checkParentExists is false, just return the absolute path
133 | // The initial isAllowed check already confirmed it's within bounds
134 | return absolute;
135 | }
136 | }
137 | }
```
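A brief usage sketch for the helpers above (values are illustrative; in the server, the allowed-directory list and symlink map are built at startup):

```typescript
import { expandHome, validatePath } from './src/utils/path-utils.js';

// Tilde and environment variables are expanded before validation:
expandHome('~/projects');       // e.g. /home/user/projects
expandHome('${HOME}/projects'); // same result via ${VAR} syntax
expandHome('$CWD/data');        // built-in fallback: resolves against process.cwd()

// validatePath expands, resolves, and confines the result to the sandbox;
// relative paths resolve against the first allowed directory:
const allowed = ['/srv/fs_root'];
const symlinks = new Map<string, string>();
const safe = await validatePath('data/config.json', allowed, symlinks, false);
// -> /srv/fs_root/data/config.json (or its realpath), otherwise it throws
```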
--------------------------------------------------------------------------------
/test/suites/xml_tools/xml_tools.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeAll, afterAll } from 'bun:test';
2 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
3 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
4 | import { ClientCapabilities, CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';
5 | import path from 'path';
6 | import fs from 'fs/promises';
7 | import { fileURLToPath } from 'url';
8 |
9 | const clientInfo = { name: 'xml-tools-test-suite', version: '0.1.0' };
10 | const clientCapabilities: ClientCapabilities = { toolUse: { enabled: true } };
11 | const __dirname = path.dirname(fileURLToPath(import.meta.url));
12 | const serverRoot = path.resolve(__dirname, '../../fs_root');
13 | const serverCommand = 'bun';
14 | const serverArgs = ['dist/index.js', serverRoot, '--full-access'];
15 | const basePath = 'xml_tools/';
16 |
17 | function getTextContent(result: unknown): string {
18 | const parsed = CallToolResultSchema.parse(result);
19 | const first = parsed.content[0];
20 | if (!first || first.type !== 'text') throw new Error('Expected text content');
21 | return (first as any).text as string;
22 | }
23 |
24 | describe('test-filesystem::xml_tools', () => {
25 | let client: Client;
26 | let transport: StdioClientTransport;
27 |
28 | beforeAll(async () => {
29 | await fs.mkdir(serverRoot, { recursive: true });
30 | transport = new StdioClientTransport({ command: serverCommand, args: serverArgs });
31 | client = new Client(clientInfo, { capabilities: clientCapabilities });
32 | await client.connect(transport);
33 |
34 | await client.callTool({ name: 'create_directory', arguments: { path: basePath } });
35 |
36 | const xml = `<?xml version="1.0" encoding="UTF-8"?>\n` +
37 | `<catalog xmlns="http://example.org/catalog">\n` +
38 | ` <book id="bk101" category="fiction">\n` +
39 | ` <author>Gambardella, Matthew</author>\n` +
40 | ` <title>XML Developer's Guide</title>\n` +
41 | ` </book>\n` +
42 | ` <book id="bk102" category="fiction">\n` +
43 | ` <author>Ralls, Kim</author>\n` +
44 | ` <title>Midnight Rain</title>\n` +
45 | ` </book>\n` +
46 | `</catalog>\n`;
47 |
48 | await client.callTool({ name: 'create_file', arguments: { path: `${basePath}basic.xml`, content: xml } });
49 |
50 | // Create a larger XML to exercise truncation
51 | const manyBooks = Array.from({ length: 200 }, (_, i) => ` <book id="bk${1000 + i}"><title>T${i}</title></book>`).join('\n');
52 | const bigXml = `<?xml version="1.0"?><catalog xmlns="http://example.org/catalog">\n${manyBooks}\n</catalog>\n`;
53 | await client.callTool({ name: 'create_file', arguments: { path: `${basePath}big.xml`, content: bigXml } });
54 | });
55 |
56 | afterAll(async () => {
57 | await client.callTool({ name: 'delete_directory', arguments: { path: basePath, recursive: true } });
58 | await transport.close();
59 | });
60 |
61 | it('xml_structure returns structure for a basic XML', async () => {
62 | const res = await client.callTool({ name: 'xml_structure', arguments: { path: `${basePath}basic.xml`, maxDepth: 2, includeAttributes: true, maxResponseBytes: 1024 * 1024 } }, CallToolResultSchema);
63 | expect(res.isError).not.toBe(true);
64 | const text = getTextContent(res);
65 | const obj = JSON.parse(text);
66 | expect(obj.rootElement).toBe('catalog');
67 | expect(typeof obj.elements).toBe('object');
68 | expect(obj.namespaces).toBeDefined();
69 | });
70 |
71 | it('xml_query supports XPath with local-name() (namespace-agnostic)', async () => {
72 | const res = await client.callTool({ name: 'xml_query', arguments: { path: `${basePath}basic.xml`, query: "//*[local-name()='title']/text()", includeAttributes: true, maxResponseBytes: 50 * 1024 } }, CallToolResultSchema);
73 | expect(res.isError).not.toBe(true);
74 | const text = getTextContent(res);
75 | const arr = JSON.parse(text);
76 | expect(Array.isArray(arr)).toBe(true);
77 | // Expect at least two titles
78 | expect(arr.length).toBeGreaterThanOrEqual(2);
79 | expect(arr[0].type).toBeDefined();
80 | });
81 |
82 | it('xml_structure truncates output when exceeding maxResponseBytes', async () => {
83 | const res = await client.callTool({ name: 'xml_structure', arguments: { path: `${basePath}big.xml`, maxDepth: 2, includeAttributes: false, maxResponseBytes: 300 } }, CallToolResultSchema);
84 | expect(res.isError).not.toBe(true);
85 | const text = getTextContent(res);
86 | const obj = JSON.parse(text);
87 | expect(obj._meta?.truncated).toBe(true);
88 | });
89 |
90 | it('xml_query truncates output when exceeding maxResponseBytes', async () => {
91 | const res = await client.callTool({ name: 'xml_query', arguments: { path: `${basePath}big.xml`, query: "//*[local-name()='book']", includeAttributes: true, maxResponseBytes: 400 } }, CallToolResultSchema);
92 | expect(res.isError).not.toBe(true);
93 | const text = getTextContent(res);
94 | // Either includes meta or a small array; ensure length is not huge
95 | expect(text.length).toBeLessThanOrEqual(400 + 200); // allow small overhead
96 | });
97 |
98 | it('xml_to_json_string returns JSON and applies response cap when small', async () => {
99 | const res = await client.callTool({ name: 'xml_to_json_string', arguments: { xmlPath: `${basePath}big.xml`, maxResponseBytes: 500 } }, CallToolResultSchema);
100 | expect(res.isError).not.toBe(true);
101 | const jsonText = getTextContent(res);
102 | const parsed = JSON.parse(jsonText);
103 | expect(parsed._meta?.truncated).toBe(true);
104 | });
105 |
106 | it('xml_to_json writes a file and applies response cap when small', async () => {
107 | const outPath = `${basePath}out.json`;
108 | const res = await client.callTool({ name: 'xml_to_json', arguments: { xmlPath: `${basePath}big.xml`, jsonPath: outPath, maxResponseBytes: 600, options: { format: true } } }, CallToolResultSchema);
109 | expect(res.isError).not.toBe(true);
110 | const read = await client.callTool({ name: 'read_file', arguments: { path: outPath, maxBytes: 100000 } }, CallToolResultSchema);
111 | const jsonText = getTextContent(read);
112 | const parsed = JSON.parse(jsonText);
113 | expect(parsed._meta?.truncated).toBe(true);
114 | });
115 | });
116 |
117 |
118 |
```
--------------------------------------------------------------------------------
/src/handlers/directory-handlers.ts:
--------------------------------------------------------------------------------
```typescript
1 | import fs from 'fs/promises';
2 | import path from 'path';
3 | import { minimatch } from 'minimatch';
4 | import { Permissions } from '../config/permissions.js';
5 | import { validatePath } from '../utils/path-utils.js';
6 | import { parseArgs } from '../utils/schema-utils.js';
7 | import {
8 | CreateDirectoryArgsSchema,
9 | ListDirectoryArgsSchema,
10 | DirectoryTreeArgsSchema,
11 | DeleteDirectoryArgsSchema,
12 | type CreateDirectoryArgs,
13 | type ListDirectoryArgs,
14 | type DirectoryTreeArgs,
15 | type DeleteDirectoryArgs
16 | } from '../schemas/directory-operations.js';
17 |
18 | interface TreeEntry {
19 | name: string;
20 | type: 'file' | 'directory';
21 | children?: TreeEntry[];
22 | }
23 |
24 | export async function handleCreateDirectory(
25 | args: unknown,
26 | permissions: Permissions,
27 | allowedDirectories: string[],
28 | symlinksMap: Map<string, string>,
29 | noFollowSymlinks: boolean
30 | ) {
31 | const parsed = parseArgs(CreateDirectoryArgsSchema, args, 'create_directory');
32 |
33 | // Enforce permission checks
34 | if (!permissions.create && !permissions.fullAccess) {
35 | throw new Error('Cannot create directory: create permission not granted (requires --allow-create)');
36 | }
37 |
38 | const validPath = await validatePath(
39 | parsed.path,
40 | allowedDirectories,
41 | symlinksMap,
42 | noFollowSymlinks,
43 |     { checkParentExists: false } // mkdir with recursive:true creates missing parents, so skip the parent-existence check
44 | );
45 | await fs.mkdir(validPath, { recursive: true });
46 | return {
47 | content: [{ type: "text", text: `Successfully created directory ${parsed.path}` }],
48 | };
49 | }
50 |
51 | export async function handleListDirectory(
52 | args: unknown,
53 | allowedDirectories: string[],
54 | symlinksMap: Map<string, string>,
55 | noFollowSymlinks: boolean
56 | ) {
57 | const parsed = parseArgs(ListDirectoryArgsSchema, args, 'list_directory');
58 | const validPath = await validatePath(parsed.path, allowedDirectories, symlinksMap, noFollowSymlinks);
59 | const entries = await fs.readdir(validPath, { withFileTypes: true });
60 | const formatted = entries
61 | .map((entry) => `${entry.isDirectory() ? "[DIR]" : "[FILE]"} ${entry.name}`)
62 | .join("\n");
63 | return {
64 | content: [{ type: "text", text: formatted }],
65 | };
66 | }
67 |
68 | export async function handleDirectoryTree(
69 | args: unknown,
70 | allowedDirectories: string[],
71 | symlinksMap: Map<string, string>,
72 | noFollowSymlinks: boolean
73 | ) {
74 | const parsed = parseArgs(DirectoryTreeArgsSchema, args, 'directory_tree');
75 |
76 | const { path: startPath, maxDepth, excludePatterns } = parsed; // maxDepth is mandatory (handler default: 2)
77 | const validatedStartPath = await validatePath(startPath, allowedDirectories, symlinksMap, noFollowSymlinks);
78 |
79 | async function buildTree(
80 | currentPath: string,
81 | basePath: string,
82 | currentDepth: number,
83 | maxDepth?: number,
84 | excludePatterns?: string[]
85 | ): Promise<TreeEntry[]> {
86 | // Depth check
87 | if (maxDepth !== undefined && currentDepth >= maxDepth) {
88 | return []; // Stop traversal if max depth is reached
89 | }
90 |
91 | const validPath = await validatePath(currentPath, allowedDirectories, symlinksMap, noFollowSymlinks);
92 |
93 | let entries;
94 | try {
95 | entries = await fs.readdir(validPath, { withFileTypes: true });
96 | } catch (error) {
97 | // Handle cases where directory might not be readable
98 | console.error(`Error reading directory ${validPath}: ${error}`);
99 | return [];
100 | }
101 |
102 | const result: TreeEntry[] = [];
103 |
104 | for (const entry of entries) {
105 | const entryFullPath = path.join(currentPath, entry.name);
106 | const entryRelativePath = path.relative(basePath, entryFullPath);
107 |
108 | // Exclusion check using minimatch
109 | if (excludePatterns && excludePatterns.length > 0) {
110 | const shouldExclude = excludePatterns.some(pattern =>
111 | minimatch(entryRelativePath, pattern, { dot: true, matchBase: true })
112 | );
113 | if (shouldExclude) {
114 | continue; // Skip this entry if it matches any exclude pattern
115 | }
116 | }
117 |
118 | const entryData: TreeEntry = {
119 | name: entry.name,
120 | type: entry.isDirectory() ? 'directory' : 'file'
121 | };
122 |
123 | if (entry.isDirectory()) {
124 | // Recursive call with incremented depth
125 | entryData.children = await buildTree(
126 | entryFullPath,
127 | basePath,
128 | currentDepth + 1,
129 | maxDepth,
130 | excludePatterns
131 | );
132 | }
133 |
134 | result.push(entryData);
135 | }
136 |
137 | return result;
138 | }
139 |
140 | // Initial call to buildTree with base parameters
141 | const treeData = await buildTree(
142 | validatedStartPath,
143 | validatedStartPath,
144 | 0,
145 | maxDepth,
146 | excludePatterns
147 | );
148 |
149 | return {
150 | content: [{
151 | type: "text",
152 | text: JSON.stringify(treeData, null, 2)
153 | }],
154 | };
155 | }
156 |
157 | export async function handleDeleteDirectory(
158 | args: unknown,
159 | permissions: Permissions,
160 | allowedDirectories: string[],
161 | symlinksMap: Map<string, string>,
162 | noFollowSymlinks: boolean
163 | ) {
164 | const parsed = parseArgs(DeleteDirectoryArgsSchema, args, 'delete_directory');
165 |
166 | // Enforce permission checks
167 | if (!permissions.delete && !permissions.fullAccess) {
168 | throw new Error('Cannot delete directory: delete permission not granted (requires --allow-delete)');
169 | }
170 |
171 | const validPath = await validatePath(parsed.path, allowedDirectories, symlinksMap, noFollowSymlinks);
172 |
173 | try {
174 | if (parsed.recursive) {
175 |       // Recursive delete removes the directory and all of its contents
176 | await fs.rm(validPath, { recursive: true, force: true });
177 | return {
178 | content: [{ type: "text", text: `Successfully deleted directory ${parsed.path} and all its contents` }],
179 | };
180 | } else {
181 | // Non-recursive directory delete
182 | await fs.rmdir(validPath);
183 | return {
184 | content: [{ type: "text", text: `Successfully deleted directory ${parsed.path}` }],
185 | };
186 | }
187 | } catch (error) {
188 | const msg = error instanceof Error ? error.message : String(error);
189 | if (msg.includes('ENOTEMPTY')) {
190 | throw new Error(`Cannot delete directory: directory is not empty. Use recursive=true to delete with contents.`);
191 | }
192 | throw new Error(`Failed to delete directory: ${msg}`);
193 | }
194 | }
```
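For reference, a `directory_tree` call through the MCP SDK, assuming a connected `client` as in the xml_tools suite above (argument values are illustrative):

```typescript
import { CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js';

const res = await client.callTool(
  { name: 'directory_tree', arguments: { path: 'demo/', maxDepth: 2, excludePatterns: ['**/archive/**'] } },
  CallToolResultSchema
);
// The returned text is a JSON array of TreeEntry objects, e.g.:
// [{ "name": "nested", "type": "directory", "children": [{ "name": "info.md", "type": "file" }] },
//  { "name": "info.txt", "type": "file" }]
```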
--------------------------------------------------------------------------------
/src/schemas/utility-operations.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Type, Static } from "@sinclair/typebox";
2 |
3 | export const GetPermissionsArgsSchema = Type.Object({});
4 | export type GetPermissionsArgs = Static<typeof GetPermissionsArgsSchema>;
5 |
6 | export const SearchFilesArgsSchema = Type.Object({
7 | path: Type.String(),
8 | pattern: Type.String(),
9 | excludePatterns: Type.Optional(
10 | Type.Array(Type.String(), { default: [] })
11 | ),
12 | maxDepth: Type.Integer({
13 | minimum: 1,
14 | description: 'Maximum directory depth to search. Must be a positive integer. Handler default: 2.'
15 | }),
16 | maxResults: Type.Integer({
17 | minimum: 1,
18 | description: 'Maximum number of results to return. Must be a positive integer. Handler default: 10.'
19 | })
20 | });
21 | export type SearchFilesArgs = Static<typeof SearchFilesArgsSchema>;
22 |
23 | export const FindFilesByExtensionArgsSchema = Type.Object({
24 | path: Type.String(),
25 | extension: Type.String({ description: 'File extension to search for (e.g., "xml", "json", "ts")' }),
26 | excludePatterns: Type.Optional(
27 | Type.Array(Type.String(), { default: [] })
28 | ),
29 | maxDepth: Type.Integer({
30 | minimum: 1,
31 | description: 'Maximum directory depth to search. Must be a positive integer. Handler default: 2.'
32 | }),
33 | maxResults: Type.Integer({
34 | minimum: 1,
35 | description: 'Maximum number of results to return. Must be a positive integer. Handler default: 10.'
36 | })
37 | });
38 | export type FindFilesByExtensionArgs = Static<typeof FindFilesByExtensionArgsSchema>;
39 |
40 | export const XmlToJsonArgsSchema = Type.Object({
41 | xmlPath: Type.String({ description: 'Path to the XML file to convert' }),
42 | jsonPath: Type.String({ description: 'Path where the JSON should be saved' }),
43 | maxBytes: Type.Optional(Type.Integer({
44 | minimum: 1,
45 | description: '[Deprecated semantics] Previously limited file bytes read; ignored for parsing; considered only as a response size cap where applicable.'
46 | })),
47 | maxResponseBytes: Type.Optional(Type.Integer({
48 | minimum: 1,
49 | description: 'Maximum size, in bytes, of the returned content. Parsing reads full file; response may be truncated to respect this limit.'
50 | })),
51 | options: Type.Optional(
52 | Type.Object({
53 | ignoreAttributes: Type.Boolean({ default: false, description: 'Whether to ignore attributes in XML' }),
54 | preserveOrder: Type.Boolean({ default: true, description: 'Whether to preserve the order of properties' }),
55 | format: Type.Boolean({ default: true, description: 'Whether to format the JSON output' }),
56 | indentSize: Type.Number({ default: 2, description: 'Number of spaces for indentation' })
57 | }, { default: {} })
58 | )
59 | });
60 | export type XmlToJsonArgs = Static<typeof XmlToJsonArgsSchema>;
61 |
62 | export const XmlToJsonStringArgsSchema = Type.Object({
63 | xmlPath: Type.String({ description: 'Path to the XML file to convert' }),
64 | maxBytes: Type.Optional(Type.Integer({
65 | minimum: 1,
66 | description: '[Deprecated semantics] Previously limited file bytes read; now treated as a response size cap in bytes.'
67 | })),
68 | maxResponseBytes: Type.Optional(Type.Integer({
69 | minimum: 1,
70 | description: 'Maximum size, in bytes, of the returned JSON string. Parsing reads full file; response may be truncated to respect this limit.'
71 | })),
72 | options: Type.Optional(
73 | Type.Object({
74 | ignoreAttributes: Type.Boolean({ default: false, description: 'Whether to ignore attributes in XML' }),
75 | preserveOrder: Type.Boolean({ default: true, description: 'Whether to preserve the order of properties' })
76 | }, { default: {} })
77 | )
78 | });
79 | export type XmlToJsonStringArgs = Static<typeof XmlToJsonStringArgsSchema>;
80 |
81 | export const XmlQueryArgsSchema = Type.Object({
82 | path: Type.String({ description: 'Path to the XML file to query' }),
83 | query: Type.Optional(Type.String({ description: 'XPath query to execute against the XML file' })),
84 | structureOnly: Type.Optional(Type.Boolean({ default: false, description: 'If true, returns only tag names and structure instead of executing query' })),
85 | maxBytes: Type.Optional(Type.Integer({
86 | minimum: 1,
87 | description: '[Deprecated semantics] Previously limited file bytes read; now treated as a response size cap in bytes.'
88 | })),
89 | maxResponseBytes: Type.Optional(Type.Integer({
90 | minimum: 1,
91 | description: 'Maximum size, in bytes, of the returned content. Parsing reads full file; response may be truncated to respect this limit.'
92 | })),
93 | includeAttributes: Type.Optional(Type.Boolean({ default: true, description: 'Whether to include attribute information in the results' }))
94 | });
95 | export type XmlQueryArgs = Static<typeof XmlQueryArgsSchema>;
96 |
97 | export const XmlStructureArgsSchema = Type.Object({
98 | path: Type.String({ description: 'Path to the XML file to analyze' }),
99 | maxDepth: Type.Integer({
100 | minimum: 1,
101 | description: 'How deep to analyze the hierarchy. Must be a positive integer. Handler default: 2.'
102 | }),
103 | includeAttributes: Type.Optional(Type.Boolean({ default: true, description: 'Whether to include attribute information' })),
104 | maxBytes: Type.Optional(Type.Integer({
105 | minimum: 1,
106 | description: '[Deprecated semantics] Previously limited file bytes read; now treated as a response size cap in bytes.'
107 | })),
108 | maxResponseBytes: Type.Optional(Type.Integer({
109 | minimum: 1,
110 | description: 'Maximum size, in bytes, of the returned content. Parsing reads full file; response may be truncated to respect this limit.'
111 | }))
112 | });
113 | export type XmlStructureArgs = Static<typeof XmlStructureArgsSchema>;
114 |
115 | export const RegexSearchContentArgsSchema = Type.Object({
116 | path: Type.String({ description: 'Directory path to start the search from.' }),
117 | regex: Type.String({ description: 'The regular expression pattern to search for within file content.' }),
118 |   filePattern: Type.Optional(Type.String({ default: '*', description: 'Glob pattern to filter files to search within (e.g., "*.ts", "data/**/*.json"). Defaults to searching all files.' })),
119 | maxDepth: Type.Optional(Type.Integer({ minimum: 1, default: 2, description: 'Maximum directory depth to search recursively. Defaults to 2.' })),
120 | maxFileSize: Type.Optional(Type.Integer({ minimum: 1, default: 10 * 1024 * 1024, description: 'Maximum file size in bytes to read for searching. Defaults to 10MB.' })),
121 | maxResults: Type.Optional(Type.Integer({ minimum: 1, default: 50, description: 'Maximum number of files with matches to return. Defaults to 50.' }))
122 | });
123 | export type RegexSearchContentArgs = Static<typeof RegexSearchContentArgsSchema>;
124 |
```
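These TypeBox schemas can also be checked directly with TypeBox's `Value` module; a minimal sketch (the server itself routes validation through `parseArgs` from `src/utils/schema-utils.ts`, not shown here, and the import path is illustrative):

```typescript
import { Value } from '@sinclair/typebox/value';
import { RegexSearchContentArgsSchema } from './src/schemas/utility-operations.js';

const args = { path: 'regex_search_content/', regex: 'TODO', maxDepth: 2 };

if (!Value.Check(RegexSearchContentArgsSchema, args)) {
  // Collect human-readable validation errors
  const errors = [...Value.Errors(RegexSearchContentArgsSchema, args)];
  throw new Error(errors.map(e => `${e.path}: ${e.message}`).join('; '));
}
```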
--------------------------------------------------------------------------------
/src/schemas/json-operations.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Type, Static } from "@sinclair/typebox";
2 |
3 | // Schema for JSONPath query operations
4 | export const JsonQueryArgsSchema = Type.Object({
5 | path: Type.String({ description: 'Path to the JSON file to query' }),
6 | query: Type.String({ description: 'JSONPath expression to execute against the JSON data' }),
7 | maxBytes: Type.Integer({
8 | minimum: 1,
9 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
10 | })
11 | });
12 | export type JsonQueryArgs = Static<typeof JsonQueryArgsSchema>;
13 |
14 | // Schema for filtering JSON arrays
15 | export const JsonFilterArgsSchema = Type.Object({
16 | path: Type.String({ description: 'Path to the JSON file to filter' }),
17 | arrayPath: Type.Optional(
18 | Type.String({ description: 'Optional JSONPath expression to locate the target array (e.g., "$.items" or "$.data.records")' })
19 | ),
20 | conditions: Type.Array(
21 | Type.Object({
22 | field: Type.String({ description: 'Path to the field to check (e.g., "address.city" or "tags[0]")' }),
23 | operator: Type.Union([
24 | Type.Literal('eq'), Type.Literal('neq'),
25 | Type.Literal('gt'), Type.Literal('gte'),
26 | Type.Literal('lt'), Type.Literal('lte'),
27 | Type.Literal('contains'),
28 | Type.Literal('startsWith'),
29 | Type.Literal('endsWith'),
30 | Type.Literal('exists'),
31 | Type.Literal('type')
32 | ], { description: 'Comparison operator' }),
33 | value: Type.Any({ description: 'Value to compare against' })
34 | }),
35 | { minItems: 1, description: 'Array of filter conditions' }
36 | ),
37 | match: Type.Optional(
38 | Type.Union([Type.Literal('all'), Type.Literal('any')], {
39 | default: 'all',
40 | description: 'How to combine multiple conditions - "all" for AND, "any" for OR'
41 | })
42 | ),
43 | maxBytes: Type.Integer({
44 | minimum: 1,
45 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
46 | })
47 | });
48 | export type JsonFilterArgs = Static<typeof JsonFilterArgsSchema>;
49 |
50 | // Schema for getting a specific value from a JSON file
51 | export const JsonGetValueArgsSchema = Type.Object({
52 | path: Type.String({ description: 'Path to the JSON file' }),
53 | field: Type.String({ description: 'Path to the field to retrieve (e.g., "user.address.city" or "items[0].name")' }),
54 | maxBytes: Type.Integer({
55 | minimum: 1,
56 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
57 | })
58 | });
59 | export type JsonGetValueArgs = Static<typeof JsonGetValueArgsSchema>;
60 |
61 | // Schema for transforming JSON data
62 | export const JsonTransformArgsSchema = Type.Object({
63 | path: Type.String({ description: 'Path to the JSON file to transform' }),
64 | operations: Type.Array(
65 | Type.Object({
66 | type: Type.Union([
67 | Type.Literal('map'),
68 | Type.Literal('groupBy'),
69 | Type.Literal('sort'),
70 | Type.Literal('flatten'),
71 | Type.Literal('pick'),
72 | Type.Literal('omit')
73 | ], { description: 'Type of transformation operation' }),
74 | field: Type.Optional(Type.String({ description: 'Field to operate on (if applicable)' })),
75 | order: Type.Optional(Type.Union([Type.Literal('asc'), Type.Literal('desc')], { description: 'Sort order (if applicable)' })),
76 | fields: Type.Optional(Type.Array(Type.String(), { description: 'Fields to pick/omit (if applicable)' }))
77 | }),
78 | { minItems: 1, description: 'Array of transformation operations to apply in sequence' }
79 | ),
80 | maxBytes: Type.Integer({
81 | minimum: 1,
82 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
83 | })
84 | });
85 | export type JsonTransformArgs = Static<typeof JsonTransformArgsSchema>;
86 |
87 | // Schema for getting JSON structure
88 | export const JsonStructureArgsSchema = Type.Object({
89 | path: Type.String({ description: 'Path to the JSON file to analyze' }),
90 | maxBytes: Type.Integer({
91 | minimum: 1,
92 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
93 | }),
94 | maxDepth: Type.Integer({
95 | minimum: 1,
96 | description: 'How deep to analyze the structure. Must be a positive integer. Handler default: 2.'
97 | }),
98 | detailedArrayTypes: Type.Optional(Type.Boolean({
99 | default: false,
100 | description: 'Whether to analyze all array elements for mixed types (default: false)'
101 | }))
102 | });
103 | export type JsonStructureArgs = Static<typeof JsonStructureArgsSchema>;
104 |
105 | // Schema for sampling JSON array elements
106 | export const JsonSampleArgsSchema = Type.Object({
107 | path: Type.String({ description: 'Path to the JSON file containing the array' }),
108 | arrayPath: Type.String({ description: 'JSONPath expression to locate the target array (e.g., "$.items" or "$.data.records")' }),
109 | count: Type.Integer({ minimum: 1, description: 'Number of elements to sample' }),
110 | method: Type.Optional(
111 | Type.Union([Type.Literal('first'), Type.Literal('random')], {
112 | default: 'first',
113 | description: 'Sampling method - "first" for first N elements, "random" for random sampling'
114 | })
115 | ),
116 | maxBytes: Type.Integer({
117 | minimum: 1,
118 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
119 | })
120 | });
121 | export type JsonSampleArgs = Static<typeof JsonSampleArgsSchema>;
122 |
123 | // Schema for JSON Schema validation
124 | export const JsonValidateArgsSchema = Type.Object({
125 | path: Type.String({ description: 'Path to the JSON file to validate' }),
126 | schemaPath: Type.String({ description: 'Path to the JSON Schema file' }),
127 | maxBytes: Type.Integer({
128 | minimum: 1,
129 | description: 'Maximum bytes to read from the file. Must be a positive integer. Handler default: 10KB.'
130 | }),
131 | strict: Type.Optional(Type.Boolean({
132 | default: false,
133 | description: 'Whether to enable strict mode validation (additionalProperties: false)'
134 | })),
135 | allErrors: Type.Optional(Type.Boolean({
136 | default: true,
137 | description: 'Whether to collect all validation errors or stop at first error'
138 | }))
139 | });
140 | export type JsonValidateArgs = Static<typeof JsonValidateArgsSchema>;
141 |
142 | // Schema for searching JSON files by key/value pairs
143 | export const JsonSearchKvArgsSchema = Type.Object({
144 | directoryPath: Type.String({ description: 'Directory to search in' }),
145 | key: Type.String({ description: 'Key to search for' }),
146 | value: Type.Optional(Type.Any({ description: 'Optional value to match against the key' })),
147 | recursive: Type.Optional(Type.Boolean({ default: true, description: 'Whether to search recursively in subdirectories' })),
148 | matchType: Type.Optional(
149 | Type.Union([
150 | Type.Literal('exact'),
151 | Type.Literal('contains'),
152 | Type.Literal('startsWith'),
153 | Type.Literal('endsWith')
154 | ], { default: 'exact', description: 'How to match values - only applies if value is provided' })
155 | ),
156 | maxBytes: Type.Integer({
157 | minimum: 1,
158 | description: 'Maximum bytes to read from each file. Must be a positive integer. Handler default: 10KB.'
159 | }),
160 | maxResults: Type.Integer({
161 | minimum: 1,
162 | description: 'Maximum number of results to return. Must be a positive integer. Handler default: 10.'
163 | }),
164 | maxDepth: Type.Integer({
165 | minimum: 1,
166 | description: 'Maximum directory depth to search. Must be a positive integer. Handler default: 2.'
167 | })
168 | });
169 | export type JsonSearchKvArgs = Static<typeof JsonSearchKvArgsSchema>;
170 |
```
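As a concrete illustration of the condition shape accepted by `json_filter`, an argument object that keeps active users in a matching city (file path and values are illustrative):

```typescript
import type { JsonFilterArgs } from './src/schemas/json-operations.js';

const args: JsonFilterArgs = {
  path: 'test/json/users.json',
  arrayPath: '$.users',                 // optional JSONPath to the target array
  conditions: [
    { field: 'active', operator: 'eq', value: true },
    { field: 'address.city', operator: 'contains', value: 'Spring' }
  ],
  match: 'all',                         // AND the conditions together
  maxBytes: 10_000
};
```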
--------------------------------------------------------------------------------
/test/suites/regex_search_content/spec.md:
--------------------------------------------------------------------------------
```markdown
1 | # Test Suite Specification: `test-filesystem::regex_search_content`
2 |
3 | ## 1. Objective
4 |
5 | This document defines the specification for creating a robust, automated test suite for the `regex_search_content` tool provided by the `test-filesystem` MCP server. The goal is to ensure the tool functions correctly across various scenarios, including those identified during prior interactive testing.
6 |
7 | ## 2. Testing Framework
8 |
9 | **Recommendation:** **Bun's built-in test runner**
10 |
11 | * **Rationale:** The runner ships with Bun itself (no extra dependency), exposes a Jest-compatible API, and runs quickly with native ESM support.
12 |
13 | ## 3. Directory Structure
14 |
15 | The test suite files and related artifacts will be organized within the main project repository (`mcp-filesystem/`) as follows:
16 |
17 | ```
18 | mcp-filesystem/
19 | ├── src/
20 | ├── test/
21 | │ ├── suites/
22 | │ │ ├── regex_search_content/
23 | │ │ │ ├── spec.md # This specification document
24 | │ │ │ ├── basic_search.test.ts # Example test file
25 | │ │ │ ├── depth_limiting.test.ts
26 | │ │ │ ├── error_handling.test.ts
27 | │ │ │ └── ... (other test files)
28 | │ │ └── ... (other tool suites)
29 | │ └── ... (other test helpers, fixtures)
30 | └── ... (package.json, tsconfig.json, etc.)
31 | ```
32 |
33 | **Test Data Directory (Managed by Tests via MCP):**
34 |
35 | All test files and directories manipulated *during test execution* will reside within the `test-filesystem` MCP server's designated root under a dedicated subdirectory:
36 |
37 | * `test/fs_root/regex_search_content/`
38 |
39 | This ensures test isolation and utilizes the MCP server's file operations for setup and teardown.
40 |
41 | ## 4. Test File Conventions
42 |
43 | * **Naming:** Test files should follow the pattern `[feature_or_scenario].test.ts`. Examples:
44 | * `basic_search.test.ts`
45 | * `file_patterns.test.ts`
46 | * `depth_limiting.test.ts`
47 | * `max_results.test.ts`
48 | * `error_handling.test.ts`
49 | * `edge_cases.test.ts`
50 | * **Structure:** Utilize standard structure using Bun's test API (similar to Jest):
51 | * Use `describe()` blocks to group tests related to a specific feature or aspect (e.g., `describe('maxDepth parameter', () => { ... })`).
52 | * Use `it()` or `test()` for individual test cases with descriptive names (e.g., `it('should return matches only from files matching the filePattern')`).
53 | * Employ `beforeAll`, `afterAll`, `beforeEach`, `afterEach` for setup and teardown logic as described in Section 7.
54 |
55 | ## 5. MCP Tool Call Handling
56 |
57 | * **Method:** Tests will directly invoke the `test-filesystem::regex_search_content` tool using the project's established MCP client/communication mechanism.
58 | * **Invocation:** Calls should mirror how the tool would be used in a real scenario, passing parameters like `path`, `regex`, `filePattern`, `maxDepth`, `maxFileSize`, and `maxResults`.
59 | * **Example (Conceptual):**
60 | ```typescript
61 | import { mcpClient } from '../path/to/mcp/client'; // Assuming an MCP client instance
62 |
63 | it('should find a simple pattern in the root test directory', async () => {
64 | const result = await mcpClient.useTool('test-filesystem', 'regex_search_content', {
65 | path: 'regex_search_content/', // Relative to MCP server root
66 | regex: 'unique_pattern_123',
67 | // maxDepth, maxResults etc. with defaults or specific values
68 | });
69 | // Assertions on the result structure and content
70 | expect(result.success).toBe(true);
71 | expect(result.data).toEqual(expect.arrayContaining([
72 | expect.objectContaining({
73 | file: 'regex_search_content/file1.txt',
74 | matches: expect.arrayContaining([
75 | expect.objectContaining({ line: 5, text: expect.stringContaining('unique_pattern_123') })
76 | ])
77 | })
78 | ]));
79 | });
80 | ```
81 | * **Abstraction (Optional Future Improvement):** A helper function or class could be created later to wrap these MCP calls, simplifying test code and potentially enabling easier mocking if needed for unit testing components that *use* the MCP client. For this initial suite, direct calls are sufficient.
82 |
83 | ## 6. Test Case Implementation
84 |
85 | * **Coverage:** Implement test cases derived from the original list (RCS-001 to RCS-083, skipping noted exclusions) and specifically address the findings from interactive testing.
86 | * **Findings Integration:**
87 | * **Path:** Always use `regex_search_content/` as the base `path` parameter in tests targeting the prepared test data area.
88 | * **Recursion/Depth:**
89 | * Test default depth behavior.
90 | * Test `maxDepth: 1`, `maxDepth: 2`, `maxDepth: N` (where N > 2).
91 | * Test interactions between `maxDepth` and `filePattern` (e.g., pattern matches file beyond `maxDepth`).
92 |         * Explicitly test `maxDepth: 0` and assert that it throws an appropriate validation error (addressing RCS-040 failure); a sketch of this case follows this section.
93 | * **Regex Flags:**
94 |         * Confirm that inline flags such as `(?i)` and `(?m)` are *not* supported and produce an invalid-regex error.
95 | * Test case-insensitive matching using character sets (e.g., `[Ss]earch`, `[Tt]his`).
96 | * **`maxResults`:**
97 | * Create scenarios with more files containing matches than `maxResults`.
98 | * Assert that the number of *files* in the result array equals `maxResults`.
99 | * Assert that the matches *within* the returned files are not truncated by `maxResults`.
100 |     * **Glob Negation:** Test `filePattern: '!(*.log)'` or similar and assert that it either errors or fails to exclude the files, confirming that glob negation is unsupported.
101 | * **Error Handling:**
102 | * Test with syntactically invalid regex patterns (confirming RCS-070 pass).
103 | * Test with a `path` that does not exist within the MCP server's scope (assert specific error, not "No matches" - addressing RCS-071 failure).
104 | * Test using a file path as the `path` parameter (assert specific error, not "No matches" - addressing RCS-072 failure).
105 | * **Assertions:** Use `expect` assertions to validate:
106 | * Success/failure status of the MCP call.
107 | * Presence or absence of expected files in the results.
108 | * Correct file paths (relative to the MCP server root).
109 | * Correct line numbers for matches.
110 | * Correct matching text snippets.
111 | * Correct error messages/types for invalid inputs or scenarios.
112 |
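A minimal sketch of the `maxDepth: 0` case referenced above, using the same MCP SDK client conventions as the existing suites (client setup omitted; depending on the client, the failure may surface as a thrown error rather than `isError`):

```typescript
it('rejects maxDepth: 0 with a validation error', async () => {
  const res = await client.callTool(
    { name: 'regex_search_content', arguments: { path: 'regex_search_content/', regex: 'x', maxDepth: 0 } },
    CallToolResultSchema
  );
  expect(res.isError).toBe(true); // schema requires minimum: 1
});
```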
113 | ## 7. Setup & Teardown
114 |
115 | Test setup and teardown are crucial for creating a controlled environment within the `test-filesystem` MCP server's accessible directory (`test/fs_root/`).
116 |
117 | * **Mechanism:** Use `beforeAll`, `afterAll` (and potentially `beforeEach`/`afterEach` if needed) within the test files.
118 | * **Actions:** These hooks will use the `test-filesystem` MCP server's *own tools* (`create_directory`, `create_file`, `delete_directory`) to manage the test environment under `regex_search_content/`. The directory `test/fs_root/regex_search_content/` is created during setup and removed after the tests complete. A minimal sketch follows at the end of this section.
119 |
120 | * **`beforeAll` (Executed once per test file):**
121 | 1. **Ensure Base Directory:** Call `test-filesystem::create_directory` with `path: 'regex_search_content/'`. This is idempotent.
122 | 2. **Create Test Files/Dirs:** Call `test-filesystem::create_file` multiple times to populate `regex_search_content/` with a variety of files and subdirectories needed for the tests in that specific file. Examples:
123 | * `regex_search_content/file1.txt` (contains pattern A)
124 | * `regex_search_content/subdir1/file2.log` (contains pattern A, pattern B)
125 | * `regex_search_content/subdir1/nested/file3.txt` (contains pattern C)
126 | * `regex_search_content/empty.txt`
127 | * Files designed to test `maxResults`, `maxDepth`, `filePattern`, etc.
128 | * **`afterAll` (Executed once per test file):**
129 | 1. **Clean Up:** Call `test-filesystem::delete_directory` with `path: 'regex_search_content/'` and `recursive: true`. This removes all test-specific files and directories created by the corresponding `beforeAll`.
130 | * **`beforeEach`/`afterEach`:** Use sparingly only if a specific test requires a pristine state different from the `beforeAll` setup or needs to clean up uniquely created artifacts.
131 |
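A minimal setup/teardown sketch under these conventions (tool names and argument shapes follow the `test-filesystem` server; client wiring mirrors the existing suites):

```typescript
beforeAll(async () => {
  await client.callTool({ name: 'create_directory', arguments: { path: 'regex_search_content/' } });
  await client.callTool({
    name: 'create_file',
    arguments: { path: 'regex_search_content/file1.txt', content: 'line with unique_pattern_123\n' }
  });
});

afterAll(async () => {
  await client.callTool({
    name: 'delete_directory',
    arguments: { path: 'regex_search_content/', recursive: true }
  });
});
```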
132 | ## 8. Execution
133 | * Tests will be executed using Bun with the configured test runner:
134 | * **Bun test:** `bun test test/suites/regex_search_content/` (or specific file)
135 | * Integration with CI/CD pipelines should execute the Bun command.
136 |
```
--------------------------------------------------------------------------------
/.ai/rules/filesystem-mcp-server-usage.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | description: Guide on using the filesystem MCP server, covering capabilities, permissions, security, use-cases, and efficient tool chaining. Consult before performing filesystem operations.
3 | globs:
4 | alwaysApply: false
5 | ---
6 |
7 | # Guide: Using the Filesystem MCP Server
8 |
9 | This document provides guidance on interacting with the `mcp-filesystem` server, which facilitates secure and permission-controlled access to the local filesystem via the Model Context Protocol (MCP).
10 |
11 | ## Overview
12 |
13 | The `mcp-filesystem` server is a Bun application that exposes filesystem operations as MCP tools. It operates within a sandboxed environment, restricting actions to pre-configured directories and enforcing specific permissions.
14 |
15 | ## Core Capabilities (Tools)
16 |
17 | The server offers a range of tools for interacting with files and directories:
18 |
19 | **Reading:**
20 | * `read_file`: Reads the entire content of a single file.
21 | * `read_multiple_files`: Reads content from multiple files simultaneously.
22 |
23 | **Writing & Creation:**
24 | * `create_file`: Creates a new file with specified content. Requires `create` permission. Fails if the file exists.
25 | * `modify_file`: Overwrites an existing file with new content. Requires `edit` permission. Fails if the file doesn't exist.
26 | * `edit_file`: Makes targeted changes to specific parts of a text file while preserving the rest. Requires `edit` permission.
27 |
28 | **Deletion:**
29 | * `delete_file`: Deletes a specific file. Requires `delete` permission.
30 | * `delete_directory`: Deletes a directory (potentially recursively). Requires `delete` permission.
31 |
32 | **Moving & Renaming:**
33 | * `move_file`: Moves or renames a file or directory. Requires `move` permission.
34 | * `rename_file`: Renames a file. Requires `rename` permission.
35 |
36 | **Listing & Exploration:**
37 | * `list_directory`: Lists the contents of a directory.
38 | * `directory_tree`: Provides a tree-like view of a directory structure.
39 |
40 | **Searching:**
41 | * `search_files`: Finds files based on name patterns.
42 | * `find_files_by_extension`: Finds all files with a specific extension.
43 |
44 | **Metadata:**
45 | * `get_file_info`: Retrieves information about a file or directory (size, type, timestamps).
46 |
47 | **System Information:**
48 | * `list_allowed_directories`: Returns the list of directories that the server is allowed to access.
49 | * `get_permissions`: Returns the current permission state of the server.
50 |
51 | **XML Operations:**
52 | * `xml_query`: Queries XML file using XPath expressions.
53 | * `xml_structure`: Analyzes XML file structure.
54 | * `xml_to_json`: Converts XML file to JSON format and optionally saves to a file.
55 | * `xml_to_json_string`: Converts XML file to a JSON string and returns it directly.
56 |
57 | **JSON Operations:**
58 | * `json_query`: Queries JSON data using JSONPath expressions.
59 | * `json_structure`: Gets the structure of a JSON file.
60 | * `json_filter`: Filters JSON array data using flexible conditions.
61 | * `json_get_value`: Gets a specific value from a JSON file.
62 | * `json_transform`: Transforms JSON data using a sequence of operations.
63 | * `json_sample`: Samples elements from an array within a JSON file.
64 | * `json_validate`: Validates JSON data against a JSON schema.
65 | * `json_search_kv`: Searches for key-value pairs in a JSON file.
66 |
67 | ## Permissions Model
68 |
69 | Understanding the active permissions for the server instance is **critical** before attempting operations, especially write operations.
70 |
71 | * **Default:** If no permission flags are specified, the server operates in **read-only** mode.
72 | * `--readonly`: Explicitly sets read-only mode. **This flag overrides all other permission flags.**
73 | * `--full-access`: Grants permission for **all** operations (read, create, edit, move, rename, delete).
74 | * `--allow-create`: Grants permission to create files/directories.
75 | * `--allow-edit`: Grants permission to modify files.
76 | * `--allow-move`: Grants permission to move files/directories.
77 | * `--allow-rename`: Grants permission to rename files/directories.
78 | * `--allow-delete`: Grants permission to delete files/directories.
79 |
80 | **Action:** Always check the server configuration (usually in `.cursor/mcp.json`) to identify the specific server instance being used (e.g., `mcp-test-readonly`, `filesystem`) and determine its active permissions (`--readonly`, `--full-access`, `--allow-*`) and allowed directories. **Do not assume write permissions are available.**
81 |
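For illustration, a server entry with explicit permission flags might look like the following in `.cursor/mcp.json` (the command and paths are examples, not a prescription; see the `examples/` directory for maintained configs):

```json
{
  "mcpServers": {
    "filesystem": {
      "command": "bun",
      "args": [
        "/path/to/mcp-filesystem/dist/index.js",
        "/path/to/project",
        "--allow-create",
        "--allow-edit"
      ]
    }
  }
}
```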
82 | ## Security Considerations
83 |
84 | * **Sandboxing:** All operations are strictly confined to the directories specified when the server was launched. Path traversal outside these directories is prevented.
85 | * **Symlinks:** By default, the server follows symbolic links, resolving them to their real paths (targets must still fall inside allowed directories). If the `--no-follow-symlinks` flag is used, the server will refuse to operate on or through symlinks, enhancing security. Check the server configuration.
86 | * **Path Validation:** Input paths are normalized and validated against the allowed directories.
87 | * **Large File Handling:** Always check file size with `get_file_info` before reading file contents to prevent memory issues with large files. Consider using alternative approaches for very large files, such as targeted searches or incremental processing.
88 | * **Large Directory Trees:** Use extreme caution when requesting directory trees, especially for root directories or large project folders. Always use `get_file_info` first to check the directory size and entry count. For large directories (e.g., >1000 entries), prefer targeted `list_directory` operations or use search with specific patterns instead of full tree traversal.
89 |
90 | ## Common Use Cases
91 |
92 | * Reading configuration or data files.
93 | * Modifying source code within a designated project directory.
94 | * Creating new components or modules.
95 | * Searching for specific functions, variables, or text across project files.
96 | * Refactoring code by moving or renaming files/directories.
97 | * Cleaning up temporary files or build artifacts.
98 | * Analyzing the structure of a project directory.
99 |
100 | ## Efficient Tool Chaining & Workflows
101 |
102 | Combine tools strategically for efficient task execution; a combined sketch follows the list:
103 |
104 | 1. **Exploration & Reading:**
105 | * Start with `list_directory` to see directory contents.
106 | * **Always use `get_file_info` first** to:
107 | - Check if a path exists and its type (file/directory)
108 | - Verify file sizes before reading contents
109 | - Check directory entry counts before requesting trees
110 | * For large files (e.g., >5MB), consider if you actually need the entire file content or if targeted operations would be more efficient.
111 | * For directories:
112 | - Start with non-recursive `list_directory` to assess directory size
113 | - Only use `directory_tree` for manageable directories (<1000 entries)
114 | - For large directories, use targeted `list_directory` operations
115 | - Consider using search operations instead of full tree traversal
116 | * Use `read_file` for single files or `read_multiple_files` for several files identified via listing/searching.
117 |
118 | 2. **Searching:**
119 | * Use `search_files` to locate files by name/pattern.
120 | * Use `find_files_by_extension` to find files of a specific type.
121 | * Follow up with `read_file` or `read_multiple_files` on the search results.
122 |
123 | 3. **Modification (Requires Permissions):**
124 | * **Verify Permissions:** Check permissions with `get_permissions` first.
125 | * Use `get_file_info` to confirm the file exists before attempting modification.
126 | * Use `modify_file` for simple overwrites or `edit_file` for targeted changes.
127 | * Consider reading the file (`read_file`) first if the modification depends on existing content.
128 |
129 | 4. **Creation (Requires Permissions):**
130 | * **Verify Permissions:** Check permissions with `get_permissions`.
131 | * Use `get_file_info` to ensure the file/directory *does not* already exist.
132 | * Use `create_file` or `create_directory`.
133 |
134 | 5. **Refactoring (Requires Permissions):**
135 | * **Verify Permissions:** Check permissions with `get_permissions`.
136 | * Use `list_directory` to identify targets.
137 | * Use `move_file` or `rename_file`. Use `get_file_info` first to confirm the source exists and the target doesn't (if renaming/moving to a specific new name).
138 |
139 | 6. **Deletion (Requires Permissions):**
140 | * **Verify Permissions:** Check permissions with `get_permissions`.
141 | * Use `get_file_info` to confirm the target exists.
142 | * Use `delete_file` or `delete_directory`. Be cautious with recursive directory deletion.
143 |
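Putting the exploration and modification steps together, a cautious read-then-modify chain might look like this (a sketch only: `client` is any connected MCP client, and the `get_file_info` argument shape is assumed):

```typescript
// 1. Verify permissions before planning any write
const perms = await client.callTool({ name: 'get_permissions', arguments: {} });

// 2. Check that the target exists and is reasonably small before reading
const info = await client.callTool({ name: 'get_file_info', arguments: { path: 'demo/info.txt' } });

// 3. Read first if the modification depends on existing content...
const file = await client.callTool({ name: 'read_file', arguments: { path: 'demo/info.txt', maxBytes: 100_000 } });
// ...then call modify_file or edit_file with arguments per their schemas (not shown here)
```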
144 | ## Summary
145 |
146 | Before using the filesystem server:
147 | 1. **Identify the specific server instance** configured (e.g., in `.cursor/mcp.json`).
148 | 2. **Check its configured allowed directories** using `list_allowed_directories`.
149 | 3. **Check its active permissions** using `get_permissions`.
150 | 4. **Check metadata before heavy operations:**
151 | - File sizes before reading contents
152 | - Directory entry counts before tree traversal
153 | 5. **Choose the appropriate tool(s)** for the task.
154 | 6. **Respect the sandbox** and permissions. Do not attempt operations known to be disallowed.
```
--------------------------------------------------------------------------------
/src/handlers/utility-handlers.ts:
--------------------------------------------------------------------------------
```typescript
1 | import fs from 'fs/promises';
2 | import path from 'path';
3 | import { XMLParser } from 'fast-xml-parser';
4 | import { Permissions } from '../config/permissions.js';
5 | import { validatePath } from '../utils/path-utils.js';
6 | import { parseArgs } from '../utils/schema-utils.js';
7 | import { searchFiles, findFilesByExtension, regexSearchContent } from '../utils/file-utils.js';
8 | import {
9 | GetPermissionsArgsSchema,
10 | SearchFilesArgsSchema,
11 | FindFilesByExtensionArgsSchema,
12 | XmlToJsonArgsSchema,
13 | XmlToJsonStringArgsSchema,
14 |   RegexSearchContentArgsSchema,
15 | type GetPermissionsArgs,
16 | type SearchFilesArgs,
17 | type FindFilesByExtensionArgs,
18 | type XmlToJsonArgs,
19 | type XmlToJsonStringArgs,
20 | type RegexSearchContentArgs
21 | } from '../schemas/utility-operations.js';
22 |
23 | export function handleGetPermissions(
24 | args: unknown,
25 | permissions: Permissions,
26 | readonlyFlag: boolean,
27 | noFollowSymlinks: boolean,
28 | allowedDirectories: string[]
29 | ) {
30 | parseArgs(GetPermissionsArgsSchema, args, 'get_permissions');
31 |
32 | return {
33 | content: [{
34 | type: "text",
35 | text: `Current permission state:
36 | readOnly: ${readonlyFlag}
37 | followSymlinks: ${!noFollowSymlinks}
38 | fullAccess: ${permissions.fullAccess}
39 |
40 | Operations allowed:
41 | - create: ${permissions.create}
42 | - edit: ${permissions.edit}
43 | - move: ${permissions.move}
44 | - rename: ${permissions.rename}
45 | - delete: ${permissions.delete}
46 |
47 | Server was started with ${allowedDirectories.length} allowed ${allowedDirectories.length === 1 ? 'directory' : 'directories'}.
48 | Use 'list_allowed_directories' to see them.`
49 | }],
50 | };
51 | }
52 |
53 | export async function handleSearchFiles(
54 | args: unknown,
55 | allowedDirectories: string[],
56 | symlinksMap: Map<string, string>,
57 | noFollowSymlinks: boolean
58 | ) {
59 | const parsed = parseArgs(SearchFilesArgsSchema, args, 'search_files');
60 | const { path: startPath, pattern, excludePatterns, maxDepth, maxResults } = parsed;
61 | const validPath = await validatePath(startPath, allowedDirectories, symlinksMap, noFollowSymlinks);
62 | const results = await searchFiles(validPath, pattern, excludePatterns, maxDepth, maxResults);
63 | return {
64 | content: [{ type: "text", text: results.length > 0 ? results.join("\n") : "No matches found" }],
65 | };
66 | }
67 |
68 | export async function handleFindFilesByExtension(
69 | args: unknown,
70 | allowedDirectories: string[],
71 | symlinksMap: Map<string, string>,
72 | noFollowSymlinks: boolean
73 | ) {
74 | const parsed = parseArgs(FindFilesByExtensionArgsSchema, args, 'find_files_by_extension');
75 | const { path: startPath, extension, excludePatterns, maxDepth, maxResults } = parsed;
76 | const validPath = await validatePath(startPath, allowedDirectories, symlinksMap, noFollowSymlinks);
77 | const results = await findFilesByExtension(
78 | validPath,
79 | extension,
80 | excludePatterns,
81 | maxDepth,
82 | maxResults
83 | );
84 | return {
85 | content: [{ type: "text", text: results.length > 0 ? results.join("\n") : "No matching files found" }],
86 | };
87 | }
88 |
89 | export async function handleXmlToJson(
90 | args: unknown,
91 | permissions: Permissions,
92 | allowedDirectories: string[],
93 | symlinksMap: Map<string, string>,
94 | noFollowSymlinks: boolean
95 | ) {
96 | const parsed = parseArgs(XmlToJsonArgsSchema, args, 'xml_to_json');
97 |
98 |   const { xmlPath, jsonPath, maxBytes, maxResponseBytes, options } = parsed;
99 | const validXmlPath = await validatePath(xmlPath, allowedDirectories, symlinksMap, noFollowSymlinks); // Source must exist
100 |
101 | const validJsonPath = await validatePath(
102 | jsonPath,
103 | allowedDirectories,
104 | symlinksMap,
105 | noFollowSymlinks,
106 |     { checkParentExists: false } // Output file may not exist yet; its parent is created before the write below
107 | );
108 | try {
109 | // Read the XML file (no input size gating; limit only output)
110 | const xmlContent = await fs.readFile(validXmlPath, "utf-8");
111 |
112 | // Parse XML to JSON
113 | const parserOptions = {
114 | ignoreAttributes: options?.ignoreAttributes ?? false,
115 | preserveOrder: options?.preserveOrder ?? true,
116 | // Add other options as needed
117 | };
118 |
119 | const parser = new XMLParser(parserOptions);
120 | const jsonObj = parser.parse(xmlContent);
121 |
122 | // Format JSON if requested
123 | const format = options?.format ?? true;
124 | const indentSize = options?.indentSize ?? 2;
125 | let jsonContent = format
126 | ? JSON.stringify(jsonObj, null, indentSize)
127 | : JSON.stringify(jsonObj);
128 |
129 | // Enforce response-size cap for file write by truncating content if needed
130 |     const responseLimit = maxResponseBytes ?? maxBytes; // schema exposes maxResponseBytes directly
131 | if (typeof responseLimit === 'number' && responseLimit > 0) {
132 | const size = Buffer.byteLength(jsonContent, 'utf8');
133 | if (size > responseLimit) {
134 | // Produce a summarized payload to fit limit
135 | const summary = {
136 | _meta: {
137 | truncated: true,
138 | originalSize: size,
139 | note: `JSON too large to write fully; summarizing to fit ${responseLimit} bytes.`
140 | },
141 | sample: Array.isArray(jsonObj) ? jsonObj.slice(0, 3) : (typeof jsonObj === 'object' ? Object.fromEntries(Object.entries(jsonObj).slice(0, 50)) : jsonObj)
142 | };
143 | jsonContent = JSON.stringify(summary, null, indentSize);
144 | }
145 | }
146 |
147 | // Check if JSON file exists to determine if this is a create operation
148 | let fileExists = false;
149 | try {
150 | await fs.access(validJsonPath);
151 | fileExists = true;
152 | } catch (error) {
153 | // File doesn't exist - this is a create operation
154 | }
155 |
156 | // Enforce permission checks for writing
157 | if (fileExists && !permissions.edit && !permissions.fullAccess) {
158 | throw new Error('Cannot write to existing JSON file: edit permission not granted (requires --allow-edit)');
159 | }
160 |
161 | if (!fileExists && !permissions.create && !permissions.fullAccess) {
162 | throw new Error('Cannot create new JSON file: create permission not granted (requires --allow-create)');
163 | }
164 |
165 | // Write JSON to file
166 | // Ensure parent dir exists before writing the JSON file
167 | const jsonParentDir = path.dirname(validJsonPath);
168 | await fs.mkdir(jsonParentDir, { recursive: true }); // Ensure parent exists
169 | await fs.writeFile(validJsonPath, jsonContent, "utf-8");
170 |
171 | return {
172 | content: [{
173 | type: "text",
174 | text: `Successfully converted XML from ${xmlPath} to JSON at ${jsonPath}`
175 | }],
176 | };
177 | } catch (error) {
178 | const errorMessage = error instanceof Error ? error.message : String(error);
179 | throw new Error(`Failed to convert XML to JSON: ${errorMessage}`);
180 | }
181 | }
182 |
183 | export async function handleXmlToJsonString(
184 | args: unknown,
185 | allowedDirectories: string[],
186 | symlinksMap: Map<string, string>,
187 | noFollowSymlinks: boolean
188 | ) {
189 | const parsed = parseArgs(XmlToJsonStringArgsSchema, args, 'xml_to_json_string');
190 |
191 |   const { xmlPath, maxBytes, maxResponseBytes, options } = parsed;
192 | const validXmlPath = await validatePath(xmlPath, allowedDirectories, symlinksMap, noFollowSymlinks);
193 |
194 | try {
195 | // Read the XML file (no input size gating; limit only output)
196 | const xmlContent = await fs.readFile(validXmlPath, "utf-8");
197 |
198 | // Parse XML to JSON
199 | const parserOptions = {
200 | ignoreAttributes: options?.ignoreAttributes ?? false,
201 | preserveOrder: options?.preserveOrder ?? true,
202 | // Add other options as needed
203 | };
204 |
205 | const parser = new XMLParser(parserOptions);
206 | const jsonObj = parser.parse(xmlContent);
207 |
208 | // Return the JSON as a string
209 | let jsonContent = JSON.stringify(jsonObj, null, 2);
210 |
211 | // Apply response-size cap
212 |     const responseLimit = maxResponseBytes ?? maxBytes ?? 200 * 1024; // default 200KB
213 | if (typeof responseLimit === 'number' && responseLimit > 0) {
214 | const size = Buffer.byteLength(jsonContent, 'utf8');
215 | if (size > responseLimit) {
216 | const summary = {
217 | _meta: {
218 | truncated: true,
219 | originalSize: size,
220 | note: `JSON too large; summarizing to fit ${responseLimit} bytes.`
221 | },
222 | sample: Array.isArray(jsonObj) ? jsonObj.slice(0, 5) : (typeof jsonObj === 'object' ? Object.fromEntries(Object.entries(jsonObj).slice(0, 100)) : jsonObj)
223 | };
224 | jsonContent = JSON.stringify(summary, null, 2);
225 | }
226 | }
227 |
228 | return {
229 | content: [{ type: "text", text: jsonContent }],
230 | };
231 | } catch (error) {
232 | const errorMessage = error instanceof Error ? error.message : String(error);
233 | throw new Error(`Failed to convert XML to JSON: ${errorMessage}`);
234 | }
235 | }
236 |
237 | export function handleListAllowedDirectories(
238 | args: unknown,
239 | allowedDirectories: string[]
240 | ): { content: [{ type: string; text: string }] } {
241 | return {
242 | content: [{
243 | type: "text",
244 | text: `Allowed directories:\n${allowedDirectories.join('\n')}`
245 | }],
246 | };
247 | }
248 |
249 | export async function handleRegexSearchContent(
250 | args: unknown,
251 | allowedDirectories: string[],
252 | symlinksMap: Map<string, string>,
253 | noFollowSymlinks: boolean
254 | ) {
255 | const parsed = parseArgs(RegexSearchContentArgsSchema, args, 'regex_search_content');
256 | const {
257 | path: startPath,
258 | regex,
259 | filePattern,
260 | maxDepth,
261 | maxFileSize,
262 | maxResults
263 | } = parsed;
264 |
265 | const validPath = await validatePath(startPath, allowedDirectories, symlinksMap, noFollowSymlinks);
266 |
267 | try {
268 | const results = await regexSearchContent(
269 | validPath,
270 | regex,
271 | filePattern,
272 | maxDepth,
273 | maxFileSize,
274 | maxResults
275 | );
276 |
277 | if (results.length === 0) {
278 | return { content: [{ type: "text", text: "No matches found for the given regex pattern." }] };
279 | }
280 |
281 | // Format the output
282 | const formattedResults = results.map(fileResult => {
283 | const matchesText = fileResult.matches
284 | .map(match => ` Line ${match.lineNumber}: ${match.lineContent.trim()}`)
285 | .join('\n');
286 | return `File: ${fileResult.path}\n${matchesText}`;
287 | }).join('\n\n');
288 |
289 | return {
290 | content: [{ type: "text", text: formattedResults }],
291 | };
292 | } catch (error: any) {
293 | // Catch errors from regexSearchContent (e.g., invalid regex)
294 | throw new Error(`Error during regex content search: ${error.message}`);
295 | }
296 | }
```