This is page 1 of 3. Use http://codebase.md/shtse8/filesystem-mcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── __tests__
│ ├── handlers
│ │ ├── apply-diff.test.ts
│ │ ├── chmod-items.test.ts
│ │ ├── copy-items.test.ts
│ │ ├── create-directories.test.ts
│ │ ├── delete-items.test.ts
│ │ ├── list-files.test.ts
│ │ ├── move-items.test.ts
│ │ ├── read-content.test.ts
│ │ ├── replace-content.errors.test.ts
│ │ ├── replace-content.success.test.ts
│ │ ├── search-files.test.ts
│ │ ├── stat-items.test.ts
│ │ └── write-content.test.ts
│ ├── index.test.ts
│ ├── setup.ts
│ ├── test-utils.ts
│ └── utils
│ ├── apply-diff-utils.test.ts
│ ├── error-utils.test.ts
│ ├── path-utils.test.ts
│ ├── stats-utils.test.ts
│ └── string-utils.test.ts
├── .dockerignore
├── .github
│ ├── dependabot.yml
│ ├── FUNDING.yml
│ └── workflows
│ └── publish.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .prettierrc.cjs
├── bun.lock
├── CHANGELOG.md
├── commit_msg.txt
├── commitlint.config.cjs
├── Dockerfile
├── docs
│ ├── .vitepress
│ │ └── config.mts
│ ├── guide
│ │ └── introduction.md
│ └── index.md
├── eslint.config.ts
├── LICENSE
├── memory-bank
│ ├── .clinerules
│ ├── activeContext.md
│ ├── productContext.md
│ ├── progress.md
│ ├── projectbrief.md
│ ├── systemPatterns.md
│ └── techContext.md
├── package.json
├── pnpm-lock.yaml
├── README.md
├── src
│ ├── handlers
│ │ ├── apply-diff.ts
│ │ ├── chmod-items.ts
│ │ ├── chown-items.ts
│ │ ├── common.ts
│ │ ├── copy-items.ts
│ │ ├── create-directories.ts
│ │ ├── delete-items.ts
│ │ ├── index.ts
│ │ ├── list-files.ts
│ │ ├── move-items.ts
│ │ ├── read-content.ts
│ │ ├── replace-content.ts
│ │ ├── search-files.ts
│ │ ├── stat-items.ts
│ │ └── write-content.ts
│ ├── index.ts
│ ├── schemas
│ │ └── apply-diff-schema.ts
│ ├── types
│ │ └── mcp-types.ts
│ └── utils
│ ├── apply-diff-utils.ts
│ ├── error-utils.ts
│ ├── path-utils.ts
│ ├── stats-utils.ts
│ └── string-utils.ts
├── tsconfig.json
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
```
# Git files
.git
.gitignore
# Node modules
node_modules
# Build artifacts (we only need the build output in the final stage)
# Docker files
Dockerfile
.dockerignore
# Documentation / Other
README.md
memory-bank
.vscode
```
--------------------------------------------------------------------------------
/.prettierrc.cjs:
--------------------------------------------------------------------------------
```
// .prettierrc.cjs
module.exports = {
semi: true,
trailingComma: 'all',
singleQuote: true,
printWidth: 100, // Target line width
tabWidth: 2,
endOfLine: 'lf',
arrowParens: 'always',
jsxSingleQuote: false, // Use double quotes in JSX
bracketSpacing: true, // Add spaces inside braces: { foo: bar }
};
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
node_modules/
build/
*.log
.env*
# Test Coverage
coverage/
# Build output
dist/
# IDE files
.vscode/
.idea/
# OS generated files
.DS_Store
Thumbs.db
# NPM debug logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# VitePress cache
.vitepress/cache
.vitepress/dist
# Cache files
.eslintcache
# Test reports
test-report.junit.xml
```
--------------------------------------------------------------------------------
/memory-bank/.clinerules:
--------------------------------------------------------------------------------
```
<!-- Version: 4.6 | Last Updated: 2025-04-06 | Updated By: Roo -->
# Cline Rules for filesystem-mcp Project
## Tool Usage Preferences
- **Prioritize Edit Tools:** When modifying existing files, prefer using `apply_diff`, `insert_content`, or `search_and_replace` over `write_to_file`. `write_to_file` should primarily be used for creating new files or when a complete rewrite is necessary, as it can be less efficient for large files or minor edits.
## Technical Notes & Workarounds
- **Vitest ESM Mocking:** Mocking Node.js built-in ES Modules (like `fs/promises`) or external libraries (`glob`) using `vi.mock` or `vi.doMock` in Vitest can be problematic due to hoisting, scope, and type inference issues, especially when trying to modify mock behavior within tests. **Prefer direct dependency injection:**
- Export the core logic function from the handler file, accepting dependencies as an argument.
- In tests, import the core logic function.
- In `beforeEach`, create mock functions (`vi.fn()`) for dependencies.
- Use `vi.importActual` to get the real implementations and set them as the default for the mock functions.
- Create a `dependencies` object, passing in the mock functions.
- Call the core logic function with the `dependencies` object.
- Within specific tests requiring mocked behavior, modify the implementation of the mock function (e.g., `mockDependency.mockImplementation(...)`).
- *Obsolete `editFile` Strategy:* Previous attempts used `jest.unstable_mockModule` (likely a typo, meant Vitest equivalent) which was also unreliable.
- *Obsolete `listFiles` Strategy:* Initial integration tests avoided mocking but couldn't test error paths effectively. Dependency injection proved superior.
- **Execution Requirement:** Tests still require `NODE_OPTIONS=--experimental-vm-modules` (handled by `cross-env` in `package.json`).
- **`write_content` Tool Limitation:** This tool might incorrectly escape certain characters within the `<content>` block. Prefer `apply_diff` or `replace_content` for modifications.
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# Filesystem MCP Server (@sylphlab/filesystem-mcp)
[](https://badge.fury.io/js/%40sylphlab%2Ffilesystem-mcp)
[](https://hub.docker.com/r/sylphlab/filesystem-mcp)
<!-- Add other badges like License, Build Status if applicable -->
<a href="https://glama.ai/mcp/servers/@sylphlab/filesystem-mcp">
<img width="380" height="200" src="https://glama.ai/mcp/servers/@sylphlab/filesystem-mcp/badge" />
</a>
**Empower your AI agents (like Cline/Claude) with secure, efficient, and token-saving access to your project files.** This Node.js server implements the [Model Context Protocol (MCP)](https://docs.modelcontextprotocol.com/) to provide a robust set of filesystem tools, operating safely within a defined project root directory.
## Installation
There are several ways to use the Filesystem MCP Server:
**1. Recommended: `npx` (or `bunx`) via MCP Host Configuration**
The simplest way is via `npx` or `bunx`, configured directly in your MCP host environment (e.g., Roo/Cline's `mcp_settings.json`). This ensures you always use the latest version from npm without needing local installation or Docker.
_Example (`npx`):_
```json
{
"mcpServers": {
"filesystem-mcp": {
"command": "npx",
"args": ["@sylphlab/filesystem-mcp"],
"name": "Filesystem (npx)"
}
}
}
```
_Example (`bunx`):_
```json
{
"mcpServers": {
"filesystem-mcp": {
"command": "bunx",
"args": ["@sylphlab/filesystem-mcp"],
"name": "Filesystem (bunx)"
}
}
}
```
**Important:** The server uses its own Current Working Directory (`cwd`) as the project root. Ensure your MCP Host (e.g., Cline/VSCode) is configured to launch the command with the `cwd` set to your active project's root directory.
**2. Docker**
Use the official Docker image for containerized environments.
_Example MCP Host Configuration:_
```json
{
"mcpServers": {
"filesystem-mcp": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-v",
"/path/to/your/project:/app", // Mount your project to /app
"sylphlab/filesystem-mcp:latest"
],
"name": "Filesystem (Docker)"
}
}
}
```
**Remember to replace `/path/to/your/project` with the correct absolute path.**
**3. Local Build (For Development)**
1. Clone: `git clone https://github.com/sylphlab/filesystem-mcp.git`
2. Install: `cd filesystem-mcp && pnpm install` (Using pnpm now)
3. Build: `pnpm run build`
4. Configure MCP Host:
```json
{
"mcpServers": {
"filesystem-mcp": {
"command": "node",
"args": ["/path/to/cloned/repo/filesystem-mcp/dist/index.js"], // Updated build dir
"name": "Filesystem (Local Build)"
}
}
}
```
**Note:** Launch the `node` command from the directory you intend as the project root.
## Quick Start
Once the server is configured in your MCP host (see Installation), your AI agent can immediately start using the filesystem tools.
_Example Agent Interaction (Conceptual):_
```
Agent: <use_mcp_tool>
<server_name>filesystem-mcp</server_name>
<tool_name>read_content</tool_name>
<arguments>{"paths": ["src/index.ts"]}</arguments>
</use_mcp_tool>
Server Response: (Content of src/index.ts)
```
## Why Choose This Project?
- **🛡️ Secure & Convenient Project Root Focus:** Operations confined to the project root (`cwd` at launch).
- **⚡ Optimized & Consolidated Tools:** Batch operations reduce AI-server round trips, saving tokens and latency. Reliable results for each item in a batch.
- **🚀 Easy Integration:** Quick setup via `npx`/`bunx`.
- **🐳 Containerized Option:** Available as a Docker image.
- **🔧 Comprehensive Functionality:** Covers a wide range of filesystem tasks.
- **✅ Robust Validation:** Uses Zod schemas for argument validation.
## Performance Advantages
_(Placeholder: Add benchmark results and comparisons here, demonstrating advantages over alternative methods like individual shell commands.)_
- **Batch Operations:** Significantly reduces overhead compared to single operations.
- **Direct API Usage:** More efficient than spawning shell processes for each command.
- _(Add specific benchmark data when available)_
## Features
This server equips your AI agent with a powerful and efficient filesystem toolkit:
- 📁 **Explore & Inspect (`list_files`, `stat_items`):** List files/directories (recursive, stats), get detailed status for multiple items.
- 📄 **Read & Write Content (`read_content`, `write_content`):** Read/write/append multiple files, creates parent directories.
- ✏️ **Precision Editing & Searching (`edit_file`, `search_files`, `replace_content`):** Surgical edits (insert, replace, delete) across multiple files with indentation preservation and diff output; regex search with context; multi-file search/replace.
- 🏗️ **Manage Directories (`create_directories`):** Create multiple directories including intermediate parents.
- 🗑️ **Delete Safely (`delete_items`):** Remove multiple files/directories recursively.
- ↔️ **Move & Copy (`move_items`, `copy_items`):** Move/rename/copy multiple files/directories.
- 🔒 **Control Permissions (`chmod_items`, `chown_items`):** Change POSIX permissions and ownership for multiple items.
**Key Benefit:** All tools accepting multiple paths/operations process each item individually and return a detailed status report.
## Design Philosophy
_(Placeholder: Explain the core design principles.)_
- **Security First:** Prioritize preventing access outside the project root.
- **Efficiency:** Minimize communication overhead and token usage for AI interactions.
- **Robustness:** Provide detailed results and error reporting for batch operations.
- **Simplicity:** Offer a clear and consistent API via MCP.
- **Standard Compliance:** Adhere strictly to the Model Context Protocol.
## Comparison with Other Solutions
_(Placeholder: Objectively compare with alternatives.)_
| Feature/Aspect | Filesystem MCP Server | Individual Shell Commands (via Agent) | Other Custom Scripts |
| :---------------------- | :-------------------- | :------------------------------------ | :------------------- |
| **Security** | High (Root Confined) | Low (Agent needs shell access) | Variable |
| **Efficiency (Tokens)** | High (Batching) | Low (One command per op) | Variable |
| **Latency** | Low (Direct API) | High (Shell spawn overhead) | Variable |
| **Batch Operations** | Yes (Most tools) | No | Maybe |
| **Error Reporting** | Detailed (Per item) | Basic (stdout/stderr parsing) | Variable |
| **Setup** | Easy (npx/Docker) | Requires secure shell setup | Custom |
## Future Plans
_(Placeholder: List upcoming features or improvements.)_
- Explore file watching capabilities.
- Investigate streaming support for very large files.
- Enhance performance for specific operations.
- Add more advanced filtering options for `list_files`.
## Documentation
_(Placeholder: Add link to the full documentation website once available.)_
Full documentation, including detailed API references and examples, will be available at: [Link to Docs Site]
## Contributing
Contributions are welcome! Please open an issue or submit a pull request on the [GitHub repository](https://github.com/sylphlab/filesystem-mcp).
## License
This project is released under the [MIT License](LICENSE).
---
## Development
1. Clone: `git clone https://github.com/sylphlab/filesystem-mcp.git`
2. Install: `cd filesystem-mcp && pnpm install`
3. Build: `pnpm run build` (compiles TypeScript to `dist/`)
4. Watch: `pnpm run dev` (optional, recompiles on save)
## Publishing (via GitHub Actions)
This repository uses GitHub Actions (`.github/workflows/publish.yml`) to automatically publish the package to [npm](https://www.npmjs.com/package/@sylphlab/filesystem-mcp) and build/push a Docker image to [Docker Hub](https://hub.docker.com/r/sylphlab/filesystem-mcp) on pushes of version tags (`v*.*.*`) to the `main` branch. Requires `NPM_TOKEN`, `DOCKERHUB_USERNAME`, and `DOCKERHUB_TOKEN` secrets configured in the GitHub repository settings.
```
--------------------------------------------------------------------------------
/commitlint.config.cjs:
--------------------------------------------------------------------------------
```
module.exports = {
extends: ['@commitlint/config-conventional'],
// Add any project-specific rules here if needed in the future
};
```
--------------------------------------------------------------------------------
/src/handlers/common.ts:
--------------------------------------------------------------------------------
```typescript
export type FileSystemDependencies = {
path: {
resolve: (...paths: string[]) => string;
};
writeFile: (path: string, content: string, encoding: string) => Promise<void>;
readFile: (path: string, encoding: string) => Promise<string>;
projectRoot: string;
};
```
--------------------------------------------------------------------------------
/src/utils/error-utils.ts:
--------------------------------------------------------------------------------
```typescript
export function formatFileProcessingError(
error: unknown,
resolvedPath: string,
filePath: string,
// Removed projectRoot parameter entirely
): string {
if (typeof error !== 'object' || error === null) {
return `Failed to process file ${filePath}: ${String(error)}`;
}
const err = error as { code?: string; message?: string };
if (err.code === 'ENOENT') {
return `File not found at resolved path: ${resolvedPath}`;
}
if (err.code === 'EACCES') {
return `Permission denied for file: ${filePath}`;
}
return `Failed to process file ${filePath}: ${err.message ?? 'Unknown error'}`;
}
```
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
```yaml
# These are supported funding model platforms
github: shtse8
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
buy_me_a_coffee: shtse8
```
--------------------------------------------------------------------------------
/src/types/mcp-types.ts:
--------------------------------------------------------------------------------
```typescript
// Core MCP types
export enum ErrorCode {
InvalidParams = -32_602,
InternalError = -32_603,
InvalidRequest = -32_600,
}
export class McpError extends Error {
constructor(
public code: ErrorCode,
message: string,
public data?: unknown,
) {
super(message);
}
}
// Request/Response types
export interface McpRequest<TInput = unknown> {
jsonrpc?: string;
method?: string;
params: TInput;
}
export interface McpResponse<TOutput = unknown> {
success?: boolean;
output?: TOutput;
error?: McpError | Error;
data?: Record<string, unknown>;
content?: Array<{
type: 'text';
text: string;
}>;
}
// Tool response type
export interface McpToolResponse extends McpResponse {
content: Array<{
type: 'text';
text: string;
}>;
}
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
# Stage 1: Install production dependencies
FROM node:20-alpine AS deps
WORKDIR /app
# Copy package files
COPY package.json package-lock.json ./
# Install ONLY production dependencies
RUN npm ci --omit=dev
# Stage 2: Create the final lightweight image
FROM node:20-alpine
WORKDIR /app
# Create a non-root user and group for security
RUN addgroup -S appgroup && adduser -S appuser -G appgroup
# Copy production dependencies and package.json from the deps stage
COPY --from=deps --chown=appuser:appgroup /app/node_modules ./node_modules
COPY --from=deps --chown=appuser:appgroup /app/package.json ./
# Copy the pre-built application code (from the CI artifact)
# This assumes the 'build' directory is present in the build context
COPY --chown=appuser:appgroup build ./build
# Switch to the non-root user
USER appuser
# Command to run the server using the built output
CMD ["node", "build/index.js"]
```
--------------------------------------------------------------------------------
/docs/guide/introduction.md:
--------------------------------------------------------------------------------
```markdown
# Introduction
Welcome to the documentation for the **Filesystem MCP Server** (`@sylphlab/filesystem-mcp`).
This server acts as a secure and efficient bridge between AI agents (like Cline/Claude using the Model Context Protocol) and your local project files.
## Key Goals
- **Security**: Confine AI filesystem access strictly within your project directory.
- **Efficiency**: Reduce AI-server communication overhead and token usage through batch operations.
- **Control**: Operate predictably using relative paths within the project context.
- **Standardization**: Adhere to the Model Context Protocol for interoperability.
## Getting Started
The easiest way to use the server is via `npx` or `bunx`, configured within your MCP host environment. Please refer to the [README](https://github.com/sylphlab/filesystem-mcp#readme) for detailed setup instructions.
This documentation site will provide further details on available tools, configuration options, and development guidelines.
```
--------------------------------------------------------------------------------
/src/utils/path-utils.ts:
--------------------------------------------------------------------------------
```typescript
import path from 'node:path';
import { McpError as OriginalMcpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
const McpError = OriginalMcpError;
const PROJECT_ROOT = path.resolve(import.meta.dirname, '../../');
export function resolvePath(relativePath: string, rootPath?: string): string {
// Validate input types
if (typeof relativePath !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'Path must be a string');
}
if (rootPath && typeof rootPath !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'Root path must be a string');
}
// Validate path format
if (path.isAbsolute(relativePath)) {
throw new McpError(ErrorCode.InvalidParams, `Absolute paths are not allowed: ${relativePath}`);
}
const root = rootPath || PROJECT_ROOT;
const absolutePath = path.resolve(root, relativePath);
// Validate path traversal
if (!absolutePath.startsWith(root)) {
throw new McpError(ErrorCode.InvalidRequest, `Path traversal detected: ${relativePath}`);
}
return absolutePath;
}
export { PROJECT_ROOT };
```
--------------------------------------------------------------------------------
/commit_msg.txt:
--------------------------------------------------------------------------------
```
feat: Add apply-diff schema and utility functions for diff operations
- Introduced `apply-diff-schema.ts` to define schemas for diff operations, including validation for line numbers and unique file paths.
- Created `mcp-types.ts` for core MCP error handling and request/response types.
- Implemented `apply-diff-utils.ts` with functions to validate and apply diff blocks to file content, including context retrieval and content verification.
- Removed deprecated `applyDiffUtils.ts` to streamline utility functions.
- Added `edit-file-specific-utils.ts` for regex matching and indentation handling.
- Created `error-utils.ts` for standardized error formatting during file processing.
- Introduced `path-utils.ts` for path resolution with security checks against path traversal.
- Removed old `pathUtils.ts` to consolidate path handling logic.
- Added `stats-utils.ts` for formatting file statistics for MCP responses.
- Created `string-utils.ts` for string manipulation utilities, including regex escaping and line matching.
- Updated `tsconfig.json` to include type definitions and adjust exclusions.
- Modified `vitest.config.ts` to add clean option and remove non-existent setup files.
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
"compilerOptions": {
/* Base Options */
"esModuleInterop": true,
"skipLibCheck": true,
"target": "ES2022",
"allowJs": false,
"resolveJsonModule": true,
"moduleDetection": "force",
"isolatedModules": true,
/* Strictness */
"strict": true,
"noImplicitAny": true,
"strictNullChecks": true,
"strictFunctionTypes": true,
"strictBindCallApply": true,
"strictPropertyInitialization": true,
"noImplicitThis": true,
"useUnknownInCatchVariables": true,
"alwaysStrict": true,
/* Linter Checks */
"noUnusedLocals": true,
"noUnusedParameters": true,
"exactOptionalPropertyTypes": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
"noImplicitOverride": true,
"noPropertyAccessFromIndexSignature": true,
/* Module Resolution */
"module": "NodeNext",
"moduleResolution": "NodeNext",
/* Emit */
"outDir": "dist",
"declaration": true,
"sourceMap": true,
"removeComments": false,
/* Other */
"forceConsistentCasingInFileNames": true
},
"include": ["src/**/*.ts", "src/types/**/*.d.ts"],
"exclude": [
"node_modules",
"build",
"dist",
"**/*.test.ts",
"**/*.spec.ts",
"**/*.bench.ts"
]
}
```
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
```yaml
# .github/dependabot.yml
version: 2
updates:
# Dependency updates for npm
- package-ecosystem: 'npm'
directory: '/' # Location of package manifests
schedule:
interval: 'weekly' # Check for updates weekly
open-pull-requests-limit: 10 # Limit open PRs
versioning-strategy: 'auto' # Use default strategy
# Allow only non-major updates for production dependencies initially
allow:
- dependency-type: 'production'
update-types:
['version-update:semver-minor', 'version-update:semver-patch']
- dependency-type: 'development'
update-types:
[
'version-update:semver-major',
'version-update:semver-minor',
'version-update:semver-patch',
]
commit-message:
prefix: 'chore' # Use 'chore' for dependency updates
prefix-development: 'chore(dev)' # Use 'chore(dev)' for devDependencies
include: 'scope'
rebase-strategy: 'auto' # Automatically rebase PRs
# GitHub Actions updates
- package-ecosystem: 'github-actions'
directory: '/' # Location of workflow files
schedule:
interval: 'weekly' # Check for updates weekly
open-pull-requests-limit: 5 # Limit open PRs for actions
commit-message:
prefix: 'chore(ci)' # Use 'chore(ci)' for action updates
include: 'scope'
rebase-strategy: 'auto'
```
--------------------------------------------------------------------------------
/src/utils/stats-utils.ts:
--------------------------------------------------------------------------------
```typescript
import type { Stats } from 'node:fs';
// Define and export the return type interface
export interface FormattedStats {
// Add export keyword
path: string;
isFile: boolean;
isDirectory: boolean;
isSymbolicLink: boolean;
size: number;
atime: string;
mtime: string;
ctime: string;
birthtime: string;
mode: string;
uid: number;
gid: number;
}
/**
* Formats an fs.Stats object into a standardized structure for MCP responses.
* @param relativePath The original relative path requested.
* @param absolutePath The resolved absolute path of the item.
* @param stats The fs.Stats object.
* @returns A formatted stats object.
*/
export const formatStats = (
relativePath: string,
_absolutePath: string, // Unused parameter
stats: Stats,
): FormattedStats => {
// Add return type annotation
// Ensure mode is represented as a 3-digit octal string
const modeOctal = (stats.mode & 0o777).toString(8).padStart(3, '0');
return {
path: relativePath.replaceAll('\\', '/'), // Ensure forward slashes for consistency
isFile: stats.isFile(),
isDirectory: stats.isDirectory(),
isSymbolicLink: stats.isSymbolicLink(),
size: stats.size,
atime: stats.atime.toISOString(),
mtime: stats.mtime.toISOString(),
ctime: stats.ctime.toISOString(),
birthtime: stats.birthtime.toISOString(),
mode: modeOctal,
uid: stats.uid,
gid: stats.gid,
};
};
```
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
```markdown
---
layout: home
hero:
name: Filesystem MCP Server
text: Secure & Efficient Filesystem Access for AI Agents
tagline: Empower your AI agents (like Cline/Claude) with secure, efficient, and token-saving access to your project files via the Model Context Protocol.
image:
# Replace with a relevant logo/image if available
# src: /logo.svg
# alt: Filesystem MCP Server Logo
actions:
- theme: brand
text: Get Started
link: /guide/introduction
- theme: alt
text: View on GitHub
link: https://github.com/sylphlab/filesystem-mcp
features:
- title: 🛡️ Secure by Design
details: All operations are strictly confined to the project root directory, preventing unauthorized access. Uses relative paths.
- title: ⚡ Optimized for AI
details: Batch operations minimize AI-server round trips, reducing token usage and latency compared to individual commands.
- title: 🔧 Comprehensive Toolkit
details: Offers a wide range of tools covering file/directory listing, reading, writing, editing, searching, moving, copying, and more.
- title: ✅ Robust & Reliable
details: Uses Zod for argument validation and provides detailed results for batch operations, indicating success or failure for each item.
- title: 🚀 Easy Integration
details: Get started quickly using npx or Docker with minimal configuration in your MCP host environment.
- title: 🤝 Open Source
details: MIT Licensed and open to contributions.
---
```
--------------------------------------------------------------------------------
/__tests__/utils/error-utils.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { formatFileProcessingError } from '../../src/utils/error-utils';
describe('errorUtils', () => {
describe('formatFileProcessingError', () => {
it('should handle ENOENT errors', () => {
const error = new Error('Not found');
(error as any).code = 'ENOENT';
const result = formatFileProcessingError(error, '/path', 'file.txt', '/project');
expect(result).toContain('File not found at resolved path');
});
it('should handle EACCES errors', () => {
const error = new Error('Permission denied');
(error as any).code = 'EACCES';
const result = formatFileProcessingError(error, '/path', 'file.txt', '/project');
expect(result).toContain('Permission denied for file');
});
it('should handle generic Error objects', () => {
const result = formatFileProcessingError(
new Error('Test error'),
'/path',
'file.txt',
'/project',
);
expect(result).toContain('Failed to process file file.txt: Test error');
});
it('should handle non-Error objects', () => {
const result = formatFileProcessingError('string error', '/path', 'file.txt', '/project');
expect(result).toContain('Failed to process file file.txt: string error');
});
it('should handle null/undefined errors', () => {
const result1 = formatFileProcessingError(null, '/path', 'file.txt', '/project');
expect(result1).toContain('Failed to process file file.txt: null');
const result2 = formatFileProcessingError(undefined, '/path', 'file.txt', '/project');
expect(result2).toContain('Failed to process file file.txt: undefined');
});
});
});
```
--------------------------------------------------------------------------------
/__tests__/setup.ts:
--------------------------------------------------------------------------------
```typescript
// Module mapping for tests to load correct compiled files
import { vi } from 'vitest';
import path from 'node:path';
// Setup module aliases to redirect imports to the compiled code
const srcToDistMap = new Map<string, string>();
// Map all src module paths to their compiled versions
function mapSourceToCompiledModule(id: string) {
// Convert import paths from src to dist
const sourcePattern = /^\.\.\/\.\.\/src\/(.+)$/;
// Check for TypeScript module imports
if (id.endsWith('.ts')) {
const match = id.match(sourcePattern);
if (match) {
const relativePath = match[1];
// Remove .ts extension if present
const basePath = relativePath.endsWith('.ts') ? relativePath.slice(0, -3) : relativePath;
return `${path.resolve(__dirname, '../dist', basePath)}`;
}
}
// Check for JavaScript module imports
if (id.endsWith('.js')) {
const match = id.match(sourcePattern);
if (match) {
const relativePath = match[1];
const basePath = relativePath.endsWith('.js') ? relativePath.slice(0, -3) : relativePath;
return `${path.resolve(__dirname, '../dist', basePath)}.js`;
}
}
// If no match, return the original id
return id;
}
// Register module mock
vi.mock(/^\.\.\/\.\.\/src\/(.+)$/, (importOriginal) => {
const origPath = importOriginal as string;
const compiledPath = mapSourceToCompiledModule(origPath);
if (compiledPath !== origPath) {
srcToDistMap.set(origPath, compiledPath);
return vi.importActual(compiledPath);
}
// Fallback to the original import for non-mapped paths
return vi.importActual(origPath);
});
// Debug log - will be visible during test run
console.log('Test setup: Module aliases configured for src to dist mapping');
```
--------------------------------------------------------------------------------
/__tests__/handlers/chmod-items.test.ts:
--------------------------------------------------------------------------------
```typescript
import { vi, describe, it, expect, beforeEach } from 'vitest';
// 設置模擬模塊
vi.mock('node:fs', () => ({
promises: {
chmod: vi.fn().mockName('fs.chmod')
}
}));
vi.mock('../../src/utils/path-utils', () => ({
resolvePath: vi.fn().mockImplementation((path) =>
`/project-root/${path}`
).mockName('pathUtils.resolvePath'),
PROJECT_ROOT: '/project-root'
}));
describe('chmod-items handler', () => {
let handler: any;
let fsMock: any;
let pathUtilsMock: any;
beforeEach(async () => {
// 動態導入模擬模塊
fsMock = (await import('node:fs')).promises;
pathUtilsMock = await import('../../src/utils/path-utils');
// 重置模擬
vi.resetAllMocks();
// 設置默認模擬實現
pathUtilsMock.resolvePath.mockImplementation((path: string) =>
`/project-root/${path}`
);
fsMock.chmod.mockResolvedValue(undefined);
// 動態導入處理程序
const { chmodItemsToolDefinition } = await import('../../src/handlers/chmod-items');
handler = chmodItemsToolDefinition.handler;
});
it('should change permissions for valid paths', async () => {
const result = await handler({
paths: ['file1.txt', 'dir/file2.txt'],
mode: '755'
});
expect(fsMock.chmod).toHaveBeenCalledTimes(2);
expect(JSON.parse(result.content[0].text)).toEqual([
{ path: 'file1.txt', mode: '755', success: true },
{ path: 'dir/file2.txt', mode: '755', success: true }
]);
});
it('should handle multiple operations with mixed results', async () => {
fsMock.chmod
.mockResolvedValueOnce(undefined)
.mockRejectedValueOnce({ code: 'EPERM' });
const result = await handler({
paths: ['file1.txt', 'file2.txt'],
mode: '755'
});
const output = JSON.parse(result.content[0].text);
expect(output[0].success).toBe(true);
expect(output[1].success).toBe(false);
});
});
```
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
```typescript
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true, // Use Vitest globals (describe, it, expect, etc.)
environment: 'node', // Set the test environment to Node.js
coverage: {
provider: 'v8', // Use V8 for coverage collection
reporter: ['text', 'json', 'html', 'lcov'], // Added lcov reporter
reportsDirectory: './coverage', // Explicitly set the output directory
thresholds: {
lines: 90,
functions: 90,
branches: 90,
statements: 90,
},
include: ['src/**/*.ts'], // Restored include
exclude: [
// Restored and adjusted exclude
'src/index.ts', // Often just exports
'src/types/**', // Assuming types might be added later
'**/*.d.ts',
'**/*.config.ts',
'**/constants.ts', // Assuming constants might be added later
'src/handlers/chmodItems.ts', // Exclude due to Windows limitations
'src/handlers/chownItems.ts', // Exclude due to Windows limitations
],
clean: true, // Added clean option
},
deps: {
optimizer: {
ssr: {
// Suggested replacement for deprecated 'inline' to handle problematic ESM dependencies
include: [
'@modelcontextprotocol/sdk',
'@modelcontextprotocol/sdk/stdio',
'@modelcontextprotocol/sdk/dist/types', // Add specific dist path
'@modelcontextprotocol/sdk/dist/server', // Add specific dist path
],
},
},
},
// Exclude the problematic index test again
exclude: [
'**/node_modules/**', // Keep default excludes
'**/dist/**',
'**/cypress/**',
'**/.{idea,git,cache,output,temp}/**',
'**/{karma,rollup,webpack,vite,vitest,jest,ava,babel,nyc,cypress,tsup,build}.config.*',
'__tests__/index.test.ts', // Exclude the index test
'**/*.bench.ts', // Added benchmark file exclusion
],
},
});
```
--------------------------------------------------------------------------------
/memory-bank/activeContext.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.39 | Last Updated: 2025-07-04 | Updated By: Sylph -->
# Active Context: Filesystem MCP Server
## 1. Current Work Focus & Status
**Task:** Implement `apply_diff` tool.
**Status:** Completed configuration alignment and file renaming based on `guidelines/typescript/style_quality.md` (SHA: 9d56a9d...). ESLint check (with `--no-cache`) confirms **no errors**. `import/no-unresolved` rule was temporarily disabled but seems unnecessary now.
## 2. Recent Changes/Decisions
- **Configuration Alignment:**
- Updated `package.json`: Added ESLint dependencies (`eslint-config-airbnb-typescript`, `eslint-plugin-import`, `eslint-plugin-unicorn`), updated scripts (`lint`, `validate`), updated `lint-staged`.
- Created `.eslintrc.js` based on guideline template.
- Deleted old `eslint.config.js`.
- Updated `.prettierrc.js` (formerly `.cjs`) content and filename based on guideline.
- Updated `tsconfig.json`: Set `module` and `moduleResolution` to `NodeNext`.
- **Guideline Checksum:** Updated `memory-bank/techContext.md` with the latest SHA for `style_quality.md`.
- (Previous changes remain relevant)
## 3. Next Steps
1. **NEXT:** Rename `__tests__/testUtils.ts` to `__tests__/test-utils.ts`.
2. **DONE:** ESLint errors fixed (confirmed via `--no-cache`).
3. **DONE:** Verified `import/no-unresolved` rule (re-enabled in `eslint.config.ts`, no errors reported).
4. **DONE:** Verified tests in `__tests__/handlers/apply-diff.test.ts` are passing.
5. **NEXT:** Enhance `apply_diff` tests further (edge cases, large files).
6. Consider adding performance benchmarks for `apply_diff`.
7. Update `README.md` with details about the new `apply_diff` tool and remove mentions of `edit_file`.
## 4. Active Decisions
- **Skipped Tests:** `chmodItems`, `chownItems` (Windows limitations), `searchFiles` zero-width regex test (implementation complexity).
- Temporarily skipping full ESLint validation pass to focus on completing the `apply_diff` implementation and basic testing.
- (Previous decisions remain active unless superseded).
```
--------------------------------------------------------------------------------
/__tests__/handlers/apply-diff.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { handleApplyDiffInternal, handleApplyDiff } from '../../src/handlers/apply-diff';
import type { FileDiff } from '../../src/schemas/apply-diff-schema';
describe('applyDiff Handler', () => {
const mockDeps = {
path: {
resolve: vi.fn((root, path) => `${root}/${path}`),
},
writeFile: vi.fn(),
readFile: vi.fn().mockResolvedValue(''),
projectRoot: '/project',
};
beforeEach(() => {
vi.resetAllMocks();
});
describe('handleApplyDiffInternal', () => {
it('should return success on successful write', async () => {
mockDeps.writeFile.mockResolvedValue('');
const result = await handleApplyDiffInternal('file.txt', 'content', mockDeps);
expect(result.success).toBe(true);
expect(result.results[0].success).toBe(true);
});
it('should handle write errors', async () => {
mockDeps.writeFile.mockRejectedValue(new Error('Write failed'));
const result = await handleApplyDiffInternal('file.txt', 'content', mockDeps);
expect(result.success).toBe(false);
expect(result.results[0].success).toBe(false);
expect(result.results[0].error).toBeDefined();
});
});
describe('handleApplyDiff', () => {
it('should handle empty changes', async () => {
const result = await handleApplyDiff([], mockDeps);
expect(result.success).toBe(true);
expect(result.results).toEqual([]);
});
it('should process multiple files', async () => {
mockDeps.writeFile.mockResolvedValue('');
const changes: FileDiff[] = [
{ path: 'file1.txt', diffs: [] },
{ path: 'file2.txt', diffs: [] },
];
const result = await handleApplyDiff(changes, mockDeps);
expect(result.results.length).toBe(2);
expect(result.success).toBe(true);
});
it('should handle mixed success/failure', async () => {
mockDeps.writeFile.mockResolvedValueOnce('').mockRejectedValueOnce(new Error('Failed'));
const changes: FileDiff[] = [
{ path: 'file1.txt', diffs: [] },
{ path: 'file2.txt', diffs: [] },
];
const result = await handleApplyDiff(changes, mockDeps);
expect(result.results.length).toBe(2);
expect(result.success).toBe(false);
});
});
});
```
--------------------------------------------------------------------------------
/src/utils/string-utils.ts:
--------------------------------------------------------------------------------
```typescript
// src/utils/stringUtils.ts
/**
* Escapes special characters in a string for use in a regular expression.
* @param str The string to escape.
* @returns The escaped string.
*/
export function escapeRegex(str: string): string {
// Escape characters with special meaning either inside or outside character sets.
// Use a simple backslash escape for characters like *, +, ?, ^, $, {}, (), |, [], \.
// - Outside character sets, escape special characters: * + ? ^ $ { } ( ) | [ ] \
// - Inside character sets, escape special characters: ^ - ] \
// This function handles the common cases for use outside character sets.
return str.replaceAll(/[$()*+.?[\\\]^{|}]/g, '\\$&'); // $& means the whole matched string. Manually escape backslash.
}
/**
* Gets the leading whitespace (indentation) of a line.
* @param line The line to check.
* @returns The leading whitespace, or an empty string if no line or no whitespace.
*/
export function getIndentation(line: string | undefined): string {
if (!line) return '';
const match = /^\s*/.exec(line);
return match ? match[0] : '';
}
/**
* Applies indentation to each line of a multi-line string.
* @param content The content string.
* @param indent The indentation string to apply.
* @returns An array of indented lines.
*/
export function applyIndentation(content: string, indent: string): string[] {
return content.split('\n').map((line) => indent + line);
}
/**
* Checks if two lines match, optionally ignoring leading whitespace on the file line.
* @param fileLine The line from the file content.
* @param searchLine The line from the search pattern.
* @param ignoreLeadingWhitespace Whether to ignore leading whitespace on the file line.
* @returns True if the lines match according to the rules.
*/
export function linesMatch(
fileLine: string | undefined,
searchLine: string | undefined,
ignoreLeadingWhitespace: boolean,
): boolean {
if (fileLine === undefined || searchLine === undefined) {
return false;
}
const trimmedSearchLine = searchLine.trimStart();
// Always trim fileLine if ignoring whitespace, compare against trimmed searchLine
const effectiveFileLine = ignoreLeadingWhitespace ? fileLine.trimStart() : fileLine;
const effectiveSearchLine = ignoreLeadingWhitespace ? trimmedSearchLine : searchLine;
return effectiveFileLine === effectiveSearchLine;
}
```
--------------------------------------------------------------------------------
/src/handlers/apply-diff.ts:
--------------------------------------------------------------------------------
```typescript
import type { ApplyDiffOutput, DiffApplyResult } from '../schemas/apply-diff-schema.js';
import { formatFileProcessingError } from '../utils/error-utils.js';
import { applyDiffsToFileContent } from '../utils/apply-diff-utils.js';
import type { FileSystemDependencies } from './common.js';
export async function handleApplyDiffInternal(
filePath: string,
content: string,
deps: FileSystemDependencies,
): Promise<ApplyDiffOutput> {
const resolvedPath = deps.path.resolve(deps.projectRoot, filePath);
try {
await deps.writeFile(resolvedPath, content, 'utf8'); // Use utf-8
return {
success: true,
results: [
{
path: filePath,
success: true,
},
],
};
} catch (error) {
const errorMessage =
error instanceof Error
? formatFileProcessingError(error, resolvedPath, filePath, deps.projectRoot)
: `Unknown error occurred while processing ${filePath}`;
return {
success: false,
results: [
{
path: filePath,
success: false,
error: errorMessage,
context: errorMessage.includes('ENOENT') ? 'File not found' : 'Error writing file',
},
],
};
}
}
async function applyDiffsToContent(
originalContent: string,
diffs: {
search: string;
replace: string;
start_line: number;
end_line: number;
}[],
filePath: string,
): Promise<string> {
const result = applyDiffsToFileContent(originalContent, diffs, filePath);
if (!result.success) {
throw new Error(result.error || 'Failed to apply diffs');
}
return result.newContent || originalContent;
}
export async function handleApplyDiff(
changes: {
path: string;
diffs: {
search: string;
replace: string;
start_line: number;
end_line: number;
}[];
}[],
deps: FileSystemDependencies,
): Promise<ApplyDiffOutput> {
const results: DiffApplyResult[] = [];
for (const change of changes) {
const { path: filePath, diffs } = change;
const originalContent = await deps.readFile(
deps.path.resolve(deps.projectRoot, filePath),
'utf8',
);
const newContent = await applyDiffsToContent(originalContent, diffs, filePath);
const result = await handleApplyDiffInternal(filePath, newContent, deps);
results.push(...result.results);
}
return {
success: results.every((r) => r.success),
results,
};
}
```
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
```markdown
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.5.9] - 2025-06-04
### Changed
- Updated project ownership to `sylphlab`.
- Updated package name to `@sylphlab/filesystem-mcp`.
- Updated `README.md`, `LICENSE`, and GitHub Actions workflow (`publish.yml`) to reflect new ownership and package name.
## [0.5.8] - 2025-04-05
### Fixed
- Removed `build` directory exclusion from `.dockerignore` to fix Docker build context error where `COPY build ./build` failed.
## [0.5.7] - 2025-04-05
### Fixed
- Corrected artifact archiving in CI/CD workflow (`.github/workflows/publish.yml`) to include the `build` directory itself, resolving Docker build context errors (5f5c7c4).
## [0.5.6] - 2025-05-04
### Fixed
- Corrected CI/CD artifact handling (`package-lock.json` inclusion, extraction paths) in `publish.yml` to ensure successful npm and Docker publishing (4372afa).
- Simplified CI/CD structure back to a single workflow (`publish.yml`) with conditional artifact upload, removing `ci.yml` and `build-reusable.yml` (38029ca).
### Changed
- Bumped version to 0.5.6 due to previous failed release attempt of 0.5.5.
## [0.5.5] - 2025-05-04
### Changed
- Refined GitHub Actions workflow (`publish.yml`) triggers: publishing jobs (`publish-npm`, `publish-docker`, `create-release`) now run _only_ on version tag pushes (`v*.*.*`), not on pushes to `main` (9c0df99).
### Fixed
- Corrected artifact extraction path in the `publish-docker` CI/CD job to resolve "Dockerfile not found" error (708d3f5).
## [0.5.3] - 2025-05-04
### Added
- Enhanced path error reporting in `resolvePath` to include original path, resolved path, and project root for better debugging context (3810f14).
- Created `.clinerules` file to document project-specific patterns and preferences, starting with tool usage recommendations (3810f14).
- Enhanced `ENOENT` (File not found) error reporting in `readContent` handler to include resolved path, relative path, and project root (8b82e1c).
### Changed
- Updated `write_content` tool description to recommend using edit tools (`edit_file`, `replace_content`) for modifications (5521102).
- Updated `edit_file` tool description to reinforce its recommendation for modifications (5e44ef2).
- Refactored GitHub Actions workflow (`publish.yml`) to parallelize npm and Docker publishing using separate jobs dependent on a shared build job, improving release speed (3b51c2b).
- Bumped version to 0.5.3.
### Fixed
- Corrected TypeScript errors in `readContent.ts` related to variable scope and imports during error reporting enhancement (8b82e1c).
<!-- Previous versions can be added below -->
```
--------------------------------------------------------------------------------
/__tests__/utils/stats-utils.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { formatStats, FormattedStats } from '../../src/utils/stats-utils';
function makeMockStats(partial: Partial<Record<keyof FormattedStats, any>> = {}): any {
// Provide default values and allow overrides
return {
isFile: () => partial.isFile ?? true,
isDirectory: () => partial.isDirectory ?? false,
isSymbolicLink: () => partial.isSymbolicLink ?? false,
size: partial.size ?? 1234,
atime: partial.atime ?? new Date('2024-01-01T01:02:03.000Z'),
mtime: partial.mtime ?? new Date('2024-01-02T01:02:03.000Z'),
ctime: partial.ctime ?? new Date('2024-01-03T01:02:03.000Z'),
birthtime: partial.birthtime ?? new Date('2024-01-04T01:02:03.000Z'),
mode: partial.mode ?? 0o755,
uid: partial.uid ?? 1000,
gid: partial.gid ?? 1000,
};
}
describe('formatStats', () => {
it('formats a regular file', () => {
const stats = makeMockStats({ isFile: true, isDirectory: false, isSymbolicLink: false, mode: 0o644 });
const result = formatStats('foo\\bar.txt', '/abs/foo/bar.txt', stats as any);
expect(result).toEqual({
path: 'foo/bar.txt',
isFile: true,
isDirectory: false,
isSymbolicLink: false,
size: 1234,
atime: '2024-01-01T01:02:03.000Z',
mtime: '2024-01-02T01:02:03.000Z',
ctime: '2024-01-03T01:02:03.000Z',
birthtime: '2024-01-04T01:02:03.000Z',
mode: '644',
uid: 1000,
gid: 1000,
});
});
it('formats a directory', () => {
const stats = makeMockStats({ isFile: false, isDirectory: true, isSymbolicLink: false, mode: 0o755 });
const result = formatStats('dir\\', '/abs/dir', stats as any);
expect(result.isDirectory).toBe(true);
expect(result.isFile).toBe(false);
expect(result.mode).toBe('755');
});
it('formats a symbolic link', () => {
const stats = makeMockStats({ isFile: false, isDirectory: false, isSymbolicLink: true, mode: 0o777 });
const result = formatStats('link', '/abs/link', stats as any);
expect(result.isSymbolicLink).toBe(true);
expect(result.mode).toBe('777');
});
it('pads mode with leading zeros', () => {
const stats = makeMockStats({ mode: 0o7 });
const result = formatStats('file', '/abs/file', stats as any);
expect(result.mode).toBe('007');
});
it('converts all date fields to ISO string', () => {
const stats = makeMockStats({
atime: new Date('2020-01-01T00:00:00.000Z'),
mtime: new Date('2020-01-02T00:00:00.000Z'),
ctime: new Date('2020-01-03T00:00:00.000Z'),
birthtime: new Date('2020-01-04T00:00:00.000Z'),
});
const result = formatStats('file', '/abs/file', stats as any);
expect(result.atime).toBe('2020-01-01T00:00:00.000Z');
expect(result.mtime).toBe('2020-01-02T00:00:00.000Z');
expect(result.ctime).toBe('2020-01-03T00:00:00.000Z');
expect(result.birthtime).toBe('2020-01-04T00:00:00.000Z');
});
it('replaces backslashes in path with forward slashes', () => {
const stats = makeMockStats();
const result = formatStats('foo\\bar\\baz.txt', '/abs/foo/bar/baz.txt', stats as any);
expect(result.path).toBe('foo/bar/baz.txt');
});
});
```
--------------------------------------------------------------------------------
/eslint.config.ts:
--------------------------------------------------------------------------------
```typescript
import eslint from "@eslint/js";
import tseslint from "typescript-eslint";
import prettierConfig from "eslint-config-prettier";
// import unicornPlugin from "eslint-plugin-unicorn"; // Keep commented out for now
import importPlugin from "eslint-plugin-import";
import globals from "globals";
export default tseslint.config(
// Global ignores
{
ignores: [
"node_modules/",
"dist/",
"build/",
"coverage/",
"docs/.vitepress/dist/",
"docs/.vitepress/cache/",
],
},
// Apply recommended rules globally
eslint.configs.recommended,
...tseslint.configs.recommended,
// ...tseslint.configs.recommendedTypeChecked, // Enable later if needed
// Configuration for SOURCE TypeScript files (requiring type info)
{
files: ["src/**/*.ts"], // Apply project-specific parsing only to src files
languageOptions: {
parserOptions: {
project: true, // Enable project-based parsing ONLY for src files
tsconfigRootDir: import.meta.dirname,
},
globals: {
...globals.node,
},
},
rules: {
// Add specific rules for source TS if needed
},
},
// Configuration for OTHER TypeScript files (tests, configs - NO type info needed)
{
files: ["__tests__/**/*.ts", "*.config.ts", "*.config.js"], // Include JS configs here too
languageOptions: {
parserOptions: {
project: null, // Explicitly disable project-based parsing for these files
},
globals: {
...globals.node,
// Removed ...globals.vitest
},
},
rules: {
// Relax rules if needed for tests/configs, e.g., allow console in tests
"no-console": "off", // Allow console.log in tests and configs
"@typescript-eslint/no-explicit-any": "off", // Allow 'any' in test files
// Potentially disable rules that rely on type info if they cause issues
// "@typescript-eslint/no-unsafe-assignment": "off",
// "@typescript-eslint/no-unsafe-call": "off",
// "@typescript-eslint/no-unsafe-member-access": "off",
},
},
// Configuration for OTHER JavaScript files (if any)
// Note: *.config.js is handled above now. Keep this for other potential JS files.
{
files: ["**/*.js", "**/*.cjs"],
ignores: ["*.config.js"], // Ignore config files already handled
languageOptions: {
globals: {
...globals.node,
},
},
rules: {
// Add specific rules for other JS if needed
},
},
// Apply Prettier config last to override other formatting rules
prettierConfig,
// Add other plugins/configs as needed
// Example: Unicorn plugin (ensure installed)
/*
{
plugins: {
unicorn: unicornPlugin,
},
rules: {
...unicornPlugin.configs.recommended.rules,
// Override specific unicorn rules if needed
},
},
*/
// Example: Import plugin (ensure installed and configured)
{
plugins: {
import: importPlugin,
},
settings: {
'import/resolver': {
typescript: true,
node: true,
}
},
rules: {
// Add import rules
'import/no-unresolved': 'error',
}
}
);
```
--------------------------------------------------------------------------------
/src/schemas/apply-diff-schema.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
// Schema for a single diff block
const diffBlockSchema = z
.object({
search: z.string().describe('Exact content to find, including whitespace and newlines.'),
replace: z.string().describe('Content to replace the search block with.'),
start_line: z
.number()
.int()
.min(1)
.describe('The 1-based line number where the search block starts.'),
end_line: z
.number()
.int()
.min(1)
.describe('The 1-based line number where the search block ends.'),
operation: z
.enum(['insert', 'replace'])
.default('replace')
.optional()
.describe('Type of operation - insert or replace content'),
})
.describe('A single search/replace operation within a file.');
// Ensure valid line numbers based on operation type
const refinedDiffBlockSchema = diffBlockSchema.refine(
(data) => {
if (data.operation === 'insert') {
return data.end_line >= data.start_line - 1;
}
return data.end_line >= data.start_line;
},
{
message: 'Invalid line numbers for operation type',
path: ['end_line'],
},
);
// Schema for changes to a single file
const fileDiffSchema = z.object({
path: z.string().min(1).describe('Relative path to the file to modify.'),
diffs: z
.array(refinedDiffBlockSchema)
.min(1)
.describe('Array of diff blocks to apply to this file.'),
});
// Main input schema for the apply_diff tool
export const applyDiffInputSchema = z.object({
changes: z
.array(fileDiffSchema)
.min(1)
.describe('An array of file modification requests.')
// Ensure each path appears only once
.refine(
(changes) => {
const paths = changes.map((c) => c.path);
return new Set(paths).size === paths.length;
},
{
message: 'Each file path must appear only once in a single request.',
path: ['changes'], // Attach error to the main changes array
},
),
});
export type ApplyDiffInput = z.infer<typeof applyDiffInputSchema>;
export type FileDiff = z.infer<typeof fileDiffSchema>;
export type DiffBlock = z.infer<typeof refinedDiffBlockSchema>;
// Schema for individual diff operation result
export const diffResultSchema = z.object({
operation: z.enum(['insert', 'replace']),
start_line: z.number().int().min(1),
end_line: z.number().int().min(1),
success: z.boolean(),
error: z.string().optional(),
context: z.string().optional(),
});
export type DiffResult = z.infer<typeof diffResultSchema>;
// Define potential output structure
const diffApplyResultSchema = z.object({
path: z.string(),
success: z.boolean(),
error: z.string().optional().describe('Detailed error message if success is false.'),
context: z.string().optional().describe('Lines around the error location if success is false.'),
diffResults: z.array(diffResultSchema).optional(),
});
export const applyDiffOutputSchema = z.object({
success: z.boolean().describe('True if all operations succeeded.'),
results: z.array(diffApplyResultSchema).describe('Results for each file processed.'),
});
export type ApplyDiffOutput = z.infer<typeof applyDiffOutputSchema>;
export type DiffApplyResult = z.infer<typeof diffApplyResultSchema>;
```
--------------------------------------------------------------------------------
/memory-bank/projectbrief.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.6 | Last Updated: 2025-07-04 | Updated By: Sylph -->
# Project Brief: Filesystem MCP Server
## 1. Project Goal
The primary goal of this project is to create a Model Context Protocol (MCP)
server specifically designed for filesystem operations. This server should allow
an AI agent (like Cline) to interact with the user's filesystem in a controlled
and secure manner, operating relative to a defined project root directory.
## 2. Core Requirements
- **MCP Compliance:** The server must adhere to the Model Context Protocol
specifications for communication.
- **Relative Pathing:** All filesystem operations must be strictly relative to
the project root directory. Absolute paths should be disallowed, and path
traversal attempts must be prevented.
- **Core Filesystem Tools:** Implement a comprehensive set of tools for common
filesystem tasks:
- `list_files`: List files/directories within a specified directory (options
for recursion, stats).
- `stat_items`: Get detailed status information for multiple specified paths.
- `read_content`: Read content from multiple specified files.
- `write_content`: Write or append content to multiple specified files
(creating directories if needed).
- `delete_items`: Delete multiple specified files or directories.
- `create_directories`: Create multiple specified directories (including
intermediate ones).
- `chmod_items`: Change permissions for multiple specified files/directories.
- `chown_items`: Change owner (UID) and group (GID) for multiple specified
files/directories.
- `move_items`: Move or rename multiple specified files/directories.
- `copy_items`: Copy multiple specified files/directories.
- `search_files`: Search for regex patterns within files in a specified
directory.
- `replace_content`: Search and replace content within files across multiple
specified paths (files or directories).
- `apply_diff`: Applies multiple search/replace diff blocks to multiple files atomically per file.
- **Technology Stack:** Use Node.js and TypeScript. Leverage the
`@modelcontextprotocol/sdk` for MCP implementation and `glob` for file
searching/listing.
- **Efficiency:** Tools should be implemented efficiently, especially for
operations involving multiple files or large directories.
- **Security:** Robust path resolution and validation are critical to prevent
access outside the designated project root.
## 3. Scope
- **In Scope:** Implementation of the MCP server logic, definition of tool
schemas, handling requests, performing filesystem operations via Node.js `fs`
and `path` modules, and using `glob`. Basic error handling for common
filesystem issues (e.g., file not found, permissions).
- **Out of Scope:** Advanced features like file watching, complex permission
management beyond basic `chmod`, handling extremely large files requiring
streaming (beyond basic read/write), or integration with version control
systems.
## 4. Success Criteria
- The server compiles successfully using TypeScript.
- The server connects and responds to MCP requests (e.g., `list_tools`).
- All implemented tools function correctly according to their descriptions,
respecting relative path constraints.
- Path traversal attempts are correctly blocked.
- The server handles basic errors gracefully (e.g., file not found).
```
--------------------------------------------------------------------------------
/memory-bank/productContext.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.5 | Last Updated: 2025-04-06 | Updated By: Roo -->
# Product Context: Filesystem MCP Server
## 1. Problem Solved
AI agents like Cline often need to interact with a user's project files to
perform tasks such as reading code, writing new code, modifying configurations,
or searching for specific information. Directly granting unrestricted filesystem
access poses significant security risks. Furthermore, requiring the user to
manually perform every filesystem action requested by the agent is inefficient
and hinders the agent's autonomy.
This Filesystem MCP server acts as a secure and controlled bridge, solving the
following problems:
- **Security:** It confines the agent's filesystem operations strictly within
the boundaries of the project root directory (determined by the server's
launch context), preventing accidental or malicious access to sensitive system
files outside the project scope.
- **Efficiency:** It provides the agent with a dedicated set of tools
(`list_files`, `read_content`, `write_content`, `move_items`, `copy_items`,
etc.) to perform common filesystem tasks directly, reducing the need for
constant user intervention for basic operations.
- **Control:** Operations are performed relative to the project root (determined
by the server's current working directory at launch), ensuring predictability
and consistency within that specific project context. **Note:** For
multi-project support, the system launching the server must set the correct
working directory for each project instance.
- **Standardization:** It uses the Model Context Protocol (MCP), providing a
standardized way for the agent and the server to communicate about filesystem
capabilities and operations.
## 2. How It Should Work
- The server runs as a background process, typically managed by the agent's host
environment (e.g., Cline's VSCode extension).
- It listens for incoming MCP requests over a defined transport (initially
stdio).
- Upon receiving a `call_tool` request for a filesystem operation:
1. It validates the request parameters against the tool's schema.
2. It resolves all provided relative paths against the `PROJECT_ROOT` (which
is the server process's current working directory, `process.cwd()`).
3. It performs security checks to ensure paths do not attempt to escape the
`PROJECT_ROOT` (the server's `cwd`).
4. It executes the corresponding Node.js filesystem function (`fs.readFile`,
`fs.writeFile`, `fs.rename`, `glob`, etc.).
5. It formats the result (or error) according to MCP specifications and sends
it back to the agent.
- It responds to `list_tools` requests by providing a list of all available
filesystem tools and their input schemas.
## 3. User Experience Goals
- **Seamless Integration:** The server should operate transparently in the
background. The user primarily interacts with the agent, and the agent
utilizes the server's tools as needed.
- **Security Assurance:** The user should feel confident that the agent's
filesystem access is restricted to the intended project directory.
- **Reliability:** The tools should perform filesystem operations reliably and
predictably. Errors should be reported clearly back to the agent (and
potentially surfaced to the user by the agent if necessary).
- **Performance:** Filesystem operations should be reasonably fast, not
introducing significant delays into the agent's workflow.
```
--------------------------------------------------------------------------------
/src/handlers/index.ts:
--------------------------------------------------------------------------------
```typescript
import { listFilesToolDefinition } from './list-files.js';
import { statItemsToolDefinition } from './stat-items.js';
import { readContentToolDefinition } from './read-content.js';
import { writeContentToolDefinition } from './write-content.js';
import { deleteItemsToolDefinition } from './delete-items.js';
import { createDirectoriesToolDefinition } from './create-directories.js';
import { chmodItemsToolDefinition } from './chmod-items.js';
import { chownItemsToolDefinition } from './chown-items.js';
import { moveItemsToolDefinition } from './move-items.js';
import { copyItemsToolDefinition } from './copy-items.js';
import { searchFilesToolDefinition } from './search-files.js';
import { replaceContentToolDefinition } from './replace-content.js';
import { handleApplyDiff } from './apply-diff.js';
import { applyDiffInputSchema, ApplyDiffOutput } from '../schemas/apply-diff-schema.js';
import fs from 'node:fs';
import path from 'node:path';
// Define the structure for a tool definition (used internally and for index.ts)
import type { ZodType } from 'zod';
import type { McpToolResponse } from '../types/mcp-types.js';
// Define local interfaces based on usage observed in handlers
// Define the structure for a tool definition
// Matches the structure in individual tool files like applyDiff.ts
export interface ToolDefinition<TInput = unknown, TOutput = unknown> {
name: string;
description: string;
inputSchema: ZodType<TInput>;
outputSchema?: ZodType<TOutput>;
handler: (args: TInput) => Promise<McpToolResponse>; // Changed _args to args
}
// Helper type to extract input type from a tool definition
export type ToolInput<T extends ToolDefinition> =
T extends ToolDefinition<infer I, unknown> ? I : never;
// Define a more specific type for our tool definitions to avoid naming conflicts
type HandlerToolDefinition = {
name: string;
description: string;
inputSchema: ZodType<unknown>;
outputSchema?: ZodType<unknown>;
handler: (args: unknown) => Promise<{ content: Array<{ type: 'text'; text: string }> }>;
};
// Aggregate all tool definitions into a single array
// Use our more specific type to avoid naming conflicts
export const allToolDefinitions: HandlerToolDefinition[] = [
listFilesToolDefinition,
statItemsToolDefinition,
readContentToolDefinition,
writeContentToolDefinition,
deleteItemsToolDefinition,
createDirectoriesToolDefinition,
chmodItemsToolDefinition,
chownItemsToolDefinition,
moveItemsToolDefinition,
copyItemsToolDefinition,
searchFilesToolDefinition,
replaceContentToolDefinition,
{
name: 'apply_diff',
description: 'Apply diffs to files',
inputSchema: applyDiffInputSchema,
handler: async (args: unknown): Promise<McpToolResponse> => {
const validatedArgs = applyDiffInputSchema.parse(args);
const result: ApplyDiffOutput = await handleApplyDiff(validatedArgs.changes, {
readFile: async (path: string) => fs.promises.readFile(path, 'utf8'),
writeFile: async (path: string, content: string) =>
fs.promises.writeFile(path, content, 'utf8'),
path,
projectRoot: process.cwd(),
});
return {
content: [
{
type: 'text',
text: JSON.stringify(
{
success: result.success,
results: result.results,
},
undefined,
2,
),
},
],
};
},
},
];
```
--------------------------------------------------------------------------------
/memory-bank/techContext.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.9 | Last Updated: 2025-07-04 | Updated By: Sylph -->
# Tech Context: Filesystem MCP Server
## Playbook Guideline Versions Checked
- `guidelines/typescript/style_quality.md`: 9d56a9d6626ecc2fafd0b07033220a1282282236
# Tech Context: Filesystem MCP Server
## 1. Core Technologies
- **Runtime:** Node.js (MUST use latest LTS version, currently v22 - `~22.0.0` specified in `package.json`)
- **Language:** TypeScript (Compiled to JavaScript for execution)
- **Package Manager:** pnpm (Preferred package manager as per guidelines)
- **Testing Framework:** Vitest (using v8 for coverage)
## 2. Key Libraries/Dependencies
- **`@modelcontextprotocol/sdk`:** The official SDK for implementing MCP servers and clients.
- **`glob`:** Library for matching files using glob patterns.
- **`typescript`:** TypeScript compiler (`tsc`).
- **`@types/node`:** TypeScript type definitions for Node.js built-in modules.
- **`@types/glob`:** TypeScript type definitions for the `glob` library.
- **`zod`:** Library for schema declaration and validation.
- **`zod-to-json-schema`:** Utility to convert Zod schemas to JSON schemas.
- **`vitest`:** Testing framework.
- **`@vitest/coverage-v8`:** Coverage provider for Vitest.
- **`uuid`:** For generating unique IDs (used in testUtils).
- **`@types/uuid`:** TypeScript type definitions for uuid.
## 3. Development Setup
- **Source Code:** Located in the `src` directory.
- **Tests:** Located in the `__tests__` directory.
- **Main File:** `src/index.ts`.
- **Configuration:**
- `tsconfig.json`: Configures the TypeScript compiler options.
- `vitest.config.ts`: Configures Vitest (test environment, globals, coverage).
- `package.json`: Defines project metadata, dependencies, and pnpm scripts. - `dependencies`: `@modelcontextprotocol/sdk`, `glob`, `zod`, `zod-to-json-schema`. - `devDependencies`: `typescript`, `@types/node`, `@types/glob`, `vitest`, `@vitest/coverage-v8`, `uuid`, `@types/uuid`, `husky`, `lint-staged`, `@commitlint/cli`, `@commitlint/config-conventional`, `prettier`, `eslint`, `typescript-eslint`, `eslint-plugin-prettier`, `eslint-config-prettier`, `standard-version`, `typedoc`, `typedoc-plugin-markdown`, `vitepress`, `rimraf`, `@changesets/cli`. (List might need verification against actual `package.json`)
- `scripts`: (Uses `pnpm run ...`)
- `build`: Compiles TypeScript code. - `watch`: Runs `tsc` in watch mode. - `clean`: `rimraf dist coverage` - `inspector`: `npx @modelcontextprotocol/inspector dist/index.js`
- `test`: Runs Vitest tests.
- `test:cov`: Runs Vitest tests with coverage.
- `validate`: Runs format check, lint, typecheck, and tests.
- `docs:build`: Builds documentation. - `start`: `node dist/index.js` - `prepare`: `husky` - `prepublishOnly`: `pnpm run clean && pnpm run build` - (Other scripts as defined in `package.json`)
- **Build Output:** Compiled JavaScript code is placed in the `dist` directory.
- **Execution:** The server is intended to be run via `node dist/index.js`.
## 4. Technical Constraints & Considerations
- **Node.js Environment:** Relies on Node.js runtime and built-in modules.
- **Permissions:** Server process permissions limit filesystem operations.
- **Cross-Platform Compatibility:** Filesystem behaviors differ. Code uses `path` module and normalizes slashes.
- **Error Handling:** Relies on Node.js error codes and `McpError`.
- **Security Model:** Relies on `resolvePath` function.
- **Project Root Determination:** Uses `process.cwd()`. Launching process must set correct `cwd`.
- **ESM:** Project uses ES Modules. Vitest generally handles ESM well, including mocking.
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
"name": "@sylphlab/filesystem-mcp",
"version": "0.5.9",
"description": "An MCP server providing filesystem tools relative to a project root.",
"type": "module",
"main": "./dist/index.js",
"module": "./dist/index.js",
"types": "./dist/index.d.ts",
"bin": {
"filesystem-mcp": "./dist/index.js"
},
"exports": {
".": {
"types": "./dist/index.d.ts",
"import": "./dist/index.js"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"README.md",
"LICENSE"
],
"engines": {
"node": "22.14"
},
"scripts": {
"build": "bun run clean && tsup",
"watch": "tsc --watch",
"inspector": "npx @modelcontextprotocol/inspector dist/index.js",
"test": "vitest run",
"test:watch": "vitest watch",
"test:cov": "vitest run --coverage --reporter=junit --outputFile=test-report.junit.xml",
"lint": "eslint . --ext .ts,.tsx,.vue,.js,.cjs --cache --max-warnings=0",
"lint:fix": "eslint . --ext .ts,.tsx,.vue,.js,.cjs --fix --cache",
"format": "prettier --write . --cache --ignore-unknown",
"check-format": "prettier --check . --cache --ignore-unknown",
"validate": "bun run check-format && bun run lint && bun run typecheck && bun run test",
"docs:dev": "vitepress dev docs",
"docs:build": "bun run docs:api && vitepress build docs",
"docs:preview": "vitepress preview docs",
"start": "node dist/index.js",
"typecheck": "tsc --noEmit",
"benchmark": "vitest bench",
"clean": "rimraf dist coverage",
"docs:api": "node scripts/generate-api-docs.mjs",
"prepublishOnly": "bun run clean && bun run build",
"changeset": "changeset",
"version-packages": "changeset version",
"prepare": "husky"
},
"homepage": "https://github.com/sylphlab/filesystem-mcp#readme",
"repository": {
"type": "git",
"url": "git+https://github.com/sylphlab/filesystem-mcp.git"
},
"keywords": [
"mcp",
"model-context-protocol",
"filesystem",
"file",
"directory",
"typescript",
"node",
"cli",
"ai",
"agent",
"tool"
],
"author": "Sylph Lab <[email protected]> (https://sylphlab.ai)",
"license": "MIT",
"bugs": {
"url": "https://github.com/sylphlab/filesystem-mcp/issues"
},
"publishConfig": {
"access": "public"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.9.0",
"glob": "^11.0.1",
"zod": "^3.24.2",
"zod-to-json-schema": "^3.24.5"
},
"devDependencies": {
"@changesets/cli": "^2.28.1",
"@commitlint/cli": "^19.8.0",
"@commitlint/config-conventional": "^19.8.0",
"@eslint/eslintrc": "^3.3.1",
"@eslint/js": "^9.24.0",
"@sylphlab/eslint-config-sylph": "^3.3.0",
"@sylphlab/typescript-config": "^0.3.1",
"@types/glob": "^8.1.0",
"@types/node": "^22.14.0",
"@types/uuid": "^10.0.0",
"@vitest/coverage-v8": "^3.1.1",
"eslint": "^9.24.0",
"eslint-config-prettier": "^10.1.2",
"eslint-import-resolver-typescript": "^3.10.0",
"eslint-plugin-import": "^2.31.0",
"eslint-plugin-prettier": "^5.2.6",
"eslint-plugin-unicorn": "^55.0.0",
"husky": "^9.1.7",
"lint-staged": "^15.5.0",
"prettier": "^3.5.3",
"rimraf": "^5.0.10",
"standard-version": "^9.5.0",
"typedoc": "^0.28.2",
"typedoc-plugin-markdown": "^4.6.2",
"typescript": "^5.8.3",
"typescript-eslint": "^8.29.1",
"uuid": "^11.1.0",
"vitepress": "^1.6.3",
"vitest": "^3.1.1"
},
"lint-staged": {
"*.{ts,tsx,js,cjs}": [
"eslint --fix --cache --max-warnings=0",
"prettier --write --cache --ignore-unknown"
],
"*.{json,md,yaml,yml,html,css}": [
"prettier --write --cache --ignore-unknown"
]
}
}
```
--------------------------------------------------------------------------------
/memory-bank/progress.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.32 | Last Updated: 2025-07-04 | Updated By: Sylph -->
# Progress: Filesystem MCP Server
## 1. What Works
- **Server Initialization & Core MCP:** Starts, connects, lists tools.
- **Path Security:** `resolvePath` prevents traversal and absolute paths.
- **Project Root:** Determined by `process.cwd()`.
- **Core Tool Functionality:** Most tools (`create_directories`, `write_content`, `stat_items`, `read_content`, `move_items`, `copy_items`, `search_files`, `replace_content`, `delete_items`, `listFiles`) have basic functionality and passing tests (except skipped tests).
- **`applyDiff` Tool:** Implemented with multi-file, multi-block, atomic (per file) application logic. Tests added, but currently failing due to mock/assertion issues.
- **Documentation (`README.md`):** Updated for new owner/package name.
- **Tool Descriptions:** Updated.
- **Dockerization:** Multi-stage `Dockerfile` functional.
- **CI/CD (GitHub Actions):** Single workflow handles CI/Releases, updated for new owner. Release `v0.5.9` triggered.
- **Versioning:** Package version at `0.5.9`.
- **`.clinerules`:** Created.
- **Changelog:** Updated up to `v0.5.9`.
- **License:** MIT `LICENSE` file added, updated for new owner.
- **Funding File:** `.github/FUNDING.yml` added.
- **Testing Framework:** Vitest configured with v8 coverage.
- **Coverage Reports:** Generating successfully.
- **Tests Added & Passing (Vitest):** (List omitted for brevity - unchanged)
- **Guideline Alignment (Configuration & Tooling):**
- Package Manager: `pnpm`.
- Node.js Version: `~22.0.0`.
- Dependency Versions: Updated.
- Configuration Files:
- `package.json`: Updated dependencies, scripts, lint-staged for `style_quality.md` (SHA: 9d56a9d...).
- `eslint.config.js`: Configured based on `style_quality.md` principles (Flat Config). `import/no-unresolved` temporarily disabled.
- `.prettierrc.cjs`: Updated content based on `style_quality.md`.
- `tsconfig.json`: Updated `module` and `moduleResolution` to `NodeNext`.
- (Other configs like `vitest.config.ts`, `commitlint.config.cjs`, `dependabot.yml` assumed aligned from previous checks).
- Git Hooks (Husky + lint-staged): Configured.
- `README.md` Structure: Aligned (placeholders remain).
- **File Naming:** Most `.ts` files in `src` and `__tests__` renamed to kebab-case.
- **Import Paths:** Updated to use kebab-case and `.js` extension.
## 2. What's Left to Build / Test
- **Add Tests for Remaining Handlers:**
- `chmodItems` (**Skipped** - Windows limitations)
- `chownItems` (**Skipped** - Windows limitations)
- **Address Skipped Tests:**
- `copyItems` fallback tests: Removed as fallback logic was unnecessary.
- `searchFiles` zero-width regex test: Skipped due to implementation complexity.
## 3. Current Status
- Project configuration and tooling aligned with Playbook guidelines (pnpm, Node LTS, dependency versions, config files, hooks, README structure).
- **ESLint check (with `--no-cache`) confirms no errors.** Previous commit likely fixed them. `import/no-unresolved` rule was temporarily disabled but seems unnecessary now.
- Mocking issues previously resolved using dependency injection.
- Coverage reports are generating.
- Release `v0.5.9` was the last release triggered.
## 4. Compliance Tasks
- **DONE:** ESLint errors fixed (confirmed via `--no-cache`).
## 5. Known Issues / Areas for Improvement
- **ESLint Import Resolver:** Verified `import/no-unresolved` rule (re-enabled, no errors).
- **`__tests__/test-utils.ts` Renaming:** File has been renamed.
- **Coverage Reports:** Generation fixed. Coverage improved but some branches remain uncovered due to mocking issues.
- **`applyDiff.test.ts` Failures:** Resolved. Tests are now passing.
- **ESLint Errors:** Resolved.
- **`README.md` Placeholders:** Needs content for sections like Performance, Design Philosophy, etc.
- **Launcher Dependency:** Server functionality relies on the launching process setting the correct `cwd`.
- **Windows `chmod`/`chown`:** Effectiveness is limited. Tests skipped.
- **Cross-Device Moves/Copies:** May fail (`EXDEV`).
- **`deleteItems` Root Deletion Test:** Using a workaround.
- **`searchFiles` Zero-Width Matches:** Handler does not correctly find all zero-width matches with global regex. Test skipped.
```
--------------------------------------------------------------------------------
/__tests__/test-utils.ts:
--------------------------------------------------------------------------------
```typescript
import * as fsPromises from 'node:fs/promises';
import path from 'node:path';
import { v4 as uuidv4 } from 'uuid'; // Use uuid for unique temp dir names
/**
* Recursively creates a directory structure based on the provided object.
* @param structure Object defining the structure. Keys are filenames/dirnames.
* String values are file contents. Object values are subdirectories.
* @param currentPath The path where the structure should be created.
*/
async function createStructureRecursively(
structure: FileSystemStructure,
currentPath: string,
): Promise<void> {
for (const name in structure) {
if (!Object.prototype.hasOwnProperty.call(structure, name)) {
continue;
}
const itemPath = path.join(currentPath, name);
const content = structure[name];
if (typeof content === 'string' || Buffer.isBuffer(content)) {
// It's a file - ensure parent directory exists first
try {
await fsPromises.mkdir(path.dirname(itemPath), { recursive: true });
await fsPromises.writeFile(itemPath, content);
} catch (error) {
console.error(`Failed to create file ${itemPath}:`, error);
throw error;
}
} else if (typeof content === 'object' && content !== null) {
// It's a directory (plain object)
await fsPromises.mkdir(itemPath, { recursive: true });
// Recurse into the subdirectory
await createStructureRecursively(content, itemPath);
} else {
// Handle other potential types or throw an error
console.warn(`Unsupported type for item '${name}' in test structure.`);
}
}
}
/**
* Removes the temporary directory and its contents.
* @param dirPath The absolute path to the temporary directory to remove.
*/
export async function cleanupTemporaryFilesystem(dirPath: string): Promise<void> {
if (!dirPath) {
console.warn('Attempted to cleanup an undefined or empty directory path.');
return;
}
try {
// Basic check to prevent accidental deletion outside expected temp pattern
if (!path.basename(dirPath).startsWith('jest-temp-')) {
console.error(`Refusing to delete directory not matching 'jest-temp-*' pattern: ${dirPath}`);
return; // Or throw an error
}
await fsPromises.rm(dirPath, { recursive: true, force: true });
} catch (error: unknown) {
// Log error but don't necessarily fail the test run because of cleanup issues
if (error instanceof Error) {
console.error(`Failed to cleanup temporary directory ${dirPath}:`, error.message);
} else {
console.error(`Failed to cleanup temporary directory ${dirPath}:`, String(error));
}
}
}
/**
* Creates a temporary directory with a unique name and populates it based on the structure.
* @param structure Object defining the desired filesystem structure within the temp dir.
* @param baseTempDir Optional base directory for temporary folders (defaults to project root).
* @returns The absolute path to the created temporary root directory.
*/
interface FileSystemStructure {
[key: string]: string | Buffer | FileSystemStructure;
}
export async function createTemporaryFilesystem(
structure: FileSystemStructure,
baseTempDir = process.cwd(),
): Promise<string> {
// Verify base directory exists
try {
await fsPromises.access(baseTempDir);
} catch {
throw new Error(`Base temp directory does not exist: ${baseTempDir}`);
}
// Create a unique directory name within the base temp directory
const tempDirName = `jest-temp-${uuidv4()}`;
const tempDirPath = path.join(baseTempDir, tempDirName);
try {
console.log(`Creating temp directory: ${tempDirPath}`);
await fsPromises.mkdir(tempDirPath, { recursive: true }); // Ensure base temp dir exists
console.log(`Temp directory created successfully`);
console.log(`Creating structure in temp directory`);
await createStructureRecursively(structure, tempDirPath);
console.log(`Structure created successfully`);
return tempDirPath;
} catch (error: unknown) {
if (error instanceof Error) {
console.error(`Failed to create temporary filesystem at ${tempDirPath}:`, error.message);
} else {
console.error(`Failed to create temporary filesystem at ${tempDirPath}:`, String(error));
}
// Attempt cleanup even if creation failed partially
try {
await cleanupTemporaryFilesystem(tempDirPath); // Now defined before use
} catch (cleanupError) {
console.error(
`Failed to cleanup partially created temp directory ${tempDirPath}:`,
cleanupError,
);
}
throw error; // Re-throw the original error
}
}
```
--------------------------------------------------------------------------------
/src/handlers/chown-items.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/chownItems.ts
import { promises as fs } from 'node:fs';
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
interface McpToolResponse {
content: { type: 'text'; text: string }[];
}
export const ChownItemsArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('An array of relative paths.'),
uid: z.number().int({ message: 'UID must be an integer' }).describe('User ID.'),
gid: z.number().int({ message: 'GID must be an integer' }).describe('Group ID.'),
})
.strict();
type ChownItemsArgs = z.infer<typeof ChownItemsArgsSchema>;
interface ChownResult {
path: string;
success: boolean;
uid?: number;
gid?: number;
error?: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): ChownItemsArgs {
try {
return ChownItemsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles errors during chown operation. */
function handleChownError(error: unknown, _relativePath: string, pathOutput: string): ChownResult {
let errorMessage = `Failed to change ownership: ${error instanceof Error ? error.message : String(error)}`;
let logError = true;
if (error instanceof McpError) {
errorMessage = error.message;
logError = false;
} else if (error && typeof error === 'object' && 'code' in error) {
if (error.code === 'ENOENT') {
errorMessage = 'Path not found';
logError = false;
} else if (error.code === 'EPERM') {
// Common error on Windows or insufficient permissions
errorMessage = 'Operation not permitted (Permissions or unsupported on OS)';
}
}
if (logError) {
// Error logged via McpError
}
return { path: pathOutput, success: false, error: errorMessage };
}
/** Processes the chown operation for a single path. */
async function processSingleChownOperation(
relativePath: string,
uid: number,
gid: number,
): Promise<ChownResult> {
const pathOutput = relativePath.replaceAll('\\', '/');
try {
const targetPath = resolvePath(relativePath);
if (targetPath === PROJECT_ROOT) {
return {
path: pathOutput,
success: false,
error: 'Changing ownership of the project root is not allowed.',
};
}
await fs.chown(targetPath, uid, gid);
return { path: pathOutput, success: true, uid, gid };
} catch (error: unknown) {
return handleChownError(error, relativePath, pathOutput);
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<ChownResult>[],
originalPaths: string[],
): ChownResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
return result.status === 'fulfilled'
? result.value
: {
path: pathOutput,
success: false,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
});
}
/** Main handler function */
const handleChownItemsFunc = async (args: unknown): Promise<McpToolResponse> => {
const { paths: relativePaths, uid, gid } = parseAndValidateArgs(args);
const chownPromises = relativePaths.map((relativePath) =>
processSingleChownOperation(relativePath, uid, gid),
);
const settledResults = await Promise.allSettled(chownPromises);
const outputResults = processSettledResults(settledResults, relativePaths);
// Sort results by original path order for predictability
const originalIndexMap = new Map(relativePaths.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, undefined, 2) }],
};
};
// Export the complete tool definition
export const chownItemsToolDefinition = {
name: 'chown_items',
description: 'Change owner (UID) and group (GID) for multiple specified files/directories.',
inputSchema: ChownItemsArgsSchema,
handler: handleChownItemsFunc,
};
```
--------------------------------------------------------------------------------
/src/handlers/stat-items.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/statItems.ts
import { promises as fs, type Stats } from 'node:fs'; // Import Stats
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath } from '../utils/path-utils.js';
import type { FormattedStats } from '../utils/stats-utils.js'; // Import type
import { formatStats } from '../utils/stats-utils.js';
// --- Types ---
import type { McpToolResponse } from '../types/mcp-types.js';
export const StatItemsArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('An array of relative paths (files or directories) to get status for.'),
})
.strict();
type StatItemsArgs = z.infer<typeof StatItemsArgsSchema>;
export interface StatResult {
path: string;
status: 'success' | 'error';
stats?: FormattedStats;
error?: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): StatItemsArgs {
try {
return StatItemsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles errors during stat operation. */
function handleStatError(error: unknown, relativePath: string, pathOutput: string): StatResult {
let errorMessage = `Failed to get stats: ${error instanceof Error ? error.message : String(error)}`;
let logError = true;
if (error instanceof McpError) {
errorMessage = error.message; // Use McpError message directly
logError = false; // Assume McpError was logged at source or is expected
} else if (error && typeof error === 'object' && 'code' in error) {
if (error.code === 'ENOENT') {
errorMessage = 'Path not found';
logError = false; // ENOENT is a common, expected error
} else if (error.code === 'EACCES' || error.code === 'EPERM') {
errorMessage = `Permission denied stating path: ${relativePath}`;
}
}
if (logError) {
// Error logged via McpError
}
return {
path: pathOutput,
status: 'error',
error: errorMessage,
};
}
/** Processes the stat operation for a single path. */
async function processSingleStatOperation(relativePath: string): Promise<StatResult> {
const pathOutput = relativePath.replaceAll('\\', '/');
try {
const targetPath = resolvePath(relativePath);
const stats: Stats = await fs.stat(targetPath); // Explicitly type Stats
return {
path: pathOutput,
status: 'success',
stats: formatStats(relativePath, targetPath, stats), // Pass targetPath as absolutePath
};
} catch (error: unknown) {
return handleStatError(error, relativePath, pathOutput);
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<StatResult>[],
originalPaths: string[],
): StatResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
if (result.status === 'fulfilled') {
return result.value;
} else {
// Handle unexpected rejections
// Error logged via McpError
return {
path: pathOutput,
status: 'error',
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
}
});
}
/** Main handler function */
const handleStatItemsFunc = async (args: unknown): Promise<McpToolResponse> => {
const { paths: pathsToStat } = parseAndValidateArgs(args);
const statPromises = pathsToStat.map(processSingleStatOperation);
const settledResults = await Promise.allSettled(statPromises);
const outputResults = processSettledResults(settledResults, pathsToStat);
// Sort results by original path order for predictability
const originalIndexMap = new Map(pathsToStat.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, null, 2) }],
};
};
// Export the complete tool definition
export const statItemsToolDefinition = {
name: 'stat_items',
description: 'Get detailed status information for multiple specified paths.',
inputSchema: StatItemsArgsSchema,
handler: handleStatItemsFunc,
};
```
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
#!/usr/bin/env node
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import type { ZodTypeAny } from 'zod'; // Keep ZodTypeAny
import { zodToJsonSchema } from 'zod-to-json-schema';
import { applyDiffInputSchema } from './schemas/apply-diff-schema.js';
// Import SDK types needed
import type { CallToolRequest } from '@modelcontextprotocol/sdk/types.js';
import {
CallToolRequestSchema,
ListToolsRequestSchema,
McpError,
ErrorCode,
} from '@modelcontextprotocol/sdk/types.js';
// Import the LOCAL McpRequest/McpResponse types defined in handlers/index.ts
import type { ToolDefinition } from './handlers/index.js';
import type {
McpRequest as LocalMcpRequest,
McpToolResponse as LocalMcpResponse,
} from './types/mcp-types.js';
// Import the aggregated tool definitions
import { allToolDefinitions } from './handlers/index.js';
// --- Server Setup ---
const server = new Server(
{
name: 'filesystem-mcp',
version: '0.6.0', // Version bump for apply_diff tool
description: 'MCP Server for filesystem operations relative to the project root.',
},
{
capabilities: { tools: {} },
},
);
// Helper function to convert Zod schema to JSON schema for MCP
const generateInputSchema = (schema: ZodTypeAny): Record<string, unknown> => {
// Pass ZodTypeAny directly
return zodToJsonSchema(schema, { target: 'openApi3' }) as Record<string, unknown>;
};
// Set request handler for listing tools
server.setRequestHandler(
ListToolsRequestSchema,
(): {
tools: {
name: string;
description: string;
inputSchema: Record<string, unknown>;
}[];
} => {
// Map the aggregated definitions to the format expected by the SDK
const availableTools = allToolDefinitions.map((def) => {
if (typeof def === 'function') {
// Handle function-based tools (like handleApplyDiff)
return {
name: 'apply_diff',
description: 'Apply diffs to files',
inputSchema: generateInputSchema(applyDiffInputSchema),
};
}
return {
name: def.name,
description: def.description,
inputSchema: generateInputSchema(def.inputSchema),
};
});
return { tools: availableTools };
},
);
// --- Helper Functions for handleCallTool ---
/** Handles errors from the local tool handler response. */
function handleToolError(localResponse: LocalMcpResponse): void {
// Use optional chaining for safer access
if (localResponse.error) {
throw localResponse.error instanceof McpError
? localResponse.error
: new McpError(ErrorCode.InternalError, 'Handler returned an unexpected error format.');
}
}
/** Formats the successful response payload from the local tool handler. */
function formatSuccessPayload(localResponse: LocalMcpResponse): Record<string, unknown> {
// Check for data property safely
if (localResponse.data && typeof localResponse.data === 'object') {
// Assert type for safety, assuming data is the primary payload
return localResponse.data as Record<string, unknown>;
}
// Check for content property safely
if (localResponse.content && Array.isArray(localResponse.content)) {
// Assuming if it's an array, the structure is correct based on handler return types
// Removed the .every check causing the unnecessary-condition error
return { content: localResponse.content };
}
// Return empty object if no specific data or valid content found
return {};
}
// --- Main Handler for Tool Calls ---
/** Handles incoming 'call_tool' requests from the SDK. */
const handleCallTool = async (sdkRequest: CallToolRequest): Promise<Record<string, unknown>> => {
// Find the corresponding tool definition
const toolDefinition: ToolDefinition | undefined = allToolDefinitions.find(
(def) => def.name === sdkRequest.params.name,
);
// Throw error if tool is not found
if (!toolDefinition) {
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${sdkRequest.params.name}`);
}
// Construct the request object expected by the local handler
const localRequest: LocalMcpRequest = {
jsonrpc: '2.0',
method: sdkRequest.method,
params: sdkRequest.params,
};
// Execute the local tool handler
const localResponse: LocalMcpResponse = await toolDefinition.handler(localRequest);
// Process potential errors from the handler
handleToolError(localResponse);
// Format and return the success payload
return formatSuccessPayload(localResponse);
};
// Register the main handler function with the SDK server
server.setRequestHandler(CallToolRequestSchema, handleCallTool);
// --- Server Start ---
try {
const transport = new StdioServerTransport();
await server.connect(transport);
// Server started successfully
} catch {
// Server failed to start
process.exit(1);
}
```
--------------------------------------------------------------------------------
/src/handlers/chmod-items.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/chmodItems.ts
import { promises as fs } from 'node:fs';
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
interface McpToolResponse {
content: { type: 'text'; text: string }[];
}
export const ChmodItemsArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('An array of relative paths.'),
mode: z
.string()
.regex(/^[0-7]{3,4}$/, {
message: "Mode must be an octal string like '755' or '0755'",
})
.describe("The permission mode as an octal string (e.g., '755', '644')."),
})
.strict();
type ChmodItemsArgs = z.infer<typeof ChmodItemsArgsSchema>;
interface ChmodResult {
path: string;
success: boolean;
mode?: string; // Include mode on success
error?: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): ChmodItemsArgs {
try {
return ChmodItemsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles errors during chmod operation. */
function handleChmodError(error: unknown, relativePath: string, pathOutput: string): ChmodResult {
let errorMessage = `Failed to change mode: ${error instanceof Error ? error.message : String(error)}`;
let logError = true;
if (error instanceof McpError) {
errorMessage = error.message;
logError = false;
} else if (error && typeof error === 'object' && 'code' in error) {
if (error.code === 'ENOENT') {
errorMessage = 'Path not found';
logError = false; // ENOENT is a common, expected error
} else if (error.code === 'EPERM' || error.code === 'EACCES') {
errorMessage = `Permission denied changing mode for ${relativePath}`;
}
}
if (logError) {
// Error logged via McpError
}
return { path: pathOutput, success: false, error: errorMessage };
}
/** Processes the chmod operation for a single path. */
async function processSingleChmodOperation(
relativePath: string,
mode: number, // Pass parsed mode
modeString: string, // Pass original string for success result
): Promise<ChmodResult> {
const pathOutput = relativePath.replaceAll('\\', '/');
try {
const targetPath = resolvePath(relativePath);
if (targetPath === PROJECT_ROOT) {
return {
path: pathOutput,
success: false,
error: 'Changing permissions of the project root is not allowed.',
};
}
await fs.chmod(targetPath, mode);
return { path: pathOutput, success: true, mode: modeString };
} catch (error: unknown) {
return handleChmodError(error, relativePath, pathOutput);
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<ChmodResult>[],
originalPaths: string[],
): ChmodResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
if (result.status === 'fulfilled') {
return result.value;
} else {
// Error logged via McpError
return {
path: pathOutput,
success: false,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
}
});
}
/** Main handler function */
const handleChmodItemsFunc = async (args: unknown): Promise<McpToolResponse> => {
const { paths: relativePaths, mode: modeString } = parseAndValidateArgs(args);
const mode = Number.parseInt(modeString, 8); // Parse mode once
const chmodPromises = relativePaths.map((relativePath) =>
processSingleChmodOperation(relativePath, mode, modeString),
);
const settledResults = await Promise.allSettled(chmodPromises);
const outputResults = processSettledResults(settledResults, relativePaths);
// Sort results by original path order for predictability
const originalIndexMap = new Map(relativePaths.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, null, 2) }],
};
};
// Export the complete tool definition
export const chmodItemsToolDefinition = {
name: 'chmod_items',
description: 'Change permissions mode for multiple specified files/directories (POSIX-style).',
inputSchema: ChmodItemsArgsSchema,
handler: handleChmodItemsFunc,
};
```
--------------------------------------------------------------------------------
/__tests__/utils/string-utils.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import {
escapeRegex,
getIndentation,
applyIndentation,
linesMatch,
} from '../../src/utils/string-utils';
describe('String Utilities', () => {
describe('escapeRegex', () => {
it('should escape special regex characters', () => {
const input = 'Hello? [$()*+.^{|}] World\\';
// Use the correct string literal based on manual trace of the function's behavior
const expected = 'Hello\\? \\[\\$\\(\\)\\*\\+\\.\\^\\{\\|\\}\\] World\\\\';
expect(escapeRegex(input)).toBe(expected);
});
it('should not escape normal characters', () => {
const input = 'abcdef123';
expect(escapeRegex(input)).toBe(input);
});
it('should handle empty string', () => {
expect(escapeRegex('')).toBe('');
});
});
describe('getIndentation', () => {
it('should return leading spaces', () => {
expect(getIndentation(' indented line')).toBe(' ');
});
it('should return leading tabs', () => {
expect(getIndentation('\t\tindented line')).toBe('\t\t');
});
it('should return mixed leading whitespace', () => {
expect(getIndentation(' \t indented line')).toBe(' \t ');
});
it('should return empty string for no leading whitespace', () => {
expect(getIndentation('no indent')).toBe('');
});
it('should return empty string for empty line', () => {
expect(getIndentation('')).toBe('');
});
it('should return empty string for undefined input', () => {
expect(getIndentation(undefined)).toBe(''); // Covers line 23
});
});
describe('applyIndentation', () => {
it('should apply indentation to a single line', () => {
expect(applyIndentation('line1', ' ')).toEqual([' line1']);
});
it('should apply indentation to multiple lines', () => {
const content = 'line1\nline2\nline3';
const indent = '\t';
const expected = ['\tline1', '\tline2', '\tline3'];
expect(applyIndentation(content, indent)).toEqual(expected); // Covers line 35
});
it('should handle empty content', () => {
expect(applyIndentation('', ' ')).toEqual([' ']); // split returns ['']
});
it('should handle empty indentation', () => {
const content = 'line1\nline2';
expect(applyIndentation(content, '')).toEqual(['line1', 'line2']);
});
});
describe('linesMatch', () => {
// ignoreLeadingWhitespace = false
it('should match identical lines when not ignoring whitespace', () => {
expect(linesMatch(' line', ' line', false)).toBe(true);
});
it('should not match different lines when not ignoring whitespace', () => {
expect(linesMatch(' line', ' line', false)).toBe(false);
expect(linesMatch('line', 'line ', false)).toBe(false);
});
// ignoreLeadingWhitespace = true
it('should match lines with different leading whitespace when ignoring', () => {
expect(linesMatch(' line', ' line', true)).toBe(true);
expect(linesMatch('line', '\tline', true)).toBe(true);
});
it('should not match lines with different content when ignoring whitespace', () => {
expect(linesMatch(' line1', ' line2', true)).toBe(false);
});
it('should not match if search line has extra indent when ignoring', () => {
// This ensures we only trim the file line based on the search line's content
expect(linesMatch('line', ' line', true)).toBe(true); // Should match if ignoring whitespace
});
it('should match lines with identical content but different trailing whitespace when ignoring', () => {
// Note: trimStart() is used, so trailing whitespace matters
expect(linesMatch(' line ', ' line', true)).toBe(false);
expect(linesMatch(' line', ' line ', true)).toBe(false);
expect(linesMatch(' line ', ' line ', true)).toBe(true);
});
it('should handle empty search line correctly when ignoring whitespace', () => {
expect(linesMatch(' ', '', true)).toBe(true); // fileLine becomes '', searchLine is ''
expect(linesMatch(' content', '', true)).toBe(false); // fileLine becomes 'content', searchLine is ''
expect(linesMatch('', '', true)).toBe(true);
});
it('should handle empty file line correctly when ignoring whitespace', () => {
expect(linesMatch('', ' content', true)).toBe(false); // fileLine is '', searchLine becomes 'content'
expect(linesMatch('', ' ', true)).toBe(true); // fileLine is '', searchLine becomes ''
});
// Edge cases for undefined (Covers lines 50-52)
it('should return false if fileLine is undefined', () => {
expect(linesMatch(undefined, 'line', false)).toBe(false);
expect(linesMatch(undefined, 'line', true)).toBe(false);
});
it('should return false if searchLine is undefined', () => {
expect(linesMatch('line', undefined, false)).toBe(false);
expect(linesMatch('line', undefined, true)).toBe(false);
});
it('should return false if both lines are undefined', () => {
expect(linesMatch(undefined, undefined, false)).toBe(false);
expect(linesMatch(undefined, undefined, true)).toBe(false);
});
it('should handle lines with only whitespace correctly when ignoring', () => {
expect(linesMatch(' ', '\t', true)).toBe(true); // Both trimStart to ''
expect(linesMatch(' ', ' a', true)).toBe(false); // fileLine '', searchLine 'a'
expect(linesMatch(' a', ' ', true)).toBe(false); // fileLine 'a', searchLine ''
});
});
});
```
--------------------------------------------------------------------------------
/src/handlers/copy-items.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/copyItems.ts
import { promises as fs } from 'node:fs';
import path from 'node:path';
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
import type { McpToolResponse } from '../types/mcp-types.js';
export const CopyOperationSchema = z
.object({
source: z.string().describe('Relative path of the source.'),
destination: z.string().describe('Relative path of the destination.'),
})
.strict();
export const CopyItemsArgsSchema = z
.object({
operations: z
.array(CopyOperationSchema)
.min(1, { message: 'Operations array cannot be empty' })
.describe('Array of {source, destination} objects.'),
})
.strict();
type CopyItemsArgs = z.infer<typeof CopyItemsArgsSchema>;
type CopyOperation = z.infer<typeof CopyOperationSchema>; // Export or define locally if needed
interface CopyResult {
source: string;
destination: string;
success: boolean;
error?: string;
}
// --- Parameter Interfaces ---
interface HandleCopyErrorParams {
error: unknown;
sourceRelative: string;
destinationRelative: string;
sourceOutput: string;
destOutput: string;
}
interface ProcessSingleCopyParams {
op: CopyOperation;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): CopyItemsArgs {
try {
return CopyItemsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles errors during the copy operation for a single item. */
function handleCopyError(params: HandleCopyErrorParams): CopyResult {
const { error, sourceRelative, destinationRelative, sourceOutput, destOutput } = params;
let errorMessage = 'An unknown error occurred during copy.';
let errorCode: string | undefined = undefined;
if (error && typeof error === 'object' && 'code' in error && typeof error.code === 'string') {
errorCode = error.code;
}
if (error instanceof McpError) {
errorMessage = error.message;
} else if (error instanceof Error) {
errorMessage = `Failed to copy item: ${error.message}`;
}
if (errorCode === 'ENOENT') {
errorMessage = `Source path not found: ${sourceRelative}`;
} else if (errorCode === 'EPERM' || errorCode === 'EACCES') {
errorMessage = `Permission denied copying '${sourceRelative}' to '${destinationRelative}'.`;
}
return {
source: sourceOutput,
destination: destOutput,
success: false,
error: errorMessage,
};
}
/** Processes a single copy operation. */
async function processSingleCopyOperation(params: ProcessSingleCopyParams): Promise<CopyResult> {
const { op } = params;
const sourceRelative = op.source;
const destinationRelative = op.destination;
const sourceOutput = sourceRelative.replaceAll('\\', '/');
const destOutput = destinationRelative.replaceAll('\\', '/');
let sourceAbsolute = ''; // Initialize for potential use in error message
try {
sourceAbsolute = resolvePath(sourceRelative);
const destinationAbsolute = resolvePath(destinationRelative);
if (sourceAbsolute === PROJECT_ROOT) {
return {
source: sourceOutput,
destination: destOutput,
success: false,
error: 'Copying the project root is not allowed.',
};
}
// Ensure parent directory of destination exists
const destDir = path.dirname(destinationAbsolute);
await fs.mkdir(destDir, { recursive: true });
// Perform the copy (recursive for directories)
await fs.cp(sourceAbsolute, destinationAbsolute, {
recursive: true,
errorOnExist: false, // Overwrite existing files/dirs
force: true, // Ensure overwrite
});
return { source: sourceOutput, destination: destOutput, success: true };
} catch (error: unknown) {
return handleCopyError({
// Pass object
error,
sourceRelative,
destinationRelative,
sourceOutput,
destOutput,
});
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<CopyResult>[],
originalOps: CopyOperation[],
): CopyResult[] {
return results.map((result, index) => {
const op = originalOps[index];
const sourceOutput = (op?.source ?? 'unknown').replaceAll('\\', '/');
const destOutput = (op?.destination ?? 'unknown').replaceAll('\\', '/');
return result.status === 'fulfilled'
? result.value
: {
source: sourceOutput,
destination: destOutput,
success: false,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
});
}
/** Main handler function */
const handleCopyItemsFunc = async (args: unknown): Promise<McpToolResponse> => {
const { operations } = parseAndValidateArgs(args);
const copyPromises = operations.map((op) => processSingleCopyOperation({ op }));
const settledResults = await Promise.allSettled(copyPromises);
const outputResults = processSettledResults(settledResults, operations);
// Sort results based on the original order
const originalIndexMap = new Map(operations.map((op, i) => [op.source.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.source) ?? Infinity;
const indexB = originalIndexMap.get(b.source) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, undefined, 2) }],
};
};
// Export the complete tool definition
export const copyItemsToolDefinition = {
name: 'copy_items',
description: 'Copy multiple specified files/directories.',
inputSchema: CopyItemsArgsSchema,
handler: handleCopyItemsFunc,
};
```
--------------------------------------------------------------------------------
/src/handlers/write-content.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/writeContent.ts
import { promises as fs } from 'node:fs';
import path from 'node:path';
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
import type { McpToolResponse } from '../types/mcp-types.js';
export const WriteItemSchema = z
.object({
path: z.string().describe('Relative path for the file.'),
content: z.string().describe('Content to write.'),
append: z
.boolean()
.optional()
.default(false)
.describe('Append content instead of overwriting.'),
})
.strict();
export const WriteContentArgsSchema = z
.object({
items: z
.array(WriteItemSchema)
.min(1, { message: 'Items array cannot be empty' })
.describe('Array of {path, content, append?} objects.'),
})
.strict();
type WriteContentArgs = z.infer<typeof WriteContentArgsSchema>;
type WriteItem = z.infer<typeof WriteItemSchema>; // Define type for item
interface WriteResult {
path: string;
success: boolean;
operation?: 'written' | 'appended';
error?: string;
}
export interface WriteContentDependencies {
writeFile: typeof fs.writeFile;
mkdir: typeof fs.mkdir;
stat: typeof fs.stat; // Keep stat if needed for future checks, though not used now
appendFile: typeof fs.appendFile;
resolvePath: typeof resolvePath;
PROJECT_ROOT: string;
pathDirname: (p: string) => string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): WriteContentArgs {
try {
return WriteContentArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles errors during file write/append operation. */
function handleWriteError(
error: unknown,
_relativePath: string,
pathOutput: string,
append: boolean,
): WriteResult {
if (error instanceof McpError) {
return { path: pathOutput, success: false, error: error.message };
}
const errorMessage = error instanceof Error ? error.message : String(error);
// Error logged via McpError
return {
path: pathOutput,
success: false,
error: `Failed to ${append ? 'append' : 'write'} file: ${errorMessage}`,
};
}
/** Processes a single write/append operation. */
async function processSingleWriteOperation(
file: WriteItem,
deps: WriteContentDependencies,
): Promise<WriteResult> {
const relativePath = file.path;
const content = file.content;
const append = file.append;
const pathOutput = relativePath.replaceAll('\\', '/');
try {
const targetPath = deps.resolvePath(relativePath);
if (targetPath === deps.PROJECT_ROOT) {
return {
path: pathOutput,
success: false,
error: 'Writing directly to the project root is not allowed.',
};
}
const targetDir = deps.pathDirname(targetPath);
// Avoid creating the root dir itself
if (targetDir !== deps.PROJECT_ROOT) {
await deps.mkdir(targetDir, { recursive: true });
}
if (append) {
await deps.appendFile(targetPath, content, 'utf-8');
return { path: pathOutput, success: true, operation: 'appended' };
} else {
await deps.writeFile(targetPath, content, 'utf-8');
return { path: pathOutput, success: true, operation: 'written' };
}
} catch (error: unknown) {
return handleWriteError(error, relativePath, pathOutput, append);
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<WriteResult>[],
originalItems: WriteItem[],
): WriteResult[] {
return results.map((result, index) => {
const originalItem = originalItems[index];
const pathOutput = (originalItem?.path ?? 'unknown_path').replaceAll('\\', '/');
if (result.status === 'fulfilled') {
return result.value;
} else {
// Error logged via McpError
return {
path: pathOutput,
success: false,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
}
});
}
/** Main handler function */
export const handleWriteContentFunc = async (
// Added export
deps: WriteContentDependencies,
args: unknown,
): Promise<McpToolResponse> => {
const { items: filesToWrite } = parseAndValidateArgs(args);
const writePromises = filesToWrite.map((file) => processSingleWriteOperation(file, deps));
const settledResults = await Promise.allSettled(writePromises);
const outputResults = processSettledResults(settledResults, filesToWrite);
// Sort results based on the original order
const originalIndexMap = new Map(filesToWrite.map((f, i) => [f.path.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, null, 2) }],
};
};
// Export the complete tool definition
export const writeContentToolDefinition = {
name: 'write_content',
description:
"Write or append content to multiple specified files (creating directories if needed). NOTE: For modifying existing files, prefer using 'edit_file' or 'replace_content' for better performance, especially with large files. Use 'write_content' primarily for creating new files or complete overwrites.",
inputSchema: WriteContentArgsSchema,
handler: (args: unknown): Promise<McpToolResponse> => {
const deps: WriteContentDependencies = {
writeFile: fs.writeFile,
mkdir: fs.mkdir,
stat: fs.stat,
appendFile: fs.appendFile,
resolvePath: resolvePath,
PROJECT_ROOT: PROJECT_ROOT,
pathDirname: path.dirname.bind(path),
};
return handleWriteContentFunc(deps, args);
},
};
```
--------------------------------------------------------------------------------
/__tests__/index.test.ts:
--------------------------------------------------------------------------------
```typescript
// __tests__/index.test.ts
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
// Keep standard imports, even though they are mocked below
// Keep standard imports, even though they are mocked below
// import { Server } from '@modelcontextprotocol/sdk'; // Removed as it's mocked
// import { StdioServerTransport } from '@modelcontextprotocol/sdk/stdio'; // Removed as it's mocked
// McpError might be imported from sdk directly if needed, or mocked within sdk mock
// import { McpError } from '@modelcontextprotocol/sdk/error'; // Or '@modelcontextprotocol/sdk'
import * as allHandlers from '../src/handlers/index.js'; // Import all handlers with extension
// import { ZodError } from 'zod'; // Removed unused import
// Mock the SDK components within the factory functions
// Define mock variables outside the factory to be accessible later
const mockServerInstance = {
registerTool: vi.fn(),
start: vi.fn(),
stop: vi.fn(),
};
const MockServer = vi.fn().mockImplementation(() => mockServerInstance);
vi.mock('@modelcontextprotocol/sdk', () => {
const MockMcpError = class extends Error {
code: number;
data: any;
constructor(message: string, code = -32_000, data?: any) {
super(message);
this.name = 'McpError';
this.code = code;
this.data = data;
}
};
return {
Server: MockServer, // Export the mock constructor
McpError: MockMcpError,
};
});
// Define mock variable outside the factory
const mockTransportInstance = {};
const MockStdioServerTransport = vi.fn().mockImplementation(() => mockTransportInstance);
vi.mock('@modelcontextprotocol/sdk/stdio', () => {
return {
StdioServerTransport: MockStdioServerTransport, // Export the mock constructor
};
});
// Remove the separate mock for sdk/error as McpError is mocked above
// Define an interface for the expected handler structure
interface HandlerDefinition {
name: string;
description: string;
schema: any;
handler: (...args: any[]) => Promise<any>;
jsonSchema?: any;
}
// Mock the handlers to prevent actual execution
// Iterate over values and check structure more robustly with type guard
const mockHandlers = Object.values(allHandlers).reduce<
Record<string, HandlerDefinition & { handler: ReturnType<typeof vi.fn> }>
>((acc, handlerDef) => {
// Type guard to check if handlerDef matches HandlerDefinition structure
const isHandlerDefinition = (def: any): def is HandlerDefinition =>
typeof def === 'object' &&
def !== null &&
typeof def.name === 'string' &&
typeof def.handler === 'function';
if (isHandlerDefinition(handlerDef)) {
// Now TypeScript knows handlerDef has a 'name' property of type string
acc[handlerDef.name] = {
...handlerDef, // Spread the original definition
handler: vi.fn().mockResolvedValue({ success: true }), // Mock the handler function
};
}
// Ignore exports that don't match the expected structure
return acc;
}, {}); // Initial value for reduce
// Ensure mockHandlers is correctly typed before spreading
const typedMockHandlers: Record<string, any> = mockHandlers;
vi.mock('../src/handlers/index.js', () => ({
// Also update path here
...typedMockHandlers,
}));
// Mock console methods
vi.spyOn(console, 'log').mockImplementation(() => {}); // Remove unused variable assignment
vi.spyOn(console, 'error').mockImplementation(() => {}); // Remove unused variable assignment
// Adjust the type assertion for process.exit mock
vi.spyOn(process, 'exit').mockImplementation((() => {
// Remove unused variable assignment
throw new Error('process.exit called');
}) as (code?: number | string | null | undefined) => never);
describe('Server Initialization (src/index.ts)', () => {
// Remove explicit type annotations, let TS infer from mocks
let serverInstance: any;
let transportInstance: any;
beforeEach(async () => {
// Reset mocks before each test
vi.clearAllMocks();
// Dynamically import the module to run the setup logic
// Use .js extension consistent with module resolution
await import('../src/index.js');
// Get the mocked instances using the mock variables defined outside
serverInstance = MockServer.mock.instances[0];
transportInstance = MockStdioServerTransport.mock.instances[0];
});
afterEach(() => {
vi.resetModules(); // Ensure fresh import for next test
});
it('should create a StdioServerTransport instance', () => {
expect(MockStdioServerTransport).toHaveBeenCalledTimes(1); // Use the mock variable
expect(transportInstance).toBeDefined();
});
it('should create a Server instance with the transport', () => {
expect(MockServer).toHaveBeenCalledTimes(1); // Use the mock variable
// expect(MockServer).toHaveBeenCalledWith(transportInstance); // Checking constructor args might be complex/brittle with mocks
expect(serverInstance).toBeDefined();
});
it('should register all expected tools', () => {
// Get names from the keys of the refined mockHandlers object
const expectedToolNames = Object.keys(typedMockHandlers); // Use typedMockHandlers
expect(serverInstance.registerTool).toHaveBeenCalledTimes(expectedToolNames.length);
// Check if each handler name (which is the key in mockHandlers now) was registered
for (const toolName of expectedToolNames) {
const handlerDefinition = typedMockHandlers[toolName]; // Use typedMockHandlers
expect(serverInstance.registerTool).toHaveBeenCalledWith(
expect.objectContaining({
name: handlerDefinition.name,
description: handlerDefinition.description,
inputSchema: expect.any(Object), // Zod schema converts to object
handler: handlerDefinition.handler, // Check if the mocked handler was passed
}),
);
// Optionally, more specific schema checks if needed
// expect(serverInstance.registerTool).toHaveBeenCalledWith(
// expect.objectContaining({ name: handlerDefinition.name, inputSchema: handlerDefinition.jsonSchema })
// );
}
});
it('should call server.start()', () => {
expect(serverInstance.start).toHaveBeenCalledTimes(1);
});
// Add tests for signal handling if possible/necessary
// This might be harder to test reliably without actually sending signals
// it('should register signal handlers for SIGINT and SIGTERM', () => {
// // Difficult to directly test process.on('SIGINT', ...) registration
// // Could potentially spy on process.on but might be fragile
// });
// it('should call server.stop() and process.exit() on SIGINT/SIGTERM', () => {
// // Simulate signal? Requires more advanced mocking or test setup
// });
});
```
--------------------------------------------------------------------------------
/src/handlers/delete-items.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/deleteItems.ts
import { promises as fs } from 'node:fs';
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
interface McpToolResponse {
content: { type: 'text'; text: string }[];
}
export const DeleteItemsArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('An array of relative paths (files or directories) to delete.'),
})
.strict();
type DeleteItemsArgs = z.infer<typeof DeleteItemsArgsSchema>;
interface DeleteResult {
path: string;
success: boolean;
note?: string;
error?: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): DeleteItemsArgs {
try {
return DeleteItemsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Determines the error message based on the error type. */
function getErrorMessage(error: unknown, relativePath: string): string {
if (error instanceof McpError) {
return error.message;
}
if (error instanceof Error) {
const errnoError = error as NodeJS.ErrnoException;
if (errnoError.code) {
// Don't handle ENOENT here
if (errnoError.code === 'EPERM' || errnoError.code === 'EACCES') {
return `Permission denied deleting ${relativePath}`;
}
return `Failed to delete ${relativePath}: ${error.message} (code: ${errnoError.code})`;
}
return `Failed to delete ${relativePath}: ${error.message}`;
}
return `Failed to delete ${relativePath}: ${String(error)}`;
}
/** Handles errors during delete operation. Revised logic again. */
function handleDeleteError(error: unknown, relativePath: string, pathOutput: string): DeleteResult {
console.error(`[handleDeleteError] Received error for path "${relativePath}":`, JSON.stringify(error));
// Check for McpError FIRST
if (error instanceof McpError) {
const errorMessage = getErrorMessage(error, relativePath);
console.error(`[Filesystem MCP] McpError deleting ${relativePath}: ${errorMessage}`);
console.error(`[handleDeleteError] Returning failure for "${relativePath}" (McpError): ${errorMessage}`);
return { path: pathOutput, success: false, error: errorMessage };
}
// THEN check specifically for ENOENT
const isENOENT =
typeof error === 'object' &&
error !== null &&
'code' in error &&
(error as { code?: string }).code === 'ENOENT';
if (isENOENT) {
console.error(`[handleDeleteError] Detected ENOENT for "${relativePath}", returning success with note.`);
return {
path: pathOutput,
success: true,
note: 'Path not found, nothing to delete',
};
}
// For ALL OTHER errors (including permission, generic), return failure
const errorMessage = getErrorMessage(error, relativePath);
console.error(`[Filesystem MCP] Other error deleting ${relativePath}: ${errorMessage}`);
console.error(`[handleDeleteError] Returning failure for "${relativePath}" (Other Error): ${errorMessage}`);
return { path: pathOutput, success: false, error: errorMessage };
}
/** Processes the deletion of a single item. */
async function processSingleDeleteOperation(relativePath: string): Promise<DeleteResult> {
const pathOutput = relativePath.replaceAll('\\', '/');
try {
const targetPath = resolvePath(relativePath);
if (targetPath === PROJECT_ROOT) {
throw new McpError(ErrorCode.InvalidRequest, 'Deleting the project root is not allowed.');
}
await fs.rm(targetPath, { recursive: true, force: false });
return { path: pathOutput, success: true };
} catch (error: unknown) {
// This catch block will now correctly pass McpError or other errors to handleDeleteError
return handleDeleteError(error, relativePath, pathOutput);
}
}
/** Processes results from Promise.allSettled. Exported for testing. */
export function processSettledResults( // Add export
results: PromiseSettledResult<DeleteResult>[],
originalPaths: string[],
): DeleteResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
if (result.status === 'fulfilled') {
return result.value;
} else {
// This case should ideally be less frequent now as errors are handled within safeProcessSingleDeleteOperation
console.error(`[processSettledResults] Unexpected rejection for ${originalPath}:`, result.reason);
// Pass rejection reason to the error handler
return handleDeleteError(result.reason, originalPath, pathOutput);
}
});
}
/** Main handler function */
const handleDeleteItemsFunc = async (args: unknown): Promise<McpToolResponse> => {
const { paths: pathsToDelete } = parseAndValidateArgs(args);
const safeProcessSingleDeleteOperation = async (relativePath: string): Promise<DeleteResult> => {
const pathOutput = relativePath.replaceAll('\\', '/');
try {
// Call the core logic which might return a DeleteResult or throw
return await processSingleDeleteOperation(relativePath);
} catch (error) {
// Catch errors thrown *before* the try block in processSingleDeleteOperation (like resolvePath)
// or unexpected errors within it not returning a DeleteResult.
return handleDeleteError(error, relativePath, pathOutput);
}
};
const deletePromises = pathsToDelete.map(safeProcessSingleDeleteOperation);
const settledResults = await Promise.allSettled(deletePromises);
const outputResults = processSettledResults(settledResults, pathsToDelete);
// Sort results by original path order for predictability
const originalIndexMap = new Map(pathsToDelete.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, null, 2) }],
};
};
// Export the complete tool definition
export const deleteItemsToolDefinition = {
name: 'delete_items',
description: 'Delete multiple specified files or directories.',
inputSchema: DeleteItemsArgsSchema,
handler: handleDeleteItemsFunc,
};
```
--------------------------------------------------------------------------------
/src/handlers/read-content.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/readContent.ts
import { promises as fs, type Stats } from 'node:fs'; // Import Stats
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath } from '../utils/path-utils.js';
// --- Types ---
interface McpToolResponse {
content: { type: 'text'; text: string }[];
}
export const ReadContentArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('Array of relative file paths to read.'),
start_line: z
.number()
.int()
.min(1)
.optional()
.describe('Optional 1-based starting line number'),
end_line: z.number().int().min(1).optional().describe('Optional 1-based ending line number'),
format: z
.enum(['raw', 'lines'])
.default('lines')
.describe('Output format - "raw" for plain text, "lines" for line objects'),
})
.strict();
type ReadContentArgs = z.infer<typeof ReadContentArgsSchema>;
interface ReadResult {
path: string;
content?: string | { lineNumber: number; content: string }[];
error?: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): ReadContentArgs {
try {
return ReadContentArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(ErrorCode.InvalidParams, 'Argument validation failed');
}
}
/** Handles filesystem errors during file read or stat. */
interface FileReadErrorOptions {
pathOutput: string;
relativePath?: string;
targetPath?: string;
}
function getBasicFsErrorMessage(fsError: unknown): string {
return `Filesystem error: ${fsError instanceof Error ? fsError.message : String(fsError)}`;
}
function getSpecificFsErrorMessage(
code: string,
relativePath?: string,
targetPath?: string,
): string | undefined {
switch (code) {
case 'ENOENT': {
return targetPath
? `File not found at resolved path '${targetPath}'${relativePath ? ` (from relative path '${relativePath}')` : ''}`
: 'File not found';
}
case 'EISDIR': {
return relativePath
? `Path is a directory, not a file: ${relativePath}`
: 'Path is a directory, not a file';
}
case 'EACCES':
case 'EPERM': {
return relativePath
? `Permission denied reading file: ${relativePath}`
: 'Permission denied reading file';
}
default: {
return undefined;
}
}
}
function getFsErrorMessage(fsError: unknown, relativePath?: string, targetPath?: string): string {
if (!fsError || typeof fsError !== 'object' || !('code' in fsError)) {
return getBasicFsErrorMessage(fsError);
}
const specificMessage = getSpecificFsErrorMessage(String(fsError.code), relativePath, targetPath);
return specificMessage || getBasicFsErrorMessage(fsError);
}
function handleFileReadFsError(fsError: unknown, options: FileReadErrorOptions): ReadResult {
const { pathOutput, relativePath, targetPath } = options;
const errorMessage = getFsErrorMessage(fsError, relativePath, targetPath);
return { path: pathOutput, error: errorMessage };
}
/** Handles errors during path resolution. */
function handlePathResolveError(
resolveError: unknown,
_relativePath: string,
pathOutput: string,
): ReadResult {
const errorMessage = resolveError instanceof Error ? resolveError.message : String(resolveError);
// Error logged via McpError
return { path: pathOutput, error: `Error resolving path: ${errorMessage}` };
}
/** Processes the reading of a single file. */
interface ReadOperationOptions {
startLine?: number | undefined;
endLine?: number | undefined;
format?: 'raw' | 'lines';
}
async function processSingleReadOperation(
_relativePath: string,
options: ReadOperationOptions = {},
): Promise<ReadResult> {
const { startLine, endLine, format } = options;
const pathOutput = _relativePath.replaceAll('\\', '/');
let targetPath = '';
try {
targetPath = resolvePath(_relativePath);
try {
const stats: Stats = await fs.stat(targetPath); // Explicitly type Stats
if (!stats.isFile()) {
return {
path: pathOutput,
error: `Path is not a regular file: ${_relativePath}`,
};
}
if (startLine !== undefined || endLine !== undefined) {
// Read file line by line when line range is specified
const fileContent = await fs.readFile(targetPath, 'utf8');
const lines = fileContent.split('\n');
const start = startLine ? Math.min(startLine - 1, lines.length) : 0;
const end = endLine ? Math.min(endLine, lines.length) : lines.length;
const filteredLines = lines.slice(start, end);
const content =
format === 'raw'
? filteredLines.join('\n')
: filteredLines.map((line, i) => ({
lineNumber: start + i + 1,
content: line,
}));
return { path: pathOutput, content };
} else {
// Read entire file when no line range specified
const content = await fs.readFile(targetPath, 'utf8');
return { path: pathOutput, content: content };
}
} catch (fsError: unknown) {
return handleFileReadFsError(fsError, {
pathOutput,
relativePath: _relativePath,
targetPath,
});
}
} catch (resolveError: unknown) {
return handlePathResolveError(resolveError, _relativePath, pathOutput);
}
}
/** Processes results from Promise.allSettled. */
function processSettledResults(
results: PromiseSettledResult<ReadResult>[],
originalPaths: string[],
): ReadResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
return result.status === 'fulfilled'
? result.value
: {
path: pathOutput,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
};
});
}
/** Main handler function */
const handleReadContentFunc = async (args: unknown): Promise<McpToolResponse> => {
const { paths: relativePaths, start_line, end_line, format } = parseAndValidateArgs(args);
const readPromises = relativePaths.map((path) =>
processSingleReadOperation(path, { startLine: start_line, endLine: end_line, format }),
);
const settledResults = await Promise.allSettled(readPromises);
const outputContents = processSettledResults(settledResults, relativePaths);
// Sort results by original path order for predictability
const originalIndexMap = new Map(relativePaths.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputContents.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputContents, undefined, 2) }],
};
};
// Export the complete tool definition
export const readContentToolDefinition = {
name: 'read_content',
description: 'Read content from multiple specified files.',
inputSchema: ReadContentArgsSchema,
handler: handleReadContentFunc,
};
```
--------------------------------------------------------------------------------
/memory-bank/systemPatterns.md:
--------------------------------------------------------------------------------
```markdown
<!-- Version: 4.6 | Last Updated: 2025-07-04 | Updated By: Sylph -->
# System Patterns: Filesystem MCP Server
## 1. Architecture Overview
The Filesystem MCP server is a standalone Node.js application designed to run as
a child process, communicating with its parent (the AI agent host) via standard
input/output (stdio) using the Model Context Protocol (MCP).
```mermaid
graph LR
A[Agent Host Environment] -- MCP over Stdio --> B(Filesystem MCP Server);
B -- Node.js fs/path/glob --> C[User Filesystem (Project Root)];
C -- Results/Data --> B;
B -- MCP over Stdio --> A;
```
## 2. Key Technical Decisions & Patterns
- **MCP SDK Usage:** Leverages the `@modelcontextprotocol/sdk` for handling MCP
communication (request parsing, response formatting, error handling). This
standardizes interaction and reduces boilerplate code.
- **Stdio Transport:** Uses `StdioServerTransport` from the SDK for
communication, suitable for running as a managed child process.
- **Asynchronous Operations:** All filesystem interactions and request handling
are implemented using `async/await` and Node.js's promise-based `fs` module
(`fs.promises`) for non-blocking I/O.
- **Strict Path Resolution:** A dedicated `resolvePath` function is used for
_every_ path received from the agent.
- It normalizes the path.
- It resolves the path relative to the server process's current working
directory (`process.cwd()`), which is treated as the `PROJECT_ROOT`.
**Crucially, this requires the process launching the server (e.g., the agent
host) to set the correct `cwd` for the target project.**
- It explicitly checks if the resolved absolute path still starts with the
`PROJECT_ROOT` absolute path to prevent path traversal vulnerabilities
(e.g., `../../sensitive-file`).
- It rejects absolute paths provided by the agent.
- **Enhanced Error Reporting:** Throws `McpError` with detailed messages on
failure, including the original path, resolved path (if applicable), and
project root to aid debugging. Includes console logging for diagnostics.
- **Zod for Schemas & Validation:** Uses `zod` library to define input schemas
for tools and perform robust validation within each handler. JSON schemas for
MCP listing are generated from Zod schemas.
- **Tool Definition Aggregation:** Tool definitions (name, description, Zod
schema, handler function) are defined in their respective handler files and
aggregated in `src/handlers/index.ts` for registration in `src/index.ts`.
- **Description Updates:** Descriptions (e.g., for `write_content`, `apply_diff`) are updated based on user feedback and best practices.
- **`apply_diff` Logic:**
- Processes multiple diff blocks per file, applying them sequentially from bottom-to-top based on `start_line` to minimize line number conflicts.
- Verifies that the content at the specified `start_line`/`end_line` exactly matches the `search` block before applying the `replace` block.
- Ensures atomicity at the file level: if any block fails (e.g., content mismatch, invalid lines), the entire file's changes are discarded.
- Returns detailed success/failure status per file, including context on error.
- **Error Handling:**
- Uses `try...catch` blocks within each tool handler.
- Catches specific Node.js filesystem errors (like `ENOENT`, `EPERM`,
`EACCES`) and maps them to appropriate MCP error codes (`InvalidRequest`) or returns detailed error messages in the result object.
- **Enhanced `ENOENT` Reporting:** Specifically in `readContent.ts`, `ENOENT` errors now include the resolved path, relative path, and project root in the returned error message for better context.
- Uses custom `McpError` objects for standardized error reporting back to the
agent (including enhanced details from `resolvePath`).
- Logs unexpected errors to the server's console (`stderr`) for debugging.
- **Glob for Listing/Searching:** Uses the `glob` library for flexible and
powerful file listing and searching based on glob patterns, including
recursive operations and stat retrieval. Careful handling of `glob`'s
different output types based on options (`string[]`, `Path[]`, `Path[]` with
`stats`) is implemented.
- **TypeScript:** Provides static typing for better code maintainability, early
error detection, and improved developer experience. Uses ES module syntax
(`import`/`export`).
- **Dockerfile:** Uses a multi-stage build. The first stage (`deps`) installs _only_ production dependencies. The final stage copies `node_modules` and `package.json` from the `deps` stage, and copies the pre-built `build/` directory from the CI artifact context. This avoids rebuilding the project inside Docker and keeps the final image smaller.
- **CI/CD (GitHub Actions - Single Workflow):**
- A single workflow file (`.github/workflows/publish.yml`) handles both CI checks and releases.
- **Triggers:** Runs on pushes to the `main` branch and pushes of tags matching `v*.*.*`.
- **Conditional Logic:**
- The `build` job runs on both triggers but _only uploads artifacts_ (including `build/`, `package.json`, `package-lock.json`, `Dockerfile`, etc.) when triggered by a tag push.
- The `publish-npm`, `publish-docker`, and `create-release` jobs depend on the `build` job but run _only_ when triggered by a version tag push.
- **Structure & Artifact Handling:**
- `build`: Checks out, installs, builds. Archives and uploads artifacts _if_ it's a tag push. Outputs version and archive filename.
- `publish-npm`: Needs `build`. Downloads artifact, extracts using correct filename (`build-artifacts.tar.gz`), publishes to npm.
- `publish-docker`: Needs `build`. Downloads artifact, extracts using correct filename, includes diagnostic `ls -la` steps, sets up Docker, builds (using pre-built code from artifact), and pushes image.
- `create-release`: Needs `build`, `publish-npm`, `publish-docker`. Downloads artifact, extracts using correct filename, creates GitHub Release.
- This simplified structure avoids workflow interdependencies while still preventing duplicate publishing actions and unnecessary artifact uploads during CI checks on `main`. Includes diagnostic steps for debugging artifact issues.
## 3. Component Relationships
- **`index.ts`:** Main entry point. Sets up the MCP server instance, defines
tool schemas, registers request handlers, and starts the server connection.
- **`Server` (from SDK):** Core MCP server class handling protocol logic.
- **`StdioServerTransport` (from SDK):** Handles reading/writing MCP messages
via stdio.
- **Tool Handler Functions (`handleListFiles`, `handleEditFile`, etc.):**
Contain the specific logic for each tool, including Zod argument validation,
path resolution, filesystem interaction, and result formatting (including enhanced error details).
- **`resolvePath` Helper:** Centralized security function for path validation with enhanced error reporting.
- **`formatStats` Helper:** Utility to create a consistent stats object
structure.
- **Node.js Modules (`fs`, `path`):** Used for actual filesystem operations and
path manipulation.
- **`glob` Library:** Used for pattern-based file searching and listing.
- **`zod` Library:** Used for defining and validating tool input schemas.
- **`Dockerfile`:** Defines the multi-stage build process for the production Docker image.
- **`.github/workflows/publish.yml`:** Defines the combined CI check and release process using conditional logic within a single workflow.
```
--------------------------------------------------------------------------------
/src/handlers/create-directories.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/createDirectories.ts
import { promises as fs, type Stats } from 'node:fs'; // Import Stats type
import { z } from 'zod';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { resolvePath, PROJECT_ROOT } from '../utils/path-utils.js';
// --- Types ---
interface McpToolResponse {
content: { type: 'text'; text: string }[];
}
export const CreateDirsArgsSchema = z
.object({
paths: z
.array(z.string())
.min(1, { message: 'Paths array cannot be empty' })
.describe('An array of relative directory paths to create.'),
})
.strict();
type CreateDirsArgs = z.infer<typeof CreateDirsArgsSchema>;
interface CreateDirResult {
path: string;
success: boolean;
note?: string;
error?: string; // Added error field back
resolvedPath?: string;
}
// --- Define Dependencies Interface ---
export interface CreateDirsDeps {
mkdir: typeof fs.mkdir;
stat: typeof fs.stat;
resolvePath: typeof resolvePath;
PROJECT_ROOT: string;
}
// --- Helper Functions ---
/** Parses and validates the input arguments. */
function parseAndValidateArgs(args: unknown): CreateDirsArgs {
try {
return CreateDirsArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
// Throw a more specific error for non-Zod issues during parsing
throw new McpError(
ErrorCode.InvalidParams,
`Argument validation failed: ${error instanceof Error ? error.message : String(error)}`,
);
}
}
/** Handles EEXIST errors by checking if the existing path is a directory. */
async function handleEexistError(
targetPath: string,
pathOutput: string,
deps: CreateDirsDeps, // Added deps
): Promise<CreateDirResult> {
try {
const stats: Stats = await deps.stat(targetPath); // Use deps.stat
return stats.isDirectory()
? {
path: pathOutput,
success: true,
note: 'Directory already exists',
resolvedPath: targetPath,
}
: {
path: pathOutput,
success: false,
error: 'Path exists but is not a directory',
resolvedPath: targetPath,
};
} catch (statError: unknown) {
// Error logged via McpError
return {
path: pathOutput,
success: false,
error: `Failed to stat existing path: ${statError instanceof Error ? statError.message : String(statError)}`,
resolvedPath: targetPath,
};
}
}
/** Handles general errors during directory creation. */
function handleDirectoryCreationError(
error: unknown,
pathOutput: string,
targetPath: string,
// No deps needed here as it only formats errors
): CreateDirResult {
// Handle McpError specifically (likely from resolvePath)
if (error instanceof McpError) {
return {
path: pathOutput,
success: false,
error: error.message, // Use the McpError message directly
resolvedPath: targetPath || 'Resolution failed', // targetPath might be empty if resolvePath failed early
};
}
// Handle filesystem errors (like EPERM, EACCES, etc.)
const errorMessage = error instanceof Error ? error.message : String(error);
let specificError = `Failed to create directory: ${errorMessage}`;
if (
error &&
typeof error === 'object' &&
'code' in error &&
(error.code === 'EPERM' || error.code === 'EACCES')
) {
specificError = `Permission denied creating directory: ${errorMessage}`;
}
// Note: EEXIST is handled separately by handleEexistError
// Error logged via McpError
return {
path: pathOutput,
success: false,
error: specificError,
resolvedPath: targetPath || 'Resolution failed',
};
}
/** Processes the creation of a single directory. */
async function processSingleDirectoryCreation(
relativePath: string, // Corrected signature: relativePath first
deps: CreateDirsDeps, // Corrected signature: deps second
): Promise<CreateDirResult> {
const pathOutput = relativePath.replaceAll('\\', '/'); // Normalize for output consistency
let targetPath = '';
try {
targetPath = deps.resolvePath(relativePath); // Use deps.resolvePath
if (targetPath === deps.PROJECT_ROOT) {
// Use deps.PROJECT_ROOT
return {
path: pathOutput,
success: false,
error: 'Creating the project root is not allowed.',
resolvedPath: targetPath,
};
}
await deps.mkdir(targetPath, { recursive: true }); // Use deps.mkdir
return { path: pathOutput, success: true, resolvedPath: targetPath };
} catch (error: unknown) {
if (error && typeof error === 'object' && 'code' in error && error.code === 'EEXIST') {
// Pass deps to handleEexistError
return await handleEexistError(targetPath, pathOutput, deps);
}
// Pass potential McpError from resolvePath or other errors
return handleDirectoryCreationError(error, pathOutput, targetPath);
}
}
/** Processes results from Promise.allSettled. */
export function processSettledResults( // Keep export for testing
results: PromiseSettledResult<CreateDirResult>[],
originalPaths: string[],
): CreateDirResult[] {
return results.map((result, index) => {
const originalPath = originalPaths[index] ?? 'unknown_path';
const pathOutput = originalPath.replaceAll('\\', '/');
return result.status === 'fulfilled'
? result.value
: {
path: pathOutput,
success: false,
error: `Unexpected error during processing: ${result.reason instanceof Error ? result.reason.message : String(result.reason)}`,
resolvedPath: 'Unknown on rejection',
};
});
}
/** Main handler function (internal, accepts dependencies) */
// Export for testing
export const handleCreateDirectoriesInternal = async (
args: unknown,
deps: CreateDirsDeps,
): Promise<McpToolResponse> => {
let pathsToCreate: string[];
try {
// Validate arguments first
const validatedArgs = parseAndValidateArgs(args);
pathsToCreate = validatedArgs.paths;
} catch (error) {
// If validation fails, re-throw the McpError from parseAndValidateArgs
if (error instanceof McpError) {
throw error;
}
// Wrap unexpected validation errors
throw new McpError(
ErrorCode.InvalidParams,
`Unexpected error during argument validation: ${error instanceof Error ? error.message : String(error)}`,
);
}
// Proceed with validated paths
const creationPromises = pathsToCreate.map((p) => processSingleDirectoryCreation(p, deps));
const settledResults = await Promise.allSettled(creationPromises);
const outputResults = processSettledResults(settledResults, pathsToCreate);
// Sort results by original path order for predictability
const originalIndexMap = new Map(pathsToCreate.map((p, i) => [p.replaceAll('\\', '/'), i]));
outputResults.sort((a, b) => {
const indexA = originalIndexMap.get(a.path) ?? Infinity;
const indexB = originalIndexMap.get(b.path) ?? Infinity;
return indexA - indexB;
});
return {
content: [{ type: 'text', text: JSON.stringify(outputResults, undefined, 2) }],
};
};
// Export the complete tool definition using the production handler
export const createDirectoriesToolDefinition = {
name: 'create_directories',
description: 'Create multiple specified directories (including intermediate ones).',
inputSchema: CreateDirsArgsSchema,
handler: (args: unknown): Promise<McpToolResponse> => {
// Production handler provides real dependencies
const productionDeps: CreateDirsDeps = {
mkdir: fs.mkdir,
stat: fs.stat,
resolvePath: resolvePath,
PROJECT_ROOT: PROJECT_ROOT,
};
return handleCreateDirectoriesInternal(args, productionDeps);
},
};
```
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
```yaml
name: CI, Publish & Release
on:
push:
branches:
- main # Trigger on push to main branch
tags:
- 'v*.*.*' # Trigger on push of version tags (e.g., v0.5.5)
pull_request:
branches:
- main # Trigger on PR to main branch
jobs:
validate:
name: Validate Code Quality
runs-on: ubuntu-latest
permissions: # Added permissions
actions: read
contents: read
security-events: write # Required for CodeQL results
steps:
- name: Checkout repository
uses: actions/[email protected]
# Initializes the CodeQL tools for scanning. # Added CodeQL init
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: typescript # Specify the language to analyze
# Optional: config-file: './.github/codeql/codeql-config.yml'
# Optional: queries: '+security-extended'
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: latest # Use the latest pnpm version
- name: Set up Node.js
uses: actions/[email protected]
with:
node-version: 'lts/*' # Use latest LTS
cache: 'pnpm' # Let pnpm handle caching via pnpm/action-setup
- name: Install dependencies # Correct install step
run: pnpm install --frozen-lockfile
- name: Check for vulnerabilities # Added pnpm audit
run: pnpm audit --prod # Check only production dependencies
- name: Check Formatting
run: pnpm run check-format # Fails job if check fails
- name: Lint Code
run: pnpm run lint # Fails job if lint fails
- name: Run Tests and Check Coverage
run: pnpm run test:cov # Fails job if tests fail or coverage threshold not met
- name: Upload coverage to Codecov
uses: codecov/[email protected] # Use Codecov action with fixed version
with:
token: ${{ secrets.CODECOV_TOKEN }} # Use Codecov token
files: ./coverage/lcov.info # Specify LCOV file path
fail_ci_if_error: true # Optional: fail CI if upload error
- name: Upload test results to Codecov
if: ${{ !cancelled() }}
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
# No file specified, action defaults to common patterns like test-report.junit.xml
- name: Perform CodeQL Analysis # Added CodeQL analyze
uses: github/codeql-action/analyze@v3
- name: Upload coverage reports # Kept artifact upload
uses: actions/[email protected]
with:
name: coverage-report
path: coverage/ # Upload the whole coverage directory
build-archive:
name: Build and Archive Artifacts
needs: validate # Depends on successful validation
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v') # Only run for tags
outputs: # Define outputs for the release job
version: ${{ steps.get_version.outputs.version }}
artifact_path: ${{ steps.archive_build.outputs.artifact_path }}
# Removed incorrect permissions block from here
steps:
- name: Checkout repository
uses: actions/[email protected]
# Removed incorrect CodeQL init from here
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: latest
- name: Set up Node.js
uses: actions/[email protected]
with:
node-version: 'lts/*' # Use latest LTS
registry-url: 'https://registry.npmjs.org/' # For pnpm publish
cache: 'pnpm' # Let pnpm handle caching
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build project
run: pnpm run build
- name: Get package version from tag
id: get_version
run: |
VERSION=$(echo "${{ github.ref }}" | sed 's#refs/tags/##')
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Archive build artifacts for release
id: archive_build
run: |
ARTIFACT_NAME="pdf-reader-mcp-${{ steps.get_version.outputs.version }}.tar.gz"
tar -czf $ARTIFACT_NAME dist package.json README.md LICENSE CHANGELOG.md
echo "artifact_path=$ARTIFACT_NAME" >> $GITHUB_OUTPUT
- name: Upload build artifact for release job
uses: actions/[email protected]
with:
name: release-artifact
path: ${{ steps.archive_build.outputs.artifact_path }}
# Publish steps moved to parallel jobs below
publish-npm:
name: Publish to NPM
needs: build-archive # Depends on build-archive completion
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v') # Only run for tags
steps:
- name: Checkout repository
uses: actions/[email protected]
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: latest
- name: Set up Node.js for NPM
uses: actions/[email protected]
with:
node-version: 'lts/*'
registry-url: 'https://registry.npmjs.org/'
cache: 'pnpm'
# No need to install dependencies again if publish doesn't need them
# If pnpm publish needs package.json, it's checked out
- name: Install all dependencies for prepublishOnly script
run: pnpm install --frozen-lockfile
- name: Publish to npm
run: pnpm changeset publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
publish-docker:
name: Publish to Docker Hub
needs: build-archive # Depends on build-archive completion
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v') # Only run for tags
steps:
- name: Checkout repository
uses: actions/[email protected]
- name: Set up QEMU
uses: docker/[email protected]
- name: Set up Docker Buildx
uses: docker/[email protected]
- name: Log in to Docker Hub
uses: docker/[email protected]
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/[email protected]
with:
images: sylphlab/pdf-reader-mcp
# Use version from the build-archive job output
tags: |
type=semver,pattern={{version}},value=${{ needs.build-archive.outputs.version }}
type=semver,pattern={{major}}.{{minor}},value=${{ needs.build-archive.outputs.version }}
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
- name: Build and push Docker image
uses: docker/[email protected]
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
release:
name: Create GitHub Release
needs: [publish-npm, publish-docker] # Depends on successful parallel publishes
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v') # Only run for tags
permissions:
contents: write # Need permission to create releases and release notes
steps:
- name: Download build artifact
uses: actions/[email protected]
with:
name: release-artifact
# No path specified, downloads to current directory
- name: Create GitHub Release
uses: softprops/[email protected]
with:
tag_name: ${{ github.ref_name }}
name: Release ${{ github.ref_name }}
generate_release_notes: true # Auto-generate release notes from commits
files: ${{ needs.build-archive.outputs.artifact_path }} # Attach the artifact archive from build-archive job
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
--------------------------------------------------------------------------------
/__tests__/utils/path-utils.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest'; // Removed vi, beforeEach
import path from 'node:path';
import { ErrorCode } from '@modelcontextprotocol/sdk/types.js';
// Import the functions and constant to test
import {
resolvePath,
PROJECT_ROOT, // Import the constant again
} from '../../src/utils/path-utils.ts';
// Define the mock root path for testing overrides
const MOCK_PROJECT_ROOT_OVERRIDE = path.resolve('/mock/project/root/override');
const ACTUAL_PROJECT_ROOT = process.cwd(); // Get the actual root for comparison
describe('pathUtils', () => {
it('should have PROJECT_ROOT set to the actual process.cwd()', () => {
// We can no longer easily mock this at the module level with current setup
// So we test that it equals the actual cwd
expect(PROJECT_ROOT).toBe(ACTUAL_PROJECT_ROOT);
});
describe('resolvePath', () => {
// Test using the override parameter to simulate different roots
it('should resolve a valid relative path using override root', () => {
const userPath = 'src/file.ts';
const expectedPath = path.resolve(MOCK_PROJECT_ROOT_OVERRIDE, userPath);
// Pass the override root as the second argument
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(expectedPath);
});
it('should resolve a valid relative path using default PROJECT_ROOT when override is not provided', () => {
const userPath = 'src/file.ts';
const expectedPath = path.resolve(ACTUAL_PROJECT_ROOT, userPath);
expect(resolvePath(userPath)).toBe(expectedPath); // No override
});
it('should resolve a relative path with "." correctly', () => {
const userPath = './src/./file.ts';
const expectedPath = path.resolve(MOCK_PROJECT_ROOT_OVERRIDE, 'src/file.ts');
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(expectedPath);
});
it('should resolve a relative path with "." correctly using default root', () => {
const userPath = './src/./file.ts';
const expectedPath = path.resolve(ACTUAL_PROJECT_ROOT, 'src/file.ts');
expect(resolvePath(userPath)).toBe(expectedPath);
});
it('should resolve a relative path with ".." correctly if it stays within root', () => {
const userPath = 'src/../dist/bundle.js';
const expectedPath = path.resolve(MOCK_PROJECT_ROOT_OVERRIDE, 'dist/bundle.js');
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(expectedPath);
});
it('should resolve a relative path with ".." correctly using default root', () => {
const userPath = 'src/../dist/bundle.js';
const expectedPath = path.resolve(ACTUAL_PROJECT_ROOT, 'dist/bundle.js');
expect(resolvePath(userPath)).toBe(expectedPath);
});
it('should throw McpError for absolute paths (posix)', () => {
const userPath = '/etc/passwd';
// Test with override, should still fail
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidParams,
message: 'MCP error -32602: Absolute paths are not allowed: /etc/passwd',
data: undefined,
}),
);
// Test without override
expect(() => resolvePath(userPath)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidParams,
message: 'MCP error -32602: Absolute paths are not allowed: /etc/passwd',
data: undefined,
}),
);
});
it('should throw McpError for absolute paths (windows)', () => {
const userPath = String.raw`C:\Windows\System32`;
const normalizedPath = path.normalize(userPath);
// Test with override
expect(() => resolvePath(normalizedPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({
name: 'McpError',
code: expect.any(Number),
message: expect.stringContaining('Absolute paths are not allowed'),
}),
);
expect(() => resolvePath(normalizedPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
/Absolute paths are not allowed/,
);
expect(() => resolvePath(normalizedPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({ code: ErrorCode.InvalidParams }),
);
// Test without override
expect(() => resolvePath(normalizedPath)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidParams,
message: expect.stringContaining('Absolute paths are not allowed'),
}),
);
});
it('should throw McpError for path traversal attempts (using ..)', () => {
const userPath = '../outside/file';
// Test with override
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidRequest,
message: 'MCP error -32600: Path traversal detected: ../outside/file',
data: undefined,
}),
);
// Test without override
expect(() => resolvePath(userPath)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidRequest,
message: 'MCP error -32600: Path traversal detected: ../outside/file',
data: undefined,
}),
);
});
it('should throw McpError for path traversal attempts (using .. multiple times)', () => {
const userPath = '../../../../outside/file';
// Test with override
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidRequest,
message: 'MCP error -32600: Path traversal detected: ../../../../outside/file',
data: undefined,
}),
);
// Test without override
expect(() => resolvePath(userPath)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidRequest,
message: 'MCP error -32600: Path traversal detected: ../../../../outside/file',
data: undefined,
}),
);
});
it('should throw McpError if the input path is not a string', () => {
const userPath: any = 123; // intentionally testing invalid input
// Test with override (should still fail type check before override matters)
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({
name: 'McpError',
code: expect.any(Number),
message: expect.stringContaining('Path must be a string'),
}),
);
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
/Path must be a string/,
);
expect(() => resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toThrow(
expect.objectContaining({ code: ErrorCode.InvalidParams }),
);
// Test without override
expect(() => resolvePath(userPath)).toThrow(
expect.objectContaining({
name: 'McpError',
code: ErrorCode.InvalidParams,
message: expect.stringContaining('Path must be a string'),
}),
);
});
it('should handle paths with trailing slashes', () => {
const userPath = 'src/subdir/';
const expectedPathOverride = path.resolve(MOCK_PROJECT_ROOT_OVERRIDE, 'src/subdir');
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(expectedPathOverride);
});
it('should handle paths with trailing slashes using default root', () => {
const userPath = 'src/subdir/';
const expectedPath = path.resolve(ACTUAL_PROJECT_ROOT, 'src/subdir');
expect(resolvePath(userPath)).toBe(expectedPath);
});
it('should handle empty string path', () => {
const userPath = '';
// Test with override
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(MOCK_PROJECT_ROOT_OVERRIDE);
});
it('should handle empty string path using default root', () => {
const userPath = '';
const expectedPath = ACTUAL_PROJECT_ROOT; // Resolves to the root itself
expect(resolvePath(userPath)).toBe(expectedPath);
});
it('should handle "." path', () => {
const userPath = '.';
// Test with override
expect(resolvePath(userPath, MOCK_PROJECT_ROOT_OVERRIDE)).toBe(MOCK_PROJECT_ROOT_OVERRIDE);
});
it('should handle "." path using default root', () => {
const userPath = '.';
const expectedPath = ACTUAL_PROJECT_ROOT; // Resolves to the root itself
expect(resolvePath(userPath)).toBe(expectedPath);
});
});
});
```
--------------------------------------------------------------------------------
/__tests__/handlers/replace-content.success.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi, type Mock } from 'vitest';
import * as fsPromises from 'node:fs/promises';
import path from 'node:path';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { createTemporaryFilesystem, cleanupTemporaryFilesystem } from '../test-utils.js';
// Mock pathUtils BEFORE importing the handler
const mockResolvePath = vi.fn((path: string) => path);
vi.mock('../../src/utils/path-utils.js', () => ({
PROJECT_ROOT: 'mocked/project/root', // Keep simple for now
resolvePath: mockResolvePath,
}));
// Import the internal function, deps type, and exported helper
const { handleReplaceContentInternal } = await import('../../src/handlers/replace-content.js');
import type { ReplaceContentDeps } from '../../src/handlers/replace-content.js'; // Import type separately
// Define the initial structure
const initialTestStructure = {
'fileA.txt': 'Hello world, world!',
'fileB.log': 'Error: world not found.\nWarning: world might be deprecated.',
'noReplace.txt': 'Nothing to see here.',
dir1: {
'fileC.txt': 'Another world inside dir1.',
},
};
let tempRootDir: string;
describe('handleReplaceContent Success Scenarios', () => {
let mockDependencies: ReplaceContentDeps;
let mockReadFile: Mock;
let mockWriteFile: Mock;
let mockStat: Mock;
beforeEach(async () => {
tempRootDir = await createTemporaryFilesystem(initialTestStructure);
// Mock implementations for dependencies
const actualFsPromises = await vi.importActual<typeof fsPromises>('fs/promises');
mockReadFile = vi.fn().mockImplementation(actualFsPromises.readFile);
mockWriteFile = vi.fn().mockImplementation(actualFsPromises.writeFile);
mockStat = vi.fn().mockImplementation(actualFsPromises.stat);
// Configure the mock resolvePath
mockResolvePath.mockImplementation((relativePath: string): string => {
if (path.isAbsolute(relativePath)) {
throw new McpError(
ErrorCode.InvalidParams,
`Mocked Absolute paths are not allowed for ${relativePath}`,
);
}
const absolutePath = path.resolve(tempRootDir, relativePath);
if (!absolutePath.startsWith(tempRootDir)) {
throw new McpError(
ErrorCode.InvalidRequest,
`Mocked Path traversal detected for ${relativePath}`,
);
}
return absolutePath;
});
// Assign mock dependencies
mockDependencies = {
readFile: mockReadFile,
writeFile: mockWriteFile,
stat: mockStat,
resolvePath: mockResolvePath, // Use the vi.fn mock directly
};
});
afterEach(async () => {
await cleanupTemporaryFilesystem(tempRootDir);
vi.restoreAllMocks(); // Use restoreAllMocks to reset spies/mocks
});
it('should replace simple text in specified files', async () => {
const request = {
paths: ['fileA.txt', 'fileB.log'],
operations: [{ search: 'world', replace: 'planet' }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
// Updated to access data directly
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(2);
expect(resultsArray?.[0]).toEqual({
file: 'fileA.txt',
modified: true,
replacements: 2,
});
expect(resultsArray?.[1]).toEqual({
file: 'fileB.log',
modified: true,
replacements: 2,
});
const contentA = await fsPromises.readFile(path.join(tempRootDir, 'fileA.txt'), 'utf8');
expect(contentA).toBe('Hello planet, planet!');
const contentB = await fsPromises.readFile(path.join(tempRootDir, 'fileB.log'), 'utf8');
expect(contentB).toBe('Error: planet not found.\nWarning: planet might be deprecated.');
});
it('should handle multiple operations sequentially', async () => {
const request = {
paths: ['fileA.txt'],
operations: [
{ search: 'world', replace: 'galaxy' },
{ search: 'galaxy', replace: 'universe' },
],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
// Replacements are counted per operation on the state *before* that operation
expect(resultsArray?.[0]).toEqual({
file: 'fileA.txt',
modified: true,
replacements: 4,
}); // 2 from op1 + 2 from op2
const contentA = await fsPromises.readFile(path.join(tempRootDir, 'fileA.txt'), 'utf8');
expect(contentA).toBe('Hello universe, universe!');
});
it('should use regex for replacement', async () => {
const request = {
paths: ['fileB.log'],
operations: [{ search: '^(Error|Warning):', replace: 'Log[$1]:', use_regex: true }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
expect(resultsArray?.[0]).toEqual({
file: 'fileB.log',
modified: true,
replacements: 2,
});
const contentB = await fsPromises.readFile(path.join(tempRootDir, 'fileB.log'), 'utf8');
expect(contentB).toBe('Log[Error]: world not found.\nLog[Warning]: world might be deprecated.');
});
it('should handle case-insensitive replacement', async () => {
const request = {
paths: ['fileA.txt'],
operations: [{ search: 'hello', replace: 'Greetings', ignore_case: true }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
expect(resultsArray?.[0]).toEqual({
file: 'fileA.txt',
modified: true,
replacements: 1,
});
const contentA = await fsPromises.readFile(path.join(tempRootDir, 'fileA.txt'), 'utf8');
expect(contentA).toBe('Greetings world, world!');
});
it('should report 0 replacements if search term not found', async () => {
const request = {
paths: ['noReplace.txt'],
operations: [{ search: 'world', replace: 'planet' }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
expect(resultsArray?.[0]).toEqual({
file: 'noReplace.txt',
modified: false,
replacements: 0,
});
const content = await fsPromises.readFile(path.join(tempRootDir, 'noReplace.txt'), 'utf8');
expect(content).toBe('Nothing to see here.');
});
it('should handle replacing content in an empty file', async () => {
const emptyFileName = 'emptyFile.txt';
await fsPromises.writeFile(path.join(tempRootDir, emptyFileName), '');
const request = {
paths: [emptyFileName],
operations: [{ search: 'anything', replace: 'something' }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
expect(resultsArray?.[0]).toEqual({
file: emptyFileName,
modified: false,
replacements: 0,
});
const content = await fsPromises.readFile(path.join(tempRootDir, emptyFileName), 'utf8');
expect(content).toBe('');
});
it('should handle replacing content with an empty string (deletion)', async () => {
const request = {
paths: ['fileA.txt'],
operations: [{ search: 'world', replace: '' }],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
expect(resultsArray).toHaveLength(1);
expect(resultsArray?.[0]).toEqual({
file: 'fileA.txt',
modified: true,
replacements: 2,
});
const contentA = await fsPromises.readFile(path.join(tempRootDir, 'fileA.txt'), 'utf8');
expect(contentA).toBe('Hello , !');
});
it('should handle regex with line anchors (^ or $)', async () => {
const request = {
paths: ['fileB.log'],
operations: [
{ search: '^Error.*', replace: 'FIRST_LINE_ERROR', use_regex: true }, // Matches first line
// The second regex needs 'm' flag to match end of line, not just end of string
{
search: 'deprecated.$', // Corrected regex to only match the word at the end
replace: 'LAST_LINE_DEPRECATED',
use_regex: true,
},
],
};
const rawResult = await handleReplaceContentInternal(request, mockDependencies);
const resultsArray = rawResult.data?.results;
expect(rawResult.success).toBe(true);
expect(resultsArray).toBeDefined();
// First op replaces 1, second replaces 1 (due to multiline flag being added)
expect(resultsArray?.[0].replacements).toBe(2);
const contentB = await fsPromises.readFile(path.join(tempRootDir, 'fileB.log'), 'utf8');
// Corrected expectation based on corrected regex
expect(contentB).toBe('FIRST_LINE_ERROR\nWarning: world might be LAST_LINE_DEPRECATED');
});
});
```
--------------------------------------------------------------------------------
/src/handlers/search-files.ts:
--------------------------------------------------------------------------------
```typescript
// src/handlers/searchFiles.ts
import { promises as fsPromises } from 'node:fs';
import path from 'node:path';
import { z } from 'zod';
import { glob as globFn } from 'glob';
// Import SDK types from the correct path
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
// Import the LOCAL McpResponse type (assuming it's exported from handlers/index)
import type { McpToolResponse } from '../types/mcp-types.js';
export type LocalMcpResponse = McpToolResponse;
import {
resolvePath as resolvePathUtil,
PROJECT_ROOT as projectRootUtil,
} from '../utils/path-utils.js';
// --- Types ---
// Define a unified result type that can hold either a match or an error
interface SearchResultItem {
type: 'match' | 'error';
file: string;
line?: number;
match?: string;
context?: string[];
error?: string; // Error message
value?: null | undefined; // Explicit null/undefined for compatibility
}
// Define the structure for the final response data
export const SearchFilesArgsSchema = z
.object({
path: z
.string()
.optional()
.default('.')
.describe('Relative path of the directory to search in.'),
regex: z
.string()
.min(1, { message: 'Regex pattern cannot be empty' })
.describe('The regex pattern to search for.'),
file_pattern: z
.string()
.optional()
.default('*')
.describe("Glob pattern to filter files (e.g., '*.ts'). Defaults to all files ('*')."),
})
.strict();
type SearchFilesArgs = z.infer<typeof SearchFilesArgsSchema>;
// Type for file reading function
type ReadFileFn = {
(
path: Parameters<typeof fsPromises.readFile>[0],
options?: Parameters<typeof fsPromises.readFile>[1],
): Promise<string>;
};
export interface SearchFilesDependencies {
readFile: ReadFileFn;
glob: typeof globFn;
resolvePath: typeof resolvePathUtil;
PROJECT_ROOT: string;
pathRelative: typeof path.relative;
pathJoin: typeof path.join;
}
interface SearchFileParams {
deps: SearchFilesDependencies;
absoluteFilePath: string;
searchRegex: RegExp;
}
const CONTEXT_LINES = 2; // Number of lines before and after the match
// --- Helper Functions ---
function parseAndValidateArgs(args: unknown): SearchFilesArgs {
try {
return SearchFilesArgsSchema.parse(args);
} catch (error) {
if (error instanceof z.ZodError) {
throw new McpError(
ErrorCode.InvalidParams,
`Invalid arguments: ${error.errors.map((e) => `${e.path.join('.')} (${e.message})`).join(', ')}`,
);
}
throw new McpError(
ErrorCode.InvalidParams,
`Argument validation failed: ${error instanceof Error ? error.message : String(error)}`,
);
}
}
function compileSearchRegex(regexString: string): RegExp {
try {
let pattern = regexString;
let flags = '';
const regexFormat = /^\/(.+)\/([gimsuy]*)$/s;
const regexParts = regexFormat.exec(regexString);
if (regexParts?.[1] !== undefined) {
pattern = regexParts[1];
flags = regexParts[2] ?? '';
}
if (!flags.includes('g')) {
flags += 'g';
}
return new RegExp(pattern, flags);
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? `Invalid regex pattern: ${error.message}` : 'Invalid regex pattern';
throw new McpError(ErrorCode.InvalidParams, errorMessage);
}
}
async function findFilesToSearch(
deps: SearchFilesDependencies,
relativePath: string,
filePattern: string,
): Promise<string[]> {
const targetPath = deps.resolvePath(relativePath);
const ignorePattern = deps.pathJoin(targetPath, '**/node_modules/**').replaceAll('\\', '/');
try {
const files = await deps.glob(filePattern, {
cwd: targetPath,
nodir: true,
dot: true,
ignore: [ignorePattern],
absolute: true,
});
return files;
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : 'Unknown glob error';
// Error logged via McpError
// Throw a more specific error about glob failing
throw new McpError(
ErrorCode.InternalError,
`Failed to find files using glob in '${relativePath}': ${errorMessage}`,
);
}
}
function processFileMatch(
fileContent: string,
matchResult: RegExpExecArray,
fileRelative: string,
): SearchResultItem {
const lines = fileContent.split('\n');
const match = matchResult[0];
const matchStartIndex = matchResult.index;
const contentUpToMatch = fileContent.slice(0, Math.max(0, matchStartIndex));
const lineNumber = (contentUpToMatch.match(/\n/g) ?? []).length + 1;
const startContextLineIndex = Math.max(0, lineNumber - 1 - CONTEXT_LINES);
const endContextLineIndex = Math.min(lines.length, lineNumber + CONTEXT_LINES);
const context = lines.slice(startContextLineIndex, endContextLineIndex);
return {
type: 'match',
file: fileRelative,
line: lineNumber,
match: match,
context: context,
};
}
// Refactored to reduce complexity and return an error object
function handleFileReadError(readError: unknown, fileRelative: string): SearchResultItem | null {
// Check if it's a Node.js error object
const isNodeError = readError && typeof readError === 'object' && 'code' in readError;
// Ignore ENOENT errors silently
if (isNodeError && (readError as NodeJS.ErrnoException).code === 'ENOENT') {
return { type: 'error', file: '', value: undefined };
}
const errorMessage = readError instanceof Error ? readError.message : String(readError);
// Log appropriately
if (isNodeError) {
// Error logged via McpError
} else {
// Error logged via McpError
}
// Return the error item
return {
type: 'error',
file: fileRelative,
error: `Read/Process Error: ${String(errorMessage)}`, // Explicit String conversion
};
}
// Modified to return SearchResultItem[] which includes potential errors
async function searchFileContent(params: SearchFileParams): Promise<SearchResultItem[]> {
const { deps, absoluteFilePath, searchRegex } = params;
const fileRelative = deps.pathRelative(deps.PROJECT_ROOT, absoluteFilePath).replaceAll('\\', '/');
const fileResults: SearchResultItem[] = [];
try {
const fileContent = await deps.readFile(absoluteFilePath, 'utf8');
searchRegex.lastIndex = 0;
const matches = fileContent.matchAll(searchRegex);
for (const matchResult of matches) {
fileResults.push(processFileMatch(fileContent, matchResult, fileRelative));
}
} catch (readError: unknown) {
const errorResult = handleFileReadError(readError, fileRelative);
if (errorResult) {
fileResults.push(errorResult); // Add error to results
}
}
return fileResults;
}
/** Main handler function */
// Use the imported local McpResponse type
export const handleSearchFilesFunc = async (
deps: SearchFilesDependencies,
args: unknown,
): Promise<LocalMcpResponse> => {
// Updated response type
const {
path: relativePath,
regex: regexString,
file_pattern: filePattern,
} = parseAndValidateArgs(args);
const searchRegex = compileSearchRegex(regexString);
const allResults: SearchResultItem[] = [];
try {
const filesToSearch = await findFilesToSearch(deps, relativePath, filePattern);
const searchPromises = filesToSearch.map((absoluteFilePath) =>
searchFileContent({ deps, absoluteFilePath, searchRegex }),
);
const resultsPerFile = await Promise.all(searchPromises);
// Flatten results (which now include potential errors)
for (const fileResults of resultsPerFile) allResults.push(...fileResults);
} catch (error: unknown) {
// Errors from findFilesToSearch or Promise.all rejections (should be less likely now)
if (error instanceof McpError) throw error;
const errorMessage =
error instanceof Error ? error.message : 'An unknown error occurred during file search.';
// Error logged via McpError
// Include a general error if the whole process fails unexpectedly
allResults.push({ type: 'error', file: 'general', error: errorMessage });
// Don't throw, return the collected results including the general error
// throw new McpError(ErrorCode.InternalError, errorMessage);
}
// Return the structured data including matches and errors
return {
content: [
{
type: 'text',
text: JSON.stringify({ results: allResults }, undefined, 2),
},
],
data: {
results: allResults,
},
};
};
// --- Tool Definition ---
export const searchFilesToolDefinition = {
name: 'search_files',
description:
'Search for a regex pattern within files in a specified directory (read-only). Returns matches and any errors encountered.',
inputSchema: SearchFilesArgsSchema,
// Define output schema
outputSchema: z.object({
results: z.array(
z.object({
type: z.enum(['match', 'error']),
file: z.string(),
line: z.number().int().optional(),
match: z.string().optional(),
context: z.array(z.string()).optional(),
error: z.string().optional(),
}),
),
}),
// Use the imported local McpResponse type
handler: (args: unknown): Promise<LocalMcpResponse> => {
const deps: SearchFilesDependencies = {
readFile: async (_path, _options) => {
const encoding = typeof _options === 'string' ? _options : (_options?.encoding ?? 'utf8');
return fsPromises.readFile(_path, { encoding });
},
glob: globFn,
resolvePath: resolvePathUtil,
PROJECT_ROOT: projectRootUtil,
pathRelative: path.relative.bind(path),
pathJoin: path.join.bind(path),
};
return handleSearchFilesFunc(deps, args);
},
};
```
--------------------------------------------------------------------------------
/__tests__/handlers/read-content.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
// import * as fsPromises from 'fs/promises'; // Removed unused import
// import * as actualFsPromises from 'fs/promises'; // Removed unused import
interface FileReadResult {
path: string;
content?: string;
error?: string;
}
import path from 'node:path';
import { McpError, ErrorCode } from '@modelcontextprotocol/sdk/types.js';
import { createTemporaryFilesystem, cleanupTemporaryFilesystem } from '../test-utils.js';
// Mock pathUtils BEFORE importing the handler
// Mock pathUtils using vi.mock (hoisted)
const mockResolvePath = vi.fn((userPath: string) => {
// Default mock implementation that matches the real behavior
if (path.isAbsolute(userPath)) {
throw new McpError(ErrorCode.InvalidParams, `Absolute paths are not allowed for ${userPath}`);
}
return path.resolve('mocked/project/root', userPath);
});
vi.mock('../../src/utils/path-utils.js', () => ({
PROJECT_ROOT: 'mocked/project/root', // Keep simple for now
resolvePath: mockResolvePath,
}));
// Import the handler AFTER the mock - fixed to use kebab-case
const { readContentToolDefinition } = await import('../../src/handlers/read-content.js');
// Define the structure for the temporary filesystem
const testStructure = {
'file1.txt': 'Hello World!',
dir1: {
'file2.js': 'console.log("test");',
'another.txt': 'More content here.',
},
'emptyFile.txt': '',
'binaryFile.bin': Buffer.from([0x01, 0x02, 0x03, 0x04]), // Example binary data
};
let tempRootDir: string;
describe('handleReadContent Integration Tests', () => {
beforeEach(async () => {
tempRootDir = await createTemporaryFilesystem(testStructure);
// Configure the mock resolvePath
mockResolvePath.mockImplementation((relativePath: string): string => {
// Simulate absolute path rejection first, as the original does
if (path.isAbsolute(relativePath)) {
throw new McpError(
ErrorCode.InvalidParams,
`Mocked Absolute paths are not allowed for ${relativePath}`,
);
}
// Resolve the path relative to the temp directory
const absolutePath = path.resolve(tempRootDir, relativePath);
// Simulate path traversal check
if (!absolutePath.startsWith(tempRootDir)) {
throw new McpError(
ErrorCode.InvalidRequest,
`Mocked Path traversal detected for ${relativePath}`,
);
}
// Return the resolved path. The actual fs.readFile in the handler will handle ENOENT.
return absolutePath;
});
});
afterEach(async () => {
await cleanupTemporaryFilesystem(tempRootDir);
vi.clearAllMocks(); // Clear all mocks
});
it('should read content from existing files', async () => {
const request = {
paths: ['file1.txt', 'dir1/file2.js', 'emptyFile.txt'],
};
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text); // Assuming similar return structure
expect(result).toHaveLength(3);
const file1 = result.find((r: FileReadResult) => r.path === 'file1.txt');
expect(file1).toBeDefined();
expect(file1?.error).toBeUndefined(); // Check for absence of error
expect(file1?.content).toBe('Hello World!');
const file2 = result.find((r: FileReadResult) => r.path === 'dir1/file2.js');
expect(file2).toBeDefined();
expect(file2?.error).toBeUndefined(); // Check for absence of error
expect(file2?.content).toBe('console.log("test");');
const emptyFile = result.find((r: FileReadResult) => r.path === 'emptyFile.txt');
expect(emptyFile).toBeDefined();
expect(emptyFile?.error).toBeUndefined(); // Check for absence of error
expect(emptyFile?.content).toBe('');
});
it('should return errors for non-existent files', async () => {
const request = {
paths: ['file1.txt', 'nonexistent.txt'],
};
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(2);
const file1 = result.find((r: FileReadResult) => r.path === 'file1.txt');
expect(file1).toBeDefined();
expect(file1?.error).toBeUndefined(); // Check for absence of error
expect(file1?.content).toBeDefined(); // Should have content
const nonexistent = result.find((r: FileReadResult) => r.path === 'nonexistent.txt');
expect(nonexistent).toBeDefined();
expect(nonexistent?.content).toBeUndefined(); // Should not have content
expect(nonexistent?.error).toBeDefined(); // Should have an error
// Check the specific error message from the handler for ENOENT - updated based on handler code
expect(nonexistent.error).toMatch(/File not found at resolved path/);
expect(nonexistent.error).toContain(path.resolve(tempRootDir, 'nonexistent.txt')); // Check resolved path is in the error message
});
it('should return errors for directories', async () => {
const request = {
paths: ['dir1'],
};
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(1);
const dir1 = result[0];
expect(dir1.path).toBe('dir1');
expect(dir1.content).toBeUndefined(); // Should not have content
expect(dir1.error).toBeDefined(); // Should have an error
// Check the specific error message from the handler for non-files
expect(dir1.error).toMatch(/Path is not a regular file: dir1/); // Match the updated error message
});
it('should return error for absolute paths (caught by mock resolvePath)', async () => {
const absolutePath = path.resolve(tempRootDir, 'file1.txt');
const request = { paths: [absolutePath] };
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(1);
expect(result[0].content).toBeUndefined();
expect(result[0].error).toBeDefined();
expect(result[0].error).toMatch(/Mocked Absolute paths are not allowed/);
});
it('should return error for path traversal (caught by mock resolvePath)', async () => {
const request = { paths: ['../outside.txt'] };
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(1);
expect(result[0].content).toBeUndefined();
expect(result[0].error).toBeDefined();
expect(result[0].error).toMatch(/Mocked Path traversal detected/);
});
it('should reject requests with empty paths array based on Zod schema', async () => {
const request = { paths: [] };
await expect(readContentToolDefinition.handler(request)).rejects.toThrow(McpError);
await expect(readContentToolDefinition.handler(request)).rejects.toThrow(
/Paths array cannot be empty/,
);
});
// Note: Testing binary file reading might require adjustments based on how
// the handler returns binary content (e.g., base64 encoded string).
// Assuming it returns utf8 string for now, which might corrupt binary data.
it('should attempt to read binary files (result might be corrupted if not handled)', async () => {
const request = {
paths: ['binaryFile.bin'],
};
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(1);
const binaryFile = result[0];
expect(binaryFile.error).toBeUndefined(); // Should be successful read attempt
expect(binaryFile.content).toBeDefined();
// The content will likely be garbled UTF-8 interpretation of binary data
// Reading binary data as utf-8 might return garbled content, but the read itself should succeed.
// We just check that an error wasn't returned and some content was.
expect(binaryFile.error).toBeUndefined();
expect(binaryFile.content).toBeDefined();
// Optionally, check that the content is a string of expected length if the behavior is consistent
// expect(binaryFile.content.length).toBe(4); // This seems to be the observed behavior
expect(binaryFile.content).toBeDefined();
});
it('should handle unexpected errors during path resolution', async () => {
const errorPath = 'resolveErrorPath.txt';
const genericErrorMessage = 'Simulated generic resolve error';
// Mock resolvePath to throw a generic Error for this path
mockResolvePath.mockImplementationOnce((relativePath: string): string => {
if (relativePath === errorPath) {
throw new Error(genericErrorMessage);
}
// Fallback (might not be needed if only errorPath is requested)
const absolutePath = path.resolve(tempRootDir, relativePath);
if (!absolutePath.startsWith(tempRootDir))
throw new McpError(ErrorCode.InvalidRequest, `Traversal`);
if (path.isAbsolute(relativePath)) throw new McpError(ErrorCode.InvalidParams, `Absolute`);
return absolutePath;
});
const request = { paths: [errorPath] };
const rawResult = await readContentToolDefinition.handler(request);
const result = JSON.parse(rawResult.content[0].text);
expect(result).toHaveLength(1);
const errorResult = result.find((r: FileReadResult) => r.path === errorPath);
expect(errorResult).toBeDefined();
expect(errorResult?.content).toBeUndefined();
expect(errorResult?.error).toBeDefined();
// Check for the unexpected resolve error message from line 82
expect(errorResult.error).toMatch(
// Corrected regex
/Error resolving path: Simulated generic resolve error/,
);
});
});
```