#
tokens: 39472/50000 5/76 files (page 4/5)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 4 of 5. Use http://codebase.md/modelcontextprotocol/servers?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .gitattributes
├── .github
│   ├── pull_request_template.md
│   └── workflows
│       ├── claude.yml
│       ├── python.yml
│       ├── release.yml
│       └── typescript.yml
├── .gitignore
├── .npmrc
├── .vscode
│   └── settings.json
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   └── release.py
├── SECURITY.md
├── src
│   ├── everything
│   │   ├── CLAUDE.md
│   │   ├── Dockerfile
│   │   ├── everything.ts
│   │   ├── index.ts
│   │   ├── instructions.md
│   │   ├── package.json
│   │   ├── README.md
│   │   ├── sse.ts
│   │   ├── stdio.ts
│   │   ├── streamableHttp.ts
│   │   └── tsconfig.json
│   ├── fetch
│   │   ├── .python-version
│   │   ├── Dockerfile
│   │   ├── LICENSE
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   ├── src
│   │   │   └── mcp_server_fetch
│   │   │       ├── __init__.py
│   │   │       ├── __main__.py
│   │   │       └── server.py
│   │   └── uv.lock
│   ├── filesystem
│   │   ├── __tests__
│   │   │   ├── directory-tree.test.ts
│   │   │   ├── lib.test.ts
│   │   │   ├── path-utils.test.ts
│   │   │   ├── path-validation.test.ts
│   │   │   └── roots-utils.test.ts
│   │   ├── Dockerfile
│   │   ├── index.ts
│   │   ├── jest.config.cjs
│   │   ├── lib.ts
│   │   ├── package.json
│   │   ├── path-utils.ts
│   │   ├── path-validation.ts
│   │   ├── README.md
│   │   ├── roots-utils.ts
│   │   └── tsconfig.json
│   ├── git
│   │   ├── .gitignore
│   │   ├── .python-version
│   │   ├── Dockerfile
│   │   ├── LICENSE
│   │   ├── pyproject.toml
│   │   ├── README.md
│   │   ├── src
│   │   │   └── mcp_server_git
│   │   │       ├── __init__.py
│   │   │       ├── __main__.py
│   │   │       ├── py.typed
│   │   │       └── server.py
│   │   ├── tests
│   │   │   └── test_server.py
│   │   └── uv.lock
│   ├── memory
│   │   ├── Dockerfile
│   │   ├── index.ts
│   │   ├── package.json
│   │   ├── README.md
│   │   └── tsconfig.json
│   ├── sequentialthinking
│   │   ├── Dockerfile
│   │   ├── index.ts
│   │   ├── package.json
│   │   ├── README.md
│   │   └── tsconfig.json
│   └── time
│       ├── .python-version
│       ├── Dockerfile
│       ├── pyproject.toml
│       ├── README.md
│       ├── src
│       │   └── mcp_server_time
│       │       ├── __init__.py
│       │       ├── __main__.py
│       │       └── server.py
│       ├── test
│       │   └── time_server_test.py
│       └── uv.lock
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/memory/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | #!/usr/bin/env node
  2 | 
  3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js";
  4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
  5 | import {
  6 |   CallToolRequestSchema,
  7 |   ListToolsRequestSchema,
  8 | } from "@modelcontextprotocol/sdk/types.js";
  9 | import { promises as fs } from 'fs';
 10 | import path from 'path';
 11 | import { fileURLToPath } from 'url';
 12 | 
 13 | // Define memory file path using environment variable with fallback
 14 | const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json');
 15 | 
 16 | // If MEMORY_FILE_PATH is just a filename, put it in the same directory as the script
 17 | const MEMORY_FILE_PATH = process.env.MEMORY_FILE_PATH
 18 |   ? path.isAbsolute(process.env.MEMORY_FILE_PATH)
 19 |     ? process.env.MEMORY_FILE_PATH
 20 |     : path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH)
 21 |   : defaultMemoryPath;
 22 | 
// We are storing our memory using entities, relations, and observations in a graph structure

/** A node in the graph; `name` is the identity key (creation dedupes on it). */
interface Entity {
  name: string;
  entityType: string;
  observations: string[];
}

/** A directed, typed edge; endpoints reference entities by name. */
interface Relation {
  from: string;
  to: string;
  relationType: string;
}

/** In-memory form of the persisted graph (one JSON line per entity/relation on disk). */
interface KnowledgeGraph {
  entities: Entity[];
  relations: Relation[];
}
 40 | 
 41 | // The KnowledgeGraphManager class contains all operations to interact with the knowledge graph
 42 | class KnowledgeGraphManager {
 43 |   private async loadGraph(): Promise<KnowledgeGraph> {
 44 |     try {
 45 |       const data = await fs.readFile(MEMORY_FILE_PATH, "utf-8");
 46 |       const lines = data.split("\n").filter(line => line.trim() !== "");
 47 |       return lines.reduce((graph: KnowledgeGraph, line) => {
 48 |         const item = JSON.parse(line);
 49 |         if (item.type === "entity") graph.entities.push(item as Entity);
 50 |         if (item.type === "relation") graph.relations.push(item as Relation);
 51 |         return graph;
 52 |       }, { entities: [], relations: [] });
 53 |     } catch (error) {
 54 |       if (error instanceof Error && 'code' in error && (error as any).code === "ENOENT") {
 55 |         return { entities: [], relations: [] };
 56 |       }
 57 |       throw error;
 58 |     }
 59 |   }
 60 | 
 61 |   private async saveGraph(graph: KnowledgeGraph): Promise<void> {
 62 |     const lines = [
 63 |       ...graph.entities.map(e => JSON.stringify({ 
 64 |         type: "entity", 
 65 |         name: e.name, 
 66 |         entityType: e.entityType, 
 67 |         observations: e.observations 
 68 |       })),
 69 |       ...graph.relations.map(r => JSON.stringify({ 
 70 |         type: "relation", 
 71 |         from: r.from, 
 72 |         to: r.to, 
 73 |         relationType: r.relationType 
 74 |       })),
 75 |     ];
 76 |     await fs.writeFile(MEMORY_FILE_PATH, lines.join("\n"));
 77 |   }
 78 | 
 79 |   async createEntities(entities: Entity[]): Promise<Entity[]> {
 80 |     const graph = await this.loadGraph();
 81 |     const newEntities = entities.filter(e => !graph.entities.some(existingEntity => existingEntity.name === e.name));
 82 |     graph.entities.push(...newEntities);
 83 |     await this.saveGraph(graph);
 84 |     return newEntities;
 85 |   }
 86 | 
 87 |   async createRelations(relations: Relation[]): Promise<Relation[]> {
 88 |     const graph = await this.loadGraph();
 89 |     const newRelations = relations.filter(r => !graph.relations.some(existingRelation => 
 90 |       existingRelation.from === r.from && 
 91 |       existingRelation.to === r.to && 
 92 |       existingRelation.relationType === r.relationType
 93 |     ));
 94 |     graph.relations.push(...newRelations);
 95 |     await this.saveGraph(graph);
 96 |     return newRelations;
 97 |   }
 98 | 
 99 |   async addObservations(observations: { entityName: string; contents: string[] }[]): Promise<{ entityName: string; addedObservations: string[] }[]> {
100 |     const graph = await this.loadGraph();
101 |     const results = observations.map(o => {
102 |       const entity = graph.entities.find(e => e.name === o.entityName);
103 |       if (!entity) {
104 |         throw new Error(`Entity with name ${o.entityName} not found`);
105 |       }
106 |       const newObservations = o.contents.filter(content => !entity.observations.includes(content));
107 |       entity.observations.push(...newObservations);
108 |       return { entityName: o.entityName, addedObservations: newObservations };
109 |     });
110 |     await this.saveGraph(graph);
111 |     return results;
112 |   }
113 | 
114 |   async deleteEntities(entityNames: string[]): Promise<void> {
115 |     const graph = await this.loadGraph();
116 |     graph.entities = graph.entities.filter(e => !entityNames.includes(e.name));
117 |     graph.relations = graph.relations.filter(r => !entityNames.includes(r.from) && !entityNames.includes(r.to));
118 |     await this.saveGraph(graph);
119 |   }
120 | 
121 |   async deleteObservations(deletions: { entityName: string; observations: string[] }[]): Promise<void> {
122 |     const graph = await this.loadGraph();
123 |     deletions.forEach(d => {
124 |       const entity = graph.entities.find(e => e.name === d.entityName);
125 |       if (entity) {
126 |         entity.observations = entity.observations.filter(o => !d.observations.includes(o));
127 |       }
128 |     });
129 |     await this.saveGraph(graph);
130 |   }
131 | 
132 |   async deleteRelations(relations: Relation[]): Promise<void> {
133 |     const graph = await this.loadGraph();
134 |     graph.relations = graph.relations.filter(r => !relations.some(delRelation => 
135 |       r.from === delRelation.from && 
136 |       r.to === delRelation.to && 
137 |       r.relationType === delRelation.relationType
138 |     ));
139 |     await this.saveGraph(graph);
140 |   }
141 | 
142 |   async readGraph(): Promise<KnowledgeGraph> {
143 |     return this.loadGraph();
144 |   }
145 | 
146 |   // Very basic search function
147 |   async searchNodes(query: string): Promise<KnowledgeGraph> {
148 |     const graph = await this.loadGraph();
149 |     
150 |     // Filter entities
151 |     const filteredEntities = graph.entities.filter(e => 
152 |       e.name.toLowerCase().includes(query.toLowerCase()) ||
153 |       e.entityType.toLowerCase().includes(query.toLowerCase()) ||
154 |       e.observations.some(o => o.toLowerCase().includes(query.toLowerCase()))
155 |     );
156 |   
157 |     // Create a Set of filtered entity names for quick lookup
158 |     const filteredEntityNames = new Set(filteredEntities.map(e => e.name));
159 |   
160 |     // Filter relations to only include those between filtered entities
161 |     const filteredRelations = graph.relations.filter(r => 
162 |       filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)
163 |     );
164 |   
165 |     const filteredGraph: KnowledgeGraph = {
166 |       entities: filteredEntities,
167 |       relations: filteredRelations,
168 |     };
169 |   
170 |     return filteredGraph;
171 |   }
172 | 
173 |   async openNodes(names: string[]): Promise<KnowledgeGraph> {
174 |     const graph = await this.loadGraph();
175 |     
176 |     // Filter entities
177 |     const filteredEntities = graph.entities.filter(e => names.includes(e.name));
178 |   
179 |     // Create a Set of filtered entity names for quick lookup
180 |     const filteredEntityNames = new Set(filteredEntities.map(e => e.name));
181 |   
182 |     // Filter relations to only include those between filtered entities
183 |     const filteredRelations = graph.relations.filter(r => 
184 |       filteredEntityNames.has(r.from) && filteredEntityNames.has(r.to)
185 |     );
186 |   
187 |     const filteredGraph: KnowledgeGraph = {
188 |       entities: filteredEntities,
189 |       relations: filteredRelations,
190 |     };
191 |   
192 |     return filteredGraph;
193 |   }
194 | }
195 | 
196 | const knowledgeGraphManager = new KnowledgeGraphManager();
197 | 
198 | 
199 | // The server instance and tools exposed to Claude
200 | const server = new Server({
201 |   name: "memory-server",
202 |   version: "0.6.3",
203 | },    {
204 |     capabilities: {
205 |       tools: {},
206 |     },
207 |   },);
208 | 
// Advertise the tool catalog. Each inputSchema is a JSON Schema object the
// client uses to validate tool arguments before calling.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      // --- creation tools ---
      {
        name: "create_entities",
        description: "Create multiple new entities in the knowledge graph",
        inputSchema: {
          type: "object",
          properties: {
            entities: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  name: { type: "string", description: "The name of the entity" },
                  entityType: { type: "string", description: "The type of the entity" },
                  observations: {
                    type: "array",
                    items: { type: "string" },
                    description: "An array of observation contents associated with the entity"
                  },
                },
                required: ["name", "entityType", "observations"],
                additionalProperties: false,
              },
            },
          },
          required: ["entities"],
          additionalProperties: false,
        },
      },
      {
        name: "create_relations",
        description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice",
        inputSchema: {
          type: "object",
          properties: {
            relations: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  from: { type: "string", description: "The name of the entity where the relation starts" },
                  to: { type: "string", description: "The name of the entity where the relation ends" },
                  relationType: { type: "string", description: "The type of the relation" },
                },
                required: ["from", "to", "relationType"],
                additionalProperties: false,
              },
            },
          },
          required: ["relations"],
          additionalProperties: false,
        },
      },
      {
        name: "add_observations",
        description: "Add new observations to existing entities in the knowledge graph",
        inputSchema: {
          type: "object",
          properties: {
            observations: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  entityName: { type: "string", description: "The name of the entity to add the observations to" },
                  contents: {
                    type: "array",
                    items: { type: "string" },
                    description: "An array of observation contents to add"
                  },
                },
                required: ["entityName", "contents"],
                additionalProperties: false,
              },
            },
          },
          required: ["observations"],
          additionalProperties: false,
        },
      },
      // --- deletion tools ---
      {
        name: "delete_entities",
        description: "Delete multiple entities and their associated relations from the knowledge graph",
        inputSchema: {
          type: "object",
          properties: {
            entityNames: {
              type: "array",
              items: { type: "string" },
              description: "An array of entity names to delete"
            },
          },
          required: ["entityNames"],
          additionalProperties: false,
        },
      },
      {
        name: "delete_observations",
        description: "Delete specific observations from entities in the knowledge graph",
        inputSchema: {
          type: "object",
          properties: {
            deletions: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  entityName: { type: "string", description: "The name of the entity containing the observations" },
                  observations: {
                    type: "array",
                    items: { type: "string" },
                    description: "An array of observations to delete"
                  },
                },
                required: ["entityName", "observations"],
                additionalProperties: false,
              },
            },
          },
          required: ["deletions"],
          additionalProperties: false,
        },
      },
      {
        name: "delete_relations",
        description: "Delete multiple relations from the knowledge graph",
        inputSchema: {
          type: "object",
          properties: {
            relations: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  from: { type: "string", description: "The name of the entity where the relation starts" },
                  to: { type: "string", description: "The name of the entity where the relation ends" },
                  relationType: { type: "string", description: "The type of the relation" },
                },
                required: ["from", "to", "relationType"],
                additionalProperties: false,
              },
              description: "An array of relations to delete"
            },
          },
          required: ["relations"],
          additionalProperties: false,
        },
      },
      // --- read/query tools ---
      {
        name: "read_graph",
        description: "Read the entire knowledge graph",
        inputSchema: {
          type: "object",
          properties: {},
          additionalProperties: false,
        },
      },
      {
        name: "search_nodes",
        description: "Search for nodes in the knowledge graph based on a query",
        inputSchema: {
          type: "object",
          properties: {
            query: { type: "string", description: "The search query to match against entity names, types, and observation content" },
          },
          required: ["query"],
          additionalProperties: false,
        },
      },
      {
        name: "open_nodes",
        description: "Open specific nodes in the knowledge graph by their names",
        inputSchema: {
          type: "object",
          properties: {
            names: {
              type: "array",
              items: { type: "string" },
              description: "An array of entity names to retrieve",
            },
          },
          required: ["names"],
          additionalProperties: false,
        },
      },
    ],
  };
});
399 | 
400 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
401 |   const { name, arguments: args } = request.params;
402 | 
403 |   if (name === "read_graph") {
404 |     return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.readGraph(), null, 2) }] };
405 |   }
406 | 
407 |   if (!args) {
408 |     throw new Error(`No arguments provided for tool: ${name}`);
409 |   }
410 | 
411 |   switch (name) {
412 |     case "create_entities":
413 |       return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createEntities(args.entities as Entity[]), null, 2) }] };
414 |     case "create_relations":
415 |       return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createRelations(args.relations as Relation[]), null, 2) }] };
416 |     case "add_observations":
417 |       return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.addObservations(args.observations as { entityName: string; contents: string[] }[]), null, 2) }] };
418 |     case "delete_entities":
419 |       await knowledgeGraphManager.deleteEntities(args.entityNames as string[]);
420 |       return { content: [{ type: "text", text: "Entities deleted successfully" }] };
421 |     case "delete_observations":
422 |       await knowledgeGraphManager.deleteObservations(args.deletions as { entityName: string; observations: string[] }[]);
423 |       return { content: [{ type: "text", text: "Observations deleted successfully" }] };
424 |     case "delete_relations":
425 |       await knowledgeGraphManager.deleteRelations(args.relations as Relation[]);
426 |       return { content: [{ type: "text", text: "Relations deleted successfully" }] };
427 |     case "search_nodes":
428 |       return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.searchNodes(args.query as string), null, 2) }] };
429 |     case "open_nodes":
430 |       return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.openNodes(args.names as string[]), null, 2) }] };
431 |     default:
432 |       throw new Error(`Unknown tool: ${name}`);
433 |   }
434 | });
435 | 
436 | async function main() {
437 |   const transport = new StdioServerTransport();
438 |   await server.connect(transport);
439 |   console.error("Knowledge Graph MCP Server running on stdio");
440 | }
441 | 
442 | main().catch((error) => {
443 |   console.error("Fatal error in main():", error);
444 |   process.exit(1);
445 | });
446 | 
```

--------------------------------------------------------------------------------
/src/time/test/time_server_test.py:
--------------------------------------------------------------------------------

```python
  1 | 
  2 | from freezegun import freeze_time
  3 | from mcp.shared.exceptions import McpError
  4 | import pytest
  5 | from unittest.mock import patch
  6 | from zoneinfo import ZoneInfo
  7 | 
  8 | from mcp_server_time.server import TimeServer, get_local_tz
  9 | 
 10 | 
@pytest.mark.parametrize(
    "test_time,timezone,expected",
    [
        # UTC+1 non-DST
        (
            "2024-01-01 12:00:00+00:00",
            "Europe/Warsaw",
            {
                "timezone": "Europe/Warsaw",
                "datetime": "2024-01-01T13:00:00+01:00",
                "is_dst": False,
            },
        ),
        # UTC non-DST
        (
            "2024-01-01 12:00:00+00:00",
            "Europe/London",
            {
                "timezone": "Europe/London",
                "datetime": "2024-01-01T12:00:00+00:00",
                "is_dst": False,
            },
        ),
        # UTC-5 non-DST
        (
            "2024-01-01 12:00:00-00:00",
            "America/New_York",
            {
                "timezone": "America/New_York",
                "datetime": "2024-01-01T07:00:00-05:00",
                "is_dst": False,
            },
        ),
        # UTC+1 DST
        (
            "2024-03-31 12:00:00+00:00",
            "Europe/Warsaw",
            {
                "timezone": "Europe/Warsaw",
                "datetime": "2024-03-31T14:00:00+02:00",
                "is_dst": True,
            },
        ),
        # UTC DST
        (
            "2024-03-31 12:00:00+00:00",
            "Europe/London",
            {
                "timezone": "Europe/London",
                "datetime": "2024-03-31T13:00:00+01:00",
                "is_dst": True,
            },
        ),
        # UTC-5 DST
        (
            "2024-03-31 12:00:00-00:00",
            "America/New_York",
            {
                "timezone": "America/New_York",
                "datetime": "2024-03-31T08:00:00-04:00",
                "is_dst": True,
            },
        ),
    ],
)
def test_get_current_time(test_time, timezone, expected):
    """Freeze 'now' at a UTC instant and check the reported local time, zone name, and DST flag."""
    # freeze_time pins the wall clock so the server's notion of "now" is deterministic.
    with freeze_time(test_time):
        time_server = TimeServer()
        result = time_server.get_current_time(timezone)
        assert result.timezone == expected["timezone"]
        assert result.datetime == expected["datetime"]
        assert result.is_dst == expected["is_dst"]
 83 | 
 84 | 
 85 | def test_get_current_time_with_invalid_timezone():
 86 |     time_server = TimeServer()
 87 |     with pytest.raises(
 88 |         McpError,
 89 |         match=r"Invalid timezone: 'No time zone found with key Invalid/Timezone'",
 90 |     ):
 91 |         time_server.get_current_time("Invalid/Timezone")
 92 | 
 93 | 
@pytest.mark.parametrize(
    "source_tz,time_str,target_tz,expected_error",
    [
        # bad source timezone
        (
            "invalid_tz",
            "12:00",
            "Europe/London",
            "Invalid timezone: 'No time zone found with key invalid_tz'",
        ),
        # bad target timezone
        (
            "Europe/Warsaw",
            "12:00",
            "invalid_tz",
            "Invalid timezone: 'No time zone found with key invalid_tz'",
        ),
        # out-of-range time-of-day string
        (
            "Europe/Warsaw",
            "25:00",
            "Europe/London",
            "Invalid time format. Expected HH:MM [24-hour format]",
        ),
    ],
)
def test_convert_time_errors(source_tz, time_str, target_tz, expected_error):
    """Invalid zones and malformed times each raise with a descriptive message."""
    # NOTE(review): pytest.raises(match=...) treats expected_error as a regex, so
    # '.' matches any char and '[24-hour format]' is a character class — the match
    # is looser than a literal comparison. Consider re.escape if this matters.
    time_server = TimeServer()
    with pytest.raises((McpError, ValueError), match=expected_error):
        time_server.convert_time(source_tz, time_str, target_tz)
121 | 
122 | 
123 | @pytest.mark.parametrize(
124 |     "test_time,source_tz,time_str,target_tz,expected",
125 |     [
126 |         # Basic case: Standard time conversion between Warsaw and London (1 hour difference)
127 |         # Warsaw is UTC+1, London is UTC+0
128 |         (
129 |             "2024-01-01 00:00:00+00:00",
130 |             "Europe/Warsaw",
131 |             "12:00",
132 |             "Europe/London",
133 |             {
134 |                 "source": {
135 |                     "timezone": "Europe/Warsaw",
136 |                     "datetime": "2024-01-01T12:00:00+01:00",
137 |                     "is_dst": False,
138 |                 },
139 |                 "target": {
140 |                     "timezone": "Europe/London",
141 |                     "datetime": "2024-01-01T11:00:00+00:00",
142 |                     "is_dst": False,
143 |                 },
144 |                 "time_difference": "-1.0h",
145 |             },
146 |         ),
147 |         # Reverse case of above: London to Warsaw conversion
148 |         # Shows how time difference is positive when going east
149 |         (
150 |             "2024-01-01 00:00:00+00:00",
151 |             "Europe/London",
152 |             "12:00",
153 |             "Europe/Warsaw",
154 |             {
155 |                 "source": {
156 |                     "timezone": "Europe/London",
157 |                     "datetime": "2024-01-01T12:00:00+00:00",
158 |                     "is_dst": False,
159 |                 },
160 |                 "target": {
161 |                     "timezone": "Europe/Warsaw",
162 |                     "datetime": "2024-01-01T13:00:00+01:00",
163 |                     "is_dst": False,
164 |                 },
165 |                 "time_difference": "+1.0h",
166 |             },
167 |         ),
168 |         # Edge case: Different DST periods between Europe and USA
169 |         # Europe ends DST on Oct 27, while USA waits until Nov 3
170 |         # This creates a one-week period where Europe is in standard time but USA still observes DST
171 |         (
172 |             "2024-10-28 00:00:00+00:00",
173 |             "Europe/Warsaw",
174 |             "12:00",
175 |             "America/New_York",
176 |             {
177 |                 "source": {
178 |                     "timezone": "Europe/Warsaw",
179 |                     "datetime": "2024-10-28T12:00:00+01:00",
180 |                     "is_dst": False,
181 |                 },
182 |                 "target": {
183 |                     "timezone": "America/New_York",
184 |                     "datetime": "2024-10-28T07:00:00-04:00",
185 |                     "is_dst": True,
186 |                 },
187 |                 "time_difference": "-5.0h",
188 |             },
189 |         ),
190 |         # Follow-up to previous case: After both regions end DST
191 |         # Shows how time difference increases by 1 hour when USA also ends DST
192 |         (
193 |             "2024-11-04 00:00:00+00:00",
194 |             "Europe/Warsaw",
195 |             "12:00",
196 |             "America/New_York",
197 |             {
198 |                 "source": {
199 |                     "timezone": "Europe/Warsaw",
200 |                     "datetime": "2024-11-04T12:00:00+01:00",
201 |                     "is_dst": False,
202 |                 },
203 |                 "target": {
204 |                     "timezone": "America/New_York",
205 |                     "datetime": "2024-11-04T06:00:00-05:00",
206 |                     "is_dst": False,
207 |                 },
208 |                 "time_difference": "-6.0h",
209 |             },
210 |         ),
211 |         # Edge case: Nepal's unusual UTC+5:45 offset
212 |         # One of the few time zones using 45-minute offset
213 |         (
214 |             "2024-01-01 00:00:00+00:00",
215 |             "Europe/Warsaw",
216 |             "12:00",
217 |             "Asia/Kathmandu",
218 |             {
219 |                 "source": {
220 |                     "timezone": "Europe/Warsaw",
221 |                     "datetime": "2024-01-01T12:00:00+01:00",
222 |                     "is_dst": False,
223 |                 },
224 |                 "target": {
225 |                     "timezone": "Asia/Kathmandu",
226 |                     "datetime": "2024-01-01T16:45:00+05:45",
227 |                     "is_dst": False,
228 |                 },
229 |                 "time_difference": "+4.75h",
230 |             },
231 |         ),
232 |         # Reverse case for Nepal
233 |         # Demonstrates how 45-minute offset works in opposite direction
234 |         (
235 |             "2024-01-01 00:00:00+00:00",
236 |             "Asia/Kathmandu",
237 |             "12:00",
238 |             "Europe/Warsaw",
239 |             {
240 |                 "source": {
241 |                     "timezone": "Asia/Kathmandu",
242 |                     "datetime": "2024-01-01T12:00:00+05:45",
243 |                     "is_dst": False,
244 |                 },
245 |                 "target": {
246 |                     "timezone": "Europe/Warsaw",
247 |                     "datetime": "2024-01-01T07:15:00+01:00",
248 |                     "is_dst": False,
249 |                 },
250 |                 "time_difference": "-4.75h",
251 |             },
252 |         ),
253 |         # Edge case: Lord Howe Island's unique DST rules
254 |         # One of the few places using 30-minute DST shift
255 |         # During summer (DST), they use UTC+11
256 |         (
257 |             "2024-01-01 00:00:00+00:00",
258 |             "Europe/Warsaw",
259 |             "12:00",
260 |             "Australia/Lord_Howe",
261 |             {
262 |                 "source": {
263 |                     "timezone": "Europe/Warsaw",
264 |                     "datetime": "2024-01-01T12:00:00+01:00",
265 |                     "is_dst": False,
266 |                 },
267 |                 "target": {
268 |                     "timezone": "Australia/Lord_Howe",
269 |                     "datetime": "2024-01-01T22:00:00+11:00",
270 |                     "is_dst": True,
271 |                 },
272 |                 "time_difference": "+10.0h",
273 |             },
274 |         ),
275 |         # Second Lord Howe Island case: During their standard time
276 |         # Shows transition to UTC+10:30 after DST ends
277 |         (
278 |             "2024-04-07 00:00:00+00:00",
279 |             "Europe/Warsaw",
280 |             "12:00",
281 |             "Australia/Lord_Howe",
282 |             {
283 |                 "source": {
284 |                     "timezone": "Europe/Warsaw",
285 |                     "datetime": "2024-04-07T12:00:00+02:00",
286 |                     "is_dst": True,
287 |                 },
288 |                 "target": {
289 |                     "timezone": "Australia/Lord_Howe",
290 |                     "datetime": "2024-04-07T20:30:00+10:30",
291 |                     "is_dst": False,
292 |                 },
293 |                 "time_difference": "+8.5h",
294 |             },
295 |         ),
296 |         # Edge case: Date line crossing with Samoa
297 |         # Demonstrates how a single time conversion can result in a date change
298 |         # Samoa is UTC+13, creating almost a full day difference with Warsaw
299 |         (
300 |             "2024-01-01 00:00:00+00:00",
301 |             "Europe/Warsaw",
302 |             "23:00",
303 |             "Pacific/Apia",
304 |             {
305 |                 "source": {
306 |                     "timezone": "Europe/Warsaw",
307 |                     "datetime": "2024-01-01T23:00:00+01:00",
308 |                     "is_dst": False,
309 |                 },
310 |                 "target": {
311 |                     "timezone": "Pacific/Apia",
312 |                     "datetime": "2024-01-02T11:00:00+13:00",
313 |                     "is_dst": False,
314 |                 },
315 |                 "time_difference": "+12.0h",
316 |             },
317 |         ),
318 |         # Edge case: Iran's unusual half-hour offset
319 |         # Demonstrates conversion with Iran's UTC+3:30 timezone
320 |         (
321 |             "2024-03-21 00:00:00+00:00",
322 |             "Europe/Warsaw",
323 |             "12:00",
324 |             "Asia/Tehran",
325 |             {
326 |                 "source": {
327 |                     "timezone": "Europe/Warsaw",
328 |                     "datetime": "2024-03-21T12:00:00+01:00",
329 |                     "is_dst": False,
330 |                 },
331 |                 "target": {
332 |                     "timezone": "Asia/Tehran",
333 |                     "datetime": "2024-03-21T14:30:00+03:30",
334 |                     "is_dst": False,
335 |                 },
336 |                 "time_difference": "+2.5h",
337 |             },
338 |         ),
339 |         # Edge case: Venezuela's unusual -4:30 offset (historical)
340 |         # In 2016, Venezuela moved from -4:30 to -4:00
341 |         # Useful for testing historical dates
342 |         (
343 |             "2016-04-30 00:00:00+00:00",  # Just before the change
344 |             "Europe/Warsaw",
345 |             "12:00",
346 |             "America/Caracas",
347 |             {
348 |                 "source": {
349 |                     "timezone": "Europe/Warsaw",
350 |                     "datetime": "2016-04-30T12:00:00+02:00",
351 |                     "is_dst": True,
352 |                 },
353 |                 "target": {
354 |                     "timezone": "America/Caracas",
355 |                     "datetime": "2016-04-30T05:30:00-04:30",
356 |                     "is_dst": False,
357 |                 },
358 |                 "time_difference": "-6.5h",
359 |             },
360 |         ),
361 |         # Edge case: Israel's variable DST
362 |         # Israel's DST changes don't follow a fixed pattern
363 |         # They often change dates year-to-year based on Hebrew calendar
364 |         (
365 |             "2024-10-27 00:00:00+00:00",
366 |             "Europe/Warsaw",
367 |             "12:00",
368 |             "Asia/Jerusalem",
369 |             {
370 |                 "source": {
371 |                     "timezone": "Europe/Warsaw",
372 |                     "datetime": "2024-10-27T12:00:00+01:00",
373 |                     "is_dst": False,
374 |                 },
375 |                 "target": {
376 |                     "timezone": "Asia/Jerusalem",
377 |                     "datetime": "2024-10-27T13:00:00+02:00",
378 |                     "is_dst": False,
379 |                 },
380 |                 "time_difference": "+1.0h",
381 |             },
382 |         ),
383 |         # Edge case: Antarctica/Troll station
384 |         # Only timezone that uses UTC+0 in winter and UTC+2 in summer
385 |         # One of the few zones with exactly 2 hours DST difference
386 |         (
387 |             "2024-03-31 00:00:00+00:00",
388 |             "Europe/Warsaw",
389 |             "12:00",
390 |             "Antarctica/Troll",
391 |             {
392 |                 "source": {
393 |                     "timezone": "Europe/Warsaw",
394 |                     "datetime": "2024-03-31T12:00:00+02:00",
395 |                     "is_dst": True,
396 |                 },
397 |                 "target": {
398 |                     "timezone": "Antarctica/Troll",
399 |                     "datetime": "2024-03-31T12:00:00+02:00",
400 |                     "is_dst": True,
401 |                 },
402 |                 "time_difference": "+0.0h",
403 |             },
404 |         ),
405 |         # Edge case: Kiribati date line anomaly
406 |         # After skipping Dec 31, 1994, eastern Kiribati is UTC+14
407 |         # The furthest forward timezone in the world
408 |         (
409 |             "2024-01-01 00:00:00+00:00",
410 |             "Europe/Warsaw",
411 |             "23:00",
412 |             "Pacific/Kiritimati",
413 |             {
414 |                 "source": {
415 |                     "timezone": "Europe/Warsaw",
416 |                     "datetime": "2024-01-01T23:00:00+01:00",
417 |                     "is_dst": False,
418 |                 },
419 |                 "target": {
420 |                     "timezone": "Pacific/Kiritimati",
421 |                     "datetime": "2024-01-02T12:00:00+14:00",
422 |                     "is_dst": False,
423 |                 },
424 |                 "time_difference": "+13.0h",
425 |             },
426 |         ),
427 |         # Edge case: Chatham Islands, New Zealand
428 |         # Uses unusual 45-minute offset AND observes DST
429 |         # UTC+12:45 in standard time, UTC+13:45 in DST
430 |         (
431 |             "2024-01-01 00:00:00+00:00",
432 |             "Europe/Warsaw",
433 |             "12:00",
434 |             "Pacific/Chatham",
435 |             {
436 |                 "source": {
437 |                     "timezone": "Europe/Warsaw",
438 |                     "datetime": "2024-01-01T12:00:00+01:00",
439 |                     "is_dst": False,
440 |                 },
441 |                 "target": {
442 |                     "timezone": "Pacific/Chatham",
443 |                     "datetime": "2024-01-02T00:45:00+13:45",
444 |                     "is_dst": True,
445 |                 },
446 |                 "time_difference": "+12.75h",
447 |             },
448 |         ),
449 |     ],
450 | )
451 | def test_convert_time(test_time, source_tz, time_str, target_tz, expected):
452 |     with freeze_time(test_time):
453 |         time_server = TimeServer()
454 |         result = time_server.convert_time(source_tz, time_str, target_tz)
455 | 
456 |         assert result.source.timezone == expected["source"]["timezone"]
457 |         assert result.target.timezone == expected["target"]["timezone"]
458 |         assert result.source.datetime == expected["source"]["datetime"]
459 |         assert result.target.datetime == expected["target"]["datetime"]
460 |         assert result.source.is_dst == expected["source"]["is_dst"]
461 |         assert result.target.is_dst == expected["target"]["is_dst"]
462 |         assert result.time_difference == expected["time_difference"]
463 | 
464 | 
def test_get_local_tz_with_override():
    """An explicit IANA name passed as override is honored verbatim."""
    tz = get_local_tz("America/New_York")
    assert isinstance(tz, ZoneInfo)
    assert str(tz) == "America/New_York"
470 | 
471 | 
def test_get_local_tz_with_invalid_override():
    """Test that an invalid timezone override raises a zone-lookup error.

    Asserting the specific ``ZoneInfoNotFoundError`` (rather than a bare
    ``Exception``) keeps the test from passing on unrelated failures such
    as a ``TypeError`` inside ``get_local_tz``.
    """
    from zoneinfo import ZoneInfoNotFoundError

    with pytest.raises(ZoneInfoNotFoundError):
        get_local_tz("Invalid/Timezone")
476 | 
477 | 
@patch('mcp_server_time.server.get_localzone_name')
def test_get_local_tz_with_valid_iana_name(mock_get_localzone):
    """When tzlocal reports a valid IANA name, it is used as-is."""
    mock_get_localzone.return_value = "Europe/London"
    tz = get_local_tz()
    assert isinstance(tz, ZoneInfo)
    assert str(tz) == "Europe/London"
485 | 
486 | 
@patch('mcp_server_time.server.get_localzone_name')
def test_get_local_tz_when_none_returned(mock_get_localzone):
    """Test default to UTC when tzlocal returns None.

    Also verifies the fallback is a real ``ZoneInfo`` instance, matching
    the type check performed by the sibling ``get_local_tz`` tests.
    """
    mock_get_localzone.return_value = None
    result = get_local_tz()
    assert str(result) == "UTC"
    assert isinstance(result, ZoneInfo)
493 | 
494 | 
@patch('mcp_server_time.server.get_localzone_name')
def test_get_local_tz_handles_windows_timezones(mock_get_localzone):
    """tzlocal is expected to map Windows zone names to IANA identifiers.

    A Windows name like 'Pacific Standard Time' should already arrive
    here as 'America/Los_Angeles', so get_local_tz only ever sees IANA
    names regardless of platform.
    """
    mock_get_localzone.return_value = "America/Los_Angeles"
    tz = get_local_tz()
    assert isinstance(tz, ZoneInfo)
    assert str(tz) == "America/Los_Angeles"
507 | 
508 | 
@pytest.mark.parametrize(
    "timezone_name",
    [
        "America/New_York",
        "Europe/Paris",
        "Asia/Tokyo",
        "Australia/Sydney",
        "Africa/Cairo",
        "America/Sao_Paulo",
        "Pacific/Auckland",
        "UTC",
    ],
)
@patch('mcp_server_time.server.get_localzone_name')
def test_get_local_tz_various_timezones(mock_get_localzone, timezone_name):
    """Any IANA name reported by tzlocal round-trips through get_local_tz."""
    mock_get_localzone.return_value = timezone_name
    tz = get_local_tz()
    assert isinstance(tz, ZoneInfo)
    assert str(tz) == timezone_name
529 | 
```

--------------------------------------------------------------------------------
/src/filesystem/__tests__/lib.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach, jest } from '@jest/globals';
  2 | import fs from 'fs/promises';
  3 | import path from 'path';
  4 | import os from 'os';
  5 | import {
  6 |   // Pure utility functions
  7 |   formatSize,
  8 |   normalizeLineEndings,
  9 |   createUnifiedDiff,
 10 |   // Security & validation functions
 11 |   validatePath,
 12 |   setAllowedDirectories,
 13 |   // File operations
 14 |   getFileStats,
 15 |   readFileContent,
 16 |   writeFileContent,
 17 |   // Search & filtering functions
 18 |   searchFilesWithValidation,
 19 |   // File editing functions
 20 |   applyFileEdits,
 21 |   tailFile,
 22 |   headFile
 23 | } from '../lib.js';
 24 | 
// Auto-mock fs/promises so no test in this file performs real disk I/O.
// (jest.mock calls are hoisted above the imports by Jest.)
jest.mock('fs/promises');
// Typed view of the mocked module, used to configure per-test behavior.
const mockFs = fs as jest.Mocked<typeof fs>;
 28 | 
 29 | describe('Lib Functions', () => {
 30 |   beforeEach(() => {
 31 |     jest.clearAllMocks();
 32 |     // Set up allowed directories for tests
 33 |     const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp', 'C:\\allowed'] : ['/home/user', '/tmp', '/allowed'];
 34 |     setAllowedDirectories(allowedDirs);
 35 |   });
 36 | 
 37 |   afterEach(() => {
 38 |     jest.restoreAllMocks();
 39 |     // Clear allowed directories after tests
 40 |     setAllowedDirectories([]);
 41 |   });
 42 | 
  // Suites for side-effect-free helpers; none of these touch the mocked fs.
  describe('Pure Utility Functions', () => {
    describe('formatSize', () => {
      it('formats bytes correctly', () => {
        expect(formatSize(0)).toBe('0 B');
        expect(formatSize(512)).toBe('512 B');
        expect(formatSize(1024)).toBe('1.00 KB');
        expect(formatSize(1536)).toBe('1.50 KB');
        expect(formatSize(1048576)).toBe('1.00 MB');
        expect(formatSize(1073741824)).toBe('1.00 GB');
        expect(formatSize(1099511627776)).toBe('1.00 TB');
      });

      it('handles edge cases', () => {
        expect(formatSize(1023)).toBe('1023 B');
        expect(formatSize(1025)).toBe('1.00 KB');
        // One byte short of 1 MB stays in the KB bucket, not rounded up to MB.
        expect(formatSize(1048575)).toBe('1024.00 KB');
      });

      it('handles very large numbers beyond TB', () => {
        // The function only supports up to TB, so very large numbers will show as TB
        expect(formatSize(1024 * 1024 * 1024 * 1024 * 1024)).toBe('1024.00 TB');
        expect(formatSize(Number.MAX_SAFE_INTEGER)).toContain('TB');
      });

      it('handles negative numbers', () => {
        // Negative numbers will result in NaN for the log calculation
        expect(formatSize(-1024)).toContain('NaN');
        // -0 is treated the same as 0.
        expect(formatSize(-0)).toBe('0 B');
      });

      it('handles decimal numbers', () => {
        expect(formatSize(1536.5)).toBe('1.50 KB');
        // Sub-KB fractional values are printed verbatim.
        expect(formatSize(1023.9)).toBe('1023.9 B');
      });

      it('handles very small positive numbers', () => {
        expect(formatSize(1)).toBe('1 B');
        expect(formatSize(0.5)).toBe('0.5 B');
        expect(formatSize(0.1)).toBe('0.1 B');
      });
    });

    describe('normalizeLineEndings', () => {
      it('converts CRLF to LF', () => {
        expect(normalizeLineEndings('line1\r\nline2\r\nline3')).toBe('line1\nline2\nline3');
      });

      it('leaves LF unchanged', () => {
        expect(normalizeLineEndings('line1\nline2\nline3')).toBe('line1\nline2\nline3');
      });

      it('handles mixed line endings', () => {
        expect(normalizeLineEndings('line1\r\nline2\nline3\r\n')).toBe('line1\nline2\nline3\n');
      });

      it('handles empty string', () => {
        expect(normalizeLineEndings('')).toBe('');
      });
    });

    describe('createUnifiedDiff', () => {
      it('creates diff for simple changes', () => {
        const original = 'line1\nline2\nline3';
        const modified = 'line1\nmodified line2\nline3';
        const diff = createUnifiedDiff(original, modified, 'test.txt');

        expect(diff).toContain('--- test.txt');
        expect(diff).toContain('+++ test.txt');
        expect(diff).toContain('-line2');
        expect(diff).toContain('+modified line2');
      });

      it('handles CRLF normalization', () => {
        // CRLF on one side must not produce spurious whole-file diffs.
        const original = 'line1\r\nline2\r\n';
        const modified = 'line1\nmodified line2\n';
        const diff = createUnifiedDiff(original, modified);

        expect(diff).toContain('-line2');
        expect(diff).toContain('+modified line2');
      });

      it('handles identical content', () => {
        const content = 'line1\nline2\nline3';
        const diff = createUnifiedDiff(content, content);

        // Should not contain any +/- lines for identical content (excluding header lines)
        expect(diff.split('\n').filter((line: string) => line.startsWith('+++') || line.startsWith('---'))).toHaveLength(2);
        expect(diff.split('\n').filter((line: string) => line.startsWith('+') && !line.startsWith('+++'))).toHaveLength(0);
        expect(diff.split('\n').filter((line: string) => line.startsWith('-') && !line.startsWith('---'))).toHaveLength(0);
      });

      it('handles empty content', () => {
        const diff = createUnifiedDiff('', '');
        expect(diff).toContain('--- file');
        expect(diff).toContain('+++ file');
      });

      it('handles default filename parameter', () => {
        // Omitting the filename falls back to the literal 'file'.
        const diff = createUnifiedDiff('old', 'new');
        expect(diff).toContain('--- file');
        expect(diff).toContain('+++ file');
      });

      it('handles custom filename', () => {
        const diff = createUnifiedDiff('old', 'new', 'custom.txt');
        expect(diff).toContain('--- custom.txt');
        expect(diff).toContain('+++ custom.txt');
      });
    });
  });
153 | 
154 |   describe('Security & Validation Functions', () => {
155 |     describe('validatePath', () => {
156 |       // Use Windows-compatible paths for testing
157 |       const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp'] : ['/home/user', '/tmp'];
158 | 
159 |       beforeEach(() => {
160 |         mockFs.realpath.mockImplementation(async (path: any) => path.toString());
161 |       });
162 | 
163 |       it('validates allowed paths', async () => {
164 |         const testPath = process.platform === 'win32' ? 'C:\\Users\\test\\file.txt' : '/home/user/file.txt';
165 |         const result = await validatePath(testPath);
166 |         expect(result).toBe(testPath);
167 |       });
168 | 
169 |       it('rejects disallowed paths', async () => {
170 |         const testPath = process.platform === 'win32' ? 'C:\\Windows\\System32\\file.txt' : '/etc/passwd';
171 |         await expect(validatePath(testPath))
172 |           .rejects.toThrow('Access denied - path outside allowed directories');
173 |       });
174 | 
175 |       it('handles non-existent files by checking parent directory', async () => {
176 |         const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\newfile.txt' : '/home/user/newfile.txt';
177 |         const parentPath = process.platform === 'win32' ? 'C:\\Users\\test' : '/home/user';
178 |         
179 |         // Create an error with the ENOENT code that the implementation checks for
180 |         const enoentError = new Error('ENOENT') as NodeJS.ErrnoException;
181 |         enoentError.code = 'ENOENT';
182 |         
183 |         mockFs.realpath
184 |           .mockRejectedValueOnce(enoentError)
185 |           .mockResolvedValueOnce(parentPath);
186 |         
187 |         const result = await validatePath(newFilePath);
188 |         expect(result).toBe(path.resolve(newFilePath));
189 |       });
190 | 
191 |       it('rejects when parent directory does not exist', async () => {
192 |         const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\nonexistent\\newfile.txt' : '/home/user/nonexistent/newfile.txt';
193 |         
194 |         // Create errors with the ENOENT code
195 |         const enoentError1 = new Error('ENOENT') as NodeJS.ErrnoException;
196 |         enoentError1.code = 'ENOENT';
197 |         const enoentError2 = new Error('ENOENT') as NodeJS.ErrnoException;
198 |         enoentError2.code = 'ENOENT';
199 |         
200 |         mockFs.realpath
201 |           .mockRejectedValueOnce(enoentError1)
202 |           .mockRejectedValueOnce(enoentError2);
203 |         
204 |         await expect(validatePath(newFilePath))
205 |           .rejects.toThrow('Parent directory does not exist');
206 |       });
207 |     });
208 |   });
209 | 
210 |   describe('File Operations', () => {
211 |     describe('getFileStats', () => {
212 |       it('returns file statistics', async () => {
213 |         const mockStats = {
214 |           size: 1024,
215 |           birthtime: new Date('2023-01-01'),
216 |           mtime: new Date('2023-01-02'),
217 |           atime: new Date('2023-01-03'),
218 |           isDirectory: () => false,
219 |           isFile: () => true,
220 |           mode: 0o644
221 |         };
222 |         
223 |         mockFs.stat.mockResolvedValueOnce(mockStats as any);
224 |         
225 |         const result = await getFileStats('/test/file.txt');
226 |         
227 |         expect(result).toEqual({
228 |           size: 1024,
229 |           created: new Date('2023-01-01'),
230 |           modified: new Date('2023-01-02'),
231 |           accessed: new Date('2023-01-03'),
232 |           isDirectory: false,
233 |           isFile: true,
234 |           permissions: '644'
235 |         });
236 |       });
237 | 
238 |       it('handles directory statistics', async () => {
239 |         const mockStats = {
240 |           size: 4096,
241 |           birthtime: new Date('2023-01-01'),
242 |           mtime: new Date('2023-01-02'),
243 |           atime: new Date('2023-01-03'),
244 |           isDirectory: () => true,
245 |           isFile: () => false,
246 |           mode: 0o755
247 |         };
248 |         
249 |         mockFs.stat.mockResolvedValueOnce(mockStats as any);
250 |         
251 |         const result = await getFileStats('/test/dir');
252 |         
253 |         expect(result.isDirectory).toBe(true);
254 |         expect(result.isFile).toBe(false);
255 |         expect(result.permissions).toBe('755');
256 |       });
257 |     });
258 | 
259 |     describe('readFileContent', () => {
260 |       it('reads file with default encoding', async () => {
261 |         mockFs.readFile.mockResolvedValueOnce('file content');
262 |         
263 |         const result = await readFileContent('/test/file.txt');
264 |         
265 |         expect(result).toBe('file content');
266 |         expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8');
267 |       });
268 | 
269 |       it('reads file with custom encoding', async () => {
270 |         mockFs.readFile.mockResolvedValueOnce('file content');
271 |         
272 |         const result = await readFileContent('/test/file.txt', 'ascii');
273 |         
274 |         expect(result).toBe('file content');
275 |         expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'ascii');
276 |       });
277 |     });
278 | 
279 |     describe('writeFileContent', () => {
280 |       it('writes file content', async () => {
281 |         mockFs.writeFile.mockResolvedValueOnce(undefined);
282 |         
283 |         await writeFileContent('/test/file.txt', 'new content');
284 |         
285 |         expect(mockFs.writeFile).toHaveBeenCalledWith('/test/file.txt', 'new content', { encoding: "utf-8", flag: 'wx' });
286 |       });
287 |     });
288 | 
289 |   });
290 | 
  // Recursive search tests: fs.readdir and fs.realpath are both mocked, and
  // several tests override the realpath stub set by the local beforeEach.
  describe('Search & Filtering Functions', () => {
    describe('searchFilesWithValidation', () => {
      beforeEach(() => {
        // Default stub: realpath returns its input unchanged (no symlinks).
        mockFs.realpath.mockImplementation(async (path: any) => path.toString());
      });


      it('excludes files matching exclude patterns', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'test.log', isDirectory: () => false },
          { name: 'node_modules', isDirectory: () => true }
        ];

        // Single readdir response; subdirectories are never descended into
        // because node_modules is excluded.
        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);

        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];

        // Mock realpath to return the same path for validation to pass
        mockFs.realpath.mockImplementation(async (inputPath: any) => {
          const pathStr = inputPath.toString();
          // Return the path as-is for validation
          return pathStr;
        });

        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          { excludePatterns: ['*.log', 'node_modules'] }
        );

        // Only test.txt survives: test.log and node_modules are excluded.
        const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
        expect(result).toEqual([expectedResult]);
      });

      it('handles validation errors during search', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'invalid_file.txt', isDirectory: () => false }
        ];

        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);

        // Mock validatePath to throw error for invalid_file.txt
        mockFs.realpath.mockImplementation(async (path: any) => {
          if (path.toString().includes('invalid_file.txt')) {
            throw new Error('Access denied');
          }
          return path.toString();
        });

        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];

        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          {}
        );

        // Should only return the valid file, skipping the invalid one
        const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
        expect(result).toEqual([expectedResult]);
      });

      it('handles complex exclude patterns with wildcards', async () => {
        const mockEntries = [
          { name: 'test.txt', isDirectory: () => false },
          { name: 'test.backup', isDirectory: () => false },
          { name: 'important_test.js', isDirectory: () => false }
        ];

        mockFs.readdir.mockResolvedValueOnce(mockEntries as any);

        const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
        const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];

        const result = await searchFilesWithValidation(
          testDir,
          '*test*',
          allowedDirs,
          { excludePatterns: ['*.backup'] }
        );

        // '*test*' matches all three names; '*.backup' then removes test.backup.
        const expectedResults = process.platform === 'win32' ? [
          'C:\\allowed\\dir\\test.txt',
          'C:\\allowed\\dir\\important_test.js'
        ] : [
          '/allowed/dir/test.txt',
          '/allowed/dir/important_test.js'
        ];
        expect(result).toEqual(expectedResults);
      });
    });
  });
389 | 
390 |   describe('File Editing Functions', () => {
391 |     describe('applyFileEdits', () => {
392 |       beforeEach(() => {
393 |         mockFs.readFile.mockResolvedValue('line1\nline2\nline3\n');
394 |         mockFs.writeFile.mockResolvedValue(undefined);
395 |       });
396 | 
397 |       it('applies simple text replacement', async () => {
398 |         const edits = [
399 |           { oldText: 'line2', newText: 'modified line2' }
400 |         ];
401 |         
402 |         mockFs.rename.mockResolvedValueOnce(undefined);
403 |         
404 |         const result = await applyFileEdits('/test/file.txt', edits, false);
405 |         
406 |         expect(result).toContain('modified line2');
407 |         // Should write to temporary file then rename
408 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
409 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
410 |           'line1\nmodified line2\nline3\n',
411 |           'utf-8'
412 |         );
413 |         expect(mockFs.rename).toHaveBeenCalledWith(
414 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
415 |           '/test/file.txt'
416 |         );
417 |       });
418 | 
419 |       it('handles dry run mode', async () => {
420 |         const edits = [
421 |           { oldText: 'line2', newText: 'modified line2' }
422 |         ];
423 |         
424 |         const result = await applyFileEdits('/test/file.txt', edits, true);
425 |         
426 |         expect(result).toContain('modified line2');
427 |         expect(mockFs.writeFile).not.toHaveBeenCalled();
428 |       });
429 | 
430 |       it('applies multiple edits sequentially', async () => {
431 |         const edits = [
432 |           { oldText: 'line1', newText: 'first line' },
433 |           { oldText: 'line3', newText: 'third line' }
434 |         ];
435 |         
436 |         mockFs.rename.mockResolvedValueOnce(undefined);
437 |         
438 |         await applyFileEdits('/test/file.txt', edits, false);
439 |         
440 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
441 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
442 |           'first line\nline2\nthird line\n',
443 |           'utf-8'
444 |         );
445 |         expect(mockFs.rename).toHaveBeenCalledWith(
446 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
447 |           '/test/file.txt'
448 |         );
449 |       });
450 | 
451 |       it('handles whitespace-flexible matching', async () => {
452 |         mockFs.readFile.mockResolvedValue('  line1\n    line2\n  line3\n');
453 |         
454 |         const edits = [
455 |           { oldText: 'line2', newText: 'modified line2' }
456 |         ];
457 |         
458 |         mockFs.rename.mockResolvedValueOnce(undefined);
459 |         
460 |         await applyFileEdits('/test/file.txt', edits, false);
461 |         
462 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
463 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
464 |           '  line1\n    modified line2\n  line3\n',
465 |           'utf-8'
466 |         );
467 |         expect(mockFs.rename).toHaveBeenCalledWith(
468 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
469 |           '/test/file.txt'
470 |         );
471 |       });
472 | 
473 |       it('throws error for non-matching edits', async () => {
474 |         const edits = [
475 |           { oldText: 'nonexistent line', newText: 'replacement' }
476 |         ];
477 |         
478 |         await expect(applyFileEdits('/test/file.txt', edits, false))
479 |           .rejects.toThrow('Could not find exact match for edit');
480 |       });
481 | 
482 |       it('handles complex multi-line edits with indentation', async () => {
483 |         mockFs.readFile.mockResolvedValue('function test() {\n  console.log("hello");\n  return true;\n}');
484 |         
485 |         const edits = [
486 |           { 
487 |             oldText: '  console.log("hello");\n  return true;', 
488 |             newText: '  console.log("world");\n  console.log("test");\n  return false;' 
489 |           }
490 |         ];
491 |         
492 |         mockFs.rename.mockResolvedValueOnce(undefined);
493 |         
494 |         await applyFileEdits('/test/file.js', edits, false);
495 |         
496 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
497 |           expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
498 |           'function test() {\n  console.log("world");\n  console.log("test");\n  return false;\n}',
499 |           'utf-8'
500 |         );
501 |         expect(mockFs.rename).toHaveBeenCalledWith(
502 |           expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
503 |           '/test/file.js'
504 |         );
505 |       });
506 | 
507 |       it('handles edits with different indentation patterns', async () => {
508 |         mockFs.readFile.mockResolvedValue('    if (condition) {\n        doSomething();\n    }');
509 |         
510 |         const edits = [
511 |           { 
512 |             oldText: 'doSomething();', 
513 |             newText: 'doSomethingElse();\n        doAnotherThing();' 
514 |           }
515 |         ];
516 |         
517 |         mockFs.rename.mockResolvedValueOnce(undefined);
518 |         
519 |         await applyFileEdits('/test/file.js', edits, false);
520 |         
521 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
522 |           expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
523 |           '    if (condition) {\n        doSomethingElse();\n        doAnotherThing();\n    }',
524 |           'utf-8'
525 |         );
526 |         expect(mockFs.rename).toHaveBeenCalledWith(
527 |           expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
528 |           '/test/file.js'
529 |         );
530 |       });
531 | 
532 |       it('handles CRLF line endings in file content', async () => {
533 |         mockFs.readFile.mockResolvedValue('line1\r\nline2\r\nline3\r\n');
534 |         
535 |         const edits = [
536 |           { oldText: 'line2', newText: 'modified line2' }
537 |         ];
538 |         
539 |         mockFs.rename.mockResolvedValueOnce(undefined);
540 |         
541 |         await applyFileEdits('/test/file.txt', edits, false);
542 |         
543 |         expect(mockFs.writeFile).toHaveBeenCalledWith(
544 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
545 |           'line1\nmodified line2\nline3\n',
546 |           'utf-8'
547 |         );
548 |         expect(mockFs.rename).toHaveBeenCalledWith(
549 |           expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
550 |           '/test/file.txt'
551 |         );
552 |       });
553 |     });
554 | 
555 |     describe('tailFile', () => {
556 |       it('handles empty files', async () => {
557 |         mockFs.stat.mockResolvedValue({ size: 0 } as any);
558 |         
559 |         const result = await tailFile('/test/empty.txt', 5);
560 |         
561 |         expect(result).toBe('');
562 |         expect(mockFs.open).not.toHaveBeenCalled();
563 |       });
564 | 
565 |       it('calls stat to check file size', async () => {
566 |         mockFs.stat.mockResolvedValue({ size: 100 } as any);
567 |         
568 |         // Mock file handle with proper typing
569 |         const mockFileHandle = {
570 |           read: jest.fn(),
571 |           close: jest.fn()
572 |         } as any;
573 |         
574 |         mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
575 |         mockFileHandle.close.mockResolvedValue(undefined);
576 |         
577 |         mockFs.open.mockResolvedValue(mockFileHandle);
578 |         
579 |         await tailFile('/test/file.txt', 2);
580 |         
581 |         expect(mockFs.stat).toHaveBeenCalledWith('/test/file.txt');
582 |         expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
583 |       });
584 | 
585 |       it('handles files with content and returns last lines', async () => {
586 |         mockFs.stat.mockResolvedValue({ size: 50 } as any);
587 |         
588 |         const mockFileHandle = {
589 |           read: jest.fn(),
590 |           close: jest.fn()
591 |         } as any;
592 |         
593 |         // Simulate reading file content in chunks
594 |         mockFileHandle.read
595 |           .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line3\nline4\nline5\n') })
596 |           .mockResolvedValueOnce({ bytesRead: 0 });
597 |         mockFileHandle.close.mockResolvedValue(undefined);
598 |         
599 |         mockFs.open.mockResolvedValue(mockFileHandle);
600 |         
601 |         const result = await tailFile('/test/file.txt', 2);
602 |         
603 |         expect(mockFileHandle.close).toHaveBeenCalled();
604 |       });
605 | 
606 |       it('handles read errors gracefully', async () => {
607 |         mockFs.stat.mockResolvedValue({ size: 100 } as any);
608 |         
609 |         const mockFileHandle = {
610 |           read: jest.fn(),
611 |           close: jest.fn()
612 |         } as any;
613 |         
614 |         mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
615 |         mockFileHandle.close.mockResolvedValue(undefined);
616 |         
617 |         mockFs.open.mockResolvedValue(mockFileHandle);
618 |         
619 |         await tailFile('/test/file.txt', 5);
620 |         
621 |         expect(mockFileHandle.close).toHaveBeenCalled();
622 |       });
623 |     });
624 | 
625 |     describe('headFile', () => {
626 |       it('opens file for reading', async () => {
627 |         // Mock file handle with proper typing
628 |         const mockFileHandle = {
629 |           read: jest.fn(),
630 |           close: jest.fn()
631 |         } as any;
632 |         
633 |         mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
634 |         mockFileHandle.close.mockResolvedValue(undefined);
635 |         
636 |         mockFs.open.mockResolvedValue(mockFileHandle);
637 |         
638 |         await headFile('/test/file.txt', 2);
639 |         
640 |         expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
641 |       });
642 | 
643 |       it('handles files with content and returns first lines', async () => {
644 |         const mockFileHandle = {
645 |           read: jest.fn(),
646 |           close: jest.fn()
647 |         } as any;
648 |         
649 |         // Simulate reading file content with newlines
650 |         mockFileHandle.read
651 |           .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line1\nline2\nline3\n') })
652 |           .mockResolvedValueOnce({ bytesRead: 0 });
653 |         mockFileHandle.close.mockResolvedValue(undefined);
654 |         
655 |         mockFs.open.mockResolvedValue(mockFileHandle);
656 |         
657 |         const result = await headFile('/test/file.txt', 2);
658 |         
659 |         expect(mockFileHandle.close).toHaveBeenCalled();
660 |       });
661 | 
662 |       it('handles files with leftover content', async () => {
663 |         const mockFileHandle = {
664 |           read: jest.fn(),
665 |           close: jest.fn()
666 |         } as any;
667 |         
668 |         // Simulate reading file content without final newline
669 |         mockFileHandle.read
670 |           .mockResolvedValueOnce({ bytesRead: 15, buffer: Buffer.from('line1\nline2\nend') })
671 |           .mockResolvedValueOnce({ bytesRead: 0 });
672 |         mockFileHandle.close.mockResolvedValue(undefined);
673 |         
674 |         mockFs.open.mockResolvedValue(mockFileHandle);
675 |         
676 |         const result = await headFile('/test/file.txt', 5);
677 |         
678 |         expect(mockFileHandle.close).toHaveBeenCalled();
679 |       });
680 | 
681 |       it('handles reaching requested line count', async () => {
682 |         const mockFileHandle = {
683 |           read: jest.fn(),
684 |           close: jest.fn()
685 |         } as any;
686 |         
687 |         // Simulate reading exactly the requested number of lines
688 |         mockFileHandle.read
689 |           .mockResolvedValueOnce({ bytesRead: 12, buffer: Buffer.from('line1\nline2\n') })
690 |           .mockResolvedValueOnce({ bytesRead: 0 });
691 |         mockFileHandle.close.mockResolvedValue(undefined);
692 |         
693 |         mockFs.open.mockResolvedValue(mockFileHandle);
694 |         
695 |         const result = await headFile('/test/file.txt', 2);
696 |         
697 |         expect(mockFileHandle.close).toHaveBeenCalled();
698 |       });
699 |     });
700 |   });
701 | });
702 | 
```

--------------------------------------------------------------------------------
/src/filesystem/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | #!/usr/bin/env node
  2 | 
  3 | import { Server } from "@modelcontextprotocol/sdk/server/index.js";
  4 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
  5 | import {
  6 |   CallToolRequestSchema,
  7 |   ListToolsRequestSchema,
  8 |   ToolSchema,
  9 |   RootsListChangedNotificationSchema,
 10 |   type Root,
 11 | } from "@modelcontextprotocol/sdk/types.js";
 12 | import fs from "fs/promises";
 13 | import { createReadStream } from "fs";
 14 | import path from "path";
 15 | import { z } from "zod";
 16 | import { zodToJsonSchema } from "zod-to-json-schema";
 17 | import { minimatch } from "minimatch";
 18 | import { normalizePath, expandHome } from './path-utils.js';
 19 | import { getValidRootDirectories } from './roots-utils.js';
 20 | import {
 21 |   // Function imports
 22 |   formatSize,
 23 |   validatePath,
 24 |   getFileStats,
 25 |   readFileContent,
 26 |   writeFileContent,
 27 |   searchFilesWithValidation,
 28 |   applyFileEdits,
 29 |   tailFile,
 30 |   headFile,
 31 |   setAllowedDirectories,
 32 | } from './lib.js';
 33 | 
 34 | // Command line argument parsing
 35 | const args = process.argv.slice(2);
 36 | if (args.length === 0) {
 37 |   console.error("Usage: mcp-server-filesystem [allowed-directory] [additional-directories...]");
 38 |   console.error("Note: Allowed directories can be provided via:");
 39 |   console.error("  1. Command-line arguments (shown above)");
 40 |   console.error("  2. MCP roots protocol (if client supports it)");
 41 |   console.error("At least one directory must be provided by EITHER method for the server to operate.");
 42 | }
 43 | 
 44 | // Store allowed directories in normalized and resolved form
 45 | let allowedDirectories = await Promise.all(
 46 |   args.map(async (dir) => {
 47 |     const expanded = expandHome(dir);
 48 |     const absolute = path.resolve(expanded);
 49 |     try {
 50 |       // Security: Resolve symlinks in allowed directories during startup
 51 |       // This ensures we know the real paths and can validate against them later
 52 |       const resolved = await fs.realpath(absolute);
 53 |       return normalizePath(resolved);
 54 |     } catch (error) {
 55 |       // If we can't resolve (doesn't exist), use the normalized absolute path
 56 |       // This allows configuring allowed dirs that will be created later
 57 |       return normalizePath(absolute);
 58 |     }
 59 |   })
 60 | );
 61 | 
 62 | // Validate that all directories exist and are accessible
 63 | await Promise.all(allowedDirectories.map(async (dir) => {
 64 |   try {
 65 |     const stats = await fs.stat(dir);
 66 |     if (!stats.isDirectory()) {
 67 |       console.error(`Error: ${dir} is not a directory`);
 68 |       process.exit(1);
 69 |     }
 70 |   } catch (error) {
 71 |     console.error(`Error accessing directory ${dir}:`, error);
 72 |     process.exit(1);
 73 |   }
 74 | }));
 75 | 
 76 | // Initialize the global allowedDirectories in lib.ts
 77 | setAllowedDirectories(allowedDirectories);
 78 | 
 79 | // Schema definitions
 80 | const ReadTextFileArgsSchema = z.object({
 81 |   path: z.string(),
 82 |   tail: z.number().optional().describe('If provided, returns only the last N lines of the file'),
 83 |   head: z.number().optional().describe('If provided, returns only the first N lines of the file')
 84 | });
 85 | 
 86 | const ReadMediaFileArgsSchema = z.object({
 87 |   path: z.string()
 88 | });
 89 | 
 90 | const ReadMultipleFilesArgsSchema = z.object({
 91 |   paths: z
 92 |     .array(z.string())
 93 |     .min(1, "At least one file path must be provided")
 94 |     .describe("Array of file paths to read. Each path must be a string pointing to a valid file within allowed directories."),
 95 | });
 96 | 
 97 | const WriteFileArgsSchema = z.object({
 98 |   path: z.string(),
 99 |   content: z.string(),
100 | });
101 | 
102 | const EditOperation = z.object({
103 |   oldText: z.string().describe('Text to search for - must match exactly'),
104 |   newText: z.string().describe('Text to replace with')
105 | });
106 | 
107 | const EditFileArgsSchema = z.object({
108 |   path: z.string(),
109 |   edits: z.array(EditOperation),
110 |   dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
111 | });
112 | 
113 | const CreateDirectoryArgsSchema = z.object({
114 |   path: z.string(),
115 | });
116 | 
117 | const ListDirectoryArgsSchema = z.object({
118 |   path: z.string(),
119 | });
120 | 
121 | const ListDirectoryWithSizesArgsSchema = z.object({
122 |   path: z.string(),
123 |   sortBy: z.enum(['name', 'size']).optional().default('name').describe('Sort entries by name or size'),
124 | });
125 | 
126 | const DirectoryTreeArgsSchema = z.object({
127 |   path: z.string(),
128 |   excludePatterns: z.array(z.string()).optional().default([])
129 | });
130 | 
131 | const MoveFileArgsSchema = z.object({
132 |   source: z.string(),
133 |   destination: z.string(),
134 | });
135 | 
136 | const SearchFilesArgsSchema = z.object({
137 |   path: z.string(),
138 |   pattern: z.string(),
139 |   excludePatterns: z.array(z.string()).optional().default([])
140 | });
141 | 
142 | const GetFileInfoArgsSchema = z.object({
143 |   path: z.string(),
144 | });
145 | 
146 | const ToolInputSchema = ToolSchema.shape.inputSchema;
147 | type ToolInput = z.infer<typeof ToolInputSchema>;
148 | 
149 | // Server setup
150 | const server = new Server(
151 |   {
152 |     name: "secure-filesystem-server",
153 |     version: "0.2.0",
154 |   },
155 |   {
156 |     capabilities: {
157 |       tools: {},
158 |     },
159 |   },
160 | );
161 | 
162 | // Reads a file as a stream of buffers, concatenates them, and then encodes
163 | // the result to a Base64 string. This is a memory-efficient way to handle
164 | // binary data from a stream before the final encoding.
165 | async function readFileAsBase64Stream(filePath: string): Promise<string> {
166 |   return new Promise((resolve, reject) => {
167 |     const stream = createReadStream(filePath);
168 |     const chunks: Buffer[] = [];
169 |     stream.on('data', (chunk) => {
170 |       chunks.push(chunk as Buffer);
171 |     });
172 |     stream.on('end', () => {
173 |       const finalBuffer = Buffer.concat(chunks);
174 |       resolve(finalBuffer.toString('base64'));
175 |     });
176 |     stream.on('error', (err) => reject(err));
177 |   });
178 | }
179 | 
180 | // Tool handlers
181 | server.setRequestHandler(ListToolsRequestSchema, async () => {
182 |   return {
183 |     tools: [
184 |       {
185 |         name: "read_file",
186 |         description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.",
187 |         inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
188 |       },
189 |       {
190 |         name: "read_text_file",
191 |         description:
192 |           "Read the complete contents of a file from the file system as text. " +
193 |           "Handles various text encodings and provides detailed error messages " +
194 |           "if the file cannot be read. Use this tool when you need to examine " +
195 |           "the contents of a single file. Use the 'head' parameter to read only " +
196 |           "the first N lines of a file, or the 'tail' parameter to read only " +
197 |           "the last N lines of a file. Operates on the file as text regardless of extension. " +
198 |           "Only works within allowed directories.",
199 |         inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
200 |       },
201 |       {
202 |         name: "read_media_file",
203 |         description:
204 |           "Read an image or audio file. Returns the base64 encoded data and MIME type. " +
205 |           "Only works within allowed directories.",
206 |         inputSchema: zodToJsonSchema(ReadMediaFileArgsSchema) as ToolInput,
207 |       },
208 |       {
209 |         name: "read_multiple_files",
210 |         description:
211 |           "Read the contents of multiple files simultaneously. This is more " +
212 |           "efficient than reading files one by one when you need to analyze " +
213 |           "or compare multiple files. Each file's content is returned with its " +
214 |           "path as a reference. Failed reads for individual files won't stop " +
215 |           "the entire operation. Only works within allowed directories.",
216 |         inputSchema: zodToJsonSchema(ReadMultipleFilesArgsSchema) as ToolInput,
217 |       },
218 |       {
219 |         name: "write_file",
220 |         description:
221 |           "Create a new file or completely overwrite an existing file with new content. " +
222 |           "Use with caution as it will overwrite existing files without warning. " +
223 |           "Handles text content with proper encoding. Only works within allowed directories.",
224 |         inputSchema: zodToJsonSchema(WriteFileArgsSchema) as ToolInput,
225 |       },
226 |       {
227 |         name: "edit_file",
228 |         description:
229 |           "Make line-based edits to a text file. Each edit replaces exact line sequences " +
230 |           "with new content. Returns a git-style diff showing the changes made. " +
231 |           "Only works within allowed directories.",
232 |         inputSchema: zodToJsonSchema(EditFileArgsSchema) as ToolInput,
233 |       },
234 |       {
235 |         name: "create_directory",
236 |         description:
237 |           "Create a new directory or ensure a directory exists. Can create multiple " +
238 |           "nested directories in one operation. If the directory already exists, " +
239 |           "this operation will succeed silently. Perfect for setting up directory " +
240 |           "structures for projects or ensuring required paths exist. Only works within allowed directories.",
241 |         inputSchema: zodToJsonSchema(CreateDirectoryArgsSchema) as ToolInput,
242 |       },
243 |       {
244 |         name: "list_directory",
245 |         description:
246 |           "Get a detailed listing of all files and directories in a specified path. " +
247 |           "Results clearly distinguish between files and directories with [FILE] and [DIR] " +
248 |           "prefixes. This tool is essential for understanding directory structure and " +
249 |           "finding specific files within a directory. Only works within allowed directories.",
250 |         inputSchema: zodToJsonSchema(ListDirectoryArgsSchema) as ToolInput,
251 |       },
252 |       {
253 |         name: "list_directory_with_sizes",
254 |         description:
255 |           "Get a detailed listing of all files and directories in a specified path, including sizes. " +
256 |           "Results clearly distinguish between files and directories with [FILE] and [DIR] " +
257 |           "prefixes. This tool is useful for understanding directory structure and " +
258 |           "finding specific files within a directory. Only works within allowed directories.",
259 |         inputSchema: zodToJsonSchema(ListDirectoryWithSizesArgsSchema) as ToolInput,
260 |       },
261 |       {
262 |         name: "directory_tree",
263 |         description:
264 |             "Get a recursive tree view of files and directories as a JSON structure. " +
265 |             "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
266 |             "Files have no children array, while directories always have a children array (which may be empty). " +
267 |             "The output is formatted with 2-space indentation for readability. Only works within allowed directories.",
268 |         inputSchema: zodToJsonSchema(DirectoryTreeArgsSchema) as ToolInput,
269 |       },
270 |       {
271 |         name: "move_file",
272 |         description:
273 |           "Move or rename files and directories. Can move files between directories " +
274 |           "and rename them in a single operation. If the destination exists, the " +
275 |           "operation will fail. Works across different directories and can be used " +
276 |           "for simple renaming within the same directory. Both source and destination must be within allowed directories.",
277 |         inputSchema: zodToJsonSchema(MoveFileArgsSchema) as ToolInput,
278 |       },
279 |       {
280 |         name: "search_files",
281 |         description:
282 |           "Recursively search for files and directories matching a pattern. " +
283 |           "The patterns should be glob-style patterns that match paths relative to the working directory. " +
284 |           "Use pattern like '*.ext' to match files in current directory, and '**/*.ext' to match files in all subdirectories. " +
285 |           "Returns full paths to all matching items. Great for finding files when you don't know their exact location. " +
286 |           "Only searches within allowed directories.",
287 |         inputSchema: zodToJsonSchema(SearchFilesArgsSchema) as ToolInput,
288 |       },
289 |       {
290 |         name: "get_file_info",
291 |         description:
292 |           "Retrieve detailed metadata about a file or directory. Returns comprehensive " +
293 |           "information including size, creation time, last modified time, permissions, " +
294 |           "and type. This tool is perfect for understanding file characteristics " +
295 |           "without reading the actual content. Only works within allowed directories.",
296 |         inputSchema: zodToJsonSchema(GetFileInfoArgsSchema) as ToolInput,
297 |       },
298 |       {
299 |         name: "list_allowed_directories",
300 |         description:
301 |           "Returns the list of directories that this server is allowed to access. " +
302 |           "Subdirectories within these allowed directories are also accessible. " +
303 |           "Use this to understand which directories and their nested paths are available " +
304 |           "before trying to access files.",
305 |         inputSchema: {
306 |           type: "object",
307 |           properties: {},
308 |           required: [],
309 |         },
310 |       },
311 |     ],
312 |   };
313 | });
314 | 
315 | 
316 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
317 |   try {
318 |     const { name, arguments: args } = request.params;
319 | 
320 |     switch (name) {
321 |       case "read_file":
322 |       case "read_text_file": {
323 |         const parsed = ReadTextFileArgsSchema.safeParse(args);
324 |         if (!parsed.success) {
325 |           throw new Error(`Invalid arguments for read_text_file: ${parsed.error}`);
326 |         }
327 |         const validPath = await validatePath(parsed.data.path);
328 | 
329 |         if (parsed.data.head && parsed.data.tail) {
330 |           throw new Error("Cannot specify both head and tail parameters simultaneously");
331 |         }
332 | 
333 |         if (parsed.data.tail) {
334 |           // Use memory-efficient tail implementation for large files
335 |           const tailContent = await tailFile(validPath, parsed.data.tail);
336 |           return {
337 |             content: [{ type: "text", text: tailContent }],
338 |           };
339 |         }
340 | 
341 |         if (parsed.data.head) {
342 |           // Use memory-efficient head implementation for large files
343 |           const headContent = await headFile(validPath, parsed.data.head);
344 |           return {
345 |             content: [{ type: "text", text: headContent }],
346 |           };
347 |         }
348 |         const content = await readFileContent(validPath);
349 |         return {
350 |           content: [{ type: "text", text: content }],
351 |         };
352 |       }
353 | 
354 |       case "read_media_file": {
355 |         const parsed = ReadMediaFileArgsSchema.safeParse(args);
356 |         if (!parsed.success) {
357 |           throw new Error(`Invalid arguments for read_media_file: ${parsed.error}`);
358 |         }
359 |         const validPath = await validatePath(parsed.data.path);
360 |         const extension = path.extname(validPath).toLowerCase();
361 |         const mimeTypes: Record<string, string> = {
362 |           ".png": "image/png",
363 |           ".jpg": "image/jpeg",
364 |           ".jpeg": "image/jpeg",
365 |           ".gif": "image/gif",
366 |           ".webp": "image/webp",
367 |           ".bmp": "image/bmp",
368 |           ".svg": "image/svg+xml",
369 |           ".mp3": "audio/mpeg",
370 |           ".wav": "audio/wav",
371 |           ".ogg": "audio/ogg",
372 |           ".flac": "audio/flac",
373 |         };
374 |         const mimeType = mimeTypes[extension] || "application/octet-stream";
375 |         const data = await readFileAsBase64Stream(validPath);
376 |         const type = mimeType.startsWith("image/")
377 |           ? "image"
378 |           : mimeType.startsWith("audio/")
379 |             ? "audio"
380 |             : "blob";
381 |         return {
382 |           content: [{ type, data, mimeType }],
383 |         };
384 |       }
385 | 
386 |       case "read_multiple_files": {
387 |         const parsed = ReadMultipleFilesArgsSchema.safeParse(args);
388 |         if (!parsed.success) {
389 |           throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`);
390 |         }
391 |         const results = await Promise.all(
392 |           parsed.data.paths.map(async (filePath: string) => {
393 |             try {
394 |               const validPath = await validatePath(filePath);
395 |               const content = await readFileContent(validPath);
396 |               return `${filePath}:\n${content}\n`;
397 |             } catch (error) {
398 |               const errorMessage = error instanceof Error ? error.message : String(error);
399 |               return `${filePath}: Error - ${errorMessage}`;
400 |             }
401 |           }),
402 |         );
403 |         return {
404 |           content: [{ type: "text", text: results.join("\n---\n") }],
405 |         };
406 |       }
407 | 
408 |       case "write_file": {
409 |         const parsed = WriteFileArgsSchema.safeParse(args);
410 |         if (!parsed.success) {
411 |           throw new Error(`Invalid arguments for write_file: ${parsed.error}`);
412 |         }
413 |         const validPath = await validatePath(parsed.data.path);
414 |         await writeFileContent(validPath, parsed.data.content);
415 |         return {
416 |           content: [{ type: "text", text: `Successfully wrote to ${parsed.data.path}` }],
417 |         };
418 |       }
419 | 
420 |       case "edit_file": {
421 |         const parsed = EditFileArgsSchema.safeParse(args);
422 |         if (!parsed.success) {
423 |           throw new Error(`Invalid arguments for edit_file: ${parsed.error}`);
424 |         }
425 |         const validPath = await validatePath(parsed.data.path);
426 |         const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun);
427 |         return {
428 |           content: [{ type: "text", text: result }],
429 |         };
430 |       }
431 | 
432 |       case "create_directory": {
433 |         const parsed = CreateDirectoryArgsSchema.safeParse(args);
434 |         if (!parsed.success) {
435 |           throw new Error(`Invalid arguments for create_directory: ${parsed.error}`);
436 |         }
437 |         const validPath = await validatePath(parsed.data.path);
438 |         await fs.mkdir(validPath, { recursive: true });
439 |         return {
440 |           content: [{ type: "text", text: `Successfully created directory ${parsed.data.path}` }],
441 |         };
442 |       }
443 | 
444 |       case "list_directory": {
445 |         const parsed = ListDirectoryArgsSchema.safeParse(args);
446 |         if (!parsed.success) {
447 |           throw new Error(`Invalid arguments for list_directory: ${parsed.error}`);
448 |         }
449 |         const validPath = await validatePath(parsed.data.path);
450 |         const entries = await fs.readdir(validPath, { withFileTypes: true });
451 |         const formatted = entries
452 |           .map((entry) => `${entry.isDirectory() ? "[DIR]" : "[FILE]"} ${entry.name}`)
453 |           .join("\n");
454 |         return {
455 |           content: [{ type: "text", text: formatted }],
456 |         };
457 |       }
458 | 
459 |       case "list_directory_with_sizes": {
460 |         const parsed = ListDirectoryWithSizesArgsSchema.safeParse(args);
461 |         if (!parsed.success) {
462 |           throw new Error(`Invalid arguments for list_directory_with_sizes: ${parsed.error}`);
463 |         }
464 |         const validPath = await validatePath(parsed.data.path);
465 |         const entries = await fs.readdir(validPath, { withFileTypes: true });
466 | 
467 |         // Get detailed information for each entry
468 |         const detailedEntries = await Promise.all(
469 |           entries.map(async (entry) => {
470 |             const entryPath = path.join(validPath, entry.name);
471 |             try {
472 |               const stats = await fs.stat(entryPath);
473 |               return {
474 |                 name: entry.name,
475 |                 isDirectory: entry.isDirectory(),
476 |                 size: stats.size,
477 |                 mtime: stats.mtime
478 |               };
479 |             } catch (error) {
480 |               return {
481 |                 name: entry.name,
482 |                 isDirectory: entry.isDirectory(),
483 |                 size: 0,
484 |                 mtime: new Date(0)
485 |               };
486 |             }
487 |           })
488 |         );
489 | 
490 |         // Sort entries based on sortBy parameter
491 |         const sortedEntries = [...detailedEntries].sort((a, b) => {
492 |           if (parsed.data.sortBy === 'size') {
493 |             return b.size - a.size; // Descending by size
494 |           }
495 |           // Default sort by name
496 |           return a.name.localeCompare(b.name);
497 |         });
498 | 
499 |         // Format the output
500 |         const formattedEntries = sortedEntries.map(entry =>
501 |           `${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${
502 |             entry.isDirectory ? "" : formatSize(entry.size).padStart(10)
503 |           }`
504 |         );
505 | 
506 |         // Add summary
507 |         const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;
508 |         const totalDirs = detailedEntries.filter(e => e.isDirectory).length;
509 |         const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);
510 | 
511 |         const summary = [
512 |           "",
513 |           `Total: ${totalFiles} files, ${totalDirs} directories`,
514 |           `Combined size: ${formatSize(totalSize)}`
515 |         ];
516 | 
517 |         return {
518 |           content: [{
519 |             type: "text",
520 |             text: [...formattedEntries, ...summary].join("\n")
521 |           }],
522 |         };
523 |       }
524 | 
525 |       case "directory_tree": {
526 |         const parsed = DirectoryTreeArgsSchema.safeParse(args);
527 |         if (!parsed.success) {
528 |           throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`);
529 |         }
530 | 
531 |         interface TreeEntry {
532 |             name: string;
533 |             type: 'file' | 'directory';
534 |             children?: TreeEntry[];
535 |         }
536 |         const rootPath = parsed.data.path;
537 | 
538 |         async function buildTree(currentPath: string, excludePatterns: string[] = []): Promise<TreeEntry[]> {
539 |             const validPath = await validatePath(currentPath);
540 |             const entries = await fs.readdir(validPath, {withFileTypes: true});
541 |             const result: TreeEntry[] = [];
542 | 
543 |             for (const entry of entries) {
544 |                 const relativePath = path.relative(rootPath, path.join(currentPath, entry.name));
545 |                 const shouldExclude = excludePatterns.some(pattern => {
546 |                     if (pattern.includes('*')) {
547 |                         return minimatch(relativePath, pattern, {dot: true});
548 |                     }
549 |                     // For files: match exact name or as part of path
550 |                     // For directories: match as directory path
551 |                     return minimatch(relativePath, pattern, {dot: true}) ||
552 |                            minimatch(relativePath, `**/${pattern}`, {dot: true}) ||
553 |                            minimatch(relativePath, `**/${pattern}/**`, {dot: true});
554 |                 });
555 |                 if (shouldExclude)
556 |                     continue;
557 | 
558 |                 const entryData: TreeEntry = {
559 |                     name: entry.name,
560 |                     type: entry.isDirectory() ? 'directory' : 'file'
561 |                 };
562 | 
563 |                 if (entry.isDirectory()) {
564 |                     const subPath = path.join(currentPath, entry.name);
565 |                     entryData.children = await buildTree(subPath, excludePatterns);
566 |                 }
567 | 
568 |                 result.push(entryData);
569 |             }
570 | 
571 |             return result;
572 |         }
573 | 
574 |         const treeData = await buildTree(rootPath, parsed.data.excludePatterns);
575 |         return {
576 |             content: [{
577 |                 type: "text",
578 |                 text: JSON.stringify(treeData, null, 2)
579 |             }],
580 |         };
581 |       }
582 | 
583 |       case "move_file": {
584 |         const parsed = MoveFileArgsSchema.safeParse(args);
585 |         if (!parsed.success) {
586 |           throw new Error(`Invalid arguments for move_file: ${parsed.error}`);
587 |         }
588 |         const validSourcePath = await validatePath(parsed.data.source);
589 |         const validDestPath = await validatePath(parsed.data.destination);
590 |         await fs.rename(validSourcePath, validDestPath);
591 |         return {
592 |           content: [{ type: "text", text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }],
593 |         };
594 |       }
595 | 
596 |       case "search_files": {
597 |         const parsed = SearchFilesArgsSchema.safeParse(args);
598 |         if (!parsed.success) {
599 |           throw new Error(`Invalid arguments for search_files: ${parsed.error}`);
600 |         }
601 |         const validPath = await validatePath(parsed.data.path);
602 |         const results = await searchFilesWithValidation(validPath, parsed.data.pattern, allowedDirectories, { excludePatterns: parsed.data.excludePatterns });
603 |         return {
604 |           content: [{ type: "text", text: results.length > 0 ? results.join("\n") : "No matches found" }],
605 |         };
606 |       }
607 | 
608 |       case "get_file_info": {
609 |         const parsed = GetFileInfoArgsSchema.safeParse(args);
610 |         if (!parsed.success) {
611 |           throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`);
612 |         }
613 |         const validPath = await validatePath(parsed.data.path);
614 |         const info = await getFileStats(validPath);
615 |         return {
616 |           content: [{ type: "text", text: Object.entries(info)
617 |             .map(([key, value]) => `${key}: ${value}`)
618 |             .join("\n") }],
619 |         };
620 |       }
621 | 
622 |       case "list_allowed_directories": {
623 |         return {
624 |           content: [{
625 |             type: "text",
626 |             text: `Allowed directories:\n${allowedDirectories.join('\n')}`
627 |           }],
628 |         };
629 |       }
630 | 
631 |       default:
632 |         throw new Error(`Unknown tool: ${name}`);
633 |     }
634 |   } catch (error) {
635 |     const errorMessage = error instanceof Error ? error.message : String(error);
636 |     return {
637 |       content: [{ type: "text", text: `Error: ${errorMessage}` }],
638 |       isError: true,
639 |     };
640 |   }
641 | });
642 | 
643 | // Updates allowed directories based on MCP client roots
644 | async function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) {
645 |   const validatedRootDirs = await getValidRootDirectories(requestedRoots);
646 |   if (validatedRootDirs.length > 0) {
647 |     allowedDirectories = [...validatedRootDirs];
648 |     setAllowedDirectories(allowedDirectories); // Update the global state in lib.ts
649 |     console.error(`Updated allowed directories from MCP roots: ${validatedRootDirs.length} valid directories`);
650 |   } else {
651 |     console.error("No valid root directories provided by client");
652 |   }
653 | }
654 | 
655 | // Handles dynamic roots updates during runtime, when client sends "roots/list_changed" notification, server fetches the updated roots and replaces all allowed directories with the new roots.
656 | server.setNotificationHandler(RootsListChangedNotificationSchema, async () => {
657 |   try {
658 |     // Request the updated roots list from the client
659 |     const response = await server.listRoots();
660 |     if (response && 'roots' in response) {
661 |       await updateAllowedDirectoriesFromRoots(response.roots);
662 |     }
663 |   } catch (error) {
664 |     console.error("Failed to request roots from client:", error instanceof Error ? error.message : String(error));
665 |   }
666 | });
667 | 
668 | // Handles post-initialization setup, specifically checking for and fetching MCP roots.
669 | server.oninitialized = async () => {
670 |   const clientCapabilities = server.getClientCapabilities();
671 | 
672 |   if (clientCapabilities?.roots) {
673 |     try {
674 |       const response = await server.listRoots();
675 |       if (response && 'roots' in response) {
676 |         await updateAllowedDirectoriesFromRoots(response.roots);
677 |       } else {
678 |         console.error("Client returned no roots set, keeping current settings");
679 |       }
680 |     } catch (error) {
681 |       console.error("Failed to request initial roots from client:", error instanceof Error ? error.message : String(error));
682 |     }
683 |   } else {
684 |     if (allowedDirectories.length > 0) {
685 |       console.error("Client does not support MCP Roots, using allowed directories set from server args:", allowedDirectories);
686 |     }else{
687 |       throw new Error(`Server cannot operate: No allowed directories available. Server was started without command-line directories and client either does not support MCP roots protocol or provided empty roots. Please either: 1) Start server with directory arguments, or 2) Use a client that supports MCP roots protocol and provides valid root directories.`);
688 |     }
689 |   }
690 | };
691 | 
692 | // Start server
693 | async function runServer() {
694 |   const transport = new StdioServerTransport();
695 |   await server.connect(transport);
696 |   console.error("Secure MCP Filesystem Server running on stdio");
697 |   if (allowedDirectories.length === 0) {
698 |     console.error("Started without allowed directories - waiting for client to provide roots via MCP protocol");
699 |   }
700 | }
701 | 
702 | runServer().catch((error) => {
703 |   console.error("Fatal error running server:", error);
704 |   process.exit(1);
705 | });
706 | 
```

--------------------------------------------------------------------------------
/src/everything/everything.ts:
--------------------------------------------------------------------------------

```typescript
   1 | import { Server } from "@modelcontextprotocol/sdk/server/index.js";
   2 | import type { RequestHandlerExtra } from "@modelcontextprotocol/sdk/shared/protocol.js";
   3 | import {
   4 |   CallToolRequestSchema,
   5 |   ClientCapabilities,
   6 |   CompleteRequestSchema,
   7 |   CreateMessageRequest,
   8 |   CreateMessageResultSchema,
   9 |   ElicitRequest,
  10 |   ElicitResultSchema,
  11 |   GetPromptRequestSchema,
  12 |   ListPromptsRequestSchema,
  13 |   ListResourcesRequestSchema,
  14 |   ListResourceTemplatesRequestSchema,
  15 |   ListToolsRequestSchema,
  16 |   LoggingLevel,
  17 |   ReadResourceRequestSchema,
  18 |   Resource,
  19 |   RootsListChangedNotificationSchema,
  20 |   ServerNotification,
  21 |   ServerRequest,
  22 |   SubscribeRequestSchema,
  23 |   Tool,
  24 |   ToolSchema,
  25 |   UnsubscribeRequestSchema,
  26 |   type Root
  27 | } from "@modelcontextprotocol/sdk/types.js";
  28 | import { z } from "zod";
  29 | import { zodToJsonSchema } from "zod-to-json-schema";
  30 | import { readFileSync } from "fs";
  31 | import { fileURLToPath } from "url";
  32 | import { dirname, join } from "path";
  33 | import JSZip from "jszip";
  34 | 
// Resolve this module's own path so instructions.md can be read relative to it
// (ESM has no built-in __filename/__dirname).
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Server usage instructions, shipped next to the compiled module and passed to
// the Server constructor below.
const instructions = readFileSync(join(__dirname, "instructions.md"), "utf-8");

// JSON-Schema-shaped input/output types derived from the SDK's Tool schema;
// used to cast zodToJsonSchema() results in the tool listings.
const ToolInputSchema = ToolSchema.shape.inputSchema;
type ToolInput = z.infer<typeof ToolInputSchema>;

const ToolOutputSchema = ToolSchema.shape.outputSchema;
type ToolOutput = z.infer<typeof ToolOutputSchema>;

// Shorthand for the request-sending callback handed to request handlers via `extra`.
type SendRequest = RequestHandlerExtra<ServerRequest, ServerNotification>["sendRequest"];
  46 | 
  47 | /* Input schemas for tools implemented in this server */
  48 | const EchoSchema = z.object({
  49 |   message: z.string().describe("Message to echo"),
  50 | });
  51 | 
  52 | const AddSchema = z.object({
  53 |   a: z.number().describe("First number"),
  54 |   b: z.number().describe("Second number"),
  55 | });
  56 | 
  57 | const LongRunningOperationSchema = z.object({
  58 |   duration: z
  59 |     .number()
  60 |     .default(10)
  61 |     .describe("Duration of the operation in seconds"),
  62 |   steps: z
  63 |     .number()
  64 |     .default(5)
  65 |     .describe("Number of steps in the operation"),
  66 | });
  67 | 
  68 | const PrintEnvSchema = z.object({});
  69 | 
  70 | const SampleLLMSchema = z.object({
  71 |   prompt: z.string().describe("The prompt to send to the LLM"),
  72 |   maxTokens: z
  73 |     .number()
  74 |     .default(100)
  75 |     .describe("Maximum number of tokens to generate"),
  76 | });
  77 | 
  78 | const GetTinyImageSchema = z.object({});
  79 | 
  80 | const AnnotatedMessageSchema = z.object({
  81 |   messageType: z
  82 |     .enum(["error", "success", "debug"])
  83 |     .describe("Type of message to demonstrate different annotation patterns"),
  84 |   includeImage: z
  85 |     .boolean()
  86 |     .default(false)
  87 |     .describe("Whether to include an example image"),
  88 | });
  89 | 
  90 | const GetResourceReferenceSchema = z.object({
  91 |   resourceId: z
  92 |     .number()
  93 |     .min(1)
  94 |     .max(100)
  95 |     .describe("ID of the resource to reference (1-100)"),
  96 | });
  97 | 
  98 | const ElicitationSchema = z.object({});
  99 | 
 100 | const GetResourceLinksSchema = z.object({
 101 |   count: z
 102 |     .number()
 103 |     .min(1)
 104 |     .max(10)
 105 |     .default(3)
 106 |     .describe("Number of resource links to return (1-10)"),
 107 | });
 108 | 
 109 | const ListRootsSchema = z.object({});
 110 | 
 111 | const StructuredContentSchema = {
 112 |   input: z.object({
 113 |     location: z
 114 |       .string()
 115 |       .trim()
 116 |       .min(1)
 117 |       .describe("City name or zip code"),
 118 |   }),
 119 | 
 120 |   output: z.object({
 121 |     temperature: z
 122 |       .number()
 123 |       .describe("Temperature in celsius"),
 124 |     conditions: z
 125 |       .string()
 126 |       .describe("Weather conditions description"),
 127 |     humidity: z
 128 |       .number()
 129 |       .describe("Humidity percentage"),
 130 |   })
 131 | };
 132 | 
 133 | const ZipResourcesInputSchema = z.object({
 134 |   files: z.record(z.string().url().describe("URL of the file to include in the zip")).describe("Mapping of file names to URLs to include in the zip"),
 135 | });
 136 | 
// Names of every tool this server can expose. LIST_ROOTS and ELICITATION are
// only advertised when the client declares the matching capability (see the
// ListTools handler in createServer).
enum ToolName {
  ECHO = "echo",
  ADD = "add",
  LONG_RUNNING_OPERATION = "longRunningOperation",
  PRINT_ENV = "printEnv",
  SAMPLE_LLM = "sampleLLM",
  GET_TINY_IMAGE = "getTinyImage",
  ANNOTATED_MESSAGE = "annotatedMessage",
  GET_RESOURCE_REFERENCE = "getResourceReference",
  ELICITATION = "startElicitation",
  GET_RESOURCE_LINKS = "getResourceLinks",
  STRUCTURED_CONTENT = "structuredContent",
  ZIP_RESOURCES = "zip",
  LIST_ROOTS = "listRoots"
}

// Names of the example prompts served by the prompts/list and prompts/get handlers.
enum PromptName {
  SIMPLE = "simple_prompt",
  COMPLEX = "complex_prompt",
  RESOURCE = "resource_prompt",
}

// Example completion values
// NOTE(review): presumably consumed by the completion/complete handler
// (CompleteRequestSchema is imported above, handler not visible in this chunk) — confirm.
const EXAMPLE_COMPLETIONS = {
  style: ["casual", "formal", "technical", "friendly"],
  temperature: ["0", "0.5", "0.7", "1.0"],
  resourceId: ["1", "2", "3", "4", "5"],
};
 165 | 
 166 | export const createServer = () => {
 167 |   const server = new Server(
 168 |     {
 169 |       name: "example-servers/everything",
 170 |       title: "Everything Example Server",
 171 |       version: "1.0.0",
 172 |     },
 173 |     {
 174 |       capabilities: {
 175 |         prompts: {},
 176 |         resources: { subscribe: true },
 177 |         tools: {},
 178 |         logging: {},
 179 |         completions: {}
 180 |       },
 181 |       instructions
 182 |     }
 183 |   );
 184 | 
 185 |   let subscriptions: Set<string> = new Set();
 186 |   let subsUpdateInterval: NodeJS.Timeout | undefined;
 187 |   let stdErrUpdateInterval: NodeJS.Timeout | undefined;
 188 | 
 189 |   let logsUpdateInterval: NodeJS.Timeout | undefined;
 190 |   // Store client capabilities
 191 |   let clientCapabilities: ClientCapabilities | undefined;
 192 | 
 193 |   // Roots state management
 194 |   let currentRoots: Root[] = [];
 195 |   let clientSupportsRoots = false;
 196 |   let sessionId: string | undefined;
 197 | 
    // Function to start notification intervals when a client connects
  const startNotificationIntervals = (sid?: string|undefined) => {
      sessionId = sid;
      // Every 10s, emit a resources/updated notification for each subscribed URI.
      // Guarded so reconnects don't stack multiple intervals.
      if (!subsUpdateInterval) {
        subsUpdateInterval = setInterval(() => {
          for (const uri of subscriptions) {
            server.notification({
              method: "notifications/resources/updated",
              params: { uri },
            });
          }
        }, 10000);
      }

      // Suffix log lines with the session id (when present) so messages from
      // different sessions are distinguishable on multi-session transports.
      const maybeAppendSessionId = sessionId ? ` - SessionId ${sessionId}`: "";
      // One canned message per logging severity level defined by MCP.
      const messages: { level: LoggingLevel; data: string }[] = [
          { level: "debug", data: `Debug-level message${maybeAppendSessionId}` },
          { level: "info", data: `Info-level message${maybeAppendSessionId}` },
          { level: "notice", data: `Notice-level message${maybeAppendSessionId}` },
          { level: "warning", data: `Warning-level message${maybeAppendSessionId}` },
          { level: "error", data: `Error-level message${maybeAppendSessionId}` },
          { level: "critical", data: `Critical-level message${maybeAppendSessionId}` },
          { level: "alert", data: `Alert level-message${maybeAppendSessionId}` },
          { level: "emergency", data: `Emergency-level message${maybeAppendSessionId}` },
      ];

      // Every 15s, send one randomly chosen log message; also guarded against
      // duplicate intervals on reconnect.
      if (!logsUpdateInterval) {
          console.error("Starting logs update interval");
          logsUpdateInterval = setInterval(async () => {
          await server.sendLoggingMessage( messages[Math.floor(Math.random() * messages.length)], sessionId);
      }, 15000);
    }
  };
 231 | 
 232 |   // Helper method to request sampling from client
 233 |   const requestSampling = async (
 234 |     context: string,
 235 |     uri: string,
 236 |     maxTokens: number = 100,
 237 |     sendRequest: SendRequest
 238 |   ) => {
 239 |     const request: CreateMessageRequest = {
 240 |       method: "sampling/createMessage",
 241 |       params: {
 242 |         messages: [
 243 |           {
 244 |             role: "user",
 245 |             content: {
 246 |               type: "text",
 247 |               text: `Resource ${uri} context: ${context}`,
 248 |             },
 249 |           },
 250 |         ],
 251 |         systemPrompt: "You are a helpful test server.",
 252 |         maxTokens,
 253 |         temperature: 0.7,
 254 |         includeContext: "thisServer",
 255 |       },
 256 |     };
 257 | 
 258 |     return await sendRequest(request, CreateMessageResultSchema);
 259 | 
 260 |   };
 261 | 
 262 |   const requestElicitation = async (
 263 |     message: string,
 264 |     requestedSchema: any,
 265 |     sendRequest: SendRequest
 266 |   ) => {
 267 |     const request: ElicitRequest = {
 268 |       method: 'elicitation/create',
 269 |       params: {
 270 |         message,
 271 |         requestedSchema,
 272 |       },
 273 |     };
 274 | 
 275 |     return await sendRequest(request, ElicitResultSchema);
 276 |   };
 277 | 
 278 |   const ALL_RESOURCES: Resource[] = Array.from({ length: 100 }, (_, i) => {
 279 |     const uri = `test://static/resource/${i + 1}`;
 280 |     if (i % 2 === 0) {
 281 |       return {
 282 |         uri,
 283 |         name: `Resource ${i + 1}`,
 284 |         mimeType: "text/plain",
 285 |         text: `Resource ${i + 1}: This is a plaintext resource`,
 286 |       };
 287 |     } else {
 288 |       const buffer = Buffer.from(`Resource ${i + 1}: This is a base64 blob`);
 289 |       return {
 290 |         uri,
 291 |         name: `Resource ${i + 1}`,
 292 |         mimeType: "application/octet-stream",
 293 |         blob: buffer.toString("base64"),
 294 |       };
 295 |     }
 296 |   });
 297 | 
 298 |   const PAGE_SIZE = 10;
 299 | 
 300 |   server.setRequestHandler(ListResourcesRequestSchema, async (request) => {
 301 |     const cursor = request.params?.cursor;
 302 |     let startIndex = 0;
 303 | 
 304 |     if (cursor) {
 305 |       const decodedCursor = parseInt(atob(cursor), 10);
 306 |       if (!isNaN(decodedCursor)) {
 307 |         startIndex = decodedCursor;
 308 |       }
 309 |     }
 310 | 
 311 |     const endIndex = Math.min(startIndex + PAGE_SIZE, ALL_RESOURCES.length);
 312 |     const resources = ALL_RESOURCES.slice(startIndex, endIndex);
 313 | 
 314 |     let nextCursor: string | undefined;
 315 |     if (endIndex < ALL_RESOURCES.length) {
 316 |       nextCursor = btoa(endIndex.toString());
 317 |     }
 318 | 
 319 |     return {
 320 |       resources,
 321 |       nextCursor,
 322 |     };
 323 |   });
 324 | 
 325 |   server.setRequestHandler(ListResourceTemplatesRequestSchema, async () => {
 326 |     return {
 327 |       resourceTemplates: [
 328 |         {
 329 |           uriTemplate: "test://static/resource/{id}",
 330 |           name: "Static Resource",
 331 |           description: "A static resource with a numeric ID",
 332 |         },
 333 |       ],
 334 |     };
 335 |   });
 336 | 
 337 |   server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
 338 |     const uri = request.params.uri;
 339 | 
 340 |     if (uri.startsWith("test://static/resource/")) {
 341 |       const index = parseInt(uri.split("/").pop() ?? "", 10) - 1;
 342 |       if (index >= 0 && index < ALL_RESOURCES.length) {
 343 |         const resource = ALL_RESOURCES[index];
 344 |         return {
 345 |           contents: [resource],
 346 |         };
 347 |       }
 348 |     }
 349 | 
 350 |     throw new Error(`Unknown resource: ${uri}`);
 351 |   });
 352 | 
 353 |   server.setRequestHandler(SubscribeRequestSchema, async (request, extra) => {
 354 |     const { uri } = request.params;
 355 |     subscriptions.add(uri);
 356 |     return {};
 357 |   });
 358 | 
 359 |   server.setRequestHandler(UnsubscribeRequestSchema, async (request) => {
 360 |     subscriptions.delete(request.params.uri);
 361 |     return {};
 362 |   });
 363 | 
 364 |   server.setRequestHandler(ListPromptsRequestSchema, async () => {
 365 |     return {
 366 |       prompts: [
 367 |         {
 368 |           name: PromptName.SIMPLE,
 369 |           description: "A prompt without arguments",
 370 |         },
 371 |         {
 372 |           name: PromptName.COMPLEX,
 373 |           description: "A prompt with arguments",
 374 |           arguments: [
 375 |             {
 376 |               name: "temperature",
 377 |               description: "Temperature setting",
 378 |               required: true,
 379 |             },
 380 |             {
 381 |               name: "style",
 382 |               description: "Output style",
 383 |               required: false,
 384 |             },
 385 |           ],
 386 |         },
 387 |         {
 388 |           name: PromptName.RESOURCE,
 389 |           description: "A prompt that includes an embedded resource reference",
 390 |           arguments: [
 391 |             {
 392 |               name: "resourceId",
 393 |               description: "Resource ID to include (1-100)",
 394 |               required: true,
 395 |             },
 396 |           ],
 397 |         },
 398 |       ],
 399 |     };
 400 |   });
 401 | 
 402 |   server.setRequestHandler(GetPromptRequestSchema, async (request) => {
 403 |     const { name, arguments: args } = request.params;
 404 | 
 405 |     if (name === PromptName.SIMPLE) {
 406 |       return {
 407 |         messages: [
 408 |           {
 409 |             role: "user",
 410 |             content: {
 411 |               type: "text",
 412 |               text: "This is a simple prompt without arguments.",
 413 |             },
 414 |           },
 415 |         ],
 416 |       };
 417 |     }
 418 | 
 419 |     if (name === PromptName.COMPLEX) {
 420 |       return {
 421 |         messages: [
 422 |           {
 423 |             role: "user",
 424 |             content: {
 425 |               type: "text",
 426 |               text: `This is a complex prompt with arguments: temperature=${args?.temperature}, style=${args?.style}`,
 427 |             },
 428 |           },
 429 |           {
 430 |             role: "assistant",
 431 |             content: {
 432 |               type: "text",
 433 |               text: "I understand. You've provided a complex prompt with temperature and style arguments. How would you like me to proceed?",
 434 |             },
 435 |           },
 436 |           {
 437 |             role: "user",
 438 |             content: {
 439 |               type: "image",
 440 |               data: MCP_TINY_IMAGE,
 441 |               mimeType: "image/png",
 442 |             },
 443 |           },
 444 |         ],
 445 |       };
 446 |     }
 447 | 
 448 |     if (name === PromptName.RESOURCE) {
 449 |       const resourceId = parseInt(args?.resourceId as string, 10);
 450 |       if (isNaN(resourceId) || resourceId < 1 || resourceId > 100) {
 451 |         throw new Error(
 452 |           `Invalid resourceId: ${args?.resourceId}. Must be a number between 1 and 100.`
 453 |         );
 454 |       }
 455 | 
 456 |       const resourceIndex = resourceId - 1;
 457 |       const resource = ALL_RESOURCES[resourceIndex];
 458 | 
 459 |       return {
 460 |         messages: [
 461 |           {
 462 |             role: "user",
 463 |             content: {
 464 |               type: "text",
 465 |               text: `This prompt includes Resource ${resourceId}. Please analyze the following resource:`,
 466 |             },
 467 |           },
 468 |           {
 469 |             role: "user",
 470 |             content: {
 471 |               type: "resource",
 472 |               resource: resource,
 473 |             },
 474 |           },
 475 |         ],
 476 |       };
 477 |     }
 478 | 
 479 |     throw new Error(`Unknown prompt: ${name}`);
 480 |   });
 481 | 
 482 |   server.setRequestHandler(ListToolsRequestSchema, async () => {
 483 |     const tools: Tool[] = [
 484 |       {
 485 |         name: ToolName.ECHO,
 486 |         description: "Echoes back the input",
 487 |         inputSchema: zodToJsonSchema(EchoSchema) as ToolInput,
 488 |       },
 489 |       {
 490 |         name: ToolName.ADD,
 491 |         description: "Adds two numbers",
 492 |         inputSchema: zodToJsonSchema(AddSchema) as ToolInput,
 493 |       },
 494 |       {
 495 |         name: ToolName.LONG_RUNNING_OPERATION,
 496 |         description:
 497 |           "Demonstrates a long running operation with progress updates",
 498 |         inputSchema: zodToJsonSchema(LongRunningOperationSchema) as ToolInput,
 499 |       },
 500 |       {
 501 |         name: ToolName.PRINT_ENV,
 502 |         description:
 503 |           "Prints all environment variables, helpful for debugging MCP server configuration",
 504 |         inputSchema: zodToJsonSchema(PrintEnvSchema) as ToolInput,
 505 |       },
 506 |       {
 507 |         name: ToolName.SAMPLE_LLM,
 508 |         description: "Samples from an LLM using MCP's sampling feature",
 509 |         inputSchema: zodToJsonSchema(SampleLLMSchema) as ToolInput,
 510 |       },
 511 |       {
 512 |         name: ToolName.GET_TINY_IMAGE,
 513 |         description: "Returns the MCP_TINY_IMAGE",
 514 |         inputSchema: zodToJsonSchema(GetTinyImageSchema) as ToolInput,
 515 |       },
 516 |       {
 517 |         name: ToolName.ANNOTATED_MESSAGE,
 518 |         description:
 519 |           "Demonstrates how annotations can be used to provide metadata about content",
 520 |         inputSchema: zodToJsonSchema(AnnotatedMessageSchema) as ToolInput,
 521 |       },
 522 |       {
 523 |         name: ToolName.GET_RESOURCE_REFERENCE,
 524 |         description:
 525 |           "Returns a resource reference that can be used by MCP clients",
 526 |         inputSchema: zodToJsonSchema(GetResourceReferenceSchema) as ToolInput,
 527 |       },
 528 |       {
 529 |         name: ToolName.GET_RESOURCE_LINKS,
 530 |         description:
 531 |           "Returns multiple resource links that reference different types of resources",
 532 |         inputSchema: zodToJsonSchema(GetResourceLinksSchema) as ToolInput,
 533 |       },
 534 |       {
 535 |         name: ToolName.STRUCTURED_CONTENT,
 536 |         description:
 537 |           "Returns structured content along with an output schema for client data validation",
 538 |         inputSchema: zodToJsonSchema(StructuredContentSchema.input) as ToolInput,
 539 |         outputSchema: zodToJsonSchema(StructuredContentSchema.output) as ToolOutput,
 540 |       },
 541 |       {
 542 |         name: ToolName.ZIP_RESOURCES,
 543 |         description: "Compresses the provided resource files (mapping of name to URI, which can be a data URI) to a zip file, which it returns as a data URI resource link.",
 544 |         inputSchema: zodToJsonSchema(ZipResourcesInputSchema) as ToolInput,
 545 |       }
 546 |     ];
 547 |     if (clientCapabilities!.roots) tools.push ({
 548 |         name: ToolName.LIST_ROOTS,
 549 |         description:
 550 |             "Lists the current MCP roots provided by the client. Demonstrates the roots protocol capability even though this server doesn't access files.",
 551 |         inputSchema: zodToJsonSchema(ListRootsSchema) as ToolInput,
 552 |     });
 553 |     if (clientCapabilities!.elicitation) tools.push ({
 554 |         name: ToolName.ELICITATION,
 555 |         description: "Demonstrates the Elicitation feature by asking the user to provide information about their favorite color, number, and pets.",
 556 |         inputSchema: zodToJsonSchema(ElicitationSchema) as ToolInput,
 557 |     });
 558 | 
 559 |     return { tools };
 560 |   });
 561 | 
 562 |   server.setRequestHandler(CallToolRequestSchema, async (request,extra) => {
 563 |     const { name, arguments: args } = request.params;
 564 | 
 565 |     if (name === ToolName.ECHO) {
 566 |       const validatedArgs = EchoSchema.parse(args);
 567 |       return {
 568 |         content: [{ type: "text", text: `Echo: ${validatedArgs.message}` }],
 569 |       };
 570 |     }
 571 | 
 572 |     if (name === ToolName.ADD) {
 573 |       const validatedArgs = AddSchema.parse(args);
 574 |       const sum = validatedArgs.a + validatedArgs.b;
 575 |       return {
 576 |         content: [
 577 |           {
 578 |             type: "text",
 579 |             text: `The sum of ${validatedArgs.a} and ${validatedArgs.b} is ${sum}.`,
 580 |           },
 581 |         ],
 582 |       };
 583 |     }
 584 | 
 585 |     if (name === ToolName.LONG_RUNNING_OPERATION) {
 586 |       const validatedArgs = LongRunningOperationSchema.parse(args);
 587 |       const { duration, steps } = validatedArgs;
 588 |       const stepDuration = duration / steps;
 589 |       const progressToken = request.params._meta?.progressToken;
 590 | 
 591 |       for (let i = 1; i < steps + 1; i++) {
 592 |         await new Promise((resolve) =>
 593 |           setTimeout(resolve, stepDuration * 1000)
 594 |         );
 595 | 
 596 |         if (progressToken !== undefined) {
 597 |           await server.notification({
 598 |             method: "notifications/progress",
 599 |             params: {
 600 |               progress: i,
 601 |               total: steps,
 602 |               progressToken,
 603 |             },
 604 |           },{relatedRequestId: extra.requestId});
 605 |         }
 606 |       }
 607 | 
 608 |       return {
 609 |         content: [
 610 |           {
 611 |             type: "text",
 612 |             text: `Long running operation completed. Duration: ${duration} seconds, Steps: ${steps}.`,
 613 |           },
 614 |         ],
 615 |       };
 616 |     }
 617 | 
 618 |     if (name === ToolName.PRINT_ENV) {
 619 |       return {
 620 |         content: [
 621 |           {
 622 |             type: "text",
 623 |             text: JSON.stringify(process.env, null, 2),
 624 |           },
 625 |         ],
 626 |       };
 627 |     }
 628 | 
 629 |     if (name === ToolName.SAMPLE_LLM) {
 630 |       const validatedArgs = SampleLLMSchema.parse(args);
 631 |       const { prompt, maxTokens } = validatedArgs;
 632 | 
 633 |       const result = await requestSampling(
 634 |         prompt,
 635 |         ToolName.SAMPLE_LLM,
 636 |         maxTokens,
 637 |         extra.sendRequest
 638 |       );
 639 |       return {
 640 |         content: [
 641 |           { type: "text", text: `LLM sampling result: ${result.content.text}` },
 642 |         ],
 643 |       };
 644 |     }
 645 | 
 646 |     if (name === ToolName.GET_TINY_IMAGE) {
 647 |       GetTinyImageSchema.parse(args);
 648 |       return {
 649 |         content: [
 650 |           {
 651 |             type: "text",
 652 |             text: "This is a tiny image:",
 653 |           },
 654 |           {
 655 |             type: "image",
 656 |             data: MCP_TINY_IMAGE,
 657 |             mimeType: "image/png",
 658 |           },
 659 |           {
 660 |             type: "text",
 661 |             text: "The image above is the MCP tiny image.",
 662 |           },
 663 |         ],
 664 |       };
 665 |     }
 666 | 
 667 |     if (name === ToolName.ANNOTATED_MESSAGE) {
 668 |       const { messageType, includeImage } = AnnotatedMessageSchema.parse(args);
 669 | 
 670 |       const content = [];
 671 | 
 672 |       // Main message with different priorities/audiences based on type
 673 |       if (messageType === "error") {
 674 |         content.push({
 675 |           type: "text",
 676 |           text: "Error: Operation failed",
 677 |           annotations: {
 678 |             priority: 1.0, // Errors are highest priority
 679 |             audience: ["user", "assistant"], // Both need to know about errors
 680 |           },
 681 |         });
 682 |       } else if (messageType === "success") {
 683 |         content.push({
 684 |           type: "text",
 685 |           text: "Operation completed successfully",
 686 |           annotations: {
 687 |             priority: 0.7, // Success messages are important but not critical
 688 |             audience: ["user"], // Success mainly for user consumption
 689 |           },
 690 |         });
 691 |       } else if (messageType === "debug") {
 692 |         content.push({
 693 |           type: "text",
 694 |           text: "Debug: Cache hit ratio 0.95, latency 150ms",
 695 |           annotations: {
 696 |             priority: 0.3, // Debug info is low priority
 697 |             audience: ["assistant"], // Technical details for assistant
 698 |           },
 699 |         });
 700 |       }
 701 | 
 702 |       // Optional image with its own annotations
 703 |       if (includeImage) {
 704 |         content.push({
 705 |           type: "image",
 706 |           data: MCP_TINY_IMAGE,
 707 |           mimeType: "image/png",
 708 |           annotations: {
 709 |             priority: 0.5,
 710 |             audience: ["user"], // Images primarily for user visualization
 711 |           },
 712 |         });
 713 |       }
 714 | 
 715 |       return { content };
 716 |     }
 717 | 
 718 |     if (name === ToolName.GET_RESOURCE_REFERENCE) {
 719 |       const validatedArgs = GetResourceReferenceSchema.parse(args);
 720 |       const resourceId = validatedArgs.resourceId;
 721 | 
 722 |       const resourceIndex = resourceId - 1;
 723 |       if (resourceIndex < 0 || resourceIndex >= ALL_RESOURCES.length) {
 724 |         throw new Error(`Resource with ID ${resourceId} does not exist`);
 725 |       }
 726 | 
 727 |       const resource = ALL_RESOURCES[resourceIndex];
 728 | 
 729 |       return {
 730 |         content: [
 731 |           {
 732 |             type: "text",
 733 |             text: `Returning resource reference for Resource ${resourceId}:`,
 734 |           },
 735 |           {
 736 |             type: "resource",
 737 |             resource: resource,
 738 |           },
 739 |           {
 740 |             type: "text",
 741 |             text: `You can access this resource using the URI: ${resource.uri}`,
 742 |           },
 743 |         ],
 744 |       };
 745 |     }
 746 | 
 747 |     if (name === ToolName.ELICITATION) {
 748 |       ElicitationSchema.parse(args);
 749 | 
 750 |       const elicitationResult = await requestElicitation(
 751 |         'What are your favorite things?',
 752 |         {
 753 |           type: 'object',
 754 |           properties: {
 755 |             color: { type: 'string', description: 'Favorite color' },
 756 |             number: {
 757 |               type: 'integer',
 758 |               description: 'Favorite number',
 759 |               minimum: 1,
 760 |               maximum: 100,
 761 |             },
 762 |             pets: {
 763 |               type: 'string',
 764 |               enum: ['cats', 'dogs', 'birds', 'fish', 'reptiles'],
 765 |               description: 'Favorite pets',
 766 |             },
 767 |           },
 768 |         },
 769 |         extra.sendRequest
 770 |       );
 771 | 
 772 |       // Handle different response actions
 773 |       const content = [];
 774 | 
 775 |       if (elicitationResult.action === 'accept' && elicitationResult.content) {
 776 |         content.push({
 777 |           type: "text",
 778 |           text: `✅ User provided their favorite things!`,
 779 |         });
 780 | 
 781 |         // Only access elicitationResult.content when action is accept
 782 |         const { color, number, pets } = elicitationResult.content;
 783 |         content.push({
 784 |           type: "text",
 785 |           text: `Their favorites are:\n- Color: ${color || 'not specified'}\n- Number: ${number || 'not specified'}\n- Pets: ${pets || 'not specified'}`,
 786 |         });
 787 |       } else if (elicitationResult.action === 'decline') {
 788 |         content.push({
 789 |           type: "text",
 790 |           text: `❌ User declined to provide their favorite things.`,
 791 |         });
 792 |       } else if (elicitationResult.action === 'cancel') {
 793 |         content.push({
 794 |           type: "text",
 795 |           text: `⚠️ User cancelled the elicitation dialog.`,
 796 |         });
 797 |       }
 798 | 
 799 |       // Include raw result for debugging
 800 |       content.push({
 801 |         type: "text",
 802 |         text: `\nRaw result: ${JSON.stringify(elicitationResult, null, 2)}`,
 803 |       });
 804 | 
 805 |       return { content };
 806 |     }
 807 | 
 808 |     if (name === ToolName.GET_RESOURCE_LINKS) {
 809 |       const { count } = GetResourceLinksSchema.parse(args);
 810 |       const content = [];
 811 | 
 812 |       // Add intro text
 813 |       content.push({
 814 |         type: "text",
 815 |         text: `Here are ${count} resource links to resources available in this server (see full output in tool response if your client does not support resource_link yet):`,
 816 |       });
 817 | 
 818 |       // Return resource links to actual resources from ALL_RESOURCES
 819 |       const actualCount = Math.min(count, ALL_RESOURCES.length);
 820 |       for (let i = 0; i < actualCount; i++) {
 821 |         const resource = ALL_RESOURCES[i];
 822 |         content.push({
 823 |           type: "resource_link",
 824 |           uri: resource.uri,
 825 |           name: resource.name,
 826 |           description: `Resource ${i + 1}: ${resource.mimeType === "text/plain"
 827 |             ? "plaintext resource"
 828 |             : "binary blob resource"
 829 |             }`,
 830 |           mimeType: resource.mimeType,
 831 |         });
 832 |       }
 833 | 
 834 |       return { content };
 835 |     }
 836 | 
 837 |     if (name === ToolName.STRUCTURED_CONTENT) {
 838 |       // The same response is returned for every input.
 839 |       const validatedArgs = StructuredContentSchema.input.parse(args);
 840 | 
 841 |       const weather = {
 842 |         temperature: 22.5,
 843 |         conditions: "Partly cloudy",
 844 |         humidity: 65
 845 |       }
 846 | 
 847 |       const backwardCompatiblecontent = {
 848 |         type: "text",
 849 |         text: JSON.stringify(weather)
 850 |       }
 851 | 
 852 |       return {
 853 |         content: [backwardCompatiblecontent],
 854 |         structuredContent: weather
 855 |       };
 856 |     }
 857 | 
 858 |     if (name === ToolName.ZIP_RESOURCES) {
 859 |       const { files } = ZipResourcesInputSchema.parse(args);
 860 | 
 861 |       const zip = new JSZip();
 862 | 
 863 |       for (const [fileName, fileUrl] of Object.entries(files)) {
 864 |         try {
 865 |           const response = await fetch(fileUrl);
 866 |           if (!response.ok) {
 867 |             throw new Error(`Failed to fetch ${fileUrl}: ${response.statusText}`);
 868 |           }
 869 |           const arrayBuffer = await response.arrayBuffer();
 870 |           zip.file(fileName, arrayBuffer);
 871 |         } catch (error) {
 872 |           throw new Error(`Error fetching file ${fileUrl}: ${error instanceof Error ? error.message : String(error)}`);
 873 |         }
 874 |       }
 875 | 
 876 |       const uri = `data:application/zip;base64,${await zip.generateAsync({ type: "base64" })}`;
 877 | 
 878 |       return {
 879 |         content: [
 880 |           {
 881 |             type: "resource_link",
 882 |             mimeType: "application/zip",
 883 |             uri,
 884 |           },
 885 |         ],
 886 |       };
 887 |     }
 888 | 
 889 |     if (name === ToolName.LIST_ROOTS) {
 890 |       ListRootsSchema.parse(args);
 891 | 
 892 |       if (!clientSupportsRoots) {
 893 |         return {
 894 |           content: [
 895 |             {
 896 |               type: "text",
 897 |               text: "The MCP client does not support the roots protocol.\n\n" +
 898 |                 "This means the server cannot access information about the client's workspace directories or file system roots."
 899 |             }
 900 |           ]
 901 |         };
 902 |       }
 903 | 
 904 |       if (currentRoots.length === 0) {
 905 |         return {
 906 |           content: [
 907 |             {
 908 |               type: "text",
 909 |               text: "The client supports roots but no roots are currently configured.\n\n" +
 910 |                 "This could mean:\n" +
 911 |                 "1. The client hasn't provided any roots yet\n" +
 912 |                 "2. The client provided an empty roots list\n" +
 913 |                 "3. The roots configuration is still being loaded"
 914 |             }
 915 |           ]
 916 |         };
 917 |       }
 918 | 
 919 |       const rootsList = currentRoots.map((root, index) => {
 920 |         return `${index + 1}. ${root.name || 'Unnamed Root'}\n   URI: ${root.uri}`;
 921 |       }).join('\n\n');
 922 | 
 923 |       return {
 924 |         content: [
 925 |           {
 926 |             type: "text",
 927 |             text: `Current MCP Roots (${currentRoots.length} total):\n\n${rootsList}\n\n` +
 928 |               "Note: This server demonstrates the roots protocol capability but doesn't actually access files. " +
 929 |               "The roots are provided by the MCP client and can be used by servers that need file system access."
 930 |           }
 931 |         ]
 932 |       };
 933 |     }
 934 | 
 935 |     throw new Error(`Unknown tool: ${name}`);
 936 |   });
 937 | 
 938 |   server.setRequestHandler(CompleteRequestSchema, async (request) => {
 939 |     const { ref, argument } = request.params;
 940 | 
 941 |     if (ref.type === "ref/resource") {
 942 |       const resourceId = ref.uri.split("/").pop();
 943 |       if (!resourceId) return { completion: { values: [] } };
 944 | 
 945 |       // Filter resource IDs that start with the input value
 946 |       const values = EXAMPLE_COMPLETIONS.resourceId.filter((id) =>
 947 |         id.startsWith(argument.value)
 948 |       );
 949 |       return { completion: { values, hasMore: false, total: values.length } };
 950 |     }
 951 | 
 952 |     if (ref.type === "ref/prompt") {
 953 |       // Handle completion for prompt arguments
 954 |       const completions =
 955 |         EXAMPLE_COMPLETIONS[argument.name as keyof typeof EXAMPLE_COMPLETIONS];
 956 |       if (!completions) return { completion: { values: [] } };
 957 | 
 958 |       const values = completions.filter((value) =>
 959 |         value.startsWith(argument.value)
 960 |       );
 961 |       return { completion: { values, hasMore: false, total: values.length } };
 962 |     }
 963 | 
 964 |     throw new Error(`Unknown reference type`);
 965 |   });
 966 | 
 967 |   // Roots protocol handlers
 968 |   server.setNotificationHandler(RootsListChangedNotificationSchema, async () => {
 969 |     try {
 970 |       // Request the updated roots list from the client
 971 |       const response = await server.listRoots();
 972 |       if (response && 'roots' in response) {
 973 |         currentRoots = response.roots;
 974 | 
 975 |         // Log the roots update for demonstration
 976 |         await server.sendLoggingMessage({
 977 |             level: "info",
 978 |             logger: "everything-server",
 979 |             data: `Roots updated: ${currentRoots.length} root(s) received from client`,
 980 |         }, sessionId);
 981 |       }
 982 |     } catch (error) {
 983 |       await server.sendLoggingMessage({
 984 |           level: "error",
 985 |           logger: "everything-server",
 986 |           data: `Failed to request roots from client: ${error instanceof Error ? error.message : String(error)}`,
 987 |       }, sessionId);
 988 |     }
 989 |   });
 990 | 
 991 |   // Handle post-initialization setup for roots
 992 |   server.oninitialized = async () => {
 993 |    clientCapabilities = server.getClientCapabilities();
 994 | 
 995 |     if (clientCapabilities?.roots) {
 996 |       clientSupportsRoots = true;
 997 |       try {
 998 |         const response = await server.listRoots();
 999 |         if (response && 'roots' in response) {
1000 |           currentRoots = response.roots;
1001 | 
1002 |           await server.sendLoggingMessage({
1003 |               level: "info",
1004 |               logger: "everything-server",
1005 |               data: `Initial roots received: ${currentRoots.length} root(s) from client`,
1006 |           }, sessionId);
1007 |         } else {
1008 |           await server.sendLoggingMessage({
1009 |               level: "warning",
1010 |               logger: "everything-server",
1011 |               data: "Client returned no roots set",
1012 |           }, sessionId);
1013 |         }
1014 |       } catch (error) {
1015 |         await server.sendLoggingMessage({
1016 |             level: "error",
1017 |             logger: "everything-server",
1018 |             data: `Failed to request initial roots from client: ${error instanceof Error ? error.message : String(error)}`,
1019 |         }, sessionId);
1020 |       }
1021 |     } else {
1022 |       await server.sendLoggingMessage({
1023 |           level: "info",
1024 |           logger: "everything-server",
1025 |           data: "Client does not support MCP roots protocol",
1026 |       }, sessionId);
1027 |     }
1028 |   };
1029 | 
1030 |   const cleanup = async () => {
1031 |     if (subsUpdateInterval) clearInterval(subsUpdateInterval);
1032 |     if (logsUpdateInterval) clearInterval(logsUpdateInterval);
1033 |     if (stdErrUpdateInterval) clearInterval(stdErrUpdateInterval);
1034 |   };
1035 | 
1036 |   return { server, cleanup, startNotificationIntervals };
1037 | };
1038 | 
1039 | const MCP_TINY_IMAGE =
1040 |   "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAKsGlDQ1BJQ0MgUHJvZmlsZQAASImVlwdUU+kSgOfe9JDQEiIgJfQmSCeAlBBaAAXpYCMkAUKJMRBU7MriClZURLCs6KqIgo0idizYFsWC3QVZBNR1sWDDlXeBQ9jdd9575805c+a7c+efmf+e/z9nLgCdKZDJMlF1gCxpjjwyyI8dn5DIJvUABRiY0kBdIMyWcSMiwgCTUft3+dgGyJC9YzuU69/f/1fREImzhQBIBMbJomxhFsbHMe0TyuQ5ALg9mN9kbo5siK9gzJRjDWL8ZIhTR7hviJOHGY8fjomO5GGsDUCmCQTyVACaKeZn5wpTsTw0f4ztpSKJFGPsGbyzsmaLMMbqgiUWI8N4KD8n+S95Uv+WM1mZUyBIVfLIXoaF7C/JlmUK5v+fn+N/S1amYrSGOaa0NHlwJGaxvpAHGbNDlSxNnhI+yhLRcPwwpymCY0ZZmM1LHGWRwD9UuTZzStgop0gC+co8OfzoURZnB0SNsnx2pLJWipzHHWWBfKyuIiNG6U8T85X589Ki40Y5VxI7ZZSzM6JCx2J4Sr9cEansXywN8hurG6jce1b2X/Yr4SvX5qRFByv3LhjrXyzljuXMjlf2JhL7B4zFxCjjZTl+ylqyzAhlvDgzSOnPzo1Srs3BDuTY2gjlN0wXhESMMoRBELAhBjIhB+QggECQgBTEOeJ5Q2cUeLNl8+WS1LQcNhe7ZWI2Xyq0m8B2tHd0Bhi6syNH4j1r+C4irGtjvhWVAF4nBgcHT475Qm4BHEkCoNaO+SxnAKh3A1w5JVTIc0d8Q9cJCEAFNWCCDhiACViCLTiCK3iCLwRACIRDNCTATBBCGmRhnc+FhbAMCqAI1sNmKIOdsBv2wyE4CvVwCs7DZbgOt+AePIZ26IJX0AcfYQBBEBJCRxiIDmKImCE2iCPCQbyRACQMiUQSkCQkFZEiCmQhsgIpQoqRMmQXUokcQU4g55GrSCvyEOlAepF3yFcUh9JQJqqPmqMTUQ7KRUPRaHQGmorOQfPQfHQtWopWoAfROvQ8eh29h7ajr9B+HOBUcCycEc4Wx8HxcOG4RFwKTo5bjCvEleAqcNW4Rlwz7g6uHfca9wVPxDPwbLwt3hMfjI/BC/Fz8Ivxq/Fl+P34OvxF/B18B74P/51AJ+gRbAgeBD4hnpBKmEsoIJQQ9hJqCZcI9whdhI9EIpFFtCC6EYOJCcR04gLiauJ2Yg3xHLGV2EnsJ5FIOiQbkhcpnCQg5ZAKSFtJB0lnSbdJXaTPZBWyIdmRHEhOJEvJy8kl5APkM+Tb5G7yAEWdYkbxoIRTRJT5lHWUPZRGyk1KF2WAqkG1oHpRo6np1GXUUmo19RL1CfW9ioqKsYq7ylQVicpSlVKVwypXVDpUvtA0adY0Hm06TUFbS9tHO0d7SHtPp9PN6b70RHoOfS29kn6B/oz+WZWhaqfKVxWpLlEtV61Tva36Ro2iZqbGVZuplqdWonZM7abaa3WKurk6T12gvli9XP2E+n31fg2GhoNGuEaWxmqNAxpXNXo0SZrmmgGaIs18zd2aFzQ7GTiGCYPHEDJWMPYwLjG6mESmBZPPTGcWMQ8xW5h9WppazlqxWvO0yrVOa7WzcCxzFp+VyVrHOspqY30dpz+OO048btW46nG3x33SHq/tqy3WLtSu0b6n/VWHrROgk6GzQade56kuXtdad6ruXN0dupd0X49njvccLxxfOP7o+Ed6qJ61XqTeAr3dejf0+vUN9IP0Zfpb9S/ovzZgGfgapBtsMjhj0GvIMPQ2lBhuMjxr+JKtxeayM9ml7IvsPiM9o2AjhdEuoxajAWML4xjj5cY1xk9NqCYckxSTTSZNJn2mhqaTTReaVpk+MqOYcczSzLaYNZt9MrcwjzNfaV5v3mOhbcG3yLOosnhiSbf0sZxjWWF514poxbHKsNpudcsatXaxTrMut7
5pg9q42khsttu0TiBMcJ8gnVAx4b4tzZZrm2tbZdthx7ILs1tuV2/3ZqLpxMSJGyY2T/xu72Kfab/H/rGDpkOIw3KHRod3jtaOQsdyx7tOdKdApyVODU5vnW2cxc47nB+4MFwmu6x0aXL509XNVe5a7drrZuqW5LbN7T6HyYngrOZccSe4+7kvcT/l/sXD1SPH46jHH562nhmeBzx7JllMEk/aM6nTy9hL4LXLq92b7Z3k/ZN3u4+Rj8Cnwue5r4mvyHevbzfXipvOPch942fvJ/er9fvE8+At4p3zx/kH+Rf6twRoBsQElAU8CzQOTA2sCuwLcglaEHQumBAcGrwh+D5fny/kV/L7QtxCFoVcDKWFRoWWhT4Psw6ThzVORieHTN44+ckUsynSKfXhEM4P3xj+NMIiYk7EyanEqRFTy6e+iHSIXBjZHMWImhV1IOpjtF/0uujHMZYxipimWLXY6bGVsZ/i/OOK49rjJ8Yvir+eoJsgSWhIJCXGJu5N7J8WMG3ztK7pLtMLprfNsJgxb8bVmbozM2eenqU2SzDrWBIhKS7pQNI3QbigQtCfzE/eltwn5Am3CF+JfEWbRL1iL3GxuDvFK6U4pSfVK3Vjam+aT1pJ2msJT1ImeZsenL4z/VNGeMa+jMHMuMyaLHJWUtYJqaY0Q3pxtsHsebNbZTayAln7HI85m+f0yUPle7OR7BnZDTlMbDi6obBU/KDoyPXOLc/9PDd27rF5GvOk827Mt56/an53XmDezwvwC4QLmhYaLVy2sGMRd9Guxcji5MVNS0yW5C/pWhq0dP8y6rKMZb8st19evPzDirgVjfn6+UvzO38I+qGqQLVAXnB/pefKnT/if5T82LLKadXWVd8LRYXXiuyLSoq+rRauvrbGYU3pmsG1KWtb1rmu27GeuF66vm2Dz4b9xRrFecWdGydvrNvE3lS46cPmWZuvljiX7NxC3aLY0l4aVtqw1XTr+q3fytLK7pX7ldds09u2atun7aLtt3f47qjeqb+zaOfXnyQ/PdgVtKuuwryiZDdxd+7uF3ti9zT/zPm5cq/u3qK9f+6T7mvfH7n/YqVbZeUBvQPrqtAqRVXvwekHbx3yP9RQbVu9q4ZVU3QYDisOvzySdKTtaOjRpmOcY9XHzY5vq2XUFtYhdfPr+urT6tsbEhpaT4ScaGr0bKw9aXdy3ymjU+WntU6vO0M9k39m8Gze2f5zsnOvz6ee72ya1fT4QvyFuxenXmy5FHrpyuXAyxeauc1nr3hdOXXV4+qJa5xr9dddr9fdcLlR+4vLL7Utri11N91uNtzyv9XYOqn1zG2f2+fv+N+5fJd/9/q9Kfda22LaHtyffr/9gehBz8PMh28f5T4aeLz0CeFJ4VP1pyXP9J5V/Gr1a027a/vpDv+OG8+jnj/uFHa++i37t29d+S/oL0q6Dbsrexx7TvUG9t56Oe1l1yvZq4HXBb9r/L7tjeWb43/4/nGjL76v66387eC71e913u/74PyhqT+i/9nHrI8Dnwo/63ze/4Xzpflr3NfugbnfSN9K/7T6s/F76Pcng1mDgzKBXDA8CuAwRVNSAN7tA6AnADCwGYI6bWSmHhZk5D9gmOA/8cjcPSyuANWYGRqNeOcADmNqvhRAzRdgaCyK9gXUyUmpo/Pv8Kw+JAbYv8K0HECi2x6tebQU/iEjc/xf+v6nBWXWv9l/AV0EC6JTIblRAAAAeGVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsABQAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEAAqACAAQAAAABAAAAFKADAAQAAAABAAAAFAAAAAAXNii1AAAACXBIWXMAABYlAAAWJQFJUiTwAAAB82lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpucz
ptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOllSZXNvbHV0aW9uPjE0NDwvdGlmZjpZUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6WFJlc29sdXRpb24+MTQ0PC90aWZmOlhSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KReh49gAAAjRJREFUOBGFlD2vMUEUx2clvoNCcW8hCqFAo1dKhEQpvsF9KrWEBh/ALbQ0KkInBI3SWyGPCCJEQliXgsTLefaca/bBWjvJzs6cOf/fnDkzOQJIjWm06/XKBEGgD8c6nU5VIWgBtQDPZPWtJE8O63a7LBgMMo/Hw0ql0jPjcY4RvmqXy4XMjUYDUwLtdhtmsxnYbDbI5/O0djqdFFKmsEiGZ9jP9gem0yn0ej2Yz+fg9XpfycimAD7DttstQTDKfr8Po9GIIg6Hw1Cr1RTgB+A72GAwgMPhQLBMJgNSXsFqtUI2myUo18pA6QJogefsPrLBX4QdCVatViklw+EQRFGEj88P2O12pEUGATmsXq+TaLPZ0AXgMRF2vMEqlQoJTSYTpNNpApvNZliv1/+BHDaZTAi2Wq1A3Ig0xmMej7+RcZjdbodUKkWAaDQK+GHjHPnImB88JrZIJAKFQgH2+z2BOczhcMiwRCIBgUAA+NN5BP6mj2DYff35gk6nA61WCzBn2JxO5wPM7/fLz4vD0E+OECfn8xl/0Gw2KbLxeAyLxQIsFgt8p75pDSO7h/HbpUWpewCike9WLpfB7XaDy+WCYrFI/slk8i0MnRRAUt46hPMI4vE4+Hw+ec7t9/44VgWigEeby+UgFArJWjUYOqhWG6x50rpcSfR6PVUfNOgEVRlTX0HhrZBKz4MZjUYWi8VoA+lc9H/VaRZYjBKrtXR8tlwumcFgeMWRbZpA9ORQWfVm8A/FsrLaxebd5wAAAABJRU5ErkJggg==";
1041 | 
```
Page 4/5 — First | Prev | Next | Last