This is page 3 of 5. Use http://codebase.md/cyanheads/pubmed-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .clinerules
│   └── clinerules.md
├── .dockerignore
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── publish.yml
├── .gitignore
├── .ncurc.json
├── CHANGELOG.md
├── Dockerfile
├── docs
│   ├── project-spec.md
│   ├── publishing-mcp-server-registry.md
│   └── tree.md
├── eslint.config.js
├── examples
│   ├── generate_pubmed_chart
│   │   ├── bar_chart.png
│   │   ├── doughnut_chart.png
│   │   ├── line_chart.png
│   │   ├── pie_chart.png
│   │   ├── polar_chart.png
│   │   ├── radar_chart.png
│   │   └── scatter_plot.png
│   ├── pubmed_article_connections_1.md
│   ├── pubmed_article_connections_2.md
│   ├── pubmed_fetch_contents_example.md
│   ├── pubmed_research_agent_example.md
│   └── pubmed_search_articles_example.md
├── LICENSE
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── repomix.config.json
├── scripts
│   ├── clean.ts
│   ├── fetch-openapi-spec.ts
│   ├── make-executable.ts
│   ├── tree.ts
│   └── validate-mcp-publish-schema.ts
├── server.json
├── smithery.yaml
├── src
│   ├── config
│   │   └── index.ts
│   ├── index.ts
│   ├── mcp-server
│   │   ├── server.ts
│   │   ├── tools
│   │   │   ├── pubmedArticleConnections
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic
│   │   │   │   │   ├── citationFormatter.ts
│   │   │   │   │   ├── elinkHandler.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   └── types.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedFetchContents
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedGenerateChart
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedResearchAgent
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── inputSchema.ts
│   │   │   │   │   ├── outputTypes.ts
│   │   │   │   │   └── planOrchestrator.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   └── pubmedSearchArticles
│   │   │       ├── index.ts
│   │   │       ├── logic.ts
│   │   │       └── registration.ts
│   │   └── transports
│   │       ├── auth
│   │       │   ├── authFactory.ts
│   │       │   ├── authMiddleware.ts
│   │       │   ├── index.ts
│   │       │   ├── lib
│   │       │   │   ├── authContext.ts
│   │       │   │   ├── authTypes.ts
│   │       │   │   └── authUtils.ts
│   │       │   └── strategies
│   │       │       ├── authStrategy.ts
│   │       │       ├── jwtStrategy.ts
│   │       │       └── oauthStrategy.ts
│   │       ├── core
│   │       │   ├── baseTransportManager.ts
│   │       │   ├── headerUtils.ts
│   │       │   ├── honoNodeBridge.ts
│   │       │   ├── statefulTransportManager.ts
│   │       │   ├── statelessTransportManager.ts
│   │       │   └── transportTypes.ts
│   │       ├── http
│   │       │   ├── httpErrorHandler.ts
│   │       │   ├── httpTransport.ts
│   │       │   ├── httpTypes.ts
│   │       │   ├── index.ts
│   │       │   └── mcpTransportMiddleware.ts
│   │       └── stdio
│   │           ├── index.ts
│   │           └── stdioTransport.ts
│   ├── services
│   │   └── NCBI
│   │       ├── core
│   │       │   ├── ncbiConstants.ts
│   │       │   ├── ncbiCoreApiClient.ts
│   │       │   ├── ncbiRequestQueueManager.ts
│   │       │   ├── ncbiResponseHandler.ts
│   │       │   └── ncbiService.ts
│   │       └── parsing
│   │           ├── eSummaryResultParser.ts
│   │           ├── index.ts
│   │           ├── pubmedArticleStructureParser.ts
│   │           └── xmlGenericHelpers.ts
│   ├── types-global
│   │   ├── declarations.d.ts
│   │   ├── errors.ts
│   │   └── pubmedXml.ts
│   └── utils
│       ├── index.ts
│       ├── internal
│       │   ├── errorHandler.ts
│       │   ├── index.ts
│       │   ├── logger.ts
│       │   ├── performance.ts
│       │   └── requestContext.ts
│       ├── metrics
│       │   ├── index.ts
│       │   └── tokenCounter.ts
│       ├── network
│       │   ├── fetchWithTimeout.ts
│       │   └── index.ts
│       ├── parsing
│       │   ├── dateParser.ts
│       │   ├── index.ts
│       │   └── jsonParser.ts
│       ├── scheduling
│       │   ├── index.ts
│       │   └── scheduler.ts
│       ├── security
│       │   ├── idGenerator.ts
│       │   ├── index.ts
│       │   ├── rateLimiter.ts
│       │   └── sanitization.ts
│       └── telemetry
│           ├── instrumentation.ts
│           └── semconv.ts
├── tsconfig.json
├── tsconfig.typedoc.json
├── tsdoc.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedArticleConnections/logic/elinkHandler.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Handles ELink requests and enriches results with ESummary data
  3 |  * for the pubmedArticleConnections tool.
  4 |  * @module src/mcp-server/tools/pubmedArticleConnections/logic/elinkHandler
  5 |  */
  6 | 
  7 | import { getNcbiService } from "../../../../services/NCBI/core/ncbiService.js";
  8 | import type {
  9 |   ESummaryResult,
 10 |   ParsedBriefSummary,
 11 | } from "../../../../types-global/pubmedXml.js";
 12 | import { logger, RequestContext } from "../../../../utils/index.js";
 13 | import { extractBriefSummaries } from "../../../../services/NCBI/parsing/index.js";
 14 | import { ensureArray } from "../../../../services/NCBI/parsing/xmlGenericHelpers.js"; // Added import
 15 | import type { PubMedArticleConnectionsInput } from "./index.js";
 16 | import type { ToolOutputData } from "./types.js";
 17 | 
 18 | // Local interface for the structure of an ELink 'Link' item
 19 | interface XmlELinkItem {
 20 |   Id: string | number | { "#text"?: string | number }; // Allow number for Id
 21 |   Score?: string | number | { "#text"?: string | number }; // Allow number for Score
 22 | }
 23 | 
 24 | interface ELinkResult {
 25 |   eLinkResult?: {
 26 |     LinkSet?: {
 27 |       LinkSetDb?: {
 28 |         LinkName?: string;
 29 |         Link?: XmlELinkItem[];
 30 |       }[];
 31 |       LinkSetDbHistory?: {
 32 |         QueryKey?: string;
 33 |       }[];
 34 |       WebEnv?: string;
 35 |     };
 36 |     ERROR?: string;
 37 |   }[];
 38 | }
 39 | 
 40 | export async function handleELinkRelationships(
 41 |   input: PubMedArticleConnectionsInput,
 42 |   outputData: ToolOutputData,
 43 |   context: RequestContext,
 44 | ): Promise<void> {
 45 |   const eLinkParams: Record<string, string> = {
 46 |     dbfrom: "pubmed",
 47 |     db: "pubmed",
 48 |     id: input.sourcePmid,
 49 |     retmode: "xml",
 50 |     // cmd and linkname will be set below based on relationshipType
 51 |   };
 52 | 
 53 |   switch (input.relationshipType) {
 54 |     case "pubmed_citedin":
 55 |       eLinkParams.cmd = "neighbor_history";
 56 |       eLinkParams.linkname = "pubmed_pubmed_citedin";
 57 |       break;
 58 |     case "pubmed_references":
 59 |       eLinkParams.cmd = "neighbor_history";
 60 |       eLinkParams.linkname = "pubmed_pubmed_refs";
 61 |       break;
 62 |     case "pubmed_similar_articles":
 63 |     default: // Default to similar articles
 64 |       eLinkParams.cmd = "neighbor_score";
 65 |       // No linkname is explicitly needed for neighbor_score when dbfrom and db are pubmed
 66 |       break;
 67 |   }
 68 | 
 69 |   const tempUrl = new URL(
 70 |     "https://dummy.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi",
 71 |   );
 72 |   Object.keys(eLinkParams).forEach((key) =>
 73 |     tempUrl.searchParams.append(key, String(eLinkParams[key])),
 74 |   );
 75 |   outputData.eUtilityUrl = `https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?${tempUrl.search.substring(1)}`;
 76 | 
 77 |   const ncbiService = getNcbiService();
 78 |   const eLinkResult: ELinkResult = (await ncbiService.eLink(
 79 |     eLinkParams,
 80 |     context,
 81 |   )) as ELinkResult;
 82 | 
 83 |   // Log the full eLinkResult for debugging
 84 |   logger.debug("Raw eLinkResult from ncbiService:", {
 85 |     ...context,
 86 |     eLinkResultString: JSON.stringify(eLinkResult, null, 2),
 87 |   });
 88 | 
 89 |   // Use ensureArray for robust handling of potentially single or array eLinkResult
 90 |   const eLinkResultsArray = ensureArray(eLinkResult?.eLinkResult);
 91 |   const firstELinkResult = eLinkResultsArray[0];
 92 | 
 93 |   // Use ensureArray for LinkSet as well
 94 |   const linkSetsArray = ensureArray(firstELinkResult?.LinkSet);
 95 |   const linkSet = linkSetsArray[0];
 96 | 
 97 |   let foundPmids: { pmid: string; score?: number }[] = [];
 98 | 
 99 |   if (firstELinkResult?.ERROR) {
100 |     const errorMsg =
101 |       typeof firstELinkResult.ERROR === "string"
102 |         ? firstELinkResult.ERROR
103 |         : JSON.stringify(firstELinkResult.ERROR);
104 |     logger.warning(`ELink returned an error: ${errorMsg}`, context);
105 |     outputData.message = `ELink error: ${errorMsg}`;
106 |     outputData.retrievedCount = 0;
107 |     return;
108 |   }
109 | 
110 |   if (linkSet?.LinkSetDbHistory) {
111 |     // Handle cmd=neighbor_history response (citedin, references)
112 |     const history = Array.isArray(linkSet.LinkSetDbHistory)
113 |       ? linkSet.LinkSetDbHistory[0]
114 |       : linkSet.LinkSetDbHistory;
115 | 
116 |     if (history?.QueryKey && firstELinkResult?.LinkSet?.WebEnv) {
117 |       const eSearchParams = {
118 |         db: "pubmed",
119 |         query_key: history.QueryKey,
120 |         WebEnv: firstELinkResult.LinkSet.WebEnv,
121 |         retmode: "xml",
122 |         retmax: input.maxRelatedResults * 2, // Fetch a bit more to allow filtering sourcePmid
123 |       };
124 |       const eSearchResult: { eSearchResult?: { IdList?: { Id?: unknown } } } =
125 |         (await ncbiService.eSearch(eSearchParams, context)) as {
126 |           eSearchResult?: { IdList?: { Id?: unknown } };
127 |         };
128 |       if (eSearchResult?.eSearchResult?.IdList?.Id) {
129 |         const ids = ensureArray(eSearchResult.eSearchResult.IdList.Id);
130 |         foundPmids = ids
131 |           .map((idNode: string | number | { "#text"?: string | number }) => {
132 |             // Allow number for idNode
133 |             let pmidVal: string | number | undefined;
134 |             if (typeof idNode === "object" && idNode !== null) {
135 |               pmidVal = idNode["#text"];
136 |             } else {
137 |               pmidVal = idNode;
138 |             }
139 |             return {
140 |               pmid: pmidVal !== undefined ? String(pmidVal) : "",
141 |               // No scores from this ESearch path
142 |             };
143 |           })
144 |           .filter(
145 |             (item: { pmid: string }) =>
146 |               item.pmid && item.pmid !== input.sourcePmid && item.pmid !== "0",
147 |           );
148 |       }
149 |     }
150 |   } else if (linkSet?.LinkSetDb) {
151 |     // Handle cmd=neighbor_score response (similar_articles)
152 |     const linkSetDbArray = Array.isArray(linkSet.LinkSetDb)
153 |       ? linkSet.LinkSetDb
154 |       : [linkSet.LinkSetDb];
155 | 
156 |     const targetLinkSetDbEntry = linkSetDbArray.find(
157 |       (db) => db.LinkName === "pubmed_pubmed",
158 |     );
159 | 
160 |     if (targetLinkSetDbEntry?.Link) {
161 |       const links = ensureArray(targetLinkSetDbEntry.Link); // Use ensureArray here too
162 |       foundPmids = links
163 |         .map((link: XmlELinkItem) => {
164 |           let pmidValue: string | number | undefined;
165 |           if (typeof link.Id === "object" && link.Id !== null) {
166 |             pmidValue = link.Id["#text"];
167 |           } else if (link.Id !== undefined) {
168 |             pmidValue = link.Id;
169 |           }
170 | 
171 |           let scoreValue: string | number | undefined;
172 |           if (typeof link.Score === "object" && link.Score !== null) {
173 |             scoreValue = link.Score["#text"];
174 |           } else if (link.Score !== undefined) {
175 |             scoreValue = link.Score;
176 |           }
177 | 
178 |           const pmidString = pmidValue !== undefined ? String(pmidValue) : "";
179 | 
180 |           return {
181 |             pmid: pmidString,
182 |             score: scoreValue !== undefined ? Number(scoreValue) : undefined,
183 |           };
184 |         })
185 |         .filter(
186 |           (item: { pmid: string; score?: number }) =>
187 |             item.pmid && item.pmid !== input.sourcePmid && item.pmid !== "0",
188 |         );
189 |     }
190 |   }
191 | 
192 |   if (foundPmids.length === 0) {
193 |     logger.warning(
194 |       "No related PMIDs found after ELink/ESearch processing.",
195 |       context,
196 |     );
197 |     outputData.message = "No related articles found or ELink error."; // Generic message if no PMIDs
198 |     outputData.retrievedCount = 0;
199 |     return;
200 |   }
201 | 
202 |   logger.debug(
203 |     "Found PMIDs after initial parsing and filtering (before sort):",
204 |     {
205 |       ...context,
206 |       foundPmidsCount: foundPmids.length,
207 |       firstFewFoundPmids: foundPmids.slice(0, 3),
208 |     },
209 |   );
210 | 
211 |   if (foundPmids.every((p) => p.score !== undefined)) {
212 |     foundPmids.sort((a, b) => (b.score ?? 0) - (a.score ?? 0));
213 |   }
214 | 
215 |   logger.debug("Found PMIDs after sorting:", {
216 |     ...context,
217 |     sortedFoundPmidsCount: foundPmids.length,
218 |     firstFewSortedFoundPmids: foundPmids.slice(0, 3),
219 |   });
220 | 
221 |   const pmidsToEnrich = foundPmids
222 |     .slice(0, input.maxRelatedResults)
223 |     .map((p) => p.pmid);
224 | 
225 |   logger.debug("PMIDs to enrich with ESummary:", {
226 |     ...context,
227 |     pmidsToEnrichCount: pmidsToEnrich.length,
228 |     pmidsToEnrichList: pmidsToEnrich,
229 |   });
230 | 
231 |   if (pmidsToEnrich.length > 0) {
232 |     try {
233 |       const summaryParams = {
234 |         db: "pubmed",
235 |         id: pmidsToEnrich.join(","),
236 |         version: "2.0",
237 |         retmode: "xml",
238 |       };
239 |       const summaryResultContainer: {
240 |         eSummaryResult?: ESummaryResult;
241 |         result?: ESummaryResult;
242 |       } = (await ncbiService.eSummary(summaryParams, context)) as {
243 |         eSummaryResult?: ESummaryResult;
244 |         result?: ESummaryResult;
245 |       };
246 |       const summaryResult: ESummaryResult | undefined =
247 |         summaryResultContainer?.eSummaryResult ||
248 |         summaryResultContainer?.result ||
249 |         summaryResultContainer;
250 | 
251 |       if (summaryResult) {
252 |         const briefSummaries: ParsedBriefSummary[] =
253 |           await extractBriefSummaries(summaryResult, context);
254 |         const pmidDetailsMap = new Map<string, ParsedBriefSummary>();
255 |         briefSummaries.forEach((bs) => pmidDetailsMap.set(bs.pmid, bs));
256 | 
257 |         outputData.relatedArticles = foundPmids
258 |           .filter((p) => pmidsToEnrich.includes(p.pmid))
259 |           .map((p) => {
260 |             const details = pmidDetailsMap.get(p.pmid);
261 |             return {
262 |               pmid: p.pmid,
263 |               title: details?.title,
264 |               authors: details?.authors,
265 |               score: p.score,
266 |               linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
267 |             };
268 |           })
269 |           .slice(0, input.maxRelatedResults);
270 |       } else {
271 |         logger.warning(
272 |           "ESummary did not return usable data for enrichment.",
273 |           context,
274 |         );
275 |         outputData.relatedArticles = foundPmids
276 |           .slice(0, input.maxRelatedResults)
277 |           .map((p) => ({
278 |             pmid: p.pmid,
279 |             score: p.score,
280 |             linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
281 |           }));
282 |       }
283 |     } catch (summaryError: unknown) {
284 |       logger.error(
285 |         "Failed to enrich related articles with summaries",
286 |         summaryError instanceof Error
287 |           ? summaryError
288 |           : new Error(String(summaryError)),
289 |         context,
290 |       );
291 |       outputData.relatedArticles = foundPmids
292 |         .slice(0, input.maxRelatedResults)
293 |         .map((p) => ({
294 |           pmid: p.pmid,
295 |           score: p.score,
296 |           linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
297 |         }));
298 |     }
299 |   }
300 |   outputData.retrievedCount = outputData.relatedArticles.length;
301 | }
302 | 
```
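
For orientation, here is a minimal invocation sketch (an editor's illustration, not code from the repository). The field names mirror the usages in `handleELinkRelationships` above; the full `PubMedArticleConnectionsInput` and `ToolOutputData` shapes live in `./index.ts` and `./types.ts`, which are not shown on this page, so the object literals below are assumed and cast accordingly.

```typescript
import { handleELinkRelationships } from "./elinkHandler.js";
import type { RequestContext } from "../../../../utils/index.js";
import type { PubMedArticleConnectionsInput } from "./index.js";
import type { ToolOutputData } from "./types.js";

async function findSimilarArticles(context: RequestContext): Promise<void> {
  // Only the fields read by the handler are populated here (assumed shape).
  const input = {
    sourcePmid: "12345678",
    relationshipType: "pubmed_similar_articles",
    maxRelatedResults: 5,
  } as PubMedArticleConnectionsInput;

  // The handler mutates this object in place (assumed shape).
  const outputData = {
    relatedArticles: [],
    retrievedCount: 0,
  } as unknown as ToolOutputData;

  await handleELinkRelationships(input, outputData, context);

  // outputData.eUtilityUrl now records the effective ELink URL; relatedArticles
  // is sorted by ELink score (when available) and enriched via ESummary.
  console.log(outputData.retrievedCount, outputData.relatedArticles);
}
```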

--------------------------------------------------------------------------------
/scripts/tree.ts:
--------------------------------------------------------------------------------

```typescript
  1 | #!/usr/bin/env node
  2 | 
  3 | /**
  4 |  * @fileoverview Generates a visual tree representation of the project's directory structure.
  5 |  * @module scripts/tree
  6 |  *   Respects .gitignore patterns and common exclusions (e.g., node_modules).
  7 |  *   Saves the tree to a markdown file (default: docs/tree.md).
  8 |  *   Supports custom output path and depth limitation.
  9 |  *   Ensures all file operations are within the project root for security.
 10 |  *
 11 |  * @example
 12 |  * // Generate tree with default settings:
 13 |  * // npm run tree
 14 |  *
 15 |  * @example
 16 |  * // Specify custom output path and depth:
 17 |  * // ts-node --esm scripts/tree.ts ./documentation/structure.md --depth=3
 18 |  */
 19 | 
 20 | import fs from "fs/promises";
 21 | import path from "path";
 22 | import type { Dirent } from "fs";
 23 | 
 24 | const projectRoot = process.cwd();
 25 | let outputPathArg = "docs/tree.md"; // Default output path
 26 | let maxDepthArg = Infinity;
 27 | 
 28 | /**
 29 |  * Represents a processed .gitignore pattern.
 30 |  * @property pattern - The original glob pattern (without negation prefix).
 31 |  * @property negated - True if the original pattern was negated (e.g., !pattern).
 32 |  * @property regex - A string representation of the regex derived from the glob pattern.
 33 |  */
 34 | interface GitignorePattern {
 35 |   pattern: string;
 36 |   negated: boolean;
 37 |   regex: string;
 38 | }
 39 | 
 40 | const args = process.argv.slice(2);
 41 | if (args.includes("--help")) {
 42 |   console.log(`
 43 | Generate Tree - Project directory structure visualization tool
 44 | 
 45 | Usage:
 46 |   ts-node --esm scripts/tree.ts [output-path] [--depth=<number>] [--help]
 47 | 
 48 | Options:
 49 |   output-path      Custom file path for the tree output (relative to project root, default: docs/tree.md)
 50 |   --depth=<number> Maximum directory depth to display (default: unlimited)
 51 |   --help           Show this help message
 52 | `);
 53 |   process.exit(0);
 54 | }
 55 | 
 56 | args.forEach((arg) => {
 57 |   if (arg.startsWith("--depth=")) {
 58 |     const depthValue = parseInt(arg.split("=")[1], 10);
 59 |     if (!isNaN(depthValue) && depthValue >= 0) {
 60 |       maxDepthArg = depthValue;
 61 |     } else {
 62 |       console.warn(`Invalid depth value: "${arg}". Using unlimited depth.`);
 63 |     }
 64 |   } else if (!arg.startsWith("--")) {
 65 |     outputPathArg = arg;
 66 |   }
 67 | });
 68 | 
 69 | const DEFAULT_IGNORE_PATTERNS: string[] = [
 70 |   ".git",
 71 |   "node_modules",
 72 |   ".DS_Store",
 73 |   "dist",
 74 |   "build",
 75 |   "logs", // Added logs as a common default ignore
 76 | ];
 77 | 
 78 | /**
 79 |  * Loads and parses patterns from the .gitignore file at the project root.
 80 |  * @returns A promise resolving to an array of GitignorePattern objects.
 81 |  */
 82 | async function loadGitignorePatterns(): Promise<GitignorePattern[]> {
 83 |   const gitignorePath = path.join(projectRoot, ".gitignore");
 84 |   try {
 85 |     // Security: Ensure we read only from within the project root
 86 |     if (!path.resolve(gitignorePath).startsWith(projectRoot + path.sep)) {
 87 |       console.warn(
 88 |         "Warning: Attempted to read .gitignore outside project root. Using default ignore patterns only.",
 89 |       );
 90 |       return [];
 91 |     }
 92 |     const gitignoreContent = await fs.readFile(gitignorePath, "utf-8");
 93 |     return gitignoreContent
 94 |       .split("\n")
 95 |       .map((line) => line.trim())
 96 |       .filter((line) => line && !line.startsWith("#"))
 97 |       .map((patternLine) => {
 98 |         const negated = patternLine.startsWith("!");
 99 |         const pattern = negated ? patternLine.slice(1) : patternLine;
100 |         // Simplified glob to regex conversion. For full gitignore spec, a library might be better.
101 |         // This handles basic wildcards '*' and directory indicators '/'.
102 |         const regexString = pattern
103 |           .replace(/[.+?^${}()|[\]\\]/g, "\\$&") // Escape standard regex special chars
104 |           .replace(/\*\*/g, ".*") // Handle '**' as 'match anything including slashes'
105 |           .replace(/\*/g, "[^/]*") // Handle '*' as 'match anything except slashes'
106 |           .replace(/\/$/, "(/.*)?"); // Handle trailing slash for directories
107 |         return {
108 |           pattern: pattern,
109 |           negated: negated,
110 |           regex: regexString,
111 |         };
112 |       });
113 |   } catch (error: unknown) {
114 |     const err = error as NodeJS.ErrnoException | undefined;
115 |     if (err?.code === "ENOENT") {
116 |       console.warn(
117 |         "Info: No .gitignore file found at project root. Using default ignore patterns only.",
118 |       );
119 |     } else {
120 |       console.error(
121 |         `Error reading .gitignore: ${err?.message ?? String(error)}`,
122 |       );
123 |     }
124 |     return [];
125 |   }
126 | }
127 | 
128 | /**
129 |  * Checks if a given path should be ignored based on default and .gitignore patterns.
130 |  * @param entryPath - The absolute path to the file or directory entry.
131 |  * @param ignorePatterns - An array of GitignorePattern objects.
132 |  * @returns True if the path should be ignored, false otherwise.
133 |  */
134 | function isIgnored(
135 |   entryPath: string,
136 |   ignorePatterns: GitignorePattern[],
137 | ): boolean {
138 |   const relativePath = path.relative(projectRoot, entryPath);
139 |   const baseName = path.basename(relativePath); // Get the file/directory name
140 | 
141 |   // Check default patterns:
142 |   // - If the baseName itself is in DEFAULT_IGNORE_PATTERNS (e.g., ".DS_Store")
143 |   // - Or if the relativePath starts with a default pattern that is a directory (e.g., "node_modules/")
144 |   //   followed by a path separator, or if the relativePath exactly matches the pattern.
145 |   if (
146 |     DEFAULT_IGNORE_PATTERNS.some((p) => {
147 |       if (p === baseName) return true; // Matches ".DS_Store" as a filename anywhere
148 |       // For directory-like patterns in DEFAULT_IGNORE_PATTERNS (e.g. "node_modules", ".git")
149 |       if (relativePath.startsWith(p + path.sep) || relativePath === p)
150 |         return true;
151 |       return false;
152 |     })
153 |   ) {
154 |     return true;
155 |   }
156 | 
157 |   let ignoredByGitignore = false;
158 |   for (const { negated, regex } of ignorePatterns) {
159 |     // Test regex against the start of the relative path for directories, or full match for files.
160 |     const regexPattern = new RegExp(`^${regex}(/|$)`);
161 |     if (regexPattern.test(relativePath)) {
162 |       ignoredByGitignore = !negated; // If negated, a match means it's NOT ignored by this rule.
163 |     }
164 |   }
165 |   return ignoredByGitignore;
166 | }
167 | 
168 | /**
169 |  * Recursively generates a string representation of the directory tree.
170 |  * @param dir - The absolute path of the directory to traverse.
171 |  * @param ignorePatterns - Patterns to ignore.
172 |  * @param prefix - String prefix for formatting the tree lines.
173 |  * @param currentDepth - Current depth of traversal.
174 |  * @returns A promise resolving to the tree string.
175 |  */
176 | async function generateTree(
177 |   dir: string,
178 |   ignorePatterns: GitignorePattern[],
179 |   prefix = "",
180 |   currentDepth = 0,
181 | ): Promise<string> {
182 |   const resolvedDir = path.resolve(dir);
183 |   if (
184 |     !resolvedDir.startsWith(projectRoot + path.sep) &&
185 |     resolvedDir !== projectRoot
186 |   ) {
187 |     console.warn(
188 |       `Security: Skipping directory outside project root: ${resolvedDir}`,
189 |     );
190 |     return "";
191 |   }
192 | 
193 |   if (currentDepth > maxDepthArg) {
194 |     return "";
195 |   }
196 | 
197 |   let entries: Dirent[];
198 |   try {
199 |     entries = (await fs.readdir(resolvedDir, {
200 |       withFileTypes: true,
201 |     })) as unknown as Dirent[];
202 |   } catch (error: unknown) {
203 |     const err = error as NodeJS.ErrnoException | undefined;
204 |     console.error(
205 |       `Error reading directory ${resolvedDir}: ${err?.message ?? String(error)}`,
206 |     );
207 |     return "";
208 |   }
209 | 
210 |   let output = "";
211 |   const filteredEntries = entries
212 |     .filter(
213 |       (entry) => !isIgnored(path.join(resolvedDir, entry.name), ignorePatterns),
214 |     )
215 |     .sort((a, b) => {
216 |       if (a.isDirectory() && !b.isDirectory()) return -1;
217 |       if (!a.isDirectory() && b.isDirectory()) return 1;
218 |       return a.name.localeCompare(b.name);
219 |     });
220 | 
221 |   for (let i = 0; i < filteredEntries.length; i++) {
222 |     const entry = filteredEntries[i];
223 |     const isLastEntry = i === filteredEntries.length - 1;
224 |     const connector = isLastEntry ? "└── " : "├── ";
225 |     const newPrefix = prefix + (isLastEntry ? "    " : "│   ");
226 | 
227 |     output += prefix + connector + entry.name + "\n";
228 | 
229 |     if (entry.isDirectory()) {
230 |       output += await generateTree(
231 |         path.join(resolvedDir, entry.name),
232 |         ignorePatterns,
233 |         newPrefix,
234 |         currentDepth + 1,
235 |       );
236 |     }
237 |   }
238 |   return output;
239 | }
240 | 
241 | /**
242 |  * Main function to orchestrate loading ignore patterns, generating the tree,
243 |  * and writing it to the specified output file.
244 |  */
245 | const writeTreeToFile = async (): Promise<void> => {
246 |   try {
247 |     const projectName = path.basename(projectRoot);
248 |     const ignorePatterns = await loadGitignorePatterns();
249 |     const resolvedOutputFile = path.resolve(projectRoot, outputPathArg);
250 | 
251 |     // Security Validation for Output Path
252 |     if (!resolvedOutputFile.startsWith(projectRoot + path.sep)) {
253 |       console.error(
254 |         `Error: Output path "${outputPathArg}" resolves outside the project directory: ${resolvedOutputFile}. Aborting.`,
255 |       );
256 |       process.exit(1);
257 |     }
258 |     const resolvedOutputDir = path.dirname(resolvedOutputFile);
259 |     if (
260 |       !resolvedOutputDir.startsWith(projectRoot + path.sep) &&
261 |       resolvedOutputDir !== projectRoot
262 |     ) {
263 |       console.error(
264 |         `Error: Output directory "${resolvedOutputDir}" is outside the project directory. Aborting.`,
265 |       );
266 |       process.exit(1);
267 |     }
268 | 
269 |     console.log(`Generating directory tree for project: ${projectName}`);
270 |     console.log(`Output will be saved to: ${resolvedOutputFile}`);
271 |     if (maxDepthArg !== Infinity) {
272 |       console.log(`Maximum depth set to: ${maxDepthArg}`);
273 |     }
274 | 
275 |     const treeContent = await generateTree(projectRoot, ignorePatterns, "", 0);
276 | 
277 |     try {
278 |       await fs.access(resolvedOutputDir);
279 |     } catch {
280 |       console.log(`Output directory not found. Creating: ${resolvedOutputDir}`);
281 |       await fs.mkdir(resolvedOutputDir, { recursive: true });
282 |     }
283 | 
284 |     const timestamp = new Date()
285 |       .toISOString()
286 |       .replace(/T/, " ")
287 |       .replace(/\..+/, "");
288 |     const fileHeader = `# ${projectName} - Directory Structure\n\nGenerated on: ${timestamp}\n`;
289 |     const depthInfo =
290 |       maxDepthArg !== Infinity
291 |         ? `\n_Depth limited to ${maxDepthArg} levels_\n\n`
292 |         : "\n";
293 |     const treeBlock = `\`\`\`\n${projectName}\n${treeContent}\`\`\`\n`;
294 |     const fileFooter = `\n_Note: This tree excludes files and directories matched by .gitignore and default patterns._\n`;
295 |     const finalContent = fileHeader + depthInfo + treeBlock + fileFooter;
296 | 
297 |     await fs.writeFile(resolvedOutputFile, finalContent);
298 |     console.log(
299 |       `Successfully generated tree structure in: ${resolvedOutputFile}`,
300 |     );
301 |   } catch (error) {
302 |     console.error(
303 |       `Error generating tree: ${error instanceof Error ? error.message : String(error)}`,
304 |     );
305 |     process.exit(1);
306 |   }
307 | };
308 | 
309 | writeTreeToFile();
310 | 
```
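
The trickiest part of the script is the simplified `.gitignore` glob handling. The snippet below (an editor's sketch, not part of the repository) reproduces the exact replace chain from `loadGitignorePatterns` to show what a few common patterns turn into. Note that the later `*` pass also rewrites the `.*` produced by the `**` pass, which is why the source comments recommend a dedicated library for full gitignore semantics.

```typescript
// Reproduces the glob-to-regex conversion used in loadGitignorePatterns above.
const toRegexSource = (pattern: string): string =>
  pattern
    .replace(/[.+?^${}()|[\]\\]/g, "\\$&") // escape regex special characters
    .replace(/\*\*/g, ".*") // '**' -> '.*' (then partly rewritten by the next pass)
    .replace(/\*/g, "[^/]*") // '*' -> anything except a path separator
    .replace(/\/$/, "(/.*)?"); // trailing '/' -> the directory and its contents

// isIgnored() tests `^<regex>(/|$)` against the project-relative path:
console.log(toRegexSource("logs/")); // "logs(/.*)?"   -> ignores "logs" and "logs/app.log"
console.log(toRegexSource("*.log")); // "[^/]*\.log"   -> ignores "debug.log" at the root only
console.log(toRegexSource("dist/**")); // "dist/.[^/]*" -> '**' effectively becomes '.[^/]*'
```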

--------------------------------------------------------------------------------
/src/utils/security/idGenerator.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Provides a utility class `IdGenerator` for creating customizable, prefixed unique identifiers,
  3 |  * and a standalone `generateUUID` function for generating standard UUIDs.
  4 |  * The `IdGenerator` supports entity-specific prefixes, custom character sets, and lengths.
  5 |  *
  6 |  * Note: Logging has been removed from this module to prevent circular dependencies
  7 |  * with the `requestContextService`, which itself uses `generateUUID` from this module.
  8 |  * This was causing `ReferenceError: Cannot access 'generateUUID' before initialization`
  9 |  * during application startup.
 10 |  * @module src/utils/security/idGenerator
 11 |  */
 12 | import { randomUUID as cryptoRandomUUID, randomBytes } from "crypto";
 13 | import { BaseErrorCode, McpError } from "../../types-global/errors.js";
 14 | // Removed: import { logger, requestContextService } from "../index.js";
 15 | 
 16 | /**
 17 |  * Defines the structure for configuring entity prefixes.
 18 |  * Keys are entity type names (e.g., "project", "task"), and values are their corresponding ID prefixes (e.g., "PROJ", "TASK").
 19 |  */
 20 | export interface EntityPrefixConfig {
 21 |   [key: string]: string;
 22 | }
 23 | 
 24 | /**
 25 |  * Defines options for customizing ID generation.
 26 |  */
 27 | export interface IdGenerationOptions {
 28 |   length?: number;
 29 |   separator?: string;
 30 |   charset?: string;
 31 | }
 32 | 
 33 | /**
 34 |  * A generic ID Generator class for creating and managing unique, prefixed identifiers.
 35 |  * Allows defining custom prefixes, generating random strings, and validating/normalizing IDs.
 36 |  */
 37 | export class IdGenerator {
 38 |   /**
 39 |    * Default character set for the random part of the ID.
 40 |    * @private
 41 |    */
 42 |   private static DEFAULT_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
 43 |   /**
 44 |    * Default separator character between prefix and random part.
 45 |    * @private
 46 |    */
 47 |   private static DEFAULT_SEPARATOR = "_";
 48 |   /**
 49 |    * Default length for the random part of the ID.
 50 |    * @private
 51 |    */
 52 |   private static DEFAULT_LENGTH = 6;
 53 | 
 54 |   /**
 55 |    * Stores the mapping of entity types to their prefixes.
 56 |    * @private
 57 |    */
 58 |   private entityPrefixes: EntityPrefixConfig = {};
 59 |   /**
 60 |    * Stores a reverse mapping from prefixes (case-insensitive) to entity types.
 61 |    * @private
 62 |    */
 63 |   private prefixToEntityType: Record<string, string> = {};
 64 | 
 65 |   /**
 66 |    * Constructs an `IdGenerator` instance.
 67 |    * @param entityPrefixes - An initial map of entity types to their prefixes.
 68 |    */
 69 |   constructor(entityPrefixes: EntityPrefixConfig = {}) {
 70 |     // Logging removed to prevent circular dependency with requestContextService.
 71 |     this.setEntityPrefixes(entityPrefixes);
 72 |   }
 73 | 
 74 |   /**
 75 |    * Sets or updates the entity prefix configuration and rebuilds the internal reverse lookup map.
 76 |    * @param entityPrefixes - A map where keys are entity type names and values are their desired ID prefixes.
 77 |    */
 78 |   public setEntityPrefixes(entityPrefixes: EntityPrefixConfig): void {
 79 |     // Logging removed.
 80 |     this.entityPrefixes = { ...entityPrefixes };
 81 | 
 82 |     this.prefixToEntityType = Object.entries(this.entityPrefixes).reduce(
 83 |       (acc, [type, prefix]) => {
 84 |         acc[prefix.toLowerCase()] = type; // Store lowercase for case-insensitive lookup
 85 |         return acc;
 86 |       },
 87 |       {} as Record<string, string>,
 88 |     );
 89 |   }
 90 | 
 91 |   /**
 92 |    * Retrieves a copy of the current entity prefix configuration.
 93 |    * @returns The current entity prefix configuration.
 94 |    */
 95 |   public getEntityPrefixes(): EntityPrefixConfig {
 96 |     return { ...this.entityPrefixes };
 97 |   }
 98 | 
 99 |   /**
100 |    * Generates a cryptographically secure random string.
101 |    * @param length - The desired length of the random string. Defaults to `IdGenerator.DEFAULT_LENGTH`.
102 |    * @param charset - The character set to use. Defaults to `IdGenerator.DEFAULT_CHARSET`.
103 |    * @returns The generated random string.
104 |    */
105 |   public generateRandomString(
106 |     length: number = IdGenerator.DEFAULT_LENGTH,
107 |     charset: string = IdGenerator.DEFAULT_CHARSET,
108 |   ): string {
109 |     let result = "";
110 |     // Determine the largest multiple of charset.length that is less than or equal to 256
111 |     // This is the threshold for rejection sampling to avoid bias.
112 |     const maxValidByteValue = Math.floor(256 / charset.length) * charset.length;
113 | 
114 |     while (result.length < length) {
115 |       const byteBuffer = randomBytes(1); // Get one random byte
116 |       const byte = byteBuffer[0];
117 | 
118 |       // If the byte is within the valid range (i.e., it won't introduce bias),
119 |       // use it to select a character from the charset. Otherwise, discard and try again.
120 |       if (byte !== undefined && byte < maxValidByteValue) {
121 |         const charIndex = byte % charset.length;
122 |         const char = charset[charIndex];
123 |         if (char) {
124 |           result += char;
125 |         }
126 |       }
127 |     }
128 |     return result;
129 |   }
130 | 
131 |   /**
132 |    * Generates a unique ID, optionally prepended with a prefix.
133 |    * @param prefix - An optional prefix for the ID.
134 |    * @param options - Optional parameters for ID generation (length, separator, charset).
135 |    * @returns A unique identifier string.
136 |    */
137 |   public generate(prefix?: string, options: IdGenerationOptions = {}): string {
138 |     // Logging removed.
139 |     const {
140 |       length = IdGenerator.DEFAULT_LENGTH,
141 |       separator = IdGenerator.DEFAULT_SEPARATOR,
142 |       charset = IdGenerator.DEFAULT_CHARSET,
143 |     } = options;
144 | 
145 |     const randomPart = this.generateRandomString(length, charset);
146 |     const generatedId = prefix
147 |       ? `${prefix}${separator}${randomPart}`
148 |       : randomPart;
149 |     return generatedId;
150 |   }
151 | 
152 |   /**
153 |    * Generates a unique ID for a specified entity type, using its configured prefix.
154 |    * @param entityType - The type of entity (must be registered).
155 |    * @param options - Optional parameters for ID generation.
156 |    * @returns A unique identifier string for the entity (e.g., "PROJ_A6B3J0").
157 |    * @throws {McpError} If the `entityType` is not registered.
158 |    */
159 |   public generateForEntity(
160 |     entityType: string,
161 |     options: IdGenerationOptions = {},
162 |   ): string {
163 |     const prefix = this.entityPrefixes[entityType];
164 |     if (!prefix) {
165 |       throw new McpError(
166 |         BaseErrorCode.VALIDATION_ERROR,
167 |         `Unknown entity type: ${entityType}. No prefix registered.`,
168 |       );
169 |     }
170 |     return this.generate(prefix, options);
171 |   }
172 | 
173 |   /**
174 |    * Validates if an ID conforms to the expected format for a specific entity type.
175 |    * @param id - The ID string to validate.
176 |    * @param entityType - The expected entity type of the ID.
177 |    * @param options - Optional parameters used during generation for validation consistency.
178 |    *                  The `charset` from these options will be used for validation.
179 |    * @returns `true` if the ID is valid, `false` otherwise.
180 |    */
181 |   public isValid(
182 |     id: string,
183 |     entityType: string,
184 |     options: IdGenerationOptions = {},
185 |   ): boolean {
186 |     const prefix = this.entityPrefixes[entityType];
187 |     const {
188 |       length = IdGenerator.DEFAULT_LENGTH,
189 |       separator = IdGenerator.DEFAULT_SEPARATOR,
190 |       charset = IdGenerator.DEFAULT_CHARSET, // Use charset from options or default
191 |     } = options;
192 | 
193 |     if (!prefix) {
194 |       return false;
195 |     }
196 | 
197 |     // Build regex character class from the charset
198 |     // Escape characters that have special meaning inside a regex character class `[]`
199 |     const escapedCharsetForClass = charset.replace(/[[\]\\^-]/g, "\\$&");
200 |     const charsetRegexPart = `[${escapedCharsetForClass}]`;
201 | 
202 |     const pattern = new RegExp(
203 |       `^${this.escapeRegex(prefix)}${this.escapeRegex(separator)}${charsetRegexPart}{${length}}$`,
204 |     );
205 |     return pattern.test(id);
206 |   }
207 | 
208 |   /**
209 |    * Escapes special characters in a string for use in a regular expression.
210 |    * @param str - The string to escape.
211 |    * @returns The escaped string.
212 |    * @private
213 |    */
214 |   private escapeRegex(str: string): string {
215 |     return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
216 |   }
217 | 
218 |   /**
219 |    * Strips the prefix and separator from an ID string.
220 |    * @param id - The ID string (e.g., "PROJ_A6B3J0").
221 |    * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
222 |    * @returns The ID part without the prefix, or the original ID if separator not found.
223 |    */
224 |   public stripPrefix(
225 |     id: string,
226 |     separator: string = IdGenerator.DEFAULT_SEPARATOR,
227 |   ): string {
228 |     const parts = id.split(separator);
229 |     return parts.length > 1 ? parts.slice(1).join(separator) : id; // Handle separators in random part
230 |   }
231 | 
232 |   /**
233 |    * Determines the entity type from an ID string by its prefix (case-insensitive).
234 |    * @param id - The ID string (e.g., "PROJ_A6B3J0").
235 |    * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
236 |    * @returns The determined entity type.
237 |    * @throws {McpError} If ID format is invalid or prefix is unknown.
238 |    */
239 |   public getEntityType(
240 |     id: string,
241 |     separator: string = IdGenerator.DEFAULT_SEPARATOR,
242 |   ): string {
243 |     const parts = id.split(separator);
244 |     if (parts.length < 2 || !parts[0]) {
245 |       throw new McpError(
246 |         BaseErrorCode.VALIDATION_ERROR,
247 |         `Invalid ID format: ${id}. Expected format like: PREFIX${separator}RANDOMPART`,
248 |       );
249 |     }
250 | 
251 |     const prefix = parts[0];
252 |     const entityType = this.prefixToEntityType[prefix.toLowerCase()];
253 | 
254 |     if (!entityType) {
255 |       throw new McpError(
256 |         BaseErrorCode.VALIDATION_ERROR,
257 |         `Unknown entity type for prefix: ${prefix}`,
258 |       );
259 |     }
260 |     return entityType;
261 |   }
262 | 
263 |   /**
264 |    * Normalizes an entity ID to ensure the prefix matches the registered case
265 |    * and the random part is uppercase. Note: This assumes the charset characters
266 |    * have a meaningful uppercase version if case-insensitivity is desired for the random part.
267 |    * For default charset (A-Z0-9), this is fine. For custom charsets, behavior might vary.
268 |    * @param id - The ID to normalize (e.g., "proj_a6b3j0").
269 |    * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
270 |    * @returns The normalized ID (e.g., "PROJ_A6B3J0").
271 |    * @throws {McpError} If the entity type cannot be determined from the ID.
272 |    */
273 |   public normalize(
274 |     id: string,
275 |     separator: string = IdGenerator.DEFAULT_SEPARATOR,
276 |   ): string {
277 |     const entityType = this.getEntityType(id, separator);
278 |     const registeredPrefix = this.entityPrefixes[entityType];
279 |     const idParts = id.split(separator);
280 |     const randomPart = idParts.slice(1).join(separator);
281 | 
282 |     // Consider if randomPart.toUpperCase() is always correct for custom charsets.
283 |     // For now, maintaining existing behavior.
284 |     return `${registeredPrefix}${separator}${randomPart.toUpperCase()}`;
285 |   }
286 | }
287 | 
288 | /**
289 |  * Default singleton instance of the `IdGenerator`.
290 |  * Configure by calling `idGenerator.setEntityPrefixes({...})` with your entity prefixes.
291 |  */
292 | export const idGenerator = new IdGenerator();
293 | 
294 | /**
295 |  * Generates a standard Version 4 UUID (Universally Unique Identifier).
296 |  * Uses the Node.js `crypto` module. This function is independent of the IdGenerator instance
297 |  * to prevent circular dependencies when used by other utilities like requestContextService.
298 |  * @returns A new UUID string.
299 |  */
300 | export const generateUUID = (): string => {
301 |   return cryptoRandomUUID();
302 | };
303 | 
```
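
A brief usage sketch for the exported singleton and the standalone `generateUUID` helper (an editor's illustration; the prefixes and sample outputs below are placeholders):

```typescript
import { generateUUID, idGenerator } from "./idGenerator.js";

// Register entity prefixes on the shared singleton.
idGenerator.setEntityPrefixes({ project: "PROJ", task: "TASK" });

const projectId = idGenerator.generateForEntity("project"); // e.g. "PROJ_A6B3J0"
console.log(idGenerator.isValid(projectId, "project")); // true
console.log(idGenerator.getEntityType("proj_x1y2z3")); // "project" (case-insensitive prefix lookup)
console.log(idGenerator.normalize("proj_x1y2z3")); // "PROJ_X1Y2Z3"
console.log(idGenerator.stripPrefix(projectId)); // the random part, e.g. "A6B3J0"
console.log(generateUUID()); // standard v4 UUID, independent of the class
```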

--------------------------------------------------------------------------------
/src/config/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Loads, validates, and exports application configuration.
  3 |  * This module centralizes configuration management, sourcing values from
  4 |  * environment variables and `package.json`. It uses Zod for schema validation
  5 |  * to ensure type safety and correctness of configuration parameters.
  6 |  *
  7 |  * @module src/config/index
  8 |  */
  9 | 
 10 | import dotenv from "dotenv";
 11 | import { existsSync, mkdirSync, readFileSync, statSync } from "fs";
 12 | import path, { dirname, join } from "path";
 13 | import { fileURLToPath } from "url";
 14 | import { z } from "zod";
 15 | 
 16 | dotenv.config();
 17 | 
 18 | // --- Determine Project Root ---
 19 | const findProjectRoot = (startDir: string): string => {
 20 |   let currentDir = startDir;
 21 |   // If the start directory is in `dist`, start searching from the parent directory.
 22 |   if (path.basename(currentDir) === "dist") {
 23 |     currentDir = path.dirname(currentDir);
 24 |   }
 25 |   while (true) {
 26 |     const packageJsonPath = join(currentDir, "package.json");
 27 |     if (existsSync(packageJsonPath)) {
 28 |       return currentDir;
 29 |     }
 30 |     const parentDir = dirname(currentDir);
 31 |     if (parentDir === currentDir) {
 32 |       throw new Error(
 33 |         `Could not find project root (package.json) starting from ${startDir}`,
 34 |       );
 35 |     }
 36 |     currentDir = parentDir;
 37 |   }
 38 | };
 39 | let projectRoot: string;
 40 | try {
 41 |   const currentModuleDir = dirname(fileURLToPath(import.meta.url));
 42 |   projectRoot = findProjectRoot(currentModuleDir);
 43 | } catch (error: unknown) {
 44 |   const errorMessage = error instanceof Error ? error.message : String(error);
 45 |   console.error(`FATAL: Error determining project root: ${errorMessage}`);
 46 |   projectRoot = process.cwd();
 47 |   if (process.stdout.isTTY) {
 48 |     console.warn(
 49 |       `Warning: Using process.cwd() (${projectRoot}) as fallback project root.`,
 50 |     );
 51 |   }
 52 | }
 53 | // --- End Determine Project Root ---
 54 | 
 55 | /**
 56 |  * Loads and parses the package.json file from the project root.
 57 |  * @returns The parsed package.json object or a fallback default.
 58 |  * @private
 59 |  */
 60 | const loadPackageJson = (): {
 61 |   name: string;
 62 |   version: string;
 63 |   description: string;
 64 | } => {
 65 |   const pkgPath = join(projectRoot, "package.json");
 66 |   const fallback = {
 67 |     name: "pubmed-mcp-server",
 68 |     version: "0.0.0",
 69 |     description: "No description provided.",
 70 |   };
 71 | 
 72 |   if (!existsSync(pkgPath)) {
 73 |     if (process.stdout.isTTY) {
 74 |       console.warn(
 75 |         `Warning: package.json not found at ${pkgPath}. Using fallback values. This is expected in some environments (e.g., Docker) but may indicate an issue with project root detection.`,
 76 |       );
 77 |     }
 78 |     return fallback;
 79 |   }
 80 | 
 81 |   try {
 82 |     const fileContents = readFileSync(pkgPath, "utf-8");
 83 |     const parsed = JSON.parse(fileContents);
 84 |     return {
 85 |       name: typeof parsed.name === "string" ? parsed.name : fallback.name,
 86 |       version:
 87 |         typeof parsed.version === "string" ? parsed.version : fallback.version,
 88 |       description:
 89 |         typeof parsed.description === "string"
 90 |           ? parsed.description
 91 |           : fallback.description,
 92 |     };
 93 |   } catch (error) {
 94 |     if (process.stdout.isTTY) {
 95 |       console.error(
 96 |         "Warning: Could not read or parse package.json. Using hardcoded defaults.",
 97 |         error,
 98 |       );
 99 |     }
100 |     return fallback;
101 |   }
102 | };
103 | 
104 | const pkg = loadPackageJson();
105 | 
106 | const EnvSchema = z
107 |   .object({
108 |     // Core Server Config
109 |     MCP_SERVER_NAME: z.string().optional(),
110 |     MCP_SERVER_VERSION: z.string().optional(),
111 |     NODE_ENV: z.string().default("development"),
112 | 
113 |     // Logging
114 |     MCP_LOG_LEVEL: z.string().default("debug"),
115 |     LOGS_DIR: z.string().default(path.join(projectRoot, "logs")),
116 | 
117 |     // Transport
118 |     MCP_TRANSPORT_TYPE: z.enum(["stdio", "http"]).default("stdio"),
119 |     MCP_SESSION_MODE: z.enum(["stateless", "stateful", "auto"]).default("auto"),
120 |     MCP_HTTP_PORT: z.coerce.number().int().positive().default(3017),
121 |     MCP_HTTP_HOST: z.string().default("127.0.0.1"),
122 |     MCP_HTTP_ENDPOINT_PATH: z.string().default("/mcp"),
123 |     MCP_HTTP_MAX_PORT_RETRIES: z.coerce
124 |       .number()
125 |       .int()
126 |       .nonnegative()
127 |       .default(15),
128 |     MCP_HTTP_PORT_RETRY_DELAY_MS: z.coerce
129 |       .number()
130 |       .int()
131 |       .nonnegative()
132 |       .default(50),
133 |     MCP_STATEFUL_SESSION_STALE_TIMEOUT_MS: z.coerce
134 |       .number()
135 |       .int()
136 |       .positive()
137 |       .default(1_800_000),
138 |     MCP_ALLOWED_ORIGINS: z.string().optional(),
139 | 
140 |     // Authentication
141 |     MCP_AUTH_MODE: z.enum(["jwt", "oauth", "none"]).default("none"),
142 |     MCP_AUTH_SECRET_KEY: z
143 |       .string()
144 |       .min(32, "MCP_AUTH_SECRET_KEY must be at least 32 characters long.")
145 |       .optional(),
146 |     OAUTH_ISSUER_URL: z.string().url().optional(),
147 |     OAUTH_JWKS_URI: z.string().url().optional(),
148 |     OAUTH_AUDIENCE: z.string().optional(),
149 | 
150 |     // Dev mode JWT
151 |     DEV_MCP_CLIENT_ID: z.string().optional(),
152 |     DEV_MCP_SCOPES: z.string().optional(),
153 | 
154 |     // NCBI E-utilities
155 |     NCBI_API_KEY: z.string().optional(),
156 |     NCBI_TOOL_IDENTIFIER: z.string().optional(),
157 |     NCBI_ADMIN_EMAIL: z.string().email().optional(),
158 |     NCBI_REQUEST_DELAY_MS: z.coerce.number().int().positive().optional(),
159 |     NCBI_MAX_RETRIES: z.coerce.number().int().nonnegative().default(3),
160 | 
161 |     // --- START: OpenTelemetry Configuration ---
162 |     /** If 'true', OpenTelemetry will be initialized and enabled. Default: 'false'. */
163 |     OTEL_ENABLED: z
164 |       .string()
165 |       .transform((v) => v.toLowerCase() === "true")
166 |       .default("false"),
167 |     /** The logical name of the service. Defaults to MCP_SERVER_NAME or package name. */
168 |     OTEL_SERVICE_NAME: z.string().optional(),
169 |     /** The version of the service. Defaults to MCP_SERVER_VERSION or package version. */
170 |     OTEL_SERVICE_VERSION: z.string().optional(),
171 |     /** The OTLP endpoint for traces. If not set, traces are logged to a file in development. */
172 |     OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: z.string().url().optional(),
173 |     /** The OTLP endpoint for metrics. If not set, metrics are not exported. */
174 |     OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: z.string().url().optional(),
175 |     /** Sampling ratio for traces (0.0 to 1.0). 1.0 means sample all. Default: 1.0 */
176 |     OTEL_TRACES_SAMPLER_ARG: z.coerce.number().min(0).max(1).default(1.0),
177 |     /** Log level for OpenTelemetry's internal diagnostic logger. Default: "INFO". */
178 |     OTEL_LOG_LEVEL: z
179 |       .enum(["NONE", "ERROR", "WARN", "INFO", "DEBUG", "VERBOSE", "ALL"])
180 |       .default("INFO"),
181 |   })
182 |   .superRefine((data, ctx) => {
183 |     if (
184 |       data.NODE_ENV === "production" &&
185 |       data.MCP_TRANSPORT_TYPE === "http" &&
186 |       data.MCP_AUTH_MODE === "jwt" &&
187 |       !data.MCP_AUTH_SECRET_KEY
188 |     ) {
189 |       ctx.addIssue({
190 |         code: z.ZodIssueCode.custom,
191 |         path: ["MCP_AUTH_SECRET_KEY"],
192 |         message:
193 |           "MCP_AUTH_SECRET_KEY is required for 'jwt' auth in production with 'http' transport.",
194 |       });
195 |     }
196 |     if (data.MCP_AUTH_MODE === "oauth") {
197 |       if (!data.OAUTH_ISSUER_URL) {
198 |         ctx.addIssue({
199 |           code: z.ZodIssueCode.custom,
200 |           path: ["OAUTH_ISSUER_URL"],
201 |           message: "OAUTH_ISSUER_URL is required for 'oauth' mode.",
202 |         });
203 |       }
204 |       if (!data.OAUTH_AUDIENCE) {
205 |         ctx.addIssue({
206 |           code: z.ZodIssueCode.custom,
207 |           path: ["OAUTH_AUDIENCE"],
208 |           message: "OAUTH_AUDIENCE is required for 'oauth' mode.",
209 |         });
210 |       }
211 |     }
212 |   });
213 | 
214 | const parsedEnv = EnvSchema.safeParse(process.env);
215 | 
216 | if (!parsedEnv.success) {
217 |   if (process.stdout.isTTY) {
218 |     console.error(
219 |       "❌ Invalid environment variables:",
220 |       parsedEnv.error.flatten().fieldErrors,
221 |     );
222 |   }
223 | }
224 | 
225 | const env = parsedEnv.success ? parsedEnv.data : EnvSchema.parse({});
226 | 
227 | const ensureDirectory = (
228 |   dirPath: string,
229 |   rootDir: string,
230 |   dirName: string,
231 | ): string | null => {
232 |   const resolvedDirPath = path.isAbsolute(dirPath)
233 |     ? dirPath
234 |     : path.resolve(rootDir, dirPath);
235 | 
236 |   if (
237 |     !resolvedDirPath.startsWith(rootDir + path.sep) &&
238 |     resolvedDirPath !== rootDir
239 |   ) {
240 |     if (process.stdout.isTTY) {
241 |       console.error(
242 |         `Error: ${dirName} path "${dirPath}" resolves to "${resolvedDirPath}", which is outside the project boundary "${rootDir}".`,
243 |       );
244 |     }
245 |     return null;
246 |   }
247 | 
248 |   if (!existsSync(resolvedDirPath)) {
249 |     try {
250 |       mkdirSync(resolvedDirPath, { recursive: true });
251 |       if (process.stdout.isTTY) {
252 |         console.log(`Created ${dirName} directory: ${resolvedDirPath}`);
253 |       }
254 |     } catch (err: unknown) {
255 |       const errorMessage = err instanceof Error ? err.message : String(err);
256 |       if (process.stdout.isTTY) {
257 |         console.error(
258 |           `Error creating ${dirName} directory at ${resolvedDirPath}: ${errorMessage}`,
259 |         );
260 |       }
261 |       return null;
262 |     }
263 |   } else {
264 |     try {
265 |       const stats = statSync(resolvedDirPath);
266 |       if (!stats.isDirectory()) {
267 |         if (process.stdout.isTTY) {
268 |           console.error(
269 |             `Error: ${dirName} path ${resolvedDirPath} exists but is not a directory.`,
270 |           );
271 |         }
272 |         return null;
273 |       }
274 |     } catch (statError: unknown) {
275 |       const errorMessage =
276 |         statError instanceof Error
277 |           ? statError.message
278 |           : "An unknown error occurred";
279 |       if (process.stdout.isTTY) {
280 |         console.error(
281 |           `Error accessing ${dirName} path ${resolvedDirPath}: ${errorMessage}`,
282 |         );
283 |       }
284 |       return null;
285 |     }
286 |   }
287 |   return resolvedDirPath;
288 | };
289 | 
290 | let validatedLogsPath: string | null = ensureDirectory(
291 |   env.LOGS_DIR,
292 |   projectRoot,
293 |   "logs",
294 | );
295 | 
296 | if (!validatedLogsPath) {
297 |   if (process.stdout.isTTY) {
298 |     console.warn(
299 |       `Warning: Custom logs directory ('${env.LOGS_DIR}') is invalid or outside the project boundary. Falling back to default.`,
300 |     );
301 |   }
302 |   const defaultLogsDir = path.join(projectRoot, "logs");
303 |   validatedLogsPath = ensureDirectory(defaultLogsDir, projectRoot, "logs");
304 | 
305 |   if (!validatedLogsPath) {
306 |     if (process.stdout.isTTY) {
307 |       console.warn(
308 |         "Warning: Default logs directory could not be created. File logging will be disabled.",
309 |       );
310 |     }
311 |   }
312 | }
313 | 
314 | export const config = {
315 |   pkg,
316 |   mcpServerName: env.MCP_SERVER_NAME || pkg.name,
317 |   mcpServerVersion: env.MCP_SERVER_VERSION || pkg.version,
318 |   mcpServerDescription: pkg.description,
319 |   logLevel: env.MCP_LOG_LEVEL,
320 |   logsPath: validatedLogsPath,
321 |   environment: env.NODE_ENV,
322 |   mcpTransportType: env.MCP_TRANSPORT_TYPE,
323 |   mcpSessionMode: env.MCP_SESSION_MODE,
324 |   mcpHttpPort: env.MCP_HTTP_PORT,
325 |   mcpHttpHost: env.MCP_HTTP_HOST,
326 |   mcpHttpEndpointPath: env.MCP_HTTP_ENDPOINT_PATH,
327 |   mcpHttpMaxPortRetries: env.MCP_HTTP_MAX_PORT_RETRIES,
328 |   mcpHttpPortRetryDelayMs: env.MCP_HTTP_PORT_RETRY_DELAY_MS,
329 |   mcpStatefulSessionStaleTimeoutMs: env.MCP_STATEFUL_SESSION_STALE_TIMEOUT_MS,
330 |   mcpAllowedOrigins: env.MCP_ALLOWED_ORIGINS?.split(",")
331 |     .map((o) => o.trim())
332 |     .filter(Boolean),
333 |   mcpAuthMode: env.MCP_AUTH_MODE,
334 |   mcpAuthSecretKey: env.MCP_AUTH_SECRET_KEY,
335 |   oauthIssuerUrl: env.OAUTH_ISSUER_URL,
336 |   oauthJwksUri: env.OAUTH_JWKS_URI,
337 |   oauthAudience: env.OAUTH_AUDIENCE,
338 |   devMcpClientId: env.DEV_MCP_CLIENT_ID,
339 |   devMcpScopes: env.DEV_MCP_SCOPES?.split(",").map((s) => s.trim()),
340 |   ncbiApiKey: env.NCBI_API_KEY,
341 |   ncbiToolIdentifier:
342 |     env.NCBI_TOOL_IDENTIFIER ||
343 |     `${env.MCP_SERVER_NAME || pkg.name}/${env.MCP_SERVER_VERSION || pkg.version}`,
344 |   ncbiAdminEmail: env.NCBI_ADMIN_EMAIL,
345 |   ncbiRequestDelayMs:
346 |     env.NCBI_REQUEST_DELAY_MS ?? (env.NCBI_API_KEY ? 100 : 334),
347 |   ncbiMaxRetries: env.NCBI_MAX_RETRIES,
348 |   openTelemetry: {
349 |     enabled: env.OTEL_ENABLED,
350 |     serviceName: env.OTEL_SERVICE_NAME || env.MCP_SERVER_NAME || pkg.name,
351 |     serviceVersion:
352 |       env.OTEL_SERVICE_VERSION || env.MCP_SERVER_VERSION || pkg.version,
353 |     tracesEndpoint: env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
354 |     metricsEndpoint: env.OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
355 |     samplingRatio: env.OTEL_TRACES_SAMPLER_ARG,
356 |     logLevel: env.OTEL_LOG_LEVEL,
357 |   },
358 | };
359 | 
360 | export const logLevel: string = config.logLevel;
361 | export const environment: string = config.environment;
362 | 
```
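
Downstream modules read the exported `config` object rather than `process.env` directly. A short consumption sketch follows (an editor's illustration; the import path is relative to whichever module does the importing):

```typescript
// From a module directly under src/, the validated config is imported like this.
import { config, environment } from "./config/index.js";

// All values are already validated and defaulted by the Zod schema above.
console.log(environment); // NODE_ENV, "development" by default
console.log(config.mcpTransportType); // "stdio" (default) or "http"
console.log(config.mcpHttpPort); // 3017 unless MCP_HTTP_PORT overrides it

// Derived value: 100 ms between NCBI requests when NCBI_API_KEY is set
// (about 10 requests/second), otherwise 334 ms (about 3 requests/second).
console.log(config.ncbiRequestDelayMs);
```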

--------------------------------------------------------------------------------
/src/types-global/pubmedXml.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Global TypeScript type definitions for PubMed XML structures.
  3 |  * These types are used for parsing data returned by NCBI E-utilities,
  4 |  * particularly from EFetch for PubMed articles and ESummary.
  5 |  * @module src/types-global/pubmedXml
  6 |  */
  7 | 
  8 | // Basic type for elements that primarily contain text but might have attributes
  9 | export interface XmlTextElement {
 10 |   "#text"?: string;
 11 |   [key: string]: unknown; // For attributes like _UI, _MajorTopicYN, _EIdType, _ValidYN, _IdType, Label, NlmCategory, _DateType
 12 | }
 13 | 
 14 | // Specific XML element types based on PubMed DTD (simplified)
 15 | 
 16 | export type XmlPMID = XmlTextElement; // e.g., <PMID Version="1">12345</PMID>
 17 | 
 18 | export interface XmlArticleDate extends XmlTextElement {
 19 |   Year?: XmlTextElement;
 20 |   Month?: XmlTextElement;
 21 |   Day?: XmlTextElement;
 22 |   _DateType?: string;
 23 | }
 24 | 
 25 | export interface XmlAuthor {
 26 |   LastName?: XmlTextElement;
 27 |   ForeName?: XmlTextElement;
 28 |   Initials?: XmlTextElement;
 29 |   AffiliationInfo?: {
 30 |     Affiliation?: XmlTextElement;
 31 |   }[];
 32 |   Identifier?: XmlTextElement[]; // For ORCID etc.
 33 |   CollectiveName?: XmlTextElement; // For group authors
 34 | }
 35 | 
 36 | export interface XmlAuthorList {
 37 |   Author?: XmlAuthor[] | XmlAuthor;
 38 |   _CompleteYN?: "Y" | "N";
 39 | }
 40 | 
 41 | export interface XmlPublicationType extends XmlTextElement {
 42 |   _UI?: string;
 43 | }
 44 | 
 45 | export interface XmlPublicationTypeList {
 46 |   PublicationType: XmlPublicationType[] | XmlPublicationType;
 47 | }
 48 | 
 49 | export interface XmlELocationID extends XmlTextElement {
 50 |   _EIdType?: string; // "doi", "pii"
 51 |   _ValidYN?: "Y" | "N";
 52 | }
 53 | 
 54 | export interface XmlArticleId extends XmlTextElement {
 55 |   _IdType?: string; // "doi", "pubmed", "pmc", "mid", etc.
 56 | }
 57 | 
 58 | export interface XmlArticleIdList {
 59 |   ArticleId: XmlArticleId[] | XmlArticleId;
 60 | }
 61 | 
 62 | export interface XmlAbstractText extends XmlTextElement {
 63 |   Label?: string;
 64 |   NlmCategory?: string; // e.g., "BACKGROUND", "METHODS", "RESULTS", "CONCLUSIONS"
 65 | }
 66 | 
 67 | export interface XmlAbstract {
 68 |   AbstractText: XmlAbstractText[] | XmlAbstractText;
 69 |   CopyrightInformation?: XmlTextElement;
 70 | }
 71 | 
 72 | export interface XmlPagination {
 73 |   MedlinePgn?: XmlTextElement; // e.g., "10-5" or "e123"
 74 |   StartPage?: XmlTextElement;
 75 |   EndPage?: XmlTextElement;
 76 | }
 77 | 
 78 | export interface XmlPubDate {
 79 |   Year?: XmlTextElement;
 80 |   Month?: XmlTextElement;
 81 |   Day?: XmlTextElement;
 82 |   MedlineDate?: XmlTextElement; // e.g., "2000 Spring", "1999-2000"
 83 | }
 84 | 
 85 | export interface XmlJournalIssue {
 86 |   Volume?: XmlTextElement;
 87 |   Issue?: XmlTextElement;
 88 |   PubDate?: XmlPubDate;
 89 |   _CitedMedium?: string; // "Internet" or "Print"
 90 | }
 91 | 
 92 | export interface XmlJournal {
 93 |   ISSN?: XmlTextElement & { _IssnType?: string };
 94 |   JournalIssue?: XmlJournalIssue;
 95 |   Title?: XmlTextElement; // Full Journal Title
 96 |   ISOAbbreviation?: XmlTextElement; // Journal Abbreviation
 97 | }
 98 | 
 99 | export interface XmlArticle {
100 |   Journal?: XmlJournal;
101 |   ArticleTitle?: XmlTextElement | string; // Can be just string or object with #text
102 |   Pagination?: XmlPagination;
103 |   ELocationID?: XmlELocationID[] | XmlELocationID;
104 |   Abstract?: XmlAbstract;
105 |   AuthorList?: XmlAuthorList;
106 |   Language?: XmlTextElement[] | XmlTextElement; // Array of languages
107 |   GrantList?: XmlGrantList;
108 |   PublicationTypeList?: XmlPublicationTypeList;
109 |   ArticleDate?: XmlArticleDate[] | XmlArticleDate;
110 |   ArticleIdList?: XmlArticleIdList;
111 |   KeywordList?: XmlKeywordList[] | XmlKeywordList; // Can have multiple KeywordList elements
112 |   // Other elements like VernacularTitle, DataBankList, etc.
113 | }
114 | 
115 | export interface XmlMeshQualifierName extends XmlTextElement {
116 |   _UI?: string;
117 |   _MajorTopicYN?: "Y" | "N";
118 | }
119 | export interface XmlMeshDescriptorName extends XmlTextElement {
120 |   _UI?: string;
121 |   _MajorTopicYN?: "Y" | "N";
122 | }
123 | 
124 | export interface XmlMeshHeading {
125 |   DescriptorName: XmlMeshDescriptorName;
126 |   QualifierName?: XmlMeshQualifierName[] | XmlMeshQualifierName;
127 |   _MajorTopicYN?: "Y" | "N"; // Can also be at the root of MeshHeading
128 | }
129 | 
130 | export interface XmlMeshHeadingList {
131 |   MeshHeading: XmlMeshHeading[] | XmlMeshHeading;
132 | }
133 | 
134 | export interface XmlKeyword extends XmlTextElement {
135 |   _MajorTopicYN?: "Y" | "N";
136 |   _Owner?: string; // NLM, NLM-AUTO, PIP, KIE, NOTNLM, NASA, HHS
137 | }
138 | 
139 | export interface XmlKeywordList {
140 |   Keyword: XmlKeyword[] | XmlKeyword;
141 |   _Owner?: string;
142 | }
143 | 
144 | export interface XmlGrant {
145 |   GrantID?: XmlTextElement;
146 |   Acronym?: XmlTextElement;
147 |   Agency?: XmlTextElement;
148 |   Country?: XmlTextElement;
149 | }
150 | 
151 | export interface XmlGrantList {
152 |   Grant: XmlGrant[] | XmlGrant;
153 |   _CompleteYN?: "Y" | "N";
154 | }
155 | 
156 | export interface XmlMedlineCitation {
157 |   PMID: XmlPMID;
158 |   DateCreated?: XmlArticleDate;
159 |   DateCompleted?: XmlArticleDate;
160 |   DateRevised?: XmlArticleDate;
161 |   Article?: XmlArticle;
162 |   MeshHeadingList?: XmlMeshHeadingList;
163 |   KeywordList?: XmlKeywordList[] | XmlKeywordList; // Can be an array of KeywordList
164 |   GeneralNote?: (XmlTextElement & { _Owner?: string })[];
165 |   CitationSubset?: XmlTextElement[] | XmlTextElement;
166 |   MedlinePgn?: XmlTextElement; // For page numbers, sometimes here
167 |   // Other elements like CommentsCorrectionsList, GeneSymbolList, etc.
168 |   _Owner?: string; // e.g., "NLM", "NASA", "PIP", "KIE", "HSR", "HMD", "NOTNLM"
169 |   _Status?: string; // e.g., "MEDLINE", "PubMed-not-MEDLINE", "In-Data-Review", "In-Process", "Publisher", "Completed"
170 | }
171 | 
172 | export interface XmlPubmedArticle {
173 |   MedlineCitation: XmlMedlineCitation;
174 |   PubmedData?: {
175 |     History?: {
176 |       PubMedPubDate: (XmlArticleDate & { _PubStatus?: string })[];
177 |     };
178 |     PublicationStatus?: XmlTextElement;
179 |     ArticleIdList?: XmlArticleIdList; // ArticleIdList can also be under PubmedData
180 |     ReferenceList?: unknown; // Complex structure for references
181 |   };
182 | }
183 | 
184 | export interface XmlPubmedArticleSet {
185 |   PubmedArticle?: XmlPubmedArticle[] | XmlPubmedArticle;
186 |   DeleteCitation?: {
187 |     PMID: XmlPMID[] | XmlPMID;
188 |   };
189 |   // Can also contain ErrorList or other elements if the request had issues
190 | }
191 | 
192 | // Parsed object types (for application use, derived from XML types)
193 | 
194 | export interface ParsedArticleAuthor {
195 |   lastName?: string;
196 |   firstName?: string;
197 |   initials?: string;
198 |   affiliation?: string;
199 |   collectiveName?: string;
200 | }
201 | 
202 | export interface ParsedArticleDate {
203 |   dateType?: string;
204 |   year?: string;
205 |   month?: string;
206 |   day?: string;
207 | }
208 | 
209 | export interface ParsedJournalPublicationDate {
210 |   year?: string;
211 |   month?: string;
212 |   day?: string;
213 |   medlineDate?: string;
214 | }
215 | 
216 | export interface ParsedJournalInfo {
217 |   title?: string;
218 |   isoAbbreviation?: string;
219 |   volume?: string;
220 |   issue?: string;
221 |   pages?: string;
222 |   publicationDate?: ParsedJournalPublicationDate;
223 | }
224 | 
225 | export interface ParsedMeshTerm {
226 |   descriptorName?: string;
227 |   descriptorUi?: string;
228 |   qualifierName?: string;
229 |   qualifierUi?: string;
230 |   isMajorTopic: boolean;
231 | }
232 | 
233 | export interface ParsedGrant {
234 |   grantId?: string;
235 |   agency?: string;
236 |   country?: string;
237 | }
238 | 
239 | export interface ParsedArticle {
240 |   pmid: string;
241 |   title?: string;
242 |   abstractText?: string;
243 |   authors?: ParsedArticleAuthor[];
244 |   journalInfo?: ParsedJournalInfo;
245 |   publicationTypes?: string[];
246 |   keywords?: string[];
247 |   meshTerms?: ParsedMeshTerm[];
248 |   grantList?: ParsedGrant[];
249 |   doi?: string;
250 |   articleDates?: ParsedArticleDate[]; // Dates like 'received', 'accepted', 'revised'
251 |   // Add other fields as needed, e.g., language, publication status
252 | }
253 | 
254 | // ESummary specific types
255 | // Based on ESummary v2.0 XML (DocSum) and JSON-like XML structure
256 | // This is a common structure, but individual fields can vary.
257 | 
258 | /**
259 |  * Represents a raw author entry as parsed from ESummary XML.
260 |  * This type accounts for potential inconsistencies in property naming (e.g., Name/name)
261 |  * and structure directly from the XML-to-JavaScript conversion.
262 |  * It is intended for use as an intermediate type before normalization into ESummaryAuthor.
263 |  */
264 | export interface XmlESummaryAuthorRaw {
265 |   Name?: string; // Primary name field (often "LastName Initials")
266 |   name?: string; // Alternative casing for name
267 | 
268 |   AuthType?: string; // Author type (e.g., "Author")
269 |   authtype?: string; // Alternative casing
270 | 
271 |   ClusterId?: string; // Cluster ID
272 |   clusterid?: string; // Alternative casing
273 | 
274 |   "#text"?: string; // If the author is represented as a simple text node
275 | 
276 |   // Allow other properties as NCBI XML can be unpredictable
277 |   [key: string]: unknown;
278 | }
279 | 
280 | /**
281 |  * Represents a normalized author entry after parsing from ESummary data.
282 |  * This is the clean, canonical structure for application use.
283 |  */
284 | export interface ESummaryAuthor {
285 |   name: string; // Standardized: "LastName Initials"
286 |   authtype?: string; // Standardized: e.g., "Author"
287 |   clusterid?: string; // Standardized
288 | }
289 | 
290 | export interface ESummaryArticleId {
291 |   idtype: string; // e.g., "pubmed", "doi", "pmc"
292 |   idtypen: number;
293 |   value: string;
294 |   [key: string]: unknown; // For other attributes like _IdType (if parsed differently)
295 | }
296 | 
297 | export interface ESummaryHistory {
298 |   pubstatus: string; // e.g., "pubmed", "medline", "entrez"
299 |   date: string; // Date string
300 | }
301 | 
302 | // For the older DocSum <Item Name="..." Type="..."> structure
303 | export interface ESummaryItem {
304 |   "#text"?: string; // Value of the item
305 |   Item?: ESummaryItem[] | ESummaryItem; // For nested lists
306 |   _Name: string;
307 |   _Type:
308 |     | "String"
309 |     | "Integer"
310 |     | "Date"
311 |     | "List"
312 |     | "Structure"
313 |     | "Unknown"
314 |     | "ERROR";
315 |   [key: string]: unknown; // Other attributes like idtype for ArticleIds
316 | }
317 | 
318 | export interface ESummaryDocSumOldXml {
319 |   Id: string; // PMID
320 |   Item: ESummaryItem[];
321 | }
322 | 
323 | // For the newer DocumentSummarySet structure (often from retmode=xml with version=2.0)
324 | export interface ESummaryDocumentSummary {
325 |   "@_uid": string; // PMID
326 |   PubDate?: string;
327 |   EPubDate?: string;
328 |   Source?: string;
329 |   Authors?:
330 |     | XmlESummaryAuthorRaw[] // Array of raw author entries
331 |     | { Author: XmlESummaryAuthorRaw[] | XmlESummaryAuthorRaw } // Object containing raw author entries
332 |     | string; // Or a simple string for authors
333 |   LastAuthor?: string;
334 |   Title?: string;
335 |   SortTitle?: string;
336 |   Volume?: string;
337 |   Issue?: string;
338 |   Pages?: string;
339 |   Lang?: string[];
340 |   ISSN?: string;
341 |   ESSN?: string;
342 |   PubType?: string[]; // Array of publication types
343 |   RecordStatus?: string;
344 |   PubStatus?: string;
345 |   ArticleIds?:
346 |     | ESummaryArticleId[]
347 |     | { ArticleId: ESummaryArticleId[] | ESummaryArticleId };
348 |   History?:
349 |     | ESummaryHistory[]
350 |     | { PubMedPubDate: ESummaryHistory[] | ESummaryHistory };
351 |   References?: unknown[]; // Usually empty or complex
352 |   Attributes?: string[];
353 |   DOI?: string; // Sometimes directly available
354 |   FullJournalName?: string;
355 |   SO?: string; // Source Abbreviation
356 |   [key: string]: unknown; // For other dynamic fields
357 | }
358 | 
359 | export interface ESummaryDocumentSummarySet {
360 |   DocumentSummary: ESummaryDocumentSummary[] | ESummaryDocumentSummary;
361 | }
362 | 
363 | export interface ESummaryResult {
364 |   DocSum?: ESummaryDocSumOldXml[] | ESummaryDocSumOldXml; // Older XML format
365 |   DocumentSummarySet?: ESummaryDocumentSummarySet; // Newer XML format
366 |   ERROR?: string; // Error message if present
367 |   [key: string]: unknown; // For other potential top-level elements like 'dbinfo'
368 | }
369 | 
370 | export interface ESummaryResponseContainer {
371 |   eSummaryResult: ESummaryResult;
372 |   // header?: unknown; // If there's a header part in the response
373 | }
374 | 
375 | // Parsed brief summary (application-level)
376 | export interface ParsedBriefSummary {
377 |   pmid: string;
378 |   title?: string;
379 |   authors?: string; // Formatted string
380 |   source?: string;
381 |   pubDate?: string; // Standardized YYYY-MM-DD
382 |   epubDate?: string; // Standardized YYYY-MM-DD
383 |   doi?: string;
384 | }
385 | 
386 | // ESearch specific types
387 | export interface ESearchResultIdList {
388 |   Id: string[];
389 | }
390 | 
391 | export interface ESearchTranslation {
392 |   From: string;
393 |   To: string;
394 | }
395 | 
396 | export interface ESearchTranslationSet {
397 |   Translation: ESearchTranslation[];
398 | }
399 | 
400 | export interface ESearchWarningList {
401 |   PhraseNotFound?: string[];
402 |   QuotedPhraseNotFound?: string[];
403 |   OutputMessage?: string[];
404 |   FieldNotFound?: string[];
405 | }
406 | export interface ESearchErrorList {
407 |   PhraseNotFound?: string[];
408 |   FieldNotFound?: string[];
409 | }
410 | 
411 | export interface ESearchResultContent {
412 |   Count: string;
413 |   RetMax: string;
414 |   RetStart: string;
415 |   QueryKey?: string;
416 |   WebEnv?: string;
417 |   IdList?: ESearchResultIdList;
418 |   TranslationSet?: ESearchTranslationSet;
419 |   TranslationStack?: unknown; // Usually complex, define if needed
420 |   QueryTranslation: string;
421 |   ErrorList?: ESearchErrorList;
422 |   WarningList?: ESearchWarningList;
423 | }
424 | 
425 | export interface ESearchResponseContainer {
426 |   eSearchResult: ESearchResultContent;
427 |   // header?: unknown;
428 | }
429 | 
430 | // Fully parsed and typed result for ESearch
431 | export interface ESearchResult {
432 |   count: number;
433 |   retmax: number;
434 |   retstart: number;
435 |   queryKey?: string;
436 |   webEnv?: string;
437 |   idList: string[];
438 |   queryTranslation: string;
439 |   errorList?: ESearchErrorList;
440 |   warningList?: ESearchWarningList;
441 | }
442 | 
443 | // Fully parsed and typed result for EFetch
444 | export interface EFetchArticleSet {
445 |   articles: ParsedArticle[];
446 |   // Add any other top-level fields from the parsed EFetch result if necessary
447 | }
448 | 
```
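
A recurring pattern in these interfaces is the `T[] | T` union (for example `Author`, `PublicationType`, `MeshHeading`, `Keyword`), reflecting an XML-to-object conversion in which a repeated element arrives as an array only when more than one instance is present. The sketch below shows the normalization this forces on consumers; the local `ensureArray` is an illustrative re-implementation of the helper the parsing layer already imports, and the import path is an assumption.

```typescript
// Illustrative only: a local ensureArray re-implementation used to normalize
// the `T[] | T` unions declared above; the import path is an assumption.
import type { XmlAuthor, XmlAuthorList } from "./src/types-global/pubmedXml.js";

function ensureArray<T>(value: T[] | T | undefined): T[] {
  if (value === undefined) return [];
  return Array.isArray(value) ? value : [value];
}

/** Collects author surnames, following the declared XmlTextElement shape. */
function listAuthorSurnames(authorList?: XmlAuthorList): string[] {
  return ensureArray(authorList?.Author)
    .map((author: XmlAuthor) => author.LastName?.["#text"] ?? "")
    .filter((surname) => surname.length > 0);
}
```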

--------------------------------------------------------------------------------
/src/mcp-server/transports/http/httpTransport.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Configures and starts the HTTP MCP transport using Hono.
  3 |  * This file has been refactored to correctly integrate Hono's streaming
  4 |  * capabilities with the Model Context Protocol SDK's transport layer.
  5 |  * @module src/mcp-server/transports/http/httpTransport
  6 |  */
  7 | 
  8 | import { serve, ServerType } from "@hono/node-server";
  9 | import { Context, Hono, Next } from "hono";
 10 | import { cors } from "hono/cors";
 11 | import { stream } from "hono/streaming";
 12 | import http from "http";
 13 | import { config } from "../../../config/index.js";
 14 | import {
 15 |   logger,
 16 |   rateLimiter,
 17 |   RequestContext,
 18 |   requestContextService,
 19 | } from "../../../utils/index.js";
 20 | import { ServerInstanceInfo } from "../../server.js";
 21 | import { createAuthMiddleware, createAuthStrategy } from "../auth/index.js";
 22 | import { StatelessTransportManager } from "../core/statelessTransportManager.js";
 23 | import { TransportManager } from "../core/transportTypes.js";
 24 | import { StatefulTransportManager } from "./../core/statefulTransportManager.js";
 25 | import { httpErrorHandler } from "./httpErrorHandler.js";
 26 | import { HonoNodeBindings } from "./httpTypes.js";
 27 | import { mcpTransportMiddleware } from "./mcpTransportMiddleware.js";
 28 | 
 29 | const HTTP_PORT = config.mcpHttpPort;
 30 | const HTTP_HOST = config.mcpHttpHost;
 31 | const MCP_ENDPOINT_PATH = config.mcpHttpEndpointPath;
 32 | 
 33 | /**
 34 |  * Extracts the client IP address from the request, prioritizing common proxy headers.
 35 |  * @param c - The Hono context object.
 36 |  * @returns The client's IP address or a default string if not found.
 37 |  */
 38 | function getClientIp(c: Context<{ Bindings: HonoNodeBindings }>): string {
 39 |   const forwardedFor = c.req.header("x-forwarded-for");
 40 |   return (
 41 |     (forwardedFor?.split(",")[0] ?? "").trim() ||
 42 |     c.req.header("x-real-ip") ||
 43 |     "unknown_ip"
 44 |   );
 45 | }
 46 | 
 55 | async function isPortInUse(
 56 |   port: number,
 57 |   host: string,
 58 |   parentContext: RequestContext,
 59 | ): Promise<boolean> {
 60 |   const context = { ...parentContext, operation: "isPortInUse", port, host };
 61 |   logger.debug(`Checking if port ${port} is in use...`, context);
 62 |   return new Promise((resolve) => {
 63 |     const tempServer = http.createServer();
 64 |     tempServer
 65 |       .once("error", (err: NodeJS.ErrnoException) => {
 66 |         const inUse = err.code === "EADDRINUSE";
 67 |         logger.debug(
 68 |           `Port check resulted in error: ${err.code}. Port in use: ${inUse}`,
 69 |           context,
 70 |         );
 71 |         resolve(inUse);
 72 |       })
 73 |       .once("listening", () => {
 74 |         logger.debug(
 75 |           `Successfully bound to port ${port} temporarily. Port is not in use.`,
 76 |           context,
 77 |         );
 78 |         tempServer.close(() => resolve(false));
 79 |       })
 80 |       .listen(port, host);
 81 |   });
 82 | }
 83 | 
 84 | function startHttpServerWithRetry(
 85 |   app: Hono<{ Bindings: HonoNodeBindings }>,
 86 |   initialPort: number,
 87 |   host: string,
 88 |   maxRetries: number,
 89 |   parentContext: RequestContext,
 90 | ): Promise<ServerType> {
 91 |   const startContext = {
 92 |     ...parentContext,
 93 |     operation: "startHttpServerWithRetry",
 94 |   };
 95 |   logger.info(
 96 |     `Attempting to start HTTP server on port ${initialPort} with ${maxRetries} retries.`,
 97 |     startContext,
 98 |   );
 99 | 
100 |   return new Promise((resolve, reject) => {
101 |     const tryBind = (port: number, attempt: number) => {
102 |       const attemptContext = { ...startContext, port, attempt };
103 |       if (attempt > maxRetries + 1) {
104 |         const error = new Error(
105 |           `Failed to bind to any port after ${maxRetries} retries.`,
106 |         );
107 |         logger.fatal(error.message, attemptContext);
108 |         return reject(error);
109 |       }
110 | 
111 |       isPortInUse(port, host, attemptContext)
112 |         .then((inUse) => {
113 |           if (inUse) {
114 |             logger.warning(
115 |               `Port ${port} is in use, retrying on port ${port + 1}...`,
116 |               attemptContext,
117 |             );
118 |             setTimeout(
119 |               () => tryBind(port + 1, attempt + 1),
120 |               config.mcpHttpPortRetryDelayMs,
121 |             );
122 |             return;
123 |           }
124 | 
125 |           try {
126 |             const serverInstance = serve(
127 |               { fetch: app.fetch, port, hostname: host },
128 |               (info: { address: string; port: number }) => {
129 |                 const serverAddress = `http://${info.address}:${info.port}${MCP_ENDPOINT_PATH}`;
130 |                 logger.info(`HTTP transport listening at ${serverAddress}`, {
131 |                   ...attemptContext,
132 |                   address: serverAddress,
133 |                   sessionMode: config.mcpSessionMode,
134 |                 });
135 |                 if (process.stdout.isTTY) {
136 |                   console.log(`\n🚀 MCP Server running at: ${serverAddress}`);
137 |                   console.log(`   Session Mode: ${config.mcpSessionMode}\n`);
138 |                 }
139 |               },
140 |             );
141 |             resolve(serverInstance);
142 |           } catch (err: unknown) {
143 |             if (
144 |               err &&
145 |               typeof err === "object" &&
146 |               "code" in err &&
147 |               (err as { code: string }).code !== "EADDRINUSE"
148 |             ) {
149 |               const errorToLog =
150 |                 err instanceof Error ? err : new Error(String(err));
151 |               logger.error(
152 |                 "An unexpected error occurred while starting the server.",
153 |                 errorToLog,
154 |                 attemptContext,
155 |               );
156 |               return reject(err);
157 |             }
158 |             logger.warning(
159 |               `Encountered EADDRINUSE race condition on port ${port}, retrying...`,
160 |               attemptContext,
161 |             );
162 |             setTimeout(
163 |               () => tryBind(port + 1, attempt + 1),
164 |               config.mcpHttpPortRetryDelayMs,
165 |             );
166 |           }
167 |         })
168 |         .catch((err) => {
169 |           const error = err instanceof Error ? err : new Error(String(err));
170 |           logger.fatal(
171 |             "Failed to check if port is in use.",
172 |             error,
173 |             attemptContext,
174 |           );
175 |           reject(err);
176 |         });
177 |     };
178 | 
179 |     tryBind(initialPort, 1);
180 |   });
181 | }
182 | 
183 | function createTransportManager(
184 |   createServerInstanceFn: () => Promise<ServerInstanceInfo>,
185 |   sessionMode: string,
186 |   context: RequestContext,
187 | ): TransportManager {
188 |   const opContext = {
189 |     ...context,
190 |     operation: "createTransportManager",
191 |     sessionMode,
192 |   };
193 |   logger.info(
194 |     `Creating transport manager for session mode: ${sessionMode}`,
195 |     opContext,
196 |   );
197 | 
198 |   const statefulOptions = {
199 |     staleSessionTimeoutMs: config.mcpStatefulSessionStaleTimeoutMs,
200 |     mcpHttpEndpointPath: config.mcpHttpEndpointPath,
201 |   };
202 | 
203 |   const getMcpServer = async () => (await createServerInstanceFn()).server;
204 | 
205 |   switch (sessionMode) {
206 |     case "stateless":
207 |       return new StatelessTransportManager(getMcpServer);
208 |     case "stateful":
209 |       return new StatefulTransportManager(getMcpServer, statefulOptions);
210 |     case "auto":
211 |     default:
212 |       logger.info(
213 |         "Defaulting to 'auto' mode (stateful with stateless fallback).",
214 |         opContext,
215 |       );
216 |       return new StatefulTransportManager(getMcpServer, statefulOptions);
217 |   }
218 | }
219 | 
220 | export function createHttpApp(
221 |   transportManager: TransportManager,
222 |   createServerInstanceFn: () => Promise<ServerInstanceInfo>,
223 |   parentContext: RequestContext,
224 | ): Hono<{ Bindings: HonoNodeBindings }> {
225 |   const app = new Hono<{ Bindings: HonoNodeBindings }>();
226 |   const transportContext = {
227 |     ...parentContext,
228 |     component: "HttpTransportSetup",
229 |   };
230 |   logger.info("Creating Hono HTTP application.", transportContext);
231 | 
232 |   app.use(
233 |     "*",
234 |     cors({
235 |       origin: config.mcpAllowedOrigins || [],
236 |       allowMethods: ["GET", "POST", "DELETE", "OPTIONS"],
237 |       allowHeaders: [
238 |         "Content-Type",
239 |         "Mcp-Session-Id",
240 |         "Last-Event-ID",
241 |         "Authorization",
242 |       ],
243 |       credentials: true,
244 |     }),
245 |   );
246 | 
247 |   app.use(
248 |     "*",
249 |     async (c: Context<{ Bindings: HonoNodeBindings }>, next: Next) => {
250 |       (c.env.outgoing as http.ServerResponse).setHeader(
251 |         "X-Content-Type-Options",
252 |         "nosniff",
253 |       );
254 |       await next();
255 |     },
256 |   );
257 | 
258 |   app.use(
259 |     MCP_ENDPOINT_PATH,
260 |     async (c: Context<{ Bindings: HonoNodeBindings }>, next: Next) => {
261 |       const clientIp = getClientIp(c);
262 |       const context = requestContextService.createRequestContext({
263 |         operation: "httpRateLimitCheck",
264 |         ipAddress: clientIp,
265 |       });
266 |       try {
267 |         rateLimiter.check(clientIp, context);
268 |         logger.debug("Rate limit check passed.", context);
269 |       } catch (error) {
270 |         logger.warning("Rate limit check failed.", {
271 |           ...context,
272 |           error: error instanceof Error ? error.message : String(error),
273 |         });
274 |         throw error;
275 |       }
276 |       await next();
277 |     },
278 |   );
279 | 
280 |   const authStrategy = createAuthStrategy();
281 |   if (authStrategy) {
282 |     logger.info(
283 |       "Authentication strategy found, enabling auth middleware.",
284 |       transportContext,
285 |     );
286 |     app.use(MCP_ENDPOINT_PATH, createAuthMiddleware(authStrategy));
287 |   } else {
288 |     logger.info(
289 |       "No authentication strategy found, auth middleware disabled.",
290 |       transportContext,
291 |     );
292 |   }
293 | 
294 |   app.onError(httpErrorHandler);
295 | 
296 |   app.get("/healthz", (c) => {
297 |     return c.json({
298 |       status: "ok",
299 |       timestamp: new Date().toISOString(),
300 |     });
301 |   });
302 | 
303 |   app.get(
304 |     MCP_ENDPOINT_PATH,
305 |     async (c: Context<{ Bindings: HonoNodeBindings }>) => {
306 |       const sessionId = c.req.header("mcp-session-id");
307 |       if (sessionId) {
308 |         return c.text(
309 |           "GET requests to existing sessions are not supported.",
310 |           405,
311 |         );
312 |       }
313 | 
314 |       // Since this is a stateless endpoint, we create a temporary instance
315 |       // to report on the server's configuration.
316 |       const { tools, identity, options } = await createServerInstanceFn();
317 |       const effectiveSessionMode =
318 |         transportManager instanceof StatefulTransportManager
319 |           ? "stateful"
320 |           : "stateless";
321 | 
322 |       return c.json({
323 |         status: "ok",
324 |         server: {
325 |           name: identity.name,
326 |           version: identity.version,
327 |           description: identity.description || "No description provided.",
328 |           nodeVersion: process.version,
329 |           environment: config.environment,
330 |           capabilities: options.capabilities,
331 |         },
332 |         sessionMode: {
333 |           configured: config.mcpSessionMode,
334 |           effective: effectiveSessionMode,
335 |         },
336 |         tools: tools,
337 |         message:
338 |           "Server is running. POST to this endpoint to execute a tool call.",
339 |       });
340 |     },
341 |   );
342 | 
343 |   app.post(
344 |     MCP_ENDPOINT_PATH,
345 |     mcpTransportMiddleware(transportManager, createServerInstanceFn),
346 |     (c) => {
347 |       const response = c.get("mcpResponse");
348 | 
349 |       if (response.sessionId) {
350 |         c.header("Mcp-Session-Id", response.sessionId);
351 |       }
352 |       response.headers.forEach((value, key) => {
353 |         c.header(key, value);
354 |       });
355 | 
356 |       c.status(response.statusCode);
357 | 
358 |       if (response.type === "stream") {
359 |         return stream(c, async (s) => {
360 |           await s.pipe(response.stream);
361 |         });
362 |       } else {
363 |         const body =
364 |           typeof response.body === "object" && response.body !== null
365 |             ? response.body
366 |             : { body: response.body };
367 |         return c.json(body);
368 |       }
369 |     },
370 |   );
371 | 
372 |   app.delete(
373 |     MCP_ENDPOINT_PATH,
374 |     async (c: Context<{ Bindings: HonoNodeBindings }>) => {
375 |       const sessionId = c.req.header("mcp-session-id");
376 |       const context = requestContextService.createRequestContext({
377 |         ...transportContext,
378 |         operation: "handleDeleteRequest",
379 |         sessionId,
380 |       });
381 | 
382 |       if (sessionId) {
383 |         if (transportManager instanceof StatefulTransportManager) {
384 |           const response = await transportManager.handleDeleteRequest(
385 |             sessionId,
386 |             context,
387 |           );
388 |           if (response.type === "buffered") {
389 |             const body =
390 |               typeof response.body === "object" && response.body !== null
391 |                 ? response.body
392 |                 : { body: response.body };
393 |             return c.json(body, response.statusCode);
394 |           }
395 |           // Fallback for unexpected stream response on DELETE
396 |           return c.body(null, response.statusCode);
397 |         } else {
398 |           return c.json(
399 |             {
400 |               error: "Method Not Allowed",
401 |               message: "DELETE operations are not supported in this mode.",
402 |             },
403 |             405,
404 |           );
405 |         }
406 |       } else {
407 |         return c.json({
408 |           status: "stateless_mode",
409 |           message: "No sessions to delete in stateless mode",
410 |         });
411 |       }
412 |     },
413 |   );
414 | 
415 |   logger.info("Hono application setup complete.", transportContext);
416 |   return app;
417 | }
418 | 
419 | export async function startHttpTransport(
420 |   createServerInstanceFn: () => Promise<ServerInstanceInfo>,
421 |   parentContext: RequestContext,
422 | ): Promise<{
423 |   app: Hono<{ Bindings: HonoNodeBindings }>;
424 |   server: ServerType;
425 |   transportManager: TransportManager;
426 | }> {
427 |   const transportContext = {
428 |     ...parentContext,
429 |     component: "HttpTransportStart",
430 |   };
431 |   logger.info("Starting HTTP transport.", transportContext);
432 | 
433 |   const transportManager = createTransportManager(
434 |     createServerInstanceFn,
435 |     config.mcpSessionMode,
436 |     transportContext,
437 |   );
438 |   const app = createHttpApp(
439 |     transportManager,
440 |     createServerInstanceFn,
441 |     transportContext,
442 |   );
443 | 
444 |   const server = await startHttpServerWithRetry(
445 |     app,
446 |     HTTP_PORT,
447 |     HTTP_HOST,
448 |     config.mcpHttpMaxPortRetries,
449 |     transportContext,
450 |   );
451 | 
452 |   logger.info("HTTP transport started successfully.", transportContext);
453 |   return { app, server, transportManager };
454 | }
455 | 
```
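
For orientation, the sketch below probes the two read-only handlers registered above: the `/healthz` liveness check and the GET handler on the MCP endpoint path, which returns a status summary when no `Mcp-Session-Id` header is sent. The base URL and endpoint path are placeholders for whatever `MCP_HTTP_PORT` and `MCP_HTTP_ENDPOINT_PATH` resolve to in a given deployment, and an `Authorization` header would also be needed if an auth strategy is configured.

```typescript
// Illustrative client-side probe; baseUrl and endpointPath are placeholders
// for the values MCP_HTTP_PORT / MCP_HTTP_ENDPOINT_PATH resolve to.
const baseUrl = "http://127.0.0.1:3010";
const endpointPath = "/mcp";

async function probeServer(): Promise<void> {
  // Liveness check: responds with { status: "ok", timestamp: ... }.
  const health = await fetch(`${baseUrl}/healthz`);
  console.log("healthz:", await health.json());

  // GET without an Mcp-Session-Id header returns the server/status summary;
  // sending a session id instead yields a 405 per the handler above.
  const status = await fetch(`${baseUrl}${endpointPath}`);
  console.log("status:", await status.json());
}

probeServer().catch((err) => console.error(err));
```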

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedFetchContents/logic.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Logic for the pubmed_fetch_contents MCP tool.
  3 |  * Handles EFetch queries for specific PMIDs and formats the results.
  4 |  * This tool can fetch various details from PubMed including abstracts, full XML,
  5 |  * MEDLINE text, and citation data.
  6 |  * @module src/mcp-server/tools/pubmedFetchContents/logic
  7 |  */
  8 | 
  9 | import { z } from "zod";
 10 | import { getNcbiService } from "../../../services/NCBI/core/ncbiService.js";
 11 | import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
 12 | import {
 13 |   ParsedArticle,
 14 |   XmlMedlineCitation,
 15 |   XmlPubmedArticleSet,
 16 | } from "../../../types-global/pubmedXml.js";
 17 | import {
 18 |   logger,
 19 |   RequestContext,
 20 |   requestContextService,
 21 |   sanitizeInputForLogging,
 22 | } from "../../../utils/index.js";
 23 | import {
 24 |   ensureArray,
 25 |   extractAbstractText,
 26 |   extractArticleDates,
 27 |   extractAuthors,
 28 |   extractDoi,
 29 |   extractGrants,
 30 |   extractJournalInfo,
 31 |   extractKeywords,
 32 |   extractMeshTerms,
 33 |   extractPmid,
 34 |   extractPublicationTypes,
 35 |   getText,
 36 | } from "../../../services/NCBI/parsing/index.js";
 37 | 
 38 | export const PubMedFetchContentsInputSchema = z
 39 |   .object({
 40 |     pmids: z
 41 |       .array(z.string().regex(/^\d+$/))
 42 |       .max(200, "Max 200 PMIDs per call if not using history.")
 43 |       .optional()
 44 |       .describe(
 45 |         "An array of PubMed Unique Identifiers (PMIDs) to fetch. Use this OR queryKey/webEnv.",
 46 |       ),
 47 |     queryKey: z
 48 |       .string()
 49 |       .optional()
 50 |       .describe(
 51 |         "Query key from ESearch history. Requires webEnv. Use this OR pmids.",
 52 |       ),
 53 |     webEnv: z
 54 |       .string()
 55 |       .optional()
 56 |       .describe(
 57 |         "Web environment from ESearch history. Requires queryKey. Use this OR pmids.",
 58 |       ),
 59 |     retstart: z
 60 |       .number()
 61 |       .int()
 62 |       .min(0)
 63 |       .optional()
 64 |       .describe(
 65 |         "0-based index of the first record to retrieve. Used with queryKey/webEnv.",
 66 |       ),
 67 |     retmax: z
 68 |       .number()
 69 |       .int()
 70 |       .min(1)
 71 |       .optional()
 72 |       .describe(
 73 |         "Maximum number of records to retrieve. Used with queryKey/webEnv.",
 74 |       ),
 75 |     detailLevel: z
 76 |       .enum(["abstract_plus", "full_xml", "medline_text", "citation_data"])
 77 |       .optional()
 78 |       .default("abstract_plus")
 79 |       .describe(
 80 |         "Specifies the level of detail for the fetched content. Options: 'abstract_plus' (parsed details), 'full_xml' (raw PubMedArticle XML), 'medline_text' (MEDLINE format), 'citation_data' (minimal citation data). Defaults to 'abstract_plus'.",
 81 |       ),
 82 |     includeMeshTerms: z
 83 |       .boolean()
 84 |       .optional()
 85 |       .default(true)
 86 |       .describe(
 87 |         "Include MeSH terms in 'abstract_plus' and 'citation_data' results. Default: true.",
 88 |       ),
 89 |     includeGrantInfo: z
 90 |       .boolean()
 91 |       .optional()
 92 |       .default(false)
 93 |       .describe(
 94 |         "Include grant info in 'abstract_plus' results. Default: false.",
 95 |       ),
 96 |     outputFormat: z
 97 |       .enum(["json", "raw_text"])
 98 |       .optional()
 99 |       .default("json")
100 |       .describe(
101 |         "Output format. 'json' (default) wraps data in a JSON object. 'raw_text' returns raw text for 'medline_text' or 'full_xml' detail levels.",
102 |       ),
103 |   })
104 |   .superRefine((data, ctx) => {
105 |     if (data.queryKey && !data.webEnv) {
106 |       ctx.addIssue({
107 |         code: z.ZodIssueCode.custom,
108 |         message: "webEnv is required if queryKey is provided.",
109 |         path: ["webEnv"],
110 |       });
111 |     }
112 |     if (!data.queryKey && data.webEnv) {
113 |       ctx.addIssue({
114 |         code: z.ZodIssueCode.custom,
115 |         message: "queryKey is required if webEnv is provided.",
116 |         path: ["queryKey"],
117 |       });
118 |     }
119 |     if (
120 |       (!data.pmids || data.pmids.length === 0) &&
121 |       !(data.queryKey && data.webEnv)
122 |     ) {
123 |       ctx.addIssue({
124 |         code: z.ZodIssueCode.custom,
125 |         message:
126 |           "Either pmids (non-empty array) or both queryKey and webEnv must be provided.",
127 |         path: ["pmids"],
128 |       });
129 |     }
130 |     if (data.pmids && data.pmids.length > 0 && (data.queryKey || data.webEnv)) {
131 |       ctx.addIssue({
132 |         code: z.ZodIssueCode.custom,
133 |         message:
134 |           "Cannot use pmids and queryKey/webEnv simultaneously. Please choose one method.",
135 |         path: ["pmids"],
136 |       });
137 |     }
138 |     if (
139 |       (data.retstart !== undefined || data.retmax !== undefined) &&
140 |       !(data.queryKey && data.webEnv)
141 |     ) {
142 |       ctx.addIssue({
143 |         code: z.ZodIssueCode.custom,
144 |         message: "retstart/retmax can only be used with queryKey and webEnv.",
145 |         path: ["retstart"],
146 |       });
147 |     }
148 |   });
149 | 
150 | export type PubMedFetchContentsInput = z.infer<
151 |   typeof PubMedFetchContentsInputSchema
152 | >;
153 | 
154 | export type PubMedFetchContentsOutput = {
155 |   content: string;
156 |   articlesReturned: number;
157 |   eFetchUrl: string;
158 | };
159 | 
160 | interface EFetchServiceParams {
161 |   db: string;
162 |   id?: string;
163 |   query_key?: string;
164 |   WebEnv?: string;
165 |   retmode?: "xml" | "text";
166 |   rettype?: string;
167 |   retstart?: string;
168 |   retmax?: string;
169 |   [key: string]: string | undefined;
170 | }
171 | 
172 | function parsePubMedArticleSet(
173 |   xmlData: unknown,
174 |   input: PubMedFetchContentsInput,
175 |   parentContext: RequestContext,
176 | ): ParsedArticle[] {
177 |   const articles: ParsedArticle[] = [];
178 |   const operationContext = requestContextService.createRequestContext({
179 |     parentRequestId: parentContext.requestId,
180 |     operation: "parsePubMedArticleSet",
181 |   });
182 | 
183 |   if (
184 |     !xmlData ||
185 |     typeof xmlData !== "object" ||
186 |     !("PubmedArticleSet" in xmlData)
187 |   ) {
188 |     throw new McpError(
189 |       BaseErrorCode.PARSING_ERROR,
190 |       "Invalid or unexpected structure for xmlData in parsePubMedArticleSet.",
191 |       {
192 |         ...operationContext,
193 |         xmlDataType: typeof xmlData,
194 |         xmlDataPreview: sanitizeInputForLogging(
195 |           JSON.stringify(xmlData).substring(0, 200),
196 |         ),
197 |       },
198 |     );
199 |   }
200 | 
201 |   const typedXmlData = xmlData as { PubmedArticleSet?: XmlPubmedArticleSet };
202 |   const articleSet = typedXmlData.PubmedArticleSet;
203 | 
204 |   if (!articleSet || !articleSet.PubmedArticle) {
205 |     logger.warning(
206 |       "PubmedArticleSet or PubmedArticle array not found in EFetch XML response.",
207 |       operationContext,
208 |     );
209 |     return articles;
210 |   }
211 | 
212 |   const pubmedArticlesXml = ensureArray(articleSet.PubmedArticle);
213 | 
214 |   for (const articleXml of pubmedArticlesXml) {
215 |     if (!articleXml || typeof articleXml !== "object") continue;
216 | 
217 |     const medlineCitation: XmlMedlineCitation | undefined =
218 |       articleXml.MedlineCitation;
219 |     if (!medlineCitation) continue;
220 | 
221 |     const pmid = extractPmid(medlineCitation);
222 |     if (!pmid) continue;
223 | 
224 |     const articleNode = medlineCitation.Article;
225 |     const parsedArticle: ParsedArticle = {
226 |       pmid: pmid,
227 |       title: articleNode?.ArticleTitle
228 |         ? getText(articleNode.ArticleTitle)
229 |         : undefined,
230 |       abstractText: articleNode?.Abstract
231 |         ? extractAbstractText(articleNode.Abstract)
232 |         : undefined,
233 |       authors: articleNode?.AuthorList
234 |         ? extractAuthors(articleNode.AuthorList)
235 |         : undefined,
236 |       journalInfo: articleNode?.Journal
237 |         ? extractJournalInfo(articleNode.Journal, medlineCitation)
238 |         : undefined,
239 |       publicationTypes: articleNode?.PublicationTypeList
240 |         ? extractPublicationTypes(articleNode.PublicationTypeList)
241 |         : undefined,
242 |       keywords: articleNode?.KeywordList
243 |         ? extractKeywords(articleNode.KeywordList)
244 |         : undefined,
245 |       doi: articleNode ? extractDoi(articleNode) : undefined,
246 |       articleDates: articleNode?.ArticleDate
247 |         ? extractArticleDates(articleNode)
248 |         : undefined,
249 |     };
250 | 
251 |     if (input.includeMeshTerms) {
252 |       parsedArticle.meshTerms = medlineCitation.MeshHeadingList
253 |         ? extractMeshTerms(medlineCitation.MeshHeadingList)
254 |         : undefined;
255 |     }
256 | 
257 |     if (input.includeGrantInfo) {
258 |       parsedArticle.grantList = articleNode?.GrantList
259 |         ? extractGrants(articleNode.GrantList)
260 |         : undefined;
261 |     }
262 | 
263 |     articles.push(parsedArticle);
264 |   }
265 |   return articles;
266 | }
267 | 
268 | export async function pubMedFetchContentsLogic(
269 |   input: PubMedFetchContentsInput,
270 |   parentRequestContext: RequestContext,
271 | ): Promise<PubMedFetchContentsOutput> {
272 |   const toolLogicContext = requestContextService.createRequestContext({
273 |     parentRequestId: parentRequestContext.requestId,
274 |     operation: "pubMedFetchContentsLogic",
275 |     input: sanitizeInputForLogging(input),
276 |   });
277 | 
278 |   const validationResult = PubMedFetchContentsInputSchema.safeParse(input);
279 |   if (!validationResult.success) {
280 |     throw new McpError(
281 |       BaseErrorCode.VALIDATION_ERROR,
282 |       validationResult.error.errors[0]?.message || "Invalid input",
283 |       { ...toolLogicContext, details: validationResult.error.flatten() },
284 |     );
285 |   }
286 | 
287 |   const ncbiService = getNcbiService();
288 |   logger.info("Executing pubmed_fetch_contents tool", toolLogicContext);
289 | 
290 |   const eFetchParams: EFetchServiceParams = { db: "pubmed" };
291 | 
292 |   if (input.queryKey && input.webEnv) {
293 |     eFetchParams.query_key = input.queryKey;
294 |     eFetchParams.WebEnv = input.webEnv;
295 |     if (input.retstart !== undefined)
296 |       eFetchParams.retstart = String(input.retstart);
297 |     if (input.retmax !== undefined) eFetchParams.retmax = String(input.retmax);
298 |   } else if (input.pmids && input.pmids.length > 0) {
299 |     eFetchParams.id = input.pmids.join(",");
300 |   }
301 | 
302 |   let serviceRetmode: "xml" | "text" = "xml";
303 |   let rettype: string | undefined;
304 | 
305 |   switch (input.detailLevel) {
306 |     case "full_xml":
307 |       serviceRetmode = "xml";
308 |       break;
309 |     case "medline_text":
310 |       serviceRetmode = "text";
311 |       rettype = "medline";
312 |       break;
313 |     case "abstract_plus":
314 |     case "citation_data":
315 |       serviceRetmode = "xml";
316 |       break;
317 |   }
318 |   eFetchParams.retmode = serviceRetmode;
319 |   if (rettype) eFetchParams.rettype = rettype;
320 | 
321 |   const eFetchBase =
322 |     "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi";
323 |   const eFetchQueryString = new URLSearchParams(
324 |     eFetchParams as Record<string, string>,
325 |   ).toString();
326 |   const eFetchUrl = `${eFetchBase}?${eFetchQueryString}`;
327 | 
328 |   const shouldReturnRawXml =
329 |     input.detailLevel === "full_xml" && input.outputFormat === "raw_text";
330 | 
331 |   const eFetchResponseData = await ncbiService.eFetch(
332 |     eFetchParams,
333 |     toolLogicContext,
334 |     { retmode: serviceRetmode, rettype, returnRawXml: shouldReturnRawXml },
335 |   );
336 | 
337 |   let finalOutputText: string;
338 |   let articlesCount = 0;
339 | 
340 |   if (input.detailLevel === "medline_text") {
341 |     const medlineText = String(eFetchResponseData);
342 |     const foundPmidsInMedline = new Set<string>();
343 |     const pmidRegex = /^PMID- (\d+)/gm;
344 |     let match;
345 |     while ((match = pmidRegex.exec(medlineText)) !== null) {
346 |       if (match[1]) {
347 |         foundPmidsInMedline.add(match[1]);
348 |       }
349 |     }
350 |     articlesCount = foundPmidsInMedline.size;
351 | 
352 |     if (input.outputFormat === "raw_text") {
353 |       finalOutputText = medlineText;
354 |     } else {
355 |       const notFoundPmids =
356 |         input.pmids?.filter((pmid) => !foundPmidsInMedline.has(pmid)) || [];
357 |       finalOutputText = JSON.stringify({
358 |         requestedPmids: input.pmids || "N/A (history query)",
359 |         articles: [{ medlineText }],
360 |         notFoundPmids,
361 |         eFetchDetails: { urls: [eFetchUrl] },
362 |       });
363 |     }
364 |   } else if (input.detailLevel === "full_xml") {
365 |     const articlesXml = ensureArray(
366 |       (eFetchResponseData as { PubmedArticleSet?: XmlPubmedArticleSet })
367 |         ?.PubmedArticleSet?.PubmedArticle || [],
368 |     );
369 |     articlesCount = shouldReturnRawXml // raw XML string: count <PubmedArticle> opening tags
370 |       ? (String(eFetchResponseData).match(/<PubmedArticle[\s>]/g) || []).length
371 |       : articlesXml.length;
372 |     if (input.outputFormat === "raw_text") {
373 |       finalOutputText = String(eFetchResponseData);
374 |     } else {
375 |       const foundPmidsInXml = new Set<string>();
376 |       const articlesPayload = articlesXml.map((articleXml) => {
377 |         const pmid = extractPmid(articleXml.MedlineCitation) || "unknown_pmid";
378 |         if (pmid !== "unknown_pmid") foundPmidsInXml.add(pmid);
379 |         return { pmid, fullXmlContent: articleXml };
380 |       });
381 |       const notFoundPmids =
382 |         input.pmids?.filter((pmid) => !foundPmidsInXml.has(pmid)) || [];
383 |       finalOutputText = JSON.stringify({
384 |         requestedPmids: input.pmids || "N/A (history query)",
385 |         articles: articlesPayload,
386 |         notFoundPmids,
387 |         eFetchDetails: { urls: [eFetchUrl] },
388 |       });
389 |     }
390 |   } else {
391 |     const parsedArticles = parsePubMedArticleSet(
392 |       eFetchResponseData as XmlPubmedArticleSet,
393 |       input,
394 |       toolLogicContext,
395 |     );
396 |     articlesCount = parsedArticles.length;
397 |     const foundPmids = new Set(parsedArticles.map((p) => p.pmid));
398 |     const notFoundPmids =
399 |       input.pmids?.filter((pmid) => !foundPmids.has(pmid)) || [];
400 | 
401 |     let articlesToReturn: ParsedArticle[] | Record<string, unknown>[] =
402 |       parsedArticles;
403 |     if (input.detailLevel === "citation_data") {
404 |       articlesToReturn = parsedArticles.map((article) => ({
405 |         pmid: article.pmid,
406 |         title: article.title,
407 |         authors: article.authors?.map((a) => ({
408 |           lastName: a.lastName,
409 |           initials: a.initials,
410 |         })),
411 |         journalInfo: {
412 |           title: article.journalInfo?.title,
413 |           isoAbbreviation: article.journalInfo?.isoAbbreviation,
414 |           volume: article.journalInfo?.volume,
415 |           issue: article.journalInfo?.issue,
416 |           pages: article.journalInfo?.pages,
417 |           year: article.journalInfo?.publicationDate?.year,
418 |         },
419 |         doi: article.doi,
420 |         ...(input.includeMeshTerms && { meshTerms: article.meshTerms }),
421 |       }));
422 |     }
423 |     finalOutputText = JSON.stringify({
424 |       requestedPmids: input.pmids || "N/A (history query)",
425 |       articles: articlesToReturn,
426 |       notFoundPmids,
427 |       eFetchDetails: { urls: [eFetchUrl] },
428 |     });
429 |   }
430 | 
431 |   logger.notice("Successfully executed pubmed_fetch_contents tool.", {
432 |     ...toolLogicContext,
433 |     articlesReturned: articlesCount,
434 |   });
435 | 
436 |   return {
437 |     content: finalOutputText,
438 |     articlesReturned: articlesCount,
439 |     eFetchUrl,
440 |   };
441 | }
442 | 
```
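
Because the schema's `superRefine` enforces mutual exclusivity between `pmids` and the ESearch history pair, a quick way to see which shapes it accepts is to run `safeParse` over a few candidates, as sketched below. The import path is an assumption for a script run from the project root, and the PMID and WebEnv values are arbitrary illustrative strings.

```typescript
// Illustrative check of the mutual-exclusivity rules in the superRefine above;
// the import path and the literal identifiers are assumptions.
import { PubMedFetchContentsInputSchema } from "./src/mcp-server/tools/pubmedFetchContents/logic.js";

const byPmids = PubMedFetchContentsInputSchema.safeParse({
  pmids: ["35294500", "34380048"],
  detailLevel: "citation_data",
});
console.log(byPmids.success); // true: PMIDs alone are a valid selection

const byHistory = PubMedFetchContentsInputSchema.safeParse({
  queryKey: "1",
  webEnv: "MCID_example",
  retmax: 20,
});
console.log(byHistory.success); // true: queryKey + webEnv (+ paging) is valid

const mixed = PubMedFetchContentsInputSchema.safeParse({
  pmids: ["35294500"],
  queryKey: "1",
  webEnv: "MCID_example",
});
console.log(mixed.success); // false: pmids cannot be combined with history params
```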

--------------------------------------------------------------------------------
/src/services/NCBI/parsing/eSummaryResultParser.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Helper functions for parsing ESummary results from NCBI.
  3 |  * Handles different ESummary XML structures and formats the data into
  4 |  * consistent ParsedBriefSummary objects.
  5 |  * @module src/services/NCBI/parsing/eSummaryResultParser
  6 |  */
  7 | 
  8 | import {
  9 |   ESummaryArticleId,
 10 |   ESummaryDocSumOldXml,
 11 |   ESummaryDocumentSummary,
 12 |   ESummaryItem,
 13 |   ESummaryResult,
 14 |   ParsedBriefSummary,
 15 |   ESummaryAuthor as XmlESummaryAuthor, // This is the normalized output type
 16 |   XmlESummaryAuthorRaw, // This is the raw input type from XML parsing
 17 | } from "../../../types-global/pubmedXml.js";
 18 | import {
 19 |   dateParser,
 20 |   logger,
 21 |   RequestContext,
 22 |   requestContextService,
 23 | } from "../../../utils/index.js"; // Note: utils/index.js is the barrel file
 24 | import { ensureArray, getAttribute, getText } from "./xmlGenericHelpers.js";
 25 | 
 26 | /**
 27 |  * Formats an array of ESummary authors into a string.
 28 |  * Limits to the first 3 authors and adds "et al." if more exist.
 29 |  * @param authors - Array of ESummary author objects (normalized).
 30 |  * @returns A string like "Doe J, Smith A, Brown B, et al." or empty if no authors.
 31 |  */
 32 | export function formatESummaryAuthors(authors?: XmlESummaryAuthor[]): string {
 33 |   if (!authors || authors.length === 0) return "";
 34 |   return (
 35 |     authors
 36 |       .slice(0, 3)
 37 |       .map((author) => author.name) // Assumes author.name is the string representation
 38 |       .join(", ") + (authors.length > 3 ? ", et al." : "")
 39 |   );
 40 | }
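// Illustrative example (follows directly from the logic above): with four
// normalized authors the list is truncated after three and suffixed:
//   formatESummaryAuthors([
//     { name: "Doe J" }, { name: "Smith A" },
//     { name: "Brown B" }, { name: "Lee C" },
//   ]); // => "Doe J, Smith A, Brown B, et al."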
 41 | 
 42 | /**
 43 |  * Standardizes date strings from ESummary to "YYYY-MM-DD" format.
 44 |  * Uses the dateParser utility.
 45 |  * @param dateStr - Date string from ESummary (e.g., "2023/01/15", "2023 Jan 15", "2023").
 46 |  * @param parentContext - Optional parent request context for logging.
 47 |  * @returns A promise resolving to a standardized date string ("YYYY-MM-DD") or undefined if parsing fails.
 48 |  */
 49 | export async function standardizeESummaryDate(
 50 |   dateStr?: string,
 51 |   parentContext?: RequestContext,
 52 | ): Promise<string | undefined> {
 53 |   if (dateStr === undefined || dateStr === null) return undefined; // Check for null as well
 54 | 
 55 |   const dateInputString = String(dateStr); // Ensure it's a string
 56 | 
 57 |   const currentContext =
 58 |     parentContext ||
 59 |     requestContextService.createRequestContext({
 60 |       operation: "standardizeESummaryDateInternal",
 61 |       inputDate: dateInputString, // Log the stringified version
 62 |     });
 63 |   try {
 64 |     // Pass the stringified version to the date parser
 65 |     const parsedDate = await dateParser.parseDate(
 66 |       dateInputString,
 67 |       currentContext,
 68 |     );
 69 |     if (parsedDate) {
 70 |       return parsedDate.toISOString().split("T")[0]; // Format as YYYY-MM-DD
 71 |     }
 72 |     logger.debug(
 73 |       `standardizeESummaryDate: dateParser could not parse "${dateInputString}", returning undefined.`,
 74 |       currentContext,
 75 |     );
 76 |   } catch (e) {
 77 |     logger.warning(
 78 |       `standardizeESummaryDate: Error during dateParser.parseDate for "${dateInputString}", returning undefined.`,
 79 |       {
 80 |         ...currentContext,
 81 |         error: e instanceof Error ? e.message : String(e),
 82 |       },
 83 |     );
 84 |   }
 85 |   return undefined; // Return undefined if parsing fails
 86 | }
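// Illustrative usage note: assuming the shared dateParser recognizes NLM-style
// strings such as "2023 Jan 15", the resolved value would be "2023-01-15";
// unparseable input resolves to undefined rather than throwing, as shown above.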
 87 | 
 88 | /**
 89 |  * Parses authors from an ESummary DocumentSummary structure.
 90 |  * Handles various ways authors might be represented.
 91 |  * Returns an array of normalized XmlESummaryAuthor objects.
 92 |  * Internal helper function.
 93 |  */
 94 | function parseESummaryAuthorsFromDocumentSummary(
 95 |   docSummary: ESummaryDocumentSummary,
 96 | ): XmlESummaryAuthor[] {
 97 |   const authorsProp = docSummary.Authors;
 98 |   if (!authorsProp) return [];
 99 | 
100 |   const parsedAuthors: XmlESummaryAuthor[] = [];
101 | 
102 |   const processRawAuthor = (rawAuthInput: XmlESummaryAuthorRaw | string) => {
103 |     let name = "";
104 |     let authtype: string | undefined;
105 |     let clusterid: string | undefined;
106 | 
107 |     if (typeof rawAuthInput === "string") {
108 |       name = rawAuthInput;
109 |     } else if (rawAuthInput && typeof rawAuthInput === "object") {
110 |       const authorObj = rawAuthInput as XmlESummaryAuthorRaw; // Now typed
111 |       // Try extracting text from the object itself (e.g., if it's { '#text': 'Author Name' })
112 |       name = getText(authorObj, "");
113 | 
114 |       // If name is still empty, try common property names for author names
115 |       if (!name) {
116 |         name = getText(authorObj.Name || authorObj.name, "");
117 |       }
118 | 
119 |       authtype = getText(authorObj.AuthType || authorObj.authtype, undefined);
120 |       clusterid = getText(
121 |         authorObj.ClusterId || authorObj.clusterid,
122 |         undefined,
123 |       );
124 | 
125 |       // Fallback for unhandled structures: log and try to stringify
126 |       if (!name) {
127 |         const authInputString = JSON.stringify(authorObj);
128 |         logger.warning(
129 |           `Unhandled author structure in parseESummaryAuthorsFromDocumentSummary. authInput: ${authInputString.substring(0, 100)}`,
130 |           requestContextService.createRequestContext({
131 |             operation: "parseESummaryAuthorsFromDocumentSummary",
132 |             detail: "Unhandled author structure",
133 |           }),
134 |         );
135 |         // As a last resort, if it's a simple object with a single value, that might be the name
136 |         const keys = Object.keys(authorObj);
137 |         if (
138 |           keys.length === 1 &&
139 |           keys[0] &&
140 |           typeof (authorObj as Record<string, unknown>)[keys[0]] === "string"
141 |         ) {
142 |           name = (authorObj as Record<string, unknown>)[keys[0]] as string;
143 |         } else if (authInputString.length < 100) {
144 |           // Avoid overly long stringified objects
145 |           name = authInputString; // Not ideal, but better than empty for debugging
146 |         }
147 |       }
148 |     }
149 | 
150 |     if (name.trim()) {
151 |       parsedAuthors.push({
152 |         name: name.trim(),
153 |         authtype,
154 |         clusterid,
155 |       });
156 |     }
157 |   };
158 | 
159 |   if (Array.isArray(authorsProp)) {
160 |     // authorsProp could be Array<string> or Array<XmlESummaryAuthorRaw>
161 |     (authorsProp as (XmlESummaryAuthorRaw | string)[]).forEach(
162 |       processRawAuthor,
163 |     );
164 |   } else if (
165 |     typeof authorsProp === "object" &&
166 |     "Author" in authorsProp && // authorsProp is { Author: ... }
167 |     authorsProp.Author
168 |   ) {
169 |     const rawAuthors = ensureArray(
170 |       authorsProp.Author as
171 |         | XmlESummaryAuthorRaw
172 |         | XmlESummaryAuthorRaw[]
173 |         | string,
174 |     );
175 |     rawAuthors.forEach(processRawAuthor);
176 |   } else if (typeof authorsProp === "string") {
177 |     try {
178 |       // Attempt to parse if it looks like a JSON array string
179 |       if (authorsProp.startsWith("[") && authorsProp.endsWith("]")) {
180 |         const parsedJsonAuthors = JSON.parse(authorsProp) as unknown[];
181 |         if (Array.isArray(parsedJsonAuthors)) {
182 |           parsedJsonAuthors.forEach((authItem: unknown) => {
183 |             if (typeof authItem === "string") {
184 |               parsedAuthors.push({ name: authItem.trim() });
185 |             } else if (
186 |               typeof authItem === "object" &&
187 |               authItem !== null &&
188 |               ((authItem as XmlESummaryAuthorRaw).name ||
189 |                 (authItem as XmlESummaryAuthorRaw).Name)
190 |             ) {
191 |               // If it's an object with a name property, treat as XmlESummaryAuthorRaw
192 |               processRawAuthor(authItem as XmlESummaryAuthorRaw);
193 |             }
194 |           });
195 |           if (parsedAuthors.length > 0) return parsedAuthors; // Return if JSON parsing yielded results
196 |         }
197 |       }
198 |     } catch (e) {
199 |       logger.debug(
200 |         `Failed to parse Authors string as JSON: ${authorsProp.substring(0, 100)}`,
201 |         requestContextService.createRequestContext({
202 |           operation: "parseESummaryAuthorsFromString",
203 |           input: authorsProp.substring(0, 100),
204 |           error: e instanceof Error ? e.message : String(e),
205 |         }),
206 |       );
207 |     }
208 |     // Fallback: split string by common delimiters
209 |     authorsProp
210 |       .split(/[,;]/)
211 |       .map((namePart: string) => namePart.trim())
212 |       .filter((namePart) => namePart)
213 |       .forEach((namePart) => parsedAuthors.push({ name: namePart }));
214 |   }
215 |   return parsedAuthors.filter((author) => author.name);
216 | }
217 | 
218 | /**
219 |  * Parses a single ESummary DocumentSummary (newer XML format) into a raw summary object.
220 |  * Internal helper function.
221 |  */
222 | function parseSingleDocumentSummary(docSummary: ESummaryDocumentSummary): Omit<
223 |   ParsedBriefSummary,
224 |   "pubDate" | "epubDate"
225 | > & {
226 |   rawPubDate?: string;
227 |   rawEPubDate?: string;
228 | } {
229 |   const pmid = docSummary["@_uid"];
230 |   const authorsArray = parseESummaryAuthorsFromDocumentSummary(docSummary);
231 | 
232 |   let doiValue: string | undefined = getText(docSummary.DOI, undefined);
233 |   if (!doiValue) {
234 |     const articleIdsProp = docSummary.ArticleIds;
235 |     if (articleIdsProp) {
236 |       const idsArray = Array.isArray(articleIdsProp)
237 |         ? articleIdsProp
238 |         : ensureArray(
239 |             (
240 |               articleIdsProp as {
241 |                 ArticleId: ESummaryArticleId[] | ESummaryArticleId;
242 |               }
243 |             ).ArticleId,
244 |           );
245 | 
246 |       const doiEntry = idsArray.find(
247 |         (id) => (id as ESummaryArticleId).idtype === "doi",
248 |       );
249 |       if (doiEntry) {
250 |         doiValue = getText((doiEntry as ESummaryArticleId).value, undefined);
251 |       }
252 |     }
253 |   }
254 | 
255 |   return {
256 |     pmid: String(pmid),
257 |     title: getText(docSummary.Title, undefined),
258 |     authors: formatESummaryAuthors(authorsArray),
259 |     source:
260 |       getText(docSummary.Source, undefined) ||
261 |       getText(docSummary.FullJournalName, undefined) ||
262 |       getText(docSummary.SO, undefined) ||
263 |       undefined,
264 |     doi: doiValue,
265 |     rawPubDate: getText(docSummary.PubDate, undefined),
266 |     rawEPubDate: getText(docSummary.EPubDate, undefined),
267 |   };
268 | }
269 | 
270 | /**
271 |  * Parses a single ESummary DocSum (older XML item-based format) into a raw summary object.
272 |  * Internal helper function.
273 |  */
274 | function parseSingleDocSumOldXml(docSum: ESummaryDocSumOldXml): Omit<
275 |   ParsedBriefSummary,
276 |   "pubDate" | "epubDate"
277 | > & {
278 |   rawPubDate?: string;
279 |   rawEPubDate?: string;
280 | } {
281 |   const pmid = docSum.Id;
282 |   const items = ensureArray(docSum.Item);
283 | 
284 |   const getItemValue = (
285 |     name: string | string[],
286 |     type?: ESummaryItem["_Type"],
287 |   ): string | undefined => {
288 |     const namesToTry = ensureArray(name);
289 |     for (const n of namesToTry) {
290 |       const item = items.find(
291 |         (i) =>
292 |           i._Name === n &&
293 |           (type ? i._Type === type : true) &&
294 |           i._Type !== "ERROR",
295 |       );
296 |       if (item) {
297 |         const textVal = getText(item);
298 |         if (textVal !== undefined) return String(textVal);
299 |       }
300 |     }
301 |     return undefined;
302 |   };
303 | 
304 |   const getAuthorList = (): XmlESummaryAuthor[] => {
305 |     const authorListItem = items.find(
306 |       (i) => i._Name === "AuthorList" && i._Type === "List",
307 |     );
308 |     if (authorListItem && authorListItem.Item) {
309 |       return ensureArray(authorListItem.Item)
310 |         .filter((a) => a._Name === "Author" && a._Type === "String")
311 |         .map((a) => ({ name: getText(a, "") }));
312 |     }
313 |     // Fallback for authors directly under DocSum items
314 |     return items
315 |       .filter((i) => i._Name === "Author" && i._Type === "String")
316 |       .map((a) => ({ name: getText(a, "") }));
317 |   };
318 | 
319 |   const authorsArray = getAuthorList();
320 | 
321 |   let doiFromItems: string | undefined = getItemValue("DOI", "String");
322 |   if (!doiFromItems) {
323 |     const articleIdsItem = items.find(
324 |       (i) => i._Name === "ArticleIds" && i._Type === "List",
325 |     );
326 |     if (articleIdsItem && articleIdsItem.Item) {
327 |       const ids = ensureArray(articleIdsItem.Item);
328 |       const doiIdItem = ids.find(
329 |         (id) =>
330 |           getAttribute(id as ESummaryItem, "idtype") === "doi" ||
331 |           (id as ESummaryItem)._Name === "doi", // Some older formats might use Name="doi"
332 |       );
333 |       if (doiIdItem) {
334 |         doiFromItems = getText(doiIdItem);
335 |       }
336 |     }
337 |   }
338 | 
339 |   return {
340 |     pmid: String(pmid),
341 |     title: getItemValue("Title", "String"),
342 |     authors: formatESummaryAuthors(authorsArray),
343 |     source: getItemValue(["Source", "FullJournalName", "SO"], "String"),
344 |     doi: doiFromItems,
345 |     rawPubDate: getItemValue(["PubDate", "ArticleDate"], "Date"),
346 |     rawEPubDate: getItemValue("EPubDate", "Date"),
347 |   };
348 | }
349 | 
350 | /**
351 |  * Extracts and formats brief summaries from ESummary XML result.
352 |  * Handles both DocumentSummarySet (newer) and older DocSum structures.
353 |  * Asynchronously standardizes dates.
354 |  * @param eSummaryResult - The parsed XML object from ESummary (eSummaryResult part).
355 |  * @param context - Request context for logging and passing to date standardization.
356 |  * @returns A promise resolving to an array of parsed brief summary objects.
357 |  */
358 | export async function extractBriefSummaries(
359 |   eSummaryResult?: ESummaryResult,
360 |   context?: RequestContext,
361 | ): Promise<ParsedBriefSummary[]> {
362 |   if (!eSummaryResult) return [];
363 |   const opContext =
364 |     context ||
365 |     requestContextService.createRequestContext({
366 |       operation: "extractBriefSummariesInternal",
367 |     });
368 | 
369 |   if (eSummaryResult.ERROR) {
370 |     logger.warning("ESummary result contains an error", {
371 |       ...opContext,
372 |       errorDetails: eSummaryResult.ERROR,
373 |     });
374 |     return [];
375 |   }
376 | 
377 |   let rawSummaries: (Omit<ParsedBriefSummary, "pubDate" | "epubDate"> & {
378 |     rawPubDate?: string;
379 |     rawEPubDate?: string;
380 |   })[] = [];
381 | 
382 |   if (eSummaryResult.DocumentSummarySet?.DocumentSummary) {
383 |     const docSummaries = ensureArray(
384 |       eSummaryResult.DocumentSummarySet.DocumentSummary,
385 |     );
386 |     rawSummaries = docSummaries
387 |       .map(parseSingleDocumentSummary)
388 |       .filter((s) => s.pmid);
389 |   } else if (eSummaryResult.DocSum) {
390 |     const docSums = ensureArray(eSummaryResult.DocSum);
391 |     rawSummaries = docSums.map(parseSingleDocSumOldXml).filter((s) => s.pmid);
392 |   }
393 | 
394 |   const processedSummaries: ParsedBriefSummary[] = [];
395 |   for (const rawSummary of rawSummaries) {
396 |     const pubDate = await standardizeESummaryDate(
397 |       rawSummary.rawPubDate,
398 |       opContext,
399 |     );
400 |     const epubDate = await standardizeESummaryDate(
401 |       rawSummary.rawEPubDate,
402 |       opContext,
403 |     );
404 |     processedSummaries.push({
405 |       pmid: rawSummary.pmid,
406 |       title: rawSummary.title,
407 |       authors: rawSummary.authors,
408 |       source: rawSummary.source,
409 |       doi: rawSummary.doi,
410 |       pubDate,
411 |       epubDate,
412 |     });
413 |   }
414 | 
415 |   return processedSummaries;
416 | }
417 | 
```
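For orientation, a minimal usage sketch of `extractBriefSummaries` follows. The import paths and the XML-parsing step are assumptions for illustration; in the server, the parsed EUtils payload and request context are supplied by the NCBI service layer.

```typescript
// Hypothetical sketch: turning raw ESummary XML into brief summaries.
// Import paths are placeholders, not the repository's actual module layout.
import { XMLParser } from "fast-xml-parser"; // assumed parser; matches the "@_"-prefixed attributes used above
import { extractBriefSummaries } from "./esummaryParser.js"; // hypothetical path to this module
import { requestContextService } from "../utils/index.js"; // hypothetical path

export async function summarizeESummaryXml(xml: string) {
  // Keep attributes (e.g. "@_uid" on DocumentSummary) so the parsed shape
  // matches what this module expects.
  const parser = new XMLParser({
    ignoreAttributes: false,
    attributeNamePrefix: "@_",
  });
  const parsed = parser.parse(xml);

  const context = requestContextService.createRequestContext({
    operation: "summarizeESummaryXml",
  });

  // Handles both DocumentSummarySet (newer) and DocSum (older) shapes and
  // standardizes PubDate/EPubDate asynchronously.
  return extractBriefSummaries(parsed?.eSummaryResult, context);
}
```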

--------------------------------------------------------------------------------
/src/mcp-server/transports/core/statefulTransportManager.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview Implements a stateful transport manager for the MCP SDK.
  3 |  *
  4 |  * This manager handles multiple, persistent MCP sessions. It creates and maintains
  5 |  * a dedicated McpServer and StreamableHTTPServerTransport instance for each session,
  6 |  * allowing for stateful, multi-turn interactions. It includes robust mechanisms for
  7 |  * session lifecycle management, including garbage collection of stale sessions and
  8 |  * concurrency controls to prevent race conditions.
  9 |  *
 10 |  * SCALABILITY NOTE: This manager maintains all session state in local process memory.
 11 |  * For horizontal scaling across multiple server instances, a load balancer with
 12 |  * sticky sessions (session affinity) is required to ensure that all requests for a
 13 |  * given session are routed to the same process instance that holds that session's state.
 14 |  *
 15 |  * @module src/mcp-server/transports/core/statefulTransportManager
 16 |  */
 17 | 
 18 | import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
 19 | import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
 20 | import type { IncomingHttpHeaders, ServerResponse } from "http";
 21 | import { randomUUID } from "node:crypto";
 22 | import { Readable } from "stream";
 23 | import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
 24 | import {
 25 |   ErrorHandler,
 26 |   logger,
 27 |   RequestContext,
 28 |   requestContextService,
 29 | } from "../../../utils/index.js";
 30 | import { BaseTransportManager } from "./baseTransportManager.js";
 31 | import { HonoStreamResponse } from "./honoNodeBridge.js";
 32 | import { convertNodeHeadersToWebHeaders } from "./headerUtils.js";
 33 | import {
 34 |   HttpStatusCode,
 35 |   StatefulTransportManager as IStatefulTransportManager,
 36 |   TransportResponse,
 37 |   TransportSession,
 38 | } from "./transportTypes.js";
 39 | 
 40 | /**
 41 |  * Defines the configuration options for the StatefulTransportManager.
 42 |  */
 43 | export interface StatefulTransportOptions {
 44 |   staleSessionTimeoutMs: number;
 45 |   mcpHttpEndpointPath: string;
 46 | }
 47 | 
 48 | /**
 49 |  * Manages persistent, stateful MCP sessions.
 50 |  */
 51 | export class StatefulTransportManager
 52 |   extends BaseTransportManager
 53 |   implements IStatefulTransportManager
 54 | {
 55 |   private readonly transports = new Map<
 56 |     string,
 57 |     StreamableHTTPServerTransport
 58 |   >();
 59 |   private readonly servers = new Map<string, McpServer>();
 60 |   private readonly sessions = new Map<string, TransportSession>();
 61 |   private readonly garbageCollector: NodeJS.Timeout;
 62 |   private readonly options: StatefulTransportOptions;
 63 | 
 64 |   /**
 65 |    * @param createServerInstanceFn - A factory function to create new McpServer instances.
 66 |    * @param options - Configuration options for the manager.
 67 |    */
 68 |   constructor(
 69 |     createServerInstanceFn: () => Promise<McpServer>,
 70 |     options: StatefulTransportOptions,
 71 |   ) {
 72 |     super(createServerInstanceFn);
 73 |     this.options = options;
 74 |     const context = requestContextService.createRequestContext({
 75 |       operation: "StatefulTransportManager.constructor",
 76 |     });
 77 |     logger.info("Starting session garbage collector.", context);
 78 |     this.garbageCollector = setInterval(
 79 |       () => this.cleanupStaleSessions(),
 80 |       this.options.staleSessionTimeoutMs,
 81 |     );
 82 |   }
 83 | 
 84 |   /**
 85 |    * Initializes a new stateful session and handles the first request.
 86 |    *
 87 |    * @param headers - The incoming request headers.
 88 |    * @param body - The parsed body of the request.
 89 |    * @param context - The request context.
 90 |    * @returns A promise resolving to a streaming TransportResponse with a session ID.
 91 |    */
 92 |   async initializeAndHandle(
 93 |     headers: IncomingHttpHeaders,
 94 |     body: unknown,
 95 |     context: RequestContext,
 96 |   ): Promise<TransportResponse> {
 97 |     const opContext = {
 98 |       ...context,
 99 |       operation: "StatefulTransportManager.initializeAndHandle",
100 |     };
101 |     logger.debug("Initializing new stateful session.", opContext);
102 | 
103 |     let server: McpServer | undefined;
104 |     let transport: StreamableHTTPServerTransport | undefined;
105 | 
106 |     try {
107 |       server = await this.createServerInstanceFn();
108 |       const mockRes = new HonoStreamResponse() as unknown as ServerResponse;
109 |       const currentServer = server;
110 | 
111 |       transport = new StreamableHTTPServerTransport({
112 |         sessionIdGenerator: () => randomUUID(),
113 |         onsessioninitialized: (sessionId) => {
114 |           const sessionContext = { ...opContext, sessionId };
115 |           this.transports.set(sessionId, transport!);
116 |           this.servers.set(sessionId, currentServer);
117 |           this.sessions.set(sessionId, {
118 |             id: sessionId,
119 |             createdAt: new Date(),
120 |             lastAccessedAt: new Date(),
121 |             activeRequests: 0,
122 |           });
123 |           logger.info(`MCP Session created: ${sessionId}`, sessionContext);
124 |         },
125 |       });
126 | 
127 |       transport.onclose = () => {
128 |         const sessionId = transport!.sessionId;
129 |         if (sessionId) {
130 |           const closeContext = { ...opContext, sessionId };
131 |           this.closeSession(sessionId, closeContext).catch((err) =>
132 |             logger.error(
133 |               `Error during transport.onclose cleanup for session ${sessionId}`,
134 |               err,
135 |               closeContext,
136 |             ),
137 |           );
138 |         }
139 |       };
140 | 
141 |       await server.connect(transport);
142 |       logger.debug("Server connected, handling initial request.", opContext);
143 | 
144 |       const mockReq = {
145 |         headers,
146 |         method: "POST",
147 |         url: this.options.mcpHttpEndpointPath,
148 |       } as import("http").IncomingMessage;
149 |       await transport.handleRequest(mockReq, mockRes, body);
150 | 
151 |       const responseHeaders = convertNodeHeadersToWebHeaders(
152 |         mockRes.getHeaders(),
153 |       );
154 |       if (transport.sessionId) {
155 |         responseHeaders.set("Mcp-Session-Id", transport.sessionId);
156 |       }
157 | 
158 |       const webStream = Readable.toWeb(
159 |         mockRes as unknown as HonoStreamResponse,
160 |       ) as ReadableStream<Uint8Array>;
161 | 
162 |       return {
163 |         type: "stream",
164 |         headers: responseHeaders,
165 |         statusCode: mockRes.statusCode as HttpStatusCode,
166 |         stream: webStream,
167 |         sessionId: transport.sessionId,
168 |       };
169 |     } catch (error) {
170 |       logger.error(
171 |         "Failed to initialize stateful session. Cleaning up orphaned resources.",
172 |         error instanceof Error ? error : undefined,
173 |         { ...opContext, error: String(error) },
174 |       );
175 | 
176 |       const sessionInitialized =
177 |         transport?.sessionId && this.transports.has(transport.sessionId);
178 |       if (!sessionInitialized) {
179 |         (async () => {
180 |           await ErrorHandler.tryCatch(
181 |             async () => {
182 |               if (transport) await transport.close();
183 |               if (server) await server.close();
184 |             },
185 |             {
186 |               operation: "initializeAndHandle.cleanupOrphaned",
187 |               context: opContext,
188 |             },
189 |           );
190 |         })();
191 |       }
192 |       throw ErrorHandler.handleError(error, {
193 |         operation: opContext.operation,
194 |         context: opContext,
195 |         rethrow: true,
196 |       });
197 |     }
198 |   }
199 | 
200 |   /**
201 |    * Handles a subsequent request for an existing stateful session.
202 |    */
203 |   async handleRequest(
204 |     headers: IncomingHttpHeaders,
205 |     body: unknown,
206 |     context: RequestContext,
207 |     sessionId?: string,
208 |   ): Promise<TransportResponse> {
209 |     if (!sessionId) {
210 |       throw new McpError(
211 |         BaseErrorCode.INVALID_INPUT,
212 |         "Session ID is required for stateful requests.",
213 |         context,
214 |       );
215 |     }
216 |     const sessionContext = {
217 |       ...context,
218 |       sessionId,
219 |       operation: "StatefulTransportManager.handleRequest",
220 |     };
221 | 
222 |     const transport = this.transports.get(sessionId);
223 |     const session = this.sessions.get(sessionId);
224 | 
225 |     if (!transport || !session) {
226 |       logger.warning(
227 |         `Request for non-existent session: ${sessionId}`,
228 |         sessionContext,
229 |       );
230 |       return {
231 |         type: "buffered",
232 |         headers: new Headers({ "Content-Type": "application/json" }),
233 |         statusCode: 404,
234 |         body: {
235 |           jsonrpc: "2.0",
236 |           error: { code: -32601, message: "Session not found" },
237 |         },
238 |       };
239 |     }
240 | 
241 |     session.lastAccessedAt = new Date();
242 |     session.activeRequests += 1;
243 |     logger.debug(
244 |       `Incremented activeRequests for session ${sessionId}. Count: ${session.activeRequests}`,
245 |       sessionContext,
246 |     );
247 | 
248 |     try {
249 |       const mockReq = {
250 |         headers,
251 |         method: "POST",
252 |         url: this.options.mcpHttpEndpointPath,
253 |       } as import("http").IncomingMessage;
254 |       const mockRes = new HonoStreamResponse() as unknown as ServerResponse;
255 | 
256 |       await transport.handleRequest(mockReq, mockRes, body);
257 | 
258 |       const responseHeaders = convertNodeHeadersToWebHeaders(
259 |         mockRes.getHeaders(),
260 |       );
261 |       const webStream = Readable.toWeb(
262 |         mockRes as unknown as HonoStreamResponse,
263 |       ) as ReadableStream<Uint8Array>;
264 | 
265 |       return {
266 |         type: "stream",
267 |         headers: responseHeaders,
268 |         statusCode: mockRes.statusCode as HttpStatusCode,
269 |         stream: webStream,
270 |         sessionId: transport.sessionId,
271 |       };
272 |     } catch (error) {
273 |       throw ErrorHandler.handleError(error, {
274 |         operation: sessionContext.operation,
275 |         context: sessionContext,
276 |         rethrow: true,
277 |       });
278 |     } finally {
279 |       session.activeRequests -= 1;
280 |       session.lastAccessedAt = new Date();
281 |       logger.debug(
282 |         `Decremented activeRequests for session ${sessionId}. Count: ${session.activeRequests}`,
283 |         sessionContext,
284 |       );
285 |     }
286 |   }
287 | 
288 |   /**
289 |    * Handles a request to explicitly delete a session.
290 |    */
291 |   async handleDeleteRequest(
292 |     sessionId: string,
293 |     context: RequestContext,
294 |   ): Promise<TransportResponse> {
295 |     const sessionContext = {
296 |       ...context,
297 |       sessionId,
298 |       operation: "StatefulTransportManager.handleDeleteRequest",
299 |     };
300 |     logger.info(`Attempting to delete session: ${sessionId}`, sessionContext);
301 | 
302 |     if (!this.transports.has(sessionId)) {
303 |       logger.warning(
304 |         `Attempted to delete non-existent session: ${sessionId}`,
305 |         sessionContext,
306 |       );
307 |       throw new McpError(
308 |         BaseErrorCode.NOT_FOUND,
309 |         "Session not found or expired.",
310 |         sessionContext,
311 |       );
312 |     }
313 | 
314 |     await this.closeSession(sessionId, sessionContext);
315 | 
316 |     return {
317 |       type: "buffered",
318 |       headers: new Headers({ "Content-Type": "application/json" }),
319 |       statusCode: 200 as HttpStatusCode,
320 |       body: { status: "session_closed", sessionId },
321 |     };
322 |   }
323 | 
324 |   /**
325 |    * Retrieves information about a specific session.
326 |    */
327 |   getSession(sessionId: string): TransportSession | undefined {
328 |     return this.sessions.get(sessionId);
329 |   }
330 | 
331 |   /**
332 |    * Gracefully shuts down the manager, closing all active sessions.
333 |    */
334 |   async shutdown(): Promise<void> {
335 |     const context = requestContextService.createRequestContext({
336 |       operation: "StatefulTransportManager.shutdown",
337 |     });
338 |     logger.info("Shutting down stateful transport manager...", context);
339 |     clearInterval(this.garbageCollector);
340 |     logger.debug("Garbage collector stopped.", context);
341 | 
342 |     const sessionIds = Array.from(this.transports.keys());
343 |     if (sessionIds.length > 0) {
344 |       logger.info(`Closing ${sessionIds.length} active sessions.`, context);
345 |       const closePromises = sessionIds.map((sessionId) =>
346 |         this.closeSession(sessionId, context),
347 |       );
348 |       await Promise.all(closePromises);
349 |     }
350 | 
351 |     this.transports.clear();
352 |     this.sessions.clear();
353 |     this.servers.clear();
354 |     logger.info("All active sessions closed and manager shut down.", context);
355 |   }
356 | 
357 |   /**
358 |    * Closes a single session and releases its associated resources.
359 |    */
360 |   private async closeSession(
361 |     sessionId: string,
362 |     context: RequestContext,
363 |   ): Promise<void> {
364 |     const sessionContext = {
365 |       ...context,
366 |       sessionId,
367 |       operation: "StatefulTransportManager.closeSession",
368 |     };
369 |     logger.debug(`Closing session: ${sessionId}`, sessionContext);
370 | 
371 |     const transport = this.transports.get(sessionId);
372 |     const server = this.servers.get(sessionId);
373 | 
374 |     await ErrorHandler.tryCatch(
375 |       async () => {
376 |         if (transport) await transport.close();
377 |         if (server) await server.close();
378 |       },
379 |       { operation: "closeSession.cleanup", context: sessionContext },
380 |     );
381 | 
382 |     this.transports.delete(sessionId);
383 |     this.servers.delete(sessionId);
384 |     this.sessions.delete(sessionId);
385 | 
386 |     logger.info(
387 |       `MCP Session closed and resources released: ${sessionId}`,
388 |       sessionContext,
389 |     );
390 |   }
391 | 
392 |   /**
393 |    * Periodically runs to find and clean up stale, inactive sessions.
394 |    */
395 |   private async cleanupStaleSessions(): Promise<void> {
396 |     const context = requestContextService.createRequestContext({
397 |       operation: "StatefulTransportManager.cleanupStaleSessions",
398 |     });
399 |     logger.debug("Running stale session cleanup...", context);
400 | 
401 |     const now = Date.now();
402 |     const STALE_TIMEOUT_MS = this.options.staleSessionTimeoutMs;
403 |     const staleSessionIds: string[] = [];
404 | 
405 |     for (const [sessionId, session] of this.sessions.entries()) {
406 |       if (now - session.lastAccessedAt.getTime() > STALE_TIMEOUT_MS) {
407 |         if (session.activeRequests > 0) {
408 |           logger.info(
409 |             `Session ${sessionId} is stale but has ${session.activeRequests} active requests. Skipping cleanup.`,
410 |             { ...context, sessionId },
411 |           );
412 |           continue;
413 |         }
414 |         staleSessionIds.push(sessionId);
415 |       }
416 |     }
417 | 
418 |     if (staleSessionIds.length > 0) {
419 |       logger.info(
420 |         `Found ${staleSessionIds.length} stale sessions. Closing concurrently.`,
421 |         context,
422 |       );
423 |       const closePromises = staleSessionIds.map((sessionId) =>
424 |         this.closeSession(sessionId, context).catch((err) => {
425 |           logger.error(
426 |             `Error during concurrent stale session cleanup for ${sessionId}`,
427 |             err,
428 |             { ...context, sessionId },
429 |           );
430 |         }),
431 |       );
432 |       await Promise.all(closePromises);
433 |       logger.info(
434 |         `Stale session cleanup complete. Closed ${staleSessionIds.length} sessions.`,
435 |         context,
436 |       );
437 |     } else {
438 |       logger.debug("No stale sessions found.", context);
439 |     }
440 |   }
441 | }
442 | 
```
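A minimal wiring sketch for this manager is shown below. The server metadata, timeout, endpoint path, and import paths are assumptions for illustration; in the repository these values come from the HTTP transport layer and configuration.

```typescript
// Hypothetical sketch: constructing the manager and routing requests by session.
import type { IncomingHttpHeaders } from "http";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StatefulTransportManager } from "./statefulTransportManager.js"; // hypothetical path
import { requestContextService } from "../../../utils/index.js"; // hypothetical path

// Each session gets its own McpServer instance from this factory (metadata is placeholder).
const manager = new StatefulTransportManager(
  async () => new McpServer({ name: "pubmed-mcp-server", version: "0.0.0" }),
  {
    staleSessionTimeoutMs: 30 * 60 * 1000, // assumed 30-minute idle timeout
    mcpHttpEndpointPath: "/mcp", // assumed endpoint path
  },
);

export async function routeMcpRequest(
  headers: IncomingHttpHeaders,
  body: unknown,
  sessionId?: string,
) {
  const context = requestContextService.createRequestContext({
    operation: "routeMcpRequest",
  });
  if (!sessionId) {
    // No Mcp-Session-Id yet: create a session; the response carries the new ID.
    return manager.initializeAndHandle(headers, body, context);
  }
  // Existing session: the manager looks up the transport that owns it (or returns 404).
  return manager.handleRequest(headers, body, context, sessionId);
}
```

On shutdown, `manager.shutdown()` stops the garbage collector and closes every open session before clearing its internal maps.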

--------------------------------------------------------------------------------
/examples/pubmed_fetch_contents_example.md:
--------------------------------------------------------------------------------

```markdown
  1 | Tool Call Arguments:
  2 | 
  3 | ```json
  4 | {
  5 |   "pmids": ["39715098", "39359093", "39704040"],
  6 |   "detailLevel": "abstract_plus",
  7 |   "includeMeshTerms": true,
  8 |   "includeGrantInfo": true,
  9 |   "outputFormat": "json"
 10 | }
 11 | ```
 12 | 
 13 | Tool Response:
 14 | 
 15 | ```json
 16 | {
 17 |   "requestedPmids": ["39715098", "39359093", "39704040"],
 18 |   "articles": [
 19 |     {
 20 |       "pmid": "39715098",
 21 |       "title": "The compound (E)-2-(3,4-dihydroxystyryl)-3-hydroxy-4H-pyran-4-one alleviates neuroinflammation and cognitive impairment in a mouse model of Alzheimer's disease.",
 22 |       "abstractText": "JOURNAL/nrgr/04.03/01300535-202511000-00034/figure1/v/2024-12-20T164640Z/r/image-tiff Previous studies have shown that the compound (E)-2-(3,4-dihydroxystyryl)-3-hydroxy-4H-pyran-4-one (D30), a pyromeconic acid derivative, possesses antioxidant and anti-inflammatory properties, inhibits amyloid-&#x3b2; aggregation, and alleviates scopolamine-induced cognitive impairment, similar to the phase III clinical drug resveratrol. In this study, we established a mouse model of Alzheimer's disease via intracerebroventricular injection of fibrillar amyloid-&#x3b2; to investigate the effect of D30 on fibrillar amyloid-&#x3b2;-induced neuropathology. Our results showed that D30 alleviated fibrillar amyloid-&#x3b2;-induced cognitive impairment, promoted fibrillar amyloid-&#x3b2; clearance from the hippocampus and cortex, suppressed oxidative stress, and inhibited activation of microglia and astrocytes. D30 also reversed the fibrillar amyloid-&#x3b2;-induced loss of dendritic spines and synaptic protein expression. Notably, we demonstrated that exogenous fibrillar amyloid-&#x3b2; introduced by intracerebroventricular injection greatly increased galectin-3 expression levels in the brain, and this increase was blocked by D30. Considering the role of D30 in clearing amyloid-&#x3b2;, inhibiting neuroinflammation, protecting synapses, and improving cognition, this study highlights the potential of galectin-3 as a promising treatment target for patients with Alzheimer's disease.",
 23 |       "authors": [
 24 |         {
 25 |           "lastName": "Liu",
 26 |           "firstName": "Xueyan",
 27 |           "initials": "X",
 28 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 29 |         },
 30 |         {
 31 |           "lastName": "Wu",
 32 |           "firstName": "Wei",
 33 |           "initials": "W",
 34 |           "affiliation": "Fujian Provincial Key Laboratory of Brain Aging and Neurodegenerative Diseases, School of Basic Medical Sciences, Fujian Medical University, Fuzhou, Fujian Province, China."
 35 |         },
 36 |         {
 37 |           "lastName": "Li",
 38 |           "firstName": "Xuejuan",
 39 |           "initials": "X",
 40 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 41 |         },
 42 |         {
 43 |           "lastName": "Wang",
 44 |           "firstName": "Chengyan",
 45 |           "initials": "C",
 46 |           "affiliation": "Institute of Laboratory Animal Center, Fujian Medical University, Fuzhou, Fujian Province, China."
 47 |         },
 48 |         {
 49 |           "lastName": "Chai",
 50 |           "firstName": "Ke",
 51 |           "initials": "K",
 52 |           "affiliation": "Fujian Provincial Key Laboratory of Brain Aging and Neurodegenerative Diseases, School of Basic Medical Sciences, Fujian Medical University, Fuzhou, Fujian Province, China."
 53 |         },
 54 |         {
 55 |           "lastName": "Yuan",
 56 |           "firstName": "Fanru",
 57 |           "initials": "F",
 58 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 59 |         },
 60 |         {
 61 |           "lastName": "Zheng",
 62 |           "firstName": "Huijuan",
 63 |           "initials": "H",
 64 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 65 |         },
 66 |         {
 67 |           "lastName": "Yao",
 68 |           "firstName": "Yuxing",
 69 |           "initials": "Y",
 70 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 71 |         },
 72 |         {
 73 |           "lastName": "Li",
 74 |           "firstName": "Chenlu",
 75 |           "initials": "C",
 76 |           "affiliation": "Department of Neurosurgery, Neurosurgery Research Institute, The First Affiliated Hospital, Fujian Medical University, Fuzhou, Fujian Province, China."
 77 |         },
 78 |         {
 79 |           "lastName": "Ye",
 80 |           "firstName": "Zu-Cheng",
 81 |           "initials": "ZC",
 82 |           "affiliation": "Fujian Provincial Key Laboratory of Brain Aging and Neurodegenerative Diseases, School of Basic Medical Sciences, Fujian Medical University, Fuzhou, Fujian Province, China."
 83 |         },
 84 |         {
 85 |           "lastName": "Zha",
 86 |           "firstName": "Daijun",
 87 |           "initials": "D",
 88 |           "affiliation": "Department of Medicinal Chemistry, School of Pharmacy, Fujian Medical University, Fuzhou, Fujian Province, China."
 89 |         }
 90 |       ],
 91 |       "journalInfo": {
 92 |         "title": "Neural regeneration research",
 93 |         "isoAbbreviation": "Neural Regen Res",
 94 |         "volume": "20",
 95 |         "issue": "11",
 96 |         "pages": "3330-3344",
 97 |         "publicationDate": { "year": "2025", "month": "Nov", "day": "1" }
 98 |       },
 99 |       "publicationTypes": ["Journal Article"],
100 |       "doi": "10.4103/NRR.NRR-D-23-01890",
101 |       "articleDates": [
102 |         { "dateType": "Electronic", "year": "2024", "month": "7", "day": "10" }
103 |       ]
104 |     },
105 |     {
106 |       "pmid": "39359093",
107 |       "title": "The cGAS-STING-interferon regulatory factor 7 pathway regulates neuroinflammation in Parkinson's disease.",
108 |       "abstractText": "JOURNAL/nrgr/04.03/01300535-202508000-00026/figure1/v/2024-09-30T120553Z/r/image-tiff Interferon regulatory factor 7 plays a crucial role in the innate immune response. However, whether interferon regulatory factor 7-mediated signaling contributes to Parkinson's disease remains unknown. Here we report that interferon regulatory factor 7 is markedly up-regulated in a 1-methyl-4-phenyl-1,2,3,6-tetrahydropyridine-induced mouse model of Parkinson's disease and co-localizes with microglial cells. Both the selective cyclic guanosine monophosphate adenosine monophosphate synthase inhibitor RU.521 and the stimulator of interferon genes inhibitor H151 effectively suppressed interferon regulatory factor 7 activation in BV2 microglia exposed to 1-methyl-4-phenylpyridinium and inhibited transformation of mouse BV2 microglia into the neurotoxic M1 phenotype. In addition, siRNA-mediated knockdown of interferon regulatory factor 7 expression in BV2 microglia reduced the expression of inducible nitric oxide synthase, tumor necrosis factor &#x3b1;, CD16, CD32, and CD86 and increased the expression of the anti-inflammatory markers ARG1 and YM1. Taken together, our findings indicate that the cyclic guanosine monophosphate adenosine monophosphate synthase-stimulator of interferon genes-interferon regulatory factor 7 pathway plays a crucial role in the pathogenesis of Parkinson's disease.",
109 |       "authors": [
110 |         {
111 |           "lastName": "Zhou",
112 |           "firstName": "Shengyang",
113 |           "initials": "S",
114 |           "affiliation": "Laboratory of Neurodegenerative and Neuroinjury Diseases, Wuxi Medicine School, Jiangnan University, Wuxi, Jiangsu Province, China."
115 |         },
116 |         { "lastName": "Li", "firstName": "Ting", "initials": "T" },
117 |         { "lastName": "Zhang", "firstName": "Wei", "initials": "W" },
118 |         { "lastName": "Wu", "firstName": "Jian", "initials": "J" },
119 |         { "lastName": "Hong", "firstName": "Hui", "initials": "H" },
120 |         { "lastName": "Quan", "firstName": "Wei", "initials": "W" },
121 |         { "lastName": "Qiao", "firstName": "Xinyu", "initials": "X" },
122 |         { "lastName": "Cui", "firstName": "Chun", "initials": "C" },
123 |         { "lastName": "Qiao", "firstName": "Chenmeng", "initials": "C" },
124 |         { "lastName": "Zhao", "firstName": "Weijiang", "initials": "W" },
125 |         { "lastName": "Shen", "firstName": "Yanqin", "initials": "Y" }
126 |       ],
127 |       "journalInfo": {
128 |         "title": "Neural regeneration research",
129 |         "isoAbbreviation": "Neural Regen Res",
130 |         "volume": "20",
131 |         "issue": "8",
132 |         "pages": "2361-2372",
133 |         "publicationDate": { "year": "2025", "month": "Aug", "day": "1" }
134 |       },
135 |       "publicationTypes": ["Journal Article"],
136 |       "doi": "10.4103/NRR.NRR-D-23-01684",
137 |       "articleDates": [
138 |         { "dateType": "Electronic", "year": "2024", "month": "6", "day": "3" }
139 |       ]
140 |     },
141 |     {
142 |       "pmid": "39704040",
143 |       "title": "&#x3b1;-Synuclein in Parkinson's Disease: From Bench to Bedside.",
144 |       "abstractText": "&#x3b1;-Synuclein (&#x3b1;-syn), a pathological hallmark of PD, is emerging as a bridging element at the crossroads between neuro/immune-inflammatory responses and neurodegeneration in PD. Several evidence show that pathological &#x3b1;-syn accumulates in neuronal and non-neuronal cells (i.e., neurons, microglia, macrophages, skin cells, and intestinal cells) in central and peripheral tissues since the prodromal phase of the disease, contributing to brain pathology. Indeed, pathological &#x3b1;-syn deposition can promote neurogenic/immune-inflammatory responses that contribute to systemic and central neuroinflammation associated with PD. After providing an overview of the structure and functions of physiological &#x3b1;-syn as well as its pathological forms, we review current studies about the role of neuronal and non-neuronal &#x3b1;-syn at the crossroads between neuroinflammation and neurodegeneration in PD. In addition, we provide an overview of the correlation between the accumulation of &#x3b1;-syn in central and peripheral tissues and PD, related symptoms, and neuroinflammation. Special attention was paid to discussing whether targeting &#x3b1;-syn can represent a suitable therapeutical approach for PD.",
145 |       "authors": [
146 |         {
147 |           "lastName": "Bellini",
148 |           "firstName": "Gabriele",
149 |           "initials": "G",
150 |           "affiliation": "Center for Neurodegenerative Diseases, Unit of Neurology, Parkinson's Disease and Movement Disorders, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
151 |         },
152 |         {
153 |           "lastName": "D'Antongiovanni",
154 |           "firstName": "Vanessa",
155 |           "initials": "V",
156 |           "affiliation": "Unit of Histology and Embryology, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
157 |         },
158 |         {
159 |           "lastName": "Palermo",
160 |           "firstName": "Giovanni",
161 |           "initials": "G",
162 |           "affiliation": "Center for Neurodegenerative Diseases, Unit of Neurology, Parkinson's Disease and Movement Disorders, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
163 |         },
164 |         {
165 |           "lastName": "Antonioli",
166 |           "firstName": "Luca",
167 |           "initials": "L",
168 |           "affiliation": "Unit of Pharmacology and Pharmacovigilance, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
169 |         },
170 |         {
171 |           "lastName": "Fornai",
172 |           "firstName": "Matteo",
173 |           "initials": "M",
174 |           "affiliation": "Unit of Pharmacology and Pharmacovigilance, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
175 |         },
176 |         {
177 |           "lastName": "Ceravolo",
178 |           "firstName": "Roberto",
179 |           "initials": "R",
180 |           "affiliation": "Center for Neurodegenerative Diseases, Unit of Neurology, Parkinson's Disease and Movement Disorders, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
181 |         },
182 |         {
183 |           "lastName": "Bernardini",
184 |           "firstName": "Nunzia",
185 |           "initials": "N",
186 |           "affiliation": "Unit of Histology and Embryology, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
187 |         },
188 |         {
189 |           "lastName": "Derkinderen",
190 |           "firstName": "Pascal",
191 |           "initials": "P",
192 |           "affiliation": "Department of Neurology, Nantes Universit&#xe9;, CHU Nantes, INSERM, Nantes, France."
193 |         },
194 |         {
195 |           "lastName": "Pellegrini",
196 |           "firstName": "Carolina",
197 |           "initials": "C",
198 |           "affiliation": "Unit of Histology and Embryology, Department of Clinical and Experimental Medicine, University of Pisa, Pisa, Italy."
199 |         }
200 |       ],
201 |       "journalInfo": {
202 |         "title": "Medicinal research reviews",
203 |         "isoAbbreviation": "Med Res Rev",
204 |         "volume": "45",
205 |         "issue": "3",
206 |         "pages": "909-946",
207 |         "publicationDate": { "year": "2025", "month": "May" }
208 |       },
209 |       "publicationTypes": ["Journal Article", "Review"],
210 |       "doi": "10.1002/med.22091",
211 |       "articleDates": [
212 |         { "dateType": "Electronic", "year": "2024", "month": "12", "day": "20" }
213 |       ],
214 |       "meshTerms": [
215 |         {
216 |           "descriptorName": "Humans",
217 |           "descriptorUi": "D006801",
218 |           "isMajorTopic": false
219 |         },
220 |         {
221 |           "descriptorName": "alpha-Synuclein",
222 |           "descriptorUi": "D051844",
223 |           "qualifierName": "metabolism",
224 |           "qualifierUi": "Q000378",
225 |           "isMajorTopic": true
226 |         },
227 |         {
228 |           "descriptorName": "Parkinson Disease",
229 |           "descriptorUi": "D010300",
230 |           "qualifierName": "metabolism",
231 |           "qualifierUi": "Q000378",
232 |           "isMajorTopic": true
233 |         },
234 |         {
235 |           "descriptorName": "Animals",
236 |           "descriptorUi": "D000818",
237 |           "isMajorTopic": false
238 |         },
239 |         {
240 |           "descriptorName": "Translational Research, Biomedical",
241 |           "descriptorUi": "D057170",
242 |           "isMajorTopic": true
243 |         },
244 |         {
245 |           "descriptorName": "Inflammation",
246 |           "descriptorUi": "D007249",
247 |           "isMajorTopic": false
248 |         },
249 |         {
250 |           "descriptorName": "Neurons",
251 |           "descriptorUi": "D009474",
252 |           "qualifierName": "metabolism",
253 |           "qualifierUi": "Q000378",
254 |           "isMajorTopic": false
255 |         }
256 |       ],
257 |       "grantList": [
258 |         { "agency": "The authors received no specific funding for this work." }
259 |       ]
260 |     }
261 |   ],
262 |   "notFoundPmids": [],
263 |   "eFetchDetails": {
264 |     "urls": [
265 |       "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=39715098%2C39359093%2C39704040&retmode=xml"
266 |     ],
267 |     "requestMethod": "GET"
268 |   }
269 | }
270 | ```
271 | 
```
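For context, the call above could be issued programmatically roughly as follows, assuming the MCP TypeScript SDK client API; the class names, method signature, and launch command are assumptions and are not taken from this repository.

```typescript
// Hypothetical client-side sketch: invoking pubmed_fetch_contents with the arguments shown above.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function fetchArticleDetails() {
  const transport = new StdioClientTransport({
    command: "npx",
    args: ["pubmed-mcp-server"], // placeholder launch command
  });
  const client = new Client(
    { name: "example-client", version: "1.0.0" },
    { capabilities: {} },
  );
  await client.connect(transport);

  const result = await client.callTool({
    name: "pubmed_fetch_contents",
    arguments: {
      pmids: ["39715098", "39359093", "39704040"],
      detailLevel: "abstract_plus",
      includeMeshTerms: true,
      includeGrantInfo: true,
      outputFormat: "json",
    },
  });

  console.log(result); // JSON payload shaped like the Tool Response above
  await client.close();
}
```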

--------------------------------------------------------------------------------
/src/utils/internal/errorHandler.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * @fileoverview This module provides utilities for robust error handling.
  3 |  * It defines structures for error context, options for handling errors,
  4 |  * and mappings for classifying errors. The main `ErrorHandler` class
  5 |  * offers static methods for consistent error processing, logging, and transformation.
  6 |  * @module src/utils/internal/errorHandler
  7 |  */
  8 | import { SpanStatusCode, trace } from "@opentelemetry/api";
  9 | import { BaseErrorCode, McpError } from "../../types-global/errors.js";
 10 | import { generateUUID, sanitizeInputForLogging } from "../index.js";
 11 | import { logger } from "./logger.js";
 12 | import { RequestContext } from "./requestContext.js";
 13 | 
 14 | /**
 15 |  * Defines a generic structure for providing context with errors.
 16 |  * This context can include identifiers like `requestId` or any other relevant
 17 |  * key-value pairs that aid in debugging or understanding the error's circumstances.
 18 |  */
 19 | export interface ErrorContext {
 20 |   /**
 21 |    * A unique identifier for the request or operation during which the error occurred.
 22 |    * Useful for tracing errors through logs and distributed systems.
 23 |    */
 24 |   requestId?: string;
 25 | 
 26 |   /**
 27 |    * Allows for arbitrary additional context information.
 28 |    * Keys are strings, and values can be of any type.
 29 |    */
 30 |   [key: string]: unknown;
 31 | }
 32 | 
 33 | /**
 34 |  * Configuration options for the `ErrorHandler.handleError` method.
 35 |  * These options control how an error is processed, logged, and whether it's rethrown.
 36 |  */
 37 | export interface ErrorHandlerOptions {
 38 |   /**
 39 |    * The context of the operation that caused the error.
 40 |    * This can include `requestId` and other relevant debugging information.
 41 |    */
 42 |   context?: ErrorContext;
 43 | 
 44 |   /**
 45 |    * A descriptive name of the operation being performed when the error occurred.
 46 |    * This helps in identifying the source or nature of the error in logs.
 47 |    * Example: "UserLogin", "ProcessPayment", "FetchUserProfile".
 48 |    */
 49 |   operation: string;
 50 | 
 51 |   /**
 52 |    * The input data or parameters that were being processed when the error occurred.
 53 |    * This input will be sanitized before logging to prevent sensitive data exposure.
 54 |    */
 55 |   input?: unknown;
 56 | 
 57 |   /**
 58 |    * If true, the (potentially transformed) error will be rethrown after handling.
 59 |    * Defaults to `false`.
 60 |    */
 61 |   rethrow?: boolean;
 62 | 
 63 |   /**
 64 |    * A specific `BaseErrorCode` to assign to the error, overriding any
 65 |    * automatically determined error code.
 66 |    */
 67 |   errorCode?: BaseErrorCode;
 68 | 
 69 |   /**
 70 |    * A custom function to map or transform the original error into a new `Error` instance.
 71 |    * If provided, this function is used instead of the default `McpError` creation.
 72 |    * @param error - The original error that occurred.
 73 |    * @returns The transformed error.
 74 |    */
 75 |   errorMapper?: (error: unknown) => Error;
 76 | 
 77 |   /**
 78 |    * If true, stack traces will be included in the logs.
 79 |    * Defaults to `true`.
 80 |    */
 81 |   includeStack?: boolean;
 82 | 
 83 |   /**
 84 |    * If true, indicates that the error is critical and might require immediate attention
 85 |    * or could lead to system instability. This is primarily for logging and alerting.
 86 |    * Defaults to `false`.
 87 |    */
 88 |   critical?: boolean;
 89 | }
 90 | 
 91 | /**
 92 |  * Defines a basic rule for mapping errors based on patterns.
 93 |  * Used internally by `COMMON_ERROR_PATTERNS` and as a base for `ErrorMapping`.
 94 |  */
 95 | export interface BaseErrorMapping {
 96 |   /**
 97 |    * A string or regular expression to match against the error message.
 98 |    * If a string is provided, it's typically used for substring matching (case-insensitive).
 99 |    */
100 |   pattern: string | RegExp;
101 | 
102 |   /**
103 |    * The `BaseErrorCode` to assign if the pattern matches.
104 |    */
105 |   errorCode: BaseErrorCode;
106 | 
107 |   /**
108 |    * An optional custom message template for the mapped error.
109 |    * (Note: This property is defined but not directly used by `ErrorHandler.determineErrorCode`
110 |    * which focuses on `errorCode`. It's more relevant for custom mapping logic.)
111 |    */
112 |   messageTemplate?: string;
113 | }
114 | 
115 | /**
116 |  * Extends `BaseErrorMapping` to include a factory function for creating
117 |  * specific error instances and additional context for the mapping.
118 |  * Used by `ErrorHandler.mapError`.
119 |  * @template T The type of `Error` this mapping will produce, defaults to `Error`.
120 |  */
121 | export interface ErrorMapping<T extends Error = Error>
122 |   extends BaseErrorMapping {
123 |   /**
124 |    * A factory function that creates and returns an instance of the mapped error type `T`.
125 |    * @param error - The original error that occurred.
126 |    * @param context - Optional additional context provided in the mapping rule.
127 |    * @returns The newly created error instance.
128 |    */
129 |   factory: (error: unknown, context?: Record<string, unknown>) => T;
130 | 
131 |   /**
132 |    * Additional static context to be merged or passed to the `factory` function
133 |    * when this mapping rule is applied.
134 |    */
135 |   additionalContext?: Record<string, unknown>;
136 | }
137 | 
138 | /**
139 |  * Maps standard JavaScript error constructor names to `BaseErrorCode` values.
140 |  * @private
141 |  */
142 | const ERROR_TYPE_MAPPINGS: Readonly<Record<string, BaseErrorCode>> = {
143 |   SyntaxError: BaseErrorCode.VALIDATION_ERROR,
144 |   TypeError: BaseErrorCode.VALIDATION_ERROR,
145 |   ReferenceError: BaseErrorCode.INTERNAL_ERROR,
146 |   RangeError: BaseErrorCode.VALIDATION_ERROR,
147 |   URIError: BaseErrorCode.VALIDATION_ERROR,
148 |   EvalError: BaseErrorCode.INTERNAL_ERROR,
149 | };
150 | 
151 | /**
152 |  * Array of `BaseErrorMapping` rules to classify errors by message/name patterns.
153 |  * Order matters: more specific patterns should precede generic ones.
154 |  * @private
155 |  */
156 | const COMMON_ERROR_PATTERNS: ReadonlyArray<Readonly<BaseErrorMapping>> = [
157 |   {
158 |     pattern:
159 |       /auth|unauthorized|unauthenticated|not.*logged.*in|invalid.*token|expired.*token/i,
160 |     errorCode: BaseErrorCode.UNAUTHORIZED,
161 |   },
162 |   {
163 |     pattern: /permission|forbidden|access.*denied|not.*allowed/i,
164 |     errorCode: BaseErrorCode.FORBIDDEN,
165 |   },
166 |   {
167 |     pattern: /not found|missing|no such|doesn't exist|couldn't find/i,
168 |     errorCode: BaseErrorCode.NOT_FOUND,
169 |   },
170 |   {
171 |     pattern:
172 |       /invalid|validation|malformed|bad request|wrong format|missing required/i,
173 |     errorCode: BaseErrorCode.VALIDATION_ERROR,
174 |   },
175 |   {
176 |     pattern: /conflict|already exists|duplicate|unique constraint/i,
177 |     errorCode: BaseErrorCode.CONFLICT,
178 |   },
179 |   {
180 |     pattern: /rate limit|too many requests|throttled/i,
181 |     errorCode: BaseErrorCode.RATE_LIMITED,
182 |   },
183 |   {
184 |     pattern: /timeout|timed out|deadline exceeded/i,
185 |     errorCode: BaseErrorCode.TIMEOUT,
186 |   },
187 |   {
188 |     pattern: /service unavailable|bad gateway|gateway timeout|upstream error/i,
189 |     errorCode: BaseErrorCode.SERVICE_UNAVAILABLE,
190 |   },
191 | ];
192 | 
193 | /**
194 |  * Creates a "safe" RegExp for testing error messages.
195 |  * Ensures case-insensitivity and removes the global flag.
196 |  * @param pattern - The string or RegExp pattern.
197 |  * @returns A new RegExp instance.
198 |  * @private
199 |  */
200 | function createSafeRegex(pattern: string | RegExp): RegExp {
201 |   if (pattern instanceof RegExp) {
202 |     let flags = pattern.flags.replace("g", "");
203 |     if (!flags.includes("i")) {
204 |       flags += "i";
205 |     }
206 |     return new RegExp(pattern.source, flags);
207 |   }
208 |   return new RegExp(pattern, "i");
209 | }
210 | 
211 | /**
212 |  * Retrieves a descriptive name for an error object or value.
213 |  * @param error - The error object or value.
214 |  * @returns A string representing the error's name or type.
215 |  * @private
216 |  */
217 | function getErrorName(error: unknown): string {
218 |   if (error instanceof Error) {
219 |     return error.name || "Error";
220 |   }
221 |   if (error === null) {
222 |     return "NullValueEncountered";
223 |   }
224 |   if (error === undefined) {
225 |     return "UndefinedValueEncountered";
226 |   }
227 |   if (
228 |     typeof error === "object" &&
229 |     error !== null &&
230 |     error.constructor &&
231 |     typeof error.constructor.name === "string" &&
232 |     error.constructor.name !== "Object"
233 |   ) {
234 |     return `${error.constructor.name}Encountered`;
235 |   }
236 |   return `${typeof error}Encountered`;
237 | }
238 | 
239 | /**
240 |  * Extracts a message string from an error object or value.
241 |  * @param error - The error object or value.
242 |  * @returns The error message string.
243 |  * @private
244 |  */
245 | function getErrorMessage(error: unknown): string {
246 |   if (error instanceof Error) {
247 |     return error.message;
248 |   }
249 |   if (error === null) {
250 |     return "Null value encountered as error";
251 |   }
252 |   if (error === undefined) {
253 |     return "Undefined value encountered as error";
254 |   }
255 |   if (typeof error === "string") {
256 |     return error;
257 |   }
258 |   try {
259 |     const str = String(error);
260 |     if (str === "[object Object]" && error !== null) {
261 |       try {
262 |         return `Non-Error object encountered: ${JSON.stringify(error)}`;
263 |       } catch {
264 |         return `Unstringifyable non-Error object encountered (constructor: ${error.constructor?.name || "Unknown"})`;
265 |       }
266 |     }
267 |     return str;
268 |   } catch (e) {
269 |     return `Error converting error to string: ${e instanceof Error ? e.message : "Unknown conversion error"}`;
270 |   }
271 | }
272 | 
273 | /**
274 |  * A utility class providing static methods for comprehensive error handling.
275 |  */
276 | export class ErrorHandler {
277 |   /**
278 |    * Determines an appropriate `BaseErrorCode` for a given error.
279 |    * Checks `McpError` instances, `ERROR_TYPE_MAPPINGS`, and `COMMON_ERROR_PATTERNS`.
280 |    * Defaults to `BaseErrorCode.INTERNAL_ERROR`.
281 |    * @param error - The error instance or value to classify.
282 |    * @returns The determined error code.
283 |    */
284 |   public static determineErrorCode(error: unknown): BaseErrorCode {
285 |     if (error instanceof McpError) {
286 |       return error.code;
287 |     }
288 | 
289 |     const errorName = getErrorName(error);
290 |     const errorMessage = getErrorMessage(error);
291 | 
292 |     const mappedFromType =
293 |       ERROR_TYPE_MAPPINGS[errorName as keyof typeof ERROR_TYPE_MAPPINGS];
294 |     if (mappedFromType) {
295 |       return mappedFromType;
296 |     }
297 | 
298 |     for (const mapping of COMMON_ERROR_PATTERNS) {
299 |       const regex = createSafeRegex(mapping.pattern);
300 |       if (regex.test(errorMessage) || regex.test(errorName)) {
301 |         return mapping.errorCode;
302 |       }
303 |     }
304 |     return BaseErrorCode.INTERNAL_ERROR;
305 |   }
306 | 
307 |   /**
308 |    * Handles an error with consistent logging and optional transformation.
309 |    * Sanitizes input, determines error code, logs details, and can rethrow.
310 |    * @param error - The error instance or value that occurred.
311 |    * @param options - Configuration for handling the error.
312 |    * @returns The handled (and potentially transformed) error instance.
313 |    */
314 |   public static handleError(
315 |     error: unknown,
316 |     options: ErrorHandlerOptions,
317 |   ): Error {
318 |     // --- OpenTelemetry Integration ---
319 |     const activeSpan = trace.getActiveSpan();
320 |     if (activeSpan) {
321 |       if (error instanceof Error) {
322 |         activeSpan.recordException(error);
323 |       }
324 |       activeSpan.setStatus({
325 |         code: SpanStatusCode.ERROR,
326 |         message: error instanceof Error ? error.message : String(error),
327 |       });
328 |     }
329 |     // --- End OpenTelemetry Integration ---
330 | 
331 |     const {
332 |       context = {},
333 |       operation,
334 |       input,
335 |       rethrow = false,
336 |       errorCode: explicitErrorCode,
337 |       includeStack = true,
338 |       critical = false,
339 |       errorMapper,
340 |     } = options;
341 | 
342 |     const sanitizedInput =
343 |       input !== undefined ? sanitizeInputForLogging(input) : undefined;
344 |     const originalErrorName = getErrorName(error);
345 |     const originalErrorMessage = getErrorMessage(error);
346 |     const originalStack = error instanceof Error ? error.stack : undefined;
347 | 
348 |     let finalError: Error;
349 |     let loggedErrorCode: BaseErrorCode;
350 | 
351 |     const errorDetailsSeed =
352 |       error instanceof McpError &&
353 |       typeof error.details === "object" &&
354 |       error.details !== null
355 |         ? { ...error.details }
356 |         : {};
357 | 
358 |     const consolidatedDetails: Record<string, unknown> = {
359 |       ...errorDetailsSeed,
360 |       ...context,
361 |       originalErrorName,
362 |       originalMessage: originalErrorMessage,
363 |     };
364 |     if (
365 |       originalStack &&
366 |       !(error instanceof McpError && error.details?.originalStack)
367 |     ) {
368 |       consolidatedDetails.originalStack = originalStack;
369 |     }
370 | 
371 |     const cause = error instanceof Error ? error : undefined;
372 | 
373 |     if (error instanceof McpError) {
374 |       loggedErrorCode = error.code;
375 |       finalError = errorMapper
376 |         ? errorMapper(error)
377 |         : new McpError(error.code, error.message, {
378 |             ...consolidatedDetails,
379 |             cause,
380 |           });
381 |     } else {
382 |       loggedErrorCode =
383 |         explicitErrorCode || ErrorHandler.determineErrorCode(error);
384 |       const message = `Error in ${operation}: ${originalErrorMessage}`;
385 |       finalError = errorMapper
386 |         ? errorMapper(error)
387 |         : new McpError(loggedErrorCode, message, {
388 |             ...consolidatedDetails,
389 |             cause,
390 |           });
391 |     }
392 | 
393 |     if (
394 |       finalError !== error &&
395 |       error instanceof Error &&
396 |       finalError instanceof Error &&
397 |       !finalError.stack &&
398 |       error.stack
399 |     ) {
400 |       finalError.stack = error.stack;
401 |     }
402 | 
403 |     const logRequestId =
404 |       typeof context.requestId === "string" && context.requestId
405 |         ? context.requestId
406 |         : generateUUID();
407 | 
408 |     const logTimestamp =
409 |       typeof context.timestamp === "string" && context.timestamp
410 |         ? context.timestamp
411 |         : new Date().toISOString();
412 | 
413 |     const logPayload: Record<string, unknown> = {
414 |       ...Object.fromEntries(
415 |         Object.entries(context).filter(
416 |           ([key]) => key !== "requestId" && key !== "timestamp",
417 |         ),
418 |       ),
419 |       requestId: logRequestId,
420 |       timestamp: logTimestamp,
421 |       operation,
422 |       input: sanitizedInput,
423 |       critical,
424 |       errorCode: loggedErrorCode,
425 |       originalErrorType: originalErrorName,
426 |       finalErrorType: getErrorName(finalError),
427 |     };
428 | 
429 |     if (finalError instanceof McpError && finalError.details) {
430 |       logPayload.errorDetails = finalError.details;
431 |     } else {
432 |       logPayload.errorDetails = consolidatedDetails;
433 |     }
434 | 
435 |     if (includeStack) {
436 |       const stack =
437 |         finalError instanceof Error ? finalError.stack : originalStack;
438 |       if (stack) {
439 |         logPayload.stack = stack;
440 |       }
441 |     }
442 | 
443 |     logger.error(
444 |       finalError.message || originalErrorMessage,
445 |       logPayload as unknown as RequestContext, // Cast to RequestContext for logger compatibility
446 |     );
447 | 
448 |     if (rethrow) {
449 |       throw finalError;
450 |     }
451 |     return finalError;
452 |   }
453 | 
454 |   /**
455 |    * Maps an error to a specific error type `T` based on `ErrorMapping` rules.
456 |    * Returns the original error (or one from the default factory) if no mapping matches.
457 |    * @template T The target error type, extending `Error`.
458 |    * @param error - The error instance or value to map.
459 |    * @param mappings - An array of mapping rules to apply.
460 |    * @param defaultFactory - Optional factory for a default error if no mapping matches.
461 |    * @returns The mapped error of type `T`, or the original/defaulted error.
462 |    */
463 |   public static mapError<T extends Error>(
464 |     error: unknown,
465 |     mappings: ReadonlyArray<ErrorMapping<T>>,
466 |     defaultFactory?: (error: unknown, context?: Record<string, unknown>) => T,
467 |   ): T | Error {
468 |     const errorMessage = getErrorMessage(error);
469 |     const errorName = getErrorName(error);
470 | 
471 |     for (const mapping of mappings) {
472 |       const regex = createSafeRegex(mapping.pattern);
473 |       if (regex.test(errorMessage) || regex.test(errorName)) {
474 |         return mapping.factory(error, mapping.additionalContext);
475 |       }
476 |     }
477 | 
478 |     if (defaultFactory) {
479 |       return defaultFactory(error);
480 |     }
481 |     return error instanceof Error ? error : new Error(String(error));
482 |   }
483 | 
484 |   /**
485 |    * Formats an error into a consistent object structure for API responses or structured logging.
486 |    * @param error - The error instance or value to format.
487 |    * @returns A structured representation of the error.
488 |    */
489 |   public static formatError(error: unknown): Record<string, unknown> {
490 |     if (error instanceof McpError) {
491 |       return {
492 |         code: error.code,
493 |         message: error.message,
494 |         details:
495 |           typeof error.details === "object" && error.details !== null
496 |             ? error.details
497 |             : {},
498 |       };
499 |     }
500 | 
501 |     if (error instanceof Error) {
502 |       return {
503 |         code: ErrorHandler.determineErrorCode(error),
504 |         message: error.message,
505 |         details: { errorType: error.name || "Error" },
506 |       };
507 |     }
508 | 
509 |     return {
510 |       code: BaseErrorCode.UNKNOWN_ERROR,
511 |       message: getErrorMessage(error),
512 |       details: { errorType: getErrorName(error) },
513 |     };
514 |   }
515 | 
516 |   /**
517 |    * Safely executes a function (sync or async) and handles errors using `ErrorHandler.handleError`.
518 |    * The error is always rethrown.
519 |    * @template T The expected return type of the function `fn`.
520 |    * @param fn - The function to execute.
521 |    * @param options - Error handling options (excluding `rethrow`).
522 |    * @returns A promise resolving with the result of `fn` if successful.
523 |    * @throws {McpError | Error} The error processed by `ErrorHandler.handleError`.
524 |    * @example
525 |    * ```typescript
526 |    * async function fetchData(userId: string, context: RequestContext) {
527 |    *   return ErrorHandler.tryCatch(
528 |    *     async () => {
529 |    *       const response = await fetch(`/api/users/${userId}`);
530 |    *       if (!response.ok) throw new Error(`Failed to fetch user: ${response.status}`);
531 |    *       return response.json();
532 |    *     },
533 |    *     { operation: 'fetchUserData', context, input: { userId } }
534 |    *   );
535 |    * }
536 |    * ```
537 |    */
538 |   public static async tryCatch<T>(
539 |     fn: () => Promise<T> | T,
540 |     options: Omit<ErrorHandlerOptions, "rethrow">,
541 |   ): Promise<T> {
542 |     try {
543 |       return await Promise.resolve(fn());
544 |     } catch (error) {
545 |       // ErrorHandler.handleError will return the error to be thrown.
546 |       throw ErrorHandler.handleError(error, { ...options, rethrow: true });
547 |     }
548 |   }
549 | }
550 | 
```
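For reference, a minimal usage sketch of `ErrorHandler.handleError` on a non-rethrowing path, relying only on the options and context fields visible in the class above. The import path, the `cleanupTempFiles` operation name, and the request-context values are illustrative assumptions, not code from this repository.

```typescript
// Hypothetical caller; the import path is an assumption about the project layout.
import { ErrorHandler } from "../../utils/internal/errorHandler.js";

export async function cleanupTempFiles(dir: string): Promise<void> {
  try {
    // ... best-effort cleanup work that may throw ...
  } catch (error) {
    // Normalize and log, but do not rethrow: `rethrow` stays at its default (false),
    // so handleError returns the final McpError instead of throwing it.
    const finalError = ErrorHandler.handleError(error, {
      operation: "cleanupTempFiles",
      context: {
        requestId: "req-temp-cleanup",
        timestamp: new Date().toISOString(),
      },
      input: { dir },
      critical: false,
    });
    // The normalized error remains available for callers that want to inspect it.
    console.debug(`Cleanup failed: ${finalError.message}`);
  }
}
```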
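A hedged sketch of `ErrorHandler.mapError` with one mapping rule and a default factory. It relies only on the `pattern`, `factory`, and `additionalContext` fields that the loop above actually reads; the full `ErrorMapping` type may require more. The import paths, the `normalizeNcbiError` helper name, and the use of `BaseErrorCode.INTERNAL_ERROR` for both branches are assumptions for illustration.

```typescript
// Illustrative only; import paths are assumptions about the project layout.
import { ErrorHandler } from "../../utils/internal/errorHandler.js";
import { BaseErrorCode, McpError } from "../../types-global/errors.js";

// Hypothetical helper that maps low-level NCBI client failures to McpError.
export function normalizeNcbiError(error: unknown): Error {
  return ErrorHandler.mapError(
    error,
    [
      {
        // Matched against both the error message and the error name.
        pattern: /timeout|ETIMEDOUT/i,
        factory: (err: unknown, ctx?: Record<string, unknown>) =>
          new McpError(BaseErrorCode.INTERNAL_ERROR, "NCBI request timed out", {
            ...ctx,
            cause: err instanceof Error ? err : undefined,
          }),
        additionalContext: { service: "NCBI E-utilities" },
      },
    ],
    // Default factory used when no pattern matches.
    (err) =>
      new McpError(
        BaseErrorCode.INTERNAL_ERROR,
        `Unexpected NCBI failure: ${String(err)}`,
      ),
  );
}
```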
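Finally, a small sketch of how `ErrorHandler.formatError` could back an HTTP error body. Only the `{ code, message, details }` shape returned above is relied on; the handler name, the status-code mapping, and the import paths are illustrative assumptions rather than the project's actual httpErrorHandler logic.

```typescript
// Illustrative only; import paths are assumptions about the project layout.
import { ErrorHandler } from "../../utils/internal/errorHandler.js";
import { McpError } from "../../types-global/errors.js";

// Hypothetical response shaper for an HTTP error handler.
export function toErrorResponse(error: unknown): {
  status: number;
  body: Record<string, unknown>;
} {
  // formatError always yields a { code, message, details } record.
  const formatted = ErrorHandler.formatError(error);
  // Simplified mapping for the sketch: known McpErrors as 400, everything else as 500.
  const status = error instanceof McpError ? 400 : 500;
  return { status, body: { error: formatted } };
}
```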