This is page 2 of 4. Use http://codebase.md/cyanheads/pubmed-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .clinerules
│   └── clinerules.md
├── .dockerignore
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── publish.yml
├── .gitignore
├── .ncurc.json
├── CHANGELOG.md
├── Dockerfile
├── docs
│   ├── project-spec.md
│   ├── publishing-mcp-server-registry.md
│   └── tree.md
├── eslint.config.js
├── examples
│   ├── generate_pubmed_chart
│   │   ├── bar_chart.png
│   │   ├── doughnut_chart.png
│   │   ├── line_chart.png
│   │   ├── pie_chart.png
│   │   ├── polar_chart.png
│   │   ├── radar_chart.png
│   │   └── scatter_plot.png
│   ├── pubmed_article_connections_1.md
│   ├── pubmed_article_connections_2.md
│   ├── pubmed_fetch_contents_example.md
│   ├── pubmed_research_agent_example.md
│   └── pubmed_search_articles_example.md
├── LICENSE
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── repomix.config.json
├── scripts
│   ├── clean.ts
│   ├── fetch-openapi-spec.ts
│   ├── make-executable.ts
│   ├── tree.ts
│   └── validate-mcp-publish-schema.ts
├── server.json
├── smithery.yaml
├── src
│   ├── config
│   │   └── index.ts
│   ├── index.ts
│   ├── mcp-server
│   │   ├── server.ts
│   │   ├── tools
│   │   │   ├── pubmedArticleConnections
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic
│   │   │   │   │   ├── citationFormatter.ts
│   │   │   │   │   ├── elinkHandler.ts
│   │   │   │   │   ├── index.ts
│   │   │   │   │   └── types.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedFetchContents
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedGenerateChart
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   ├── pubmedResearchAgent
│   │   │   │   ├── index.ts
│   │   │   │   ├── logic
│   │   │   │   │   ├── index.ts
│   │   │   │   │   ├── inputSchema.ts
│   │   │   │   │   ├── outputTypes.ts
│   │   │   │   │   └── planOrchestrator.ts
│   │   │   │   ├── logic.ts
│   │   │   │   └── registration.ts
│   │   │   └── pubmedSearchArticles
│   │   │       ├── index.ts
│   │   │       ├── logic.ts
│   │   │       └── registration.ts
│   │   └── transports
│   │       ├── auth
│   │       │   ├── authFactory.ts
│   │       │   ├── authMiddleware.ts
│   │       │   ├── index.ts
│   │       │   ├── lib
│   │       │   │   ├── authContext.ts
│   │       │   │   ├── authTypes.ts
│   │       │   │   └── authUtils.ts
│   │       │   └── strategies
│   │       │       ├── authStrategy.ts
│   │       │       ├── jwtStrategy.ts
│   │       │       └── oauthStrategy.ts
│   │       ├── core
│   │       │   ├── baseTransportManager.ts
│   │       │   ├── headerUtils.ts
│   │       │   ├── honoNodeBridge.ts
│   │       │   ├── statefulTransportManager.ts
│   │       │   ├── statelessTransportManager.ts
│   │       │   └── transportTypes.ts
│   │       ├── http
│   │       │   ├── httpErrorHandler.ts
│   │       │   ├── httpTransport.ts
│   │       │   ├── httpTypes.ts
│   │       │   ├── index.ts
│   │       │   └── mcpTransportMiddleware.ts
│   │       └── stdio
│   │           ├── index.ts
│   │           └── stdioTransport.ts
│   ├── services
│   │   └── NCBI
│   │       ├── core
│   │       │   ├── ncbiConstants.ts
│   │       │   ├── ncbiCoreApiClient.ts
│   │       │   ├── ncbiRequestQueueManager.ts
│   │       │   ├── ncbiResponseHandler.ts
│   │       │   └── ncbiService.ts
│   │       └── parsing
│   │           ├── eSummaryResultParser.ts
│   │           ├── index.ts
│   │           ├── pubmedArticleStructureParser.ts
│   │           └── xmlGenericHelpers.ts
│   ├── types-global
│   │   ├── declarations.d.ts
│   │   ├── errors.ts
│   │   └── pubmedXml.ts
│   └── utils
│       ├── index.ts
│       ├── internal
│       │   ├── errorHandler.ts
│       │   ├── index.ts
│       │   ├── logger.ts
│       │   ├── performance.ts
│       │   └── requestContext.ts
│       ├── metrics
│       │   ├── index.ts
│       │   └── tokenCounter.ts
│       ├── network
│       │   ├── fetchWithTimeout.ts
│       │   └── index.ts
│       ├── parsing
│       │   ├── dateParser.ts
│       │   ├── index.ts
│       │   └── jsonParser.ts
│       ├── scheduling
│       │   ├── index.ts
│       │   └── scheduler.ts
│       ├── security
│       │   ├── idGenerator.ts
│       │   ├── index.ts
│       │   ├── rateLimiter.ts
│       │   └── sanitization.ts
│       └── telemetry
│           ├── instrumentation.ts
│           └── semconv.ts
├── tsconfig.json
├── tsconfig.typedoc.json
├── tsdoc.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/src/mcp-server/transports/core/statelessTransportManager.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Implements a stateless transport manager for the MCP SDK.
 *
 * This manager handles single, ephemeral MCP operations. For each incoming request,
 * it dynamically creates a temporary McpServer and transport instance, processes the
 * request, and then immediately schedules the resources for cleanup. This approach
 * is ideal for simple, one-off tool calls that do not require persistent session state.
 *
 * The key challenge addressed here is bridging the Node.js-centric MCP SDK with
 * modern, Web Standards-based frameworks like Hono. This is achieved by deferring
 * resource cleanup until the response stream has been fully consumed by the web
 * framework, preventing premature closure and truncated responses.
 *
 * @module src/mcp-server/transports/core/statelessTransportManager
 */

import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import type { IncomingHttpHeaders, ServerResponse } from "http";
import { Readable } from "stream";
import {
  ErrorHandler,
  logger,
  RequestContext,
  requestContextService,
} from "../../../utils/index.js";
import { BaseTransportManager } from "./baseTransportManager.js";
import { HonoStreamResponse } from "./honoNodeBridge.js";
import { convertNodeHeadersToWebHeaders } from "./headerUtils.js";
import { HttpStatusCode, TransportResponse } from "./transportTypes.js";

/**
 * Manages ephemeral, single-request MCP operations.
 */
export class StatelessTransportManager extends BaseTransportManager {
  /**
   * Handles a single, stateless MCP request.
   *
   * This method orchestrates the creation of temporary server and transport instances,
   * handles the request, and ensures resources are cleaned up only after the
   * response stream is closed.
   *
   * @param headers - The incoming request headers.
   * @param body - The parsed body of the request.
   * @param context - The request context for logging and tracing.
   * @returns A promise resolving to a streaming TransportResponse.
   */
  async handleRequest(
    headers: IncomingHttpHeaders,
    body: unknown,
    context: RequestContext,
  ): Promise<TransportResponse> {
    const opContext = {
      ...context,
      operation: "StatelessTransportManager.handleRequest",
    };
    logger.debug(
      "Creating ephemeral server instance for stateless request.",
      opContext,
    );

    let server: McpServer | undefined;
    let transport: StreamableHTTPServerTransport | undefined;

    try {
      // 1. Create ephemeral instances for this request.
      server = await this.createServerInstanceFn();
      transport = new StreamableHTTPServerTransport({
        sessionIdGenerator: undefined,
        onsessioninitialized: undefined,
      });

      await server.connect(transport);
      logger.debug("Ephemeral server connected to transport.", opContext);

      // 2. Set up the Node.js-to-Web stream bridge.
      const mockReq = {
        headers,
        method: "POST",
      } as import("http").IncomingMessage;
      const mockResBridge = new HonoStreamResponse();

      // 3. Defer cleanup until the stream is fully processed.
      // This is the critical fix to prevent premature resource release.
      this.setupDeferredCleanup(mockResBridge, server, transport, opContext);

      // 4. Process the request using the MCP transport.
      const mockRes = mockResBridge as unknown as ServerResponse;
      await transport.handleRequest(mockReq, mockRes, body);

      logger.info("Stateless request handled successfully.", opContext);

      // 5. Convert headers and create the final streaming response.
      const responseHeaders = convertNodeHeadersToWebHeaders(
        mockRes.getHeaders(),
      );
      const webStream = Readable.toWeb(
        mockResBridge,
      ) as ReadableStream<Uint8Array>;

      return {
        type: "stream",
        headers: responseHeaders,
        statusCode: mockRes.statusCode as HttpStatusCode,
        stream: webStream,
      };
    } catch (error) {
      // If an error occurs before the stream is returned, we must clean up immediately.
      if (server || transport) {
        this.cleanup(server, transport, opContext);
      }
      throw ErrorHandler.handleError(error, {
        operation: "StatelessTransportManager.handleRequest",
        context: opContext,
        rethrow: true,
      });
    }
  }

  /**
   * Attaches listeners to the response stream to trigger resource cleanup
   * only after the stream has been fully consumed or has errored.
   *
   * @param stream - The response stream bridge.
   * @param server - The ephemeral McpServer instance.
   * @param transport - The ephemeral transport instance.
   * @param context - The request context for logging.
   */
  private setupDeferredCleanup(
    stream: HonoStreamResponse,
    server: McpServer,
    transport: StreamableHTTPServerTransport,
    context: RequestContext,
  ): void {
    let cleanedUp = false;
    const cleanupFn = (error?: Error) => {
      if (cleanedUp) return;
      cleanedUp = true;

      if (error) {
        logger.warning("Stream ended with an error, proceeding to cleanup.", {
          ...context,
          error: error.message,
        });
      }
      // Cleanup is fire-and-forget.
      this.cleanup(server, transport, context);
    };

    // 'close' is the most reliable event, firing on both normal completion and abrupt termination.
    stream.on("close", () => cleanupFn());
    stream.on("error", (err) => cleanupFn(err));
  }

  /**
   * Performs the actual cleanup of ephemeral resources.
   * This method is designed to be "fire-and-forget".
   */
  private cleanup(
    server: McpServer | undefined,
    transport: StreamableHTTPServerTransport | undefined,
    context: RequestContext,
  ): void {
    const opContext = {
      ...context,
      operation: "StatelessTransportManager.cleanup",
    };
    logger.debug("Scheduling cleanup for ephemeral resources.", opContext);

    Promise.all([transport?.close(), server?.close()])
      .then(() => {
        logger.debug("Ephemeral resources cleaned up successfully.", opContext);
      })
      .catch((cleanupError) => {
        logger.warning("Error during stateless resource cleanup.", {
          ...opContext,
          error:
            cleanupError instanceof Error
              ? cleanupError.message
              : String(cleanupError),
        });
      });
  }

  /**
   * Shuts down the manager. For the stateless manager, this is a no-op
   * as there are no persistent resources to manage.
   */
  async shutdown(): Promise<void> {
    const context = requestContextService.createRequestContext({
      operation: "StatelessTransportManager.shutdown",
    });
    logger.info(
      "Stateless transport manager shutdown - no persistent resources to clean up.",
      context,
    );
    return Promise.resolve();
  }
}

```
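
Usage note: a minimal sketch of wiring this manager into a Hono route. It assumes `BaseTransportManager` accepts the server factory as its constructor argument and that an application-level `buildMcpServer` factory exists; both are illustrative, not confirmed by this file.

```typescript
import { Hono } from "hono";
import type { IncomingHttpHeaders } from "http";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { requestContextService } from "../../../utils/index.js";
import { StatelessTransportManager } from "./statelessTransportManager.js";

declare function buildMcpServer(): Promise<McpServer>; // assumed app-level factory

// Assumption: the base class takes the factory as its constructor argument.
const manager = new StatelessTransportManager(buildMcpServer);

const app = new Hono();
app.post("/mcp", async (c) => {
  const context = requestContextService.createRequestContext({
    operation: "honoMcpRoute",
  });
  const body = await c.req.json();
  // Node-style lowercased header map, as expected by handleRequest.
  const headers = Object.fromEntries(c.req.raw.headers) as IncomingHttpHeaders;

  const result = await manager.handleRequest(headers, body, context);
  if (result.type === "stream") {
    // Hand the web stream straight to the runtime; the deferred cleanup
    // fires once this stream is fully consumed and closed.
    return new Response(result.stream, {
      status: result.statusCode,
      headers: result.headers,
    });
  }
  return c.json({ error: "Unexpected transport response type" }, 500);
});
```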

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedGenerateChart/logic.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Core logic for the pubmed_generate_chart tool.
 * Generates charts from parameterized input by creating Chart.js configurations
 * and rendering them on the server using chartjs-node-canvas.
 * @module src/mcp-server/tools/pubmedGenerateChart/logic
 */
import { ChartConfiguration } from "chart.js";
import { ChartJSNodeCanvas } from "chartjs-node-canvas";
import { z } from "zod";
import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
import {
  logger,
  RequestContext,
  requestContextService,
  sanitizeInputForLogging,
} from "../../../utils/index.js";

export const PubMedGenerateChartInputSchema = z.object({
  chartType: z
    .enum([
      "bar",
      "line",
      "scatter",
      "pie",
      "doughnut",
      "bubble",
      "radar",
      "polarArea",
    ])
    .describe("Specifies the type of chart to generate."),
  title: z
    .string()
    .optional()
    .describe("The main title displayed above the chart."),
  width: z
    .number()
    .int()
    .positive()
    .optional()
    .default(800)
    .describe("The width of the chart canvas in pixels."),
  height: z
    .number()
    .int()
    .positive()
    .optional()
    .default(600)
    .describe("The height of the chart canvas in pixels."),
  dataValues: z
    .array(z.record(z.string(), z.any()))
    .min(1)
    .describe(
      "An array of data objects to plot the chart (e.g., [{ 'year': '2020', 'articles': 150 }]).",
    ),
  outputFormat: z
    .enum(["png"])
    .default("png")
    .describe("Specifies the output format for the chart."),
  xField: z
    .string()
    .describe("The field name from `dataValues` for the X-axis."),
  yField: z
    .string()
    .describe("The field name from `dataValues` for the Y-axis."),
  seriesField: z
    .string()
    .optional()
    .describe(
      "The field name for creating multiple data series on the same chart.",
    ),
  sizeField: z
    .string()
    .optional()
    .describe("For bubble charts, the field name for encoding bubble size."),
});

export type PubMedGenerateChartInput = z.infer<
  typeof PubMedGenerateChartInputSchema
>;

export type PubMedGenerateChartOutput = {
  base64Data: string;
  chartType: string;
  dataPoints: number;
};

// Helper to group data by a series field
function groupDataBySeries(
  data: Record<string, unknown>[],
  xField: string,
  yField: string,
  seriesField: string,
) {
  const series = new Map<string, { x: unknown; y: unknown }[]>();
  for (const item of data) {
    const seriesName = item[seriesField] as string;
    if (!series.has(seriesName)) {
      series.set(seriesName, []);
    }
    series.get(seriesName)!.push({ x: item[xField], y: item[yField] });
  }
  return series;
}

export async function pubmedGenerateChartLogic(
  input: PubMedGenerateChartInput,
  parentRequestContext: RequestContext,
): Promise<PubMedGenerateChartOutput> {
  const operationContext = requestContextService.createRequestContext({
    parentRequestId: parentRequestContext.requestId,
    operation: "pubmedGenerateChartLogicExecution",
    input: sanitizeInputForLogging(input),
  });

  logger.info(
    `Executing 'pubmed_generate_chart' with Chart.js. Chart type: ${input.chartType}`,
    operationContext,
  );

  const {
    width,
    height,
    chartType,
    dataValues,
    xField,
    yField,
    title,
    seriesField,
    sizeField,
  } = input;

  const chartJSNodeCanvas = new ChartJSNodeCanvas({
    width,
    height,
    chartCallback: (ChartJS) => {
      ChartJS.defaults.responsive = false;
      ChartJS.defaults.maintainAspectRatio = false;
    },
  });

  const labels = [
    ...new Set(dataValues.map((item) => item[xField])),
  ] as string[];
  let datasets: ChartConfiguration["data"]["datasets"];

  if (seriesField) {
    const groupedData = groupDataBySeries(
      dataValues,
      xField,
      yField,
      seriesField,
    );
    datasets = Array.from(groupedData.entries()).map(([seriesName, data]) => ({
      label: seriesName,
      data: labels.map((label) => {
        const point = data.find((p) => p.x === label);
        return point ? (point.y as number) : null;
      }),
    }));
  } else {
    datasets = [
      {
        label: yField,
        data: labels.map((label) => {
          const item = dataValues.find((d) => d[xField] === label);
          return item ? (item[yField] as number) : null;
        }),
      },
    ];
  }

  // For scatter and bubble charts, the data format is different
  if (chartType === "scatter" || chartType === "bubble") {
    if (seriesField) {
      const groupedData = groupDataBySeries(
        dataValues,
        xField,
        yField,
        seriesField,
      );
      datasets = Array.from(groupedData.entries()).map(
        ([seriesName, data]) => ({
          label: seriesName,
          data: data.map((point) => ({
            x: point.x as number,
            y: point.y as number,
            r:
              chartType === "bubble" && sizeField
                ? (dataValues.find((d) => d[xField] === point.x)![
                    sizeField
                  ] as number)
                : undefined,
          })),
        }),
      );
    } else {
      datasets = [
        {
          label: yField,
          data: dataValues.map((item) => ({
            x: item[xField] as number,
            y: item[yField] as number,
            r:
              chartType === "bubble" && sizeField
                ? (item[sizeField] as number)
                : undefined,
          })),
        },
      ];
    }
  }

  const configuration: ChartConfiguration = {
    type: chartType,
    data: {
      labels:
        chartType !== "scatter" && chartType !== "bubble" ? labels : undefined,
      datasets: datasets,
    },
    options: {
      plugins: {
        title: {
          display: !!title,
          text: title,
        },
      },
      scales:
        chartType === "pie" ||
        chartType === "doughnut" ||
        chartType === "polarArea"
          ? undefined
          : {
              x: {
                title: {
                  display: true,
                  text: xField,
                },
              },
              y: {
                title: {
                  display: true,
                  text: yField,
                },
              },
            },
    },
  };

  try {
    const imageBuffer = await chartJSNodeCanvas.renderToBuffer(configuration);
    const base64Data = imageBuffer.toString("base64");

    logger.notice("Successfully generated chart with Chart.js.", {
      ...operationContext,
      chartType: input.chartType,
      dataPoints: input.dataValues.length,
    });

    return {
      base64Data,
      chartType: input.chartType,
      dataPoints: input.dataValues.length,
    };
  } catch (error: unknown) {
    const err = error as Error;
    throw new McpError(
      BaseErrorCode.INTERNAL_ERROR,
      `Chart generation failed: ${err.message || "Internal server error during chart generation."}`,
      {
        ...operationContext,
        originalErrorName: err.name,
        originalErrorMessage: err.message,
      },
    );
  }
}

```
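
Usage note: a minimal sketch of invoking this logic directly. The sample data is made up; `parse` applies the schema defaults (800×600 canvas, PNG output) before rendering.

```typescript
import { requestContextService } from "../../../utils/index.js";
import {
  PubMedGenerateChartInputSchema,
  pubmedGenerateChartLogic,
} from "./logic.js";

// Validate raw arguments with the Zod schema, which also fills in defaults.
const input = PubMedGenerateChartInputSchema.parse({
  chartType: "bar",
  title: "Publications per Year",
  dataValues: [
    { year: "2021", articles: 120 },
    { year: "2022", articles: 150 },
    { year: "2023", articles: 180 },
  ],
  xField: "year",
  yField: "articles",
});

const context = requestContextService.createRequestContext({
  operation: "chartExample",
});
const { base64Data, dataPoints } = await pubmedGenerateChartLogic(
  input,
  context,
);
console.log(`Rendered ${dataPoints} data points (${base64Data.length} base64 chars).`);
```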

--------------------------------------------------------------------------------
/src/utils/telemetry/instrumentation.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview OpenTelemetry SDK initialization and lifecycle management.
 * This file MUST be imported before any other module in the application's
 * entry point (`src/index.ts`) to ensure all modules are correctly instrumented.
 * It handles both the initialization (startup) and graceful shutdown of the SDK.
 * @module src/utils/telemetry/instrumentation
 */
import { DiagConsoleLogger, DiagLogLevel, diag } from "@opentelemetry/api";
import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import { WinstonInstrumentation } from "@opentelemetry/instrumentation-winston";
import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
import { resourceFromAttributes } from "@opentelemetry/resources";
import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
import { NodeSDK } from "@opentelemetry/sdk-node";
import {
  BatchSpanProcessor,
  ReadableSpan,
  SpanProcessor,
  TraceIdRatioBasedSampler,
} from "@opentelemetry/sdk-trace-node";
import {
  ATTR_SERVICE_NAME,
  ATTR_SERVICE_VERSION,
} from "@opentelemetry/semantic-conventions/incubating";
import path from "path";
import winston from "winston";
import { config } from "../../config/index.js";

export let sdk: NodeSDK | null = null;

if (config.openTelemetry.enabled) {
  // --- Custom Diagnostic Logger for OpenTelemetry ---
  class OtelDiagnosticLogger extends DiagConsoleLogger {
    private winstonLogger: winston.Logger;
    constructor(logLevel: DiagLogLevel) {
      super();
      const logsDir = config.logsPath;
      if (!logsDir) {
        if (process.stdout.isTTY) {
          console.error(
            "OpenTelemetry Diagnostics: Log directory not available. Diagnostics will be written to console only.",
          );
        }
        this.winstonLogger = winston.createLogger({
          level: DiagLogLevel[logLevel].toLowerCase(),
          transports: [new winston.transports.Console()],
        });
        return;
      }
      this.winstonLogger = winston.createLogger({
        level: DiagLogLevel[logLevel].toLowerCase(),
        format: winston.format.combine(
          winston.format.timestamp(),
          winston.format.json(),
        ),
        transports: [
          new winston.transports.File({
            filename: path.join(logsDir, "opentelemetry.log"),
            maxsize: 5 * 1024 * 1024,
            maxFiles: 3,
          }),
        ],
      });
    }
    public override error = (message: string, ...args: unknown[]): void => {
      this.winstonLogger.error(message, { args });
    };
    public override warn = (message: string, ...args: unknown[]): void => {
      this.winstonLogger.warn(message, { args });
    };
    public override info = (message: string, ...args: unknown[]): void => {
      this.winstonLogger.info(message, { args });
    };
    public override debug = (message: string, ...args: unknown[]): void => {
      this.winstonLogger.debug(message, { args });
    };
    public override verbose = (message: string, ...args: unknown[]): void => {
      this.winstonLogger.verbose(message, { args });
    };
  }

  /**
   * A custom SpanProcessor that writes ended spans to a log file using Winston.
   */
  class FileSpanProcessor implements SpanProcessor {
    private traceLogger: winston.Logger;

    constructor() {
      const logsDir = config.logsPath;
      if (!logsDir) {
        diag.error(
          "[FileSpanProcessor] Cannot initialize: logsPath is not available.",
        );
        this.traceLogger = winston.createLogger({ silent: true });
        return;
      }
      this.traceLogger = winston.createLogger({
        format: winston.format.json(),
        transports: [
          new winston.transports.File({
            filename: path.join(logsDir, "traces.log"),
            maxsize: 10 * 1024 * 1024, // 10MB
            maxFiles: 5,
          }),
        ],
      });
    }

    forceFlush(): Promise<void> {
      return Promise.resolve();
    }
    onStart(_span: ReadableSpan): void {}
    onEnd(span: ReadableSpan): void {
      const loggableSpan = {
        traceId: span.spanContext().traceId,
        spanId: span.spanContext().spanId,
        name: span.name,
        kind: span.kind,
        startTime: span.startTime,
        endTime: span.endTime,
        duration: span.duration,
        status: span.status,
        attributes: span.attributes,
        events: span.events,
      };
      this.traceLogger.info(loggableSpan);
    }
    shutdown(): Promise<void> {
      return new Promise((resolve) =>
        this.traceLogger.on("finish", resolve).end(),
      );
    }
  }

  try {
    const otelLogLevel =
      DiagLogLevel[
        config.openTelemetry.logLevel as keyof typeof DiagLogLevel
      ] ?? DiagLogLevel.INFO;
    diag.setLogger(new OtelDiagnosticLogger(otelLogLevel), otelLogLevel);

    const resource = resourceFromAttributes({
      [ATTR_SERVICE_NAME]: config.openTelemetry.serviceName,
      [ATTR_SERVICE_VERSION]: config.openTelemetry.serviceVersion,
      "deployment.environment.name": config.environment,
    });

    let spanProcessor: SpanProcessor;
    if (config.openTelemetry.tracesEndpoint) {
      diag.info(
        `Using OTLP exporter for traces, endpoint: ${config.openTelemetry.tracesEndpoint}`,
      );
      const traceExporter = new OTLPTraceExporter({
        url: config.openTelemetry.tracesEndpoint,
      });
      spanProcessor = new BatchSpanProcessor(traceExporter);
    } else {
      diag.info(
        "No OTLP endpoint configured. Using FileSpanProcessor for local trace logging.",
      );
      spanProcessor = new FileSpanProcessor();
    }

    const metricReader = config.openTelemetry.metricsEndpoint
      ? new PeriodicExportingMetricReader({
          exporter: new OTLPMetricExporter({
            url: config.openTelemetry.metricsEndpoint,
          }),
          exportIntervalMillis: 15000,
        })
      : undefined;

    sdk = new NodeSDK({
      resource,
      spanProcessors: [spanProcessor],
      metricReader,
      sampler: new TraceIdRatioBasedSampler(config.openTelemetry.samplingRatio),
      instrumentations: [
        getNodeAutoInstrumentations({
          "@opentelemetry/instrumentation-http": {
            enabled: true,
            ignoreIncomingRequestHook: (req) => req.url === "/healthz",
          },
          "@opentelemetry/instrumentation-fs": { enabled: false },
        }),
        new WinstonInstrumentation({
          enabled: true,
        }),
      ],
    });

    sdk.start();
    diag.info(
      `OpenTelemetry initialized for ${config.openTelemetry.serviceName} v${config.openTelemetry.serviceVersion}`,
    );
  } catch (error) {
    diag.error("Error initializing OpenTelemetry", error);
    process.exit(1);
  }
}

/**
 * Gracefully shuts down the OpenTelemetry SDK.
 * This function is called during the application's shutdown sequence.
 */
export async function shutdownOpenTelemetry() {
  if (sdk) {
    await sdk
      .shutdown()
      .then(() => diag.info("OpenTelemetry terminated"))
      .catch((error) => diag.error("Error terminating OpenTelemetry", error));
  }
}

```
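
Usage note: a sketch of the import ordering this module requires. `startServer` is a hypothetical export used only for illustration; the real `src/index.ts` may differ.

```typescript
// Importing the instrumentation module first runs its side effects (SDK start)
// before any other module loads, so auto-instrumentation can patch them.
import { shutdownOpenTelemetry } from "./utils/telemetry/instrumentation.js";
import { startServer } from "./mcp-server/server.js"; // hypothetical export

await startServer();

for (const signal of ["SIGINT", "SIGTERM"] as const) {
  process.on(signal, () => {
    // Flush pending spans and metrics before the process exits.
    void shutdownOpenTelemetry().finally(() => process.exit(0));
  });
}
```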

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedResearchAgent/logic/inputSchema.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Defines the Zod input schema and TypeScript types for the pubmed_research_agent tool.
 * This schema accepts detailed components of a research plan from the client,
 * which the tool will then structure into a standardized output format.
 * @module pubmedResearchAgent/logic/inputSchema
 */

import { z } from "zod";

export const PubMedResearchAgentInputSchema = z.object({
  // Overall Project Information
  project_title_suggestion: z
    .string()
    .min(5)
    .describe("A concise and descriptive title for the research project."),
  primary_research_goal: z
    .string()
    .min(10)
    .describe(
      'The main scientific objective or central question the research aims to address (e.g., "To investigate the role of TREM2 in microglial response to amyloid-beta plaques").',
    ),
  research_keywords: z
    .array(z.string().min(1))
    .min(1)
    .describe(
      'Core scientific keywords or MeSH terms defining the research domain (e.g., ["neuroinflammation", "Alzheimer\'s disease", "TREM2"]).',
    ),
  organism_focus: z
    .string()
    .optional()
    .describe(
      'Primary organism(s) or model systems (e.g., "Homo sapiens (iPSC-derived microglia)", "Mus musculus (5xFAD model)").',
    ),

  // Phase 1: Conception and Planning Inputs
  p1_introduction_and_background: z
    .string()
    .optional()
    .describe(
      "Brief overview of the research area, its significance, and relevant background information leading to this study.",
    ),
  p1_specific_research_question: z
    .string()
    .optional()
    .describe(
      "The precise, focused primary research question the study will answer.",
    ),
  p1_knowledge_gap: z
    .string()
    .optional()
    .describe(
      "Statement clearly identifying the specific gap in current knowledge this research addresses.",
    ),
  p1_primary_hypothesis: z
    .string()
    .optional()
    .describe(
      "The main, testable hypothesis. Should be clear, specific, and falsifiable.",
    ),
  p1_secondary_questions_or_hypotheses: z
    .array(z.string())
    .optional()
    .describe("Any secondary questions or hypotheses to be explored."),
  p1_pubmed_search_strategy_description: z
    .string()
    .optional()
    .describe(
      "Description of the primary literature search strategy (e.g., for PubMed), including key terms and database considerations.",
    ),

  p1_literature_review_scope: z
    .string()
    .optional()
    .describe(
      "The defined scope for the literature review (e.g., timeframes, study types, key themes).",
    ),
  p1_lit_review_databases_and_approach: z
    .string()
    .optional()
    .describe(
      "Key databases (e.g., PubMed, EMBASE) and the search approach (e.g., iterative queries, snowballing).",
    ),

  p1_experimental_paradigm: z
    .string()
    .optional()
    .describe(
      "The overarching experimental design or study type (e.g., 'comparative multi-omics analysis', 'longitudinal cohort study').",
    ),
  p1_data_acquisition_plan_existing_data: z
    .string()
    .optional()
    .describe(
      "Strategy for identifying and retrieving relevant existing datasets (databases, data types, tools).",
    ),
  p1_data_acquisition_plan_new_data: z
    .string()
    .optional()
    .describe(
      "Plan for generating novel data (data types, experimental models, key procedures, deposition plan).",
    ),
  p1_blast_utilization_plan: z
    .string()
    .optional()
    .describe(
      "If applicable, how sequence alignment services (e.g., NCBI BLAST) will be used (purpose, programs, databases).",
    ),
  p1_controls_and_rigor: z
    .string()
    .optional()
    .describe(
      "Description of key experimental controls and measures to ensure scientific rigor and reproducibility.",
    ),
  p1_methodological_challenges_and_mitigation: z
    .string()
    .optional()
    .describe(
      "Anticipated methodological challenges and proposed mitigation strategies.",
    ),

  // Phase 2: Data Collection and Processing Inputs
  p2_data_collection_methods_wet_lab: z
    .string()
    .optional()
    .describe(
      "Specific wet-lab protocols if new data is generated (sample prep, treatments, instruments).",
    ),
  p2_data_collection_methods_dry_lab: z
    .string()
    .optional()
    .describe(
      "Execution details for data retrieval from databases (queries, tools, accessioning).",
    ),
  p2_data_preprocessing_and_qc_plan: z
    .string()
    .optional()
    .describe(
      "Pipeline for data cleaning, preprocessing (e.g., alignment, normalization), and quality control (metrics, thresholds, tools).",
    ),

  // Phase 3: Analysis and Interpretation Inputs
  p3_data_analysis_strategy: z
    .string()
    .optional()
    .describe(
      "Core statistical and computational methods to analyze data and test hypotheses (tests, software, ML models if any).",
    ),
  p3_bioinformatics_pipeline_summary: z
    .string()
    .optional()
    .describe(
      "Summary of the bioinformatics pipeline for high-throughput data analysis (tools, downstream analyses).",
    ),
  p3_results_interpretation_framework: z
    .string()
    .optional()
    .describe(
      "Framework for evaluating findings against hypotheses (statistical significance, biological relevance).",
    ),
  p3_comparison_with_literature_plan: z
    .string()
    .optional()
    .describe(
      "Strategy for contextualizing results with existing literature and addressing discrepancies.",
    ),

  // Phase 4: Dissemination and Iteration Inputs
  p4_dissemination_manuscript_plan: z
    .string()
    .optional()
    .describe(
      "Plan for manuscript preparation (core message, target journal profile, key figures).",
    ),
  p4_dissemination_data_deposition_plan: z
    .string()
    .optional()
    .describe(
      "Strategy for depositing data in public repositories (types, repositories, FAIR principles).",
    ),
  p4_peer_review_and_publication_approach: z
    .string()
    .optional()
    .describe("Approach to journal submission and addressing peer review."),
  p4_future_research_directions: z
    .string()
    .optional()
    .describe(
      "Potential next steps, new questions, or translational applications arising from the research.",
    ),

  // Cross-Cutting Considerations Inputs
  cc_record_keeping_and_data_management: z
    .string()
    .optional()
    .describe(
      "Plan for record-keeping, version control, data storage, and DMP.",
    ),
  cc_collaboration_strategy: z
    .string()
    .optional()
    .describe(
      "If applicable, strategy for collaboration, communication, roles, and authorship.",
    ),
  cc_ethical_considerations: z
    .string()
    .optional()
    .describe(
      "Ethical considerations, IRB/IACUC approval plans, data privacy, RCR training.",
    ),

  // Meta-parameter from previous iterations, still useful
  include_detailed_prompts_for_agent: z
    .boolean()
    .optional()
    .default(false) // Default to false, meaning the tool primarily structures the detailed input.
    .describe(
      "If true, the tool will add more detailed instructive prompts/guidance within the output fields for a research agent. If false (default), it will primarily structure the provided inputs with minimal additional prompting.",
    ),
});

export type PubMedResearchAgentInput = z.infer<
  typeof PubMedResearchAgentInputSchema
>;

```
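
Usage note: a minimal sketch of validating input against this schema. Only the three non-optional fields are required; `include_detailed_prompts_for_agent` defaults to `false`.

```typescript
import {
  PubMedResearchAgentInput,
  PubMedResearchAgentInputSchema,
} from "./inputSchema.js";

const result = PubMedResearchAgentInputSchema.safeParse({
  project_title_suggestion: "TREM2 signaling in microglial amyloid response",
  primary_research_goal:
    "To investigate the role of TREM2 in microglial response to amyloid-beta plaques",
  research_keywords: ["neuroinflammation", "Alzheimer's disease", "TREM2"],
});

if (result.success) {
  const plan: PubMedResearchAgentInput = result.data;
  console.log(plan.include_detailed_prompts_for_agent); // false (schema default)
} else {
  console.error(result.error.issues);
}
```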

--------------------------------------------------------------------------------
/docs/publishing-mcp-server-registry.md:
--------------------------------------------------------------------------------

```markdown
# How to Publish Your MCP Server

This guide provides step-by-step instructions on how to publish your MCP server, based on the `mcp-ts-template`, to the official MCP registry.

The recommended method is to use the all-in-one `publish-mcp` script included in this template. It automates the entire workflow, from version synchronization and validation to committing and publishing.

## Prerequisites

- **MCP Publisher CLI**: You need the `mcp-publisher` command-line tool. If you don't have it, install it using one of the methods from the [official publishing guide](https://github.com/modelcontextprotocol/registry/blob/main/docs/guides/publishing/publish-server.md#step-1-install-the-publisher-cli) (e.g., `brew install mcp-publisher`).
- **[Bun](https://bun.sh/)**: Ensure you have Bun v1.2.0 or higher installed; the script runs under Bun.
- **GitHub Account**: Publishing to an `io.github.*` namespace requires you to authenticate with a corresponding GitHub account. The script will guide you through this.

## The Recommended Method: The All-in-One `publish-mcp` Script

This is the easiest and most reliable way to publish your server.

### Step 1: Run the Script

From the root of the project, simply run:

```bash
bun run publish-mcp
```

The script will handle all the necessary steps, including prompting you to log in with GitHub via your browser.

### What the Script Does Automatically

1.  **Syncs Metadata**: Reads `package.json` and updates the `version` and `mcpName` fields in `server.json` (and applies the version to all entries in `packages`).
2.  **Validates Schema**: Validates the updated `server.json` against the official MCP server schema.
3.  **Auto-Commits**: Creates a `git commit` for the `server.json` version bump.
4.  **Handles Authentication**: Kicks off the `mcp-publisher login github` command and waits for you to complete it.
5.  **Publishes**: Runs `mcp-publisher publish` to finalize the process.

### Advanced Control with Flags

You can customize the script's behavior with flags:

- `--validate-only`: Syncs and validates, then stops. Perfect for a pre-flight check.
- `--no-commit`: Skips the automatic Git commit step.
- `--publish-only`: Skips local file changes and proceeds directly to login and publish.
- `--sync-only`: Only syncs versions from `package.json` to `server.json`, then stops.

---

## Manual Fallback Workflow

If you need to perform each step manually, or wish to understand the process under the hood, you can follow these steps.

### Step 1: Review and Align Configuration

Before publishing, it's crucial to ensure that your server's configuration is consistent across the project. This prevents validation errors and ensures clients receive the correct metadata.

Review the following files:

1.  **`package.json`**:
    - Verify that the `version` matches the intended release version.
    - Update the `name` of your package if you have renamed it.
    - Update the `mcpName` field to reflect your desired server name (e.g., `io.github.your-username/your-server-name`). This name must be unique in the registry.

2.  **`server.json`**:
    - Update the `name` to match the `mcpName` in your `package.json`.
    - Ensure the `version` matches the one in `package.json`.
    - Check that the `packages[].identifier` field matches the `name` in your `package.json`.
    - Verify that each `packages[].version` also matches the version in `package.json`.
    - Add a `website_url` pointing to your project homepage or README (recommended for discoverability).
    - Consider adding `repository.id` (e.g., GitHub repo ID) for registry safety. You can obtain it with: `gh api repos/<owner>/<repo> --jq '.id'`.
    - Prefer HTTP transport URL templating so clients can override host/port/path without editing JSON, for example:
      ```json
      {
        "type": "streamable-http",
        "url": "http://{MCP_HTTP_HOST}:{MCP_HTTP_PORT}{MCP_HTTP_ENDPOINT_PATH}"
      }
      ```
      Provide corresponding entries in `packages[].environment_variables` such as `MCP_HTTP_HOST`, `MCP_HTTP_PORT`, and `MCP_HTTP_ENDPOINT_PATH` with sensible defaults.

3.  **`src/config/index.ts`**:
    - Look for any default values that might affect the server's runtime behavior, such as `mcpHttpPort`. The default HTTP port is currently `3010`. If you've configured a different port via environment variables for your deployment, ensure your `server.json` reflects that.

### Environment Variable Precedence (Important)

Depending on how you start the server, environment variables set in `package.json` scripts can override values provided via `server.json`'s `environment_variables`. For example, this template sets `MCP_LOG_LEVEL=debug` in `start:*` scripts. If you want `server.json` to be the source of truth for those values, remove or adjust the hardcoded env vars in scripts, or invoke the runtime directly (e.g., `bun ./dist/index.js`) and allow client-provided values to take effect.
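
For illustration, a script entry like this hypothetical excerpt pins the log level regardless of what `server.json` declares; dropping the `MCP_LOG_LEVEL=debug` prefix lets the `server.json` value take effect:

```json
{
  "scripts": {
    "start:http": "MCP_LOG_LEVEL=debug bun ./dist/index.js"
  }
}
```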

### Step 2: Validate the `server.json` Schema

This project includes a script to validate your `server.json` against the official MCP schema. This helps catch errors before you attempt to publish.

Run the validation using the all-in-one script with the `--validate-only` flag:

```bash
bun run publish-mcp --validate-only
```

This command will first sync the versions from `package.json` and then validate the resulting `server.json`.

### Step 3: Authenticate with the MCP Registry

Since the server name follows the `io.github.*` namespace, you must authenticate using GitHub. If you chose a different namespace (e.g., a custom domain), follow the appropriate authentication method outlined in the [official documentation](https://github.com/modelcontextprotocol/registry/blob/main/docs/guides/publishing/publish-server.md#step-4-authenticate).

Run the following command:

```bash
mcp-publisher login github
```

This will open a browser window, prompting you to authorize the MCP Publisher application with your GitHub account. Follow the on-screen instructions to complete the login process.

### Step 4: Publish the Server

Once you've aligned your configurations, validated the schema, and authenticated your session, you are ready to publish.

From the root directory of the project, execute the publish command:

```bash
mcp-publisher publish
```

The publisher CLI will read your `server.json`, perform server-side validation against the package registry (NPM, in this case), and, if successful, add your server entry to the MCP registry.

You should see a confirmation upon success:

```
✓ Successfully published
```

### Step 5: Verify the Publication

After publishing, you can verify that your server is listed in the registry by making a simple API request. Replace the placeholder with your server's name.

```bash
# Replace with your server name
curl "https://registry.modelcontextprotocol.io/v0/servers?search=io.github.your-username/your-server-name"
```

For example, this template server is located at:

```bash
curl "https://registry.modelcontextprotocol.io/v0/servers?search=io.github.cyanheads/mcp-ts-template"
```

The response should be a JSON object containing the metadata for your newly published or updated server.

---

## Automated Publishing with CI/CD

For a more robust workflow, consider automating this process using GitHub Actions. This ensures that every new release is automatically published without manual intervention. You can find a guide on setting this up here: [Automate publishing with GitHub Actions](https://github.com/modelcontextprotocol/registry/blob/main/docs/guides/publishing/github-actions.md).

```

--------------------------------------------------------------------------------
/src/utils/security/rateLimiter.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Provides a generic `RateLimiter` class for implementing rate limiting logic.
 * It supports configurable time windows, request limits, and automatic cleanup of expired entries.
 * @module src/utils/security/rateLimiter
 */
import { trace } from "@opentelemetry/api";
import { environment } from "../../config/index.js";
import { BaseErrorCode, McpError } from "../../types-global/errors.js";
import { logger, RequestContext, requestContextService } from "../index.js";

/**
 * Defines configuration options for the {@link RateLimiter}.
 */
export interface RateLimitConfig {
  /** Time window in milliseconds. */
  windowMs: number;
  /** Maximum number of requests allowed in the window. */
  maxRequests: number;
  /** Custom error message template. Can include `{waitTime}` placeholder. */
  errorMessage?: string;
  /** If true, skip rate limiting in development. */
  skipInDevelopment?: boolean;
  /** Optional function to generate a custom key for rate limiting. */
  keyGenerator?: (identifier: string, context?: RequestContext) => string;
  /** How often, in milliseconds, to clean up expired entries. */
  cleanupInterval?: number;
}

/**
 * Represents an individual entry for tracking requests against a rate limit key.
 */
export interface RateLimitEntry {
  /** Current request count. */
  count: number;
  /** When the window resets (timestamp in milliseconds). */
  resetTime: number;
}

/**
 * A generic rate limiter class using an in-memory store.
 * Controls frequency of operations based on unique keys.
 */
export class RateLimiter {
  /**
   * Stores current request counts and reset times for each key.
   * @private
   */
  private limits: Map<string, RateLimitEntry>;
  /**
   * Timer ID for periodic cleanup.
   * @private
   */
  private cleanupTimer: NodeJS.Timeout | null = null;

  /**
   * Default configuration values.
   * @private
   */
  private static DEFAULT_CONFIG: RateLimitConfig = {
    windowMs: 15 * 60 * 1000, // 15 minutes
    maxRequests: 100,
    errorMessage:
      "Rate limit exceeded. Please try again in {waitTime} seconds.",
    skipInDevelopment: false,
    cleanupInterval: 5 * 60 * 1000, // 5 minutes
  };

  /**
   * Creates a new `RateLimiter` instance.
   * @param config - Configuration options, merged with defaults.
   */
  constructor(private config: RateLimitConfig) {
    this.config = { ...RateLimiter.DEFAULT_CONFIG, ...config };
    this.limits = new Map();
    this.startCleanupTimer();
  }

  /**
   * Starts the periodic timer to clean up expired rate limit entries.
   * @private
   */
  private startCleanupTimer(): void {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
    }

    const interval =
      this.config.cleanupInterval ?? RateLimiter.DEFAULT_CONFIG.cleanupInterval;

    if (interval && interval > 0) {
      this.cleanupTimer = setInterval(() => {
        this.cleanupExpiredEntries();
      }, interval);

      if (this.cleanupTimer.unref) {
        this.cleanupTimer.unref(); // Allow Node.js process to exit if only timer active
      }
    }
  }

  /**
   * Removes expired rate limit entries from the store.
   * @private
   */
  private cleanupExpiredEntries(): void {
    const now = Date.now();
    let expiredCount = 0;

    for (const [key, entry] of this.limits.entries()) {
      if (now >= entry.resetTime) {
        this.limits.delete(key);
        expiredCount++;
      }
    }

    if (expiredCount > 0) {
      const logContext = requestContextService.createRequestContext({
        operation: "RateLimiter.cleanupExpiredEntries",
        cleanedCount: expiredCount,
        totalRemainingAfterClean: this.limits.size,
      });
      logger.debug(
        `Cleaned up ${expiredCount} expired rate limit entries`,
        logContext,
      );
    }
  }

  /**
   * Updates the configuration of the rate limiter instance.
   * @param config - New configuration options to merge.
   */
  public configure(config: Partial<RateLimitConfig>): void {
    this.config = { ...this.config, ...config };
    if (config.cleanupInterval !== undefined) {
      this.startCleanupTimer();
    }
  }

  /**
   * Retrieves a copy of the current rate limiter configuration.
   * @returns The current configuration.
   */
  public getConfig(): RateLimitConfig {
    return { ...this.config };
  }

  /**
   * Resets all rate limits by clearing the internal store.
   */
  public reset(): void {
    this.limits.clear();
    const logContext = requestContextService.createRequestContext({
      operation: "RateLimiter.reset",
    });
    logger.debug("Rate limiter reset, all limits cleared", logContext);
  }

  /**
   * Checks if a request exceeds the configured rate limit.
   * Throws an `McpError` if the limit is exceeded.
   *
   * @param key - A unique identifier for the request source.
   * @param context - Optional request context for custom key generation.
   * @throws {McpError} If the rate limit is exceeded.
   */
  public check(key: string, context?: RequestContext): void {
    const activeSpan = trace.getActiveSpan();
    activeSpan?.setAttribute("mcp.rate_limit.checked", true);

    if (this.config.skipInDevelopment && environment === "development") {
      activeSpan?.setAttribute("mcp.rate_limit.skipped", "development");
      return;
    }

    const limitKey = this.config.keyGenerator
      ? this.config.keyGenerator(key, context)
      : key;
    activeSpan?.setAttribute("mcp.rate_limit.key", limitKey);

    const now = Date.now();
    let entry = this.limits.get(limitKey);

    if (!entry || now >= entry.resetTime) {
      entry = {
        count: 1,
        resetTime: now + this.config.windowMs,
      };
      this.limits.set(limitKey, entry);
    } else {
      entry.count++;
    }

    const remaining = Math.max(0, this.config.maxRequests - entry.count);
    activeSpan?.setAttributes({
      "mcp.rate_limit.limit": this.config.maxRequests,
      "mcp.rate_limit.count": entry.count,
      "mcp.rate_limit.remaining": remaining,
    });

    if (entry.count > this.config.maxRequests) {
      const waitTime = Math.ceil((entry.resetTime - now) / 1000);
      const errorMessage = (
        this.config.errorMessage || RateLimiter.DEFAULT_CONFIG.errorMessage!
      ).replace("{waitTime}", waitTime.toString());

      activeSpan?.addEvent("rate_limit_exceeded", {
        "mcp.rate_limit.wait_time_seconds": waitTime,
      });

      throw new McpError(BaseErrorCode.RATE_LIMITED, errorMessage, {
        waitTimeSeconds: waitTime,
        key: limitKey,
        limit: this.config.maxRequests,
        windowMs: this.config.windowMs,
      });
    }
  }

  /**
   * Retrieves the current rate limit status for a specific key.
   * @param key - The rate limit key.
   * @returns Status object or `null` if no entry exists.
   */
  public getStatus(key: string): {
    current: number;
    limit: number;
    remaining: number;
    resetTime: number;
  } | null {
    const entry = this.limits.get(key);
    if (!entry) {
      return null;
    }
    return {
      current: entry.count,
      limit: this.config.maxRequests,
      remaining: Math.max(0, this.config.maxRequests - entry.count),
      resetTime: entry.resetTime,
    };
  }

  /**
   * Stops the cleanup timer and clears all rate limit entries.
   * Call when the rate limiter is no longer needed.
   */
  public dispose(): void {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
    }
    this.limits.clear();
  }
}

/**
 * Default singleton instance of the `RateLimiter`.
 * Initialized with default configuration. Use `rateLimiter.configure({})` to customize.
 */
export const rateLimiter = new RateLimiter({
  windowMs: 15 * 60 * 1000, // Default: 15 minutes
  maxRequests: 100, // Default: 100 requests per window
});

```
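
Usage note: a minimal sketch of the singleton in practice; `handleToolCall` and the client key are illustrative.

```typescript
import { McpError } from "../../types-global/errors.js";
import { rateLimiter } from "./rateLimiter.js";

// Tighten the shared limiter: 10 requests per minute per key.
rateLimiter.configure({ windowMs: 60_000, maxRequests: 10 });

function handleToolCall(clientId: string): void {
  try {
    rateLimiter.check(clientId); // throws McpError(RATE_LIMITED) when exceeded
  } catch (error) {
    if (error instanceof McpError) {
      console.warn(`Rejected ${clientId}: ${error.message}`);
      return;
    }
    throw error;
  }
  // Proceed with the actual work for this client.
  console.log(rateLimiter.getStatus(clientId)); // { current, limit, remaining, resetTime }
}

handleToolCall("client-abc");
```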

--------------------------------------------------------------------------------
/scripts/validate-mcp-publish-schema.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview This script automates the process of preparing and publishing an MCP server
 * to the MCP Registry. It performs the following steps in order:
 *
 * 1.  **Sync Metadata**: Reads `package.json` to get the `version` and `mcpName`,
 *     then updates `server.json` with these values.
 * 2.  **Validate Schema**: Validates the updated `server.json` against the official
 *     MCP server schema from the static CDN.
 * 3.  **Auto-Commit**: Automatically commits the updated `server.json` with a
 *     conventional commit message, only if there are changes.
 * 4.  **Authenticate**: Initiates `mcp-publisher login github` and waits for the user
 *     to complete the browser-based authentication.
 * 5.  **Publish**: Runs `mcp-publisher publish` to upload the server package to the registry.
 * 6.  **Verify**: Polls the registry to confirm the server is publicly available.
 *
 * It supports flags like `--validate-only` and `--no-commit` for flexible control.
 * @module scripts/validate-mcp-publish-schema
 */

import Ajv from "ajv";
import addFormats from "ajv-formats";
import axios from "axios";
import { execSync } from "child_process";
import fs from "fs/promises";
import path from "path";

// --- Constants ---
const PACKAGE_JSON_PATH = path.resolve(process.cwd(), "package.json");
const SERVER_JSON_PATH = path.resolve(process.cwd(), "server.json");
const MCP_SCHEMA_URL =
  "https://static.modelcontextprotocol.io/schemas/2025-07-09/server.schema.json";
const MCP_REGISTRY_URL = "https://registry.modelcontextprotocol.io/v0/servers";

// --- Helper Functions ---

function runCommand(command: string, stepName: string) {
  console.log(`\n--- 🚀 Starting Step: ${stepName} ---`);
  console.log(`> ${command}`);
  try {
    execSync(command, { stdio: "inherit" });
    console.log(`--- ✅ Finished Step: ${stepName} ---`);
  } catch (_error) {
    console.error(`\n--- ❌ Step Failed: ${stepName} ---`);
    console.error(`Command "${command}" failed.`);
    process.exit(1);
  }
}

async function verifyPublication(
  serverName: string,
  maxRetries = 5,
  delay = 3000,
) {
  const stepName = "Verify Publication";
  console.log(`\n--- 🚀 Starting Step: ${stepName} ---`);
  const searchUrl = `${MCP_REGISTRY_URL}?search=${serverName}`;
  console.log(`Querying: ${searchUrl}`);

  for (let i = 0; i < maxRetries; i++) {
    try {
      const response = await axios.get(searchUrl);
      if (
        response.data &&
        response.data.servers &&
        response.data.servers.length > 0
      ) {
        console.log(
          "✅ Verification successful! Server is live in the registry.",
        );
        console.log(`--- ✅ Finished Step: ${stepName} ---`);
        return;
      }
    } catch (error) {
      if (error instanceof Error) {
        console.warn(`Attempt ${i + 1} failed:`, error.message);
      } else {
        console.warn(`Attempt ${i + 1} failed:`, String(error));
      }
    }
    await new Promise((resolve) => setTimeout(resolve, delay));
  }
  console.error(`\n--- ❌ Step Failed: ${stepName} ---`);
  console.error(
    `Could not verify server publication after ${maxRetries} attempts.`,
  );
  process.exit(1);
}

async function syncMetadata(): Promise<{ version: string; mcpName: string }> {
  const stepName = "Sync Metadata from package.json";
  console.log(`\n--- 🚀 Starting Step: ${stepName} ---`);
  try {
    const pkgContent = await fs.readFile(PACKAGE_JSON_PATH, "utf-8");
    const serverContent = await fs.readFile(SERVER_JSON_PATH, "utf-8");
    const pkg = JSON.parse(pkgContent);
    const server = JSON.parse(serverContent);
    const { version, mcpName } = pkg;

    if (!version || !mcpName) {
      throw new Error(
        "`version` and/or `mcpName` are missing from package.json.",
      );
    }

    server.version = version;
    server.mcpName = mcpName;
    if (Array.isArray(server.packages)) {
      server.packages.forEach((p: { version?: string }) => {
        p.version = version;
      });
      console.log(`Updated version for ${server.packages.length} package(s).`);
    }

    await fs.writeFile(SERVER_JSON_PATH, JSON.stringify(server, null, 2));
    console.log(`Synced server.json to version "${version}".`);
    console.log(`--- ✅ Finished Step: ${stepName} ---`);
    return { version, mcpName };
  } catch (error) {
    console.error(`\n--- ❌ Step Failed: ${stepName} ---`, error);
    process.exit(1);
  }
}

function autoCommitChanges(version: string) {
  const stepName = "Auto-commit server.json";
  console.log(`\n--- 🚀 Starting Step: ${stepName} ---`);
  try {
    const status = execSync("git status --porcelain server.json")
      .toString()
      .trim();
    if (!status) {
      console.log("No changes to commit in server.json. Skipping.");
      console.log(`--- ✅ Finished Step: ${stepName} (No-op) ---`);
      return;
    }

    execSync("git add server.json");
    const commitMessage = `chore(release): bump server.json to v${version}`;
    const commitCommand = `git commit --no-verify -m "${commitMessage}"`;
    console.log(`> ${commitCommand}`);
    execSync(commitCommand);
    console.log("Successfully committed version bump for server.json.");
    console.log(`--- ✅ Finished Step: ${stepName} ---`);
  } catch (_error) {
    console.warn(`\n--- ⚠️ Step Skipped: ${stepName} ---`);
    console.warn("Failed to auto-commit. Please commit changes manually.");
  }
}

async function validateServerJson() {
  const stepName = "Validate server.json Schema";
  console.log(`\n--- 🚀 Starting Step: ${stepName} ---`);
  try {
    const { data: schema } = await axios.get(MCP_SCHEMA_URL);
    const serverJson = JSON.parse(await fs.readFile(SERVER_JSON_PATH, "utf-8"));
    const ajv = new Ajv({ strict: false });
    addFormats(ajv);
    const validate = ajv.compile(schema);
    if (!validate(serverJson)) {
      console.error("Validation failed:", validate.errors);
      throw new Error("server.json does not conform to the MCP schema.");
    }
    console.log("Validation successful!");
    console.log(`--- ✅ Finished Step: ${stepName} ---`);
  } catch (error) {
    console.error(`\n--- ❌ Step Failed: ${stepName} ---`, error);
    process.exit(1);
  }
}

async function main() {
  const args = process.argv.slice(2);
  const syncOnly = args.includes("--sync-only");
  const validateOnly = args.includes("--validate-only");
  const noCommit = args.includes("--no-commit");
  const publishOnly = args.includes("--publish-only");
  const verifyOnly = args.includes("--verify-only");

  console.log("🚀 Starting MCP Server Publish Workflow...");

  if (verifyOnly) {
    console.log("\n⚪ --verify-only flag detected. Skipping all other steps.");
    const pkg = JSON.parse(await fs.readFile(PACKAGE_JSON_PATH, "utf-8"));
    await verifyPublication(pkg.mcpName);
    console.log("\n🎉🎉🎉 Verification Complete! 🎉🎉🎉");
    return;
  }

  if (publishOnly) {
    console.log(
      "\n⚪ --publish-only flag detected. Skipping local file changes.",
    );
    runCommand("mcp-publisher login github", "Authenticate with GitHub");
    runCommand("mcp-publisher publish", "Publish to MCP Registry");
    const pkg = JSON.parse(await fs.readFile(PACKAGE_JSON_PATH, "utf-8"));
    await verifyPublication(pkg.mcpName);
    console.log("\n🎉🎉🎉 Publish Complete! 🎉🎉🎉");
    return;
  }

  const { version, mcpName } = await syncMetadata();
  if (syncOnly) {
    console.log("\n✅ --sync-only flag detected. Halting after metadata sync.");
    return;
  }

  await validateServerJson();
  if (validateOnly) {
    console.log(
      "\n✅ --validate-only flag detected. Halting after validation.",
    );
    return;
  }

  if (!noCommit) {
    autoCommitChanges(version);
  } else {
    console.log("\n⚪ --no-commit flag detected. Skipping auto-commit.");
  }

  runCommand("mcp-publisher login github", "Authenticate with GitHub");
  runCommand("mcp-publisher publish", "Publish to MCP Registry");
  await verifyPublication(mcpName);

  console.log(
    "\n🎉🎉🎉 Workflow Complete! Your server has been successfully published. 🎉🎉🎉",
  );
}

// --- Execute ---
main();

```
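
A quick sketch of what `syncMetadata` does to `server.json` (illustrative data only; the real file has more fields, and `registry_name` is a hypothetical key used here just for shape):

```typescript
// Hypothetical server.json contents before a release, as a TS literal.
const server = {
  mcpName: "io.github.example/pubmed-mcp-server", // illustrative name
  version: "1.4.1",
  packages: [{ registry_name: "npm", version: "1.4.1" }],
};

// syncMetadata() reads version/mcpName from package.json (say "1.4.2"),
// overwrites the top-level fields, and stamps every packages[] entry:
server.version = "1.4.2";
server.packages.forEach((p) => {
  p.version = "1.4.2";
});
// The result is written back with JSON.stringify(server, null, 2).
```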

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedSearchArticles/logic.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Logic for the pubmed_search_articles MCP tool.
 * Handles constructing ESearch and ESummary queries, interacting with
 * the NcbiService, and formatting the results.
 * @module src/mcp-server/tools/pubmedSearchArticles/logic
 */

import { z } from "zod";
import { getNcbiService } from "../../../services/NCBI/core/ncbiService.js";
import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
import {
  ESearchResult,
  ESummaryResponseContainer,
  ParsedBriefSummary,
} from "../../../types-global/pubmedXml.js";
import {
  logger,
  RequestContext,
  requestContextService,
  sanitizeInputForLogging,
} from "../../../utils/index.js";
import { extractBriefSummaries } from "../../../services/NCBI/parsing/index.js";
import { sanitization } from "../../../utils/security/sanitization.js";

export const PubMedSearchArticlesInputSchema = z.object({
  queryTerm: z
    .string()
    .min(3, "Query term must be at least 3 characters")
    .describe(
      "The primary keyword or phrase to search for in PubMed. Must be at least 3 characters long.",
    ),
  maxResults: z
    .number()
    .int()
    .positive()
    .max(1000, "Cannot request more than 1000 results per query (ESearch retmax limit).")
    .optional()
    .default(20)
    .describe(
      "Maximum number of articles to retrieve. Corresponds to ESearch's 'retmax' parameter. Default is 20, max is 1000.",
    ),
  sortBy: z
    .enum(["relevance", "pub_date", "author", "journal_name"])
    .optional()
    .default("relevance")
    .describe(
      "Sorting criteria for results. Options: 'relevance' (default), 'pub_date', 'author', 'journal_name'.",
    ),
  dateRange: z
    .object({
      minDate: z
        .string()
        .regex(
          /^\d{4}(\/\d{2}(\/\d{2})?)?$/,
          "Date must be YYYY, YYYY/MM, or YYYY/MM/DD",
        )
        .optional()
        .describe(
          "The start date for the search range (YYYY, YYYY/MM, or YYYY/MM/DD).",
        ),
      maxDate: z
        .string()
        .regex(
          /^\d{4}(\/\d{2}(\/\d{2})?)?$/,
          "Date must be YYYY, YYYY/MM, or YYYY/MM/DD",
        )
        .optional()
        .describe(
          "The end date for the search range (YYYY, YYYY/MM, or YYYY/MM/DD).",
        ),
      dateType: z
        .enum(["pdat", "mdat", "edat"])
        .optional()
        .default("pdat")
        .describe(
          "The type of date to filter by: 'pdat' (Publication Date), 'mdat' (Modification Date), 'edat' (Entrez Date). Default is 'pdat'.",
        ),
    })
    .optional()
    .describe("Defines an optional date range for the search."),
  filterByPublicationTypes: z
    .array(z.string())
    .optional()
    .describe(
      'An array of publication types to filter by (e.g., ["Review", "Clinical Trial"]).',
    ),
  fetchBriefSummaries: z
    .number()
    .int()
    .min(0)
    .max(50)
    .optional()
    .default(0)
    .describe(
      "Number of top PMIDs for which to fetch brief summaries using ESummary. Set to 0 to disable. Max 50. Default 0.",
    ),
});

export type PubMedSearchArticlesInput = z.infer<
  typeof PubMedSearchArticlesInputSchema
>;

export type PubMedSearchArticlesOutput = {
  searchParameters: PubMedSearchArticlesInput;
  effectiveESearchTerm: string;
  totalFound: number;
  retrievedPmidCount: number;
  pmids: string[];
  briefSummaries: ParsedBriefSummary[];
  eSearchUrl: string;
  eSummaryUrl?: string;
};

interface ESearchServiceParams {
  db: string;
  term?: string;
  retmax?: number;
  sort?: string;
  usehistory?: "y" | "n";
  WebEnv?: string;
  query_key?: string;
  id?: string;
  version?: string;
  retmode?: string;
  [key: string]: string | number | undefined;
}

export async function pubmedSearchArticlesLogic(
  input: PubMedSearchArticlesInput,
  parentRequestContext: RequestContext,
): Promise<PubMedSearchArticlesOutput> {
  const ncbiService = getNcbiService();
  const toolLogicContext = requestContextService.createRequestContext({
    parentRequestId: parentRequestContext.requestId,
    operation: "pubmedSearchArticlesLogic",
    input: sanitizeInputForLogging(input),
  });

  logger.info("Executing pubmed_search_articles tool", toolLogicContext);

  let effectiveQuery = sanitization.sanitizeString(input.queryTerm, {
    context: "text",
  });

  if (input.dateRange) {
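    // Example: queryTerm "asthma" with { minDate: "2020", maxDate: "2023",
    // dateType: "pdat" } produces:
    //   asthma AND (2020[pdat] : 2023[pdat])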
    const { minDate, maxDate, dateType } = input.dateRange;
    if (minDate && maxDate) {
      effectiveQuery += ` AND (${minDate}[${dateType}] : ${maxDate}[${dateType}])`;
    } else if (minDate) {
      // A single date term matches only that date, so use an open-ended
      // range to treat a lone minDate/maxDate as a true bound.
      effectiveQuery += ` AND (${minDate}[${dateType}] : 3000[${dateType}])`;
    } else if (maxDate) {
      effectiveQuery += ` AND (1000[${dateType}] : ${maxDate}[${dateType}])`;
    }
  }

  if (
    input.filterByPublicationTypes &&
    input.filterByPublicationTypes.length > 0
  ) {
    const ptQuery = input.filterByPublicationTypes
      .map(
        (pt: string) =>
          `"${sanitization.sanitizeString(pt, { context: "text" })}"[Publication Type]`,
      )
      .join(" OR ");
    effectiveQuery += ` AND (${ptQuery})`;
  }

  const currentFetchBriefSummaries = input.fetchBriefSummaries ?? 0;

  const eSearchParams: ESearchServiceParams = {
    db: "pubmed",
    term: effectiveQuery,
    retmax: input.maxResults,
    sort: input.sortBy,
    usehistory: currentFetchBriefSummaries > 0 ? "y" : "n",
  };

  const esResult: ESearchResult = await ncbiService.eSearch(
    eSearchParams,
    toolLogicContext,
  );

  const eSearchBase =
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi";
  const eSearchQueryStringParams: Record<string, string> = {};
  for (const key in eSearchParams) {
    if (eSearchParams[key] !== undefined) {
      eSearchQueryStringParams[key] = String(eSearchParams[key]);
    }
  }
  const eSearchQueryString = new URLSearchParams(
    eSearchQueryStringParams,
  ).toString();
  const eSearchUrl = `${eSearchBase}?${eSearchQueryString}`;

  if (!esResult) {
    throw new McpError(
      BaseErrorCode.NCBI_PARSING_ERROR,
      "Invalid or empty ESearch response from NCBI.",
      {
        ...toolLogicContext,
        responsePreview: sanitizeInputForLogging(
          // esResult is falsy here; coalesce so stringify returns a string.
          JSON.stringify(esResult ?? null).substring(0, 200),
        ),
      },
    );
  }

  const pmids: string[] = esResult.idList || [];
  const totalFound = esResult.count || 0;
  const retrievedPmidCount = pmids.length;

  let briefSummaries: ParsedBriefSummary[] = [];
  let eSummaryUrl: string | undefined;

  if (currentFetchBriefSummaries > 0 && pmids.length > 0) {
    const eSummaryParams: ESearchServiceParams = {
      db: "pubmed",
      version: "2.0",
      retmode: "xml",
    };

    if (esResult.webEnv && esResult.queryKey) {
      eSummaryParams.WebEnv = esResult.webEnv;
      eSummaryParams.query_key = esResult.queryKey;
      eSummaryParams.retmax = currentFetchBriefSummaries;
    } else {
      const pmidsForSummary = pmids
        .slice(0, currentFetchBriefSummaries)
        .join(",");
      eSummaryParams.id = pmidsForSummary;
    }

    const eSummaryBase =
      "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi";
    const eSummaryQueryStringParams: Record<string, string> = {};
    for (const key in eSummaryParams) {
      if (eSummaryParams[key] !== undefined) {
        eSummaryQueryStringParams[key] = String(eSummaryParams[key]);
      }
    }
    const eSummaryQueryString = new URLSearchParams(
      eSummaryQueryStringParams,
    ).toString();
    eSummaryUrl = `${eSummaryBase}?${eSummaryQueryString}`;

    const eSummaryResponseXml: ESummaryResponseContainer =
      (await ncbiService.eSummary(
        eSummaryParams,
        toolLogicContext,
      )) as ESummaryResponseContainer;

    // Check for a top-level ERROR before extracting summaries; otherwise
    // the warning branch can never be reached.
    if (eSummaryResponseXml?.eSummaryResult?.ERROR) {
      logger.warning("ESummary returned a top-level error", {
        ...toolLogicContext,
        errorDetails: eSummaryResponseXml.eSummaryResult.ERROR,
      });
    } else if (eSummaryResponseXml?.eSummaryResult) {
      briefSummaries = await extractBriefSummaries(
        eSummaryResponseXml.eSummaryResult,
        toolLogicContext,
      );
    }
  }

  logger.notice("Successfully executed pubmed_search_articles tool.", {
    ...toolLogicContext,
    totalFound,
    retrievedPmidCount,
    summariesFetched: briefSummaries.length,
  });

  return {
    searchParameters: input,
    effectiveESearchTerm: effectiveQuery,
    totalFound,
    retrievedPmidCount,
    pmids,
    briefSummaries,
    eSearchUrl,
    eSummaryUrl,
  };
}

```
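
A minimal invocation sketch for the logic above (illustrative only: import paths assume a module alongside `logic.ts`, and a configured NCBI service is required at runtime):

```typescript
import {
  PubMedSearchArticlesInputSchema,
  pubmedSearchArticlesLogic,
} from "./logic.js";
import { requestContextService } from "../../../utils/index.js";

// Parsing through the Zod schema applies defaults (maxResults: 20,
// sortBy: "relevance", fetchBriefSummaries: 0) and validates shapes.
const input = PubMedSearchArticlesInputSchema.parse({
  queryTerm: "crispr base editing",
  maxResults: 10,
  fetchBriefSummaries: 5,
  dateRange: { minDate: "2022", maxDate: "2024" },
});

const ctx = requestContextService.createRequestContext({
  operation: "exampleSearch",
});

const result = await pubmedSearchArticlesLogic(input, ctx);
console.log(result.effectiveESearchTerm); // sanitized query + date filter
console.log(result.pmids.length, result.briefSummaries.length);
```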

--------------------------------------------------------------------------------
/scripts/fetch-openapi-spec.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

/**
 * @fileoverview Fetches an OpenAPI specification (YAML/JSON) from a URL,
 * parses it, and saves it locally in both YAML and JSON formats.
 * Includes fallback logic for common OpenAPI file names (openapi.yaml, openapi.json).
 * Ensures output paths are within the project directory for security.
 * @module scripts/fetch-openapi-spec
 *
 * @example
 * // Fetch spec and save to docs/api/my_api.yaml and docs/api/my_api.json
 * // ts-node --esm scripts/fetch-openapi-spec.ts https://api.example.com/v1 docs/api/my_api
 *
 * @example
 * // Fetch spec from a direct file URL
 * // ts-node --esm scripts/fetch-openapi-spec.ts https://petstore3.swagger.io/api/v3/openapi.json docs/api/petstore_v3
 */

import axios, { AxiosError } from "axios";
import fs from "fs/promises";
import yaml from "js-yaml";
import path from "path";

const projectRoot = process.cwd();

const args = process.argv.slice(2);
const helpFlag = args.includes("--help");
const urlArg = args[0];
const outputBaseArg = args[1];

if (helpFlag || !urlArg || !outputBaseArg) {
  console.log(`
Fetch OpenAPI Specification Script

Usage:
  ts-node --esm scripts/fetch-openapi-spec.ts <url> <output-base-path> [--help]

Arguments:
  <url>                Base URL or direct URL to the OpenAPI spec (YAML/JSON).
  <output-base-path>   Base path for output files (relative to project root),
                       e.g., 'docs/api/my_api'. Will generate .yaml and .json.
  --help               Show this help message.

Example:
  ts-node --esm scripts/fetch-openapi-spec.ts https://petstore3.swagger.io/api/v3 docs/api/petstore_v3
`);
  process.exit(helpFlag ? 0 : 1);
}

const outputBasePathAbsolute = path.resolve(projectRoot, outputBaseArg);
const yamlOutputPath = `${outputBasePathAbsolute}.yaml`;
const jsonOutputPath = `${outputBasePathAbsolute}.json`;
const outputDirAbsolute = path.dirname(outputBasePathAbsolute);

// Security Check: Ensure output paths resolve inside the project root.
// (The output directory may legitimately be the project root itself.)
const isWithinRoot = (p: string): boolean =>
  p === projectRoot || p.startsWith(projectRoot + path.sep);
if (
  !isWithinRoot(outputDirAbsolute) ||
  !isWithinRoot(yamlOutputPath) ||
  !isWithinRoot(jsonOutputPath)
) {
  console.error(
    `Error: Output path "${outputBaseArg}" resolves outside the project directory. Aborting.`,
  );
  process.exit(1);
}

/**
 * Attempts to fetch content from a given URL.
 * @param url - The URL to fetch data from.
 * @returns A promise resolving to an object with data and content type, or null if fetch fails.
 */
async function tryFetch(
  url: string,
): Promise<{ data: string; contentType: string | null } | null> {
  try {
    console.log(`Attempting to fetch from: ${url}`);
    const response = await axios.get(url, {
      responseType: "text",
      validateStatus: (status) => status >= 200 && status < 300,
    });
    const contentType = response.headers["content-type"] || null;
    console.log(
      `Successfully fetched (Status: ${response.status}, Content-Type: ${contentType || "N/A"})`,
    );
    return { data: response.data, contentType };
  } catch (error) {
    let status = "Unknown";
    if (axios.isAxiosError(error)) {
      const axiosError = error as AxiosError;
      status = axiosError.response
        ? String(axiosError.response.status)
        : "Network Error";
    }
    console.warn(`Failed to fetch from ${url} (Status: ${status})`);
    return null;
  }
}

/**
 * Parses fetched data as YAML or JSON, attempting to infer from content type or by trying both.
 * @param data - The raw string data fetched from the URL.
 * @param contentType - The content type header from the HTTP response, if available.
 * @returns The parsed OpenAPI specification as an object, or null if parsing fails.
 */
function parseSpec(data: string, contentType: string | null): object | null {
  try {
    const lowerContentType = contentType?.toLowerCase();
    if (
      lowerContentType?.includes("yaml") ||
      lowerContentType?.includes("yml")
    ) {
      console.log("Parsing content as YAML based on Content-Type...");
      return yaml.load(data) as object;
    } else if (lowerContentType?.includes("json")) {
      console.log("Parsing content as JSON based on Content-Type...");
      return JSON.parse(data);
    } else {
      console.log(
        "Content-Type is ambiguous or missing. Attempting to parse as YAML first...",
      );
      try {
        const parsedYaml = yaml.load(data) as object;
        // Basic validation: check if it's a non-null object.
        if (parsedYaml && typeof parsedYaml === "object") {
          console.log("Successfully parsed as YAML.");
          return parsedYaml;
        }
        // YAML parsed to a scalar (e.g. a bare string); not a usable spec.
        console.warn(
          "Content parsed as YAML but was not a valid object structure. Trying JSON...",
        );
      } catch (_yamlError) {
        console.log("YAML parsing failed. Attempting to parse as JSON...");
      }
      // A single JSON fallback covers both the YAML-failure and scalar-YAML cases.
      try {
        const parsedJson = JSON.parse(data);
        if (parsedJson && typeof parsedJson === "object") {
          console.log("Successfully parsed as JSON.");
          return parsedJson;
        }
      } catch (_jsonError) {
        console.warn(
          "Could not parse content as YAML or JSON after attempting both.",
        );
        return null;
      }
    }
  } catch (parseError) {
    console.error(
      `Error parsing specification: ${parseError instanceof Error ? parseError.message : String(parseError)}`,
    );
  }
  return null;
}

/**
 * Main orchestrator function. Fetches the OpenAPI spec from the provided URL (with fallbacks),
 * parses it, and saves it to the specified output paths in both YAML and JSON formats.
 */
async function fetchAndProcessSpec(): Promise<void> {
  let fetchedResult: { data: string; contentType: string | null } | null = null;
  const potentialUrls: string[] = [urlArg];

  if (
    !urlArg.endsWith(".yaml") &&
    !urlArg.endsWith(".yml") &&
    !urlArg.endsWith(".json")
  ) {
    const urlWithoutTrailingSlash = urlArg.endsWith("/")
      ? urlArg.slice(0, -1)
      : urlArg;
    potentialUrls.push(`${urlWithoutTrailingSlash}/openapi.yaml`);
    potentialUrls.push(`${urlWithoutTrailingSlash}/openapi.json`);
  }

  for (const url of potentialUrls) {
    fetchedResult = await tryFetch(url);
    if (fetchedResult) break;
  }

  if (!fetchedResult) {
    console.error(
      `Error: Failed to fetch specification from all attempted URLs: ${potentialUrls.join(", ")}. Aborting.`,
    );
    process.exit(1);
  }

  const openapiSpec = parseSpec(fetchedResult.data, fetchedResult.contentType);

  if (!openapiSpec || typeof openapiSpec !== "object") {
    console.error(
      "Error: Failed to parse specification content or content is not a valid object. Aborting.",
    );
    process.exit(1);
  }

  try {
    await fs.access(outputDirAbsolute);
  } catch (error: unknown) {
    const err = error as NodeJS.ErrnoException | undefined;
    if (err?.code === "ENOENT") {
      console.log(`Output directory not found. Creating: ${outputDirAbsolute}`);
      await fs.mkdir(outputDirAbsolute, { recursive: true });
    } else {
      console.error(
        `Error accessing output directory ${outputDirAbsolute}: ${err?.message ?? String(error)}. Aborting.`,
      );
      process.exit(1);
    }
  }

  try {
    console.log(`Saving YAML specification to: ${yamlOutputPath}`);
    await fs.writeFile(yamlOutputPath, yaml.dump(openapiSpec), "utf8");
    console.log(`Successfully saved YAML specification.`);
  } catch (error) {
    console.error(
      `Error saving YAML to ${yamlOutputPath}: ${error instanceof Error ? error.message : String(error)}. Aborting.`,
    );
    process.exit(1);
  }

  try {
    console.log(`Saving JSON specification to: ${jsonOutputPath}`);
    await fs.writeFile(
      jsonOutputPath,
      JSON.stringify(openapiSpec, null, 2),
      "utf8",
    );
    console.log(`Successfully saved JSON specification.`);
  } catch (error) {
    console.error(
      `Error saving JSON to ${jsonOutputPath}: ${error instanceof Error ? error.message : String(error)}. Aborting.`,
    );
    process.exit(1);
  }

  console.log("OpenAPI specification processed and saved successfully.");
}

fetchAndProcessSpec();

```
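
The candidate-URL fallback in `fetchAndProcessSpec` can be restated standalone; this sketch mirrors the script's logic for clarity (it is not an export of the script):

```typescript
// Expand a base URL into the spec URLs the script tries, in order.
function candidateUrls(urlArg: string): string[] {
  const urls = [urlArg];
  if (!/\.(ya?ml|json)$/.test(urlArg)) {
    const base = urlArg.replace(/\/$/, "");
    urls.push(`${base}/openapi.yaml`, `${base}/openapi.json`);
  }
  return urls;
}

console.log(candidateUrls("https://api.example.com/v1"));
// [ "https://api.example.com/v1",
//   "https://api.example.com/v1/openapi.yaml",
//   "https://api.example.com/v1/openapi.json" ]
```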

--------------------------------------------------------------------------------
/src/services/NCBI/core/ncbiResponseHandler.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Handles parsing of NCBI E-utility responses and NCBI-specific error extraction.
 * @module src/services/NCBI/core/ncbiResponseHandler
 */

import { AxiosResponse } from "axios";
import { XMLParser, XMLValidator } from "fast-xml-parser";
import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
import {
  logger,
  RequestContext,
  requestContextService,
  sanitizeInputForLogging,
} from "../../../utils/index.js";
import { NcbiRequestOptions } from "./ncbiConstants.js";

export class NcbiResponseHandler {
  private xmlParser: XMLParser;

  constructor() {
    this.xmlParser = new XMLParser({
      ignoreAttributes: false,
      attributeNamePrefix: "@_",
      parseTagValue: true, // auto-convert numbers, booleans if possible
      isArray: (_name, jpath) => {
        // Common NCBI list tags - expand as needed
        const arrayTags = [
          "IdList.Id",
          "eSearchResult.IdList.Id",
          "PubmedArticleSet.PubmedArticle",
          "PubmedArticleSet.DeleteCitation.PMID",
          "AuthorList.Author",
          "MeshHeadingList.MeshHeading",
          "GrantList.Grant",
          "KeywordList.Keyword",
          "PublicationTypeList.PublicationType",
          "LinkSet.LinkSetDb.Link",
          "Link.Id",
          "DbInfo.FieldList.Field",
          "DbInfo.LinkList.Link",
          "DocSum.Item", // For ESummary v2.0 JSON-like XML
        ];
        return arrayTags.includes(jpath);
      },
    });
  }

  private extractNcbiErrorMessages(
    parsedXml: Record<string, unknown>,
  ): string[] {
    const messages: string[] = [];
    // Order matters for specificity if multiple error types could exist
    const errorPaths = [
      "eLinkResult.ERROR",
      "eSummaryResult.ERROR",
      "eSearchResult.ErrorList.PhraseNotFound",
      "eSearchResult.ErrorList.FieldNotFound",
      "PubmedArticleSet.ErrorList.CannotRetrievePMID", // More specific error
      "ERROR", // Generic top-level error
    ];

    for (const path of errorPaths) {
      let errorSource: unknown = parsedXml;
      const parts = path.split(".");
      for (const part of parts) {
        if (
          errorSource &&
          typeof errorSource === "object" &&
          part in errorSource
        ) {
          errorSource = (errorSource as Record<string, unknown>)[part];
        } else {
          errorSource = undefined;
          break;
        }
      }

      if (errorSource) {
        const items = Array.isArray(errorSource) ? errorSource : [errorSource];
        for (const item of items) {
          if (typeof item === "string") {
            messages.push(item);
          } else if (item && typeof item["#text"] === "string") {
            messages.push(item["#text"]);
          }
        }
      }
    }

    // Handle warnings if no primary errors found
    if (messages.length === 0) {
      const warningPaths = [
        "eSearchResult.WarningList.QuotedPhraseNotFound",
        "eSearchResult.WarningList.OutputMessage",
      ];
      for (const path of warningPaths) {
        let warningSource: unknown = parsedXml;
        const parts = path.split(".");
        for (const part of parts) {
          if (
            warningSource &&
            typeof warningSource === "object" &&
            part in warningSource
          ) {
            warningSource = (warningSource as Record<string, unknown>)[part];
          } else {
            warningSource = undefined;
            break;
          }
        }
        if (warningSource) {
          const items = Array.isArray(warningSource)
            ? warningSource
            : [warningSource];
          for (const item of items) {
            if (typeof item === "string") {
              messages.push(`Warning: ${item}`);
            } else if (item && typeof item["#text"] === "string") {
              messages.push(`Warning: ${item["#text"]}`);
            }
          }
        }
      }
    }
    return messages.length > 0
      ? messages
      : ["Unknown NCBI API error structure."];
  }

  /**
   * Parses the raw AxiosResponse data based on retmode and checks for NCBI-specific errors.
   * @param response The raw AxiosResponse from an NCBI E-utility call.
   * @param endpoint The E-utility endpoint for context.
   * @param context The request context for logging.
   * @param options The original request options, particularly `retmode`.
   * @returns The parsed data (object for XML/JSON, string for text).
   * @throws {McpError} If parsing fails or NCBI reports an error in the response body.
   */
  public parseAndHandleResponse<T>(
    response: AxiosResponse,
    endpoint: string,
    context: RequestContext,
    options: NcbiRequestOptions,
  ): T {
    const responseData = response.data;
    const operationContext = requestContextService.createRequestContext({
      ...context,
      operation: "NCBI_ParseResponse",
      endpoint,
      retmode: options.retmode,
    });

    if (options.retmode === "text") {
      logger.debug("Received text response from NCBI.", operationContext);
      return responseData as T;
    }

    if (options.retmode === "xml") {
      logger.debug(
        "Attempting to parse XML response from NCBI.",
        operationContext,
      );
      if (
        typeof responseData !== "string" ||
        XMLValidator.validate(responseData) !== true
      ) {
        logger.error(
          "Invalid or non-string XML response from NCBI",
          new Error("Invalid XML structure"),
          {
            ...operationContext,
            responseSnippet: String(responseData).substring(0, 500),
          },
        );
        throw new McpError(
          BaseErrorCode.NCBI_PARSING_ERROR,
          "Received invalid XML from NCBI.",
          { endpoint, responseSnippet: String(responseData).substring(0, 200) },
        );
      }

      // Always parse for error checking, even if returning raw XML
      const parsedXml = this.xmlParser.parse(responseData);

      // Check for error indicators within the parsed XML structure
      if (
        parsedXml.eSearchResult?.ErrorList ||
        parsedXml.eLinkResult?.ERROR ||
        parsedXml.eSummaryResult?.ERROR ||
        parsedXml.PubmedArticleSet?.ErrorList || // Check for ErrorList specifically
        parsedXml.ERROR // Generic top-level error
      ) {
        const errorMessages = this.extractNcbiErrorMessages(parsedXml);
        logger.error(
          "NCBI API returned an error in XML response",
          new Error(errorMessages.join("; ")),
          {
            ...operationContext,
            errors: errorMessages,
            parsedXml: sanitizeInputForLogging(parsedXml), // Log the parsed structure for error diagnosis
          },
        );
        throw new McpError(
          BaseErrorCode.NCBI_API_ERROR,
          `NCBI API Error: ${errorMessages.join("; ")}`,
          { endpoint, ncbiErrors: errorMessages },
        );
      }

      // If raw XML is requested and no errors were found, return the original string
      if (options.returnRawXml) {
        logger.debug(
          "Successfully validated XML response. Returning raw XML string as requested.",
          operationContext,
        );
        return responseData as T; // responseData is the raw XML string
      }

      logger.debug(
        "Successfully parsed XML response. Returning parsed object.",
        operationContext,
      );
      return parsedXml as T; // Return the parsed object by default
    }

    if (options.retmode === "json") {
      logger.debug("Handling JSON response from NCBI.", operationContext);
      // Assuming responseData is already parsed by Axios if Content-Type was application/json
      if (
        typeof responseData === "object" &&
        responseData !== null &&
        responseData.error
      ) {
        const errorMessage = String(responseData.error);
        logger.error(
          "NCBI API returned an error in JSON response",
          new Error(errorMessage),
          {
            ...operationContext,
            error: errorMessage,
            responseData: sanitizeInputForLogging(responseData),
          },
        );
        throw new McpError(
          BaseErrorCode.NCBI_API_ERROR,
          `NCBI API Error: ${errorMessage}`,
          { endpoint, ncbiError: errorMessage },
        );
      }
      logger.debug("Successfully processed JSON response.", operationContext);
      return responseData as T;
    }

    // Fallback for unknown retmode or if retmode is undefined
    logger.warning(
      `Response received with unspecified or unhandled retmode: ${options.retmode}. Returning raw data.`,
      operationContext,
    );
    return responseData as T;
  }
}

```
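
A small sketch of driving the handler directly with a canned XML payload (illustrative: the `AxiosResponse` value is a minimal stand-in, and `NcbiRequestOptions` is assumed to accept a bare `retmode`):

```typescript
import type { AxiosResponse } from "axios";
import { NcbiResponseHandler } from "./ncbiResponseHandler.js";
import { requestContextService } from "../../../utils/index.js";

const handler = new NcbiResponseHandler();
const ctx = requestContextService.createRequestContext({
  operation: "exampleParse",
});

// Only `data` is read on the XML path, so a partial response suffices.
const response = {
  data: "<eSearchResult><Count>2</Count><IdList><Id>111</Id><Id>222</Id></IdList></eSearchResult>",
} as AxiosResponse;

// With retmode "xml" and no returnRawXml, the parsed object is returned.
// The isArray config guarantees eSearchResult.IdList.Id is an array.
const parsed = handler.parseAndHandleResponse<{
  eSearchResult: { Count: number; IdList: { Id: number[] } };
}>(response, "esearch.fcgi", ctx, { retmode: "xml" });

console.log(parsed.eSearchResult.IdList.Id); // [111, 222]
```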

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

/**
 * @fileoverview Main entry point for the MCP TypeScript Template application.
 * This script initializes the configuration, sets up the logger, starts the
 * MCP server (either via STDIO or HTTP transport), and handles graceful
 * shutdown on process signals or unhandled errors.
 *
 * The script uses an Immediately Invoked Function Expression (IIFE) with async/await
 * to manage the asynchronous nature of server startup and shutdown.
 *
 * Key operations:
 * 1. Import necessary modules and utilities.
 * 2. Define a `shutdown` function for graceful server termination.
 * 3. Define a `start` function to:
 *    - Initialize the logger with the configured log level.
 *    - Create a startup request context for logging and correlation.
 *    - Initialize and start the MCP server transport (stdio or http).
 *    - Set up global error handlers (uncaughtException, unhandledRejection)
 *      and signal handlers (SIGTERM, SIGINT) to trigger graceful shutdown.
 * 4. Execute the `start` function within an async IIFE.
 *
 * @module src/index
 */

import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import http from "http";
import { config, environment } from "./config/index.js";
import { initializeAndStartServer } from "./mcp-server/server.js";
import { requestContextService } from "./utils/index.js";
import { logger, McpLogLevel } from "./utils/internal/logger.js";

/**
 * Holds the main MCP server instance, primarily for STDIO transport.
 * @private
 */
let mcpStdioServer: McpServer | undefined;

/**
 * Holds the Node.js HTTP server instance if HTTP transport is used.
 * @private
 */
let actualHttpServer: http.Server | undefined;

/**
 * Gracefully shuts down the main MCP server and associated resources.
 * Called on process termination signals or critical unhandled errors.
 *
 * @param signal - The signal or event name that triggered the shutdown.
 * @returns A promise that resolves when shutdown is complete or an error occurs.
 * @private
 */
const shutdown = async (signal: string): Promise<void> => {
  const shutdownContext = requestContextService.createRequestContext({
    operation: "ServerShutdown",
    triggerEvent: signal,
  });

  logger.info(
    `Received ${signal}. Initiating graceful shutdown...`,
    shutdownContext,
  );

  try {
    let closePromise: Promise<void> = Promise.resolve();
    const transportType = config.mcpTransportType;

    if (transportType === "stdio" && mcpStdioServer) {
      logger.info(
        "Attempting to close main MCP server (STDIO)...",
        shutdownContext,
      );
      closePromise = mcpStdioServer.close();
    } else if (transportType === "http" && actualHttpServer) {
      logger.info("Attempting to close HTTP server...", shutdownContext);
      closePromise = new Promise((resolve, reject) => {
        actualHttpServer!.close((err) => {
          if (err) {
            logger.error("Error closing HTTP server.", {
              ...shutdownContext,
              error: err.message,
            });
            return reject(err);
          }
          logger.info("HTTP server closed successfully.", shutdownContext);
          resolve();
        });
      });
    }

    await closePromise;
    logger.info(
      "Graceful shutdown completed successfully. Exiting.",
      shutdownContext,
    );
    process.exit(0);
  } catch (error) {
    logger.error("Critical error during shutdown process.", {
      ...shutdownContext,
      errorMessage: error instanceof Error ? error.message : String(error),
      errorStack: error instanceof Error ? error.stack : undefined,
    });
    process.exit(1);
  }
};

/**
 * Initializes and starts the main MCP server application.
 * Orchestrates logger setup, server initialization, transport startup,
 * and global error/signal handling.
 *
 * @returns A promise that resolves when the server has started and handlers are registered,
 *   or rejects if a critical startup error occurs.
 * @private
 */
const start = async (): Promise<void> => {
  const validMcpLogLevels: McpLogLevel[] = [
    "debug",
    "info",
    "notice",
    "warning",
    "error",
    "crit",
    "alert",
    "emerg",
  ];
  const initialLogLevelConfig = config.logLevel;

  let validatedMcpLogLevel: McpLogLevel = "info";
  if (validMcpLogLevels.includes(initialLogLevelConfig as McpLogLevel)) {
    validatedMcpLogLevel = initialLogLevelConfig as McpLogLevel;
  } else {
    if (process.stdout.isTTY) {
      console.warn(
        `[Startup Warning] Invalid MCP_LOG_LEVEL "${initialLogLevelConfig}" found in configuration. ` +
          `Defaulting to log level "info". Valid levels are: ${validMcpLogLevels.join(", ")}.`,
      );
    }
  }
  await logger.initialize(validatedMcpLogLevel);
  logger.info(
    `Logger has been initialized by start(). Effective MCP logging level set to: ${validatedMcpLogLevel}.`,
  );

  const transportType = config.mcpTransportType;
  const startupContext = requestContextService.createRequestContext({
    operation: `ServerStartupSequence_${transportType}`,
    applicationName: config.mcpServerName,
    applicationVersion: config.mcpServerVersion,
    nodeEnvironment: environment,
  });

  logger.debug("Application configuration loaded successfully.", {
    ...startupContext,
    configSummary: {
      serverName: config.mcpServerName,
      serverVersion: config.mcpServerVersion,
      transport: config.mcpTransportType,
      logLevel: config.logLevel,
      env: config.environment,
      httpPort:
        config.mcpTransportType === "http" ? config.mcpHttpPort : undefined,
      httpHost:
        config.mcpTransportType === "http" ? config.mcpHttpHost : undefined,
    },
  });

  logger.info(
    `Starting ${config.mcpServerName} (Version: ${config.mcpServerVersion}, Transport: ${transportType}, Env: ${environment})...`,
    startupContext,
  );

  try {
    logger.debug(
      "Calling initializeAndStartServer to set up MCP transport...",
      startupContext,
    );

    const serverInstance = await initializeAndStartServer();

    if (transportType === "stdio" && serverInstance instanceof McpServer) {
      mcpStdioServer = serverInstance;
      logger.info(
        "STDIO McpServer instance stored globally for shutdown.",
        startupContext,
      );
    } else if (
      transportType === "http" &&
      serverInstance instanceof http.Server
    ) {
      actualHttpServer = serverInstance;
      logger.info(
        "HTTP transport initialized, http.Server instance stored globally for shutdown.",
        startupContext,
      );
    } else if (transportType === "http") {
      // This case should ideally not be reached if initializeAndStartServer correctly returns http.Server
      logger.warning(
        "HTTP transport initialized, but no http.Server instance was returned to index.ts. Shutdown might be incomplete.",
        startupContext,
      );
    }

    logger.info(
      `${config.mcpServerName} is now running and ready to accept connections via ${transportType} transport.`,
      {
        ...startupContext,
        serverStartTime: new Date().toISOString(),
      },
    );

    process.on("SIGTERM", () => shutdown("SIGTERM"));
    process.on("SIGINT", () => shutdown("SIGINT"));

    process.on("uncaughtException", async (error: Error) => {
      const errorContext = {
        ...startupContext,
        triggerEvent: "uncaughtException",
        errorMessage: error.message,
        errorStack: error.stack,
      };
      logger.error(
        "FATAL: Uncaught exception detected. This indicates a bug or unexpected state. Initiating shutdown...",
        errorContext,
      );
      await shutdown("uncaughtException");
    });

    process.on("unhandledRejection", async (reason: unknown) => {
      const rejectionContext = {
        ...startupContext,
        triggerEvent: "unhandledRejection",
        rejectionReason:
          reason instanceof Error ? reason.message : String(reason),
        rejectionStack: reason instanceof Error ? reason.stack : undefined,
      };
      logger.error(
        "FATAL: Unhandled promise rejection detected. This indicates a bug or missing error handling in async code. Initiating shutdown...",
        rejectionContext,
      );
      await shutdown("unhandledRejection");
    });
  } catch (error) {
    logger.error(
      "CRITICAL ERROR DURING STARTUP: The application could not start. Exiting.",
      {
        ...startupContext,
        finalErrorContext: "ApplicationStartupFailure",
        errorMessage: error instanceof Error ? error.message : String(error),
        errorStack: error instanceof Error ? error.stack : undefined,
      },
    );
    process.exit(1);
  }
};

// Async IIFE to allow top-level await for the start function.
(async () => {
  try {
    await start();
  } catch (error) {
    // This catch is a final fallback. `start()` should handle its errors and exit.
    if (process.stdout.isTTY) {
      console.error(
        "[GLOBAL CATCH] An unexpected error occurred outside of the main start function's error handling:",
        error,
      );
    }
    process.exit(1);
  }
})();

```

--------------------------------------------------------------------------------
/src/services/NCBI/parsing/pubmedArticleStructureParser.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Helper functions for parsing detailed PubMed Article XML structures,
 * typically from EFetch results.
 * @module src/services/NCBI/parsing/pubmedArticleStructureParser
 */

import {
  XmlAbstractText,
  XmlArticle,
  XmlArticleDate,
  XmlAuthor,
  XmlAuthorList,
  XmlGrant,
  XmlGrantList,
  XmlJournal,
  XmlKeyword,
  XmlKeywordList,
  XmlMedlineCitation,
  XmlMeshHeading,
  XmlMeshHeadingList,
  XmlPublicationType,
  XmlPublicationTypeList,
  ParsedArticleAuthor,
  ParsedJournalInfo,
  ParsedMeshTerm,
  ParsedGrant,
  ParsedArticleDate,
} from "../../../types-global/pubmedXml.js";
import { ensureArray, getText, getAttribute } from "./xmlGenericHelpers.js";

/**
 * Extracts and formats author information from XML.
 * @param authorListXml - The XML AuthorList element.
 * @returns An array of formatted author objects.
 */
export function extractAuthors(
  authorListXml?: XmlAuthorList,
): ParsedArticleAuthor[] {
  if (!authorListXml) return [];
  const authors = ensureArray(authorListXml.Author);
  return authors.map((auth: XmlAuthor) => {
    const collectiveName = getText(auth.CollectiveName);
    if (collectiveName) {
      return { collectiveName };
    }

    let affiliation = "";
    const affiliations = ensureArray(auth.AffiliationInfo);
    if (affiliations.length > 0) {
      affiliation = getText(affiliations[0]?.Affiliation);
    }
    return {
      lastName: getText(auth.LastName),
      firstName: getText(auth.ForeName), // XML uses ForeName
      initials: getText(auth.Initials),
      affiliation: affiliation || undefined, // Ensure undefined if empty
    };
  });
}

/**
 * Extracts and formats journal information from XML.
 * @param journalXml - The XML Journal element from an Article.
 * @param medlineCitationXml - The XML MedlineCitation element (for MedlinePgn).
 * @returns Formatted journal information.
 */
export function extractJournalInfo(
  journalXml?: XmlJournal,
  medlineCitationXml?: XmlMedlineCitation,
): ParsedJournalInfo | undefined {
  if (!journalXml) return undefined;

  const pubDate = journalXml.JournalIssue?.PubDate;
  const year = getText(
    pubDate?.Year,
    getText(pubDate?.MedlineDate, "").match(/\d{4}/)?.[0],
  );

  return {
    title: getText(journalXml.Title),
    isoAbbreviation: getText(journalXml.ISOAbbreviation),
    volume: getText(journalXml.JournalIssue?.Volume),
    issue: getText(journalXml.JournalIssue?.Issue),
    pages:
      getText(medlineCitationXml?.MedlinePgn) ||
      getText(medlineCitationXml?.Article?.Pagination?.MedlinePgn),
    publicationDate: {
      year: year || undefined,
      month: getText(pubDate?.Month) || undefined,
      day: getText(pubDate?.Day) || undefined,
      medlineDate: getText(pubDate?.MedlineDate) || undefined,
    },
  };
}

/**
 * Extracts and formats MeSH terms from XML.
 * @param meshHeadingListXml - The XML MeshHeadingList element.
 * @returns An array of formatted MeSH term objects.
 */
export function extractMeshTerms(
  meshHeadingListXml?: XmlMeshHeadingList,
): ParsedMeshTerm[] {
  if (!meshHeadingListXml) return [];
  const meshHeadings = ensureArray(meshHeadingListXml.MeshHeading);
  return meshHeadings.map((mh: XmlMeshHeading) => {
    const qualifiers = ensureArray(mh.QualifierName);
    const firstQualifier = qualifiers[0];

    // Check MajorTopicYN at DescriptorName, QualifierName, and the root MeshHeading element
    const isMajorDescriptor =
      getAttribute(mh.DescriptorName, "MajorTopicYN") === "Y";
    const isMajorQualifier = firstQualifier
      ? getAttribute(firstQualifier, "MajorTopicYN") === "Y"
      : false;
    // Some schemas might place MajorTopicYN directly on MeshHeading if no qualifiers
    const isMajorRoot = getAttribute(mh, "MajorTopicYN") === "Y";

    return {
      descriptorName: getText(mh.DescriptorName),
      descriptorUi: getAttribute(mh.DescriptorName, "UI"),
      qualifierName: firstQualifier ? getText(firstQualifier) : undefined,
      qualifierUi: firstQualifier
        ? getAttribute(firstQualifier, "UI")
        : undefined,
      isMajorTopic: isMajorRoot || isMajorDescriptor || isMajorQualifier,
    };
  });
}

/**
 * Extracts and formats grant information from XML.
 * @param grantListXml - The XML GrantList element.
 * @returns An array of formatted grant objects.
 */
export function extractGrants(grantListXml?: XmlGrantList): ParsedGrant[] {
  if (!grantListXml) return [];
  const grants = ensureArray(grantListXml.Grant);
  return grants.map((g: XmlGrant) => ({
    grantId: getText(g.GrantID) || undefined,
    agency: getText(g.Agency) || undefined,
    country: getText(g.Country) || undefined,
  }));
}

/**
 * Extracts DOI from various possible locations in the XML.
 * Prioritizes ELocationID with ValidYN="Y", then any ELocationID, then ArticleIdList.
 * @param articleXml - The XML Article element.
 * @returns The DOI string or undefined.
 */
export function extractDoi(articleXml?: XmlArticle): string | undefined {
  if (!articleXml) return undefined;

  // Check ELocationID first
  const eLocationIDs = ensureArray(articleXml.ELocationID);
  // Prioritize valid DOI
  for (const eloc of eLocationIDs) {
    if (
      getAttribute(eloc, "EIdType") === "doi" &&
      getAttribute(eloc, "ValidYN") === "Y"
    ) {
      const doi = getText(eloc);
      if (doi) return doi;
    }
  }
  // Fallback to any DOI in ELocationID
  for (const eloc of eLocationIDs) {
    if (getAttribute(eloc, "EIdType") === "doi") {
      const doi = getText(eloc);
      if (doi) return doi;
    }
  }

  // Check ArticleIdList as a secondary source
  const articleIds = ensureArray(articleXml.ArticleIdList?.ArticleId);
  for (const aid of articleIds) {
    if (getAttribute(aid, "IdType") === "doi") {
      const doi = getText(aid);
      if (doi) return doi;
    }
  }
  return undefined;
}

/**
 * Extracts publication types from XML.
 * @param publicationTypeListXml - The XML PublicationTypeList element.
 * @returns An array of publication type strings.
 */
export function extractPublicationTypes(
  publicationTypeListXml?: XmlPublicationTypeList,
): string[] {
  if (!publicationTypeListXml) return [];
  const pubTypes = ensureArray(publicationTypeListXml.PublicationType);
  return pubTypes.map((pt: XmlPublicationType) => getText(pt)).filter(Boolean);
}

/**
 * Extracts keywords from XML. Handles single or multiple KeywordList elements.
 * @param keywordListsXml - The XML KeywordList element or an array of them.
 * @returns An array of keyword strings.
 */
export function extractKeywords(
  keywordListsXml?: XmlKeywordList[] | XmlKeywordList,
): string[] {
  if (!keywordListsXml) return [];
  const lists = ensureArray(keywordListsXml);
  const allKeywords: string[] = [];
  for (const list of lists) {
    const keywords = ensureArray(list.Keyword);
    keywords.forEach((kw: XmlKeyword) => {
      const keywordText = getText(kw);
      if (keywordText) {
        allKeywords.push(keywordText);
      }
    });
  }
  return allKeywords;
}

/**
 * Extracts abstract text from XML. Handles structured abstracts by concatenating sections.
 * If AbstractText is an array, joins them. If it's a single object/string, uses it directly.
 * Prefixes with Label if present.
 * @param abstractXml - The XML Abstract element from an Article.
 * @returns The abstract text string, or undefined if not found or empty.
 */
export function extractAbstractText(
  abstractXml?: XmlArticle["Abstract"],
): string | undefined {
  if (!abstractXml || !abstractXml.AbstractText) return undefined;

  const abstractTexts = ensureArray(abstractXml.AbstractText);
  if (abstractTexts.length === 0) return undefined;

  const processedTexts = abstractTexts
    .map((at: XmlAbstractText | string) => {
      // AbstractText can be string directly or object
      if (typeof at === "string") {
        return at;
      }
      // If it's an object, it should have #text or Label
      const sectionText = getText(at); // Handles at["#text"]
      const label = getAttribute(at, "Label");
      if (label && sectionText) {
        return `${label.trim()}: ${sectionText.trim()}`;
      }
      return sectionText.trim();
    })
    .filter(Boolean); // Remove any empty strings resulting from empty sections

  if (processedTexts.length === 0) return undefined;

  return processedTexts.join("\n\n").trim() || undefined; // Join sections with double newline
}

/**
 * Extracts PMID from MedlineCitation.
 * @param medlineCitationXml - The XML MedlineCitation element.
 * @returns The PMID string or undefined.
 */
export function extractPmid(
  medlineCitationXml?: XmlMedlineCitation,
): string | undefined {
  if (!medlineCitationXml || !medlineCitationXml.PMID) return undefined;
  return getText(medlineCitationXml.PMID);
}

/**
 * Extracts article dates from XML.
 * @param articleXml - The XML Article element.
 * @returns An array of parsed article dates.
 */
export function extractArticleDates(
  articleXml?: XmlArticle,
): ParsedArticleDate[] {
  if (!articleXml || !articleXml.ArticleDate) return [];
  const articleDatesXml = ensureArray(articleXml.ArticleDate);
  return articleDatesXml.map((ad: XmlArticleDate) => ({
    dateType: getAttribute(ad, "DateType"),
    year: getText(ad.Year),
    month: getText(ad.Month),
    day: getText(ad.Day),
  }));
}

```
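
A quick illustration of the structured-abstract handling in `extractAbstractText` (the literal mimics fast-xml-parser output with the `@_` attribute prefix; the cast only satisfies the XML types for this sketch, and the attribute-key resolution inside `getAttribute` is assumed):

```typescript
import { extractAbstractText } from "./pubmedArticleStructureParser.js";
import type { XmlArticle } from "../../../types-global/pubmedXml.js";

const abstractXml = {
  AbstractText: [
    { "@_Label": "BACKGROUND", "#text": "Base editors enable..." },
    { "@_Label": "RESULTS", "#text": "Editing efficiency exceeded..." },
  ],
} as unknown as XmlArticle["Abstract"];

console.log(extractAbstractText(abstractXml));
// "BACKGROUND: Base editors enable...\n\nRESULTS: Editing efficiency exceeded..."
```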

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedArticleConnections/logic/elinkHandler.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Handles ELink requests and enriches results with ESummary data
 * for the pubmedArticleConnections tool.
 * @module src/mcp-server/tools/pubmedArticleConnections/logic/elinkHandler
 */

import { getNcbiService } from "../../../../services/NCBI/core/ncbiService.js";
import type {
  ESummaryResult,
  ParsedBriefSummary,
} from "../../../../types-global/pubmedXml.js";
import { logger, RequestContext } from "../../../../utils/index.js";
import { extractBriefSummaries } from "../../../../services/NCBI/parsing/index.js";
import { ensureArray } from "../../../../services/NCBI/parsing/xmlGenericHelpers.js";
import type { PubMedArticleConnectionsInput } from "./index.js";
import type { ToolOutputData } from "./types.js";

// Local interface for the structure of an ELink 'Link' item
interface XmlELinkItem {
  Id: string | number | { "#text"?: string | number }; // Allow number for Id
  Score?: string | number | { "#text"?: string | number }; // Allow number for Score
}

interface ELinkResult {
  eLinkResult?: {
    LinkSet?: {
      LinkSetDb?: {
        LinkName?: string;
        Link?: XmlELinkItem[];
      }[];
      LinkSetDbHistory?: {
        QueryKey?: string;
      }[];
      WebEnv?: string;
    };
    ERROR?: string;
  }[];
}

export async function handleELinkRelationships(
  input: PubMedArticleConnectionsInput,
  outputData: ToolOutputData,
  context: RequestContext,
): Promise<void> {
  const eLinkParams: Record<string, string> = {
    dbfrom: "pubmed",
    db: "pubmed",
    id: input.sourcePmid,
    retmode: "xml",
    // cmd and linkname will be set below based on relationshipType
  };

  switch (input.relationshipType) {
    case "pubmed_citedin":
      eLinkParams.cmd = "neighbor_history";
      eLinkParams.linkname = "pubmed_pubmed_citedin";
      break;
    case "pubmed_references":
      eLinkParams.cmd = "neighbor_history";
      eLinkParams.linkname = "pubmed_pubmed_refs";
      break;
    case "pubmed_similar_articles":
    default: // Default to similar articles
      eLinkParams.cmd = "neighbor_score";
      // No linkname is explicitly needed for neighbor_score when dbfrom and db are pubmed
      break;
  }

  const tempUrl = new URL(
    "https://dummy.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi",
  );
  Object.keys(eLinkParams).forEach((key) =>
    tempUrl.searchParams.append(key, String(eLinkParams[key])),
  );
  outputData.eUtilityUrl = `https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?${tempUrl.search.substring(1)}`;

  const ncbiService = getNcbiService();
  const eLinkResult: ELinkResult = (await ncbiService.eLink(
    eLinkParams,
    context,
  )) as ELinkResult;

  // Log the full eLinkResult for debugging
  logger.debug("Raw eLinkResult from ncbiService:", {
    ...context,
    eLinkResultString: JSON.stringify(eLinkResult, null, 2),
  });

  // Use ensureArray for robust handling of potentially single or array eLinkResult
  const eLinkResultsArray = ensureArray(eLinkResult?.eLinkResult);
  const firstELinkResult = eLinkResultsArray[0];

  // Use ensureArray for LinkSet as well
  const linkSetsArray = ensureArray(firstELinkResult?.LinkSet);
  const linkSet = linkSetsArray[0];

  let foundPmids: { pmid: string; score?: number }[] = [];

  if (firstELinkResult?.ERROR) {
    const errorMsg =
      typeof firstELinkResult.ERROR === "string"
        ? firstELinkResult.ERROR
        : JSON.stringify(firstELinkResult.ERROR);
    logger.warning(`ELink returned an error: ${errorMsg}`, context);
    outputData.message = `ELink error: ${errorMsg}`;
    outputData.retrievedCount = 0;
    return;
  }

  if (linkSet?.LinkSetDbHistory) {
    // Handle cmd=neighbor_history response (citedin, references)
    const history = Array.isArray(linkSet.LinkSetDbHistory)
      ? linkSet.LinkSetDbHistory[0]
      : linkSet.LinkSetDbHistory;

    // Read WebEnv from the same normalized linkSet used for LinkSetDbHistory.
    if (history?.QueryKey && linkSet.WebEnv) {
      const eSearchParams = {
        db: "pubmed",
        query_key: history.QueryKey,
        WebEnv: linkSet.WebEnv,
        retmode: "xml",
        retmax: input.maxRelatedResults * 2, // Fetch a bit more to allow filtering sourcePmid
      };
      const eSearchResult: { eSearchResult?: { IdList?: { Id?: unknown } } } =
        (await ncbiService.eSearch(eSearchParams, context)) as {
          eSearchResult?: { IdList?: { Id?: unknown } };
        };
      if (eSearchResult?.eSearchResult?.IdList?.Id) {
        const ids = ensureArray(eSearchResult.eSearchResult.IdList.Id);
        foundPmids = ids
          .map((idNode: string | number | { "#text"?: string | number }) => {
            // Allow number for idNode
            let pmidVal: string | number | undefined;
            if (typeof idNode === "object" && idNode !== null) {
              pmidVal = idNode["#text"];
            } else {
              pmidVal = idNode;
            }
            return {
              pmid: pmidVal !== undefined ? String(pmidVal) : "",
              // No scores from this ESearch path
            };
          })
          .filter(
            (item: { pmid: string }) =>
              item.pmid && item.pmid !== input.sourcePmid && item.pmid !== "0",
          );
      }
    }
  } else if (linkSet?.LinkSetDb) {
    // Handle cmd=neighbor_score response (similar_articles)
    const linkSetDbArray = Array.isArray(linkSet.LinkSetDb)
      ? linkSet.LinkSetDb
      : [linkSet.LinkSetDb];

    const targetLinkSetDbEntry = linkSetDbArray.find(
      (db) => db.LinkName === "pubmed_pubmed",
    );

    if (targetLinkSetDbEntry?.Link) {
      const links = ensureArray(targetLinkSetDbEntry.Link); // Use ensureArray here too
      foundPmids = links
        .map((link: XmlELinkItem) => {
          let pmidValue: string | number | undefined;
          if (typeof link.Id === "object" && link.Id !== null) {
            pmidValue = link.Id["#text"];
          } else if (link.Id !== undefined) {
            pmidValue = link.Id;
          }

          let scoreValue: string | number | undefined;
          if (typeof link.Score === "object" && link.Score !== null) {
            scoreValue = link.Score["#text"];
          } else if (link.Score !== undefined) {
            scoreValue = link.Score;
          }

          const pmidString = pmidValue !== undefined ? String(pmidValue) : "";

          return {
            pmid: pmidString,
            score: scoreValue !== undefined ? Number(scoreValue) : undefined,
          };
        })
        .filter(
          (item: { pmid: string; score?: number }) =>
            item.pmid && item.pmid !== input.sourcePmid && item.pmid !== "0",
        );
    }
  }

  if (foundPmids.length === 0) {
    logger.warning(
      "No related PMIDs found after ELink/ESearch processing.",
      context,
    );
    outputData.message = "No related articles found or ELink error."; // Generic message if no PMIDs
    outputData.retrievedCount = 0;
    return;
  }

  logger.debug(
    "Found PMIDs after initial parsing and filtering (before sort):",
    {
      ...context,
      foundPmidsCount: foundPmids.length,
      firstFewFoundPmids: foundPmids.slice(0, 3),
    },
  );

  if (foundPmids.every((p) => p.score !== undefined)) {
    foundPmids.sort((a, b) => (b.score ?? 0) - (a.score ?? 0));
  }

  logger.debug("Found PMIDs after sorting:", {
    ...context,
    sortedFoundPmidsCount: foundPmids.length,
    firstFewSortedFoundPmids: foundPmids.slice(0, 3),
  });

  const pmidsToEnrich = foundPmids
    .slice(0, input.maxRelatedResults)
    .map((p) => p.pmid);

  logger.debug("PMIDs to enrich with ESummary:", {
    ...context,
    pmidsToEnrichCount: pmidsToEnrich.length,
    pmidsToEnrichList: pmidsToEnrich,
  });

  if (pmidsToEnrich.length > 0) {
    try {
      const summaryParams = {
        db: "pubmed",
        id: pmidsToEnrich.join(","),
        version: "2.0",
        retmode: "xml",
      };
      const summaryResultContainer: {
        eSummaryResult?: ESummaryResult;
        result?: ESummaryResult;
      } = (await ncbiService.eSummary(summaryParams, context)) as {
        eSummaryResult?: ESummaryResult;
        result?: ESummaryResult;
      };
      const summaryResult: ESummaryResult | undefined =
        summaryResultContainer?.eSummaryResult ||
        summaryResultContainer?.result ||
        summaryResultContainer;

      if (summaryResult) {
        const briefSummaries: ParsedBriefSummary[] =
          await extractBriefSummaries(summaryResult, context);
        const pmidDetailsMap = new Map<string, ParsedBriefSummary>();
        briefSummaries.forEach((bs) => pmidDetailsMap.set(bs.pmid, bs));

        outputData.relatedArticles = foundPmids
          .filter((p) => pmidsToEnrich.includes(p.pmid))
          .map((p) => {
            const details = pmidDetailsMap.get(p.pmid);
            return {
              pmid: p.pmid,
              title: details?.title,
              authors: details?.authors,
              score: p.score,
              linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
            };
          })
          .slice(0, input.maxRelatedResults);
      } else {
        logger.warning(
          "ESummary did not return usable data for enrichment.",
          context,
        );
        outputData.relatedArticles = foundPmids
          .slice(0, input.maxRelatedResults)
          .map((p) => ({
            pmid: p.pmid,
            score: p.score,
            linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
          }));
      }
    } catch (summaryError: unknown) {
      logger.error(
        "Failed to enrich related articles with summaries",
        summaryError instanceof Error
          ? summaryError
          : new Error(String(summaryError)),
        context,
      );
      outputData.relatedArticles = foundPmids
        .slice(0, input.maxRelatedResults)
        .map((p) => ({
          pmid: p.pmid,
          score: p.score,
          linkUrl: `https://pubmed.ncbi.nlm.nih.gov/${p.pmid}/`,
        }));
    }
  }
  outputData.retrievedCount = outputData.relatedArticles.length;
}

```
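
For reference, the sort-and-slice step above reduces to a small standalone pattern. A minimal sketch (the `RelatedPmid` shape and the sample data are hypothetical stand-ins for the `{ pmid, score }` objects built by the handler):

```typescript
interface RelatedPmid {
  pmid: string;
  score?: number;
}

// Sort by descending ELink relevance score only when every entry carries one,
// as the handler does, then keep the top N PMIDs for ESummary enrichment.
function topRelated(found: RelatedPmid[], maxResults: number): string[] {
  if (found.every((p) => p.score !== undefined)) {
    found.sort((a, b) => (b.score ?? 0) - (a.score ?? 0));
  }
  return found.slice(0, maxResults).map((p) => p.pmid);
}

// Hypothetical sample data:
topRelated(
  [
    { pmid: "100", score: 5 },
    { pmid: "200", score: 42 },
    { pmid: "300", score: 17 },
  ],
  2,
); // -> ["200", "300"]
```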

--------------------------------------------------------------------------------
/scripts/tree.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

/**
 * @fileoverview Generates a visual tree representation of the project's directory structure.
 * Respects .gitignore patterns and common exclusions (e.g., node_modules).
 * Saves the tree to a markdown file (default: docs/tree.md).
 * Supports custom output path and depth limitation.
 * Ensures all file operations are within the project root for security.
 * @module scripts/tree
 *
 * @example
 * // Generate tree with default settings:
 * // npm run tree
 *
 * @example
 * // Specify custom output path and depth:
 * // ts-node --esm scripts/tree.ts ./documentation/structure.md --depth=3
 */

import fs from "fs/promises";
import path from "path";
import type { Dirent } from "fs";

const projectRoot = process.cwd();
let outputPathArg = "docs/tree.md"; // Default output path
let maxDepthArg = Infinity;

/**
 * Represents a processed .gitignore pattern.
 * @property pattern - The original glob pattern (without negation prefix).
 * @property negated - True if the original pattern was negated (e.g., !pattern).
 * @property regex - A string representation of the regex derived from the glob pattern.
 */
interface GitignorePattern {
  pattern: string;
  negated: boolean;
  regex: string;
}

const args = process.argv.slice(2);
if (args.includes("--help")) {
  console.log(`
Generate Tree - Project directory structure visualization tool

Usage:
  ts-node --esm scripts/tree.ts [output-path] [--depth=<number>] [--help]

Options:
  output-path      Custom file path for the tree output (relative to project root, default: docs/tree.md)
  --depth=<number> Maximum directory depth to display (default: unlimited)
  --help           Show this help message
`);
  process.exit(0);
}

args.forEach((arg) => {
  if (arg.startsWith("--depth=")) {
    const depthValue = parseInt(arg.split("=")[1], 10);
    if (!isNaN(depthValue) && depthValue >= 0) {
      maxDepthArg = depthValue;
    } else {
      console.warn(`Invalid depth value: "${arg}". Using unlimited depth.`);
    }
  } else if (!arg.startsWith("--")) {
    outputPathArg = arg;
  }
});

const DEFAULT_IGNORE_PATTERNS: string[] = [
  ".git",
  "node_modules",
  ".DS_Store",
  "dist",
  "build",
  "logs", // Added logs as a common default ignore
];

/**
 * Loads and parses patterns from the .gitignore file at the project root.
 * @returns A promise resolving to an array of GitignorePattern objects.
 */
async function loadGitignorePatterns(): Promise<GitignorePattern[]> {
  const gitignorePath = path.join(projectRoot, ".gitignore");
  try {
    // Security: Ensure we read only from within the project root
    if (!path.resolve(gitignorePath).startsWith(projectRoot + path.sep)) {
      console.warn(
        "Warning: Attempted to read .gitignore outside project root. Using default ignore patterns only.",
      );
      return [];
    }
    const gitignoreContent = await fs.readFile(gitignorePath, "utf-8");
    return gitignoreContent
      .split("\n")
      .map((line) => line.trim())
      .filter((line) => line && !line.startsWith("#"))
      .map((patternLine) => {
        const negated = patternLine.startsWith("!");
        const pattern = negated ? patternLine.slice(1) : patternLine;
        // Simplified glob to regex conversion. For full gitignore spec, a library might be better.
        // This handles basic wildcards '*' and directory indicators '/'.
        const regexString = pattern
          .replace(/[.+?^${}()|[\]\\]/g, "\\$&") // Escape standard regex special chars
          .replace(/\*\*/g, "\u0000") // Temporarily tokenize '**' so the single-'*' rule below cannot rewrite its output
          .replace(/\*/g, "[^/]*") // Handle '*' as 'match anything except slashes'
          .replace(/\u0000/g, ".*") // Restore '**' as 'match anything including slashes'
          .replace(/\/$/, "(/.*)?"); // Handle trailing slash for directories
        return {
          pattern: pattern,
          negated: negated,
          regex: regexString,
        };
      });
  } catch (error: unknown) {
    const err = error as NodeJS.ErrnoException | undefined;
    if (err?.code === "ENOENT") {
      console.warn(
        "Info: No .gitignore file found at project root. Using default ignore patterns only.",
      );
    } else {
      console.error(
        `Error reading .gitignore: ${err?.message ?? String(error)}`,
      );
    }
    return [];
  }
}

/**
 * Checks if a given path should be ignored based on default and .gitignore patterns.
 * @param entryPath - The absolute path to the file or directory entry.
 * @param ignorePatterns - An array of GitignorePattern objects.
 * @returns True if the path should be ignored, false otherwise.
 */
function isIgnored(
  entryPath: string,
  ignorePatterns: GitignorePattern[],
): boolean {
  const relativePath = path.relative(projectRoot, entryPath);
  const baseName = path.basename(relativePath); // Get the file/directory name

  // Check default patterns:
  // - If the baseName itself is in DEFAULT_IGNORE_PATTERNS (e.g., ".DS_Store")
  // - Or if the relativePath starts with a default pattern that is a directory (e.g., "node_modules/")
  //   followed by a path separator, or if the relativePath exactly matches the pattern.
  if (
    DEFAULT_IGNORE_PATTERNS.some((p) => {
      if (p === baseName) return true; // Matches ".DS_Store" as a filename anywhere
      // For directory-like patterns in DEFAULT_IGNORE_PATTERNS (e.g. "node_modules", ".git")
      if (relativePath.startsWith(p + path.sep) || relativePath === p)
        return true;
      return false;
    })
  ) {
    return true;
  }

  let ignoredByGitignore = false;
  for (const { negated, regex } of ignorePatterns) {
    // Test regex against the start of the relative path for directories, or full match for files.
    const regexPattern = new RegExp(`^${regex}(/|$)`);
    if (regexPattern.test(relativePath)) {
      ignoredByGitignore = !negated; // If negated, a match means it's NOT ignored by this rule.
    }
  }
  return ignoredByGitignore;
}

/**
 * Recursively generates a string representation of the directory tree.
 * @param dir - The absolute path of the directory to traverse.
 * @param ignorePatterns - Patterns to ignore.
 * @param prefix - String prefix for formatting the tree lines.
 * @param currentDepth - Current depth of traversal.
 * @returns A promise resolving to the tree string.
 */
async function generateTree(
  dir: string,
  ignorePatterns: GitignorePattern[],
  prefix = "",
  currentDepth = 0,
): Promise<string> {
  const resolvedDir = path.resolve(dir);
  if (
    !resolvedDir.startsWith(projectRoot + path.sep) &&
    resolvedDir !== projectRoot
  ) {
    console.warn(
      `Security: Skipping directory outside project root: ${resolvedDir}`,
    );
    return "";
  }

  if (currentDepth > maxDepthArg) {
    return "";
  }

  let entries: Dirent[];
  try {
    entries = await fs.readdir(resolvedDir, {
      withFileTypes: true,
    });
  } catch (error: unknown) {
    const err = error as NodeJS.ErrnoException | undefined;
    console.error(
      `Error reading directory ${resolvedDir}: ${err?.message ?? String(error)}`,
    );
    return "";
  }

  let output = "";
  const filteredEntries = entries
    .filter(
      (entry) => !isIgnored(path.join(resolvedDir, entry.name), ignorePatterns),
    )
    .sort((a, b) => {
      if (a.isDirectory() && !b.isDirectory()) return -1;
      if (!a.isDirectory() && b.isDirectory()) return 1;
      return a.name.localeCompare(b.name);
    });

  for (let i = 0; i < filteredEntries.length; i++) {
    const entry = filteredEntries[i];
    const isLastEntry = i === filteredEntries.length - 1;
    const connector = isLastEntry ? "└── " : "├── ";
    const newPrefix = prefix + (isLastEntry ? "    " : "│   ");

    output += prefix + connector + entry.name + "\n";

    if (entry.isDirectory()) {
      output += await generateTree(
        path.join(resolvedDir, entry.name),
        ignorePatterns,
        newPrefix,
        currentDepth + 1,
      );
    }
  }
  return output;
}

/**
 * Main function to orchestrate loading ignore patterns, generating the tree,
 * and writing it to the specified output file.
 */
const writeTreeToFile = async (): Promise<void> => {
  try {
    const projectName = path.basename(projectRoot);
    const ignorePatterns = await loadGitignorePatterns();
    const resolvedOutputFile = path.resolve(projectRoot, outputPathArg);

    // Security Validation for Output Path
    if (!resolvedOutputFile.startsWith(projectRoot + path.sep)) {
      console.error(
        `Error: Output path "${outputPathArg}" resolves outside the project directory: ${resolvedOutputFile}. Aborting.`,
      );
      process.exit(1);
    }
    const resolvedOutputDir = path.dirname(resolvedOutputFile);
    if (
      !resolvedOutputDir.startsWith(projectRoot + path.sep) &&
      resolvedOutputDir !== projectRoot
    ) {
      console.error(
        `Error: Output directory "${resolvedOutputDir}" is outside the project directory. Aborting.`,
      );
      process.exit(1);
    }

    console.log(`Generating directory tree for project: ${projectName}`);
    console.log(`Output will be saved to: ${resolvedOutputFile}`);
    if (maxDepthArg !== Infinity) {
      console.log(`Maximum depth set to: ${maxDepthArg}`);
    }

    const treeContent = await generateTree(projectRoot, ignorePatterns, "", 0);

    try {
      await fs.access(resolvedOutputDir);
    } catch {
      console.log(`Output directory not found. Creating: ${resolvedOutputDir}`);
      await fs.mkdir(resolvedOutputDir, { recursive: true });
    }

    const timestamp = new Date()
      .toISOString()
      .replace(/T/, " ")
      .replace(/\..+/, "");
    const fileHeader = `# ${projectName} - Directory Structure\n\nGenerated on: ${timestamp}\n`;
    const depthInfo =
      maxDepthArg !== Infinity
        ? `\n_Depth limited to ${maxDepthArg} levels_\n\n`
        : "\n";
    const treeBlock = `\`\`\`\n${projectName}\n${treeContent}\`\`\`\n`;
    const fileFooter = `\n_Note: This tree excludes files and directories matched by .gitignore and default patterns._\n`;
    const finalContent = fileHeader + depthInfo + treeBlock + fileFooter;

    await fs.writeFile(resolvedOutputFile, finalContent);
    console.log(
      `Successfully generated tree structure in: ${resolvedOutputFile}`,
    );
  } catch (error) {
    console.error(
      `Error generating tree: ${error instanceof Error ? error.message : String(error)}`,
    );
    process.exit(1);
  }
};

writeTreeToFile();

```
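
To make the simplified glob-to-regex conversion in `loadGitignorePatterns` concrete, here is a small standalone sketch of what a few representative `.gitignore` lines (chosen for illustration only) become under that replacement chain:

```typescript
// Mirrors the order-sensitive replacement chain used above.
const toRegexString = (pattern: string): string =>
  pattern
    .replace(/[.+?^${}()|[\]\\]/g, "\\$&")
    .replace(/\*\*/g, "\u0000")
    .replace(/\*/g, "[^/]*")
    .replace(/\u0000/g, ".*")
    .replace(/\/$/, "(/.*)?");

console.log(toRegexString("dist/")); // dist(/.*)?
console.log(toRegexString("*.log")); // [^/]*\.log
console.log(toRegexString("docs/**")); // docs/.*
```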

--------------------------------------------------------------------------------
/src/utils/security/idGenerator.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Provides a utility class `IdGenerator` for creating customizable, prefixed unique identifiers,
 * and a standalone `generateUUID` function for generating standard UUIDs.
 * The `IdGenerator` supports entity-specific prefixes, custom character sets, and lengths.
 *
 * Note: Logging has been removed from this module to prevent circular dependencies
 * with the `requestContextService`, which itself uses `generateUUID` from this module.
 * This was causing `ReferenceError: Cannot access 'generateUUID' before initialization`
 * during application startup.
 * @module src/utils/security/idGenerator
 */
import { randomUUID as cryptoRandomUUID, randomBytes } from "crypto";
import { BaseErrorCode, McpError } from "../../types-global/errors.js";
// Removed: import { logger, requestContextService } from "../index.js";

/**
 * Defines the structure for configuring entity prefixes.
 * Keys are entity type names (e.g., "project", "task"), and values are their corresponding ID prefixes (e.g., "PROJ", "TASK").
 */
export interface EntityPrefixConfig {
  [key: string]: string;
}

/**
 * Defines options for customizing ID generation.
 */
export interface IdGenerationOptions {
  length?: number;
  separator?: string;
  charset?: string;
}

/**
 * A generic ID Generator class for creating and managing unique, prefixed identifiers.
 * Allows defining custom prefixes, generating random strings, and validating/normalizing IDs.
 */
export class IdGenerator {
  /**
   * Default character set for the random part of the ID.
   * @private
   */
  private static DEFAULT_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  /**
   * Default separator character between prefix and random part.
   * @private
   */
  private static DEFAULT_SEPARATOR = "_";
  /**
   * Default length for the random part of the ID.
   * @private
   */
  private static DEFAULT_LENGTH = 6;

  /**
   * Stores the mapping of entity types to their prefixes.
   * @private
   */
  private entityPrefixes: EntityPrefixConfig = {};
  /**
   * Stores a reverse mapping from prefixes (case-insensitive) to entity types.
   * @private
   */
  private prefixToEntityType: Record<string, string> = {};

  /**
   * Constructs an `IdGenerator` instance.
   * @param entityPrefixes - An initial map of entity types to their prefixes.
   */
  constructor(entityPrefixes: EntityPrefixConfig = {}) {
    // Logging removed to prevent circular dependency with requestContextService.
    this.setEntityPrefixes(entityPrefixes);
  }

  /**
   * Sets or updates the entity prefix configuration and rebuilds the internal reverse lookup map.
   * @param entityPrefixes - A map where keys are entity type names and values are their desired ID prefixes.
   */
  public setEntityPrefixes(entityPrefixes: EntityPrefixConfig): void {
    // Logging removed.
    this.entityPrefixes = { ...entityPrefixes };

    this.prefixToEntityType = Object.entries(this.entityPrefixes).reduce(
      (acc, [type, prefix]) => {
        acc[prefix.toLowerCase()] = type; // Store lowercase for case-insensitive lookup
        return acc;
      },
      {} as Record<string, string>,
    );
  }

  /**
   * Retrieves a copy of the current entity prefix configuration.
   * @returns The current entity prefix configuration.
   */
  public getEntityPrefixes(): EntityPrefixConfig {
    return { ...this.entityPrefixes };
  }

  /**
   * Generates a cryptographically secure random string.
   * @param length - The desired length of the random string. Defaults to `IdGenerator.DEFAULT_LENGTH`.
   * @param charset - The character set to use. Defaults to `IdGenerator.DEFAULT_CHARSET`.
   * @returns The generated random string.
   */
  public generateRandomString(
    length: number = IdGenerator.DEFAULT_LENGTH,
    charset: string = IdGenerator.DEFAULT_CHARSET,
  ): string {
    let result = "";
    // Determine the largest multiple of charset.length that is less than or equal to 256
    // This is the threshold for rejection sampling to avoid bias.
    const maxValidByteValue = Math.floor(256 / charset.length) * charset.length;
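    // e.g., with the default 36-character charset: floor(256 / 36) * 36 = 252,
    // so byte values 252-255 are rejected; accepting them would bias the first
    // four charset characters (252 % 36 = 0, 253 % 36 = 1, ...).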

    while (result.length < length) {
      const byteBuffer = randomBytes(1); // Get one random byte
      const byte = byteBuffer[0];

      // If the byte is within the valid range (i.e., it won't introduce bias),
      // use it to select a character from the charset. Otherwise, discard and try again.
      if (byte !== undefined && byte < maxValidByteValue) {
        const charIndex = byte % charset.length;
        const char = charset[charIndex];
        if (char) {
          result += char;
        }
      }
    }
    return result;
  }

  /**
   * Generates a unique ID, optionally prepended with a prefix.
   * @param prefix - An optional prefix for the ID.
   * @param options - Optional parameters for ID generation (length, separator, charset).
   * @returns A unique identifier string.
   */
  public generate(prefix?: string, options: IdGenerationOptions = {}): string {
    // Logging removed.
    const {
      length = IdGenerator.DEFAULT_LENGTH,
      separator = IdGenerator.DEFAULT_SEPARATOR,
      charset = IdGenerator.DEFAULT_CHARSET,
    } = options;

    const randomPart = this.generateRandomString(length, charset);
    const generatedId = prefix
      ? `${prefix}${separator}${randomPart}`
      : randomPart;
    return generatedId;
  }

  /**
   * Generates a unique ID for a specified entity type, using its configured prefix.
   * @param entityType - The type of entity (must be registered).
   * @param options - Optional parameters for ID generation.
   * @returns A unique identifier string for the entity (e.g., "PROJ_A6B3J0").
   * @throws {McpError} If the `entityType` is not registered.
   */
  public generateForEntity(
    entityType: string,
    options: IdGenerationOptions = {},
  ): string {
    const prefix = this.entityPrefixes[entityType];
    if (!prefix) {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        `Unknown entity type: ${entityType}. No prefix registered.`,
      );
    }
    return this.generate(prefix, options);
  }

  /**
   * Validates if an ID conforms to the expected format for a specific entity type.
   * @param id - The ID string to validate.
   * @param entityType - The expected entity type of the ID.
   * @param options - Optional parameters used during generation for validation consistency.
   *                  The `charset` from these options will be used for validation.
   * @returns `true` if the ID is valid, `false` otherwise.
   */
  public isValid(
    id: string,
    entityType: string,
    options: IdGenerationOptions = {},
  ): boolean {
    const prefix = this.entityPrefixes[entityType];
    const {
      length = IdGenerator.DEFAULT_LENGTH,
      separator = IdGenerator.DEFAULT_SEPARATOR,
      charset = IdGenerator.DEFAULT_CHARSET, // Use charset from options or default
    } = options;

    if (!prefix) {
      return false;
    }

    // Build regex character class from the charset
    // Escape characters that have special meaning inside a regex character class `[]`
    const escapedCharsetForClass = charset.replace(/[[\]\\^-]/g, "\\$&");
    const charsetRegexPart = `[${escapedCharsetForClass}]`;

    const pattern = new RegExp(
      `^${this.escapeRegex(prefix)}${this.escapeRegex(separator)}${charsetRegexPart}{${length}}$`,
    );
    return pattern.test(id);
  }

  /**
   * Escapes special characters in a string for use in a regular expression.
   * @param str - The string to escape.
   * @returns The escaped string.
   * @private
   */
  private escapeRegex(str: string): string {
    return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  }

  /**
   * Strips the prefix and separator from an ID string.
   * @param id - The ID string (e.g., "PROJ_A6B3J0").
   * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
   * @returns The ID part without the prefix, or the original ID if separator not found.
   */
  public stripPrefix(
    id: string,
    separator: string = IdGenerator.DEFAULT_SEPARATOR,
  ): string {
    const parts = id.split(separator);
    return parts.length > 1 ? parts.slice(1).join(separator) : id; // Handle separators in random part
  }

  /**
   * Determines the entity type from an ID string by its prefix (case-insensitive).
   * @param id - The ID string (e.g., "PROJ_A6B3J0").
   * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
   * @returns The determined entity type.
   * @throws {McpError} If ID format is invalid or prefix is unknown.
   */
  public getEntityType(
    id: string,
    separator: string = IdGenerator.DEFAULT_SEPARATOR,
  ): string {
    const parts = id.split(separator);
    if (parts.length < 2 || !parts[0]) {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        `Invalid ID format: ${id}. Expected format like: PREFIX${separator}RANDOMPART`,
      );
    }

    const prefix = parts[0];
    const entityType = this.prefixToEntityType[prefix.toLowerCase()];

    if (!entityType) {
      throw new McpError(
        BaseErrorCode.VALIDATION_ERROR,
        `Unknown entity type for prefix: ${prefix}`,
      );
    }
    return entityType;
  }

  /**
   * Normalizes an entity ID to ensure the prefix matches the registered case
   * and the random part is uppercase. Note: This assumes the charset characters
   * have a meaningful uppercase version if case-insensitivity is desired for the random part.
   * For default charset (A-Z0-9), this is fine. For custom charsets, behavior might vary.
   * @param id - The ID to normalize (e.g., "proj_a6b3j0").
   * @param separator - The separator used in the ID. Defaults to `IdGenerator.DEFAULT_SEPARATOR`.
   * @returns The normalized ID (e.g., "PROJ_A6B3J0").
   * @throws {McpError} If the entity type cannot be determined from the ID.
   */
  public normalize(
    id: string,
    separator: string = IdGenerator.DEFAULT_SEPARATOR,
  ): string {
    const entityType = this.getEntityType(id, separator);
    const registeredPrefix = this.entityPrefixes[entityType];
    const idParts = id.split(separator);
    const randomPart = idParts.slice(1).join(separator);

    // Consider if randomPart.toUpperCase() is always correct for custom charsets.
    // For now, maintaining existing behavior.
    return `${registeredPrefix}${separator}${randomPart.toUpperCase()}`;
  }
}

/**
 * Default singleton instance of the `IdGenerator`.
 * Configure it by calling `idGenerator.setEntityPrefixes({ ... })` with an entity-to-prefix map.
 */
export const idGenerator = new IdGenerator();

/**
 * Generates a standard Version 4 UUID (Universally Unique Identifier).
 * Uses the Node.js `crypto` module. This function is independent of the IdGenerator instance
 * to prevent circular dependencies when used by other utilities like requestContextService.
 * @returns A new UUID string.
 */
export const generateUUID = (): string => {
  return cryptoRandomUUID();
};

```
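
A brief usage sketch of the generator above (the entity names and prefixes here are hypothetical):

```typescript
import { IdGenerator } from "./idGenerator.js";

// Hypothetical entity-to-prefix map for illustration.
const ids = new IdGenerator({ project: "PROJ", task: "TASK" });

const projectId = ids.generateForEntity("project"); // e.g. "PROJ_A6B3J0"
ids.isValid(projectId, "project"); // true
ids.getEntityType(projectId); // "project"
ids.normalize("proj_a6b3j0"); // "PROJ_A6B3J0" (assuming "proj" maps to "project")
```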

--------------------------------------------------------------------------------
/src/config/index.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Loads, validates, and exports application configuration.
 * This module centralizes configuration management, sourcing values from
 * environment variables and `package.json`. It uses Zod for schema validation
 * to ensure type safety and correctness of configuration parameters.
 *
 * @module src/config/index
 */

import dotenv from "dotenv";
import { existsSync, mkdirSync, readFileSync, statSync } from "fs";
import path, { dirname, join } from "path";
import { fileURLToPath } from "url";
import { z } from "zod";

dotenv.config();

// --- Determine Project Root ---
const findProjectRoot = (startDir: string): string => {
  let currentDir = startDir;
  // If the start directory is in `dist`, start searching from the parent directory.
  if (path.basename(currentDir) === "dist") {
    currentDir = path.dirname(currentDir);
  }
  while (true) {
    const packageJsonPath = join(currentDir, "package.json");
    if (existsSync(packageJsonPath)) {
      return currentDir;
    }
    const parentDir = dirname(currentDir);
    if (parentDir === currentDir) {
      throw new Error(
        `Could not find project root (package.json) starting from ${startDir}`,
      );
    }
    currentDir = parentDir;
  }
};
let projectRoot: string;
try {
  const currentModuleDir = dirname(fileURLToPath(import.meta.url));
  projectRoot = findProjectRoot(currentModuleDir);
} catch (error: unknown) {
  const errorMessage = error instanceof Error ? error.message : String(error);
  console.error(`FATAL: Error determining project root: ${errorMessage}`);
  projectRoot = process.cwd();
  if (process.stdout.isTTY) {
    console.warn(
      `Warning: Using process.cwd() (${projectRoot}) as fallback project root.`,
    );
  }
}
// --- End Determine Project Root ---

/**
 * Loads and parses the package.json file from the project root.
 * @returns The parsed package.json object or a fallback default.
 * @private
 */
const loadPackageJson = (): {
  name: string;
  version: string;
  description: string;
} => {
  const pkgPath = join(projectRoot, "package.json");
  const fallback = {
    name: "pubmed-mcp-server",
    version: "0.0.0",
    description: "No description provided.",
  };

  if (!existsSync(pkgPath)) {
    if (process.stdout.isTTY) {
      console.warn(
        `Warning: package.json not found at ${pkgPath}. Using fallback values. This is expected in some environments (e.g., Docker) but may indicate an issue with project root detection.`,
      );
    }
    return fallback;
  }

  try {
    const fileContents = readFileSync(pkgPath, "utf-8");
    const parsed = JSON.parse(fileContents);
    return {
      name: typeof parsed.name === "string" ? parsed.name : fallback.name,
      version:
        typeof parsed.version === "string" ? parsed.version : fallback.version,
      description:
        typeof parsed.description === "string"
          ? parsed.description
          : fallback.description,
    };
  } catch (error) {
    if (process.stdout.isTTY) {
      console.error(
        "Warning: Could not read or parse package.json. Using hardcoded defaults.",
        error,
      );
    }
    return fallback;
  }
};

const pkg = loadPackageJson();

const EnvSchema = z
  .object({
    // Core Server Config
    MCP_SERVER_NAME: z.string().optional(),
    MCP_SERVER_VERSION: z.string().optional(),
    NODE_ENV: z.string().default("development"),

    // Logging
    MCP_LOG_LEVEL: z.string().default("debug"),
    LOGS_DIR: z.string().default(path.join(projectRoot, "logs")),

    // Transport
    MCP_TRANSPORT_TYPE: z.enum(["stdio", "http"]).default("stdio"),
    MCP_SESSION_MODE: z.enum(["stateless", "stateful", "auto"]).default("auto"),
    MCP_HTTP_PORT: z.coerce.number().int().positive().default(3017),
    MCP_HTTP_HOST: z.string().default("127.0.0.1"),
    MCP_HTTP_ENDPOINT_PATH: z.string().default("/mcp"),
    MCP_HTTP_MAX_PORT_RETRIES: z.coerce
      .number()
      .int()
      .nonnegative()
      .default(15),
    MCP_HTTP_PORT_RETRY_DELAY_MS: z.coerce
      .number()
      .int()
      .nonnegative()
      .default(50),
    MCP_STATEFUL_SESSION_STALE_TIMEOUT_MS: z.coerce
      .number()
      .int()
      .positive()
      .default(1_800_000),
    MCP_ALLOWED_ORIGINS: z.string().optional(),

    // Authentication
    MCP_AUTH_MODE: z.enum(["jwt", "oauth", "none"]).default("none"),
    MCP_AUTH_SECRET_KEY: z
      .string()
      .min(32, "MCP_AUTH_SECRET_KEY must be at least 32 characters long.")
      .optional(),
    OAUTH_ISSUER_URL: z.string().url().optional(),
    OAUTH_JWKS_URI: z.string().url().optional(),
    OAUTH_AUDIENCE: z.string().optional(),

    // Dev mode JWT
    DEV_MCP_CLIENT_ID: z.string().optional(),
    DEV_MCP_SCOPES: z.string().optional(),

    // NCBI E-utilities
    NCBI_API_KEY: z.string().optional(),
    NCBI_TOOL_IDENTIFIER: z.string().optional(),
    NCBI_ADMIN_EMAIL: z.string().email().optional(),
    NCBI_REQUEST_DELAY_MS: z.coerce.number().int().positive().optional(),
    NCBI_MAX_RETRIES: z.coerce.number().int().nonnegative().default(3),

    // --- START: OpenTelemetry Configuration ---
    /** If 'true', OpenTelemetry will be initialized and enabled. Default: 'false'. */
    OTEL_ENABLED: z
      .string()
      .transform((v) => v.toLowerCase() === "true")
      .default("false"),
    /** The logical name of the service. Defaults to MCP_SERVER_NAME or package name. */
    OTEL_SERVICE_NAME: z.string().optional(),
    /** The version of the service. Defaults to MCP_SERVER_VERSION or package version. */
    OTEL_SERVICE_VERSION: z.string().optional(),
    /** The OTLP endpoint for traces. If not set, traces are logged to a file in development. */
    OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: z.string().url().optional(),
    /** The OTLP endpoint for metrics. If not set, metrics are not exported. */
    OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: z.string().url().optional(),
    /** Sampling ratio for traces (0.0 to 1.0). 1.0 means sample all. Default: 1.0 */
    OTEL_TRACES_SAMPLER_ARG: z.coerce.number().min(0).max(1).default(1.0),
    /** Log level for OpenTelemetry's internal diagnostic logger. Default: "INFO". */
    OTEL_LOG_LEVEL: z
      .enum(["NONE", "ERROR", "WARN", "INFO", "DEBUG", "VERBOSE", "ALL"])
      .default("INFO"),
  })
  .superRefine((data, ctx) => {
    if (
      data.NODE_ENV === "production" &&
      data.MCP_TRANSPORT_TYPE === "http" &&
      data.MCP_AUTH_MODE === "jwt" &&
      !data.MCP_AUTH_SECRET_KEY
    ) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        path: ["MCP_AUTH_SECRET_KEY"],
        message:
          "MCP_AUTH_SECRET_KEY is required for 'jwt' auth in production with 'http' transport.",
      });
    }
    if (data.MCP_AUTH_MODE === "oauth") {
      if (!data.OAUTH_ISSUER_URL) {
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          path: ["OAUTH_ISSUER_URL"],
          message: "OAUTH_ISSUER_URL is required for 'oauth' mode.",
        });
      }
      if (!data.OAUTH_AUDIENCE) {
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          path: ["OAUTH_AUDIENCE"],
          message: "OAUTH_AUDIENCE is required for 'oauth' mode.",
        });
      }
    }
  });

const parsedEnv = EnvSchema.safeParse(process.env);

if (!parsedEnv.success) {
  if (process.stdout.isTTY) {
    console.error(
      "❌ Invalid environment variables:",
      parsedEnv.error.flatten().fieldErrors,
    );
  }
}

const env = parsedEnv.success ? parsedEnv.data : EnvSchema.parse({});

const ensureDirectory = (
  dirPath: string,
  rootDir: string,
  dirName: string,
): string | null => {
  const resolvedDirPath = path.isAbsolute(dirPath)
    ? dirPath
    : path.resolve(rootDir, dirPath);

  if (
    !resolvedDirPath.startsWith(rootDir + path.sep) &&
    resolvedDirPath !== rootDir
  ) {
    if (process.stdout.isTTY) {
      console.error(
        `Error: ${dirName} path "${dirPath}" resolves to "${resolvedDirPath}", which is outside the project boundary "${rootDir}".`,
      );
    }
    return null;
  }

  if (!existsSync(resolvedDirPath)) {
    try {
      mkdirSync(resolvedDirPath, { recursive: true });
      if (process.stdout.isTTY) {
        console.log(`Created ${dirName} directory: ${resolvedDirPath}`);
      }
    } catch (err: unknown) {
      const errorMessage = err instanceof Error ? err.message : String(err);
      if (process.stdout.isTTY) {
        console.error(
          `Error creating ${dirName} directory at ${resolvedDirPath}: ${errorMessage}`,
        );
      }
      return null;
    }
  } else {
    try {
      const stats = statSync(resolvedDirPath);
      if (!stats.isDirectory()) {
        if (process.stdout.isTTY) {
          console.error(
            `Error: ${dirName} path ${resolvedDirPath} exists but is not a directory.`,
          );
        }
        return null;
      }
    } catch (statError: unknown) {
      const errorMessage =
        statError instanceof Error
          ? statError.message
          : "An unknown error occurred";
      if (process.stdout.isTTY) {
        console.error(
          `Error accessing ${dirName} path ${resolvedDirPath}: ${errorMessage}`,
        );
      }
      return null;
    }
  }
  return resolvedDirPath;
};

let validatedLogsPath: string | null = ensureDirectory(
  env.LOGS_DIR,
  projectRoot,
  "logs",
);

if (!validatedLogsPath) {
  if (process.stdout.isTTY) {
    console.warn(
      `Warning: Custom logs directory ('${env.LOGS_DIR}') is invalid or outside the project boundary. Falling back to default.`,
    );
  }
  const defaultLogsDir = path.join(projectRoot, "logs");
  validatedLogsPath = ensureDirectory(defaultLogsDir, projectRoot, "logs");

  if (!validatedLogsPath) {
    if (process.stdout.isTTY) {
      console.warn(
        "Warning: Default logs directory could not be created. File logging will be disabled.",
      );
    }
  }
}

export const config = {
  pkg,
  mcpServerName: env.MCP_SERVER_NAME || pkg.name,
  mcpServerVersion: env.MCP_SERVER_VERSION || pkg.version,
  mcpServerDescription: pkg.description,
  logLevel: env.MCP_LOG_LEVEL,
  logsPath: validatedLogsPath,
  environment: env.NODE_ENV,
  mcpTransportType: env.MCP_TRANSPORT_TYPE,
  mcpSessionMode: env.MCP_SESSION_MODE,
  mcpHttpPort: env.MCP_HTTP_PORT,
  mcpHttpHost: env.MCP_HTTP_HOST,
  mcpHttpEndpointPath: env.MCP_HTTP_ENDPOINT_PATH,
  mcpHttpMaxPortRetries: env.MCP_HTTP_MAX_PORT_RETRIES,
  mcpHttpPortRetryDelayMs: env.MCP_HTTP_PORT_RETRY_DELAY_MS,
  mcpStatefulSessionStaleTimeoutMs: env.MCP_STATEFUL_SESSION_STALE_TIMEOUT_MS,
  mcpAllowedOrigins: env.MCP_ALLOWED_ORIGINS?.split(",")
    .map((o) => o.trim())
    .filter(Boolean),
  mcpAuthMode: env.MCP_AUTH_MODE,
  mcpAuthSecretKey: env.MCP_AUTH_SECRET_KEY,
  oauthIssuerUrl: env.OAUTH_ISSUER_URL,
  oauthJwksUri: env.OAUTH_JWKS_URI,
  oauthAudience: env.OAUTH_AUDIENCE,
  devMcpClientId: env.DEV_MCP_CLIENT_ID,
  devMcpScopes: env.DEV_MCP_SCOPES?.split(",").map((s) => s.trim()),
  ncbiApiKey: env.NCBI_API_KEY,
  ncbiToolIdentifier:
    env.NCBI_TOOL_IDENTIFIER ||
    `${env.MCP_SERVER_NAME || pkg.name}/${env.MCP_SERVER_VERSION || pkg.version}`,
  ncbiAdminEmail: env.NCBI_ADMIN_EMAIL,
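  // Default inter-request delay follows NCBI's published E-utilities limits:
  // roughly 10 requests/sec with an API key (100 ms) vs 3 requests/sec without (334 ms).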
  ncbiRequestDelayMs:
    env.NCBI_REQUEST_DELAY_MS ?? (env.NCBI_API_KEY ? 100 : 334),
  ncbiMaxRetries: env.NCBI_MAX_RETRIES,
  openTelemetry: {
    enabled: env.OTEL_ENABLED,
    serviceName: env.OTEL_SERVICE_NAME || env.MCP_SERVER_NAME || pkg.name,
    serviceVersion:
      env.OTEL_SERVICE_VERSION || env.MCP_SERVER_VERSION || pkg.version,
    tracesEndpoint: env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,
    metricsEndpoint: env.OTEL_EXPORTER_OTLP_METRICS_ENDPOINT,
    samplingRatio: env.OTEL_TRACES_SAMPLER_ARG,
    logLevel: env.OTEL_LOG_LEVEL,
  },
};

export const logLevel: string = config.logLevel;
export const environment: string = config.environment;

```
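
A quick sketch of consuming the validated configuration elsewhere in the codebase (the import path and log line are illustrative):

```typescript
import { config, environment } from "../config/index.js";

// Every field below has already been validated and defaulted by the Zod schema.
if (config.mcpTransportType === "http") {
  console.log(
    `Starting ${config.mcpServerName} v${config.mcpServerVersion} ` +
      `on ${config.mcpHttpHost}:${config.mcpHttpPort} (${environment})`,
  );
}
```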

--------------------------------------------------------------------------------
/src/types-global/pubmedXml.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Global TypeScript type definitions for PubMed XML structures.
 * These types are used for parsing data returned by NCBI E-utilities,
 * particularly from EFetch for PubMed articles and ESummary.
 * @module src/types-global/pubmedXml
 */

// Basic type for elements that primarily contain text but might have attributes
export interface XmlTextElement {
  "#text"?: string;
  [key: string]: unknown; // For attributes like _UI, _MajorTopicYN, _EIdType, _ValidYN, _IdType, Label, NlmCategory, _DateType
}

// Specific XML element types based on PubMed DTD (simplified)

export type XmlPMID = XmlTextElement; // e.g., <PMID Version="1">12345</PMID>

export interface XmlArticleDate extends XmlTextElement {
  Year?: XmlTextElement;
  Month?: XmlTextElement;
  Day?: XmlTextElement;
  _DateType?: string;
}

export interface XmlAuthor {
  LastName?: XmlTextElement;
  ForeName?: XmlTextElement;
  Initials?: XmlTextElement;
  AffiliationInfo?: {
    Affiliation?: XmlTextElement;
  }[];
  Identifier?: XmlTextElement[]; // For ORCID etc.
  CollectiveName?: XmlTextElement; // For group authors
}

export interface XmlAuthorList {
  Author?: XmlAuthor[] | XmlAuthor;
  _CompleteYN?: "Y" | "N";
}

export interface XmlPublicationType extends XmlTextElement {
  _UI?: string;
}

export interface XmlPublicationTypeList {
  PublicationType: XmlPublicationType[] | XmlPublicationType;
}

export interface XmlELocationID extends XmlTextElement {
  _EIdType?: string; // "doi", "pii"
  _ValidYN?: "Y" | "N";
}

export interface XmlArticleId extends XmlTextElement {
  _IdType?: string; // "doi", "pubmed", "pmc", "mid", etc.
}

export interface XmlArticleIdList {
  ArticleId: XmlArticleId[] | XmlArticleId;
}

export interface XmlAbstractText extends XmlTextElement {
  Label?: string;
  NlmCategory?: string; // e.g., "BACKGROUND", "METHODS", "RESULTS", "CONCLUSIONS"
}

export interface XmlAbstract {
  AbstractText: XmlAbstractText[] | XmlAbstractText;
  CopyrightInformation?: XmlTextElement;
}

export interface XmlPagination {
  MedlinePgn?: XmlTextElement; // e.g., "10-5" or "e123"
  StartPage?: XmlTextElement;
  EndPage?: XmlTextElement;
}

export interface XmlPubDate {
  Year?: XmlTextElement;
  Month?: XmlTextElement;
  Day?: XmlTextElement;
  MedlineDate?: XmlTextElement; // e.g., "2000 Spring", "1999-2000"
}

export interface XmlJournalIssue {
  Volume?: XmlTextElement;
  Issue?: XmlTextElement;
  PubDate?: XmlPubDate;
  _CitedMedium?: string; // "Internet" or "Print"
}

export interface XmlJournal {
  ISSN?: XmlTextElement & { _IssnType?: string };
  JournalIssue?: XmlJournalIssue;
  Title?: XmlTextElement; // Full Journal Title
  ISOAbbreviation?: XmlTextElement; // Journal Abbreviation
}

export interface XmlArticle {
  Journal?: XmlJournal;
  ArticleTitle?: XmlTextElement | string; // Can be just string or object with #text
  Pagination?: XmlPagination;
  ELocationID?: XmlELocationID[] | XmlELocationID;
  Abstract?: XmlAbstract;
  AuthorList?: XmlAuthorList;
  Language?: XmlTextElement[] | XmlTextElement; // Array of languages
  GrantList?: XmlGrantList;
  PublicationTypeList?: XmlPublicationTypeList;
  ArticleDate?: XmlArticleDate[] | XmlArticleDate;
  ArticleIdList?: XmlArticleIdList;
  KeywordList?: XmlKeywordList[] | XmlKeywordList; // Can have multiple KeywordList elements
  // Other elements like VernacularTitle, DataBankList, etc.
}

export interface XmlMeshQualifierName extends XmlTextElement {
  _UI?: string;
  _MajorTopicYN?: "Y" | "N";
}
export interface XmlMeshDescriptorName extends XmlTextElement {
  _UI?: string;
  _MajorTopicYN?: "Y" | "N";
}

export interface XmlMeshHeading {
  DescriptorName: XmlMeshDescriptorName;
  QualifierName?: XmlMeshQualifierName[] | XmlMeshQualifierName;
  _MajorTopicYN?: "Y" | "N"; // Can also be at the root of MeshHeading
}

export interface XmlMeshHeadingList {
  MeshHeading: XmlMeshHeading[] | XmlMeshHeading;
}

export interface XmlKeyword extends XmlTextElement {
  _MajorTopicYN?: "Y" | "N";
  _Owner?: string; // NLM, NLM-AUTO, PIP, KIE, NOTNLM, NASA, HHS
}

export interface XmlKeywordList {
  Keyword: XmlKeyword[] | XmlKeyword;
  _Owner?: string;
}

export interface XmlGrant {
  GrantID?: XmlTextElement;
  Acronym?: XmlTextElement;
  Agency?: XmlTextElement;
  Country?: XmlTextElement;
}

export interface XmlGrantList {
  Grant: XmlGrant[] | XmlGrant;
  _CompleteYN?: "Y" | "N";
}

export interface XmlMedlineCitation {
  PMID: XmlPMID;
  DateCreated?: XmlArticleDate;
  DateCompleted?: XmlArticleDate;
  DateRevised?: XmlArticleDate;
  Article?: XmlArticle;
  MeshHeadingList?: XmlMeshHeadingList;
  KeywordList?: XmlKeywordList[] | XmlKeywordList; // Can be an array of KeywordList
  GeneralNote?: (XmlTextElement & { _Owner?: string })[];
  CitationSubset?: XmlTextElement[] | XmlTextElement;
  MedlinePgn?: XmlTextElement; // For page numbers, sometimes here
  // Other elements like CommentsCorrectionsList, GeneSymbolList, etc.
  _Owner?: string; // e.g., "NLM", "NASA", "PIP", "KIE", "HSR", "HMD", "NOTNLM"
  _Status?: string; // e.g., "MEDLINE", "PubMed-not-MEDLINE", "In-Data-Review", "In-Process", "Publisher", "Completed"
}

export interface XmlPubmedArticle {
  MedlineCitation: XmlMedlineCitation;
  PubmedData?: {
    History?: {
      PubMedPubDate: (XmlArticleDate & { _PubStatus?: string })[];
    };
    PublicationStatus?: XmlTextElement;
    ArticleIdList?: XmlArticleIdList; // ArticleIdList can also be under PubmedData
    ReferenceList?: unknown; // Complex structure for references
  };
}

export interface XmlPubmedArticleSet {
  PubmedArticle?: XmlPubmedArticle[] | XmlPubmedArticle;
  DeleteCitation?: {
    PMID: XmlPMID[] | XmlPMID;
  };
  // Can also contain ErrorList or other elements if the request had issues
}

// Parsed object types (for application use, derived from XML types)

export interface ParsedArticleAuthor {
  lastName?: string;
  firstName?: string;
  initials?: string;
  affiliation?: string;
  collectiveName?: string;
}

export interface ParsedArticleDate {
  dateType?: string;
  year?: string;
  month?: string;
  day?: string;
}

export interface ParsedJournalPublicationDate {
  year?: string;
  month?: string;
  day?: string;
  medlineDate?: string;
}

export interface ParsedJournalInfo {
  title?: string;
  isoAbbreviation?: string;
  volume?: string;
  issue?: string;
  pages?: string;
  publicationDate?: ParsedJournalPublicationDate;
}

export interface ParsedMeshTerm {
  descriptorName?: string;
  descriptorUi?: string;
  qualifierName?: string;
  qualifierUi?: string;
  isMajorTopic: boolean;
}

export interface ParsedGrant {
  grantId?: string;
  agency?: string;
  country?: string;
}

export interface ParsedArticle {
  pmid: string;
  title?: string;
  abstractText?: string;
  authors?: ParsedArticleAuthor[];
  journalInfo?: ParsedJournalInfo;
  publicationTypes?: string[];
  keywords?: string[];
  meshTerms?: ParsedMeshTerm[];
  grantList?: ParsedGrant[];
  doi?: string;
  articleDates?: ParsedArticleDate[]; // Dates like 'received', 'accepted', 'revised'
  // Add other fields as needed, e.g., language, publication status
}

// ESummary specific types
// Based on ESummary v2.0 XML (DocSum) and JSON-like XML structure
// This is a common structure, but individual fields can vary.

/**
 * Represents a raw author entry as parsed from ESummary XML.
 * This type accounts for potential inconsistencies in property naming (e.g., Name/name)
 * and structure directly from the XML-to-JavaScript conversion.
 * It is intended for use as an intermediate type before normalization into ESummaryAuthor.
 */
export interface XmlESummaryAuthorRaw {
  Name?: string; // Primary name field (often "LastName Initials")
  name?: string; // Alternative casing for name

  AuthType?: string; // Author type (e.g., "Author")
  authtype?: string; // Alternative casing

  ClusterId?: string; // Cluster ID
  clusterid?: string; // Alternative casing

  "#text"?: string; // If the author is represented as a simple text node

  // Allow other properties as NCBI XML can be unpredictable
  [key: string]: unknown;
}

/**
 * Represents a normalized author entry after parsing from ESummary data.
 * This is the clean, canonical structure for application use.
 */
export interface ESummaryAuthor {
  name: string; // Standardized: "LastName Initials"
  authtype?: string; // Standardized: e.g., "Author"
  clusterid?: string; // Standardized
}

export interface ESummaryArticleId {
  idtype: string; // e.g., "pubmed", "doi", "pmc"
  idtypen: number;
  value: string;
  [key: string]: unknown; // For other attributes like _IdType (if parsed differently)
}

export interface ESummaryHistory {
  pubstatus: string; // e.g., "pubmed", "medline", "entrez"
  date: string; // Date string
}

// For the older DocSum <Item Name="..." Type="..."> structure
export interface ESummaryItem {
  "#text"?: string; // Value of the item
  Item?: ESummaryItem[] | ESummaryItem; // For nested lists
  _Name: string;
  _Type:
    | "String"
    | "Integer"
    | "Date"
    | "List"
    | "Structure"
    | "Unknown"
    | "ERROR";
  [key: string]: unknown; // Other attributes like idtype for ArticleIds
}

export interface ESummaryDocSumOldXml {
  Id: string; // PMID
  Item: ESummaryItem[];
}

// For the newer DocumentSummarySet structure (often from retmode=xml with version=2.0)
export interface ESummaryDocumentSummary {
  "@_uid": string; // PMID
  PubDate?: string;
  EPubDate?: string;
  Source?: string;
  Authors?:
    | XmlESummaryAuthorRaw[] // Array of raw author entries
    | { Author: XmlESummaryAuthorRaw[] | XmlESummaryAuthorRaw } // Object containing raw author entries
    | string; // Or a simple string for authors
  LastAuthor?: string;
  Title?: string;
  SortTitle?: string;
  Volume?: string;
  Issue?: string;
  Pages?: string;
  Lang?: string[];
  ISSN?: string;
  ESSN?: string;
  PubType?: string[]; // Array of publication types
  RecordStatus?: string;
  PubStatus?: string;
  ArticleIds?:
    | ESummaryArticleId[]
    | { ArticleId: ESummaryArticleId[] | ESummaryArticleId };
  History?:
    | ESummaryHistory[]
    | { PubMedPubDate: ESummaryHistory[] | ESummaryHistory };
  References?: unknown[]; // Usually empty or complex
  Attributes?: string[];
  DOI?: string; // Sometimes directly available
  FullJournalName?: string;
  SO?: string; // Source Abbreviation
  [key: string]: unknown; // For other dynamic fields
}

export interface ESummaryDocumentSummarySet {
  DocumentSummary: ESummaryDocumentSummary[] | ESummaryDocumentSummary;
}

export interface ESummaryResult {
  DocSum?: ESummaryDocSumOldXml[] | ESummaryDocSumOldXml; // Older XML format
  DocumentSummarySet?: ESummaryDocumentSummarySet; // Newer XML format
  ERROR?: string; // Error message if present
  [key: string]: unknown; // For other potential top-level elements like 'dbinfo'
}

export interface ESummaryResponseContainer {
  eSummaryResult: ESummaryResult;
  // header?: unknown; // If there's a header part in the response
}

// Parsed brief summary (application-level)
export interface ParsedBriefSummary {
  pmid: string;
  title?: string;
  authors?: string; // Formatted string
  source?: string;
  pubDate?: string; // Standardized YYYY-MM-DD
  epubDate?: string; // Standardized YYYY-MM-DD
  doi?: string;
}

// ESearch specific types
export interface ESearchResultIdList {
  Id: string[];
}

export interface ESearchTranslation {
  From: string;
  To: string;
}

export interface ESearchTranslationSet {
  Translation: ESearchTranslation[];
}

export interface ESearchWarningList {
  PhraseNotFound?: string[];
  QuotedPhraseNotFound?: string[];
  OutputMessage?: string[];
  FieldNotFound?: string[];
}
export interface ESearchErrorList {
  PhraseNotFound?: string[];
  FieldNotFound?: string[];
}

export interface ESearchResultContent {
  Count: string;
  RetMax: string;
  RetStart: string;
  QueryKey?: string;
  WebEnv?: string;
  IdList?: ESearchResultIdList;
  TranslationSet?: ESearchTranslationSet;
  TranslationStack?: unknown; // Usually complex, define if needed
  QueryTranslation: string;
  ErrorList?: ESearchErrorList;
  WarningList?: ESearchWarningList;
}

export interface ESearchResponseContainer {
  eSearchResult: ESearchResultContent;
  // header?: unknown;
}

// Fully parsed and typed result for ESearch
export interface ESearchResult {
  count: number;
  retmax: number;
  retstart: number;
  queryKey?: string;
  webEnv?: string;
  idList: string[];
  queryTranslation: string;
  errorList?: ESearchErrorList;
  warningList?: ESearchWarningList;
}

// Fully parsed and typed result for EFetch
export interface EFetchArticleSet {
  articles: ParsedArticle[];
  // Add any other top-level fields from the parsed EFetch result if necessary
}

```
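
Many of the XML types above are `T[] | T` unions because XML-to-JS parsers typically collapse single-element lists into a bare object. A hypothetical `ensureArray` helper illustrates the normalization these types imply:

```typescript
import type { XmlAuthor, XmlAuthorList } from "./pubmedXml.js";

// Hypothetical helper: flatten the recurring `T[] | T | undefined` union.
function ensureArray<T>(value: T[] | T | undefined): T[] {
  if (value === undefined) return [];
  return Array.isArray(value) ? value : [value];
}

// e.g. reading authors regardless of whether one or many were returned:
function authorsOf(list?: XmlAuthorList): XmlAuthor[] {
  return ensureArray(list?.Author);
}
```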

--------------------------------------------------------------------------------
/src/mcp-server/transports/http/httpTransport.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Configures and starts the HTTP MCP transport using Hono.
 * This file has been refactored to correctly integrate Hono's streaming
 * capabilities with the Model Context Protocol SDK's transport layer.
 * @module src/mcp-server/transports/http/httpTransport
 */

import { serve, ServerType } from "@hono/node-server";
import { Context, Hono, Next } from "hono";
import { cors } from "hono/cors";
import { stream } from "hono/streaming";
import http from "http";
import { config } from "../../../config/index.js";
import {
  logger,
  rateLimiter,
  RequestContext,
  requestContextService,
} from "../../../utils/index.js";
import { ServerInstanceInfo } from "../../server.js";
import { createAuthMiddleware, createAuthStrategy } from "../auth/index.js";
import { StatelessTransportManager } from "../core/statelessTransportManager.js";
import { TransportManager } from "../core/transportTypes.js";
import { StatefulTransportManager } from "./../core/statefulTransportManager.js";
import { httpErrorHandler } from "./httpErrorHandler.js";
import { HonoNodeBindings } from "./httpTypes.js";
import { mcpTransportMiddleware } from "./mcpTransportMiddleware.js";

const HTTP_PORT = config.mcpHttpPort;
const HTTP_HOST = config.mcpHttpHost;
const MCP_ENDPOINT_PATH = config.mcpHttpEndpointPath;

/**
 * Extracts the client IP address from the request, prioritizing common proxy headers.
 * @param c - The Hono context object.
 * @returns The client's IP address or a default string if not found.
 */
function getClientIp(c: Context<{ Bindings: HonoNodeBindings }>): string {
  const forwardedFor = c.req.header("x-forwarded-for");
  return (
    (forwardedFor?.split(",")[0] ?? "").trim() ||
    c.req.header("x-real-ip") ||
    "unknown_ip"
  );
}

/**
 * Checks whether a TCP port is already in use by attempting a temporary bind.
 * @param port - The port number to probe.
 * @param host - The host/interface to bind against.
 * @param parentContext - The parent request context for logging.
 * @returns A promise resolving to true if the port is in use, false otherwise.
 */
async function isPortInUse(
  port: number,
  host: string,
  parentContext: RequestContext,
): Promise<boolean> {
  const context = { ...parentContext, operation: "isPortInUse", port, host };
  logger.debug(`Checking if port ${port} is in use...`, context);
  return new Promise((resolve) => {
    const tempServer = http.createServer();
    tempServer
      .once("error", (err: NodeJS.ErrnoException) => {
        const inUse = err.code === "EADDRINUSE";
        logger.debug(
          `Port check resulted in error: ${err.code}. Port in use: ${inUse}`,
          context,
        );
        resolve(inUse);
      })
      .once("listening", () => {
        logger.debug(
          `Successfully bound to port ${port} temporarily. Port is not in use.`,
          context,
        );
        tempServer.close(() => resolve(false));
      })
      .listen(port, host);
  });
}

function startHttpServerWithRetry(
  app: Hono<{ Bindings: HonoNodeBindings }>,
  initialPort: number,
  host: string,
  maxRetries: number,
  parentContext: RequestContext,
): Promise<ServerType> {
  const startContext = {
    ...parentContext,
    operation: "startHttpServerWithRetry",
  };
  logger.info(
    `Attempting to start HTTP server on port ${initialPort} with ${maxRetries} retries.`,
    startContext,
  );

  return new Promise((resolve, reject) => {
    const tryBind = (port: number, attempt: number) => {
      const attemptContext = { ...startContext, port, attempt };
      if (attempt > maxRetries + 1) {
        const error = new Error(
          `Failed to bind to any port after ${maxRetries} retries.`,
        );
        logger.fatal(error.message, attemptContext);
        return reject(error);
      }

      isPortInUse(port, host, attemptContext)
        .then((inUse) => {
          if (inUse) {
            logger.warning(
              `Port ${port} is in use, retrying on port ${port + 1}...`,
              attemptContext,
            );
            setTimeout(
              () => tryBind(port + 1, attempt + 1),
              config.mcpHttpPortRetryDelayMs,
            );
            return;
          }

          try {
            const serverInstance = serve(
              { fetch: app.fetch, port, hostname: host },
              (info: { address: string; port: number }) => {
                const serverAddress = `http://${info.address}:${info.port}${MCP_ENDPOINT_PATH}`;
                logger.info(`HTTP transport listening at ${serverAddress}`, {
                  ...attemptContext,
                  address: serverAddress,
                  sessionMode: config.mcpSessionMode,
                });
                if (process.stdout.isTTY) {
                  console.log(`\n🚀 MCP Server running at: ${serverAddress}`);
                  console.log(`   Session Mode: ${config.mcpSessionMode}\n`);
                }
              },
            );
            resolve(serverInstance);
          } catch (err: unknown) {
            if (
              err &&
              typeof err === "object" &&
              "code" in err &&
              (err as { code: string }).code !== "EADDRINUSE"
            ) {
              const errorToLog =
                err instanceof Error ? err : new Error(String(err));
              logger.error(
                "An unexpected error occurred while starting the server.",
                errorToLog,
                attemptContext,
              );
              return reject(err);
            }
            logger.warning(
              `Encountered EADDRINUSE race condition on port ${port}, retrying...`,
              attemptContext,
            );
            setTimeout(
              () => tryBind(port + 1, attempt + 1),
              config.mcpHttpPortRetryDelayMs,
            );
          }
        })
        .catch((err) => {
          const error = err instanceof Error ? err : new Error(String(err));
          logger.fatal(
            "Failed to check if port is in use.",
            error,
            attemptContext,
          );
          reject(err);
        });
    };

    tryBind(initialPort, 1);
  });
}
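
// Usage sketch (hypothetical call site): with the config defaults above, this
// probes ports starting at config.mcpHttpPort and walks upward until a free
// port is found or maxRetries is exhausted, e.g.:
//
//   const server = await startHttpServerWithRetry(
//     app,
//     config.mcpHttpPort,
//     config.mcpHttpHost,
//     config.mcpHttpMaxPortRetries,
//     requestContextService.createRequestContext({ operation: "startup" }),
//   );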

function createTransportManager(
  createServerInstanceFn: () => Promise<ServerInstanceInfo>,
  sessionMode: string,
  context: RequestContext,
): TransportManager {
  const opContext = {
    ...context,
    operation: "createTransportManager",
    sessionMode,
  };
  logger.info(
    `Creating transport manager for session mode: ${sessionMode}`,
    opContext,
  );

  const statefulOptions = {
    staleSessionTimeoutMs: config.mcpStatefulSessionStaleTimeoutMs,
    mcpHttpEndpointPath: config.mcpHttpEndpointPath,
  };

  const getMcpServer = async () => (await createServerInstanceFn()).server;

  switch (sessionMode) {
    case "stateless":
      return new StatelessTransportManager(getMcpServer);
    case "stateful":
      return new StatefulTransportManager(getMcpServer, statefulOptions);
    case "auto":
    default:
      logger.info(
        "Defaulting to 'auto' mode (stateful with stateless fallback).",
        opContext,
      );
      return new StatefulTransportManager(getMcpServer, statefulOptions);
  }
}

export function createHttpApp(
  transportManager: TransportManager,
  createServerInstanceFn: () => Promise<ServerInstanceInfo>,
  parentContext: RequestContext,
): Hono<{ Bindings: HonoNodeBindings }> {
  const app = new Hono<{ Bindings: HonoNodeBindings }>();
  const transportContext = {
    ...parentContext,
    component: "HttpTransportSetup",
  };
  logger.info("Creating Hono HTTP application.", transportContext);

  app.use(
    "*",
    cors({
      origin: config.mcpAllowedOrigins || [],
      allowMethods: ["GET", "POST", "DELETE", "OPTIONS"],
      allowHeaders: [
        "Content-Type",
        "Mcp-Session-Id",
        "Last-Event-ID",
        "Authorization",
      ],
      credentials: true,
    }),
  );

  app.use(
    "*",
    async (c: Context<{ Bindings: HonoNodeBindings }>, next: Next) => {
      (c.env.outgoing as http.ServerResponse).setHeader(
        "X-Content-Type-Options",
        "nosniff",
      );
      await next();
    },
  );

  app.use(
    MCP_ENDPOINT_PATH,
    async (c: Context<{ Bindings: HonoNodeBindings }>, next: Next) => {
      const clientIp = getClientIp(c);
      const context = requestContextService.createRequestContext({
        operation: "httpRateLimitCheck",
        ipAddress: clientIp,
      });
      try {
        rateLimiter.check(clientIp, context);
        logger.debug("Rate limit check passed.", context);
      } catch (error) {
        logger.warning("Rate limit check failed.", {
          ...context,
          error: error instanceof Error ? error.message : String(error),
        });
        throw error;
      }
      await next();
    },
  );

  const authStrategy = createAuthStrategy();
  if (authStrategy) {
    logger.info(
      "Authentication strategy found, enabling auth middleware.",
      transportContext,
    );
    app.use(MCP_ENDPOINT_PATH, createAuthMiddleware(authStrategy));
  } else {
    logger.info(
      "No authentication strategy found, auth middleware disabled.",
      transportContext,
    );
  }

  app.onError(httpErrorHandler);

  app.get("/healthz", (c) => {
    return c.json({
      status: "ok",
      timestamp: new Date().toISOString(),
    });
  });
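  // Quick check of the route above (illustrative; host/port depend on configuration):
  //   curl -s http://127.0.0.1:3010/healthz
  //   => {"status":"ok","timestamp":"2025-01-01T00:00:00.000Z"}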

  app.get(
    MCP_ENDPOINT_PATH,
    async (c: Context<{ Bindings: HonoNodeBindings }>) => {
      const sessionId = c.req.header("mcp-session-id");
      if (sessionId) {
        return c.text(
          "GET requests to existing sessions are not supported.",
          405,
        );
      }

      // Since this is a stateless endpoint, we create a temporary instance
      // to report on the server's configuration.
      const { tools, identity, options } = await createServerInstanceFn();
      const effectiveSessionMode =
        transportManager instanceof StatefulTransportManager
          ? "stateful"
          : "stateless";

      return c.json({
        status: "ok",
        server: {
          name: identity.name,
          version: identity.version,
          description: identity.description || "No description provided.",
          nodeVersion: process.version,
          environment: config.environment,
          capabilities: options.capabilities,
        },
        sessionMode: {
          configured: config.mcpSessionMode,
          effective: effectiveSessionMode,
        },
        tools: tools,
        message:
          "Server is running. POST to this endpoint to execute a tool call.",
      });
    },
  );

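  // Illustrative request for the POST handler below. The JSON-RPC envelope
  // follows the MCP spec; the tool name is real for this server, but the
  // argument shape shown here is hypothetical:
  //   POST <MCP_ENDPOINT_PATH>
  //   {"jsonrpc":"2.0","id":1,"method":"tools/call",
  //    "params":{"name":"pubmed_search_articles","arguments":{"queryTerm":"crispr"}}}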
  app.post(
    MCP_ENDPOINT_PATH,
    mcpTransportMiddleware(transportManager, createServerInstanceFn),
    (c) => {
      const response = c.get("mcpResponse");

      if (response.sessionId) {
        c.header("Mcp-Session-Id", response.sessionId);
      }
      response.headers.forEach((value, key) => {
        c.header(key, value);
      });

      c.status(response.statusCode);

      if (response.type === "stream") {
        return stream(c, async (s) => {
          await s.pipe(response.stream);
        });
      } else {
        const body =
          typeof response.body === "object" && response.body !== null
            ? response.body
            : { body: response.body };
        return c.json(body);
      }
    },
  );

  app.delete(
    MCP_ENDPOINT_PATH,
    async (c: Context<{ Bindings: HonoNodeBindings }>) => {
      const sessionId = c.req.header("mcp-session-id");
      const context = requestContextService.createRequestContext({
        ...transportContext,
        operation: "handleDeleteRequest",
        sessionId,
      });

      if (sessionId) {
        if (transportManager instanceof StatefulTransportManager) {
          const response = await transportManager.handleDeleteRequest(
            sessionId,
            context,
          );
          if (response.type === "buffered") {
            const body =
              typeof response.body === "object" && response.body !== null
                ? response.body
                : { body: response.body };
            return c.json(body, response.statusCode);
          }
          // Fallback for unexpected stream response on DELETE
          return c.body(null, response.statusCode);
        } else {
          return c.json(
            {
              error: "Method Not Allowed",
              message: "DELETE operations are not supported in this mode.",
            },
            405,
          );
        }
      } else {
        return c.json({
          status: "stateless_mode",
          message: "No sessions to delete in stateless mode",
        });
      }
    },
  );

  logger.info("Hono application setup complete.", transportContext);
  return app;
}

export async function startHttpTransport(
  createServerInstanceFn: () => Promise<ServerInstanceInfo>,
  parentContext: RequestContext,
): Promise<{
  app: Hono<{ Bindings: HonoNodeBindings }>;
  server: ServerType;
  transportManager: TransportManager;
}> {
  const transportContext = {
    ...parentContext,
    component: "HttpTransportStart",
  };
  logger.info("Starting HTTP transport.", transportContext);

  const transportManager = createTransportManager(
    createServerInstanceFn,
    config.mcpSessionMode,
    transportContext,
  );
  const app = createHttpApp(
    transportManager,
    createServerInstanceFn,
    transportContext,
  );

  const server = await startHttpServerWithRetry(
    app,
    HTTP_PORT,
    HTTP_HOST,
    config.mcpHttpMaxPortRetries,
    transportContext,
  );

  logger.info("HTTP transport started successfully.", transportContext);
  return { app, server, transportManager };
}

```
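
A minimal bootstrap sketch for the transport above, using only this file's exports; `makeServerInstance` is a hypothetical stand-in for the server factory defined elsewhere in the codebase:

```typescript
import { requestContextService } from "../../../utils/index.js";
import { startHttpTransport } from "./httpTransport.js";

// Hypothetical factory; the real one lives in the server bootstrap code.
declare const makeServerInstance: Parameters<typeof startHttpTransport>[0];

async function main(): Promise<void> {
  const context = requestContextService.createRequestContext({
    operation: "bootstrapHttpTransport",
  });
  const { server } = await startHttpTransport(makeServerInstance, context);
  // Sketch of graceful shutdown; real code should also close active sessions.
  process.on("SIGINT", () => server.close());
}

void main();
```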

--------------------------------------------------------------------------------
/src/mcp-server/tools/pubmedFetchContents/logic.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Logic for the pubmed_fetch_contents MCP tool.
 * Handles EFetch queries for specific PMIDs and formats the results.
 * This tool can fetch various details from PubMed including abstracts, full XML,
 * MEDLINE text, and citation data.
 * @module src/mcp-server/tools/pubmedFetchContents/logic
 */

import { z } from "zod";
import { getNcbiService } from "../../../services/NCBI/core/ncbiService.js";
import { BaseErrorCode, McpError } from "../../../types-global/errors.js";
import {
  ParsedArticle,
  XmlMedlineCitation,
  XmlPubmedArticleSet,
} from "../../../types-global/pubmedXml.js";
import {
  logger,
  RequestContext,
  requestContextService,
  sanitizeInputForLogging,
} from "../../../utils/index.js";
import {
  ensureArray,
  extractAbstractText,
  extractArticleDates,
  extractAuthors,
  extractDoi,
  extractGrants,
  extractJournalInfo,
  extractKeywords,
  extractMeshTerms,
  extractPmid,
  extractPublicationTypes,
  getText,
} from "../../../services/NCBI/parsing/index.js";

export const PubMedFetchContentsInputSchema = z
  .object({
    pmids: z
      .array(z.string().regex(/^\d+$/))
      .max(200, "Max 200 PMIDs per call if not using history.")
      .optional()
      .describe(
        "An array of PubMed Unique Identifiers (PMIDs) to fetch. Use this OR queryKey/webEnv.",
      ),
    queryKey: z
      .string()
      .optional()
      .describe(
        "Query key from ESearch history. Requires webEnv. Use this OR pmids.",
      ),
    webEnv: z
      .string()
      .optional()
      .describe(
        "Web environment from ESearch history. Requires queryKey. Use this OR pmids.",
      ),
    retstart: z
      .number()
      .int()
      .min(0)
      .optional()
      .describe(
        "0-based index of the first record to retrieve. Used with queryKey/webEnv.",
      ),
    retmax: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe(
        "Maximum number of records to retrieve. Used with queryKey/webEnv.",
      ),
    detailLevel: z
      .enum(["abstract_plus", "full_xml", "medline_text", "citation_data"])
      .optional()
      .default("abstract_plus")
      .describe(
        "Specifies the level of detail for the fetched content. Options: 'abstract_plus' (parsed details), 'full_xml' (raw PubMedArticle XML), 'medline_text' (MEDLINE format), 'citation_data' (minimal citation data). Defaults to 'abstract_plus'.",
      ),
    includeMeshTerms: z
      .boolean()
      .optional()
      .default(true)
      .describe(
        "Include MeSH terms in 'abstract_plus' and 'citation_data' results. Default: true.",
      ),
    includeGrantInfo: z
      .boolean()
      .optional()
      .default(false)
      .describe(
        "Include grant info in 'abstract_plus' results. Default: false.",
      ),
    outputFormat: z
      .enum(["json", "raw_text"])
      .optional()
      .default("json")
      .describe(
        "Output format. 'json' (default) wraps data in a JSON object. 'raw_text' returns raw text for 'medline_text' or 'full_xml' detail levels.",
      ),
  })
  .superRefine((data, ctx) => {
    if (data.queryKey && !data.webEnv) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "webEnv is required if queryKey is provided.",
        path: ["webEnv"],
      });
    }
    if (!data.queryKey && data.webEnv) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "queryKey is required if webEnv is provided.",
        path: ["queryKey"],
      });
    }
    if (
      (!data.pmids || data.pmids.length === 0) &&
      !(data.queryKey && data.webEnv)
    ) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message:
          "Either pmids (non-empty array) or both queryKey and webEnv must be provided.",
        path: ["pmids"],
      });
    }
    if (data.pmids && data.pmids.length > 0 && (data.queryKey || data.webEnv)) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message:
          "Cannot use pmids and queryKey/webEnv simultaneously. Please choose one method.",
        path: ["pmids"],
      });
    }
    if (
      (data.retstart !== undefined || data.retmax !== undefined) &&
      !(data.queryKey && data.webEnv)
    ) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "retstart/retmax can only be used with queryKey and webEnv.",
        path: ["retstart"],
      });
    }
  });
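
// Illustrative inputs against this schema (PMIDs and webEnv values are made up):
//   PASSES: { pmids: ["35000000", "35000001"] }
//   PASSES: { queryKey: "1", webEnv: "MCID_example", retmax: 50 }
//   FAILS:  { pmids: ["123"], queryKey: "1", webEnv: "MCID_example" } // mixes both methods
//   FAILS:  { pmids: ["123"], retstart: 10 } // retstart/retmax require queryKey/webEnv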

export type PubMedFetchContentsInput = z.infer<
  typeof PubMedFetchContentsInputSchema
>;

export type PubMedFetchContentsOutput = {
  content: string;
  articlesReturned: number;
  eFetchUrl: string;
};

interface EFetchServiceParams {
  db: string;
  id?: string;
  query_key?: string;
  WebEnv?: string;
  retmode?: "xml" | "text";
  rettype?: string;
  retstart?: string;
  retmax?: string;
  [key: string]: string | undefined;
}

function parsePubMedArticleSet(
  xmlData: unknown,
  input: PubMedFetchContentsInput,
  parentContext: RequestContext,
): ParsedArticle[] {
  const articles: ParsedArticle[] = [];
  const operationContext = requestContextService.createRequestContext({
    parentRequestId: parentContext.requestId,
    operation: "parsePubMedArticleSet",
  });

  if (
    !xmlData ||
    typeof xmlData !== "object" ||
    !("PubmedArticleSet" in xmlData)
  ) {
    throw new McpError(
      BaseErrorCode.PARSING_ERROR,
      "Invalid or unexpected structure for xmlData in parsePubMedArticleSet.",
      {
        ...operationContext,
        xmlDataType: typeof xmlData,
        xmlDataPreview: sanitizeInputForLogging(
          JSON.stringify(xmlData).substring(0, 200),
        ),
      },
    );
  }

  const typedXmlData = xmlData as { PubmedArticleSet?: XmlPubmedArticleSet };
  const articleSet = typedXmlData.PubmedArticleSet;

  if (!articleSet || !articleSet.PubmedArticle) {
    logger.warning(
      "PubmedArticleSet or PubmedArticle array not found in EFetch XML response.",
      operationContext,
    );
    return articles;
  }

  const pubmedArticlesXml = ensureArray(articleSet.PubmedArticle);

  for (const articleXml of pubmedArticlesXml) {
    if (!articleXml || typeof articleXml !== "object") continue;

    const medlineCitation: XmlMedlineCitation | undefined =
      articleXml.MedlineCitation;
    if (!medlineCitation) continue;

    const pmid = extractPmid(medlineCitation);
    if (!pmid) continue;

    const articleNode = medlineCitation.Article;
    const parsedArticle: ParsedArticle = {
      pmid: pmid,
      title: articleNode?.ArticleTitle
        ? getText(articleNode.ArticleTitle)
        : undefined,
      abstractText: articleNode?.Abstract
        ? extractAbstractText(articleNode.Abstract)
        : undefined,
      authors: articleNode?.AuthorList
        ? extractAuthors(articleNode.AuthorList)
        : undefined,
      journalInfo: articleNode?.Journal
        ? extractJournalInfo(articleNode.Journal, medlineCitation)
        : undefined,
      publicationTypes: articleNode?.PublicationTypeList
        ? extractPublicationTypes(articleNode.PublicationTypeList)
        : undefined,
      keywords: articleNode?.KeywordList
        ? extractKeywords(articleNode.KeywordList)
        : undefined,
      doi: articleNode ? extractDoi(articleNode) : undefined,
      articleDates: articleNode?.ArticleDate
        ? extractArticleDates(articleNode)
        : undefined,
    };

    if (input.includeMeshTerms) {
      parsedArticle.meshTerms = medlineCitation.MeshHeadingList
        ? extractMeshTerms(medlineCitation.MeshHeadingList)
        : undefined;
    }

    if (input.includeGrantInfo) {
      parsedArticle.grantList = articleNode?.GrantList
        ? extractGrants(articleNode.GrantList)
        : undefined;
    }

    articles.push(parsedArticle);
  }
  return articles;
}

export async function pubMedFetchContentsLogic(
  input: PubMedFetchContentsInput,
  parentRequestContext: RequestContext,
): Promise<PubMedFetchContentsOutput> {
  const toolLogicContext = requestContextService.createRequestContext({
    parentRequestId: parentRequestContext.requestId,
    operation: "pubMedFetchContentsLogic",
    input: sanitizeInputForLogging(input),
  });

  const validationResult = PubMedFetchContentsInputSchema.safeParse(input);
  if (!validationResult.success) {
    throw new McpError(
      BaseErrorCode.VALIDATION_ERROR,
      validationResult.error.errors[0]?.message || "Invalid input",
      { ...toolLogicContext, details: validationResult.error.flatten() },
    );
  }

  const ncbiService = getNcbiService();
  logger.info("Executing pubmed_fetch_contents tool", toolLogicContext);

  const eFetchParams: EFetchServiceParams = { db: "pubmed" };

  if (input.queryKey && input.webEnv) {
    eFetchParams.query_key = input.queryKey;
    eFetchParams.WebEnv = input.webEnv;
    if (input.retstart !== undefined)
      eFetchParams.retstart = String(input.retstart);
    if (input.retmax !== undefined) eFetchParams.retmax = String(input.retmax);
  } else if (input.pmids && input.pmids.length > 0) {
    eFetchParams.id = input.pmids.join(",");
  }

  let serviceRetmode: "xml" | "text" = "xml";
  let rettype: string | undefined;

  switch (input.detailLevel) {
    case "full_xml":
      serviceRetmode = "xml";
      break;
    case "medline_text":
      serviceRetmode = "text";
      rettype = "medline";
      break;
    case "abstract_plus":
    case "citation_data":
      serviceRetmode = "xml";
      break;
  }
  eFetchParams.retmode = serviceRetmode;
  if (rettype) eFetchParams.rettype = rettype;

  const eFetchBase =
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi";
  const eFetchQueryString = new URLSearchParams(
    eFetchParams as Record<string, string>,
  ).toString();
  const eFetchUrl = `${eFetchBase}?${eFetchQueryString}`;
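  // e.g. (illustrative, for input { pmids: ["35000000"] } at the default detail level):
  //   https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=35000000&retmode=xml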

  const shouldReturnRawXml =
    input.detailLevel === "full_xml" && input.outputFormat === "raw_text";

  const eFetchResponseData = await ncbiService.eFetch(
    eFetchParams,
    toolLogicContext,
    { retmode: serviceRetmode, rettype, returnRawXml: shouldReturnRawXml },
  );

  let finalOutputText: string;
  let articlesCount = 0;

  if (input.detailLevel === "medline_text") {
    const medlineText = String(eFetchResponseData);
    const foundPmidsInMedline = new Set<string>();
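    // MEDLINE records begin each entry with a line like "PMID- 35000000".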
    const pmidRegex = /^PMID- (\d+)/gm;
    let match;
    while ((match = pmidRegex.exec(medlineText)) !== null) {
      if (match[1]) {
        foundPmidsInMedline.add(match[1]);
      }
    }
    articlesCount = foundPmidsInMedline.size;

    if (input.outputFormat === "raw_text") {
      finalOutputText = medlineText;
    } else {
      const notFoundPmids =
        input.pmids?.filter((pmid) => !foundPmidsInMedline.has(pmid)) || [];
      finalOutputText = JSON.stringify({
        requestedPmids: input.pmids || "N/A (history query)",
        articles: [{ medlineText }],
        notFoundPmids,
        eFetchDetails: { urls: [eFetchUrl] },
      });
    }
  } else if (input.detailLevel === "full_xml") {
    const articlesXml = ensureArray(
      (eFetchResponseData as { PubmedArticleSet?: XmlPubmedArticleSet })
        ?.PubmedArticleSet?.PubmedArticle || [],
    );
    articlesCount = articlesXml.length;
    if (input.outputFormat === "raw_text") {
      // Raw XML was requested, so eFetchResponseData is an unparsed string and
      // the structured count above yields 0. A lightweight tag scan recovers
      // an accurate count without a full parse.
      const rawXml = String(eFetchResponseData);
      articlesCount = (rawXml.match(/<PubmedArticle[\s>]/g) || []).length;
      finalOutputText = rawXml;
    } else {
      const foundPmidsInXml = new Set<string>();
      const articlesPayload = articlesXml.map((articleXml) => {
        const pmid = extractPmid(articleXml.MedlineCitation) || "unknown_pmid";
        if (pmid !== "unknown_pmid") foundPmidsInXml.add(pmid);
        return { pmid, fullXmlContent: articleXml };
      });
      const notFoundPmids =
        input.pmids?.filter((pmid) => !foundPmidsInXml.has(pmid)) || [];
      finalOutputText = JSON.stringify({
        requestedPmids: input.pmids || "N/A (history query)",
        articles: articlesPayload,
        notFoundPmids,
        eFetchDetails: { urls: [eFetchUrl] },
      });
    }
  } else {
    const parsedArticles = parsePubMedArticleSet(
      eFetchResponseData as XmlPubmedArticleSet,
      input,
      toolLogicContext,
    );
    articlesCount = parsedArticles.length;
    const foundPmids = new Set(parsedArticles.map((p) => p.pmid));
    const notFoundPmids =
      input.pmids?.filter((pmid) => !foundPmids.has(pmid)) || [];

    let articlesToReturn: ParsedArticle[] | Record<string, unknown>[] =
      parsedArticles;
    if (input.detailLevel === "citation_data") {
      articlesToReturn = parsedArticles.map((article) => ({
        pmid: article.pmid,
        title: article.title,
        authors: article.authors?.map((a) => ({
          lastName: a.lastName,
          initials: a.initials,
        })),
        journalInfo: {
          title: article.journalInfo?.title,
          isoAbbreviation: article.journalInfo?.isoAbbreviation,
          volume: article.journalInfo?.volume,
          issue: article.journalInfo?.issue,
          pages: article.journalInfo?.pages,
          year: article.journalInfo?.publicationDate?.year,
        },
        doi: article.doi,
        ...(input.includeMeshTerms && { meshTerms: article.meshTerms }),
      }));
    }
    finalOutputText = JSON.stringify({
      requestedPmids: input.pmids || "N/A (history query)",
      articles: articlesToReturn,
      notFoundPmids,
      eFetchDetails: { urls: [eFetchUrl] },
    });
  }

  logger.notice("Successfully executed pubmed_fetch_contents tool.", {
    ...toolLogicContext,
    articlesReturned: articlesCount,
  });

  return {
    content: finalOutputText,
    articlesReturned: articlesCount,
    eFetchUrl,
  };
}

```
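
A short invocation sketch for the logic above, bypassing the MCP registration layer. The PMID is illustrative, and a configured NCBI environment plus an ESM entry point (for top-level await) are assumed:

```typescript
import { requestContextService } from "../../../utils/index.js";
import {
  pubMedFetchContentsLogic,
  PubMedFetchContentsInputSchema,
} from "./logic.js";

const context = requestContextService.createRequestContext({
  operation: "fetchContentsExample",
});
const input = PubMedFetchContentsInputSchema.parse({
  pmids: ["35000000"], // illustrative PMID
  detailLevel: "citation_data",
});

const result = await pubMedFetchContentsLogic(input, context);
console.log(`${result.articlesReturned} article(s) from`, result.eFetchUrl);
```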

--------------------------------------------------------------------------------
/src/services/NCBI/parsing/eSummaryResultParser.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * @fileoverview Helper functions for parsing ESummary results from NCBI.
 * Handles different ESummary XML structures and formats the data into
 * consistent ParsedBriefSummary objects.
 * @module src/services/NCBI/parsing/eSummaryResultParser
 */

import {
  ESummaryArticleId,
  ESummaryDocSumOldXml,
  ESummaryDocumentSummary,
  ESummaryItem,
  ESummaryResult,
  ParsedBriefSummary,
  ESummaryAuthor as XmlESummaryAuthor, // This is the normalized output type
  XmlESummaryAuthorRaw, // This is the raw input type from XML parsing
} from "../../../types-global/pubmedXml.js";
import {
  dateParser,
  logger,
  RequestContext,
  requestContextService,
} from "../../../utils/index.js"; // Note: utils/index.js is the barrel file
import { ensureArray, getAttribute, getText } from "./xmlGenericHelpers.js";

/**
 * Formats an array of ESummary authors into a string.
 * Limits to the first 3 authors and adds "et al." if more exist.
 * @param authors - Array of ESummary author objects (normalized).
 * @returns A string like "Doe J, Smith A, Brown B, et al." or empty if no authors.
 */
export function formatESummaryAuthors(authors?: XmlESummaryAuthor[]): string {
  if (!authors || authors.length === 0) return "";
  return (
    authors
      .slice(0, 3)
      .map((author) => author.name) // Assumes author.name is the string representation
      .join(", ") + (authors.length > 3 ? ", et al." : "")
  );
}
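// e.g. formatESummaryAuthors([
//   { name: "Doe J" }, { name: "Smith A" }, { name: "Brown B" }, { name: "Lee C" },
// ]) => "Doe J, Smith A, Brown B, et al."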

/**
 * Standardizes date strings from ESummary to "YYYY-MM-DD" format.
 * Uses the dateParser utility.
 * @param dateStr - Date string from ESummary (e.g., "2023/01/15", "2023 Jan 15", "2023").
 * @param parentContext - Optional parent request context for logging.
 * @returns A promise resolving to a standardized date string ("YYYY-MM-DD") or undefined if parsing fails.
 */
export async function standardizeESummaryDate(
  dateStr?: string,
  parentContext?: RequestContext,
): Promise<string | undefined> {
  // Defensive: runtime XML values can be null despite the `string | undefined` typing.
  if (dateStr === undefined || dateStr === null) return undefined;

  const dateInputString = String(dateStr); // Coerce: parsed XML values are not guaranteed to be strings.

  const currentContext =
    parentContext ||
    requestContextService.createRequestContext({
      operation: "standardizeESummaryDateInternal",
      inputDate: dateInputString, // Log the stringified version
    });
  try {
    // Pass the stringified version to the date parser
    const parsedDate = await dateParser.parseDate(
      dateInputString,
      currentContext,
    );
    if (parsedDate) {
      return parsedDate.toISOString().split("T")[0]; // Format as YYYY-MM-DD
    }
    logger.debug(
      `standardizeESummaryDate: dateParser could not parse "${dateInputString}", returning undefined.`,
      currentContext,
    );
  } catch (e) {
    logger.warning(
      `standardizeESummaryDate: Error during dateParser.parseDate for "${dateInputString}", returning undefined.`,
      {
        ...currentContext,
        error: e instanceof Error ? e.message : String(e),
      },
    );
  }
  return undefined; // Return undefined if parsing fails
}
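// e.g. (assuming dateParser can resolve the input):
//   await standardizeESummaryDate("2023 Jan 15") => "2023-01-15"
//   await standardizeESummaryDate("gibberish")   => undefined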

/**
 * Parses authors from an ESummary DocumentSummary structure.
 * Handles various ways authors might be represented.
 * Returns an array of normalized XmlESummaryAuthor objects.
 * Internal helper function.
 */
function parseESummaryAuthorsFromDocumentSummary(
  docSummary: ESummaryDocumentSummary,
): XmlESummaryAuthor[] {
  const authorsProp = docSummary.Authors;
  if (!authorsProp) return [];

  const parsedAuthors: XmlESummaryAuthor[] = [];

  const processRawAuthor = (rawAuthInput: XmlESummaryAuthorRaw | string) => {
    let name = "";
    let authtype: string | undefined;
    let clusterid: string | undefined;

    if (typeof rawAuthInput === "string") {
      name = rawAuthInput;
    } else if (rawAuthInput && typeof rawAuthInput === "object") {
      const authorObj = rawAuthInput as XmlESummaryAuthorRaw; // Now typed
      // Try extracting text from the object itself (e.g., if it's { '#text': 'Author Name' })
      name = getText(authorObj, "");

      // If name is still empty, try common property names for author names
      if (!name) {
        name = getText(authorObj.Name || authorObj.name, "");
      }

      authtype = getText(authorObj.AuthType || authorObj.authtype, undefined);
      clusterid = getText(
        authorObj.ClusterId || authorObj.clusterid,
        undefined,
      );

      // Fallback for unhandled structures: log and try to stringify
      if (!name) {
        const authInputString = JSON.stringify(authorObj);
        logger.warning(
          `Unhandled author structure in parseESummaryAuthorsFromDocumentSummary. authInput: ${authInputString.substring(0, 100)}`,
          requestContextService.createRequestContext({
            operation: "parseESummaryAuthorsFromDocumentSummary",
            detail: "Unhandled author structure",
          }),
        );
        // As a last resort, if it's a simple object with a single value, that might be the name
        const keys = Object.keys(authorObj);
        if (
          keys.length === 1 &&
          keys[0] &&
          typeof (authorObj as Record<string, unknown>)[keys[0]] === "string"
        ) {
          name = (authorObj as Record<string, unknown>)[keys[0]] as string;
        } else if (authInputString.length < 100) {
          // Avoid overly long stringified objects
          name = authInputString; // Not ideal, but better than empty for debugging
        }
      }
    }

    if (name.trim()) {
      parsedAuthors.push({
        name: name.trim(),
        authtype,
        clusterid,
      });
    }
  };

  if (Array.isArray(authorsProp)) {
    // authorsProp could be Array<string> or Array<XmlESummaryAuthorRaw>
    (authorsProp as (XmlESummaryAuthorRaw | string)[]).forEach(
      processRawAuthor,
    );
  } else if (
    typeof authorsProp === "object" &&
    "Author" in authorsProp && // authorsProp is { Author: ... }
    authorsProp.Author
  ) {
    const rawAuthors = ensureArray(
      authorsProp.Author as
        | XmlESummaryAuthorRaw
        | XmlESummaryAuthorRaw[]
        | string,
    );
    rawAuthors.forEach(processRawAuthor);
  } else if (typeof authorsProp === "string") {
    try {
      // Attempt to parse if it looks like a JSON array string
      if (authorsProp.startsWith("[") && authorsProp.endsWith("]")) {
        const parsedJsonAuthors = JSON.parse(authorsProp) as unknown[];
        if (Array.isArray(parsedJsonAuthors)) {
          parsedJsonAuthors.forEach((authItem: unknown) => {
            if (typeof authItem === "string") {
              parsedAuthors.push({ name: authItem.trim() });
            } else if (
              typeof authItem === "object" &&
              authItem !== null &&
              ((authItem as XmlESummaryAuthorRaw).name ||
                (authItem as XmlESummaryAuthorRaw).Name)
            ) {
              // If it's an object with a name property, treat as XmlESummaryAuthorRaw
              processRawAuthor(authItem as XmlESummaryAuthorRaw);
            }
          });
          if (parsedAuthors.length > 0) return parsedAuthors; // Return if JSON parsing yielded results
        }
      }
    } catch (e) {
      logger.debug(
        `Failed to parse Authors string as JSON: ${authorsProp.substring(0, 100)}`,
        requestContextService.createRequestContext({
          operation: "parseESummaryAuthorsFromString",
          input: authorsProp.substring(0, 100),
          error: e instanceof Error ? e.message : String(e),
        }),
      );
    }
    // Fallback: split string by common delimiters
    authorsProp
      .split(/[,;]/)
      .map((namePart: string) => namePart.trim())
      .filter((namePart) => namePart)
      .forEach((namePart) => parsedAuthors.push({ name: namePart }));
  }
  return parsedAuthors.filter((author) => author.name);
}

/**
 * Parses a single ESummary DocumentSummary (newer XML format) into a raw summary object.
 * Internal helper function.
 */
function parseSingleDocumentSummary(docSummary: ESummaryDocumentSummary): Omit<
  ParsedBriefSummary,
  "pubDate" | "epubDate"
> & {
  rawPubDate?: string;
  rawEPubDate?: string;
} {
  const pmid = docSummary["@_uid"];
  const authorsArray = parseESummaryAuthorsFromDocumentSummary(docSummary);

  let doiValue: string | undefined = getText(docSummary.DOI, undefined);
  if (!doiValue) {
    const articleIdsProp = docSummary.ArticleIds;
    if (articleIdsProp) {
      const idsArray = Array.isArray(articleIdsProp)
        ? articleIdsProp
        : ensureArray(
            (
              articleIdsProp as {
                ArticleId: ESummaryArticleId[] | ESummaryArticleId;
              }
            ).ArticleId,
          );

      const doiEntry = idsArray.find(
        (id) => (id as ESummaryArticleId).idtype === "doi",
      );
      if (doiEntry) {
        doiValue = getText((doiEntry as ESummaryArticleId).value, undefined);
      }
    }
  }

  return {
    pmid: String(pmid),
    title: getText(docSummary.Title, undefined),
    authors: formatESummaryAuthors(authorsArray),
    source:
      getText(docSummary.Source, undefined) ||
      getText(docSummary.FullJournalName, undefined) ||
      getText(docSummary.SO, undefined) ||
      undefined,
    doi: doiValue,
    rawPubDate: getText(docSummary.PubDate, undefined),
    rawEPubDate: getText(docSummary.EPubDate, undefined),
  };
}

/**
 * Parses a single ESummary DocSum (older XML item-based format) into a raw summary object.
 * Internal helper function.
 */
function parseSingleDocSumOldXml(docSum: ESummaryDocSumOldXml): Omit<
  ParsedBriefSummary,
  "pubDate" | "epubDate"
> & {
  rawPubDate?: string;
  rawEPubDate?: string;
} {
  const pmid = docSum.Id;
  const items = ensureArray(docSum.Item);

  const getItemValue = (
    name: string | string[],
    type?: ESummaryItem["_Type"],
  ): string | undefined => {
    const namesToTry = ensureArray(name);
    for (const n of namesToTry) {
      const item = items.find(
        (i) =>
          i._Name === n &&
          (type ? i._Type === type : true) &&
          i._Type !== "ERROR",
      );
      if (item) {
        const textVal = getText(item);
        if (textVal !== undefined) return String(textVal);
      }
    }
    return undefined;
  };

  const getAuthorList = (): XmlESummaryAuthor[] => {
    const authorListItem = items.find(
      (i) => i._Name === "AuthorList" && i._Type === "List",
    );
    if (authorListItem && authorListItem.Item) {
      return ensureArray(authorListItem.Item)
        .filter((a) => a._Name === "Author" && a._Type === "String")
        .map((a) => ({ name: getText(a, "") }));
    }
    // Fallback for authors directly under DocSum items
    return items
      .filter((i) => i._Name === "Author" && i._Type === "String")
      .map((a) => ({ name: getText(a, "") }));
  };

  const authorsArray = getAuthorList();

  let doiFromItems: string | undefined = getItemValue("DOI", "String");
  if (!doiFromItems) {
    const articleIdsItem = items.find(
      (i) => i._Name === "ArticleIds" && i._Type === "List",
    );
    if (articleIdsItem && articleIdsItem.Item) {
      const ids = ensureArray(articleIdsItem.Item);
      const doiIdItem = ids.find(
        (id) =>
          getAttribute(id as ESummaryItem, "idtype") === "doi" ||
          (id as ESummaryItem)._Name === "doi", // Some older formats might use Name="doi"
      );
      if (doiIdItem) {
        doiFromItems = getText(doiIdItem);
      }
    }
  }

  return {
    pmid: String(pmid),
    title: getItemValue("Title", "String"),
    authors: formatESummaryAuthors(authorsArray),
    source: getItemValue(["Source", "FullJournalName", "SO"], "String"),
    doi: doiFromItems,
    rawPubDate: getItemValue(["PubDate", "ArticleDate"], "Date"),
    rawEPubDate: getItemValue("EPubDate", "Date"),
  };
}

/**
 * Extracts and formats brief summaries from ESummary XML result.
 * Handles both DocumentSummarySet (newer) and older DocSum structures.
 * Asynchronously standardizes dates.
 * @param eSummaryResult - The parsed XML object from ESummary (eSummaryResult part).
 * @param context - Request context for logging and passing to date standardization.
 * @returns A promise resolving to an array of parsed brief summary objects.
 */
export async function extractBriefSummaries(
  eSummaryResult?: ESummaryResult,
  context?: RequestContext,
): Promise<ParsedBriefSummary[]> {
  if (!eSummaryResult) return [];
  const opContext =
    context ||
    requestContextService.createRequestContext({
      operation: "extractBriefSummariesInternal",
    });

  if (eSummaryResult.ERROR) {
    logger.warning("ESummary result contains an error", {
      ...opContext,
      errorDetails: eSummaryResult.ERROR,
    });
    return [];
  }

  let rawSummaries: (Omit<ParsedBriefSummary, "pubDate" | "epubDate"> & {
    rawPubDate?: string;
    rawEPubDate?: string;
  })[] = [];

  if (eSummaryResult.DocumentSummarySet?.DocumentSummary) {
    const docSummaries = ensureArray(
      eSummaryResult.DocumentSummarySet.DocumentSummary,
    );
    rawSummaries = docSummaries
      .map(parseSingleDocumentSummary)
      .filter((s) => s.pmid);
  } else if (eSummaryResult.DocSum) {
    const docSums = ensureArray(eSummaryResult.DocSum);
    rawSummaries = docSums.map(parseSingleDocSumOldXml).filter((s) => s.pmid);
  }

  const processedSummaries: ParsedBriefSummary[] = [];
  for (const rawSummary of rawSummaries) {
    const pubDate = await standardizeESummaryDate(
      rawSummary.rawPubDate,
      opContext,
    );
    const epubDate = await standardizeESummaryDate(
      rawSummary.rawEPubDate,
      opContext,
    );
    processedSummaries.push({
      pmid: rawSummary.pmid,
      title: rawSummary.title,
      authors: rawSummary.authors,
      source: rawSummary.source,
      doi: rawSummary.doi,
      pubDate,
      epubDate,
    });
  }

  return processedSummaries;
}

```
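
A hand-built example of feeding an ESummary payload through the extractor above. The object literal is a fabricated fragment rather than a real NCBI response (hence the cast), and an ESM entry point is assumed for top-level await:

```typescript
import type { ESummaryResult } from "../../../types-global/pubmedXml.js";
import { extractBriefSummaries } from "./eSummaryResultParser.js";

const fakeResult = {
  DocumentSummarySet: {
    DocumentSummary: [
      {
        "@_uid": "35000000", // illustrative PMID
        Title: "An example article title",
        Source: "J Example Med",
        PubDate: "2022 Mar 4",
      },
    ],
  },
} as unknown as ESummaryResult; // only a fragment of the full type is built here

const summaries = await extractBriefSummaries(fakeResult);
// Each summary carries pmid/title/source plus standardized pubDate/epubDate.
console.log(summaries[0]?.pmid, summaries[0]?.pubDate);
```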