This is page 3 of 6. Use http://codebase.md/apollographql/apollo-mcp-server?lines=false&page={x} to view the full context.
# Directory Structure
```
├── .cargo
│   └── config.toml
├── .changesets
│   └── README.md
├── .envrc
├── .github
│   ├── CODEOWNERS
│   ├── renovate.json5
│   └── workflows
│       ├── canary-release.yml
│       ├── ci.yml
│       ├── prep-release.yml
│       ├── release-bins.yml
│       ├── release-container.yml
│       ├── sync-develop.yml
│       └── verify-changeset.yml
├── .gitignore
├── .idea
│   └── runConfigurations
│       ├── clippy.xml
│       ├── format___test___clippy.xml
│       ├── format.xml
│       ├── Run_spacedevs.xml
│       └── Test_apollo_mcp_server.xml
├── .vscode
│   ├── extensions.json
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── apollo.config.json
├── Cargo.lock
├── Cargo.toml
├── CHANGELOG_SECTION.md
├── CHANGELOG.md
├── clippy.toml
├── codecov.yml
├── CONTRIBUTING.md
├── crates
│   ├── apollo-mcp-registry
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── files.rs
│   │       ├── lib.rs
│   │       ├── logging.rs
│   │       ├── platform_api
│   │       │   ├── operation_collections
│   │       │   │   ├── collection_poller.rs
│   │       │   │   ├── error.rs
│   │       │   │   ├── event.rs
│   │       │   │   └── operation_collections.graphql
│   │       │   ├── operation_collections.rs
│   │       │   └── platform-api.graphql
│   │       ├── platform_api.rs
│   │       ├── testdata
│   │       │   ├── minimal_supergraph.graphql
│   │       │   └── supergraph.graphql
│   │       ├── uplink
│   │       │   ├── persisted_queries
│   │       │   │   ├── event.rs
│   │       │   │   ├── manifest_poller.rs
│   │       │   │   ├── manifest.rs
│   │       │   │   └── persisted_queries_manifest_query.graphql
│   │       │   ├── persisted_queries.rs
│   │       │   ├── schema
│   │       │   │   ├── event.rs
│   │       │   │   ├── schema_query.graphql
│   │       │   │   └── schema_stream.rs
│   │       │   ├── schema.rs
│   │       │   ├── snapshots
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_all_fail@logs.snap
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_fallback@logs.snap
│   │       │   │   └── apollo_mcp_registry__uplink__schema__tests__schema_by_url@logs.snap
│   │       │   └── uplink.graphql
│   │       └── uplink.rs
│   ├── apollo-mcp-server
│   │   ├── build.rs
│   │   ├── Cargo.toml
│   │   ├── src
│   │   │   ├── auth
│   │   │   │   ├── networked_token_validator.rs
│   │   │   │   ├── protected_resource.rs
│   │   │   │   ├── valid_token.rs
│   │   │   │   └── www_authenticate.rs
│   │   │   ├── auth.rs
│   │   │   ├── config_schema.rs
│   │   │   ├── cors.rs
│   │   │   ├── custom_scalar_map.rs
│   │   │   ├── errors.rs
│   │   │   ├── event.rs
│   │   │   ├── explorer.rs
│   │   │   ├── graphql.rs
│   │   │   ├── headers.rs
│   │   │   ├── health.rs
│   │   │   ├── introspection
│   │   │   │   ├── minify.rs
│   │   │   │   ├── snapshots
│   │   │   │   │   └── apollo_mcp_server__introspection__minify__tests__minify_schema.snap
│   │   │   │   ├── tools
│   │   │   │   │   ├── execute.rs
│   │   │   │   │   ├── introspect.rs
│   │   │   │   │   ├── search.rs
│   │   │   │   │   ├── snapshots
│   │   │   │   │   │   └── apollo_mcp_server__introspection__tools__search__tests__search_tool.snap
│   │   │   │   │   ├── testdata
│   │   │   │   │   │   └── schema.graphql
│   │   │   │   │   └── validate.rs
│   │   │   │   └── tools.rs
│   │   │   ├── introspection.rs
│   │   │   ├── json_schema.rs
│   │   │   ├── lib.rs
│   │   │   ├── main.rs
│   │   │   ├── meter.rs
│   │   │   ├── operations
│   │   │   │   ├── mutation_mode.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── operation.rs
│   │   │   │   ├── raw_operation.rs
│   │   │   │   ├── schema_walker
│   │   │   │   │   ├── name.rs
│   │   │   │   │   └── type.rs
│   │   │   │   └── schema_walker.rs
│   │   │   ├── operations.rs
│   │   │   ├── runtime
│   │   │   │   ├── config.rs
│   │   │   │   ├── endpoint.rs
│   │   │   │   ├── filtering_exporter.rs
│   │   │   │   ├── graphos.rs
│   │   │   │   ├── introspection.rs
│   │   │   │   ├── logging
│   │   │   │   │   ├── defaults.rs
│   │   │   │   │   ├── log_rotation_kind.rs
│   │   │   │   │   └── parsers.rs
│   │   │   │   ├── logging.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── overrides.rs
│   │   │   │   ├── schema_source.rs
│   │   │   │   ├── schemas.rs
│   │   │   │   ├── telemetry
│   │   │   │   │   └── sampler.rs
│   │   │   │   └── telemetry.rs
│   │   │   ├── runtime.rs
│   │   │   ├── sanitize.rs
│   │   │   ├── schema_tree_shake.rs
│   │   │   ├── server
│   │   │   │   ├── states
│   │   │   │   │   ├── configuring.rs
│   │   │   │   │   ├── operations_configured.rs
│   │   │   │   │   ├── running.rs
│   │   │   │   │   ├── schema_configured.rs
│   │   │   │   │   └── starting.rs
│   │   │   │   └── states.rs
│   │   │   ├── server.rs
│   │   │   └── telemetry_attributes.rs
│   │   └── telemetry.toml
│   └── apollo-schema-index
│       ├── Cargo.toml
│       └── src
│           ├── error.rs
│           ├── lib.rs
│           ├── path.rs
│           ├── snapshots
│           │   ├── apollo_schema_index__tests__search.snap
│           │   └── apollo_schema_index__traverse__tests__schema_traverse.snap
│           ├── testdata
│           │   └── schema.graphql
│           └── traverse.rs
├── docs
│   └── source
│       ├── _sidebar.yaml
│       ├── auth.mdx
│       ├── best-practices.mdx
│       ├── config-file.mdx
│       ├── cors.mdx
│       ├── custom-scalars.mdx
│       ├── debugging.mdx
│       ├── define-tools.mdx
│       ├── deploy.mdx
│       ├── guides
│       │   └── auth-auth0.mdx
│       ├── health-checks.mdx
│       ├── images
│       │   ├── auth0-permissions-enable.png
│       │   ├── mcp-getstarted-inspector-http.jpg
│       │   └── mcp-getstarted-inspector-stdio.jpg
│       ├── index.mdx
│       ├── licensing.mdx
│       ├── limitations.mdx
│       ├── quickstart.mdx
│       ├── run.mdx
│       └── telemetry.mdx
├── e2e
│   └── mcp-server-tester
│       ├── local-operations
│       │   ├── api.graphql
│       │   ├── config.yaml
│       │   ├── operations
│       │   │   ├── ExploreCelestialBodies.graphql
│       │   │   ├── GetAstronautDetails.graphql
│       │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│       │   │   └── SearchUpcomingLaunches.graphql
│       │   └── tool-tests.yaml
│       ├── pq-manifest
│       │   ├── api.graphql
│       │   ├── apollo.json
│       │   ├── config.yaml
│       │   └── tool-tests.yaml
│       ├── run_tests.sh
│       └── server-config.template.json
├── flake.lock
├── flake.nix
├── graphql
│   ├── TheSpaceDevs
│   │   ├── .vscode
│   │   │   ├── extensions.json
│   │   │   └── tasks.json
│   │   ├── api.graphql
│   │   ├── apollo.config.json
│   │   ├── config.yaml
│   │   ├── operations
│   │   │   ├── ExploreCelestialBodies.graphql
│   │   │   ├── GetAstronautDetails.graphql
│   │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│   │   │   └── SearchUpcomingLaunches.graphql
│   │   ├── persisted_queries
│   │   │   └── apollo.json
│   │   ├── persisted_queries.config.json
│   │   ├── README.md
│   │   └── supergraph.yaml
│   └── weather
│       ├── api.graphql
│       ├── config.yaml
│       ├── operations
│       │   ├── alerts.graphql
│       │   ├── all.graphql
│       │   └── forecast.graphql
│       ├── persisted_queries
│       │   └── apollo.json
│       ├── supergraph.graphql
│       ├── supergraph.yaml
│       └── weather.graphql
├── LICENSE
├── macos-entitlements.plist
├── nix
│   ├── apollo-mcp.nix
│   ├── cargo-zigbuild.patch
│   ├── mcp-server-tools
│   │   ├── default.nix
│   │   ├── node-generated
│   │   │   ├── default.nix
│   │   │   ├── node-env.nix
│   │   │   └── node-packages.nix
│   │   ├── node-mcp-servers.json
│   │   └── README.md
│   └── mcphost.nix
├── README.md
├── rust-toolchain.toml
├── scripts
│   ├── nix
│   │   └── install.sh
│   └── windows
│       └── install.ps1
└── xtask
    ├── Cargo.lock
    ├── Cargo.toml
    └── src
        ├── commands
        │   ├── changeset
        │   │   ├── matching_pull_request.graphql
        │   │   ├── matching_pull_request.rs
        │   │   ├── mod.rs
        │   │   ├── scalars.rs
        │   │   └── snapshots
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title_and_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_neither_issues_or_prs.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_prs_in_title_when_empty_issues.snap
        │   │       └── xtask__commands__changeset__tests__it_templatizes_without_prs_in_title_when_issues_present.snap
        │   └── mod.rs
        ├── lib.rs
        └── main.rs
```
# Files
--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/api.graphql:
--------------------------------------------------------------------------------
```graphql
type Agency {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: [Country]
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
  totalLaunchCount: Int
  consecutiveSuccessfulLaunches: Int
  successfulLaunches: Int
  failedLaunches: Int
  pendingLaunches: Int
  consecutiveSuccessfulLandings: Int
  successfulLandings: Int
  failedLandings: Int
  attemptedLandings: Int
  successfulLandingsSpacecraft: Int
  failedLandingsSpacecraft: Int
  attemptedLandingsSpacecraft: Int
  successfulLandingsPayload: Int
  failedLandingsPayload: Int
  attemptedLandingsPayload: Int
  infoUrl: String
  wikiUrl: String
  socialMediaLinks: [SocialMediaLink]
}
type AgencyConnection {
  pageInfo: PageInfo
  results: [Agency]
}
type ApiThrottle {
  yourRequestLimit: Int
  limitFrequencySecs: Int
  currentUse: Int
  nextUseSecs: Int
  ident: String
}
type Astronaut {
  id: ID!
  name: String
  status: String
  agency: Agency
  image: Image
  type: String
  inSpace: Boolean
  timeInSpace: String
  evaTime: String
  age: Int
  dateOfBirth: String
  dateOfDeath: String
  nationality: Country
  bio: String
  wiki: String
  lastFlight: String
  firstFlight: String
  socialMediaLinks: [SocialMediaLink]
}
type AstronautConnection {
  pageInfo: PageInfo
  results: [Astronaut]
}
input AstronautFilters {
  search: String
  inSpace: Boolean
}
type CelestialBody {
  id: ID!
  name: String
  type: CelestialType
  diameter: Float
  mass: Float
  gravity: Float
  lengthOfDay: String
  atmosphere: Boolean
  image: Image
  description: String
  wikiUrl: String
}
type CelestialBodyConnection {
  pageInfo: PageInfo
  results: [CelestialBody]
}
type CelestialType {
  id: ID!
  name: String
}
type Country {
  id: ID!
  name: String
  alpha2Code: String
  alpha3Code: String
  nationalityName: String
  nationalityNameComposed: String
}
type DockingEvent {
  id: ID!
  docking: String
  departure: String
  dockingLocation: DockingLocation
  spaceStationTarget: SpaceStationTarget
  flightVehicleTarget: FlightVehicleTarget
  payloadFlightTarget: PayloadFlightTarget
  flightVehicleChaser: FlightVehicleChaser
  spaceStationChaser: SpaceStationChaser
  payloadFlightChaser: PayloadFlightChaser
}
type DockingEventConnection {
  pageInfo: PageInfo
  results: [DockingEvent]
}
type DockingLocation {
  id: ID!
  name: String
  spacestation: SpaceStation
  spacecraft: Spacecraft
  payload: Payload
}
type FlightVehicleChaser {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
  launch: Launch
  landing: Landing
}
type FlightVehicleTarget {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
}
type Image {
  id: ID!
  name: String
  url: String
  thumbnail: String
  credit: String
  singleUse: Boolean
  license: ImageLicense
}
type ImageLicense {
  name: String
  link: String
}
type InfoUrl {
  priority: Int
  source: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
}
type Landing {
  id: ID!
  type: LandingType
  attempt: Boolean
  success: Boolean
  description: String
  downrangeDistance: String
  landingLocation: LandingLocation
}
type LandingLocation {
  id: ID!
  name: String
  active: Boolean
  abbrev: String
  description: String
  location: Location
  longitude: String
  latitude: String
  image: Image
  landings: SuccessCount
  celestialBody: CelestialBody
}
type LandingType {
  id: ID!
  name: String
  abbrev: String
  description: String
}
type Language {
  id: ID!
  name: String
  code: String
}
type Launch {
  id: ID!
  name: String
  launchDesignator: String
  status: LaunchStatus
  lastUpdated: String
  net: String
  netPrecision: String
  window: LaunchWindow
  image: Image
  infographic: String
  probability: Float
  weatherConcerns: String
  failreason: String
  hashtag: String
  provider: Agency
  rocket: Rocket
  mission: Mission
  pad: Pad
  webcastLive: Boolean
  program: Program
  orbitalLaunchAttemps: Int
  locationLaunchAttemps: Int
  padLaunchAttemps: Int
  agencyLaunchAttemps: Int
  orbitalLaunchAttempsYear: Int
  locationLaunchAttempsYear: Int
  padLaunchAttempsYear: Int
  agencyLaunchAttempsYear: Int
}
type LaunchConnection {
  pageInfo: PageInfo
  results: [Launch]
}
type LaunchStatus {
  id: ID!
  name: String
  abbrev: String
  description: String
}
type LaunchWindow {
  start: String
  end: String
}
type Location {
  id: ID!
  name: String
  active: Boolean
  country: Country
  image: Image
  mapImage: String
  longitude: String
  latitude: String
  totalLaunchCount: Int
  totalLandingCount: Int
  description: String
  timezone: String
}
type Manufacturer {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: Country
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
}
type Mission {
  id: ID!
  name: String
  type: String
  description: String
  image: Image
  orbit: Orbit
  agencies: [Agency]
  infoUrls: [InfoUrl]
  vidUrls: [VideoUrl]
}
type MissionPatch {
  id: ID!
  name: String
  priority: Int
  imageUrl: String
  agency: Agency
}
type Orbit {
  id: ID!
  name: String
  abbrev: String
  celestialBody: CelestialBody
}
type Pad {
  id: ID!
  active: Boolean
  agencies: [Agency]
  name: String
  image: Image
  description: String
  infoUrl: String
  wikiUrl: String
  mapUrl: String
  latitude: Float
  longitude: Float
  country: Country
  mapImage: String
  launchTotalCount: Int
  orbitalLaunchAttemptCount: Int
  fastestTurnaround: String
  location: Location
}
type PageInfo {
  count: Int
  next: String
  previous: String
}
type Payload {
  id: ID!
  name: String
  type: String
  manufacturer: Manufacturer
  operator: Agency
  image: Image
  wikiLink: String
  infoLink: String
  program: Program
  cost: Float
  mass: Float
  description: String
}
type PayloadFlightChaser {
  id: ID!
  url: String
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}
type PayloadFlightTarget {
  id: ID!
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}
type Program {
  id: ID!
  name: String
  image: Image
  infoUrl: String
  wikiUrl: String
  description: String
  agencies: [Agency]
  startDate: String
  endDate: String
  missionPatches: [MissionPatch]
}
type Query {
  agency(id: ID!): Agency
  agencies(search: String, offset: Int = 0, limit: Int = 20): AgencyConnection
  apiThrottle: ApiThrottle
  astronaut(id: ID!): Astronaut
  astronauts(filters: AstronautFilters, offset: Int = 0, limit: Int = 20): AstronautConnection
  celestialBody(id: ID!): CelestialBody
  celestialBodies(search: String, offset: Int = 0, limit: Int = 20): CelestialBodyConnection
  dockingEvent(id: ID!): DockingEvent
  dockingEvents(search: String, offset: Int = 0, limit: Int = 20): DockingEventConnection
  launch(id: ID!): Launch
  launches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  previousLaunces(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  upcomingLaunches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
}
type Rocket {
  id: ID!
  configuration: RocketLaunchConfigurations
}
type RocketFamily {
  id: ID!
  name: String
}
type RocketLaunchConfigurations {
  id: ID!
  name: String
  fullName: String
  variant: String
  families: [RocketFamily]
}
type SocialMedia {
  id: ID!
  name: String
  url: String
  logo: Image
}
type SocialMediaLink {
  id: ID!
  url: String
  socialMedia: SocialMedia
}
type Spacecraft {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  serialNumber: String
  isPlaceholder: Boolean
  image: Image
  inSpace: Boolean
  timeInSpace: String
  timeDocked: String
  flightsCount: Int
  missionEndsCount: Int
  status: String
  description: String
  spacecraftConfig: SpacecraftConfig
  fastestTurnaround: String
}
type SpacecraftConfig {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  image: Image
}
type SpacecraftFamily {
  id: ID!
  name: String
  description: String
  manufacturer: Manufacturer
  maidenFlight: String
}
type SpaceStation {
  id: ID!
  name: String
  image: Image
}
type SpaceStationChaser {
  id: ID!
  name: String
  image: Image
  status: String
  founded: String
  deorbited: String
  description: String
  orbit: String
  type: String
}
type SpaceStationTarget {
  id: ID!
  name: String
  image: Image
}
type SuccessCount {
  total: Int
  successful: Int
  failed: Int
}
type VideoUrl {
  priority: Int
  source: String
  publisher: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
  startTime: String
  endTime: String
  live: Boolean
}
```
--------------------------------------------------------------------------------
/.github/workflows/prep-release.yml:
--------------------------------------------------------------------------------
```yaml
name: Prep release
on:
  workflow_dispatch:
    inputs:
      version_bump:
        type: choice
        description: "Type of version bump"
        default: patch
        required: true
        options:
          - major
          - minor
          - patch
          - custom
      custom_version:
        type: string
        required: false
        description: "Custom version (ignored for other bump types)"
permissions:
  contents: write
  pull-requests: write
concurrency:
  group: pre-release
  cancel-in-progress: false
jobs:
  validate:
    runs-on: ubuntu-latest
    steps:
      - name: Enforce custom_version when bump=custom
        run: |
          if [[ "${{ inputs.version_bump }}" == "custom" ]]; then
            if [[ -z "${{ inputs.custom_version }}" ]]; then
              echo "::error title=Missing input::Set 'custom_version' when version_bump=custom"; exit 1
            fi
            if [[ ! "${{ inputs.custom_version }}" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
              echo "::error title=Invalid SemVer::Use x.y.z (can use optional pre-release/build identifiers)"; exit 1
            fi
          fi
  prep-release:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ secrets.GH_PAT }}
    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Configure git author
        run: |
          git config --local user.name "Apollo Bot"
          git config --local user.email "[email protected]"
      - name: Retrieve current version from Cargo.toml
        id: meta
        run: |
          set -eu
          VERSION=$(cargo metadata --no-deps --format-version=1 | jq -er --arg NAME "apollo-mcp-server" '.packages[] | select(.name == $NAME) | .version')
          [ -n "$VERSION" ] || { echo "::error::Could not determine version"; exit 1; }
          echo "current_version=$VERSION" >> "$GITHUB_OUTPUT"
      - name: Bump the version
        id: bump
        shell: bash
        env:
          CURR: ${{ steps.meta.outputs.current_version }}
          CUSTOM: ${{ inputs.custom_version }}
          BUMP: ${{ inputs.version_bump }}
        run: |
          set -euo pipefail
          
          if [[ -n "${CUSTOM:-}" ]]; then
            echo "new_version=$CUSTOM" >> "$GITHUB_OUTPUT"
            echo "Custom Bumped: $CURR -> $CUSTOM"
          else
            # strip any pre-release / build metadata for arithmetic (e.g., -rc.1, +build.5)
            BASE="${CURR%%[-+]*}"
            
            IFS=. read -r MA MI PA <<< "$BASE"
            
            case "$BUMP" in
              major) MA=$((MA+1)); MI=0; PA=0 ;;
              minor) MI=$((MI+1)); PA=0 ;;
              patch) PA=$((PA+1)) ;;
              *) echo "::error::Unknown bump '$BUMP'"; exit 1 ;;
            esac
            
            NEW_VERSION="$MA.$MI.$PA"
            echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT"
            echo "Bumped: $CURR -> $NEW_VERSION"
          fi
      - name: Prepare release branch
        id: prep_branch
        run: |
          set -e
          git fetch origin develop
          git switch -c "release/${{ steps.bump.outputs.new_version }}" "origin/develop"
          echo "release_branch=release/${{ steps.bump.outputs.new_version }}" >> "$GITHUB_OUTPUT"
      - name: Update Cargo version
        run: |
          cargo install cargo-edit --locked
          cargo set-version --workspace "${{ steps.bump.outputs.new_version }}"
      - name: Replace versions in scripts and docs
        env:
          CURR_VERSION: ${{ steps.meta.outputs.current_version }}
          NEW_VERSION: ${{ steps.bump.outputs.new_version }}
        run: |
          python3 - <<'PY'
          try:
            import os, re, sys, glob, pathlib
            
            current_version = os.environ["CURR_VERSION"]
            new_version = os.environ["NEW_VERSION"]
            
            print(f"current={current_version} new={new_version}")
          
            # negative lookbehind (word,., or -) + optional 'v' + the escaped current version + negative lookahead (word or .)
            # e.g. current version of 1.0.1 will match 1.0.1, v1.0.1, v1.0.1-rc.1
            # e.g. current version of 1.0.1 will not match ver1.0.1, 1.0.1x, 1.0.11, 1.0.1.beta
            pat = re.compile(rf'(?<![\w.-])(v?){re.escape(current_version)}(?![\w.])')
  
            def repl(m): # preserve 'v' prefix if it existed
              return (m.group(1) or '') + new_version
  
            # Targets to update
            targets = [
              "scripts/nix/install.sh",                # nix shell script
              "scripts/windows/install.ps1",           # PowerShell
              *glob.glob("**/*.mdx", recursive=True),  # docs
            ]
  
            print(targets)
            print(f"Scanning {len(targets)} targets…")
          
            changed = 0
            for path in targets:
              p = pathlib.Path(path)
              if not p.exists():
                continue
              txt = p.read_text(encoding="utf-8")
              new_txt, n = pat.subn(repl, txt)
              if n:
                p.write_text(new_txt, encoding="utf-8")
                print(f"Updated {path} ({n} occurrence{'s' if n!=1 else ''})")
                changed += n
  
            if changed == 0:
              print("::error::No occurrences of the current version were found.", file=sys.stderr)
              sys.exit(1)
          except Exception:
            import traceback
            traceback.print_exc()
            sys.exit(1)
          PY
      - name: Commit version bumps
        id: commit_version_bumps
        run: |
          set -euo pipefail
          git add -A || true
          git commit -m "chore(release): bumping to version ${{ steps.bump.outputs.new_version }}" || echo "No version bump changes to commit"
      - name: Update changelog via xtask
        run: cargo xtask changeset changelog ${{ steps.bump.outputs.new_version }}
      - name: Extract changelog section
        id: changelog
        shell: bash
        env:
          NEW: ${{ steps.bump.outputs.new_version }}
          OLD: ${{ steps.meta.outputs.current_version }}
        run: |
          set -euo pipefail
          # Write the extracted section to a file and also expose it as a multiline output "body"
          python3 - <<'PY' > CHANGELOG_SECTION.md
          try:
            import os, re, sys, pathlib
            new = os.environ["NEW"]
            old = os.environ["OLD"]
      
            p = pathlib.Path("CHANGELOG.md")
            if not p.exists():
              raise FileNotFoundError("CHANGELOG.md not found at repo root")
            text = p.read_text(encoding="utf-8")
      
            # Find header for the new version
            start = re.search(rf'(?m)^# \[{re.escape(new)}\]', text)
            if not start:
              print(f"::error::Could not find changelog entry for {new}", file=sys.stderr)
              sys.exit(1)
      
            # Prefer the *specific* previous version header if present; otherwise, next '# ['; else, EOF
            segment = text[start.start():]
            end_old = re.search(rf'(?m)^# \[{re.escape(old)}\]', segment)
            if end_old:
              segment = segment[:end_old.start()]
            else:
              nxt = re.search(r'(?m)^# \[', segment[len('# [' + new + ']'):])
              if nxt:
                # adjust to absolute end
                segment = segment[: (len('# [' + new + ']') + nxt.start())]
      
            segment = segment.rstrip() + "\n"
            print(segment)
          except Exception:
            import traceback
            traceback.print_exc()
            sys.exit(1)
          PY
            
            {
              echo 'body<<EOF'
              cat CHANGELOG_SECTION.md
              echo 'EOF'
            } >> "$GITHUB_OUTPUT"
      - name: Commit and push changelog updates
        shell: bash
        run: |
          set -euo pipefail
          git add -A || true
          git commit -m "chore(release): changelog for ${{ steps.bump.outputs.new_version }}" || echo "No changelog updates to commit"
          git push origin HEAD
      - name: Open/Update draft PR to main
        env:
          HEAD: release/${{ steps.bump.outputs.new_version }}
          TITLE: Releasing ${{ steps.bump.outputs.new_version }}
        shell: bash
        run: |
          set -euo pipefail
          # Try to create; if it already exists, update it
          if ! gh pr create \
            --base main \
            --head "$HEAD" \
            --title "$TITLE" \
            --draft \
            --body-file CHANGELOG_SECTION.md \
            --label release
          then
            num=$(gh pr list --head "$HEAD" --base main --state open --json number -q '.[0].number' || true)
            if [[ -n "$num" ]]; then
              gh pr edit "$num" --title "$TITLE" --body-file CHANGELOG_SECTION.md --add-label release
            else
              echo "::error::Failed to create or find PR from $HEAD to main"
              exit 1
            fi
          fi
```
--------------------------------------------------------------------------------
/docs/source/define-tools.mdx:
--------------------------------------------------------------------------------
```markdown
---
title: Define MCP Tools
---
You can manually define the GraphQL operations that are exposed by Apollo MCP Server as MCP tools. You can define these operations using:
- Local operation files
- Operation collections
- Persisted query manifests
- GraphOS-managed persisted queries
Alternatively, you can let an AI model read your graph schema via GraphQL introspection and have it determine the available operations.
## Define GraphQL operations for tools
### From operation files
An operation file is a `.graphql` file containing a single GraphQL operation.
<CodeColumns cols={2}>
```graphql title="Example operation GetForecast"
query GetForecast($coordinate: InputCoordinate!) {
  forecast(coordinate: $coordinate) {
    detailed
  }
}
```
```graphql title="Example operation GetWeatherData"
query GetAllWeatherData($coordinate: InputCoordinate!, $state: String!) {
  forecast(coordinate: $coordinate) {
    detailed
  }
  alerts(state: $state) {
    severity
    description
    instruction
  }
}
```
</CodeColumns>
Use the `operations` option to provide the MCP Server with a list of operation files. For each operation file you provide, the MCP Server creates an MCP tool that calls the corresponding GraphQL operation.
You can also use the `operations` option to specify a directory. The server then loads all files with a `.graphql` extension in that directory as operations.
Files and directories specified with `operations` are hot reloaded. When you specify a file, the MCP tool is updated when the file contents are modified. When you specify a directory, operations exposed as MCP tools are updated when files are added, modified, or removed from the directory.
### From operation collections
For graphs managed by GraphOS, Apollo MCP Server can retrieve operations from an [operation collection](/graphos/platform/explorer/operation-collections).
Use GraphOS Studio Explorer to create and manage operation collections.
#### Configuring the MCP Server to use a GraphOS operation collection
To use a GraphOS operation collection, you must set your graph credentials (`APOLLO_GRAPH_REF` and `APOLLO_KEY`) as environment variables.
Each graph variant has its own default collection, called **Default MCP Tools**, but you can specify any shared collection by using `operations.source: collection`.
Specify the collection to use with the `operations.id` option. To view the ID of a collection, click the ••• button next to its entry, select **View details**, and copy the **Collection ID**.
To use the variant's default collection, specify `operations.id: default`. Apollo MCP Server automatically fetches the default collection if no ID is specified.
```yaml title="Example config file for using a GraphOS operation collection"
operations:
  source: collection
  id: default
```
The MCP Server supports hot reloading of the GraphOS operation collection, so it automatically picks up changes from GraphOS without restarting.
#### Setting operation collection variables
When saving operation collections, remove any dynamic variables from the **Variables** panel of Explorer. This enables the LLM to modify the variables when calling the operation.
Any variables set to any valid value (even `null`) in the Variables panel of a saved operation are used as a hardcoded override for that operation's variable.
For example, if you create the following operation for an operation collection:
```graphql
query GetProduct($productId: ID!) {
  product(id: $productId) {
    id
    description
  }
}
```
And the Variables panel has `productId` set to `1234`:
```json
{
  "productId": "1234"
}
```
Then, every time the LLM calls the `GetProduct` operation, the `productId` variable is always set to `1234`. The same is true if `productId` is set to `null`.
If you want to use dynamic variables that the LLM can modify, remove any variables from the Variables panel and save that operation to the collection.
### From persisted query manifests
Apollo MCP Server supports reading GraphQL operations from Apollo-formatted [persisted query manifest](/graphos/platform/security/persisted-queries#manifest-format) files.
Set the persisted query manifest file for the MCP Server with the `operations` option. The MCP Server supports hot reloading of persisted query manifests, so changes to manifests are applied without restarting.
An example manifest is available in the [GitHub repo](https://github.com/apollographql/apollo-mcp-server/tree/main/graphql/weather/persisted_queries).
```yaml title="Example config for using persisted query manifest"
operations:
  source: manifest
  path: <PATH/TO/persisted-queries-manifest.json>
```
### From GraphOS-managed persisted queries
For graphs managed by GraphOS, Apollo MCP Server can get operations by reading persisted queries from GraphOS. The MCP Server uses Apollo Uplink to access the persisted queries.
To use GraphOS persisted queries, you must set your graph credentials `APOLLO_GRAPH_REF` and `APOLLO_KEY` as environment variables.
Use the `operations.source: uplink` option to specify that tools should be loaded from GraphOS-managed persisted queries.
<Tip>
Use a [contract variant](/graphos/platform/schema-management/delivery/contracts/overview) with a persisted query list associated with that variant, so you can control what AI can consume from your graph. [Learn more](/apollo-mcp-server/best-practices#use-contract-variants-to-control-ai-access-to-graphs).
</Tip>
```yaml title="Example config using GraphOS-managed persisted queries"
operations:
  source: uplink
```
The MCP Server supports hot reloading of GraphOS-managed persisted queries, so it can automatically pick up changes from GraphOS without restarting.
If you register a persisted query with a specific client name instead of `null`, you must configure the MCP Server to send the necessary header indicating the client name to the router.
Use the `headers` option when running the MCP Server to pass the header to the router. The default name of the header expected by the router is `apollographql-client-name`. To use a different header name, configure `telemetry.apollo.client_name_header` in router YAML configuration.
```yaml title="Example config using GraphOS-managed persisted queries" {1-2}
headers:
  "apollographql-client-name": "my-web-app"
operations:
  source: uplink
```
## Introspection tools
In addition to defining specific tools for pre-defined GraphQL operations, Apollo MCP Server supports introspection tools that enable AI agents to explore the graph schema and execute operations dynamically.
You can enable the following introspection tools:
- `introspect`: allows the AI model to introspect the schema of the GraphQL API by providing a specific type name to get information about, and a depth parameter to determine how deep to traverse the subtype hierarchy. The AI model can start the introspection by looking up the top-level `Query` or `Mutation` type.
- `search`: allows the AI model to search for type information by providing a set of search terms. This can result in fewer tool calls than `introspect`, especially if the desired type is deep in the type hierarchy of the schema. Search results include all the parent type information needed to construct operations involving the matching type.
- `validate`: validates a GraphQL operation against the schema without executing it. This allows AI models to verify that their operations are syntactically correct and conform to the schema before execution, preventing unintended side effects. Operations should be validated prior to calling the `execute` tool.
- `execute`: executes an operation on the GraphQL endpoint.
The MCP client can use these tools to provide schema information to the model and its context window, and allow the model to execute GraphQL operations based on that schema.
### Minification
Both the `introspect` and `search` tools support minification of their results through the `minify` option. This option helps optimize context window usage for AI models.
- **Reduces context window usage**: Minified GraphQL SDL takes up significantly less space in the AI model's context window, allowing for more complex schemas or additional context
- **Uses compact notation**: Type definitions use prefixed compact syntax and common scalar types are shortened
- **Preserves functionality**: All essential type information is retained, just in a more compact format
- **Includes legend in tool descriptions**: When minify is enabled, the tool descriptions automatically include a legend explaining the notation
**Minification format:**
- **Type prefixes**: `T=type`, `I=input`, `E=enum`, `U=union`, `F=interface`
- **Scalar abbreviations**: `s=String`, `i=Int`, `f=Float`, `b=Boolean`, `d=ID`
- **Directive abbreviations**: `@D=deprecated`
- **Type modifiers**: `!=required`, `[]=list`, `<>=implements`
Example comparison:
**Regular output:**
```graphql
type User {
  id: ID!
  name: String
  email: String!
  posts: [Post]
}
```
**Minified output:**
```
T:User:id:d!,name:s,email:s!,posts:[Post]
```
<Tip>
Use a [contract variant](/graphos/platform/schema-management/delivery/contracts/overview) so you can control the parts of your graph that AI can introspect. [Learn more](/apollo-mcp-server/best-practices#use-contract-variants-to-control-ai-access-to-graphs)
</Tip>
```yaml title="Example config using introspection"
introspection:
  execute:
    enabled: true
  introspect:
    enabled: true
    minify: true
  search:
    enabled: true
    minify: true
    index_memory_bytes: 50000000
    leaf_depth: 1
  validate:
    enabled: true
```
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime.rs:
--------------------------------------------------------------------------------
```rust
//! Runtime utilities
//!
//! This module is only used by the main binary and provides helper code
//! related to runtime configuration.
mod config;
mod endpoint;
mod filtering_exporter;
mod graphos;
mod introspection;
pub mod logging;
mod operation_source;
mod overrides;
mod schema_source;
mod schemas;
pub mod telemetry;
use std::path::Path;
pub use config::Config;
use figment::{
    Figment,
    providers::{Env, Format, Yaml},
};
pub use operation_source::{IdOrDefault, OperationSource};
pub use schema_source::SchemaSource;
/// Separator to use when drilling down into nested options in the env figment
const ENV_NESTED_SEPARATOR: &str = "__";
/// Read configuration from environment variables only (when no config file is provided)
///
/// Values come from the common `APOLLO_*` variables plus any `APOLLO_MCP_*`
/// variables, with `__` drilling down into nested options.
#[allow(clippy::result_large_err)]
pub fn read_config_from_env() -> Result<Config, figment::Error> {
    let common = apollo_common_env();
    let mcp_env = Env::prefixed("APOLLO_MCP_").split(ENV_NESTED_SEPARATOR);
    Figment::new().join(common).join(mcp_env).extract()
}
/// Read in a config from a YAML file, filling in any missing values from the environment
///
/// With figment's `join`, providers added earlier take precedence, so
/// environment variables override values from the YAML file.
#[allow(clippy::result_large_err)]
pub fn read_config(yaml_path: impl AsRef<Path>) -> Result<Config, figment::Error> {
    let common = apollo_common_env();
    let mcp_env = Env::prefixed("APOLLO_MCP_").split(ENV_NESTED_SEPARATOR);
    let file = Yaml::file(yaml_path);
    Figment::new().join(common).join(mcp_env).join(file).extract()
}
/// Figment provider that handles mapping common Apollo environment variables into
/// the nested structure needed by the config
///
/// Each recognized `APOLLO_*` variable is rewritten into the `graphos`
/// section of the config via a `:`-separated nesting path.
fn apollo_common_env() -> Env {
    Env::prefixed("APOLLO_")
        .only(&["graph_ref", "key", "uplink_endpoints"])
        .map(|key| {
            let name = key.to_string().to_lowercase();
            match name.as_str() {
                "graph_ref" => "GRAPHOS:APOLLO_GRAPH_REF".into(),
                "key" => "GRAPHOS:APOLLO_KEY".into(),
                "uplink_endpoints" => "GRAPHOS:APOLLO_UPLINK_ENDPOINTS".into(),
                // `only` restricts keys to the three handled above, so this
                // arm should never be hit; pass the key through unchanged.
                other => other.to_string().into(),
            }
        })
        .split(":")
}
#[cfg(test)]
mod test {
    //! Tests covering precedence and merging of YAML config and env vars.
    use super::read_config;
    /// An `APOLLO_MCP_*` env var overrides the same key in the YAML file.
    #[test]
    fn it_prioritizes_env_vars() {
        let config = r#"
            endpoint: http://from_file:4000
        "#;
        figment::Jail::expect_with(move |jail| {
            let path = "config.yaml";
            let endpoint = "https://from_env:4000/";
            jail.create_file(path, config)?;
            jail.set_env("APOLLO_MCP_ENDPOINT", endpoint);
            let config = read_config(path)?;
            assert_eq!(config.endpoint.as_str(), endpoint);
            Ok(())
        });
    }
    /// `__` in an env var name drills down into nested config options.
    #[test]
    fn it_extracts_nested_env() {
        let config = r#"
            overrides:
                disable_type_description: false
        "#;
        figment::Jail::expect_with(move |jail| {
            let path = "config.yaml";
            jail.create_file(path, config)?;
            jail.set_env("APOLLO_MCP_OVERRIDES__DISABLE_TYPE_DESCRIPTION", "true");
            let config = read_config(path)?;
            assert!(config.overrides.disable_type_description);
            Ok(())
        });
    }
    /// Values present only in the file and only in the env are both kept.
    #[test]
    fn it_merges_env_and_file() {
        let config = "
            endpoint: http://from_file:4000/
        ";
        figment::Jail::expect_with(move |jail| {
            let path = "config.yaml";
            jail.create_file(path, config)?;
            jail.set_env("APOLLO_MCP_INTROSPECTION__EXECUTE__ENABLED", "true");
            let config = read_config(path)?;
            assert_eq!(config.endpoint.as_str(), "http://from_file:4000/");
            assert!(config.introspection.execute.enabled);
            Ok(())
        });
    }
    /// Common `APOLLO_*` vars (here the uplink endpoints) are mapped into the
    /// nested `graphos` section and merged with file values; verified via a
    /// snapshot of the fully-resolved config.
    #[test]
    fn it_merges_env_and_file_with_uplink_endpoints() {
        let config = "
            endpoint: http://from_file:4000/
        ";
        let saved_path = std::env::var("PATH").unwrap_or_default();
        let workspace = env!("CARGO_MANIFEST_DIR");
        figment::Jail::expect_with(move |jail| {
            // Clear the environment so only explicitly-set vars are visible,
            // but keep PATH and the insta workspace root so tooling works.
            jail.clear_env();
            jail.set_env("PATH", &saved_path);
            jail.set_env("INSTA_WORKSPACE_ROOT", workspace);
            let path = "config.yaml";
            jail.create_file(path, config)?;
            jail.set_env(
                "APOLLO_UPLINK_ENDPOINTS",
                "http://from_env:4000/,http://from_env2:4000/",
            );
            let config = read_config(path)?;
            insta::assert_debug_snapshot!(config, @r#"
            Config {
                cors: CorsConfig {
                    enabled: false,
                    origins: [],
                    match_origins: [],
                    allow_any_origin: false,
                    allow_credentials: false,
                    allow_methods: [
                        "GET",
                        "POST",
                        "DELETE",
                    ],
                    allow_headers: [
                        "content-type",
                        "mcp-protocol-version",
                        "mcp-session-id",
                        "traceparent",
                        "tracestate",
                    ],
                    expose_headers: [
                        "mcp-session-id",
                        "traceparent",
                        "tracestate",
                    ],
                    max_age: Some(
                        7200,
                    ),
                },
                custom_scalars: None,
                endpoint: Endpoint(
                    Url {
                        scheme: "http",
                        cannot_be_a_base: false,
                        username: "",
                        password: None,
                        host: Some(
                            Domain(
                                "from_file",
                            ),
                        ),
                        port: Some(
                            4000,
                        ),
                        path: "/",
                        query: None,
                        fragment: None,
                    },
                ),
                graphos: GraphOSConfig {
                    apollo_key: None,
                    apollo_graph_ref: None,
                    apollo_registry_url: None,
                    apollo_uplink_endpoints: [
                        Url {
                            scheme: "http",
                            cannot_be_a_base: false,
                            username: "",
                            password: None,
                            host: Some(
                                Domain(
                                    "from_env",
                                ),
                            ),
                            port: Some(
                                4000,
                            ),
                            path: "/",
                            query: None,
                            fragment: None,
                        },
                        Url {
                            scheme: "http",
                            cannot_be_a_base: false,
                            username: "",
                            password: None,
                            host: Some(
                                Domain(
                                    "from_env2",
                                ),
                            ),
                            port: Some(
                                4000,
                            ),
                            path: "/",
                            query: None,
                            fragment: None,
                        },
                    ],
                },
                headers: {},
                forward_headers: [],
                health_check: HealthCheckConfig {
                    enabled: false,
                    path: "/health",
                    readiness: ReadinessConfig {
                        interval: ReadinessIntervalConfig {
                            sampling: 5s,
                            unready: None,
                        },
                        allowed: 100,
                    },
                },
                introspection: Introspection {
                    execute: ExecuteConfig {
                        enabled: false,
                    },
                    introspect: IntrospectConfig {
                        enabled: false,
                        minify: false,
                    },
                    search: SearchConfig {
                        enabled: false,
                        index_memory_bytes: 50000000,
                        leaf_depth: 1,
                        minify: false,
                    },
                    validate: ValidateConfig {
                        enabled: false,
                    },
                },
                logging: Logging {
                    level: Level(
                        Info,
                    ),
                    path: None,
                    rotation: Hourly,
                },
                telemetry: Telemetry {
                    exporters: None,
                    service_name: None,
                    version: None,
                },
                operations: Infer,
                overrides: Overrides {
                    disable_type_description: false,
                    disable_schema_description: false,
                    enable_explorer: false,
                    mutation_mode: None,
                },
                schema: Uplink,
                transport: Stdio,
            }
            "#);
            Ok(())
        });
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states.rs:
--------------------------------------------------------------------------------
```rust
use apollo_compiler::{Schema, validation::Valid};
use apollo_federation::{ApiSchemaOptions, Supergraph};
use apollo_mcp_registry::uplink::schema::{SchemaState, event::Event as SchemaEvent};
use futures::{FutureExt as _, Stream, StreamExt as _, stream};
use reqwest::header::HeaderMap;
use url::Url;
use crate::{
    cors::CorsConfig,
    custom_scalar_map::CustomScalarMap,
    errors::{OperationError, ServerError},
    headers::ForwardHeaders,
    health::HealthCheckConfig,
    operations::MutationMode,
};
use super::{Server, ServerEvent, Transport};
mod configuring;
mod operations_configured;
mod running;
mod schema_configured;
mod starting;
use configuring::Configuring;
use operations_configured::OperationsConfigured;
use running::Running;
use schema_configured::SchemaConfigured;
use starting::Starting;
/// Drives the server through its lifecycle states in response to schema,
/// operation, and shutdown events.
pub(super) struct StateMachine {}
/// Common configuration options for the states
struct Config {
    /// Transport used to serve MCP requests.
    transport: Transport,
    /// URL of the GraphQL endpoint operations are executed against.
    endpoint: Url,
    /// Fixed headers sent with requests to the GraphQL endpoint.
    headers: HeaderMap,
    /// Which incoming headers are forwarded to the GraphQL endpoint.
    forward_headers: ForwardHeaders,
    /// Whether the `execute` introspection tool is enabled.
    execute_introspection: bool,
    /// Whether the `validate` introspection tool is enabled.
    validate_introspection: bool,
    /// Whether the `introspect` introspection tool is enabled.
    introspect_introspection: bool,
    /// Whether the `search` introspection tool is enabled.
    search_introspection: bool,
    /// Whether `introspect` results are rendered in minified notation.
    introspect_minify: bool,
    /// Whether `search` results are rendered in minified notation.
    search_minify: bool,
    /// Graph ref for Apollo Explorer integration, when enabled
    /// (presumably used to build Explorer links — confirm in `Running`).
    explorer_graph_ref: Option<String>,
    /// Optional custom scalar mapping used when describing operations.
    custom_scalar_map: Option<CustomScalarMap>,
    /// Controls whether mutations are exposed as tools.
    mutation_mode: MutationMode,
    /// Suppress type descriptions in generated tool schemas.
    disable_type_description: bool,
    /// Suppress schema descriptions in generated tool schemas.
    disable_schema_description: bool,
    /// Disable passing client auth tokens through to the endpoint
    /// (NOTE(review): semantics live in the transport layer — confirm there).
    disable_auth_token_passthrough: bool,
    /// Depth to which the `search` tool traverses leaf types.
    search_leaf_depth: usize,
    /// Memory budget in bytes for the `search` tool's schema index.
    index_memory_bytes: usize,
    /// Health check endpoint configuration.
    health_check: HealthCheckConfig,
    /// CORS configuration for HTTP-based transports.
    cors: CorsConfig,
}
impl StateMachine {
    /// Drive the server through its lifecycle until shutdown or a fatal error.
    ///
    /// Merges schema events, operation events, and the Ctrl-C/termination
    /// signal into one stream and advances the current [`State`] on each
    /// event. Returns `Err` only if the machine ended in [`State::Error`].
    pub(crate) async fn start(self, server: Server) -> Result<(), ServerError> {
        let schema_stream = server
            .schema_source
            .into_stream()
            .map(ServerEvent::SchemaUpdated)
            .boxed();
        let operation_stream = server.operation_source.into_stream().await.boxed();
        let ctrl_c_stream = Self::ctrl_c_stream().boxed();
        let mut stream = stream::select_all(vec![schema_stream, operation_stream, ctrl_c_stream]);
        // Start in `Configuring`, copying all options from the server.
        let mut state = State::Configuring(Configuring {
            config: Config {
                transport: server.transport,
                endpoint: server.endpoint,
                headers: server.headers,
                forward_headers: server.forward_headers,
                execute_introspection: server.execute_introspection,
                validate_introspection: server.validate_introspection,
                introspect_introspection: server.introspect_introspection,
                search_introspection: server.search_introspection,
                introspect_minify: server.introspect_minify,
                search_minify: server.search_minify,
                explorer_graph_ref: server.explorer_graph_ref,
                custom_scalar_map: server.custom_scalar_map,
                mutation_mode: server.mutation_mode,
                disable_type_description: server.disable_type_description,
                disable_schema_description: server.disable_schema_description,
                disable_auth_token_passthrough: server.disable_auth_token_passthrough,
                search_leaf_depth: server.search_leaf_depth,
                index_memory_bytes: server.index_memory_bytes,
                health_check: server.health_check,
                cors: server.cors,
            },
        });
        while let Some(event) = stream.next().await {
            state = match event {
                ServerEvent::SchemaUpdated(registry_event) => match registry_event {
                    SchemaEvent::UpdateSchema(schema_state) => {
                        let schema = Self::sdl_to_api_schema(schema_state)?;
                        // A new schema advances whichever state we are in;
                        // a running server applies it as an in-place update.
                        match state {
                            State::Configuring(configuring) => {
                                configuring.set_schema(schema).await.into()
                            }
                            State::SchemaConfigured(schema_configured) => {
                                schema_configured.set_schema(schema).await.into()
                            }
                            State::OperationsConfigured(operations_configured) => {
                                operations_configured.set_schema(schema).await.into()
                            }
                            State::Running(running) => running.update_schema(schema).await.into(),
                            other => other,
                        }
                    }
                    // The schema stream ended: fatal only in states that are
                    // still waiting for their first schema.
                    SchemaEvent::NoMoreSchema => match state {
                        State::Configuring(_) | State::OperationsConfigured(_) => {
                            State::Error(ServerError::NoSchema)
                        }
                        _ => state,
                    },
                },
                ServerEvent::OperationsUpdated(operations) => match state {
                    State::Configuring(configuring) => {
                        configuring.set_operations(operations).await.into()
                    }
                    State::SchemaConfigured(schema_configured) => {
                        schema_configured.set_operations(operations).await.into()
                    }
                    State::OperationsConfigured(operations_configured) => operations_configured
                        .set_operations(operations)
                        .await
                        .into(),
                    State::Running(running) => running.update_operations(operations).await.into(),
                    other => other,
                },
                ServerEvent::OperationError(e, _) => {
                    State::Error(ServerError::Operation(OperationError::File(e)))
                }
                ServerEvent::CollectionError(e) => {
                    State::Error(ServerError::Operation(OperationError::Collection(e)))
                }
                ServerEvent::Shutdown => match state {
                    State::Running(running) => {
                        // Signal the running server to stop before winding down.
                        running.cancellation_token.cancel();
                        State::Stopping
                    }
                    _ => State::Stopping,
                },
            };
            // Landing in `Starting` means both inputs are present:
            // immediately attempt to launch the server.
            if let State::Starting(starting) = state {
                state = starting.start().await.into();
            }
            if matches!(&state, State::Error(_) | State::Stopping) {
                break;
            }
        }
        match state {
            State::Error(e) => Err(e),
            _ => Ok(()),
        }
    }
    /// Convert incoming SDL into a usable schema: try to interpret it as a
    /// supergraph and derive its API schema; if that fails, fall back to
    /// parsing it directly as a standard GraphQL schema.
    #[allow(clippy::result_large_err)]
    fn sdl_to_api_schema(schema_state: SchemaState) -> Result<Valid<Schema>, ServerError> {
        match Supergraph::new_with_router_specs(&schema_state.sdl) {
            Ok(supergraph) => Ok(supergraph
                .to_api_schema(ApiSchemaOptions::default())
                .map_err(|e| ServerError::Federation(Box::new(e)))?
                .schema()
                .clone()),
            Err(_) => Schema::parse_and_validate(schema_state.sdl, "schema.graphql")
                .map_err(|e| ServerError::GraphQLSchema(e.into())),
        }
    }
    /// Stream yielding a single [`ServerEvent::Shutdown`] once a termination
    /// signal is received.
    fn ctrl_c_stream() -> impl Stream<Item = ServerEvent> {
        shutdown_signal()
            .map(|_| ServerEvent::Shutdown)
            .into_stream()
            .boxed()
    }
}
/// Wait for a shutdown signal: Ctrl-C (SIGINT) on all platforms, plus
/// SIGTERM on Unix. Completes when either is received.
#[allow(clippy::expect_used)]
async fn shutdown_signal() {
    let ctrl_c = async {
        tokio::signal::ctrl_c()
            .await
            .expect("Failed to install CTRL+C signal handler");
    };
    // On Unix, also listen for SIGTERM (commonly sent by process managers
    // and container runtimes on shutdown).
    #[cfg(unix)]
    let terminate = async {
        tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
            .expect("Failed to install SIGTERM signal handler")
            .recv()
            .await;
    };
    // Non-Unix targets have no SIGTERM; this branch never completes.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();
    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }
}
/// Lifecycle states of the server state machine.
#[allow(clippy::large_enum_variant)]
enum State {
    /// Waiting for both a schema and operations.
    Configuring(Configuring),
    /// A schema has arrived; still waiting for operations.
    SchemaConfigured(SchemaConfigured),
    /// Operations have arrived; still waiting for a schema.
    OperationsConfigured(OperationsConfigured),
    /// Both inputs present; the server is about to be launched.
    Starting(Starting),
    /// The server is serving requests.
    Running(Running),
    /// A fatal error occurred; the machine stops and reports it.
    Error(ServerError),
    /// Shutdown requested; the machine is winding down.
    Stopping,
}
impl From<Configuring> for State {
    /// Wrap an initial `Configuring` state.
    // Renamed the binding from the misleading `starting` — the value here
    // is a `Configuring`, not a `Starting`.
    fn from(configuring: Configuring) -> Self {
        State::Configuring(configuring)
    }
}
impl From<SchemaConfigured> for State {
    fn from(schema_configured: SchemaConfigured) -> Self {
        State::SchemaConfigured(schema_configured)
    }
}
impl From<Result<SchemaConfigured, ServerError>> for State {
    fn from(result: Result<SchemaConfigured, ServerError>) -> Self {
        match result {
            Ok(schema_configured) => State::SchemaConfigured(schema_configured),
            Err(error) => State::Error(error),
        }
    }
}
impl From<OperationsConfigured> for State {
    fn from(operations_configured: OperationsConfigured) -> Self {
        State::OperationsConfigured(operations_configured)
    }
}
impl From<Result<OperationsConfigured, ServerError>> for State {
    fn from(result: Result<OperationsConfigured, ServerError>) -> Self {
        match result {
            Ok(operations_configured) => State::OperationsConfigured(operations_configured),
            Err(error) => State::Error(error),
        }
    }
}
impl From<Starting> for State {
    fn from(starting: Starting) -> Self {
        State::Starting(starting)
    }
}
impl From<Result<Starting, ServerError>> for State {
    fn from(result: Result<Starting, ServerError>) -> Self {
        match result {
            Ok(starting) => State::Starting(starting),
            Err(error) => State::Error(error),
        }
    }
}
impl From<Running> for State {
    fn from(running: Running) -> Self {
        State::Running(running)
    }
}
impl From<Result<Running, ServerError>> for State {
    fn from(result: Result<Running, ServerError>) -> Self {
        match result {
            Ok(running) => State::Running(running),
            Err(error) => State::Error(error),
        }
    }
}
impl From<ServerError> for State {
    fn from(error: ServerError) -> Self {
        State::Error(error)
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/search.rs:
--------------------------------------------------------------------------------
```rust
//! MCP tool to search a GraphQL schema.
use crate::errors::McpError;
use crate::introspection::minify::MinifyExt as _;
use crate::schema_from_type;
use crate::schema_tree_shake::{DepthLimit, SchemaTreeShaker};
use apollo_compiler::ast::{Field, OperationType as AstOperationType, Selection};
use apollo_compiler::validation::Valid;
use apollo_compiler::{Name, Node, Schema};
use apollo_schema_index::{OperationType, Options, SchemaIndex};
use rmcp::model::{CallToolResult, Content, ErrorCode, Tool};
use rmcp::schemars::JsonSchema;
use rmcp::serde_json::Value;
use rmcp::{schemars, serde_json};
use serde::Deserialize;
use std::fmt::Debug;
use std::sync::Arc;
use tokio::sync::Mutex;
use tracing::debug;
/// The name of the tool to search a GraphQL schema.
pub const SEARCH_TOOL_NAME: &str = "search";
/// The maximum number of search results to consider.
/// Root paths returned by the index are truncated to this count.
const MAX_SEARCH_RESULTS: usize = 5;
/// A tool to search a GraphQL schema.
#[derive(Clone)]
pub struct Search {
    /// The schema being searched, shared behind an async mutex.
    schema: Arc<Mutex<Valid<Schema>>>,
    /// Index built over the schema, used to match search terms.
    index: SchemaIndex,
    /// Whether the Mutation root is included as a searchable root type.
    allow_mutations: bool,
    /// Depth limit for expanding leaf types in results
    /// (presumably consumed by the tree shaker — confirm in `execute`).
    leaf_depth: usize,
    /// Whether results are rendered in minified notation.
    minify: bool,
    /// The MCP tool definition advertised to clients.
    pub tool: Tool,
}
/// Input for the search tool.
///
/// Deserialized from the MCP tool-call arguments; the derived JSON schema
/// is what clients see in the tool definition.
#[derive(JsonSchema, Deserialize, Debug)]
pub struct Input {
    /// The search terms
    terms: Vec<String>,
}
/// An error while indexing the GraphQL schema.
#[derive(Debug, thiserror::Error)]
pub enum IndexingError {
    /// The underlying schema index could not be built.
    #[error("Unable to index schema: {0}")]
    IndexingError(#[from] apollo_schema_index::error::IndexingError),
    /// The schema mutex was already locked when `Search::new` tried to
    /// take it non-blockingly.
    #[error("Unable to lock schema: {0}")]
    TryLockError(#[from] tokio::sync::TryLockError),
}
impl Search {
    /// Create a new search tool over the given schema.
    ///
    /// Builds a `SchemaIndex` rooted at the query type (and the mutation type
    /// when `allow_mutations` is set). Uses `try_lock` on the schema mutex, so
    /// construction fails with `IndexingError::TryLockError` if the lock is
    /// currently held elsewhere.
    pub fn new(
        schema: Arc<Mutex<Valid<Schema>>>,
        allow_mutations: bool,
        leaf_depth: usize,
        index_memory_bytes: usize,
        minify: bool,
    ) -> Result<Self, IndexingError> {
        // Only index mutation types when mutations may be exposed to clients
        let root_types = if allow_mutations {
            OperationType::Query | OperationType::Mutation
        } else {
            OperationType::Query.into()
        };
        let locked = &schema.try_lock()?;
        Ok(Self {
            schema: schema.clone(),
            index: SchemaIndex::new(locked, root_types, index_memory_bytes)?,
            allow_mutations,
            leaf_depth,
            minify,
            tool: Tool::new(
                SEARCH_TOOL_NAME,
                format!(
                    "Search a GraphQL schema for types matching the provided search terms. Returns complete type definitions including all related types needed to construct GraphQL operations. Instructions: If the introspect tool is also available, you can discover type names by using the introspect tool starting from the root Query or Mutation types. Avoid reusing previously searched terms for more efficient exploration.{}",
                    // Append a legend for the minified notation when minification is on
                    if minify {
                        " - T=type,I=input,E=enum,U=union,F=interface;s=String,i=Int,f=Float,b=Boolean,d=ID;@D=deprecated;!=required,[]=list,<>=implements"
                    } else {
                        ""
                    }
                ),
                schema_from_type!(Input),
            ),
        })
    }
    /// Execute a search against the indexed schema.
    ///
    /// Searches the index for the given terms, keeps at most
    /// `MAX_SEARCH_RESULTS` root paths, and tree-shakes the schema down to the
    /// types on those paths (plus their input types). Returns the retained
    /// type definitions as one text content block per type.
    #[tracing::instrument(skip(self))]
    pub async fn execute(&self, input: Input) -> Result<CallToolResult, McpError> {
        let mut root_paths = self
            .index
            .search(input.terms.clone(), Options::default())
            .map_err(|e| {
                McpError::new(
                    ErrorCode::INTERNAL_ERROR,
                    format!("Failed to search index: {e}"),
                    None,
                )
            })?;
        // Bound the response size by keeping only the best-ranked paths
        root_paths.truncate(MAX_SEARCH_RESULTS);
        debug!(
            "Root paths for search terms: {}\n{}",
            input.terms.join(", "),
            root_paths
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<String>>()
                .join("\n"),
        );
        let schema = self.schema.lock().await;
        let mut tree_shaker = SchemaTreeShaker::new(&schema);
        for root_path in root_paths {
            let path_len = root_path.inner.len();
            for (i, path_node) in root_path.inner.into_iter().enumerate() {
                if let Some(extended_type) = schema.types.get(path_node.node_type.as_str()) {
                    // The last node on a path is expanded to `leaf_depth`;
                    // intermediate nodes retain only the single field leading
                    // to the next node, one level deep.
                    let (selection_set, depth) = if i == path_len - 1 {
                        (None, DepthLimit::Limited(self.leaf_depth))
                    } else {
                        (
                            path_node.field_name.as_ref().map(|field_name| {
                                vec![Selection::Field(Node::from(Field {
                                    alias: Default::default(),
                                    name: Name::new_unchecked(field_name),
                                    arguments: Default::default(),
                                    selection_set: Default::default(),
                                    directives: Default::default(),
                                }))]
                            }),
                            DepthLimit::Limited(1),
                        )
                    };
                    tree_shaker.retain_type(extended_type, selection_set.as_ref(), depth)
                }
                for field_arg in path_node.field_args {
                    if let Some(extended_type) = schema.types.get(field_arg.as_str()) {
                        // Retain input types with unlimited depth because all input must be given
                        tree_shaker.retain_type(extended_type, None, DepthLimit::Unlimited);
                    }
                }
            }
        }
        let shaken = tree_shaker.shaken().unwrap_or_else(|schema| schema.partial);
        Ok(CallToolResult {
            content: shaken
                .types
                .iter()
                .filter(|(_name, extended_type)| {
                    // Drop built-ins, and drop the mutation root type itself
                    // when mutations are not allowed
                    !extended_type.is_built_in()
                        && schema
                            .root_operation(AstOperationType::Mutation)
                            .is_none_or(|root_name| {
                                extended_type.name() != root_name || self.allow_mutations
                            })
                })
                .map(|(_, extended_type)| {
                    if self.minify {
                        extended_type.minify()
                    } else {
                        extended_type.serialize().to_string()
                    }
                })
                .map(Content::text)
                .collect(),
            is_error: None,
            meta: None,
            // Note: The returned content is treated as text, so no need to structure its output
            structured_content: None,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rmcp::model::RawContent;
    use rstest::{fixture, rstest};
    use std::ops::Deref;
    const TEST_SCHEMA: &str = include_str!("testdata/schema.graphql");
    /// Concatenate all text content blocks of a tool result into one string
    /// suitable for snapshot comparison.
    fn content_to_snapshot(result: CallToolResult) -> String {
        result
            .content
            .into_iter()
            .filter_map(|c| {
                let c = c.deref();
                match c {
                    RawContent::Text(text) => Some(text.text.clone()),
                    _ => None,
                }
            })
            .collect::<Vec<String>>()
            .join("\n")
    }
    /// Fixture providing the parsed and validated test schema.
    #[fixture]
    fn schema() -> Valid<Schema> {
        Schema::parse(TEST_SCHEMA, "schema.graphql")
            .expect("Failed to parse test schema")
            .validate()
            .expect("Failed to validate test schema")
    }
    // A basic search should succeed and match the stored snapshot
    #[rstest]
    #[tokio::test]
    async fn test_search_tool(schema: Valid<Schema>) {
        let schema = Arc::new(Mutex::new(schema));
        let search = Search::new(schema.clone(), false, 1, 15_000_000, false)
            .expect("Failed to create search tool");
        let result = search
            .execute(Input {
                terms: vec!["User".to_string()],
            })
            .await
            .expect("Search execution failed");
        assert!(!result.is_error.unwrap_or(false));
        insta::assert_snapshot!(content_to_snapshot(result));
    }
    // With mutations allowed, a search should also surface types that
    // reference the match (e.g. the createUser mutation referencing User)
    #[rstest]
    #[tokio::test]
    async fn test_referencing_types_are_collected(schema: Valid<Schema>) {
        let schema = Arc::new(Mutex::new(schema));
        let search = Search::new(schema.clone(), true, 1, 15_000_000, false)
            .expect("Failed to create search tool");
        // Search for a type that should have references
        let result = search
            .execute(Input {
                terms: vec!["User".to_string()],
            })
            .await
            .expect("Search execution failed");
        assert!(!result.is_error.unwrap_or(false));
        assert!(
            content_to_snapshot(result).contains("createUser"),
            "Expected to find the createUser mutation in search results"
        );
    }
    // With minify off, the tool description must not include the legend
    #[rstest]
    #[tokio::test]
    async fn test_search_tool_description_is_not_minified(schema: Valid<Schema>) {
        let schema = Arc::new(Mutex::new(schema));
        let search = Search::new(schema.clone(), false, 1, 15_000_000, false)
            .expect("Failed to create search tool");
        let description = search.tool.description.unwrap();
        assert!(
            description
                .contains("Search a GraphQL schema for types matching the provided search terms")
        );
        assert!(description.contains("Instructions: If the introspect tool is also available"));
        assert!(description.contains("Avoid reusing previously searched terms"));
        // Should not contain minification legend
        assert!(!description.contains("T=type,I=input"));
    }
    // With minify on, the tool description must include the legend
    #[rstest]
    #[tokio::test]
    async fn test_tool_description_minified(schema: Valid<Schema>) {
        let schema = Arc::new(Mutex::new(schema));
        let search = Search::new(schema.clone(), false, 1, 15_000_000, true)
            .expect("Failed to create search tool");
        let description = search.tool.description.unwrap();
        // Should contain minification legend
        assert!(description.contains("T=type,I=input,E=enum,U=union,F=interface"));
        assert!(description.contains("s=String,i=Int,f=Float,b=Boolean,d=ID"));
    }
}
```
--------------------------------------------------------------------------------
/docs/source/quickstart.mdx:
--------------------------------------------------------------------------------
```markdown
---
title: Apollo MCP Server Quickstart
subtitle: Create and run an MCP server in minutes with Apollo
---
Apollo MCP Server is a [Model Context Protocol](https://modelcontextprotocol.io/) server that exposes your GraphQL API operations as MCP tools.
This guide walks you through the process of creating, running and configuring an MCP server with Apollo.
## Prerequisites
- [Rover CLI](/rover/getting-started) v0.36 or later. We'll use Rover to initialize a project and run the MCP server. Follow the instructions for [installing](/rover/getting-started) and [authenticating](/rover/getting-started#connecting-to-graphos) Rover with a GraphOS account.
- [Node.js](https://nodejs.org/) v18 or later (for `mcp-remote`)
- [Claude Desktop](https://claude.ai/download) or another MCP-compatible client
## Step 1: Create an MCP server
Run the interactive initialization command:
```bash showLineNumbers=false
rover init --mcp
```
The CLI wizard guides you through several prompts. 
Select **Create MCP tools from a new Apollo GraphOS project** and **Apollo graph with Connectors (connect to REST services)** as your starting point.
You'll also need to select your organization and give your project a name and ID.
The wizard shows all files that will be created, including:
- MCP server configuration files
- GraphQL schema and operations
- Docker setup for (optional) deployment
Type `Y` to confirm and create your project files.
## Step 2: Run your MCP Server
You can start your MCP server locally with `rover dev`.
1. Choose the environment-specific command to load environment variables from the provided `.env` file and start the MCP server.
    <Tabs>
        <Tab label="Linux / MacOS">
        
        ```terminal showLineNumbers=false
        set -a && source .env && set +a && rover dev --supergraph-config supergraph.yaml --mcp .apollo/mcp.local.yaml
        ```
        </Tab>
        <Tab label="Windows Powershell">
        ```terminal showLineNumbers=false
        Get-Content .env | ForEach-Object { $name, $value = $_.split('=',2); [System.Environment]::SetEnvironmentVariable($name, $value) }
        rover dev --supergraph-config supergraph.yaml --mcp .apollo/mcp.local.yaml
        ```
        </Tab>
        
    </Tabs>
1. You should see some output indicating that the GraphQL server is running at `http://localhost:4000` and the MCP server is running at `http://127.0.0.1:8000`.
1. In a new terminal window, run the MCP Inspector to verify the server is running:
    ```terminal showLineNumbers=false
    npx @modelcontextprotocol/inspector http://127.0.0.1:8000/mcp --transport http
    ```
1. This will automatically open your browser to `http://127.0.0.1:6274`.
1. Click **Connect**, then **List Tools** to see the available tools.
## Step 3: Connect to an MCP client
Apollo MCP Server works with any MCP-compatible client. Choose your favorite client and follow the instructions to connect.
<ExpansionPanel title="Claude Desktop (recommended)">
Open the `claude_desktop_config.json` file in one of the following paths:
- Mac OS: `~/Library/Application\ Support/Claude/claude_desktop_config.json`
- Windows: `%APPDATA%\Claude\claude_desktop_config.json`
- Linux: `~/.config/Claude/claude_desktop_config.json`
Copy the configuration:
```json
{
  "mcpServers": {
    "mcp-My API": {
      "command": "npx",
      "args": [
        "mcp-remote",
        "http://127.0.0.1:8000/mcp"
      ]
    }
  }
}
```
</ExpansionPanel>
<ExpansionPanel title="Claude Code">
Install using the CLI:
```bash
claude mcp add apollo-mcp npx mcp-remote http://127.0.0.1:8000/mcp
```
</ExpansionPanel>
<ExpansionPanel title="Cursor">
Click the button to quick install:
<a href="cursor://anysphere.cursor-deeplink/mcp/install?name=apollo-mcp&config=eyJjb21tYW5kIjoibnB4IG1jcC1yZW1vdGUgaHR0cDovLzEyNy4wLjAuMTo4MDAwL21jcCJ9">
  <img
    src="https://cursor.com/deeplink/mcp-install-dark.svg"
    alt="Install Apollo MCP Server"
    width="200"
  />
</a>
Or install manually:
1. Go to **Cursor Settings** → **MCP** → **Add new MCP Server**
2. Name: `Apollo MCP` (choose a title)
3. Command: `npx`
4. Arguments: `["mcp-remote", "http://127.0.0.1:8000/mcp"]`
</ExpansionPanel>
<ExpansionPanel title="Goose">
Add Apollo MCP Server to your Goose configuration. Edit your `~/.config/goose/profiles.yaml`:
```yaml
default:
  provider: openai
  processor: gpt-4
  accelerator: gpt-4o-mini
  moderator: passive
  toolkits:
    - name: developer
    - name: mcp
      requires:
        apollo-mcp:
          command: npx
          args:
            - mcp-remote
            - http://127.0.0.1:8000/mcp
```
Or use the Goose CLI to add the MCP server:
```bash
goose mcp add apollo-mcp npx mcp-remote http://127.0.0.1:8000/mcp
```
</ExpansionPanel>
<ExpansionPanel title="Cline (VS Code Extension)">
1. Go to **Advanced settings** → **Extensions** → **Add custom extension**
2. Name: `Apollo MCP`
3. Type: **STDIO**
4. Command: `npx mcp-remote http://127.0.0.1:8000/mcp`
</ExpansionPanel>
<ExpansionPanel title="OpenCode">
Edit `~/.config/opencode/opencode.json`:
```json
{
  "$schema": "https://opencode.ai/config.json",
  "mcp": {
    "apollo-mcp": {
      "type": "local", 
      "command": [
        "npx",
        "mcp-remote",
        "http://127.0.0.1:8000/mcp"
      ],
      "enabled": true
    }
  }
}
```
</ExpansionPanel>
<ExpansionPanel title="Windsurf">
1. Go to **Windsurf Settings → MCP → Add new MCP Server**
2. Name: `Apollo MCP`
3. Command: `npx`
4. Arguments: `["mcp-remote", "http://127.0.0.1:8000/mcp"]`
Alternatively, edit your Windsurf configuration file directly:
```json
{
  "mcpServers": {
    "apollo-mcp": {
      "command": "npx",
      "args": [
        "mcp-remote",
        "http://127.0.0.1:8000/mcp"
      ]
    }
  }
}
```
</ExpansionPanel>
1. Restart your MCP client.
1. Test the connection by asking: "What MCP tools do you have available?". 
1. Verify GraphQL operations are listed as available tools.
1. Test a query using one of your configured operations.
## Step 4: Define MCP tools
MCP tools are defined as GraphQL operations. The project template currently uses operation collections as the source of its tools.
<Note>
See [Define MCP Tools](/apollo-mcp-server/define-tools) for other ways to define MCP tools.
</Note>
1. Navigate to Sandbox at [http://localhost:4000](http://localhost:4000).
1. Click the Bookmark icon to open Operation Collections.
1. Click **Sandbox** beside "Showing saved operations for your Sandbox, across all endpoints" and select your graph. This represents the graph name and ID you used when creating your project.
1. You'll see an operation collection called "Default MCP Tools".
1. Create a new operation in the middle panel:
    ```graphql
    # Retrieves product information
    query GetProducts {
      products {
        id
        name
        description
      }
    }
    ```
1. Click the **Save** button and give it the name `GetProducts`.
1. Select the `Default MCP Tools` collection and click **Save**.
1. Restart your MCP client and test the connection by asking: "What MCP tools do you have available?". You should see the `GetProducts` tool listed. You can also test this with MCP Inspector.
## Step 5: Deploy your MCP server
Apollo MCP Server can run in any container environment.
### Using the Apollo Runtime Container
Your project includes a pre-configured `mcp.Dockerfile` for easy deployment. This container includes:
- Apollo Router for serving your GraphQL API
- Apollo MCP Server for MCP protocol support
- All necessary dependencies
1. Build the container:
    ```bash
    docker build -f mcp.Dockerfile -t my-mcp-server .
    ```
1. Run locally:
    ```bash
    docker run -p 4000:4000 -p 8000:8000 \
      -e APOLLO_KEY=$APOLLO_KEY \
      -e APOLLO_GRAPH_REF=$APOLLO_GRAPH_REF \
      -e MCP_ENABLE=1 \
      my-mcp-server
    ```
1. Deploy to your platform. The container can be deployed to any platform supporting Docker, such as: AWS ECS/Fargate, Google Cloud Run, Azure Container Instances, Kubernetes, Fly.io, Railway, Render.
1. Ensure these variables are set in your deployment environment:
| Variable                     | Description                     | Required |
| ---------------------------- | ------------------------------- | -------- |
| `APOLLO_KEY`                 | Your graph's API key            | Yes      |
| `APOLLO_GRAPH_REF`           | Your graph reference            | Yes      |
| `APOLLO_MCP_TRANSPORT__PORT` | MCP server port (default: 8000) | No       |
| `APOLLO_ROUTER_PORT`         | Router port (default: 4000)     | No       |
For more deployment options, see the [Deploy the MCP Server](/apollo-mcp-server/deploy) page.
### Update client configuration
After deploying, update your MCP client configuration to use the deployed URL:
```json
{
  "mcpServers": {
    "my-api": {
      "command": "npx",
      "args": [
        "mcp-remote",
        "https://your-deployed-server.com/mcp"
      ]
    }
  }
}
```
## Troubleshooting
**Client doesn't see tools:**
- Ensure you restarted your MCP client after configuration
- Verify the Apollo MCP Server is running (`rover dev` command)
- Check port numbers match between server and client config
**Connection refused errors:**
- Confirm the server is running on the correct port
- Verify firewall settings allow connections to localhost:8000
- For remote connections, ensure the host is set to `0.0.0.0` in your config
**Authentication issues:**
- Verify environment variables are properly set
- Check that your GraphQL endpoint accepts the provided headers
- When using `rover dev` you can test your GraphQL endpoint using Sandbox at [http://localhost:4000](http://localhost:4000)
## Additional resources
- [Tutorial: Getting started with MCP and GraphQL](https://www.apollographql.com/tutorials/intro-mcp-graphql)
- [Tutorial: Agentic GraphQL: MCP for the Enterprise](https://www.apollographql.com/tutorials/enterprise-mcp-graphql)
- [Blog: Getting Started with Apollo MCP Server](https://www.apollographql.com/blog/getting-started-with-apollo-mcp-server-for-any-graphql-api)
### Getting help
If you're still having issues:
- Check [Apollo MCP Server GitHub issues](https://github.com/apollographql/apollo-mcp-server/issues)
- Join the [Apollo community forums](https://community.apollographql.com/c/mcp-server/41)
- Contact your Apollo representative for direct support
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/headers.rs:
--------------------------------------------------------------------------------
```rust
use std::ops::Deref;
use std::str::FromStr;
use headers::HeaderMapExt;
use http::Extensions;
use reqwest::header::{HeaderMap, HeaderName};
use crate::auth::ValidToken;
/// List of header names to forward from MCP clients to the downstream GraphQL API.
pub type ForwardHeaders = Vec<String>;
/// Build the header map for an outgoing GraphQL request.
///
/// The result combines, in order:
/// 1. the statically configured headers,
/// 2. any configured forwardable headers present on the incoming request,
/// 3. the validated auth token (unless passthrough is disabled), and
/// 4. the `mcp-session-id` header from the incoming request, if any.
pub fn build_request_headers(
    static_headers: &HeaderMap,
    forward_header_names: &ForwardHeaders,
    incoming_headers: &HeaderMap,
    extensions: &Extensions,
    disable_auth_token_passthrough: bool,
) -> HeaderMap {
    // Static headers form the base of the outgoing map
    let mut outgoing = static_headers.clone();
    // Copy over any configured forwardable headers from the incoming request
    forward_headers(forward_header_names, incoming_headers, &mut outgoing);
    // Propagate the validated token upstream unless passthrough is disabled
    if !disable_auth_token_passthrough {
        if let Some(token) = extensions.get::<ValidToken>() {
            outgoing.typed_insert(token.deref().clone());
        }
    }
    // Preserve the MCP session identifier when the client sent one
    if let Some(session_id) = incoming_headers.get("mcp-session-id") {
        outgoing.insert("mcp-session-id", session_id.clone());
    }
    outgoing
}
/// Copy each named header from `incoming` into `outgoing`.
///
/// Hop-by-hop headers (and `content-length`) are never forwarded, per
/// RFC 7230: https://datatracker.ietf.org/doc/html/rfc7230#section-6.1
fn forward_headers(names: &[String], incoming: &HeaderMap, outgoing: &mut HeaderMap) {
    // Headers that must not be forwarded to the upstream server
    const BLOCKED: [&str; 9] = [
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailers",
        "transfer-encoding",
        "upgrade",
        "content-length",
    ];
    for name in names {
        // Silently skip names that are not valid header names
        let Ok(header_name) = HeaderName::from_str(name) else {
            continue;
        };
        if BLOCKED.contains(&header_name.as_str().to_lowercase().as_str()) {
            continue;
        }
        if let Some(value) = incoming.get(&header_name) {
            outgoing.insert(header_name, value.clone());
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use headers::Authorization;
    use http::Extensions;
    use reqwest::header::HeaderValue;
    use crate::auth::ValidToken;
    // Static headers are always copied into the outgoing request
    #[test]
    fn test_build_request_headers_includes_static_headers() {
        let mut static_headers = HeaderMap::new();
        static_headers.insert("x-api-key", HeaderValue::from_static("static-key"));
        static_headers.insert("user-agent", HeaderValue::from_static("mcp-server"));
        let forward_header_names = vec![];
        let incoming_headers = HeaderMap::new();
        let extensions = Extensions::new();
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            false,
        );
        assert_eq!(result.get("x-api-key").unwrap(), "static-key");
        assert_eq!(result.get("user-agent").unwrap(), "mcp-server");
    }
    // Only headers named in the forward list are copied from the incoming request
    #[test]
    fn test_build_request_headers_forwards_configured_headers() {
        let static_headers = HeaderMap::new();
        let forward_header_names = vec!["x-tenant-id".to_string(), "x-trace-id".to_string()];
        let mut incoming_headers = HeaderMap::new();
        incoming_headers.insert("x-tenant-id", HeaderValue::from_static("tenant-123"));
        incoming_headers.insert("x-trace-id", HeaderValue::from_static("trace-456"));
        incoming_headers.insert("other-header", HeaderValue::from_static("ignored"));
        let extensions = Extensions::new();
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            false,
        );
        assert_eq!(result.get("x-tenant-id").unwrap(), "tenant-123");
        assert_eq!(result.get("x-trace-id").unwrap(), "trace-456");
        assert!(result.get("other-header").is_none());
    }
    // A validated token in the request extensions becomes an Authorization header
    #[test]
    fn test_build_request_headers_adds_oauth_token_when_enabled() {
        let static_headers = HeaderMap::new();
        let forward_header_names = vec![];
        let incoming_headers = HeaderMap::new();
        let mut extensions = Extensions::new();
        let token = ValidToken(Authorization::bearer("test-token").unwrap());
        extensions.insert(token);
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            false,
        );
        assert!(result.get("authorization").is_some());
        assert_eq!(result.get("authorization").unwrap(), "Bearer test-token");
    }
    // With passthrough disabled, the token must NOT be forwarded upstream
    #[test]
    fn test_build_request_headers_skips_oauth_token_when_disabled() {
        let static_headers = HeaderMap::new();
        let forward_header_names = vec![];
        let incoming_headers = HeaderMap::new();
        let mut extensions = Extensions::new();
        let token = ValidToken(Authorization::bearer("test-token").unwrap());
        extensions.insert(token);
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            true,
        );
        assert!(result.get("authorization").is_none());
    }
    // The mcp-session-id header is always forwarded when present
    #[test]
    fn test_build_request_headers_forwards_mcp_session_id() {
        let static_headers = HeaderMap::new();
        let forward_header_names = vec![];
        let mut incoming_headers = HeaderMap::new();
        incoming_headers.insert("mcp-session-id", HeaderValue::from_static("session-123"));
        let extensions = Extensions::new();
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            false,
        );
        assert_eq!(result.get("mcp-session-id").unwrap(), "session-123");
    }
    // All sources (static, forwarded, session id, token) combine correctly
    #[test]
    fn test_build_request_headers_combined_scenario() {
        // Static headers
        let mut static_headers = HeaderMap::new();
        static_headers.insert("x-api-key", HeaderValue::from_static("static-key"));
        // Forward specific headers
        let forward_header_names = vec!["x-tenant-id".to_string()];
        // Incoming headers
        let mut incoming_headers = HeaderMap::new();
        incoming_headers.insert("x-tenant-id", HeaderValue::from_static("tenant-123"));
        incoming_headers.insert("mcp-session-id", HeaderValue::from_static("session-456"));
        incoming_headers.insert(
            "ignored-header",
            HeaderValue::from_static("should-not-appear"),
        );
        // OAuth token
        let mut extensions = Extensions::new();
        let token = ValidToken(Authorization::bearer("oauth-token").unwrap());
        extensions.insert(token);
        let result = build_request_headers(
            &static_headers,
            &forward_header_names,
            &incoming_headers,
            &extensions,
            false,
        );
        // Verify all parts combined correctly
        assert_eq!(result.get("x-api-key").unwrap(), "static-key");
        assert_eq!(result.get("x-tenant-id").unwrap(), "tenant-123");
        assert_eq!(result.get("mcp-session-id").unwrap(), "session-456");
        assert_eq!(result.get("authorization").unwrap(), "Bearer oauth-token");
        assert!(result.get("ignored-header").is_none());
    }
    // An empty forward list means nothing is forwarded
    #[test]
    fn test_forward_headers_no_headers_by_default() {
        let names: Vec<String> = vec![];
        let mut incoming = HeaderMap::new();
        incoming.insert("x-tenant-id", HeaderValue::from_static("tenant-123"));
        let mut outgoing = HeaderMap::new();
        forward_headers(&names, &incoming, &mut outgoing);
        assert!(outgoing.is_empty());
    }
    // Only named headers are forwarded; unnamed ones are ignored
    #[test]
    fn test_forward_headers_only_specific_headers() {
        let names = vec![
            "x-tenant-id".to_string(),     // Multi-tenancy
            "x-trace-id".to_string(),      // Distributed tracing
            "x-geo-country".to_string(),   // Geo information from CDN
            "x-experiment-id".to_string(), // A/B testing
            "ai-client-name".to_string(),  // Client identification
        ];
        let mut incoming = HeaderMap::new();
        incoming.insert("x-tenant-id", HeaderValue::from_static("tenant-123"));
        incoming.insert("x-trace-id", HeaderValue::from_static("trace-456"));
        incoming.insert("x-geo-country", HeaderValue::from_static("US"));
        incoming.insert("x-experiment-id", HeaderValue::from_static("exp-789"));
        incoming.insert("ai-client-name", HeaderValue::from_static("claude"));
        incoming.insert("other-header", HeaderValue::from_static("ignored"));
        let mut outgoing = HeaderMap::new();
        forward_headers(&names, &incoming, &mut outgoing);
        assert_eq!(outgoing.get("x-tenant-id").unwrap(), "tenant-123");
        assert_eq!(outgoing.get("x-trace-id").unwrap(), "trace-456");
        assert_eq!(outgoing.get("x-geo-country").unwrap(), "US");
        assert_eq!(outgoing.get("x-experiment-id").unwrap(), "exp-789");
        assert_eq!(outgoing.get("ai-client-name").unwrap(), "claude");
        assert!(outgoing.get("other-header").is_none());
    }
    // Hop-by-hop headers are blocked even when explicitly named
    #[test]
    fn test_forward_headers_blocks_hop_by_hop_headers() {
        let names = vec!["connection".to_string(), "content-length".to_string()];
        let mut incoming = HeaderMap::new();
        incoming.insert("connection", HeaderValue::from_static("keep-alive"));
        incoming.insert("content-length", HeaderValue::from_static("1234"));
        let mut outgoing = HeaderMap::new();
        forward_headers(&names, &incoming, &mut outgoing);
        assert!(outgoing.get("connection").is_none());
        assert!(outgoing.get("content-length").is_none());
    }
    // Header-name matching is case-insensitive
    #[test]
    fn test_forward_headers_case_insensitive_matching() {
        let names = vec!["X-Tenant-ID".to_string()];
        let mut incoming = HeaderMap::new();
        incoming.insert("x-tenant-id", HeaderValue::from_static("tenant-123"));
        let mut outgoing = HeaderMap::new();
        forward_headers(&names, &incoming, &mut outgoing);
        assert_eq!(outgoing.get("x-tenant-id").unwrap(), "tenant-123");
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/introspect.rs:
--------------------------------------------------------------------------------
```rust
use crate::errors::McpError;
use crate::introspection::minify::MinifyExt as _;
use crate::schema_from_type;
use crate::schema_tree_shake::{DepthLimit, SchemaTreeShaker};
use apollo_compiler::Schema;
use apollo_compiler::ast::OperationType;
use apollo_compiler::schema::ExtendedType;
use apollo_compiler::validation::Valid;
use rmcp::model::{CallToolResult, Content, Tool};
use rmcp::schemars::JsonSchema;
use rmcp::serde_json::Value;
use rmcp::{schemars, serde_json};
use serde::Deserialize;
use std::sync::Arc;
use tokio::sync::Mutex;
/// The name of the tool to get GraphQL schema type information.
pub const INTROSPECT_TOOL_NAME: &str = "introspect";
/// A tool to get detailed information about specific types from the GraphQL schema.
#[derive(Clone)]
pub struct Introspect {
    /// The schema to introspect, shared behind an async lock.
    schema: Arc<Mutex<Valid<Schema>>>,
    /// Whether mutation types may be returned; true when the schema has a root mutation type.
    allow_mutations: bool,
    /// Whether returned type definitions are minified.
    minify: bool,
    /// The MCP tool definition advertised to clients.
    pub tool: Tool,
}
/// Input for the introspect tool.
#[derive(JsonSchema, Deserialize, Debug)]
pub struct Input {
    /// The name of the type to get information about.
    type_name: String,
    /// How far to recurse the type hierarchy. Use 0 for no limit. Defaults to 1.
    // `default_depth` is defined elsewhere in this module; a depth of 0 maps
    // to DepthLimit::Unlimited in `execute`.
    #[serde(default = "default_depth")]
    depth: usize,
}
impl Introspect {
    /// Create an introspect tool over the given shared schema.
    ///
    /// Mutations are allowed in output only when `root_mutation_type` is
    /// `Some`; the root type names are used to build the tool description.
    pub fn new(
        schema: Arc<Mutex<Valid<Schema>>>,
        root_query_type: Option<String>,
        root_mutation_type: Option<String>,
        minify: bool,
    ) -> Self {
        Self {
            schema,
            allow_mutations: root_mutation_type.is_some(),
            minify,
            tool: Tool::new(
                INTROSPECT_TOOL_NAME,
                tool_description(root_query_type, root_mutation_type, minify),
                schema_from_type!(Input),
            ),
        }
    }
    /// Return the definition of `input.type_name` (plus types it references,
    /// up to `input.depth`; 0 means unlimited) as text content.
    ///
    /// An unknown type name yields an empty, non-error result.
    #[tracing::instrument(skip(self))]
    pub async fn execute(&self, input: Input) -> Result<CallToolResult, McpError> {
        let schema = self.schema.lock().await;
        let type_name = input.type_name.as_str();
        let mut tree_shaker = SchemaTreeShaker::new(&schema);
        match schema.types.get(type_name) {
            Some(extended_type) => tree_shaker.retain_type(
                extended_type,
                None,
                if input.depth > 0 {
                    DepthLimit::Limited(input.depth)
                } else {
                    DepthLimit::Unlimited
                },
            ),
            None => {
                return Ok(CallToolResult {
                    content: vec![],
                    is_error: None,
                    meta: None,
                    structured_content: None,
                });
            }
        }
        // Fall back to the partial schema if tree shaking produced an invalid one.
        let shaken = tree_shaker.shaken().unwrap_or_else(|schema| schema.partial);
        Ok(CallToolResult {
            content: shaken
                .types
                .iter()
                .filter(|(_name, extended_type)| {
                    // Drop built-ins; hide the mutation root unless mutations are
                    // allowed or the mutation root itself was requested; always
                    // hide the subscription root.
                    !extended_type.is_built_in()
                        && schema
                            .root_operation(OperationType::Mutation)
                            .is_none_or(|root_name| {
                                // Allow introspection of the mutation type itself even when mutations are disabled
                                extended_type.name() != root_name
                                    || type_name == root_name.as_str()
                                    || self.allow_mutations
                            })
                        && schema
                            .root_operation(OperationType::Subscription)
                            .is_none_or(|root_name| extended_type.name() != root_name)
                })
                .map(|(_, extended_type)| extended_type)
                .map(|extended_type| self.serialize(extended_type))
                .map(Content::text)
                .collect(),
            is_error: None,
            meta: None,
            // The content being returned is a raw string, so no need to create structured content for it
            structured_content: None,
        })
    }
    /// Serialize one type definition, honoring the minify setting.
    fn serialize(&self, extended_type: &ExtendedType) -> String {
        if self.minify {
            extended_type.minify()
        } else {
            extended_type.serialize().to_string()
        }
    }
}
/// Build the human-readable description for the introspect tool.
///
/// The minified variant embeds a legend for the compact serialization; the
/// full variant names the root query/mutation types (defaulting to "Query"
/// and "Mutation") so a model knows where to start exploring.
fn tool_description(
    root_query_type: Option<String>,
    root_mutation_type: Option<String>,
    minify: bool,
) -> String {
    if !minify {
        let query_root = root_query_type.as_deref().unwrap_or("Query");
        let mutation_root = root_mutation_type.as_deref().unwrap_or("Mutation");
        return format!(
            "Get information about a given GraphQL type defined in the schema. Instructions: Use this tool to explore the schema by providing specific type names. Start with the root query ({query_root}) or mutation ({mutation_root}) types to discover available fields. If the search tool is also available, use this tool first to get the fields, then use the search tool with relevant field return types and argument input types (ignore default GraphQL scalars) as search terms."
        );
    }
    String::from(
        "Get GraphQL type information - T=type,I=input,E=enum,U=union,F=interface;s=String,i=Int,f=Float,b=Boolean,d=ID;@D=deprecated;!=required,[]=list,<>=implements;",
    )
}
/// The default depth to recurse the type hierarchy.
///
/// Referenced by `#[serde(default = "default_depth")]` on `Input::depth`,
/// which is why this must be a function rather than a constant.
fn default_depth() -> usize {
    1
}
#[cfg(test)]
mod tests {
    use super::*;
    use apollo_compiler::Schema;
    use apollo_compiler::validation::Valid;
    use rstest::{fixture, rstest};
    use std::sync::Arc;
    use tokio::sync::Mutex;
    const TEST_SCHEMA: &str = include_str!("testdata/schema.graphql");
    /// Parse and validate the shared test schema.
    #[fixture]
    fn schema() -> Valid<Schema> {
        Schema::parse(TEST_SCHEMA, "schema.graphql")
            .expect("Failed to parse test schema")
            .validate()
            .expect("Failed to validate test schema")
    }
    /// Join all text content of a tool result into a single string.
    ///
    /// Shared helper replacing the raw-content extraction boilerplate that was
    /// previously duplicated in each introspection test below.
    fn text_content(result: &CallToolResult) -> String {
        use rmcp::model::RawContent;
        use std::ops::Deref;
        result
            .content
            .iter()
            .filter_map(|c| match c.deref() {
                RawContent::Text(text) => Some(text.text.clone()),
                _ => None,
            })
            .collect::<Vec<String>>()
            .join("\n")
    }
    #[rstest]
    #[tokio::test]
    async fn test_introspect_tool_description_is_not_minified(schema: Valid<Schema>) {
        let introspect = Introspect::new(Arc::new(Mutex::new(schema)), None, None, false);
        let description = introspect.tool.description.unwrap();
        assert!(
            description
                .contains("Get information about a given GraphQL type defined in the schema")
        );
        assert!(description.contains("Instructions: Use this tool to explore the schema"));
        // Should not contain minification legend
        assert!(!description.contains("T=type,I=input"));
        // Should mention conditional search tool usage
        assert!(description.contains("If the search tool is also available"));
    }
    #[rstest]
    #[tokio::test]
    async fn test_introspect_tool_description_is_minified_with_an_appropriate_legend(
        schema: Valid<Schema>,
    ) {
        let introspect = Introspect::new(Arc::new(Mutex::new(schema)), None, None, true);
        let description = introspect.tool.description.unwrap();
        // Should contain minification legend
        assert!(description.contains("T=type,I=input,E=enum,U=union,F=interface"));
        assert!(description.contains("s=String,i=Int,f=Float,b=Boolean,d=ID"));
    }
    #[rstest]
    #[tokio::test]
    async fn test_introspect_query_depth_1_returns_fields(schema: Valid<Schema>) {
        let introspect = Introspect::new(
            Arc::new(Mutex::new(schema)),
            Some("Query".to_string()),
            Some("Mutation".to_string()),
            false,
        );
        let result = introspect
            .execute(Input {
                type_name: "Query".to_string(),
                depth: 1,
            })
            .await
            .expect("Introspect execution failed");
        let content = text_content(&result);
        // Query with depth 1 should return the Query type with its fields
        assert!(!result.content.is_empty());
        assert!(content.contains("type Query"));
    }
    #[rstest]
    #[tokio::test]
    async fn test_introspect_mutation_depth_1_returns_fields(schema: Valid<Schema>) {
        let introspect = Introspect::new(
            Arc::new(Mutex::new(schema)),
            Some("Query".to_string()),
            Some("Mutation".to_string()),
            false,
        );
        let result = introspect
            .execute(Input {
                type_name: "Mutation".to_string(),
                depth: 1,
            })
            .await
            .expect("Introspect execution failed");
        let content = text_content(&result);
        // Mutation with depth 1 should return the Mutation type with its fields, just like Query
        assert!(
            !result.content.is_empty(),
            "Mutation introspection should return content"
        );
        assert!(
            content.contains("type Mutation"),
            "Should contain Mutation type definition"
        );
    }
    #[rstest]
    #[tokio::test]
    async fn test_introspect_mutation_depth_1_with_mutations_disabled(schema: Valid<Schema>) {
        // This test verifies the fix: when mutations are not allowed, mutation introspection should still work
        let introspect = Introspect::new(
            Arc::new(Mutex::new(schema)),
            Some("Query".to_string()),
            None,
            false,
        );
        let result = introspect
            .execute(Input {
                type_name: "Mutation".to_string(),
                depth: 1,
            })
            .await
            .expect("Introspect execution failed");
        let content = text_content(&result);
        // After the fix: mutation introspection should work even when mutations are disabled
        assert!(
            !result.content.is_empty(),
            "Mutation introspection should return content even when mutations are disabled"
        );
        assert!(
            content.contains("type Mutation"),
            "Should contain Mutation type definition"
        );
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/schema.rs:
--------------------------------------------------------------------------------
```rust
pub mod event;
mod schema_stream;
use std::convert::Infallible;
use std::str::FromStr;
use std::path::PathBuf;
use std::pin::Pin;
use std::time::Duration;
use crate::uplink::UplinkConfig;
use crate::uplink::stream_from_uplink;
use derive_more::Display;
use derive_more::From;
use educe::Educe;
use event::Event;
use event::Event::{NoMoreSchema, UpdateSchema};
use futures::prelude::*;
pub(crate) use schema_stream::SupergraphSdlQuery;
use url::Url;
/// Represents the new state of a schema after an update.
#[derive(Eq, PartialEq, Debug)]
pub struct SchemaState {
    /// The supergraph schema SDL text.
    pub sdl: String,
    /// Launch identifier; `None` for every source constructed in this module
    /// (presumably populated when deserialized from Uplink responses — verify
    /// against `SupergraphSdlQuery`).
    pub(crate) launch_id: Option<String>,
}
impl FromStr for SchemaState {
    type Err = Infallible;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self {
            sdl: s.to_string(),
            launch_id: None,
        })
    }
}
/// A boxed, sendable stream of schema SDL strings (used by [`SchemaSource::Stream`]).
type SchemaStream = Pin<Box<dyn Stream<Item = String> + Send>>;
/// The user supplied schema. Either a static string or a stream for hot reloading.
#[derive(From, Display, Educe)]
#[educe(Debug)]
#[non_exhaustive]
pub enum SchemaSource {
    /// A static schema.
    #[display("String")]
    Static { schema_sdl: String },
    /// A stream of schema.
    #[display("Stream")]
    Stream(#[educe(Debug(ignore))] SchemaStream),
    /// A schema file that may be watched for changes.
    #[display("File")]
    File {
        /// The path of the schema file.
        path: PathBuf,
        /// `true` to watch the file for changes and hot apply them.
        watch: bool,
    },
    /// Apollo managed federation.
    #[display("Registry")]
    Registry(UplinkConfig),
    /// A list of URLs to fetch the schema from.
    #[display("URLs")]
    URLs {
        /// The URLs to fetch the schema from.
        urls: Vec<Url>,
    },
}
impl From<&'_ str> for SchemaSource {
    fn from(s: &'_ str) -> Self {
        Self::Static {
            schema_sdl: s.to_owned(),
        }
    }
}
impl SchemaSource {
    /// Convert this schema into a stream regardless of if is static or not. Allows for unified handling later.
    ///
    /// Every variant yields zero or more `UpdateSchema` events, and a single
    /// `NoMoreSchema` sentinel is chained at the end so consumers can treat
    /// all sources uniformly.
    pub fn into_stream(self) -> impl Stream<Item = Event> {
        match self {
            // A fixed SDL string: exactly one update event.
            SchemaSource::Static { schema_sdl: schema } => {
                let update_schema = UpdateSchema(SchemaState {
                    sdl: schema,
                    launch_id: None,
                });
                stream::once(future::ready(update_schema)).boxed()
            }
            // Caller-provided stream of SDL strings; wrap each in an event.
            SchemaSource::Stream(stream) => stream
                .map(|sdl| {
                    UpdateSchema(SchemaState {
                        sdl,
                        launch_id: None,
                    })
                })
                .boxed(),
            SchemaSource::File {
                path,
                watch,
            } => {
                // Sanity check, does the schema file exists, if it doesn't then bail.
                if !path.exists() {
                    tracing::error!(
                        "Supergraph schema at path '{}' does not exist.",
                        path.to_string_lossy()
                    );
                    stream::empty().boxed()
                } else {
                    //The schema file exists try and load it
                    match std::fs::read_to_string(&path) {
                        Ok(schema) => {
                            if watch {
                                // Re-read the file on each change notification; the
                                // contents read above are not used in this branch.
                                // NOTE(review): assumes files::watch emits an initial
                                // notification so the first update is delivered —
                                // the tests below rely on that; confirm in files.rs.
                                crate::files::watch(&path)
                                    .filter_map(move |_| {
                                        let path = path.clone();
                                        async move {
                                            match tokio::fs::read_to_string(&path).await {
                                                Ok(schema) => {
                                                    let update_schema = UpdateSchema(SchemaState {
                                                        sdl: schema,
                                                        launch_id: None,
                                                    });
                                                    Some(update_schema)
                                                }
                                                Err(err) => {
                                                    // Log and skip; keep watching for later changes.
                                                    tracing::error!(reason = %err, "failed to read supergraph schema");
                                                    None
                                                }
                                            }
                                        }
                                    })
                                    .boxed()
                            } else {
                                let update_schema = UpdateSchema(SchemaState {
                                    sdl: schema,
                                    launch_id: None,
                                });
                                stream::once(future::ready(update_schema)).boxed()
                            }
                        }
                        Err(err) => {
                            tracing::error!(reason = %err, "failed to read supergraph schema");
                            stream::empty().boxed()
                        }
                    }
                }
            }
            // Apollo Uplink: forward successful poll results, log and drop errors.
            SchemaSource::Registry(uplink_config) => {
                stream_from_uplink::<SupergraphSdlQuery, SchemaState>(uplink_config)
                    .filter_map(|res| {
                        future::ready(match res {
                            Ok(schema) => {
                                let update_schema = UpdateSchema(schema);
                                Some(update_schema)
                            }
                            Err(e) => {
                                tracing::error!("{}", e);
                                None
                            }
                        })
                    })
                    .boxed()
            }
            // One-shot fetch from the first URL that responds successfully.
            SchemaSource::URLs { urls } => {
                futures::stream::once(async move {
                    fetch_supergraph_from_first_viable_url(&urls).await
                })
                    .filter_map(|s| async move { s.map(Event::UpdateSchema) })
                    .boxed()
            }
        }
            // Signal end-of-stream so consumers know no further updates follow.
            .chain(stream::iter(vec![NoMoreSchema]))
            .boxed()
    }
}
/// Fetch the supergraph schema from the first viable URL.
///
/// Tries each URL in order and returns the body of the first successful
/// response; each failure is logged as a warning, and `None` is returned
/// (with an error log) when every URL fails or the client cannot be built.
async fn fetch_supergraph_from_first_viable_url(urls: &[Url]) -> Option<SchemaState> {
    let Ok(client) = reqwest::Client::builder()
        .no_gzip()
        .timeout(Duration::from_secs(10))
        .build()
    else {
        tracing::error!("failed to create HTTP client to fetch supergraph schema");
        return None;
    };
    for url in urls {
        // The URL is already parsed and valid; pass its string form directly
        // instead of re-parsing and unwrapping (which needed a
        // `clippy::unwrap_used` allowance and had a needless panic path).
        match client.get(url.as_str()).send().await {
            Ok(res) if res.status().is_success() => match res.text().await {
                Ok(schema) => {
                    return Some(SchemaState {
                        sdl: schema,
                        launch_id: None,
                    });
                }
                Err(err) => {
                    tracing::warn!(
                        url.full = %url,
                        reason = %err,
                        "failed to fetch supergraph schema"
                    )
                }
            },
            Ok(res) => tracing::warn!(
                http.response.status_code = res.status().as_u16(),
                url.full = %url,
                "failed to fetch supergraph schema"
            ),
            Err(err) => tracing::warn!(
                url.full = %url,
                reason = %err,
                "failed to fetch supergraph schema"
            ),
        }
    }
    tracing::error!("failed to fetch supergraph schema from all urls");
    None
}
#[cfg(test)]
mod tests {
    use std::env::temp_dir;
    use test_log::test;
    use tracing_futures::WithSubscriber;
    use wiremock::Mock;
    use wiremock::MockServer;
    use wiremock::ResponseTemplate;
    use wiremock::matchers::method;
    use wiremock::matchers::path;
    use super::*;
    use crate::assert_snapshot_subscriber;
    use crate::files::tests::create_temp_file;
    use crate::files::tests::write_and_flush;
    /// A watched schema file emits the initial contents, then one update per change.
    #[test(tokio::test)]
    async fn schema_by_file_watching() {
        let (path, mut file) = create_temp_file();
        let schema = include_str!("../testdata/supergraph.graphql");
        write_and_flush(&mut file, schema).await;
        let mut stream = SchemaSource::File { path, watch: true }
            .into_stream()
            .boxed();
        // First update is guaranteed
        assert!(matches!(stream.next().await.unwrap(), UpdateSchema(_)));
        // Need different contents, since we won't get an event if content is the same
        let schema_minimal = include_str!("../testdata/minimal_supergraph.graphql");
        // Modify the file and try again
        write_and_flush(&mut file, schema_minimal).await;
        assert!(matches!(stream.next().await.unwrap(), UpdateSchema(_)));
    }
    /// Without watching, the file is read once and the stream then terminates.
    #[test(tokio::test)]
    async fn schema_by_file_no_watch() {
        let (path, mut file) = create_temp_file();
        let schema = include_str!("../testdata/supergraph.graphql");
        write_and_flush(&mut file, schema).await;
        let mut stream = SchemaSource::File { path, watch: false }.into_stream();
        assert!(matches!(stream.next().await.unwrap(), UpdateSchema(_)));
        assert!(matches!(stream.next().await.unwrap(), NoMoreSchema));
    }
    /// A missing file produces no updates, only the end-of-stream sentinel.
    #[test(tokio::test)]
    async fn schema_by_file_missing() {
        let mut stream = SchemaSource::File {
            path: temp_dir().join("does_not_exist"),
            watch: true,
        }
        .into_stream();
        // No update is emitted because the file does not exist.
        assert!(matches!(stream.next().await.unwrap(), NoMoreSchema));
    }
    const SCHEMA_1: &str = "schema1";
    const SCHEMA_2: &str = "schema2";
    /// The first URL that responds successfully wins; later URLs are not consulted.
    #[test(tokio::test)]
    async fn schema_by_url() {
        async {
            let mock_server = MockServer::start().await;
            Mock::given(method("GET"))
                .and(path("/schema1"))
                .respond_with(ResponseTemplate::new(200).set_body_string(SCHEMA_1))
                .mount(&mock_server)
                .await;
            Mock::given(method("GET"))
                .and(path("/schema2"))
                .respond_with(ResponseTemplate::new(200).set_body_string(SCHEMA_2))
                .mount(&mock_server)
                .await;
            let mut stream = SchemaSource::URLs {
                urls: vec![
                    Url::parse(&format!("http://{}/schema1", mock_server.address())).unwrap(),
                    Url::parse(&format!("http://{}/schema2", mock_server.address())).unwrap(),
                ],
            }
                .into_stream();
            assert!(
                matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1)
            );
            assert!(matches!(stream.next().await.unwrap(), NoMoreSchema));
        }
            .with_subscriber(assert_snapshot_subscriber!())
            .await;
    }
    /// A URL that returns an error status falls back to the next URL in the list.
    #[test(tokio::test)]
    async fn schema_by_url_fallback() {
        async {
            let mock_server = MockServer::start().await;
            Mock::given(method("GET"))
                .and(path("/schema1"))
                .respond_with(ResponseTemplate::new(400))
                .mount(&mock_server)
                .await;
            Mock::given(method("GET"))
                .and(path("/schema2"))
                .respond_with(ResponseTemplate::new(200).set_body_string(SCHEMA_2))
                .mount(&mock_server)
                .await;
            let mut stream = SchemaSource::URLs {
                urls: vec![
                    Url::parse(&format!("http://{}/schema1", mock_server.address())).unwrap(),
                    Url::parse(&format!("http://{}/schema2", mock_server.address())).unwrap(),
                ],
            }
                .into_stream();
            assert!(
                matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_2)
            );
            assert!(matches!(stream.next().await.unwrap(), NoMoreSchema));
        }
            // Redact the randomized mock-server URL so the log snapshot is stable.
            .with_subscriber(assert_snapshot_subscriber!({
            "[].fields[\"url.full\"]" => "[url.full]"
        }))
            .await;
    }
    /// When every URL fails, only the end-of-stream sentinel is emitted.
    #[test(tokio::test)]
    async fn schema_by_url_all_fail() {
        async {
            let mock_server = MockServer::start().await;
            Mock::given(method("GET"))
                .and(path("/schema1"))
                .respond_with(ResponseTemplate::new(400))
                .mount(&mock_server)
                .await;
            Mock::given(method("GET"))
                .and(path("/schema2"))
                .respond_with(ResponseTemplate::new(400))
                .mount(&mock_server)
                .await;
            let mut stream = SchemaSource::URLs {
                urls: vec![
                    Url::parse(&format!("http://{}/schema1", mock_server.address())).unwrap(),
                    Url::parse(&format!("http://{}/schema2", mock_server.address())).unwrap(),
                ],
            }
            .into_stream();
            assert!(matches!(stream.next().await.unwrap(), NoMoreSchema));
        }
        // Redact the randomized mock-server URL so the log snapshot is stable.
        .with_subscriber(assert_snapshot_subscriber!({
            "[].fields[\"url.full\"]" => "[url.full]"
        }))
        .await;
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/telemetry.rs:
--------------------------------------------------------------------------------
```rust
mod sampler;
use crate::runtime::Config;
use crate::runtime::filtering_exporter::FilteringExporter;
use crate::runtime::logging::Logging;
use crate::runtime::telemetry::sampler::SamplerOption;
use apollo_mcp_server::generated::telemetry::TelemetryAttribute;
use opentelemetry::{Key, KeyValue, global, trace::TracerProvider as _};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::metrics::{Instrument, Stream, Temporality};
use opentelemetry_sdk::{
    Resource,
    metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider},
    propagation::TraceContextPropagator,
    trace::{RandomIdGenerator, SdkTracerProvider},
};
use opentelemetry_semantic_conventions::{
    SCHEMA_URL,
    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, SERVICE_VERSION},
};
use schemars::JsonSchema;
use serde::Deserialize;
use std::collections::HashSet;
use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Telemetry related options
#[derive(Debug, Deserialize, JsonSchema, Default)]
pub struct Telemetry {
    /// Exporter configuration; when `None`, no-op providers are installed.
    exporters: Option<Exporters>,
    /// Service name for the OTel resource; defaults to this crate's name.
    service_name: Option<String>,
    /// Service version for the OTel resource; defaults to this crate's version.
    version: Option<String>,
}
/// Exporter configuration, split by signal type.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct Exporters {
    /// Metrics exporter settings.
    metrics: Option<MetricsExporters>,
    /// Trace exporter settings.
    tracing: Option<TracingExporters>,
}
/// Metrics exporter settings.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct MetricsExporters {
    /// OTLP metrics exporter configuration.
    otlp: Option<OTLPMetricExporter>,
    /// Telemetry attributes to exclude from exported metrics.
    omitted_attributes: Option<HashSet<TelemetryAttribute>>,
}
/// OTLP metric aggregation temporality (maps onto the SDK's `Temporality`).
#[derive(Debug, Deserialize, JsonSchema)]
pub enum MetricTemporality {
    /// Cumulative aggregation.
    Cumulative,
    /// Delta aggregation.
    Delta,
}
impl OTLPMetricExporter {
    /// Resolve the configured temporality, defaulting to cumulative when unset.
    pub fn to_temporality(&self) -> Temporality {
        match self.temporality {
            Some(MetricTemporality::Delta) => Temporality::Delta,
            Some(MetricTemporality::Cumulative) | None => Temporality::Cumulative,
        }
    }
}
/// OTLP metrics exporter configuration.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct OTLPMetricExporter {
    /// Collector endpoint, e.g. "http://localhost:4317".
    endpoint: String,
    /// Transport protocol: "grpc" or "http/protobuf".
    protocol: String,
    /// Aggregation temporality; treated as cumulative when unset.
    temporality: Option<MetricTemporality>,
}
impl Default for OTLPMetricExporter {
    fn default() -> Self {
        Self {
            endpoint: "http://localhost:4317".into(),
            protocol: "grpc".into(),
            temporality: Some(MetricTemporality::Cumulative),
        }
    }
}
/// Trace exporter settings.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct TracingExporters {
    /// OTLP trace exporter configuration.
    otlp: Option<OTLPTracingExporter>,
    /// Sampling strategy; falls back to `SamplerOption::default()` when unset.
    sampler: Option<SamplerOption>,
    /// Telemetry attributes to strip from exported spans.
    omitted_attributes: Option<HashSet<TelemetryAttribute>>,
}
/// OTLP trace exporter configuration.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct OTLPTracingExporter {
    /// Collector endpoint, e.g. "http://localhost:4317".
    endpoint: String,
    /// Transport protocol: "grpc" or "http/protobuf".
    protocol: String,
}
impl Default for OTLPTracingExporter {
    fn default() -> Self {
        Self {
            endpoint: "http://localhost:4317".into(),
            protocol: "grpc".into(),
        }
    }
}
/// Build the OpenTelemetry resource describing this service.
///
/// Service name and version fall back to this crate's Cargo metadata, and
/// the deployment environment comes from the `ENVIRONMENT` variable
/// (defaulting to "development").
fn resource(telemetry: &Telemetry) -> Resource {
    let service_name = match telemetry.service_name.clone() {
        Some(name) => name,
        None => env!("CARGO_PKG_NAME").to_string(),
    };
    let service_version = match telemetry.version.clone() {
        Some(version) => version,
        None => env!("CARGO_PKG_VERSION").to_string(),
    };
    let deployment_env =
        std::env::var("ENVIRONMENT").unwrap_or_else(|_| String::from("development"));
    let schema_attributes = [
        KeyValue::new(SERVICE_VERSION, service_version),
        KeyValue::new(DEPLOYMENT_ENVIRONMENT_NAME, deployment_env),
    ];
    Resource::builder()
        .with_service_name(service_name)
        .with_schema_url(schema_attributes, SCHEMA_URL)
        .build()
}
/// Build an OTLP metrics provider from the telemetry config.
///
/// Errors if no OTLP metrics exporter is configured or the protocol is not
/// one of `grpc` / `http/protobuf`. Metrics are exported on a 30-second
/// periodic reader.
fn init_meter_provider(telemetry: &Telemetry) -> Result<SdkMeterProvider, anyhow::Error> {
    let metrics_exporters = telemetry
        .exporters
        .as_ref()
        .and_then(|exporters| exporters.metrics.as_ref());
    let otlp = metrics_exporters
        .and_then(|metrics_exporters| metrics_exporters.otlp.as_ref())
        .ok_or_else(|| {
            anyhow::anyhow!("No metrics exporters configured, at least one is required")
        })?;
    let exporter = match otlp.protocol.as_str() {
        "grpc" => opentelemetry_otlp::MetricExporter::builder()
            .with_tonic()
            .with_endpoint(otlp.endpoint.clone())
            .with_temporality(otlp.to_temporality())
            .build()?,
        "http/protobuf" => opentelemetry_otlp::MetricExporter::builder()
            .with_http()
            .with_endpoint(otlp.endpoint.clone())
            .with_temporality(otlp.to_temporality())
            .build()?,
        other => {
            return Err(anyhow::anyhow!(
                "Unsupported OTLP protocol: {other}. Supported protocols are: grpc, http/protobuf"
            ));
        }
    };
    // Attribute keys to keep = all known attributes minus the configured omissions.
    let omitted_attributes: HashSet<TelemetryAttribute> = metrics_exporters
        .and_then(|exporters| exporters.omitted_attributes.clone())
        .unwrap_or_default();
    let included_attributes: Vec<Key> = TelemetryAttribute::included_attributes(omitted_attributes)
        .iter()
        .map(|a| a.to_key())
        .collect();
    let reader = PeriodicReader::builder(exporter)
        .with_interval(std::time::Duration::from_secs(30))
        .build();
    // View that restricts attribute keys on apollo-namespaced instruments only;
    // all other instruments pass through unmodified (no view applied).
    let filtered_view = move |i: &Instrument| {
        if i.name().starts_with("apollo.") {
            Stream::builder()
                .with_allowed_attribute_keys(included_attributes.clone())
                .build()
                .ok()
        } else {
            None
        }
    };
    let meter_provider = MeterProviderBuilder::default()
        .with_resource(resource(telemetry))
        .with_reader(reader)
        .with_view(filtered_view)
        .build();
    Ok(meter_provider)
}
/// Build an OTLP trace provider from the telemetry config.
///
/// Errors if no OTLP tracing exporter is configured or the protocol is not
/// one of `grpc` / `http/protobuf`. Spans are batch-exported through a
/// `FilteringExporter` that strips the configured omitted attributes.
fn init_tracer_provider(telemetry: &Telemetry) -> Result<SdkTracerProvider, anyhow::Error> {
    let tracer_exporters = telemetry
        .exporters
        .as_ref()
        .and_then(|exporters| exporters.tracing.as_ref());
    let otlp = tracer_exporters
        .and_then(|tracing_exporters| tracing_exporters.otlp.as_ref())
        .ok_or_else(|| {
            anyhow::anyhow!("No tracing exporters configured, at least one is required")
        })?;
    let exporter = match otlp.protocol.as_str() {
        "grpc" => opentelemetry_otlp::SpanExporter::builder()
            .with_tonic()
            .with_endpoint(otlp.endpoint.clone())
            .build()?,
        "http/protobuf" => opentelemetry_otlp::SpanExporter::builder()
            .with_http()
            .with_endpoint(otlp.endpoint.clone())
            .build()?,
        other => {
            return Err(anyhow::anyhow!(
                "Unsupported OTLP protocol: {other}. Supported protocols are: grpc, http/protobuf"
            ));
        }
    };
    // `tracer_exporters` is already an `Option<&TracingExporters>`; the
    // previous extra `.as_ref()` only produced a needless `Option<&&_>`.
    let sampler: opentelemetry_sdk::trace::Sampler = tracer_exporters
        .and_then(|e| e.sampler.clone())
        .unwrap_or_default()
        .into();
    let omitted_attributes: HashSet<Key> = tracer_exporters
        .and_then(|exporters| exporters.omitted_attributes.clone())
        .map(|set| set.iter().map(|a| a.to_key()).collect())
        .unwrap_or_default();
    let filtering_exporter = FilteringExporter::new(exporter, omitted_attributes);
    let tracer_provider = SdkTracerProvider::builder()
        .with_id_generator(RandomIdGenerator::default())
        .with_resource(resource(telemetry))
        .with_batch_exporter(filtering_exporter)
        .with_sampler(sampler)
        .build();
    Ok(tracer_provider)
}
/// Initialize tracing-subscriber and return TelemetryGuard for logging and opentelemetry-related termination processing
pub fn init_tracing_subscriber(config: &Config) -> Result<TelemetryGuard, anyhow::Error> {
    let tracer_provider = if let Some(exporters) = &config.telemetry.exporters {
        if let Some(_tracing_exporters) = &exporters.tracing {
            init_tracer_provider(&config.telemetry)?
        } else {
            SdkTracerProvider::builder().build()
        }
    } else {
        SdkTracerProvider::builder().build()
    };
    let meter_provider = if let Some(exporters) = &config.telemetry.exporters {
        if let Some(_metrics_exporters) = &exporters.metrics {
            init_meter_provider(&config.telemetry)?
        } else {
            SdkMeterProvider::builder().build()
        }
    } else {
        SdkMeterProvider::builder().build()
    };
    let env_filter = Logging::env_filter(&config.logging)?;
    let (logging_layer, logging_guard) = Logging::logging_layer(&config.logging)?;
    let tracer = tracer_provider.tracer("apollo-mcp-trace");
    global::set_meter_provider(meter_provider.clone());
    global::set_text_map_propagator(TraceContextPropagator::new());
    global::set_tracer_provider(tracer_provider.clone());
    tracing_subscriber::registry()
        .with(logging_layer)
        .with(env_filter)
        .with(MetricsLayer::new(meter_provider.clone()))
        .with(OpenTelemetryLayer::new(tracer))
        .try_init()?;
    Ok(TelemetryGuard {
        tracer_provider,
        meter_provider,
        logging_guard,
    })
}
/// Guard returned by `init_tracing_subscriber`; shuts down the telemetry
/// pipelines and releases the logging worker when dropped.
pub struct TelemetryGuard {
    // Span provider; shut down explicitly on drop.
    tracer_provider: SdkTracerProvider,
    // Metrics provider; shut down explicitly on drop.
    meter_provider: SdkMeterProvider,
    // Keeps the non-blocking log writer thread alive while held.
    logging_guard: Option<tracing_appender::non_blocking::WorkerGuard>,
}
impl Drop for TelemetryGuard {
    fn drop(&mut self) {
        // Shut the providers down explicitly so shutdown errors are surfaced;
        // they are logged rather than panicking inside drop.
        if let Err(err) = self.tracer_provider.shutdown() {
            tracing::error!("{err:?}");
        }
        if let Err(err) = self.meter_provider.shutdown() {
            tracing::error!("{err:?}");
        }
        // Release the logging worker guard last so shutdown errors above are
        // still written by the non-blocking log writer.
        drop(self.logging_guard.take());
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a `Config` with the given telemetry settings and defaults elsewhere.
    fn test_config(
        service_name: Option<&str>,
        version: Option<&str>,
        metrics: Option<MetricsExporters>,
        tracing: Option<TracingExporters>,
    ) -> Config {
        Config {
            telemetry: Telemetry {
                exporters: Some(Exporters { metrics, tracing }),
                service_name: service_name.map(str::to_string),
                version: version.map(str::to_string),
            },
            ..Default::default()
        }
    }

    #[tokio::test]
    async fn guard_is_provided_when_tracing_configured() {
        let mut omitted = HashSet::new();
        omitted.insert(TelemetryAttribute::RequestId);
        let config = test_config(
            Some("test-config"),
            Some("1.0.0"),
            Some(MetricsExporters {
                otlp: Some(OTLPMetricExporter::default()),
                omitted_attributes: None,
            }),
            Some(TracingExporters {
                otlp: Some(OTLPTracingExporter::default()),
                sampler: Default::default(),
                omitted_attributes: Some(omitted),
            }),
        );
        // init_tracing_subscriber can only be called once in the test suite to avoid
        // panic when calling global::set_tracer_provider multiple times
        assert!(init_tracing_subscriber(&config).is_ok());
    }

    #[tokio::test]
    async fn unknown_protocol_raises_meter_provider_error() {
        let config = test_config(
            None,
            None,
            Some(MetricsExporters {
                otlp: Some(OTLPMetricExporter {
                    protocol: "bogus".to_string(),
                    endpoint: "http://localhost:4317".to_string(),
                    temporality: None,
                }),
                omitted_attributes: None,
            }),
            None,
        );
        // An unrecognized protocol must surface as an error mentioning it.
        let message = init_meter_provider(&config.telemetry)
            .expect_err("bogus protocol should be rejected")
            .to_string();
        assert!(message.contains("Unsupported OTLP protocol"));
    }

    #[tokio::test]
    async fn http_protocol_returns_valid_meter_provider() {
        let config = test_config(
            None,
            None,
            Some(MetricsExporters {
                otlp: Some(OTLPMetricExporter {
                    protocol: "http/protobuf".to_string(),
                    endpoint: "http://localhost:4318/v1/metrics".to_string(),
                    temporality: Some(MetricTemporality::Delta),
                }),
                omitted_attributes: None,
            }),
            None,
        );
        assert!(init_meter_provider(&config.telemetry).is_ok());
    }

    #[tokio::test]
    async fn unknown_protocol_raises_tracer_provider_error() {
        let config = test_config(
            None,
            None,
            None,
            Some(TracingExporters {
                otlp: Some(OTLPTracingExporter {
                    protocol: "bogus".to_string(),
                    endpoint: "http://localhost:4317".to_string(),
                }),
                sampler: Default::default(),
                omitted_attributes: None,
            }),
        );
        // An unrecognized protocol must surface as an error mentioning it.
        let message = init_tracer_provider(&config.telemetry)
            .expect_err("bogus protocol should be rejected")
            .to_string();
        assert!(message.contains("Unsupported OTLP protocol"));
    }

    #[tokio::test]
    async fn http_protocol_returns_valid_tracer_provider() {
        let config = test_config(
            None,
            None,
            None,
            Some(TracingExporters {
                otlp: Some(OTLPTracingExporter {
                    protocol: "http/protobuf".to_string(),
                    endpoint: "http://localhost:4318/v1/traces".to_string(),
                }),
                sampler: Default::default(),
                omitted_attributes: None,
            }),
        );
        assert!(init_tracer_provider(&config.telemetry).is_ok());
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth/valid_token.rs:
--------------------------------------------------------------------------------
```rust
use std::ops::Deref;
use headers::{Authorization, authorization::Bearer};
use jsonwebtoken::{Algorithm, Validation, decode, decode_header, jwk};
use jwks::Jwk;
use serde::{Deserialize, Serialize};
use tracing::{info, warn};
use url::Url;
/// A validated authentication token
///
/// Note: This is used as a marker to ensure that we have validated this
/// separately from just reading the header itself. Instances are only
/// produced by [`ValidateToken::validate`] after successful verification.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct ValidToken(pub(crate) Authorization<Bearer>);
// Allow transparent access to the wrapped `Authorization<Bearer>` header.
impl Deref for ValidToken {
    type Target = Authorization<Bearer>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Trait to handle validation of tokens
pub(super) trait ValidateToken {
    /// Get the intended audiences
    fn get_audiences(&self) -> &Vec<String>;
    /// Get the available upstream servers
    fn get_servers(&self) -> &Vec<Url>;
    /// Fetch the key by its ID
    async fn get_key(&self, server: &Url, key_id: &str) -> Option<Jwk>;
    /// Attempt to validate a token against the validator
    ///
    /// Tries each configured server in order and returns `Some(ValidToken)` for
    /// the first key that verifies the token's signature and claims; returns
    /// `None` if no server yields a key that validates the token.
    async fn validate(&self, token: Authorization<Bearer>) -> Option<ValidToken> {
        /// Claims which must be present in the JWT (and must match validation)
        /// in order for a JWT to be considered valid.
        ///
        /// See: https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-token-claims#registered-claims
        #[derive(Clone, Debug, Serialize, Deserialize)]
        pub struct Claims {
            /// The intended audience of this token.
            /// Can be either a single string or an array of strings per JWT spec. (https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3)
            #[serde(deserialize_with = "deserialize_audience")]
            pub aud: Vec<String>,
            /// The user who owns this token
            pub sub: String,
        }
        // Normalizes the `aud` claim to a Vec, accepting both the single-string
        // and array-of-strings forms permitted by RFC 7519.
        fn deserialize_audience<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
        where
            D: serde::Deserializer<'de>,
        {
            #[derive(Deserialize)]
            #[serde(untagged)]
            enum Audience {
                Single(String),
                Multiple(Vec<String>),
            }
            Ok(match Audience::deserialize(deserializer)? {
                Audience::Single(s) => vec![s],
                Audience::Multiple(v) => v,
            })
        }
        let jwt = token.token();
        // A token whose header cannot be parsed, or which carries no key ID,
        // cannot be matched to a key and is rejected immediately.
        let header = decode_header(jwt).ok()?;
        let key_id = header.kid.as_ref()?;
        for server in self.get_servers() {
            let Some(jwk) = self.get_key(server, key_id).await else {
                continue;
            };
            let validation = {
                // Map the JWK's key algorithm onto the corresponding
                // jsonwebtoken Algorithm; the two crates use distinct enums.
                let mut val = Validation::new(match jwk.alg {
                    jwk::KeyAlgorithm::HS256 => Algorithm::HS256,
                    jwk::KeyAlgorithm::HS384 => Algorithm::HS384,
                    jwk::KeyAlgorithm::HS512 => Algorithm::HS512,
                    jwk::KeyAlgorithm::ES256 => Algorithm::ES256,
                    jwk::KeyAlgorithm::ES384 => Algorithm::ES384,
                    jwk::KeyAlgorithm::RS256 => Algorithm::RS256,
                    jwk::KeyAlgorithm::RS384 => Algorithm::RS384,
                    jwk::KeyAlgorithm::RS512 => Algorithm::RS512,
                    jwk::KeyAlgorithm::PS256 => Algorithm::PS256,
                    jwk::KeyAlgorithm::PS384 => Algorithm::PS384,
                    jwk::KeyAlgorithm::PS512 => Algorithm::PS512,
                    jwk::KeyAlgorithm::EdDSA => Algorithm::EdDSA,
                    // No other validation key type is supported by this library, so we
                    // warn and fail if we encounter one.
                    other => {
                        warn!("Skipping JWT signed by unsupported algorithm: {other}");
                        continue;
                    }
                });
                // Require the token's audience to match one of ours.
                val.set_audience(self.get_audiences());
                val
            };
            // `decode` verifies the signature plus the registered claims
            // (expiry, audience) configured on `validation`.
            match decode::<Claims>(jwt, &jwk.decoding_key, &validation) {
                Ok(_) => {
                    return Some(ValidToken(token));
                }
                Err(e) => warn!("Token failed validation with error: {e}"),
            };
        }
        info!("Token did not pass validation");
        None
    }
}
#[cfg(test)]
mod test {
    use std::str::FromStr;
    use headers::{Authorization, authorization::Bearer};
    use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, encode, jwk::KeyAlgorithm};
    use jwks::Jwk;
    use serde::Serialize;
    use tracing_test::traced_test;
    use url::Url;
    use super::ValidateToken;
    /// Test double implementing `ValidateToken` with a single known key pair
    /// and a fixed list of trusted servers.
    struct TestTokenValidator {
        audiences: Vec<String>,
        key_pair: (String, Jwk),
        servers: Vec<Url>,
    }
    impl ValidateToken for TestTokenValidator {
        fn get_audiences(&self) -> &Vec<String> {
            &self.audiences
        }
        fn get_servers(&self) -> &Vec<url::Url> {
            &self.servers
        }
        async fn get_key(&self, server: &url::Url, key_id: &str) -> Option<jwks::Jwk> {
            // Return nothing if the server is not known to us
            if !self.get_servers().contains(server) {
                return None;
            }
            // Only return the key if it is the one we know
            self.key_pair
                .0
                .eq(key_id)
                .then_some(self.key_pair.1.clone())
        }
    }
    /// Creates a key for signing / verifying JWTs
    fn create_key(base64_secret: &str) -> (EncodingKey, DecodingKey) {
        let encode =
            EncodingKey::from_base64_secret(base64_secret).expect("create valid encoding key");
        let decode =
            DecodingKey::from_base64_secret(base64_secret).expect("create valid decoding key");
        (encode, decode)
    }
    /// Builds a bearer `Authorization` header wrapping an HS512-signed JWT
    /// with the given key ID, audience, and expiry timestamp.
    fn create_jwt(
        key_id: String,
        key: EncodingKey,
        audience: String,
        expires_at: i64,
    ) -> Authorization<Bearer> {
        #[derive(Serialize)]
        struct Claims {
            aud: String,
            exp: i64,
            sub: String,
        }
        let header = {
            let mut h = Header::new(Algorithm::HS512);
            h.kid = Some(key_id);
            h
        };
        let token = encode(
            &header,
            &Claims {
                aud: audience,
                exp: expires_at,
                sub: "test user".to_string(),
            },
            &key,
        )
        .expect("encode JWT");
        Authorization::bearer(&token).expect("create bearer token")
    }
    // Happy path: matching key, matching audience, and a future expiry.
    #[tokio::test]
    async fn it_validates_jwt() {
        let key_id = "some-example-id".to_string();
        let (encode_key, decode_key) = create_key("DEADBEEF");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let audience = "test-audience".to_string();
        let in_the_future = chrono::Utc::now().timestamp() + 1000;
        let jwt = create_jwt(key_id.clone(), encode_key, audience.clone(), in_the_future);
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        let token = jwt.token().to_string();
        assert_eq!(
            test_validator
                .validate(jwt)
                .await
                .expect("valid token")
                .0
                .token(),
            token
        );
    }
    // A token signed with a different secret than the JWK must be rejected,
    // and the signature failure must be logged as a warning.
    #[traced_test]
    #[tokio::test]
    async fn it_rejects_different_key() {
        let key_id = "some-example-id".to_string();
        let (_, decode_key) = create_key("CAFED00D");
        let (bad_encode_key, _) = create_key("DEADC0DE");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let audience = "test-audience".to_string();
        let in_the_future = chrono::Utc::now().timestamp() + 1000;
        let jwt = create_jwt(
            key_id.clone(),
            bad_encode_key,
            audience.clone(),
            in_the_future,
        );
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        assert_eq!(test_validator.validate(jwt).await, None);
        logs_assert(|lines: &[&str]| {
            lines
                .iter()
                .filter(|line| line.contains("WARN"))
                .any(|line| line.contains("InvalidSignature"))
                .then_some(())
                .ok_or("Expected warning for validation failure".to_string())
        });
    }
    // A correctly signed token whose `exp` is in the past must be rejected.
    #[traced_test]
    #[tokio::test]
    async fn it_rejects_expired() {
        let key_id = "some-example-id".to_string();
        let (encode_key, decode_key) = create_key("F0CACC1A");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let audience = "test-audience".to_string();
        let in_the_past = chrono::Utc::now().timestamp() - 1000;
        let jwt = create_jwt(key_id.clone(), encode_key, audience.clone(), in_the_past);
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        assert_eq!(test_validator.validate(jwt).await, None);
        logs_assert(|lines: &[&str]| {
            lines
                .iter()
                .filter(|line| line.contains("WARN"))
                .any(|line| line.contains("ExpiredSignature"))
                .then_some(())
                .ok_or("Expected warning for validation failure".to_string())
        });
    }
    // A token whose `aud` does not match any configured audience is rejected.
    #[traced_test]
    #[tokio::test]
    async fn it_rejects_different_audience() {
        let key_id = "some-example-id".to_string();
        let (encode_key, decode_key) = create_key("F0CACC1A");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let audience = "test-audience".to_string();
        let bad_audience = "not-test-audience".to_string();
        let in_the_future = chrono::Utc::now().timestamp() + 1000;
        let jwt = create_jwt(key_id.clone(), encode_key, bad_audience, in_the_future);
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        assert_eq!(test_validator.validate(jwt).await, None);
        logs_assert(|lines: &[&str]| {
            lines
                .iter()
                .filter(|line| line.contains("WARN"))
                .any(|line| line.contains("InvalidAudience"))
                .then_some(())
                .ok_or("Expected warning for validation failure".to_string())
        });
    }
    // The `aud` claim may be an array (RFC 7519); a token is valid when any
    // element matches one of the configured audiences.
    #[tokio::test]
    async fn it_validates_jwt_with_array_audience() {
        use serde_json::json;
        let key_id = "some-example-id".to_string();
        let (encode_key, decode_key) = create_key("DEADBEEF");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let audience = "test-audience".to_string();
        let in_the_future = chrono::Utc::now().timestamp() + 1000;
        let header = {
            let mut h = Header::new(Algorithm::HS512);
            h.kid = Some(key_id.clone());
            h
        };
        let claims = json!({
            "aud": ["test-audience", "another-audience"],
            "exp": in_the_future,
            "sub": "test user"
        });
        let token = encode(&header, &claims, &encode_key).expect("encode JWT");
        let jwt = Authorization::bearer(&token).expect("create bearer token");
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        assert_eq!(
            test_validator
                .validate(jwt)
                .await
                .expect("valid token")
                .0
                .token(),
            token
        );
    }
    // An array `aud` claim with no overlap with the configured audiences must
    // be rejected with an InvalidAudience warning.
    #[traced_test]
    #[tokio::test]
    async fn it_rejects_array_audience_with_no_matches() {
        use serde_json::json;
        let key_id = "some-example-id".to_string();
        let (encode_key, decode_key) = create_key("DEADBEEF");
        let jwk = Jwk {
            alg: KeyAlgorithm::HS512,
            decoding_key: decode_key,
        };
        let expected_audience = "expected-audience".to_string();
        let in_the_future = chrono::Utc::now().timestamp() + 1000;
        let header = {
            let mut h = Header::new(Algorithm::HS512);
            h.kid = Some(key_id.clone());
            h
        };
        let claims = json!({
            "aud": ["wrong-audience-1", "wrong-audience-2"],
            "exp": in_the_future,
            "sub": "test user"
        });
        let token = encode(&header, &claims, &encode_key).expect("encode JWT");
        let jwt = Authorization::bearer(&token).expect("create bearer token");
        let server =
            Url::from_str("https://auth.example.com").expect("should parse a valid example server");
        let test_validator = TestTokenValidator {
            audiences: vec![expected_audience],
            key_pair: (key_id, jwk),
            servers: vec![server],
        };
        assert_eq!(test_validator.validate(jwt).await, None);
        logs_assert(|lines: &[&str]| {
            lines
                .iter()
                .filter(|line| line.contains("WARN"))
                .any(|line| line.contains("InvalidAudience"))
                .then_some(())
                .ok_or("Expected warning for validation failure".to_string())
        });
    }
}
```
--------------------------------------------------------------------------------
/docs/source/guides/auth-auth0.mdx:
--------------------------------------------------------------------------------
```markdown
---
title: Authorization with Auth0
---
## Example: Auth0
This guide uses [Auth0](https://auth0.com/) as the Identity Provider.
### Prerequisites
1. [Create an Apollo account](https://studio.apollographql.com/signup?referrer=docs-content).
1. Clone the repo for the example project.
   ```sh showLineNumbers=false
   git clone git@github.com:apollographql/apollo-mcp-server.git
   ```
1. Install or update the Rover CLI. You need at least v0.35 or later.
   ```sh showLineNumbers=false
   curl -sSL https://rover.apollo.dev/nix/latest | sh
   ```
### Step 1: Set up the Auth0 Identity Provider
[Create an Auth0 account](https://auth0.com/).
#### Create the Auth0 API
1. In your dashboard, navigate to **Applications** -> **APIs**.
1. Click **Create API**.
1. Give it a friendly name like `MCP Auth API`.
1. For the **Identifier** field, Auth0 recommends using a URL. This identifier is used in the MCP server configuration later as the `audience` property. For this guide, use `http://localhost:8000/mcp-example`.
1. Leave the defaults for the rest of the fields and click **Create**.
1. Navigate to your dashboard **Settings**.
   1. Under **General** -> **API Authorization Settings**, set the **Default Audience** to the `Identifier` you chose.
   1. Navigate to the **Advanced** tab.
   1. Toggle on **OIDC Dynamic Application Registration** to enable [dynamic client registration](https://auth0.com/docs/get-started/applications/dynamic-client-registration#enable-dynamic-client-registration).
   1. Toggle on **Enable Application Connections**.
   1. Save your changes.
#### Create the Auth0 Connection
The Auth0 Connection is the method clients use to authenticate. This guide uses the default **Username-Password-Authentication** connection.
1. In your Auth0 dashboard, navigate to **Authentication** -> **Database**.
1. Create the default **Username-Password-Authentication** connection. Click the **Try Connection** button to test it and set up a username and password for later.
1. Back on your Auth0 dashboard, note the **Connection Identifier** at the top of the page. It should start with something like `con_`. Copy it into a temporary location. This guide refers to it as `<CONNECTION ID>`.
1. Navigate to **Applications** -> **APIs** -> **Auth0 Management API**.
1. Copy the **Identifier** for the Auth0 Management API to a temporary location. It should look something like `dev-123456.us.auth0.com`, where `dev-123456` is your Auth0 tenant ID. This guide refers to it as `<AUTH0 DOMAIN>`.
1. Click the **API Explorer** tab. Copy the token value to a temporary location. This guide refers to it as `<MGMT API ACCESS TOKEN>`.
1. Run the following `curl` command to promote the connection to domain level, replacing `<CONNECTION ID>`, `<AUTH0 DOMAIN>`, and `<MGMT API ACCESS TOKEN>` with the values you copied in the previous steps:
   ```sh
   curl --request PATCH \
     --url 'https://<AUTH0 DOMAIN>/api/v2/connections/<CONNECTION ID>' \
     --header 'authorization: Bearer <MGMT API ACCESS TOKEN>' \
     --header 'cache-control: no-cache' \
     --header 'content-type: application/json' \
     --data '{ "is_domain_connection": true }'
   ```
Your Auth0 setup is now complete. You have an API with an audience and a connection for authentication.
<ExpansionPanel title="Something went wrong? Try these troubleshooting steps">
  - Check that the `curl` command is correct and that you have the correct values for `<CONNECTION ID>`, `<AUTH0 DOMAIN>`, and `<MGMT API ACCESS TOKEN>`.
  - Check that you have the correct permissions to promote the connection to domain level. 
    - In your Auth0 dashboard, navigate to **Applications** -> **API Explorer Application** -> **APIs**. Ensure that the **Auth0 Management API** is authorized.
    - Expand the **Auth0 Management API** item and enable the `update:connections` permission.
    
    - Click **Update** to save your changes.
</ExpansionPanel>
### Step 2: Configure the MCP Server for authorization
Configure the MCP server to use the Auth0 instance for authentication.
1. Open the example repo you cloned earlier.
1. In the `graphql/TheSpaceDevs` directory, open the `config.yaml` file.
1. Add the following `auth` configuration under the `transport` key:
   ```yaml title="graphql/TheSpaceDevs/config.yaml"
   transport:
     type: streamable_http
     auth:
       servers:
         - https://<AUTH0 DOMAIN> # Fill in your Auth0 domain
       audiences:
         - <AUTH0 DEFAULT AUDIENCE> # Fill in your Auth0 Identifier
       resource: http://127.0.0.1:8000/mcp
       scopes:
         - read:users # Adjust scopes as needed
   ```
1. Replace the `<AUTH0 DOMAIN>` with your own Auth0 domain from earlier.
1. Replace the `<AUTH0 DEFAULT AUDIENCE>` with the matching `Identifier` you set when creating the Auth0 API. In this guide, you used `http://localhost:8000/mcp-example`.
Your MCP server is now configured to use Auth0 for authentication.
### Step 3: Configure the router for JWT authentication
Configure your GraphOS Router to validate JWTs issued by Auth0. This involves setting up the JWKS endpoint and defining the authorization rules.
#### Define authorization and authentication rules in the router
1. In the `graphql/TheSpaceDevs` directory, create a new file called `router.yaml`.
1. Paste the following configuration, replacing `<AUTH0 DOMAIN>` with your Auth0 domain:
   ```yaml title="graphql/TheSpaceDevs/router.yaml"
   authorization:
     require_authentication: true # Enforces authentication on all requests
   authentication:
     router:
       jwt:
         jwks:
           - url: https://<AUTH0 DOMAIN>/.well-known/jwks.json
   homepage:
     enabled: false
   sandbox:
     enabled: true
   supergraph:
     introspection: true
   ```
   With this configuration, the router requires authentication for all requests. If a request doesn't include an Authorization token, the router returns an `UNAUTHENTICATED` error.
#### Retrieve your GraphOS license credentials for auth
You need a graph's credentials and a valid GraphOS plan to use the router's authentication features.
1. Navigate to [GraphOS Studio](https://studio.apollographql.com/) and log in.
1. Click **Add graph** and **Connect an existing graph**.
1. Give it a name and click **Next**.
1. In the next modal, find the command that looks something like this:
   ```sh showLineNumbers=false {2}
   APOLLO_KEY=<YOUR_APOLLO_KEY> \
   rover subgraph publish <YOUR_APOLLO_GRAPH_REF> \
   --schema ./products-schema.graphql \
   --name your-subgraph-name \
   --routing-url http://products.prod.svc.cluster.local:4001/graphql
   ```
   Note: You don't need to run this command.
1. Retrieve the values for `YOUR_APOLLO_KEY` and `YOUR_APOLLO_GRAPH_REF` from the modal and click **Finish later**.
#### Run the MCP Server and router
1. Back in your terminal, in the root of the project directory, replace and run the following command to start the MCP Server and the router together:
   ```sh
   APOLLO_GRAPH_REF=<YOUR_APOLLO_GRAPH_REF> APOLLO_KEY=<YOUR_APOLLO_KEY> \
   rover dev --supergraph-config ./graphql/TheSpaceDevs/supergraph.yaml \
   --router-config ./graphql/TheSpaceDevs/router.yaml \
   --mcp ./graphql/TheSpaceDevs/config.yaml
   ```
1. Test the router by navigating to `http://localhost:4000` in your browser. You should see the Explorer, where you can run GraphQL queries against the router.
1. Remember, the router is configured to require authentication on all requests. Any operation without a valid Authorization token returns an `UNAUTHENTICATED` error. Run the operation:
   ```graphql
   query GetAstronautsCurrentlyInSpace {
     astronauts(filters: { inSpace: true, search: "" }) {
       results {
         id
         name
         timeInSpace
         lastFlight
       }
     }
   }
   ```
1. You should see an `UNAUTHENTICATED` error, which means the router is correctly enforcing authentication.
### Step 4: Make requests with MCP Inspector
1. In a new terminal window, run the MCP Inspector:
   ```sh
   npx @modelcontextprotocol/inspector
   ```
   The browser should open automatically with a proxy auth token.
1. In the MCP Inspector, select `Streamable HTTP` as the Transport Type and enter `http://127.0.0.1:8000/mcp` as the URL.
1. Click **Connect**. This triggers the OAuth flow, and you are redirected to the Auth0 login page.
1. Log in with the credentials you set up in the Auth0 connection and allow MCP Inspector access.
1. After you connect, the browser redirects back to MCP Inspector.
1. Click **List Tools** to see the available tools.
1. Select the `GetAstronautsCurrentlyInSpace` tool listed and click **Run Tool**.
1. You should see the results of the query, which means the authentication is working correctly.
You can select the **Auth** tab in MCP Inspector to see the details of the authenticated user and the scopes granted.
<ExpansionPanel title="Alternative: Guided OAuth flow in MCP Inspector">
You can also use the guided OAuth flow in MCP Inspector to test authentication. This gives you a detailed look into each step the client does to connect to the server.
1. Click **Open Auth Settings**.
1. In the **OAuth Flow Progress** section, click **Continue** to start the **Metadata Discovery** step.
1. Click **Continue** to start the **Client Registration** step. Expand the **Registered Client Information** step to note the `client_id` value.
1. Click **Continue** to start the **Preparing Authorization** step. Click the link to open up a new tab to authorize MCP Inspector.
1. Copy the authorization code and return to MCP Inspector.
1. Paste the code in the next step **Request Authorization and acquire authorization code** then click **Continue**.
1. Click **Continue** to start the **Token Request** step. This completes the authentication flow.
Before continuing, you need to set up the Auth0 client to accept an additional callback URL.
1. In your Auth0 dashboard, navigate to **Applications**.
1. Select the client for **MCP Inspector**. If you have multiple entries, find the `client_id` value from the MCP Inspector.
1. In the client's **Settings** -> **Application URIs**, copy and paste the existing callback URL. Then, remove the `/debug` suffix. Make sure the URLs are comma-separated. It should look something like this:
   ```txt
   http://localhost:6274/oauth/callback/debug,
   http://localhost:6274/oauth/callback
   ```
1. Back in MCP Inspector, click **Connect**. You are now authenticated and can run tools as usual.
</ExpansionPanel>
### Step 5: Make requests with an MCP Client (Goose)
We'll use [Goose](https://block.github.io/goose/) as our MCP Client. Goose allows you to choose between many different LLMs and provides some built-in functionality for connecting to MCP servers, called [Extensions](https://block.github.io/goose/docs/getting-started/using-extensions).
[Install the Goose CLI](https://block.github.io/goose/docs/getting-started/installation), following the instructions for your operating system. Set up the LLM provider of your choice with `goose configure` --> **Configure Providers**. Each provider has its own set of instructions, rate limiting and pricing.
Then, continue with the following steps:
1. In your terminal, run `goose configure`.
1. Select or enter the following answers at the prompts:
| Prompt                                                      | Answer                                     |
| ----------------------------------------------------------- | ------------------------------------------ |
| "What would you like to configure?"                         | "Add Extension"                            |
| "What type of extension would you like to add?"             | "Command Line Extension"                   |
| "What's the name of this extension?"                        | "mcp-auth-quickstart"                      |
| "What command should be run?"                               | `npx mcp-remote http://127.0.0.1:8000/mcp` |
| Other prompts (timeout, description, environment variables) | Use the default values                     |
1. To start Goose, type `goose`. This will open a browser window and send you through the auth flow.
1. Log in to your Auth0 instance and authorize your MCP server to gain access to your tools.
1. In Goose, ask "What astronauts are in space right now?". This question is similar to the `GetAstronautsCurrentlyInSpace` operation from earlier, which fails as unauthenticated without the proper token.
1. Goose will select the `GetAstronautsCurrentlyInSpace` tool and respond with information about the astronauts found in TheSpaceDevs.
## Troubleshooting
### Common Issues
#### MCP Server Won't Start
- **Error**: "Port 8000 is already in use"
  - Solution: Kill any existing processes using port 8000 or specify a different port with the `transport.port` option or `APOLLO_MCP_TRANSPORT__PORT` env variable
- **Error**: "Failed to load supergraph configuration"
  - Solution: Verify you're running the command from the repo root directory
  - Solution: Check that the path to `supergraph.yaml` is correct
- **Error**: "License violation"
  - Solution: Ensure that the `rover dev` command includes valid `APOLLO_KEY` and `APOLLO_GRAPH_REF` values and that your plan supports authentication features.
- **Error**: "What URL is your subgraph running on?" question in terminal
  - Solution: Verify that the file path for your config files is correct. You should run the `rover dev` command from the root of the example project directory and the file paths should be relative to that root.
#### MCP Inspector Connection Issues
- **Error**: "Failed to connect to server"
  - Solution: Ensure the MCP server is running (check terminal output)
  - Solution: Verify you're using the correct URL (`http://127.0.0.1:8000/mcp`)
  - Solution: Check if your firewall is blocking the connection
### Infinite loop during OAuth flow
- **Issue**: After logging in to Auth0, MCP Inspector keeps refreshing and doesn't complete the OAuth flow
  - Solution: In MCP Inspector, open the **Authentication** panel in the sidebar. Clear out any values in the **Header Name** and **Bearer Token** fields. Then try connecting again.
  - Solution: In MCP Inspector, select **Clear OAuth State** and try connecting again.
### Getting Help
If you're still having issues:
1. Check the [Apollo MCP Server GitHub issues](https://github.com/apollographql/apollo-mcp-server/issues).
2. Join the [Apollo Community MCP Server Category](https://community.apollographql.com/c/mcp-server/41).
3. Contact your Apollo representative for direct support.
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states/starting.rs:
--------------------------------------------------------------------------------
```rust
use std::{net::SocketAddr, sync::Arc};
use apollo_compiler::{Name, Schema, ast::OperationType, validation::Valid};
use axum::{Router, extract::Query, http::StatusCode, response::Json, routing::get};
use axum_otel_metrics::HttpMetricsLayerBuilder;
use axum_tracing_opentelemetry::middleware::{OtelAxumLayer, OtelInResponseLayer};
use rmcp::transport::streamable_http_server::session::local::LocalSessionManager;
use rmcp::transport::{StreamableHttpServerConfig, StreamableHttpService};
use rmcp::{
    ServiceExt as _,
    transport::{SseServer, sse_server::SseServerConfig, stdio},
};
use serde_json::json;
use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tower_http::trace::TraceLayer;
use tracing::{Instrument as _, debug, error, info, trace};
use crate::{
    errors::ServerError,
    explorer::Explorer,
    health::HealthCheck,
    introspection::tools::{
        execute::Execute, introspect::Introspect, search::Search, validate::Validate,
    },
    operations::{MutationMode, RawOperation},
    server::Transport,
};
use super::{Config, Running, shutdown_signal};
/// Server state while tools are being constructed and the transport is being bound.
///
/// Consumed by [`Starting::start`], which produces the long-lived [`Running`] state.
pub(super) struct Starting {
    /// Fully resolved server configuration.
    pub(super) config: Config,
    /// The validated GraphQL schema that tools are derived from.
    pub(super) schema: Valid<Schema>,
    /// Raw (not yet schema-validated) operations to expose as MCP tools.
    pub(super) operations: Vec<RawOperation>,
}
impl Starting {
    /// Consume this state: build all configured tools, construct the [`Running`]
    /// state, and launch the MCP server on the configured transport.
    ///
    /// # Errors
    /// Returns a [`ServerError`] if the operation list cannot be serialized for
    /// debug logging, the search index cannot be built, the CORS configuration
    /// is invalid, the listen address cannot be bound, or (stdio only) serving
    /// fails.
    pub(super) async fn start(self) -> Result<Running, ServerError> {
        let peers = Arc::new(RwLock::new(Vec::new()));
        // Turn raw operations into MCP tools. Invalid operations are logged
        // and skipped so a single bad operation does not abort startup.
        let operations: Vec<_> = self
            .operations
            .into_iter()
            .filter_map(|operation| {
                operation
                    .into_operation(
                        &self.schema,
                        self.config.custom_scalar_map.as_ref(),
                        self.config.mutation_mode,
                        self.config.disable_type_description,
                        self.config.disable_schema_description,
                    )
                    .unwrap_or_else(|error| {
                        error!("Invalid operation: {}", error);
                        None
                    })
            })
            .collect();
        debug!(
            "Loaded {} operations:\n{}",
            operations.len(),
            serde_json::to_string_pretty(&operations)?
        );
        // Optional introspection tools, each gated by its own config flag.
        let execute_tool = self
            .config
            .execute_introspection
            .then(|| Execute::new(self.config.mutation_mode));
        let root_query_type = self
            .config
            .introspect_introspection
            .then(|| {
                self.schema
                    .root_operation(OperationType::Query)
                    .map(Name::as_str)
                    .map(|s| s.to_string())
            })
            .flatten();
        // The mutation root type is only surfaced when all mutations are allowed.
        let root_mutation_type = self
            .config
            .introspect_introspection
            .then(|| {
                matches!(self.config.mutation_mode, MutationMode::All)
                    .then(|| {
                        self.schema
                            .root_operation(OperationType::Mutation)
                            .map(Name::as_str)
                            .map(|s| s.to_string())
                    })
                    .flatten()
            })
            .flatten();
        // Share the schema behind a lock so tools holding clones observe
        // later schema updates.
        let schema = Arc::new(Mutex::new(self.schema));
        let introspect_tool = self.config.introspect_introspection.then(|| {
            Introspect::new(
                schema.clone(),
                root_query_type,
                root_mutation_type,
                self.config.introspect_minify,
            )
        });
        let validate_tool = self
            .config
            .validate_introspection
            .then(|| Validate::new(schema.clone()));
        // Search construction is fallible (it builds an index), so it cannot
        // use bool::then like the tools above.
        let search_tool = if self.config.search_introspection {
            Some(Search::new(
                schema.clone(),
                matches!(self.config.mutation_mode, MutationMode::All),
                self.config.search_leaf_depth,
                self.config.index_memory_bytes,
                self.config.search_minify,
            )?)
        } else {
            None
        };
        let explorer_tool = self.config.explorer_graph_ref.map(Explorer::new);
        let cancellation_token = CancellationToken::new();
        // Create health check if enabled (only for StreamableHttp transport)
        let health_check = match (&self.config.transport, self.config.health_check.enabled) {
            (
                Transport::StreamableHttp {
                    auth: _,
                    address: _,
                    port: _,
                    stateful_mode: _,
                },
                true,
            ) => Some(HealthCheck::new(self.config.health_check.clone())),
            _ => None, // No health check for SSE, Stdio, or when disabled
        };
        let running = Running {
            schema,
            operations: Arc::new(Mutex::new(operations)),
            headers: self.config.headers,
            forward_headers: self.config.forward_headers.clone(),
            endpoint: self.config.endpoint,
            execute_tool,
            introspect_tool,
            search_tool,
            explorer_tool,
            validate_tool,
            custom_scalar_map: self.config.custom_scalar_map,
            peers,
            cancellation_token: cancellation_token.clone(),
            mutation_mode: self.config.mutation_mode,
            disable_type_description: self.config.disable_type_description,
            disable_schema_description: self.config.disable_schema_description,
            disable_auth_token_passthrough: self.config.disable_auth_token_passthrough,
            health_check: health_check.clone(),
        };
        // Helper to enable auth. A macro (not a function) because the router's
        // concrete type changes as layers are added.
        macro_rules! with_auth {
            ($router:expr, $auth:ident) => {{
                let mut router = $router;
                if let Some(auth) = $auth {
                    router = auth.enable_middleware(router);
                }
                router
            }};
        }
        // Helper to enable CORS. Note: a CORS build failure returns early from
        // `start` itself.
        macro_rules! with_cors {
            ($router:expr, $config:expr) => {{
                let mut router = $router;
                if $config.enabled {
                    match $config.build_cors_layer() {
                        Ok(cors_layer) => {
                            router = router.layer(cors_layer);
                        }
                        Err(e) => {
                            error!("Failed to build CORS layer: {}", e);
                            return Err(e);
                        }
                    }
                }
                router
            }};
        }
        match self.config.transport {
            Transport::StreamableHttp {
                auth,
                address,
                port,
                stateful_mode,
            } => {
                info!(port = ?port, address = ?address, "Starting MCP server in Streamable HTTP mode");
                let running = running.clone();
                let listen_address = SocketAddr::new(address, port);
                let service = StreamableHttpService::new(
                    move || Ok(running.clone()),
                    LocalSessionManager::default().into(),
                    StreamableHttpServerConfig {
                        stateful_mode,
                        ..Default::default()
                    },
                );
                let mut router = with_cors!(
                    with_auth!(axum::Router::new().nest_service("/mcp", service), auth),
                    self.config.cors
                )
                .layer(HttpMetricsLayerBuilder::new().build())
                // include trace context as header into the response
                .layer(OtelInResponseLayer)
                //start OpenTelemetry trace on incoming request
                .layer(OtelAxumLayer::default())
                // Add tower-http tracing layer for additional HTTP-level tracing
                .layer(
                    TraceLayer::new_for_http()
                        .make_span_with(|request: &axum::http::Request<_>| {
                            tracing::info_span!(
                                "mcp_server",
                                method = %request.method(),
                                uri = %request.uri(),
                                session_id = tracing::field::Empty,
                                status_code = tracing::field::Empty,
                            )
                        })
                        .on_response(
                            |response: &axum::http::Response<_>,
                             _latency: std::time::Duration,
                             span: &tracing::Span| {
                                span.record(
                                    "status_code",
                                    tracing::field::display(response.status()),
                                );
                                // Surface the MCP session id on the span when present.
                                if let Some(session_id) = response
                                    .headers()
                                    .get("mcp-session-id")
                                    .and_then(|v| v.to_str().ok())
                                {
                                    span.record("session_id", tracing::field::display(session_id));
                                }
                            },
                        ),
                );
                // Add health check endpoint if configured
                if let Some(health_check) = health_check.filter(|h| h.config().enabled) {
                    let health_router = with_cors!(
                        Router::new()
                            .route(&health_check.config().path, get(health_endpoint))
                            .with_state(health_check.clone()),
                        self.config.cors
                    );
                    router = router.merge(health_router);
                }
                let tcp_listener = tokio::net::TcpListener::bind(listen_address).await?;
                // Serve on a background task; `start` returns once bound.
                tokio::spawn(async move {
                    // Health check is already active from creation
                    if let Err(e) = axum::serve(tcp_listener, router)
                        .with_graceful_shutdown(shutdown_signal())
                        .await
                    {
                        // This can never really happen
                        error!("Failed to start MCP server: {e:?}");
                    }
                });
            }
            Transport::SSE {
                auth,
                address,
                port,
            } => {
                info!(port = ?port, address = ?address, "Starting MCP server in SSE mode");
                let running = running.clone();
                let listen_address = SocketAddr::new(address, port);
                let (server, router) = SseServer::new(SseServerConfig {
                    bind: listen_address,
                    sse_path: "/sse".to_string(),
                    post_path: "/message".to_string(),
                    ct: cancellation_token,
                    sse_keep_alive: None,
                });
                // Optionally wrap the router with auth, if enabled
                let router = with_auth!(router, auth);
                // Start up the SSE server
                // Note: Until RMCP consolidates SSE with the same tower system as StreamableHTTP,
                // we need to basically copy the implementation of `SseServer::serve_with_config` here.
                let listener = tokio::net::TcpListener::bind(server.config.bind).await?;
                let ct = server.config.ct.child_token();
                let axum_server =
                    axum::serve(listener, router).with_graceful_shutdown(async move {
                        ct.cancelled().await;
                        tracing::info!("mcp server cancelled");
                    });
                tokio::spawn(
                    async move {
                        if let Err(e) = axum_server.await {
                            tracing::error!(error = %e, "mcp shutdown with error");
                        }
                    }
                    .instrument(
                        tracing::info_span!("mcp-server", bind_address = %server.config.bind),
                    ),
                );
                server.with_service(move || running.clone());
            }
            Transport::Stdio => {
                info!("Starting MCP server in stdio mode");
                // Unlike the HTTP transports (which spawn background tasks),
                // stdio serves on the current task and waits for completion.
                let service = running
                    .clone()
                    .serve(stdio())
                    .await
                    .inspect_err(|e| {
                        error!("serving error: {:?}", e);
                    })
                    .map_err(Box::new)?;
                service.waiting().await.map_err(ServerError::StartupError)?;
            }
        }
        Ok(running)
    }
}
/// Axum handler for the health check endpoint.
///
/// The first query-string key (if any) is forwarded to the health checker as
/// the requested health query; the computed health payload is returned as
/// JSON together with the status code the checker selects.
async fn health_endpoint(
    axum::extract::State(health_check): axum::extract::State<HealthCheck>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> Result<(StatusCode, Json<serde_json::Value>), StatusCode> {
    let query = params.keys().next().map(String::as_str);
    let (health, status_code) = health_check.get_health_state(query);
    trace!(?health, ?query, "health check");
    Ok((status_code, Json(json!(health))))
}
#[cfg(test)]
mod tests {
    use http::HeaderMap;
    use url::Url;
    use crate::health::HealthCheckConfig;
    use super::*;
    /// Smoke test: a server with every introspection tool enabled starts
    /// successfully over the Streamable HTTP transport.
    #[tokio::test]
    async fn start_basic_server() {
        let starting = Starting {
            config: Config {
                transport: Transport::StreamableHttp {
                    auth: None,
                    address: "127.0.0.1".parse().unwrap(),
                    port: 7799,
                    stateful_mode: false,
                },
                endpoint: Url::parse("http://localhost:4000").expect("valid url"),
                mutation_mode: MutationMode::All,
                execute_introspection: true,
                headers: HeaderMap::new(),
                forward_headers: vec![],
                validate_introspection: true,
                introspect_introspection: true,
                search_introspection: true,
                introspect_minify: false,
                search_minify: false,
                explorer_graph_ref: None,
                custom_scalar_map: None,
                disable_type_description: false,
                disable_schema_description: false,
                disable_auth_token_passthrough: false,
                search_leaf_depth: 5,
                index_memory_bytes: 1024 * 1024 * 1024,
                health_check: HealthCheckConfig {
                    enabled: true,
                    ..Default::default()
                },
                cors: Default::default(),
            },
            // A minimal but valid schema is enough to build all of the tools.
            schema: Schema::parse_and_validate("type Query { hello: String }", "test.graphql")
                .expect("Valid schema"),
            operations: vec![],
        };
        let running = starting.start();
        assert!(running.await.is_ok());
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states/running.rs:
--------------------------------------------------------------------------------
```rust
use std::sync::Arc;
use apollo_compiler::{Schema, validation::Valid};
use opentelemetry::trace::FutureExt;
use opentelemetry::{Context, KeyValue};
use reqwest::header::HeaderMap;
use rmcp::model::Implementation;
use rmcp::{
    Peer, RoleServer, ServerHandler, ServiceError,
    model::{
        CallToolRequestParam, CallToolResult, ErrorCode, InitializeRequestParam, InitializeResult,
        ListToolsResult, PaginatedRequestParam, ServerCapabilities, ServerInfo,
    },
    service::RequestContext,
};
use serde_json::Value;
use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error};
use url::Url;
use crate::generated::telemetry::{TelemetryAttribute, TelemetryMetric};
use crate::meter;
use crate::{
    custom_scalar_map::CustomScalarMap,
    errors::{McpError, ServerError},
    explorer::{EXPLORER_TOOL_NAME, Explorer},
    graphql::{self, Executable as _},
    headers::{ForwardHeaders, build_request_headers},
    health::HealthCheck,
    introspection::tools::{
        execute::{EXECUTE_TOOL_NAME, Execute},
        introspect::{INTROSPECT_TOOL_NAME, Introspect},
        search::{SEARCH_TOOL_NAME, Search},
        validate::{VALIDATE_TOOL_NAME, Validate},
    },
    operations::{MutationMode, Operation, RawOperation},
};
/// The long-lived server state: holds the schema, tools, and connected peers,
/// and (via `ServerHandler`) services MCP requests.
#[derive(Clone)]
pub(super) struct Running {
    /// Shared, updatable GraphQL schema backing the introspection tools.
    pub(super) schema: Arc<Mutex<Valid<Schema>>>,
    /// MCP tools generated from GraphQL operations; replaced on schema or
    /// operation updates.
    pub(super) operations: Arc<Mutex<Vec<Operation>>>,
    /// Static headers included on upstream GraphQL requests.
    pub(super) headers: HeaderMap,
    /// Client header names to forward to the upstream GraphQL endpoint.
    pub(super) forward_headers: ForwardHeaders,
    /// The upstream GraphQL endpoint URL.
    pub(super) endpoint: Url,
    // Built-in tools; each is None when disabled in configuration.
    pub(super) execute_tool: Option<Execute>,
    pub(super) introspect_tool: Option<Introspect>,
    pub(super) search_tool: Option<Search>,
    pub(super) explorer_tool: Option<Explorer>,
    pub(super) validate_tool: Option<Validate>,
    /// Optional mapping used when converting operations with custom scalars.
    pub(super) custom_scalar_map: Option<CustomScalarMap>,
    /// Peers registered at initialize time; notified on tool-list changes.
    pub(super) peers: Arc<RwLock<Vec<Peer<RoleServer>>>>,
    pub(super) cancellation_token: CancellationToken,
    pub(super) mutation_mode: MutationMode,
    pub(super) disable_type_description: bool,
    pub(super) disable_schema_description: bool,
    /// When true, incoming auth tokens are not forwarded upstream.
    pub(super) disable_auth_token_passthrough: bool,
    /// Present only when health checking is enabled (StreamableHttp transport).
    pub(super) health_check: Option<HealthCheck>,
}
impl Running {
    /// Update a running server with a new schema.
    ///
    /// Re-derives every operation tool against the new schema (tool input
    /// schemas and descriptions depend on it), then notifies peers.
    pub(super) async fn update_schema(self, schema: Valid<Schema>) -> Result<Running, ServerError> {
        debug!("Schema updated:\n{}", schema);
        // Update the operations based on the new schema. This is necessary because the MCP tool
        // input schemas and description are derived from the schema.
        let operations: Vec<Operation> = self
            .operations
            .lock()
            .await
            .iter()
            .cloned()
            .map(|operation| operation.into_inner())
            .filter_map(|operation| {
                operation
                    .into_operation(
                        &schema,
                        self.custom_scalar_map.as_ref(),
                        self.mutation_mode,
                        self.disable_type_description,
                        self.disable_schema_description,
                    )
                    // Invalid operations are logged and dropped rather than
                    // failing the whole schema update.
                    .unwrap_or_else(|error| {
                        error!("Invalid operation: {}", error);
                        None
                    })
            })
            .collect();
        debug!(
            "Updated {} operations:\n{}",
            operations.len(),
            serde_json::to_string_pretty(&operations)?
        );
        *self.operations.lock().await = operations;
        // Update the schema itself
        *self.schema.lock().await = schema;
        // Notify MCP clients that tools have changed
        Self::notify_tool_list_changed(self.peers.clone()).await;
        Ok(self)
    }
    /// Update a running server with a new set of operations, re-deriving the
    /// MCP tools against the current schema, then notify peers.
    #[tracing::instrument(skip_all)]
    pub(super) async fn update_operations(
        self,
        operations: Vec<RawOperation>,
    ) -> Result<Running, ServerError> {
        debug!("Operations updated:\n{:?}", operations);
        // Update the operations based on the current schema
        {
            let schema = &*self.schema.lock().await;
            let updated_operations: Vec<Operation> = operations
                .into_iter()
                .filter_map(|operation| {
                    operation
                        .into_operation(
                            schema,
                            self.custom_scalar_map.as_ref(),
                            self.mutation_mode,
                            self.disable_type_description,
                            self.disable_schema_description,
                        )
                        // Invalid operations are logged and dropped, not fatal.
                        .unwrap_or_else(|error| {
                            error!("Invalid operation: {}", error);
                            None
                        })
                })
                .collect();
            debug!(
                "Loaded {} operations:\n{}",
                updated_operations.len(),
                serde_json::to_string_pretty(&updated_operations)?
            );
            *self.operations.lock().await = updated_operations;
        }
        // Notify MCP clients that tools have changed
        Self::notify_tool_list_changed(self.peers.clone()).await;
        Ok(self)
    }
    /// Notify any peers that tools have changed. Drops unreachable peers from the list.
    #[tracing::instrument(skip_all)]
    async fn notify_tool_list_changed(peers: Arc<RwLock<Vec<Peer<RoleServer>>>>) {
        let mut peers = peers.write().await;
        if !peers.is_empty() {
            debug!(
                "Operations changed, notifying {} peers of tool change",
                peers.len()
            );
        }
        let mut retained_peers = Vec::new();
        for peer in peers.iter() {
            if !peer.is_transport_closed() {
                match peer.notify_tool_list_changed().await {
                    Ok(_) => retained_peers.push(peer.clone()),
                    // Transport-level send failures mean the peer is gone: drop it.
                    Err(ServiceError::TransportSend(_) | ServiceError::TransportClosed) => {
                        error!("Failed to notify peer of tool list change - dropping peer",);
                    }
                    // Other errors may be transient, so keep the peer.
                    Err(e) => {
                        error!("Failed to notify peer of tool list change {:?}", e);
                        retained_peers.push(peer.clone());
                    }
                }
            }
        }
        *peers = retained_peers;
    }
}
impl ServerHandler for Running {
    /// Handle the MCP `initialize` handshake: record client telemetry and
    /// register the peer so it can later be notified of tool-list changes.
    #[tracing::instrument(skip_all, fields(apollo.mcp.client_name = request.client_info.name, apollo.mcp.client_version = request.client_info.version))]
    async fn initialize(
        &self,
        request: InitializeRequestParam,
        context: RequestContext<RoleServer>,
    ) -> Result<InitializeResult, McpError> {
        let meter = &meter::METER;
        let attributes = vec![
            KeyValue::new(
                TelemetryAttribute::ClientName.to_key(),
                request.client_info.name.clone(),
            ),
            KeyValue::new(
                TelemetryAttribute::ClientVersion.to_key(),
                request.client_info.version.clone(),
            ),
        ];
        meter
            .u64_counter(TelemetryMetric::InitializeCount.as_str())
            .build()
            .add(1, &attributes);
        // TODO: how to remove these?
        let mut peers = self.peers.write().await;
        peers.push(context.peer);
        Ok(self.get_info())
    }
    /// Dispatch an MCP tool call to the matching built-in tool or generated
    /// operation, recording duration/success metrics and health-check
    /// rejections along the way.
    #[tracing::instrument(skip_all, fields(apollo.mcp.tool_name = request.name.as_ref(), apollo.mcp.request_id = %context.id.clone()))]
    async fn call_tool(
        &self,
        request: CallToolRequestParam,
        context: RequestContext<RoleServer>,
    ) -> Result<CallToolResult, McpError> {
        let meter = &meter::METER;
        let start = std::time::Instant::now();
        let tool_name = request.name.clone();
        let result = match tool_name.as_ref() {
            INTROSPECT_TOOL_NAME => {
                self.introspect_tool
                    .as_ref()
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(convert_arguments(request)?)
                    .await
            }
            SEARCH_TOOL_NAME => {
                self.search_tool
                    .as_ref()
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(convert_arguments(request)?)
                    .await
            }
            EXPLORER_TOOL_NAME => {
                self.explorer_tool
                    .as_ref()
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(convert_arguments(request)?)
                    .await
            }
            EXECUTE_TOOL_NAME => {
                // The execute tool sends an upstream GraphQL request, so build
                // the full header set (static headers plus any forwarded client
                // headers) when HTTP request parts are available; over stdio
                // there are no request parts, so only static headers apply.
                let headers = if let Some(axum_parts) =
                    context.extensions.get::<axum::http::request::Parts>()
                {
                    build_request_headers(
                        &self.headers,
                        &self.forward_headers,
                        &axum_parts.headers,
                        &axum_parts.extensions,
                        self.disable_auth_token_passthrough,
                    )
                } else {
                    self.headers.clone()
                };
                self.execute_tool
                    .as_ref()
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(graphql::Request {
                        input: Value::from(request.arguments.clone()),
                        endpoint: &self.endpoint,
                        headers,
                    })
                    .await
            }
            VALIDATE_TOOL_NAME => {
                self.validate_tool
                    .as_ref()
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(convert_arguments(request)?)
                    .await
            }
            _ => {
                // Fall through: look up a generated operation tool by name.
                let headers = if let Some(axum_parts) =
                    context.extensions.get::<axum::http::request::Parts>()
                {
                    build_request_headers(
                        &self.headers,
                        &self.forward_headers,
                        &axum_parts.headers,
                        &axum_parts.extensions,
                        self.disable_auth_token_passthrough,
                    )
                } else {
                    self.headers.clone()
                };
                let graphql_request = graphql::Request {
                    input: Value::from(request.arguments.clone()),
                    endpoint: &self.endpoint,
                    headers,
                };
                self.operations
                    .lock()
                    .await
                    .iter()
                    .find(|op| op.as_ref().name == tool_name)
                    .ok_or(tool_not_found(&tool_name))?
                    .execute(graphql_request)
                    .with_context(Context::current())
                    .await
            }
        };
        // Track errors for health check
        if let (Err(_), Some(health_check)) = (&result, &self.health_check) {
            health_check.record_rejection();
        }
        let attributes = vec![
            KeyValue::new(
                TelemetryAttribute::Success.to_key(),
                result.as_ref().is_ok_and(|r| r.is_error != Some(true)),
            ),
            KeyValue::new(TelemetryAttribute::ToolName.to_key(), tool_name),
        ];
        // Record response time and status
        meter
            .f64_histogram(TelemetryMetric::ToolDuration.as_str())
            .build()
            .record(start.elapsed().as_millis() as f64, &attributes);
        meter
            .u64_counter(TelemetryMetric::ToolCount.as_str())
            .build()
            .add(1, &attributes);
        result
    }
    /// List every available tool: all generated operations plus whichever
    /// built-in tools are enabled.
    #[tracing::instrument(skip_all)]
    async fn list_tools(
        &self,
        _request: Option<PaginatedRequestParam>,
        _context: RequestContext<RoleServer>,
    ) -> Result<ListToolsResult, McpError> {
        let meter = &meter::METER;
        meter
            .u64_counter(TelemetryMetric::ListToolsCount.as_str())
            .build()
            .add(1, &[]);
        Ok(ListToolsResult {
            next_cursor: None,
            tools: self
                .operations
                .lock()
                .await
                .iter()
                .map(|op| op.as_ref().clone())
                .chain(self.execute_tool.as_ref().iter().map(|e| e.tool.clone()))
                .chain(self.introspect_tool.as_ref().iter().map(|e| e.tool.clone()))
                .chain(self.search_tool.as_ref().iter().map(|e| e.tool.clone()))
                .chain(self.explorer_tool.as_ref().iter().map(|e| e.tool.clone()))
                .chain(self.validate_tool.as_ref().iter().map(|e| e.tool.clone()))
                .collect(),
        })
    }
    /// Static server metadata and capabilities advertised to clients.
    fn get_info(&self) -> ServerInfo {
        let meter = &meter::METER;
        meter
            .u64_counter(TelemetryMetric::GetInfoCount.as_str())
            .build()
            .add(1, &[]);
        ServerInfo {
            server_info: Implementation {
                name: "Apollo MCP Server".to_string(),
                icons: None,
                title: Some("Apollo MCP Server".to_string()),
                version: env!("CARGO_PKG_VERSION").to_string(),
                website_url: Some(
                    "https://www.apollographql.com/docs/apollo-mcp-server".to_string(),
                ),
            },
            capabilities: ServerCapabilities::builder()
                .enable_tools()
                .enable_tool_list_changed()
                .build(),
            ..Default::default()
        }
    }
}
/// Build the standard MCP error returned when a requested tool does not exist.
fn tool_not_found(name: &str) -> McpError {
    let message = format!("Tool {name} not found");
    McpError::new(ErrorCode::METHOD_NOT_FOUND, message, None)
}
/// Deserialize the arguments of a tool call into the tool's input type.
///
/// Returns an `INVALID_PARAMS` MCP error when the arguments do not match the
/// expected shape. The deserialization failure detail is included in the
/// message (the previous bare "Invalid input" gave clients nothing to act on).
fn convert_arguments<T: serde::de::DeserializeOwned>(
    arguments: CallToolRequestParam,
) -> Result<T, McpError> {
    serde_json::from_value(Value::from(arguments.arguments)).map_err(|e| {
        McpError::new(
            ErrorCode::INVALID_PARAMS,
            format!("Invalid input: {e}"),
            None,
        )
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Invalid or unnamed operations must be logged and skipped during an
    /// operation update, not abort the server.
    #[tokio::test]
    async fn invalid_operations_should_not_crash_server() {
        let schema = Schema::parse("type Query { id: String }", "schema.graphql")
            .unwrap()
            .validate()
            .unwrap();
        // Minimal Running state: no built-in tools, no peers, no health check.
        let running = Running {
            schema: Arc::new(Mutex::new(schema)),
            operations: Arc::new(Mutex::new(vec![])),
            headers: HeaderMap::new(),
            forward_headers: vec![],
            endpoint: "http://localhost:4000".parse().unwrap(),
            execute_tool: None,
            introspect_tool: None,
            search_tool: None,
            explorer_tool: None,
            validate_tool: None,
            custom_scalar_map: None,
            peers: Arc::new(RwLock::new(vec![])),
            cancellation_token: CancellationToken::new(),
            mutation_mode: MutationMode::None,
            disable_type_description: false,
            disable_schema_description: false,
            disable_auth_token_passthrough: false,
            health_check: None,
        };
        let operations = vec![
            // Valid, named operation: should survive the update.
            RawOperation::from((
                "query Valid { id }".to_string(),
                Some("valid.graphql".to_string()),
            )),
            // Malformed GraphQL (doubled brace): should be dropped.
            RawOperation::from((
                "query Invalid {{ id }".to_string(),
                Some("invalid.graphql".to_string()),
            )),
            // Anonymous operation (no name to expose as a tool): dropped too.
            RawOperation::from((
                "query { id }".to_string(),
                Some("unnamed.graphql".to_string()),
            )),
        ];
        let updated_running = running.update_operations(operations).await.unwrap();
        let updated_operations = updated_running.operations.lock().await;
        // Only the valid, named operation remains.
        assert_eq!(updated_operations.len(), 1);
        assert_eq!(updated_operations.first().unwrap().as_ref().name, "Valid");
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/path.rs:
--------------------------------------------------------------------------------
```rust
//! Defines a path from a root type in a GraphQL schema (Query, Mutation, or Subscription) to
//! another type.
use apollo_compiler::Name;
use apollo_compiler::ast::NamedType;
use std::collections::HashSet;
use std::fmt;
use std::fmt::Display;
use std::hash::Hash;
/// Iterator over shared references to the nodes of a path, root first.
pub struct PathNodeIter<'a> {
    current: Option<&'a PathNode>,
}
impl<'a> Iterator for PathNodeIter<'a> {
    type Item = &'a PathNode;
    /// Yield the current node, then advance to its child (if any).
    fn next(&mut self) -> Option<Self::Item> {
        let node = self.current.take()?;
        self.current = node.child.as_deref();
        Some(node)
    }
}
/// Iterator over mutable references to PathNode elements
pub struct PathNodeIterMut<'a> {
    // Next node to yield; `None` once the tail has been produced.
    current: Option<&'a mut PathNode>,
}
impl<'a> Iterator for PathNodeIterMut<'a> {
    type Item = &'a mut PathNode;
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.current.take()?;
        // Capture a raw pointer to the child before yielding `current`. A
        // plain reborrow (`current.child.as_deref_mut()`) would tie the
        // child's lifetime to `current`, preventing `current` from being
        // returned with lifetime 'a, so the borrow is laundered through a
        // raw pointer instead.
        let child_ptr = current
            .child
            .as_deref_mut()
            .map(|child| child as *mut PathNode);
        // SAFETY: `child_ptr` points into the `Box` owned by the node just
        // yielded, so it is valid as long as that boxed child is neither
        // dropped nor replaced before the next `next()` call.
        // NOTE(review): the `&'a mut PathNode` handed to the caller still
        // owns (and can reach) this child — e.g. via a nested `iter_mut()`
        // on the yielded node — which would alias the stored pointer.
        // Soundness relies on callers not mutating the chain between calls;
        // confirm this invariant or consider yielding disjoint data instead.
        self.current = child_ptr.map(|ptr| unsafe { &mut *ptr });
        Some(current)
    }
}
/// Iterator over owned PathNode elements, consuming the path root first.
pub struct PathNodeIntoIter {
    current: Option<PathNode>,
}
impl Iterator for PathNodeIntoIter {
    type Item = PathNode;
    /// Yield the current node by value, detaching and queuing its child.
    fn next(&mut self) -> Option<Self::Item> {
        let mut node = self.current.take()?;
        // `take()` detaches the child from the yielded node and transfers
        // ownership of it to the iterator in one step, so the returned node
        // never aliases the remainder of the path.
        self.current = node.child.take().map(|boxed| *boxed);
        Some(node)
    }
}
/// A single node in a schema path. Each node owns its successor, so a path is
/// a singly linked list running from the root type down to the final type.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct PathNode {
    /// The schema type of this node
    pub node_type: NamedType,
    /// The name of the field referencing the child type, if the child is a field type
    pub field_name: Option<Name>,
    /// The arguments of the field referencing the child type, if the child is a field type
    pub field_args: Vec<NamedType>,
    /// The child type
    // Private so that path structure can only change through the builder
    // methods (`add_child` / `add_parent`).
    child: Option<Box<PathNode>>,
}
impl PathNode {
    /// Create a new path containing just one type
    pub fn new(node_type: NamedType) -> Self {
        Self {
            node_type,
            field_name: None,
            field_args: Vec::default(),
            child: None,
        }
    }
    /// Add a child to the end of a path. Allows building up a path from the root down.
    ///
    /// Recurses to the tail node, records `field_name`/`field_args` there, and
    /// attaches a new node for `child_type`.
    pub fn add_child(
        self,
        field_name: Option<Name>,
        field_args: Vec<NamedType>,
        child_type: NamedType,
    ) -> Self {
        if let Some(child) = self.child {
            // Not at the tail yet: rebuild this node around the extended child.
            Self {
                node_type: self.node_type,
                field_name: self.field_name,
                field_args: self.field_args,
                child: Some(Box::new(
                    child.add_child(field_name, field_args, child_type),
                )),
            }
        } else {
            // Tail reached: this node now references the new child type.
            Self {
                node_type: self.node_type,
                field_name,
                field_args,
                child: Some(Box::new(PathNode::new(child_type))),
            }
        }
    }
    /// Add a parent to the beginning of a path. Allows building up a path from the bottom up.
    pub fn add_parent(
        self,
        field_name: Option<Name>,
        field_args: Vec<NamedType>,
        parent_type: NamedType,
    ) -> Self {
        Self {
            node_type: parent_type,
            field_name,
            field_args,
            child: Some(Box::new(self)),
        }
    }
    /// Gets the penultimate node in a path: the type referencing the final
    /// type, plus the referencing field name and its argument types.
    ///
    /// Returns `None` for a single-node path (nothing references the tail).
    pub fn referencing_type(&self) -> Option<(&NamedType, Option<&Name>, Vec<&NamedType>)> {
        if let Some(child) = &self.child {
            child.referencing_type_inner(self)
        } else {
            None
        }
    }
    // Walks to the tail carrying the previous node; the node carried when the
    // tail is reached is the penultimate (referencing) node.
    fn referencing_type_inner<'a>(
        &'a self,
        referencing_node: &'a PathNode,
    ) -> Option<(&'a NamedType, Option<&'a Name>, Vec<&'a NamedType>)> {
        if let Some(child) = &self.child {
            child.referencing_type_inner(self)
        } else {
            Some((
                &referencing_node.node_type,
                referencing_node.field_name.as_ref(),
                referencing_node.field_args.iter().collect(),
            ))
        }
    }
    /// Determines if a path contains a cycle (the same type appearing twice)
    pub(crate) fn has_cycle(&self) -> bool {
        let mut visited = HashSet::new();
        // `insert` returns false when the type was already seen.
        self.iter()
            .any(|node| !visited.insert(node.node_type.clone()))
    }
    /// Gets the length of the path (always at least 1)
    pub fn len(&self) -> usize {
        self.iter().count()
    }
    /// Always `false`: a path contains at least its root node.
    ///
    /// Provided alongside [`PathNode::len`] for API completeness
    /// (satisfies `clippy::len_without_is_empty`).
    pub fn is_empty(&self) -> bool {
        false
    }
    /// Get an iterator over references to all nodes in this path
    pub fn iter(&self) -> PathNodeIter<'_> {
        PathNodeIter {
            current: Some(self),
        }
    }
    /// Get an iterator over mutable references to all nodes in this path
    pub fn iter_mut(&mut self) -> PathNodeIterMut<'_> {
        PathNodeIterMut {
            current: Some(self),
        }
    }
}
impl<'a> IntoIterator for &'a PathNode {
    type Item = &'a PathNode;
    type IntoIter = PathNodeIter<'a>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
impl<'a> IntoIterator for &'a mut PathNode {
    type Item = &'a mut PathNode;
    type IntoIter = PathNodeIterMut<'a>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}
impl IntoIterator for PathNode {
    type Item = PathNode;
    type IntoIter = PathNodeIntoIter;
    fn into_iter(self) -> Self::IntoIter {
        PathNodeIntoIter {
            current: Some(self),
        }
    }
}
impl Display for PathNode {
    /// Render the path as " -> "-separated segments, e.g.
    /// `Root -> field(Arg1,Arg2) -> Child`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let type_name = self.node_type.as_str();
        match (&self.child, &self.field_name) {
            // Tail node: just the type name.
            (None, _) => write!(f, "{}", type_name),
            // Link with no field name recorded.
            (Some(child), None) => write!(f, "{} -> {}", type_name, child),
            // Field link without arguments.
            (Some(child), Some(field)) if self.field_args.is_empty() => {
                write!(f, "{} -> {} -> {}", type_name, field.as_str(), child)
            }
            // Field link with its argument types listed in parentheses.
            (Some(child), Some(field)) => {
                let args = self
                    .field_args
                    .iter()
                    .map(|arg| arg.as_str())
                    .collect::<Vec<_>>()
                    .join(",");
                write!(f, "{} -> {}({}) -> {}", type_name, field.as_str(), args, child)
            }
        }
    }
}
/// An item with a score
pub struct Scored<T: Eq + Hash + Display> {
    /// The scored item itself
    pub inner: T,
    // Kept private so the score can only be set at construction time.
    score: f32,
}
impl<T: Eq + Hash + Display> Scored<T> {
    /// Create a new scored item
    pub fn new(inner: T, score: f32) -> Self {
        Self { inner, score }
    }
    /// Get the score associated with this item
    pub fn score(&self) -> f32 {
        self.score
    }
}
impl<T: Eq + Hash + Display> PartialEq for Scored<T> {
    // Equality requires both the item and its score to match.
    // NOTE(review): `Eq` is asserted below even though `score` is an f32;
    // NaN scores would break reflexivity — confirm scores are never NaN.
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner && self.score() == other.score()
    }
}
impl<T: Eq + Hash + Display> Eq for Scored<T> {}
impl<T: Eq + Hash + Display> PartialOrd for Scored<T> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl<T: Eq + Hash + Display> Ord for Scored<T> {
    // Orders by score alone, using `total_cmp` for a total order over f32.
    // NOTE(review): this returns `Equal` for equal scores with different
    // `inner` values, which is inconsistent with `PartialEq` (compares
    // `inner` too). Ordered collections (e.g. BTreeSet) treat
    // `Ordering::Equal` as equality, so distinct items with equal scores may
    // collide there — confirm this is intended.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.score().total_cmp(&other.score())
    }
}
impl<T: Eq + Hash + Display> Hash for Scored<T> {
    // Hash only the inner item: f32 does not implement Hash. This is
    // consistent with Eq (equal values imply equal hashes).
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}
impl<T: Eq + Hash + Display> Display for Scored<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} ({})", self.inner, self.score)
    }
}
#[cfg(test)]
mod test {
    // Tests for PathNode construction (top-down and bottom-up), display
    // formatting, cycle detection, and the three iterator flavors.
    use super::*;
    use apollo_compiler::name;
    use insta::assert_snapshot;
    #[test]
    fn test_add_child() {
        // Top-down construction: start at Root, append Child via field "child"
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        assert_eq!(path.to_string(), "Root -> child -> Child");
    }
    #[test]
    fn test_add_parent() {
        // Bottom-up construction: start at Child, prepend Root via field "child"
        let path = PathNode::new(NamedType::new("Child").unwrap());
        let path = path.add_parent(
            Some(name!("child")),
            vec![],
            NamedType::new("Root").unwrap(),
        );
        assert_eq!(path.to_string(), "Root -> child -> Child");
    }
    #[test]
    fn test_len() {
        // Test path with no children
        let path = PathNode::new(NamedType::new("Root").unwrap());
        assert_eq!(path.len(), 1);
        // Test path with one child
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        assert_eq!(path.len(), 2);
        // Test path with two children
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        assert_eq!(path.len(), 3);
        // Test path with a non-field child
        let path = path.add_child(None, vec![], NamedType::new("GreatGrandChild").unwrap());
        assert_eq!(path.len(), 4);
    }
    #[test]
    fn test_display() {
        // Field arguments should render comma-separated in parentheses
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![
                NamedType::new("Arg1").unwrap(),
                NamedType::new("Arg2").unwrap(),
            ],
            NamedType::new("Child").unwrap(),
        );
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        assert_snapshot!(
            path.to_string(),
            @"Root -> child(Arg1,Arg2) -> Child -> grandchild -> GrandChild"
        );
    }
    #[test]
    fn test_has_cycle() {
        // Test path without cycle
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        assert!(!path.has_cycle());
        // Test path with cycle (Root -> Child -> Root)
        let root_type = NamedType::new("Root").unwrap();
        let path = PathNode::new(root_type.clone());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let path = path.add_child(Some(name!("back_to_root")), vec![], root_type);
        assert!(path.has_cycle());
    }
    #[test]
    fn test_referencing_type() {
        // Test single level path
        let path = PathNode::new(NamedType::new("Root").unwrap());
        assert_eq!(path.referencing_type(), None);
        // Test two level path: Root -> child -> Child
        let root_type = NamedType::new("Root").unwrap();
        let child_type = NamedType::new("Child").unwrap();
        let path = PathNode::new(root_type.clone());
        let path = path.add_child(Some(name!("child")), vec![], child_type.clone());
        assert_eq!(
            path.referencing_type(),
            Some((&root_type, Some(&name!("child")), vec![])),
        );
        // Test three level path: Root -> child -> Child -> grandchild -> GrandChild
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        assert_eq!(
            path.referencing_type(),
            Some((&child_type, Some(&name!("grandchild")), vec![]))
        );
    }
    #[test]
    fn test_iteration() {
        // Test single node
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let nodes: Vec<_> = path.iter().collect();
        assert_eq!(nodes.len(), 1);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
        // Test two level path: Root -> child -> Child
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let nodes: Vec<_> = path.iter().collect();
        assert_eq!(nodes.len(), 2);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
        assert_eq!(nodes[1].node_type.as_str(), "Child");
        // field_name lives on the parent node, not the child it points to
        assert_eq!(nodes[0].field_name.as_ref().unwrap().as_str(), "child");
        assert_eq!(nodes[1].field_name, None);
        // Test three level path: Root -> child -> Child -> grandchild -> GrandChild
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        let nodes: Vec<_> = path.iter().collect();
        assert_eq!(nodes.len(), 3);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
        assert_eq!(nodes[1].node_type.as_str(), "Child");
        assert_eq!(nodes[2].node_type.as_str(), "GrandChild");
        assert_eq!(nodes[0].field_name.as_ref().unwrap().as_str(), "child");
        assert_eq!(nodes[1].field_name.as_ref().unwrap().as_str(), "grandchild");
        assert_eq!(nodes[2].field_name, None);
    }
    #[test]
    fn test_iteration_mut() {
        // Test mutable iteration
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        let mut path = path;
        let nodes: Vec<_> = path.iter_mut().collect();
        assert_eq!(nodes.len(), 3);
        // Verify we can access the nodes mutably
        for node in nodes {
            assert!(!node.node_type.as_str().is_empty());
        }
    }
    #[test]
    fn test_into_iter() {
        // Test owned iteration
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        let nodes: Vec<_> = path.into_iter().collect();
        assert_eq!(nodes.len(), 3);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
        assert_eq!(nodes[1].node_type.as_str(), "Child");
        assert_eq!(nodes[2].node_type.as_str(), "GrandChild");
    }
    #[test]
    fn test_iteration_with_into_iter() {
        // Test using IntoIterator trait
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let path = path.add_child(
            Some(name!("grandchild")),
            vec![],
            NamedType::new("GrandChild").unwrap(),
        );
        // Test reference iteration
        let nodes: Vec<_> = (&path).into_iter().collect();
        assert_eq!(nodes.len(), 3);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
        assert_eq!(nodes[1].node_type.as_str(), "Child");
        assert_eq!(nodes[2].node_type.as_str(), "GrandChild");
        // Test mutable reference iteration
        let mut path = path;
        let nodes: Vec<_> = (&mut path).into_iter().collect();
        assert_eq!(nodes.len(), 3);
        // Test owned iteration
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let path = path.add_child(
            Some(name!("child")),
            vec![],
            NamedType::new("Child").unwrap(),
        );
        let nodes: Vec<_> = path.into_iter().collect();
        assert_eq!(nodes.len(), 2);
    }
    #[test]
    fn test_iteration_empty_path() {
        // Test iteration on a path with no children
        let path = PathNode::new(NamedType::new("Root").unwrap());
        let nodes: Vec<_> = path.iter().collect();
        assert_eq!(nodes.len(), 1);
        assert_eq!(nodes[0].node_type.as_str(), "Root");
    }
}
```
--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/graphql.rs:
--------------------------------------------------------------------------------
```rust
//! Execute GraphQL operations from an MCP tool
use std::sync::LazyLock;
use crate::errors::McpError;
use crate::generated::telemetry::{TelemetryAttribute, TelemetryMetric};
use crate::meter;
use opentelemetry::KeyValue;
use reqwest::header::{HeaderMap, HeaderValue};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware, Extension};
use reqwest_tracing::{OtelName, TracingMiddleware};
use rmcp::model::{CallToolResult, Content, ErrorCode};
use serde_json::{Map, Value};
use url::Url;
/// A GraphQL request to be executed against an endpoint.
#[derive(Debug)]
pub struct Request<'a> {
    /// Tool input from which the operation and variables are derived
    pub input: Value,
    /// The GraphQL endpoint URL the request is POSTed to
    pub endpoint: &'a Url,
    /// Headers to send with the request
    pub headers: HeaderMap,
}
/// A GraphQL operation document and its optional operation name.
#[derive(Debug, PartialEq)]
pub struct OperationDetails {
    /// The GraphQL document sent as the request's `query` field
    pub query: String,
    /// The operation name, if any; sent as `operationName` when present
    pub operation_name: Option<String>,
}
/// Shared HTTP client for all GraphQL requests, built lazily on first use.
/// Wraps reqwest with OpenTelemetry tracing middleware and names the client
/// "mcp-graphql-client" in emitted spans.
static GRAPHQL_CLIENT: LazyLock<ClientWithMiddleware> = LazyLock::new(|| {
    ClientBuilder::new(reqwest::Client::new())
        .with_init(Extension(OtelName("mcp-graphql-client".into())))
        .with(TracingMiddleware::default())
        .build()
});
/// Able to be executed as a GraphQL operation
pub trait Executable {
    /// Get the persisted query ID to be executed, if any
    fn persisted_query_id(&self) -> Option<String>;
    /// Get the operation to execute and its name
    fn operation(&self, input: Value) -> Result<OperationDetails, McpError>;
    /// Get the variables to execute the operation with
    fn variables(&self, input: Value) -> Result<Value, McpError>;
    /// Get the headers to execute the operation with
    fn headers(&self, default_headers: &HeaderMap<HeaderValue>) -> HeaderMap<HeaderValue>;
    /// Execute as a GraphQL operation using the endpoint and headers
    ///
    /// Sends either a persisted query (hash only) or a full operation
    /// document, records duration/count metrics tagged with success,
    /// operation id, and operation source, and maps transport or
    /// body-decoding failures to [`McpError`]s. The result is marked as an
    /// error only when the response has `errors` and no non-null `data`.
    #[tracing::instrument(skip(self, request))]
    async fn execute(&self, request: Request<'_>) -> Result<CallToolResult, McpError> {
        let meter = &meter::METER;
        let start = std::time::Instant::now();
        let mut op_id: Option<String> = None;
        // Identifies this client library to the GraphQL server
        let client_metadata = serde_json::json!({
            "name": "mcp",
            "version": std::env!("CARGO_PKG_VERSION")
        });
        let mut request_body = Map::from_iter([(
            String::from("variables"),
            self.variables(request.input.clone())?,
        )]);
        if let Some(id) = self.persisted_query_id() {
            // Persisted query: send only the hash, never the document
            request_body.insert(
                String::from("extensions"),
                serde_json::json!({
                    "persistedQuery": {
                        "version": 1,
                        "sha256Hash": id,
                    },
                    "clientLibrary": client_metadata,
                }),
            );
            // json! borrows `id`, so it can be moved here instead of cloned
            op_id = Some(id);
        } else {
            let OperationDetails {
                query,
                operation_name,
            } = self.operation(request.input)?;
            request_body.insert(String::from("query"), Value::String(query));
            request_body.insert(
                String::from("extensions"),
                serde_json::json!({
                    "clientLibrary": client_metadata,
                }),
            );
            if let Some(op_name) = operation_name {
                op_id = Some(op_name.clone());
                request_body.insert(String::from("operationName"), Value::String(op_name));
            }
        }
        let result = GRAPHQL_CLIENT
            .post(request.endpoint.as_str())
            .headers(self.headers(&request.headers))
            .body(Value::Object(request_body).to_string())
            .send()
            .await
            .map_err(|reqwest_error| {
                McpError::new(
                    ErrorCode::INTERNAL_ERROR,
                    format!("Failed to send GraphQL request: {reqwest_error}"),
                    None,
                )
            })?
            .json::<Value>()
            .await
            .map_err(|reqwest_error| {
                McpError::new(
                    ErrorCode::INTERNAL_ERROR,
                    format!("Failed to read GraphQL response body: {reqwest_error}"),
                    None,
                )
            })
            .map(|json| CallToolResult {
                // Fall back to a plain-text rendering if JSON content
                // construction fails.
                // NOTE(review): `unwrap_or` builds the text fallback eagerly
                // even on success; consider the lazy `unwrap_or_else` form.
                content: vec![Content::json(&json).unwrap_or(Content::text(json.to_string()))],
                // Error only when `errors` is present and `data` is absent/null
                is_error: Some(
                    json.get("errors")
                        .filter(|value| !matches!(value, Value::Null))
                        .is_some()
                        && json
                            .get("data")
                            .filter(|value| !matches!(value, Value::Null))
                            .is_none(),
                ),
                meta: None,
                structured_content: Some(json),
            });
        // Record response metrics
        let attributes = vec![
            KeyValue::new(
                TelemetryAttribute::Success.to_key(),
                result.as_ref().is_ok_and(|r| r.is_error != Some(true)),
            ),
            KeyValue::new(
                TelemetryAttribute::OperationId.to_key(),
                // Empty string when no PQ id or operation name was available
                op_id.unwrap_or_default(),
            ),
            KeyValue::new(
                TelemetryAttribute::OperationSource.to_key(),
                match self.persisted_query_id() {
                    Some(_) => "persisted_query",
                    None => "operation",
                },
            ),
        ];
        meter
            .f64_histogram(TelemetryMetric::OperationDuration.as_str())
            .build()
            .record(start.elapsed().as_millis() as f64, &attributes);
        meter
            .u64_counter(TelemetryMetric::OperationCount.as_str())
            .build()
            .add(1, &attributes);
        result
    }
}
#[cfg(test)]
mod test {
    // Tests for Executable::execute: request-body shape (with and without
    // persisted query extensions), transport/decoding error mapping, GraphQL
    // error propagation into CallToolResult, and emitted metric attributes.
    use crate::errors::McpError;
    use crate::generated::telemetry::TelemetryMetric;
    use crate::graphql::{Executable, OperationDetails, Request};
    use http::{HeaderMap, HeaderValue};
    use opentelemetry::global;
    use opentelemetry_sdk::metrics::data::{AggregatedMetrics, MetricData};
    use opentelemetry_sdk::metrics::{
        InMemoryMetricExporter, MeterProviderBuilder, PeriodicReader,
    };
    use serde_json::{Map, Value, json};
    use url::Url;
    // Stub that sends a full operation document (no persisted query id)
    struct TestExecutableWithoutPersistedQueryId;
    impl Executable for TestExecutableWithoutPersistedQueryId {
        fn persisted_query_id(&self) -> Option<String> {
            None
        }
        fn operation(&self, _input: Value) -> Result<OperationDetails, McpError> {
            Ok(OperationDetails {
                query: "query MockOp { mockOp { id } }".to_string(),
                operation_name: Some("mock_operation".to_string()),
            })
        }
        fn variables(&self, _input: Value) -> Result<Value, McpError> {
            let json = r#"{ "arg1": "foobar" }"#;
            let parsed_json = serde_json::from_str(json).expect("Failed to parse json");
            let json_map: Map<String, Value> = match parsed_json {
                Value::Object(map) => map,
                _ => panic!("Expected a JSON object, but received a different type"),
            };
            Ok(Value::from(json_map))
        }
        fn headers(&self, _default_headers: &HeaderMap<HeaderValue>) -> HeaderMap<HeaderValue> {
            HeaderMap::new()
        }
    }
    // Stub that sends only a persisted query hash; its operation() should not
    // be used for the request body
    struct TestExecutableWithPersistedQueryId;
    impl Executable for TestExecutableWithPersistedQueryId {
        fn persisted_query_id(&self) -> Option<String> {
            Some("4f059505-fe13-4043-819a-461dd82dd5ed".to_string())
        }
        fn operation(&self, _input: Value) -> Result<OperationDetails, McpError> {
            Ok(OperationDetails {
                query: "query MockOp { mockOp { id } }".to_string(),
                operation_name: Some("mock_operation".to_string()),
            })
        }
        fn variables(&self, _input: Value) -> Result<Value, McpError> {
            Ok(Value::String("mock_variables".to_string()))
        }
        fn headers(&self, _default_headers: &HeaderMap<HeaderValue>) -> HeaderMap<HeaderValue> {
            HeaderMap::new()
        }
    }
    #[tokio::test]
    async fn calls_graphql_endpoint_with_expected_body_without_pq_extensions() {
        // given
        let mut server = mockito::Server::new_async().await;
        let url = Url::parse(server.url().as_str()).unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        // Full document + operationName + clientLibrary extension expected
        let expected_request_body = json!({
            "variables": { "arg1": "foobar" },
            "query": "query MockOp { mockOp { id } }",
            "extensions": {
                "clientLibrary": {
                    "name":"mcp",
                    "version": std::env!("CARGO_PKG_VERSION")
                }
            },
            "operationName":"mock_operation"
        })
        .to_string();
        let mock = server
            .mock("POST", "/")
            .match_body(expected_request_body.as_str())
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(json!({ "data": {}  }).to_string())
            .expect(1)
            .create_async()
            .await;
        // when
        let test_executable = TestExecutableWithoutPersistedQueryId {};
        let result = test_executable.execute(mock_request).await.unwrap();
        // then
        mock.assert(); // verify that the mock http server route was invoked
        assert!(!result.content.is_empty());
        assert!(!result.is_error.unwrap());
    }
    #[tokio::test]
    async fn calls_graphql_endpoint_with_expected_pq_extensions_in_request_body() {
        // given
        let mut server = mockito::Server::new_async().await;
        let url = Url::parse(server.url().as_str()).unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        // persistedQuery hash expected instead of a query document
        let expected_request_body = json!({
            "variables": "mock_variables",
            "extensions": {
                "persistedQuery": {
                    "version": 1,
                    "sha256Hash": "4f059505-fe13-4043-819a-461dd82dd5ed",
                },
                "clientLibrary": {
                    "name":"mcp",
                    "version": std::env!("CARGO_PKG_VERSION")
                }
            },
        })
        .to_string();
        let mock = server
            .mock("POST", "/")
            .match_body(expected_request_body.as_str())
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(json!({ "data": {},  }).to_string())
            .expect(1)
            .create_async()
            .await;
        // when
        let test_executable = TestExecutableWithPersistedQueryId {};
        let result = test_executable.execute(mock_request).await.unwrap();
        // then
        mock.assert(); // verify that the mock http server route was invoked
        assert!(!result.content.is_empty());
        assert!(!result.is_error.unwrap());
    }
    #[tokio::test]
    async fn results_in_mcp_error_when_gql_server_cannot_be_reached() {
        // given
        let url = Url::parse("http://localhost/no-server").unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        // when
        let test_executable = TestExecutableWithPersistedQueryId {};
        let result = test_executable.execute(mock_request).await;
        // then
        match result {
            Err(e) => {
                assert!(
                    e.message
                        .to_string()
                        .starts_with("Failed to send GraphQL request")
                );
            }
            _ => {
                panic!("Expected MCP error");
            }
        }
    }
    #[tokio::test]
    async fn results_in_mcp_error_when_json_body_cannot_be_parsed() {
        // given
        let mut server = mockito::Server::new_async().await;
        let url = Url::parse(server.url().as_str()).unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        // Single quotes make this body invalid JSON, forcing a decode failure
        server
            .mock("POST", "/")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body("{ \"invalid_json\": 'foo' }")
            .expect(1)
            .create_async()
            .await;
        // when
        let test_executable = TestExecutableWithPersistedQueryId {};
        let result = test_executable.execute(mock_request).await;
        // then
        match result {
            Err(e) => {
                assert!(
                    e.message
                        .to_string()
                        .starts_with("Failed to read GraphQL response body")
                );
            }
            _ => {
                panic!("Expected MCP error");
            }
        }
    }
    #[tokio::test]
    async fn gql_response_error_are_found_in_call_tool_result() {
        // given
        let mut server = mockito::Server::new_async().await;
        let url = Url::parse(server.url().as_str()).unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        // errors present and data null => is_error should be true
        server
            .mock("POST", "/")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(json!({ "data": null, "errors": ["an error"] }).to_string())
            .expect(1)
            .create_async()
            .await;
        // when
        let test_executable = TestExecutableWithPersistedQueryId {};
        let result = test_executable.execute(mock_request).await.unwrap();
        // then
        assert!(result.is_error.is_some());
        assert!(result.is_error.unwrap());
    }
    #[tokio::test]
    async fn validate_metric_attributes_success_false() {
        // given
        // Install an in-memory exporter so recorded metrics can be inspected
        let exporter = InMemoryMetricExporter::default();
        let meter_provider = MeterProviderBuilder::default()
            .with_reader(PeriodicReader::builder(exporter.clone()).build())
            .build();
        global::set_meter_provider(meter_provider.clone());
        let mut server = mockito::Server::new_async().await;
        let url = Url::parse(server.url().as_str()).unwrap();
        let mock_request = Request {
            input: json!({}),
            endpoint: &url,
            headers: HeaderMap::new(),
        };
        server
            .mock("POST", "/")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(json!({ "data": null, "errors": ["an error"] }).to_string())
            .expect(1)
            .create_async()
            .await;
        // when
        let test_executable = TestExecutableWithPersistedQueryId {};
        let result = test_executable.execute(mock_request).await.unwrap();
        // then
        assert!(result.is_error.is_some());
        assert!(result.is_error.unwrap());
        // Retrieve the finished metrics from the exporter
        let finished_metrics = exporter.get_finished_metrics().unwrap();
        // validate the attributes of the apollo.mcp.operation.count counter
        for resource_metrics in finished_metrics {
            if let Some(scope_metrics) = resource_metrics
                .scope_metrics()
                .find(|scope_metrics| scope_metrics.scope().name() == "apollo.mcp")
            {
                for metric in scope_metrics.metrics() {
                    if metric.name() == TelemetryMetric::OperationCount.as_str()
                        && let AggregatedMetrics::U64(MetricData::Sum(data)) = metric.data()
                    {
                        for point in data.data_points() {
                            let attributes = point.attributes();
                            let mut attr_map = std::collections::HashMap::new();
                            for kv in attributes {
                                attr_map.insert(kv.key.as_str(), kv.value.as_str());
                            }
                            assert_eq!(
                                attr_map.get("operation.id").map(|s| s.as_ref()),
                                Some("mock_operation")
                            );
                            assert_eq!(
                                attr_map.get("operation.type").map(|s| s.as_ref()),
                                Some("persisted_query")
                            );
                            assert_eq!(
                                attr_map.get("success"),
                                Some(&std::borrow::Cow::Borrowed("false"))
                            );
                        }
                    }
                }
            }
        }
    }
}
```