#
tokens: 49949/50000 131/187 files (page 1/6)
lines: off (toggle) GitHub
raw markdown copy
This is page 1 of 6. Use http://codebase.md/apollographql/apollo-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .cargo
│   └── config.toml
├── .changesets
│   └── README.md
├── .envrc
├── .github
│   ├── CODEOWNERS
│   ├── renovate.json5
│   └── workflows
│       ├── canary-release.yml
│       ├── ci.yml
│       ├── prep-release.yml
│       ├── release-bins.yml
│       ├── release-container.yml
│       ├── sync-develop.yml
│       └── verify-changeset.yml
├── .gitignore
├── .idea
│   └── runConfigurations
│       ├── clippy.xml
│       ├── format___test___clippy.xml
│       ├── format.xml
│       ├── Run_spacedevs.xml
│       └── Test_apollo_mcp_server.xml
├── .vscode
│   ├── extensions.json
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── apollo.config.json
├── Cargo.lock
├── Cargo.toml
├── CHANGELOG_SECTION.md
├── CHANGELOG.md
├── clippy.toml
├── codecov.yml
├── CONTRIBUTING.md
├── crates
│   ├── apollo-mcp-registry
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── files.rs
│   │       ├── lib.rs
│   │       ├── logging.rs
│   │       ├── platform_api
│   │       │   ├── operation_collections
│   │       │   │   ├── collection_poller.rs
│   │       │   │   ├── error.rs
│   │       │   │   ├── event.rs
│   │       │   │   └── operation_collections.graphql
│   │       │   ├── operation_collections.rs
│   │       │   └── platform-api.graphql
│   │       ├── platform_api.rs
│   │       ├── testdata
│   │       │   ├── minimal_supergraph.graphql
│   │       │   └── supergraph.graphql
│   │       ├── uplink
│   │       │   ├── persisted_queries
│   │       │   │   ├── event.rs
│   │       │   │   ├── manifest_poller.rs
│   │       │   │   ├── manifest.rs
│   │       │   │   └── persisted_queries_manifest_query.graphql
│   │       │   ├── persisted_queries.rs
│   │       │   ├── schema
│   │       │   │   ├── event.rs
│   │       │   │   ├── schema_query.graphql
│   │       │   │   └── schema_stream.rs
│   │       │   ├── schema.rs
│   │       │   ├── snapshots
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_all_fail@logs.snap
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_fallback@logs.snap
│   │       │   │   └── apollo_mcp_registry__uplink__schema__tests__schema_by_url@logs.snap
│   │       │   └── uplink.graphql
│   │       └── uplink.rs
│   ├── apollo-mcp-server
│   │   ├── build.rs
│   │   ├── Cargo.toml
│   │   ├── src
│   │   │   ├── auth
│   │   │   │   ├── networked_token_validator.rs
│   │   │   │   ├── protected_resource.rs
│   │   │   │   ├── valid_token.rs
│   │   │   │   └── www_authenticate.rs
│   │   │   ├── auth.rs
│   │   │   ├── config_schema.rs
│   │   │   ├── cors.rs
│   │   │   ├── custom_scalar_map.rs
│   │   │   ├── errors.rs
│   │   │   ├── event.rs
│   │   │   ├── explorer.rs
│   │   │   ├── graphql.rs
│   │   │   ├── headers.rs
│   │   │   ├── health.rs
│   │   │   ├── introspection
│   │   │   │   ├── minify.rs
│   │   │   │   ├── snapshots
│   │   │   │   │   └── apollo_mcp_server__introspection__minify__tests__minify_schema.snap
│   │   │   │   ├── tools
│   │   │   │   │   ├── execute.rs
│   │   │   │   │   ├── introspect.rs
│   │   │   │   │   ├── search.rs
│   │   │   │   │   ├── snapshots
│   │   │   │   │   │   └── apollo_mcp_server__introspection__tools__search__tests__search_tool.snap
│   │   │   │   │   ├── testdata
│   │   │   │   │   │   └── schema.graphql
│   │   │   │   │   └── validate.rs
│   │   │   │   └── tools.rs
│   │   │   ├── introspection.rs
│   │   │   ├── json_schema.rs
│   │   │   ├── lib.rs
│   │   │   ├── main.rs
│   │   │   ├── meter.rs
│   │   │   ├── operations
│   │   │   │   ├── mutation_mode.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── operation.rs
│   │   │   │   ├── raw_operation.rs
│   │   │   │   ├── schema_walker
│   │   │   │   │   ├── name.rs
│   │   │   │   │   └── type.rs
│   │   │   │   └── schema_walker.rs
│   │   │   ├── operations.rs
│   │   │   ├── runtime
│   │   │   │   ├── config.rs
│   │   │   │   ├── endpoint.rs
│   │   │   │   ├── filtering_exporter.rs
│   │   │   │   ├── graphos.rs
│   │   │   │   ├── introspection.rs
│   │   │   │   ├── logging
│   │   │   │   │   ├── defaults.rs
│   │   │   │   │   ├── log_rotation_kind.rs
│   │   │   │   │   └── parsers.rs
│   │   │   │   ├── logging.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── overrides.rs
│   │   │   │   ├── schema_source.rs
│   │   │   │   ├── schemas.rs
│   │   │   │   ├── telemetry
│   │   │   │   │   └── sampler.rs
│   │   │   │   └── telemetry.rs
│   │   │   ├── runtime.rs
│   │   │   ├── sanitize.rs
│   │   │   ├── schema_tree_shake.rs
│   │   │   ├── server
│   │   │   │   ├── states
│   │   │   │   │   ├── configuring.rs
│   │   │   │   │   ├── operations_configured.rs
│   │   │   │   │   ├── running.rs
│   │   │   │   │   ├── schema_configured.rs
│   │   │   │   │   └── starting.rs
│   │   │   │   └── states.rs
│   │   │   ├── server.rs
│   │   │   └── telemetry_attributes.rs
│   │   └── telemetry.toml
│   └── apollo-schema-index
│       ├── Cargo.toml
│       └── src
│           ├── error.rs
│           ├── lib.rs
│           ├── path.rs
│           ├── snapshots
│           │   ├── apollo_schema_index__tests__search.snap
│           │   └── apollo_schema_index__traverse__tests__schema_traverse.snap
│           ├── testdata
│           │   └── schema.graphql
│           └── traverse.rs
├── docs
│   └── source
│       ├── _sidebar.yaml
│       ├── auth.mdx
│       ├── best-practices.mdx
│       ├── config-file.mdx
│       ├── cors.mdx
│       ├── custom-scalars.mdx
│       ├── debugging.mdx
│       ├── define-tools.mdx
│       ├── deploy.mdx
│       ├── guides
│       │   └── auth-auth0.mdx
│       ├── health-checks.mdx
│       ├── images
│       │   ├── auth0-permissions-enable.png
│       │   ├── mcp-getstarted-inspector-http.jpg
│       │   └── mcp-getstarted-inspector-stdio.jpg
│       ├── index.mdx
│       ├── licensing.mdx
│       ├── limitations.mdx
│       ├── quickstart.mdx
│       ├── run.mdx
│       └── telemetry.mdx
├── e2e
│   └── mcp-server-tester
│       ├── local-operations
│       │   ├── api.graphql
│       │   ├── config.yaml
│       │   ├── operations
│       │   │   ├── ExploreCelestialBodies.graphql
│       │   │   ├── GetAstronautDetails.graphql
│       │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│       │   │   └── SearchUpcomingLaunches.graphql
│       │   └── tool-tests.yaml
│       ├── pq-manifest
│       │   ├── api.graphql
│       │   ├── apollo.json
│       │   ├── config.yaml
│       │   └── tool-tests.yaml
│       ├── run_tests.sh
│       └── server-config.template.json
├── flake.lock
├── flake.nix
├── graphql
│   ├── TheSpaceDevs
│   │   ├── .vscode
│   │   │   ├── extensions.json
│   │   │   └── tasks.json
│   │   ├── api.graphql
│   │   ├── apollo.config.json
│   │   ├── config.yaml
│   │   ├── operations
│   │   │   ├── ExploreCelestialBodies.graphql
│   │   │   ├── GetAstronautDetails.graphql
│   │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│   │   │   └── SearchUpcomingLaunches.graphql
│   │   ├── persisted_queries
│   │   │   └── apollo.json
│   │   ├── persisted_queries.config.json
│   │   ├── README.md
│   │   └── supergraph.yaml
│   └── weather
│       ├── api.graphql
│       ├── config.yaml
│       ├── operations
│       │   ├── alerts.graphql
│       │   ├── all.graphql
│       │   └── forecast.graphql
│       ├── persisted_queries
│       │   └── apollo.json
│       ├── supergraph.graphql
│       ├── supergraph.yaml
│       └── weather.graphql
├── LICENSE
├── macos-entitlements.plist
├── nix
│   ├── apollo-mcp.nix
│   ├── cargo-zigbuild.patch
│   ├── mcp-server-tools
│   │   ├── default.nix
│   │   ├── node-generated
│   │   │   ├── default.nix
│   │   │   ├── node-env.nix
│   │   │   └── node-packages.nix
│   │   ├── node-mcp-servers.json
│   │   └── README.md
│   └── mcphost.nix
├── README.md
├── rust-toolchain.toml
├── scripts
│   ├── nix
│   │   └── install.sh
│   └── windows
│       └── install.ps1
└── xtask
    ├── Cargo.lock
    ├── Cargo.toml
    └── src
        ├── commands
        │   ├── changeset
        │   │   ├── matching_pull_request.graphql
        │   │   ├── matching_pull_request.rs
        │   │   ├── mod.rs
        │   │   ├── scalars.rs
        │   │   └── snapshots
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title_and_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_neither_issues_or_prs.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_prs_in_title_when_empty_issues.snap
        │   │       └── xtask__commands__changeset__tests__it_templatizes_without_prs_in_title_when_issues_present.snap
        │   └── mod.rs
        ├── lib.rs
        └── main.rs
```

# Files

--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------

```
use flake

```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
.idea/*

# Let run configurations be unignored so they can be shared if desired
!.idea/runConfigurations/

.DS_Store

# Generated by Cargo
# will have compiled files and executables
debug/
target/

# These are backup files generated by rustfmt
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

# RustRover
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Generated by direnv
.direnv/

# Symlink created by nix
result

```

--------------------------------------------------------------------------------
/nix/mcp-server-tools/README.md:
--------------------------------------------------------------------------------

```markdown
# MCP Server Tools

This directory contains autogenerated nix evaluations for the various MCP server
tools for use during development.

## Adding More Servers

In order to add more server binaries to this autogenerated evaluation, do the
following:

1. Add the tool to the list of tools in `node-mcp-servers.json`

```json
[
  <EXISTING TOOLING>,
  "@modelcontextprotocol/server-<NAME OF SERVER>"
]
```

2. Regenerate the autogenerated files by running the following in the current
directory

```shell
$ node2nix -i node-mcp-servers.json --pkg-name nodejs_22 \
  -o node-generated/node-packages.nix \
  -c node-generated/default.nix \
  -e node-generated/node-env.nix
```

3. Reload your nix development shell

```

--------------------------------------------------------------------------------
/.changesets/README.md:
--------------------------------------------------------------------------------

```markdown
# Upcoming Changelog Entries

This directory keeps files which individually represent entries that will represent the CHANGELOG produced for the next release.

> **Note**
>
> The files within this directory use a **convention which must be obeyed** in order for the file to be slurped up by automated tooling.

> **Warning**
>
> The aforementioned **tooling doesn't exist yet** but will be created soon. 😺

### How to create a Changelog entry

1. Push the change you are writing a changeset for up to GitHub.
2. Open a pull request for it.  Note that your PR title and body will be used to pre-populate the changeset.
3. On your local checkout, **run `cargo xtask changeset create` from the root of the repository** and follow the prompts.
4. Add, commit and push the changeset file that is created and push it up to GitHub.

### Conventions used in this `.changesets/` directory

The convention used in this directory and obeyed by the `cargo xtask changeset create` command is:

1. Files in this directory must use the `.md` file extension.
2. There must not be multiple changelog entries in a single file.
3. Files *must start with a prefix* that indicates the classification of the changeset.  The prefixes are as follows:
   - **Breaking**: `breaking_`
   - **Feature**: `feat_`
   - **Fixes**: `fix_`
   - **Configuration**: `config_`
   - **Maintenance**: `maint_`
   - **Documentation**: `docs_`
   - **Experimental**: `exp_`
4. The pattern following the prefix can be anything that matches `[a-z_]+` (i.e., any number of lowercased `a-z` and `_`).  Again, `.md` must be on the end as the extension.  For example, `feat_flying_forest_foxes.md`.
5. Other files not matching the above convention will be ignored, including this `README.md`.
6. The files must use the following format:

       ### Brief but complete sentence that stands on its own - @USERNAME PR #PULL_NUMBER

       A description of the fix which stands on its own separate from the title.  It should embrace the use of Markdown to stylize the commentary so it looks great on the GitHub Releases, when shared on social cards, etc.

     Note the key components:

     - A _brief but complete_ sentence as a **title** that stands on its own without needing to read the description
     - A GitHub reference to **one or more authors** who contributed
     - A GitHub reference to the **pull request**
     - A **description** which _doesn't need the title's context_ to be understood

```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
<div align="center">
<a href="https://www.apollographql.com/"><img src="https://raw.githubusercontent.com/apollographql/apollo-client-devtools/main/assets/apollo-wordmark.svg" height="100" alt="Apollo Client"></a>
</div>

![version](https://img.shields.io/github/v/release/apollographql/apollo-mcp-server)
![ci workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/ci.yml)
![release binaries workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/release-bins.yml?label=release%20binaries)
![release container workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/release-container.yml?label=release%20container)
![license](https://img.shields.io/github/license/apollographql/apollo-mcp-server)
[![codecov](https://codecov.io/github/apollographql/apollo-mcp-server/graph/badge.svg?token=6NHuvZQ8ak)](https://codecov.io/github/apollographql/apollo-mcp-server)

# Apollo MCP Server

Apollo MCP Server is a [Model Context Protocol](https://modelcontextprotocol.io/) server that exposes GraphQL operations as MCP tools. It provides a standard way for AI models to access and orchestrate your APIs running with Apollo.

## Documentation

See [the documentation](https://www.apollographql.com/docs/apollo-mcp-server/) for full details. This README shows the basics of getting this MCP server running. More details are available on the documentation site.

## Installation

You can either build this server from source, if you have Rust installed on your workstation, or you can follow the [installation guide](https://www.apollographql.com/docs/apollo-mcp-server/run). To build from source, run `cargo build` from the root of this repository and the server will be built in the `target/debug` directory.

## Getting started

Follow the [quickstart tutorial](https://www.apollographql.com/docs/apollo-mcp-server/quickstart) to get started with this server.

## Usage

Full usage of Apollo MCP Server is documented on the [user guide](https://www.apollographql.com/docs/apollo-mcp-server/run). There are a few items that are necessary for this server to function. Specifically, the following things must be configured:

1. A graph for the MCP server to sit in front of.
2. Definitions for the GraphQL operations that should be exposed as MCP tools.
3. A configuration file describing how the MCP server should run.
4. A connection to an MCP client, such as an LLM or [MCP inspector](https://modelcontextprotocol.io/legacy/tools/inspector).

These are all described on the user guide. Specific configuration options for the configuration file are documented in the [config file reference](https://www.apollographql.com/docs/apollo-mcp-server/config-file).

## Contributions

Checkout the [contributor guidelines](https://github.com/apollographql/apollo-mcp-server/blob/main/CONTRIBUTING.md) for more information.

## Licensing

This project is licensed under the MIT License. See the [LICENSE](./LICENSE) file for the full license text.

# Security

Refer to our [security policy](https://github.com/apollographql/.github/blob/main/SECURITY.md).

> [!IMPORTANT]  
> **Do not open up a GitHub issue if a found bug is a security vulnerability**, and instead refer to our [security policy](https://github.com/apollographql/.github/blob/main/SECURITY.md).

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/README.md:
--------------------------------------------------------------------------------

```markdown
# The Space Devs MCP Server

This folder contains an example usage of the Apollo MCP server for [The Space Devs](https://thespacedevs.com/) APIs, a set of APIs that exposes spaceflight information. We have a [hosted GraphQL endpoint](https://thespacedevs-production.up.railway.app/) that exposes The Space Devs Launch Library v2 REST APIs using Apollo Connectors. 

## Setup

To use this example, you must set up one of these three options to run the Apollo MCP server locally:

1. **_(Coming Soon)_** Use `rover dev` to run the Apollo MCP server - requires [installing `rover`](https://www.apollographql.com/docs/rover/getting-started)
2. Run the Docker image - requires having [Docker installed](https://docs.docker.com/engine/install/)
3. Build the `apollo-mcp-server` repo from source 

```bash
git clone https://github.com/apollographql/apollo-mcp-server
cd apollo-mcp-server
cargo build

# Built binaries will be located in ./target/debug/apollo-mcp-server
```

If you don't have an MCP client you plan on using right away, you can inspect the tools of the Apollo MCP server using the MCP Inspector:

```sh
npx @modelcontextprotocol/inspector
```

## Using STDIO and invoking Apollo MCP server with command

This option is typically used when you have built the source repository and use the binary outputs in the `target/debug/*` folder.

There are operations located at `./operations/*.graphql` for you to use in your configuration. You can provide a set of operations in your MCP configuration along with the `--introspection` option that enables the LLM to generate a dynamic operation along with the ability to execute it. 

Here is an example configuration you can use _(Note: you must provide the full path to the binary in the command. Make sure to replace the command with the path to where you cloned the repository)_:

```json
{
  "mcpServers": {
    "thespacedevs": {
      "command": "/Users/michaelwatson/Documents/GitHub/apollographql/apollo-mcp-server/target/debug/apollo-mcp-server",
      "args": [
        "graphql/TheSpaceDevs/config.yaml"
      ]
    }
  }
}
```

## Using Streamable HTTP with Apollo MCP server

There are operations located at `./operations/*.graphql` for you to use in your configuration. You can provide a set of operations in your MCP configuration that enables the LLM to generate a dynamic operation along with the ability to execute it. 

### Running with `rover dev`

```bash
rover dev --supergraph-config supergraph.yaml --mcp config.yaml
```

### Running Apollo MCP server Docker image

1. Start up the MCP server locally

```bash
docker run \
  -it --rm \
  --name apollo-mcp-server \
  -p 8000:8000 \
  -v $PWD/graphql/TheSpaceDevs/config.yaml:/config.yaml \
  -v $PWD/graphql/TheSpaceDevs:/data \
  ghcr.io/apollographql/apollo-mcp-server:latest /config.yaml
```

2. Add the MCP port to your MCP Server configuration for the client application you are running. If you are running locally, the server link will be `http://127.0.0.1:8000/mcp`.

_Note: Claude Desktop currently doesn't support SSE_

```
{
  "mcpServers": {
    "thespacedevs": {
      "command": "npx",
      "args": [
        "mcp-remote",
        "http://127.0.0.1:8000/mcp"
      ]
    }
  }
}
```

### Running binary built from source code

Here is an example configuration you can use _(Note: you must provide the full path to the binary in the command. Make sure to replace the command with the path to where you cloned the repository)_:

```json
{
  "mcpServers": {
    "thespacedevs": {
      "command": "/Users/michaelwatson/Documents/GitHub/apollographql/apollo-mcp-server/target/debug/apollo-mcp-server",
      "args": [
        "graphql/TheSpaceDevs/config.yaml"
      ]
    }
  }
}
```

```

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------

```markdown
![ci workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/ci.yml)
![release binaries workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/release-bins.yml?label=release%20binaries)
![release container workflow status](https://img.shields.io/github/actions/workflow/status/apollographql/apollo-mcp-server/release-container.yml?label=release%20container)
![version](https://img.shields.io/github/v/release/apollographql/apollo-mcp-server)
![license](https://img.shields.io/github/license/apollographql/apollo-mcp-server)

## How to contribute to Apollo MCP Server

### Bug Reporting

> [!WARNING]  
> **Do not open up a GitHub issue if the bug is a security vulnerability**, and instead refer to our [security policy](https://github.com/apollographql/.github/blob/main/SECURITY.md).
* **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/apollographql/apollo-mcp-server/issues) as well as the [Apollo Community forums](https://community.apollographql.com/latest).
* If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/apollographql/apollo-mcp-server/issues/new). Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
* If appropriate, add the most relevant label but leave empty if unsure.

### Did you write a patch that fixes a bug?

* Refer to the simple [branching guide](#branching-strategy) for the project.
* Open a new GitHub pull request with the patch.
* Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
* Before submitting, please read the [branching strategy](#branching-strategy) and [code review guidelines](#code-review-guidelines) to learn more about our coding conventions, branching strategies, code reviews guidelines, etc.

### Do you intend to add a new feature or change an existing one?

* Suggest your change as a new [issue](https://github.com/apollographql/apollo-mcp-server/issues) using the `enhancement` label.
* You can also suggest changes and features using the [Apollo Community forums](https://community.apollographql.com/latest).
* Once the feature is coded and complete, open a GitHub pull request providing clear description of the feature/change and include any relevant links to discussions.
* Before submitting, please read the [branching strategy](#branching-strategy) and [code review guidelines](#code-review-guidelines) to learn more about our coding conventions, branching strategies, code reviews guidelines, etc.

### Do you have questions about the code or about Apollo MCP Server itself?

* Ask any question about Apollo MCP Server using either the [issues](https://github.com/apollographql/apollo-mcp-server/issues) page or the [Apollo Community forums](https://community.apollographql.com/latest). 
* If using the issues page, please use the `question` label.

Thanks!

Apollo MCP Server team

---

### Code of Conduct

Please refer to our [code of conduct policy](https://github.com/apollographql/router/blob/dev/CONTRIBUTING.md#code-of-conduct).

---

### Branching strategy
The Apollo MCP Server project follows a pseudo [GitFlow](https://docs.aws.amazon.com/prescriptive-guidance/latest/choosing-git-branch-approach/gitflow-branching-strategy.html) branch strategy.

1. All feature/bug fix/patch work should branch off the `develop` branch.

### Code review guidelines
It’s important that every piece of code in Apollo packages is reviewed by at least one core contributor familiar with that codebase. Here are some things we look for:

1. Required CI checks pass. This is a prerequisite for the review, and it is the PR author's responsibility. As long as the tests don’t pass, the PR won't get reviewed.
2. Simplicity. Is this the simplest way to achieve the intended goal? If there are too many files, redundant functions, or complex lines of code, suggest a simpler way to do the same thing. In particular, avoid implementing an overly general solution when a simple, small, and pragmatic fix will do.
3. Testing. Please make sure that the tests ensure that the code won’t break when other stuff changes around it. The error messages in the test should help identify what is broken exactly and how. The tests should test every edge case if possible. Please make sure you get as much coverage as possible.
4. No unnecessary or unrelated changes. PRs shouldn’t come with random formatting changes, especially in unrelated parts of the code. If there is some refactoring that needs to be done, it should be in a separate PR from a bug fix or feature, if possible.
5. Please run `cargo test`, `cargo clippy`, and `cargo fmt` prior to creating a PR.

### Code Coverage

Apollo MCP Server uses comprehensive code coverage reporting to ensure code quality and test effectiveness. 
The project uses [cargo-llvm-cov](https://crates.io/crates/cargo-llvm-cov) for generating code coverage reports and [Codecov](https://www.codecov.io/) for coverage analysis and reporting. Coverage is automatically generated and reported on every pull request through GitHub Actions.

#### Coverage Targets

The project maintains the following coverage targets, configured in `codecov.yml`:

- **Project Coverage**: Automatically maintained - should increase overall coverage on each PR
- **Patch Coverage**: 80% - requires 80% coverage on all new/modified code

These targets help ensure that:

- The overall codebase coverage doesn't decrease over time
- New code is well-tested before being merged

```

--------------------------------------------------------------------------------
/apollo.config.json:
--------------------------------------------------------------------------------

```json
{
  "rover": {
  }
}
```

--------------------------------------------------------------------------------
/xtask/src/commands/mod.rs:
--------------------------------------------------------------------------------

```rust
pub(crate) mod changeset;
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/platform_api/operation_collections.rs:
--------------------------------------------------------------------------------

```rust
pub mod collection_poller;
pub mod error;
pub mod event;

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/.vscode/extensions.json:
--------------------------------------------------------------------------------

```json
{
    "recommendations": ["apollographql.vscode-apollo"]
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/lib.rs:
--------------------------------------------------------------------------------

```rust
pub mod files;
pub(crate) mod logging;
pub mod platform_api;
pub mod uplink;

```

--------------------------------------------------------------------------------
/.cargo/config.toml:
--------------------------------------------------------------------------------

```toml
[alias]
xtask = "run --locked --package xtask --manifest-path xtask/Cargo.toml --"

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection.rs:
--------------------------------------------------------------------------------

```rust
//! Allow an AI agent to introspect a GraphQL schema.

mod minify;
pub(crate) mod tools;

```

--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------

```toml
[toolchain]
channel = "1.90.0"
profile = "default"
components = ["rust-analyzer", "rust-src"]

```

--------------------------------------------------------------------------------
/graphql/weather/operations/forecast.graphql:
--------------------------------------------------------------------------------

```graphql
query GetForecast($coordinate: InputCoordinate!) {
  forecast(coordinate: $coordinate) {
    detailed
  }
}

```

--------------------------------------------------------------------------------
/graphql/weather/operations/alerts.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAlerts($state: String!) {
  alerts(state: $state) {
    severity
    description
    instruction
  }
}

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/persisted_queries.config.json:
--------------------------------------------------------------------------------

```json
{
  "documents": [
    "operations/**/*.{graphql,gql,js,jsx,ts,tsx}"
  ],
  "output": "persisted_queries/apollo.json"
}
```

--------------------------------------------------------------------------------
/clippy.toml:
--------------------------------------------------------------------------------

```toml
allow-expect-in-tests = true
allow-panic-in-tests = true
allow-unwrap-in-tests = true
allow-indexing-slicing-in-tests = true

```

--------------------------------------------------------------------------------
/nix/mcp-server-tools/node-mcp-servers.json:
--------------------------------------------------------------------------------

```json
[
  "@modelcontextprotocol/inspector",
  "@modelcontextprotocol/server-filesystem",
  "@modelcontextprotocol/server-memory"
]

```

--------------------------------------------------------------------------------
/graphql/weather/supergraph.yaml:
--------------------------------------------------------------------------------

```yaml
federation_version: =2.10.0
subgraphs:
  weather:
    routing_url: http://localhost # this value is ignored
    schema:
      file: weather.graphql

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/server-config.template.json:
--------------------------------------------------------------------------------

```json
{
  "mcpServers": {
    "mcp-server": {
      "command": "../../target/release/apollo-mcp-server",
      "args": ["./<test-dir>/config.yaml"]
    }
  }
}

```

--------------------------------------------------------------------------------
/xtask/src/commands/changeset/scalars.rs:
--------------------------------------------------------------------------------

```rust
/// The GitHub API uses a Scalar called URI.  I promise it's still
/// just a String.
#[allow(clippy::upper_case_acronyms)] // name must match the GraphQL scalar exactly
pub(crate) type URI = String;

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/supergraph.yaml:
--------------------------------------------------------------------------------

```yaml
federation_version: =2.10.0
subgraphs:
  thespacedevs:
    routing_url: https://thespacedevs-production.up.railway.app/
    schema:
      file: api.graphql

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/apollo.config.json:
--------------------------------------------------------------------------------

```json
{
  "client": {
    "includes": ["./operations/**/*.graphql"],
    "service": {
      "name": "TheSpaceDevs",
      "localSchemaFile": "./api.graphql"
    }
  }
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/meter.rs:
--------------------------------------------------------------------------------

```rust
use opentelemetry::{global, metrics::Meter};
use std::sync::LazyLock;

/// Global OpenTelemetry meter for this crate, created lazily on first use
/// and named after the crate (via `CARGO_PKG_NAME`).
pub static METER: LazyLock<Meter> = LazyLock::new(|| global::meter(env!("CARGO_PKG_NAME")));

```

--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------

```json
{
    "recommendations": [
        "mkhl.direnv",
        "vadimcn.vscode-lldb",
        "streetsidesoftware.code-spell-checker",
        "apollographql.vscode-apollo"
    ]
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools.rs:
--------------------------------------------------------------------------------

```rust
//! MCP tools to allow an AI agent to introspect a GraphQL schema and execute operations.

pub(crate) mod execute;
pub(crate) mod introspect;
pub(crate) mod search;
pub(crate) mod validate;

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/platform_api/operation_collections/event.rs:
--------------------------------------------------------------------------------

```rust
use super::collection_poller::OperationData;
use super::error::CollectionError;

/// Events emitted while polling an operation collection.
pub enum CollectionEvent {
    /// The operations in the collection were loaded or refreshed.
    UpdateOperationCollection(Vec<OperationData>),
    /// Fetching or processing the collection failed.
    CollectionError(CollectionError),
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/logging/defaults.rs:
--------------------------------------------------------------------------------

```rust
use super::LogRotationKind;
use tracing::Level;

/// Default log level applied when none is configured.
pub(super) const fn log_level() -> Level {
    Level::INFO
}

/// Default log-file rotation cadence applied when none is configured.
pub(super) const fn default_rotation() -> LogRotationKind {
    LogRotationKind::Hourly
}

```

--------------------------------------------------------------------------------
/graphql/weather/operations/all.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAllWeatherData($coordinate: InputCoordinate!, $state: String!) {
  forecast(coordinate: $coordinate) {
    detailed
  }
  alerts(state: $state) {
    severity
    description
    instruction
  }
}

```

--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------

```yaml
coverage:
  status:
    project:
      default:
        # Should increase overall coverage on each PR
        target: auto
    patch:
      default:
        # Require 80% coverage on all new/modified code
        target: 80%

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/schemas.rs:
--------------------------------------------------------------------------------

```rust
use std::collections::HashMap;

use schemars::JsonSchema;

/// Produce the JSON Schema used for header-map fields in the config file.
pub(super) fn header_map(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {
    // A header map is just a hash map of string to string with extra validation
    // (the extra validation presumably happens at deserialization time — not shown here)
    HashMap::<String, String>::json_schema(generator)
}

```

--------------------------------------------------------------------------------
/graphql/weather/config.yaml:
--------------------------------------------------------------------------------

```yaml
transport:
  type: streamable_http
operations:
  source: local
  paths:
    - ./graphql/weather/operations
schema:
  source: local
  path: ./graphql/weather/api.graphql
introspection:
  execute:
    enabled: true
  introspect:
    enabled: true
  search:
    enabled: true
  validate:
    enabled: true
cors:
  enabled: true
  allow_any_origin: true

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/pq-manifest/config.yaml:
--------------------------------------------------------------------------------

```yaml
endpoint: https://thespacedevs-production.up.railway.app/
transport:
  type: stdio
operations:
  source: manifest
  path: ./pq-manifest/apollo.json
schema:
  source: local
  path: ./pq-manifest/api.graphql
overrides:
  mutation_mode: all
introspection:
  execute:
    enabled: true
  introspect:
    enabled: true
  search:
    enabled: true
  validate:
    enabled: true

```

--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/error.rs:
--------------------------------------------------------------------------------

```rust
use tantivy::TantivyError;

/// An error during indexing
#[derive(Debug, thiserror::Error)]
pub enum IndexingError {
    /// The underlying tantivy index reported a failure while building the index.
    #[error("Unable to index schema: {0}")]
    TantivyError(#[from] TantivyError),
}

/// An error in a search operation
#[derive(Debug, thiserror::Error)]
pub enum SearchError {
    /// The underlying tantivy engine reported a failure while executing a search.
    #[error("Search error: {0}")]
    TantivyError(#[from] TantivyError),
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/operations/GetAstronautsCurrentlyInSpace.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAstronautsCurrentlyInSpace {
  astronauts(filters: { inSpace: true, search: "" }) {
    results {
      id
      name
      timeInSpace
      lastFlight
      agency {
        name
        abbrev
        country {
          name
        }
      }
      nationality {
        name
        nationalityName
      }
      image {
        thumbnail
      }
    }
  }
}

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/operations/GetAstronautsCurrentlyInSpace.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAstronautsCurrentlyInSpace {
  astronauts(filters: { inSpace: true, search: "" }) {
    results {
      id
      name
      timeInSpace
      lastFlight
      agency {
        name
        abbrev
        country {
          name
        }
      }
      nationality {
        name
        nationalityName
      }
      image {
        thumbnail
      }
    }
  }
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/config.yaml:
--------------------------------------------------------------------------------

```yaml
endpoint: https://thespacedevs-production.up.railway.app/
transport:
  type: stdio
operations:
  source: local
  paths:
    - ./local-operations/operations
schema:
  source: local
  path: ./local-operations/api.graphql
overrides:
  mutation_mode: all
introspection:
  execute:
    enabled: true
  introspect:
    enabled: true
  search:
    enabled: true
  validate:
    enabled: true

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations.rs:
--------------------------------------------------------------------------------

```rust
//! Operations
//!
//! This module includes transformation utilities that convert GraphQL operations
//! into MCP tools.

mod mutation_mode;
mod operation;
mod operation_source;
mod raw_operation;
mod schema_walker;

pub use mutation_mode::MutationMode;
pub use operation::{Operation, operation_defs, operation_name};
pub use operation_source::OperationSource;
pub use raw_operation::RawOperation;

```

--------------------------------------------------------------------------------
/docs/source/licensing.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Apollo MCP Server Licensing
---

Source code for Apollo MCP Server in GitHub is covered by the MIT License. All files in the Apollo MCP Server repository are licensed under MIT, unless explicitly stated otherwise in a file header or license file in a subdirectory.

See the repository [LICENSE](https://github.com/apollographql/apollo-mcp-server/blob/main/LICENSE) for the full license text.
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/mutation_mode.rs:
--------------------------------------------------------------------------------

```rust
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

/// Access level controlling which GraphQL mutations the MCP server may expose or run.
#[derive(Clone, Default, Debug, Deserialize, Serialize, PartialEq, Copy, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum MutationMode {
    /// Don't allow any mutations
    #[default]
    None,
    /// Allow explicit mutations, but don't allow the LLM to build them
    Explicit,
    /// Allow the LLM to build mutations
    All,
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/schema_source.rs:
--------------------------------------------------------------------------------

```rust
use std::path::PathBuf;

use schemars::JsonSchema;
use serde::Deserialize;

/// Source for upstream GraphQL schema
// Deserialized from config with a `source` tag selecting the variant
// (e.g. `source: local` / `source: uplink`).
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(tag = "source", rename_all = "snake_case")]
pub enum SchemaSource {
    /// Schema should be loaded (and watched) from a local file path
    Local { path: PathBuf },

    /// Fetch the schema from uplink
    #[default]
    Uplink,
}

```

--------------------------------------------------------------------------------
/.idea/runConfigurations/format___test___clippy.xml:
--------------------------------------------------------------------------------

```
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="format - test - clippy" type="CompoundRunConfigurationType">
    <toRun name="Test apollo-mcp-server" type="CargoCommandRunConfiguration" />
    <toRun name="clippy" type="CargoCommandRunConfiguration" />
    <toRun name="format" type="CargoCommandRunConfiguration" />
    <method v="2" />
  </configuration>
</component>
```

--------------------------------------------------------------------------------
/.vscode/tasks.json:
--------------------------------------------------------------------------------

```json
{
    "version": "2.0.0",
    "tasks": [{
        "label": "Generate Apollo Manifest",
        "command": "npx",
        "args": ["@apollo/generate-persisted-query-manifest","generate-persisted-query-manifest","--config","persisted_queries.config.json"],
        "type": "shell",
        "problemMatcher": [],
        "options": {
            "cwd": "${workspaceFolder}/graphql/TheSpaceDevs"
        }
    }]
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries/event.rs:
--------------------------------------------------------------------------------

```rust
use std::fmt::Debug;
use std::fmt::Formatter;

/// Persisted Query events
/// Persisted Query events
pub enum Event {
    /// The persisted query manifest was updated
    UpdateManifest(Vec<(String, String)>),
}

// Manual `Debug` impl: manifest contents may contain sensitive operation
// bodies, so only a redacted placeholder is ever printed.
impl Debug for Event {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum, so this pattern is irrefutable.
        let Event::UpdateManifest(_) = self;
        f.write_str("UpdateManifest(<redacted>)")
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/config_schema.rs:
--------------------------------------------------------------------------------

```rust
//! Binary to output the JSON Schema for Apollo MCP Server configuration files

// Most runtime code is unused by this binary
#![allow(unused_imports, dead_code)]

use anyhow::Context;
use schemars::schema_for;

mod runtime;

/// Generate the JSON Schema for the server configuration format and print it
/// to stdout, failing with context if serialization is impossible.
fn main() -> anyhow::Result<()> {
    let schema = schema_for!(runtime::Config);
    let pretty = serde_json::to_string_pretty(&schema)
        .with_context(|| "Failed to generate schema")?;
    println!("{pretty}");
    Ok(())
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/platform_api/operation_collections/error.rs:
--------------------------------------------------------------------------------

```rust
use reqwest::header::{InvalidHeaderName, InvalidHeaderValue};

/// Errors that can occur while fetching or processing an operation collection.
#[derive(Debug, thiserror::Error)]
pub enum CollectionError {
    /// A configured header name was not a valid HTTP header name.
    #[error(transparent)]
    HeaderName(InvalidHeaderName),

    /// A configured header value was not a valid HTTP header value.
    #[error(transparent)]
    HeaderValue(InvalidHeaderValue),

    /// The HTTP request to the platform API failed.
    #[error(transparent)]
    Request(reqwest::Error),

    /// The platform API responded, but the response reported an error.
    #[error("Error in response: {0}")]
    Response(String),

    /// An operation's variables could not be parsed.
    #[error("invalid variables: {0}")]
    InvalidVariables(String),
}

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/config.yaml:
--------------------------------------------------------------------------------

```yaml
endpoint: https://thespacedevs-production.up.railway.app/
transport:
  type: streamable_http
operations:
  source: local
  paths:
    - ./graphql/TheSpaceDevs/operations
schema:
  source: local
  path: ./graphql/TheSpaceDevs/api.graphql
overrides:
  mutation_mode: all
introspection:
  execute:
    enabled: true
  introspect:
    enabled: true
  search:
    enabled: true
  validate:
    enabled: true
cors:
  enabled: true
  allow_any_origin: true

```

--------------------------------------------------------------------------------
/xtask/src/lib.rs:
--------------------------------------------------------------------------------

```rust
use std::convert::TryFrom;
use std::env;
use std::str;

use camino::Utf8PathBuf;
use once_cell::sync::Lazy;

const MANIFEST_DIR: &str = env!("CARGO_MANIFEST_DIR");

/// Absolute path of the workspace root, resolved once as the parent
/// directory of this crate's manifest directory.
pub static PKG_PROJECT_ROOT: Lazy<Utf8PathBuf> = Lazy::new(|| {
    Utf8PathBuf::try_from(MANIFEST_DIR)
        .expect("could not get the root directory.")
        .ancestors()
        .nth(1)
        .expect("could not find project root")
        .to_path_buf()
});

```

--------------------------------------------------------------------------------
/crates/apollo-schema-index/Cargo.toml:
--------------------------------------------------------------------------------

```toml
[package]
name = "apollo-schema-index"
authors.workspace = true
edition.workspace = true
license-file.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true

description = "GraphQL schema indexing"

[dependencies]
apollo-compiler.workspace = true
enumset = "1.1.6"
itertools = "0.14.0"
tantivy = "0.24.2"
thiserror.workspace = true
tracing.workspace = true

[dev-dependencies]
insta.workspace = true
rstest.workspace = true

[lints]
workspace = true

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/schema/schema_query.graphql:
--------------------------------------------------------------------------------

```graphql
query SupergraphSdlQuery(
    $apiKey: String!,
    $graphRef: String!,
    $ifAfterId: ID
) {
    routerConfig(
        ref: $graphRef,
        apiKey: $apiKey,
        ifAfterId: $ifAfterId
    ) {
        __typename
        ... on RouterConfigResult {
            id
            supergraphSDL
            minDelaySeconds
        }
        ... on Unchanged {
            id
            minDelaySeconds
        }
        ... on FetchError {
            code
            message
        }
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries/persisted_queries_manifest_query.graphql:
--------------------------------------------------------------------------------

```graphql
query PersistedQueriesManifestQuery(
    $apiKey: String!
    $graphRef: String!
    $ifAfterId: ID
) {
    persistedQueries(
        ref: $graphRef
        apiKey: $apiKey
        ifAfterId: $ifAfterId
    ) {
        __typename
        ... on PersistedQueriesResult {
            id
            minDelaySeconds
            chunks {
                id
                urls
            }
        }
        ... on Unchanged {
            id
            minDelaySeconds
        }
        ... on FetchError {
            code
            message
        }
    }
}
```

--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------

```json
{
    "rust-analyzer.check.command": "clippy",
    "[rust]": {
        "editor.defaultFormatter": "rust-lang.rust-analyzer",
        "editor.formatOnSave": true
    },
    "cSpell.words": [
        "apollographql",
        "clippy",
        "graphos",
        "insta",
        "peekable",
        "redactions",
        "reqwest",
        "rmcp",
        "rstest",
        "schemars",
        "serde",
        "splitn",
        "Streamable",
        "Subschema",
        "subschemas",
        "Supergraph",
        "thiserror",
        "webbrowser",
        "wiremock"
    ]
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/lib.rs:
--------------------------------------------------------------------------------

```rust
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]

pub mod auth;
pub mod cors;
pub mod custom_scalar_map;
pub mod errors;
pub mod event;
mod explorer;
mod graphql;
pub mod headers;
pub mod health;
mod introspection;
pub mod json_schema;
pub(crate) mod meter;
pub mod operations;
pub mod sanitize;
pub(crate) mod schema_tree_shake;
pub mod server;
pub mod telemetry_attributes;

/// These values are generated at build time by build.rs using telemetry.toml as input.
pub mod generated {
    pub mod telemetry {
        include!(concat!(env!("OUT_DIR"), "/telemetry_attributes.rs"));
    }
}

```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/.vscode/tasks.json:
--------------------------------------------------------------------------------

```json
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Generate Apollo Manifest",
      "command": "npx",
      "args": [
        "@apollo/generate-persisted-query-manifest",
        "generate-persisted-query-manifest",
        "--config",
        "persisted_queries.config.json"
      ],
      "type": "shell",
      "problemMatcher": []
    },
    {
        "label": "Run rover dev",
        "command": "rover",
        "args": [
          "dev",
          "--supergraph-config",
          "supergraph.yaml",
          "--mcp"
        ],
        "type": "shell",
        "problemMatcher": []
      }
  ]
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/operations/ExploreCelestialBodies.graphql:
--------------------------------------------------------------------------------

```graphql
query ExploreCelestialBodies($search: String, $limit: Int = 10, $offset: Int = 0) {
  celestialBodies(search: $search, limit: $limit, offset: $offset) {
    pageInfo {
      count
      next
      previous
    }
    results {
      id
      name
      
      # Physical characteristics
      diameter  # in kilometers
      mass     # in kilograms
      gravity  # in m/s²
      lengthOfDay
      atmosphere
      
      # Classification
      type {
        id
        name
      }
      
      # Visual and descriptive content
      image {
        url
        thumbnail
        credit
      }
      description
      wikiUrl
    }
  }
}
```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/operations/ExploreCelestialBodies.graphql:
--------------------------------------------------------------------------------

```graphql
query ExploreCelestialBodies($search: String, $limit: Int = 10, $offset: Int = 0) {
  celestialBodies(search: $search, limit: $limit, offset: $offset) {
    pageInfo {
      count
      next
      previous
    }
    results {
      id
      name
      
      # Physical characteristics
      diameter  # in kilometers
      mass     # in kilograms
      gravity  # in m/s²
      lengthOfDay
      atmosphere
      
      # Classification
      type {
        id
        name
      }
      
      # Visual and descriptive content
      image {
        url
        thumbnail
        credit
      }
      description
      wikiUrl
    }
  }
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/overrides.rs:
--------------------------------------------------------------------------------

```rust
use apollo_mcp_server::operations::MutationMode;
use schemars::JsonSchema;
use serde::Deserialize;

/// Overridable flags
#[derive(Debug, Deserialize, Default, JsonSchema)]
#[serde(default)]
pub struct Overrides {
    /// Disable type descriptions to save on context-window space
    pub disable_type_description: bool,

    /// Disable schema descriptions to save on context-window space
    pub disable_schema_description: bool,

    /// Expose a tool that returns the URL to open a GraphQL operation in Apollo Explorer (requires APOLLO_GRAPH_REF)
    pub enable_explorer: bool,

    /// Set the mutation mode access level for the MCP server
    pub mutation_mode: MutationMode,
}

```

--------------------------------------------------------------------------------
/xtask/src/commands/changeset/matching_pull_request.graphql:
--------------------------------------------------------------------------------

```graphql
# This operation is used to generate Rust code which lives in a file directly
# next to this with the same name but a `.rs` extension.  For instructions on
# how to generate the code, see the top of `./mod.rs`.
fragment PrInfo on PullRequest {
  url
  number
  author {
    __typename
    login
  }
  title
  closingIssuesReferences(last: 4) {
    nodes {
      url
      number
      repository {
        nameWithOwner
      }
    }
  }
  body
}
fragment PrSearchResult on SearchResultItemConnection {
  issueCount
  nodes {
    __typename
    ...PrInfo
  }
 }

query MatchingPullRequest($search: String!) {
  search(
    type: ISSUE
    query: $search
    first: 1
  ) {
    ...PrSearchResult
  }
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/operations/GetAstronautDetails.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAstronautDetails($astronautId: ID!) {
  astronaut(id: $astronautId) {
    id
    name
    status
    inSpace
    age

    # Birth and career dates
    dateOfBirth
    dateOfDeath
    firstFlight
    lastFlight

    # Space experience metrics
    timeInSpace
    evaTime # Extravehicular Activity time
    
    # Agency information
    agency {
      id
      name
      abbrev
      country {
        name
        nationalityName
      }
    }

    # Nationality
    nationality {
      name
      nationalityName
      alpha2Code
    }

    # Media
    image {
      url
      thumbnail
      credit
    }

    # Bio and links
    bio
    wiki

    # Social media
    socialMediaLinks {
      url
      socialMedia {
        name
        url
      }
    }
  }
}
```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/operations/GetAstronautDetails.graphql:
--------------------------------------------------------------------------------

```graphql
query GetAstronautDetails($astronautId: ID!) {
  astronaut(id: $astronautId) {
    id
    name
    status
    inSpace
    age

    # Birth and career dates
    dateOfBirth
    dateOfDeath
    firstFlight
    lastFlight

    # Space experience metrics
    timeInSpace
    evaTime # Extravehicular Activity time
    
    # Agency information
    agency {
      id
      name
      abbrev
      country {
        name
        nationalityName
      }
    }

    # Nationality
    nationality {
      name
      nationalityName
      alpha2Code
    }

    # Media
    image {
      url
      thumbnail
      credit
    }

    # Bio and links
    bio
    wiki

    # Social media
    socialMediaLinks {
      url
      socialMedia {
        name
        url
      }
    }
  }
}
```

--------------------------------------------------------------------------------
/xtask/src/main.rs:
--------------------------------------------------------------------------------

```rust
mod commands;

use anyhow::Result;
use clap::Parser;
use nu_ansi_term::Color::Green;

/// Parse CLI arguments and run the requested workflow.
fn main() -> Result<()> {
    Xtask::parse().run()
}

/// Top-level CLI parser for xtask workflows.
#[derive(Debug, clap::Parser)]
// Use clap's `#[command(...)]` attribute rather than the legacy
// `#[structopt(...)]` one, matching the `#[command(subcommand)]`
// attribute already used on the field below.
#[command(
    name = "xtask",
    about = "Workflows used locally and in CI for developing the Apollo MCP Server"
)]
struct Xtask {
    /// The subcommand to execute
    #[command(subcommand)]
    pub command: Command,
}

/// Subcommands supported by xtask.
#[derive(Debug, clap::Subcommand)]
pub enum Command {
    /// Produce or consume changesets
    #[command(subcommand)]
    Changeset(commands::changeset::Command),
}

impl Xtask {
    /// Execute the selected subcommand, printing a success banner on completion.
    pub fn run(&self) -> Result<()> {
        // Single-variant enum, so this pattern is irrefutable.
        let Command::Changeset(changeset) = &self.command;
        changeset.run()?;
        eprintln!("{}", Green.bold().paint("Success!"));
        Ok(())
    }
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/operations/SearchUpcomingLaunches.graphql:
--------------------------------------------------------------------------------

```graphql
# Fields searched - launch_designator, launch_service_provider__name, mission__name, name, pad__location__name, pad__name, rocket__configuration__manufacturer__abbrev, rocket__configuration__manufacturer__name, rocket__configuration__name, rocket__spacecraftflight__spacecraft__name. Codes are the best search terms to use. Single words are the next best alternative when you cannot use a code to search
query SearchUpcomingLaunches($query: String!) {
  upcomingLaunches(limit: 20, search: $query){
    pageInfo {
      count
    }
    results {
      id
      name
      weatherConcerns
      rocket {
        id
        configuration {
          fullName
        }
      }
      mission {
        name
        description
      }
      webcastLive
      provider {
        name
      }
    }
  }
}
```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/operations/SearchUpcomingLaunches.graphql:
--------------------------------------------------------------------------------

```graphql
# Fields searched - launch_designator, launch_service_provider__name, mission__name, name, pad__location__name, pad__name, rocket__configuration__manufacturer__abbrev, rocket__configuration__manufacturer__name, rocket__configuration__name, rocket__spacecraftflight__spacecraft__name. Codes are the best search terms to use. Single words are the next best alternative when you cannot use a code to search
query SearchUpcomingLaunches($query: String!) {
  upcomingLaunches(limit: 20, search: $query){
    pageInfo {
      count
    }
    results {
      id
      name
      weatherConcerns
      rocket {
        id
        configuration {
          fullName
        }
      }
      mission {
        name
        description
      }
      webcastLive
      provider {
        name
      }
    }
  }
}
```

--------------------------------------------------------------------------------
/docs/source/custom-scalars.mdx:
--------------------------------------------------------------------------------

````markdown
---
title: Custom Scalars Configuration
---

## Custom scalars configuration

You can specify a custom scalars configuration JSON file to map a custom scalar to a [JSON schema type](https://json-schema.org/understanding-json-schema/reference/type). The JSON file is an object with custom scalar names as keys and JSON schema types as values:

```json
{
  "MyCustomScalar": { "type": "string" }
}
```

Other than JSON schema type, an overriding description can also be provided. In the following example the description provided in the schema, `scalar description`, would get overridden by the description found in the custom scalar configuration file, `override description`:

```graphql
"""
scalar description
"""
scalar MyCustomScalar
```

```json
{
  "MyCustomScalar": { "type": "string", "description": "override description" }
}
```

````

--------------------------------------------------------------------------------
/.idea/runConfigurations/clippy.xml:
--------------------------------------------------------------------------------

```
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="clippy" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
    <option name="command" value="clippy" />
    <option name="workingDirectory" value="file://$PROJECT_DIR$" />
    <envs />
    <option name="emulateTerminal" value="true" />
    <option name="channel" value="DEFAULT" />
    <option name="requiredFeatures" value="true" />
    <option name="allFeatures" value="false" />
    <option name="withSudo" value="false" />
    <option name="buildTarget" value="REMOTE" />
    <option name="backtrace" value="SHORT" />
    <option name="isRedirectInput" value="false" />
    <option name="redirectInputPath" value="" />
    <method v="2">
      <option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
    </method>
  </configuration>
</component>
```

--------------------------------------------------------------------------------
/.idea/runConfigurations/format.xml:
--------------------------------------------------------------------------------

```
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="format" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
    <option name="command" value="fmt --all" />
    <option name="workingDirectory" value="file://$PROJECT_DIR$" />
    <envs />
    <option name="emulateTerminal" value="true" />
    <option name="channel" value="DEFAULT" />
    <option name="requiredFeatures" value="true" />
    <option name="allFeatures" value="false" />
    <option name="withSudo" value="false" />
    <option name="buildTarget" value="REMOTE" />
    <option name="backtrace" value="SHORT" />
    <option name="isRedirectInput" value="false" />
    <option name="redirectInputPath" value="" />
    <method v="2">
      <option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
    </method>
  </configuration>
</component>
```

--------------------------------------------------------------------------------
/.idea/runConfigurations/Test_apollo_mcp_server.xml:
--------------------------------------------------------------------------------

```
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Test apollo-mcp-server" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
    <option name="buildProfileId" value="test" />
    <option name="command" value="test --workspace" />
    <option name="workingDirectory" value="file://$PROJECT_DIR$" />
    <envs />
    <option name="emulateTerminal" value="true" />
    <option name="channel" value="DEFAULT" />
    <option name="requiredFeatures" value="true" />
    <option name="allFeatures" value="false" />
    <option name="withSudo" value="false" />
    <option name="buildTarget" value="REMOTE" />
    <option name="backtrace" value="SHORT" />
    <option name="isRedirectInput" value="false" />
    <option name="redirectInputPath" value="" />
    <method v="2">
      <option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
    </method>
  </configuration>
</component>
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/sanitize.rs:
--------------------------------------------------------------------------------

```rust
//! Provide sanitized type definitions suitable for an AI model.
//! For example, remove directives from GraphQL schema types.
use apollo_compiler::schema::{EnumType, FieldDefinition, ObjectType, ScalarType, UnionType};
use apollo_compiler::{ast, schema};

/// Produce a sanitized copy of a type definition, suitable for an AI model.
///
/// Currently this means stripping all directives from the definition.
pub trait Sanitize<T> {
    fn sanitize(self) -> T;
}

// Implementation for all schema directive types.
// The macro clears the `directives` field and leaves every other field
// untouched; the second argument names the concrete `DirectiveList`
// constructor, since `schema` and `ast` use different directive list types.
macro_rules! impl_sanitize {
    ($type:ty, $directive_list_type:path) => {
        impl Sanitize<$type> for $type {
            fn sanitize(self) -> Self {
                Self {
                    directives: $directive_list_type(vec![]),
                    ..self
                }
            }
        }
    };
}

impl_sanitize!(EnumType, schema::DirectiveList);
impl_sanitize!(FieldDefinition, ast::DirectiveList);
impl_sanitize!(ObjectType, schema::DirectiveList);
impl_sanitize!(UnionType, schema::DirectiveList);
impl_sanitize!(ScalarType, schema::DirectiveList);

```

--------------------------------------------------------------------------------
/.idea/runConfigurations/Run_spacedevs.xml:
--------------------------------------------------------------------------------

```
<component name="ProjectRunConfigurationManager">
  <configuration default="false" name="Run spacedevs" type="CargoCommandRunConfiguration" factoryName="Cargo Command">
    <option name="command" value="run --package apollo-mcp-server --bin apollo-mcp-server -- ./graphql/TheSpaceDevs/config.yaml" />
    <option name="workingDirectory" value="file://$PROJECT_DIR$" />
    <envs />
    <option name="emulateTerminal" value="true" />
    <option name="channel" value="DEFAULT" />
    <option name="requiredFeatures" value="true" />
    <option name="allFeatures" value="false" />
    <option name="withSudo" value="false" />
    <option name="buildTarget" value="REMOTE" />
    <option name="backtrace" value="SHORT" />
    <option name="isRedirectInput" value="false" />
    <option name="redirectInputPath" value="" />
    <method v="2">
      <option name="CARGO.BUILD_TASK_PROVIDER" enabled="true" />
    </method>
  </configuration>
</component>
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states/schema_configured.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::{Schema, validation::Valid};
use tracing::debug;

use crate::{errors::ServerError, operations::RawOperation};

use super::{Config, Starting};

pub(super) struct SchemaConfigured {
    pub(super) config: Config,
    pub(super) schema: Valid<Schema>,
}

impl SchemaConfigured {
    pub(super) async fn set_schema(
        self,
        schema: Valid<Schema>,
    ) -> Result<SchemaConfigured, ServerError> {
        debug!("Received schema:\n{}", schema);
        Ok(SchemaConfigured { schema, ..self })
    }

    pub(super) async fn set_operations(
        self,
        operations: Vec<RawOperation>,
    ) -> Result<Starting, ServerError> {
        debug!(
            "Received {} operations:\n{}",
            operations.len(),
            serde_json::to_string_pretty(&operations)?
        );
        Ok(Starting {
            config: self.config,
            schema: self.schema,
            operations,
        })
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/logging/parsers.rs:
--------------------------------------------------------------------------------

```rust
use std::{fmt::Display, marker::PhantomData, str::FromStr};

use serde::Deserializer;

pub(crate) fn from_str<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
    D: Deserializer<'de>,
    T: FromStr,
    <T as FromStr>::Err: Display,
{
    struct FromStrVisitor<Inner> {
        _phantom: PhantomData<Inner>,
    }
    impl<Inner> serde::de::Visitor<'_> for FromStrVisitor<Inner>
    where
        Inner: FromStr,
        <Inner as FromStr>::Err: Display,
    {
        type Value = Inner;

        fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
            formatter.write_str("a string")
        }

        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Inner::from_str(v).map_err(|e| serde::de::Error::custom(e.to_string()))
        }
    }

    deserializer.deserialize_str(FromStrVisitor {
        _phantom: PhantomData,
    })
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states/configuring.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::{Schema, validation::Valid};
use tracing::debug;

use crate::{errors::ServerError, operations::RawOperation};

use super::{Config, OperationsConfigured, SchemaConfigured};

pub(super) struct Configuring {
    pub(super) config: Config,
}

impl Configuring {
    pub(super) async fn set_schema(
        self,
        schema: Valid<Schema>,
    ) -> Result<SchemaConfigured, ServerError> {
        debug!("Received schema:\n{}", schema);
        Ok(SchemaConfigured {
            config: self.config,
            schema,
        })
    }

    pub(super) async fn set_operations(
        self,
        operations: Vec<RawOperation>,
    ) -> Result<OperationsConfigured, ServerError> {
        debug!(
            "Received {} operations:\n{}",
            operations.len(),
            serde_json::to_string_pretty(&operations)?
        );
        Ok(OperationsConfigured {
            config: self.config,
            operations,
        })
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server/states/operations_configured.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::{Schema, validation::Valid};
use tracing::debug;

use crate::{errors::ServerError, operations::RawOperation, server::states::Starting};

use super::Config;

pub(super) struct OperationsConfigured {
    pub(super) config: Config,
    pub(super) operations: Vec<RawOperation>,
}

impl OperationsConfigured {
    pub(super) async fn set_schema(self, schema: Valid<Schema>) -> Result<Starting, ServerError> {
        debug!("Received schema:\n{}", schema);
        Ok(Starting {
            config: self.config,
            operations: self.operations,
            schema,
        })
    }

    pub(super) async fn set_operations(
        self,
        operations: Vec<RawOperation>,
    ) -> Result<OperationsConfigured, ServerError> {
        debug!(
            "Received {} operations:\n{}",
            operations.len(),
            serde_json::to_string_pretty(&operations)?
        );
        Ok(OperationsConfigured { operations, ..self })
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/telemetry.toml:
--------------------------------------------------------------------------------

```toml
[attributes.apollo.mcp]
tool_name = "The tool name"
operation_id = "The operation id - either persisted query id, operation name, or unknown"
operation_source = "The operation source - either operation (local file/op collection), persisted query, or LLM generated"
request_id = "The request id"
success = "Success flag indicator"
raw_operation = "GraphQL operation text and metadata used for Tool generation"
client_name = "The client name that initializes with the MCP Server"
client_version = "The client version that initializes with the MCP Server"

[metrics.apollo.mcp]
"initialize.count" = "Number of times initialize has been called"
"tool.count" = "Number of times call_tool has been called"
"tool.duration" = "Duration of call_tool"
"list_tools.count" = "Number of times list_tools has been called"
"get_info.count" = "Number of times get_info has been called"
"operation.duration" = "Duration of graphql execute"
"operation.count" = "Number of times graphql execute has been called"

```

--------------------------------------------------------------------------------
/graphql/weather/persisted_queries/apollo.json:
--------------------------------------------------------------------------------

```json
{
  "format": "apollo-persisted-query-manifest",
  "version": 1,
  "operations": [
    {
      "id": "f4d7c9e3dca95d72be8b2ae5df7db1a92a29d8c2f43c1d3e04e30e7eb0fb23d",
      "clientName": "my-web-app",
      "body": "query GetAlerts($state: String!) { alerts(state: $state) { severity description instruction } }",
      "name": "GetAlerts",
      "type": "query"
    },
    {
      "id": "e2c1b89a5e4d95f6b8f7dfed7d9db192ea39d0cb34b3d4cd1bd7e0fbec23efb",
      "clientName": "my-web-app",
      "body": "query GetAllWeatherData($coordinate: InputCoordinate!, $state: String!) { forecast(coordinate: $coordinate) { detailed } alerts(state: $state) { severity description instruction } }",
      "name": "GetAllWeatherData",
      "type": "query"
    },
    {
      "id": "7f4c9e3dca95d72be8b2ae5df7db1a92a29d8c2f43c1d3e04e30e7eb0fb23d",
      "clientName": "my-web-app",
      "body": "query GetForecast($coordinate: InputCoordinate!) { forecast(coordinate: $coordinate) { detailed } }",
      "name": "GetForecast",
      "type": "query"
    }
  ]
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/Cargo.toml:
--------------------------------------------------------------------------------

```toml
[package]
name = "apollo-mcp-registry"
authors.workspace = true
edition.workspace = true
license-file.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true

description = "Registry providing schema and operations to the MCP Server"

[dependencies]
derive_more = { version = "2.0.1", default-features = false, features = [
  "from",
  "display",
] }
educe = "0.6.0"
futures.workspace = true
graphql_client = "0.14.0"
insta.workspace = true
notify = "8.0.0"
reqwest.workspace = true
secrecy.workspace = true
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
tokio.workspace = true
tokio-stream.workspace = true
tower = "0.5.2"
tracing.workspace = true
url.workspace = true
uuid = { version = "1.16.0", features = ["serde", "v4"] }
tracing-core.workspace = true
tracing-subscriber.workspace = true

[dev-dependencies]
test-log = { version = "0.2.16", default-features = false, features = [
  "trace",
] }
tracing-futures = { version = "0.2.5", features = ["futures-03"] }
wiremock = "0.6.3"

[lints]
workspace = true

```

--------------------------------------------------------------------------------
/docs/source/_sidebar.yaml:
--------------------------------------------------------------------------------

```yaml
switcher:
  heading: "Apollo MCP Server"
  versions:
    - label: v1
      latest: true
      href: ./
defaultOpenDepth: 2
items:
  - label: "Overview"
    href: "."
  - label: "Quickstart"
    href: "./quickstart"
  - label: "Define tools"
    href: "./define-tools"
  - label: "Configuration"
    children:
      - label: "YAML Config Reference"
        href: "./config-file"
      - label: "Custom Scalars"
        href: "./custom-scalars"
  - label: "Run the MCP Server"
    href: "./run"
  - label: "Debugging"
    href: "./debugging"
  - label: "Deployment"
    children:
      - label: "Overview"
        href: "./deploy"
      - label: "Health Checks"
        href: "./health-checks"
      - label: "CORS"
        href: "./cors"
  - label: "Authorization"
    href: "./auth"
  - label: "Telemetry"
    href: "./telemetry"
  - label: "Best Practices"
    href: "./best-practices"
  - label: "Licensing"
    href: "./licensing"
  - label: "Limitations"
    href: "./limitations"
  - label: "Guides"
    children:
      - label: "Authorization with Auth0"
        href: "./guides/auth-auth0"

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth/protected_resource.rs:
--------------------------------------------------------------------------------

```rust
use serde::Serialize;
use url::Url;

use super::Config;

/// OAuth 2.1 Protected Resource Response
// TODO: This might be better found in an existing rust crate (or contributed upstream to one)
#[derive(Serialize)]
pub(super) struct ProtectedResource {
    /// The URL of the resource
    resource: Url,

    /// List of authorization servers protecting this resource
    authorization_servers: Vec<Url>,

    /// List of authentication methods allowed
    bearer_methods_supported: Vec<String>,

    /// Scopes allowed to request from the authorization servers
    scopes_supported: Vec<String>,

    /// Link to documentation about this resource
    #[serde(skip_serializing_if = "Option::is_none")]
    resource_documentation: Option<Url>,
}

impl From<Config> for ProtectedResource {
    fn from(value: Config) -> Self {
        // The spec only supports header-based bearer auth.
        let bearer_methods_supported = vec![String::from("header")];
        ProtectedResource {
            resource: value.resource,
            authorization_servers: value.servers,
            bearer_methods_supported,
            scopes_supported: value.scopes,
            resource_documentation: value.resource_documentation,
        }
    }
}

```

--------------------------------------------------------------------------------
/xtask/Cargo.toml:
--------------------------------------------------------------------------------

```toml
[workspace]

[package]
name = "xtask"
version = "1.5.0"
authors = ["Apollo Graph, Inc. <[email protected]>"]
edition = "2021"
publish = false

[dependencies]
anyhow = "1"
camino = "1"
clap = { version = "4.5.1", features = ["derive"] }
cargo_metadata = "0.19.0"
# Only use the `clock` features of `chrono` to avoid the `time` dependency
# impacted by CVE-2020-26235.  https://github.com/chronotope/chrono/issues/602
# and https://github.com/chronotope/chrono/issues/1073 will explain more.
chrono = { version = "0.4.34", default-features = false, features = ["clock"] }
console = "0.15.8"
dialoguer = "0.11.0"
graphql_client = "0.14.0"
itertools = "0.14.0"
libc = "0.2"
memorable-wordlist = "0.1.7"
nu-ansi-term = "0.50"
once_cell = "1"
regex = "1.10.3"
reqwest = { version = "0.11", default-features = false, features = [
  "blocking",
  "json",
  "rustls-tls",
  "rustls-tls-native-roots",
] }
serde = { version = "1.0.197", features = ["derive"] }
serde_json = "1"
tempfile = "3"
tinytemplate = "1.2.1"
tokio = { version = "1.36.0", features = ["full"] }
which = "7.0.0"

[dev-dependencies]
insta = { version = "1.43.1", features = ["json", "redactions", "yaml"] }

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/schema_walker.rs:
--------------------------------------------------------------------------------

```rust
//! JSON Schema generation utilities
//!
//! The types in this module generate JSON schemas for GraphQL types by walking
//! the types recursively.

use apollo_compiler::{Schema as GraphQLSchema, ast::Type};
use schemars::Schema;
use serde_json::{Map, Value};

use crate::custom_scalar_map::CustomScalarMap;

mod name;
mod r#type;

/// Convert a GraphQL type into a JSON Schema.
///
/// * `r#type` - the GraphQL type to convert
/// * `schema` - the GraphQL schema the type belongs to
/// * `definitions` - shared map of generated definitions, used as a cache
/// * `custom_scalar_map` - optional JSON schemas for custom scalar types
/// * `description` - optional description to attach to the resulting schema
///
/// Note: This is recursive, which might cause a stack overflow if the type is
/// sufficiently nested / complex.
pub fn type_to_schema(
    r#type: &Type,
    schema: &GraphQLSchema,
    definitions: &mut Map<String, Value>,
    custom_scalar_map: Option<&CustomScalarMap>,
    description: Option<String>,
) -> Schema {
    // Delegate to the internal walker type, which converts into a `Schema`.
    r#type::Type {
        cache: definitions,
        custom_scalar_map,
        description: &description,
        schema,
        r#type,
    }
    .into()
}

/// Modifies a schema to include an optional description
fn with_desc(mut schema: Schema, description: &Option<String>) -> Schema {
    if let Some(desc) = description {
        schema
            .ensure_object()
            .entry("description")
            .or_insert(desc.clone().into());
    }

    schema
}

```

--------------------------------------------------------------------------------
/.github/workflows/canary-release.yml:
--------------------------------------------------------------------------------

```yaml
name: Canary Release
on:
  push:
    # don't run on tags, run on commits
    # https://github.com/orgs/community/discussions/25615
    tags-ignore:
      - "**"
    paths-ignore:
      - '.github/**'
      - '.cargo/**'
      - '.direnv/**'
      - '.vscode/**'
      - 'docs/**'
      - 'Cargo.*'
      - 'crates/**/Cargo.*'
      - '*.md'
    branches:
      - develop
  workflow_dispatch:

permissions:
  contents: read
  packages: write

concurrency:
  group: canary-${{ github.ref }}
  cancel-in-progress: true

jobs:
  compute_canary_version:
    runs-on: ubuntu-24.04
    outputs:
      version: ${{ steps.canary_version.outputs.version }}
    steps:
      - name: Compute canary version
        id: canary_version
        run: |
          SHORT_SHA=${GITHUB_SHA::7}
          DATE=$(date -u +%Y%m%dT%H%M%SZ)
          echo "version=canary-${DATE}-${SHORT_SHA}" >> "$GITHUB_OUTPUT"

  release_canary_container:
    needs: compute_canary_version
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
    uses: ./.github/workflows/release-container.yml
    with:
      version: ${{ needs.compute_canary_version.outputs.version }}
    secrets: inherit
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth/www_authenticate.rs:
--------------------------------------------------------------------------------

```rust
//! WWW Authenticate header definition.
//!
//! TODO: This might be nice to upstream to hyper.

use headers::{Header, HeaderValue};
use http::header::WWW_AUTHENTICATE;
use tracing::warn;
use url::Url;

/// `WWW-Authenticate` challenges this server can issue.
pub(super) enum WwwAuthenticate {
    Bearer { resource_metadata: Url },
}

impl Header for WwwAuthenticate {
    fn name() -> &'static http::HeaderName {
        &WWW_AUTHENTICATE
    }

    fn decode<'i, I>(_values: &mut I) -> Result<Self, headers::Error>
    where
        Self: Sized,
        I: Iterator<Item = &'i http::HeaderValue>,
    {
        // We don't care about decoding, so we do nothing here.
        Err(headers::Error::invalid())
    }

    fn encode<E: Extend<http::HeaderValue>>(&self, values: &mut E) {
        // There is currently only one challenge form, so this pattern is
        // irrefutable.
        let WwwAuthenticate::Bearer { resource_metadata } = self;
        let encoded = format!(
            r#"Bearer resource_metadata="{}""#,
            resource_metadata.as_str()
        );

        // TODO: This shouldn't error, but it can so we might need to do something else here
        match HeaderValue::from_str(&encoded) {
            Ok(value) => values.extend(Some(value)),
            Err(e) => warn!("could not construct WWW-AUTHENTICATE header: {e}"),
        }
    }
}

```

--------------------------------------------------------------------------------
/graphql/weather/api.graphql:
--------------------------------------------------------------------------------

```graphql
"""A weather alert"""
type Alert {
  """The severity of this alert"""
  severity: String

  """A description of the alert"""
  description: String

  """Information about how people should respond to the alert"""
  instruction: String
}

"""A coordinate, consisting of a latitude and longitude"""
type Coordinate {
  """The latitude of this coordinate"""
  latitude: String!

  """The longitude of this coordinate"""
  longitude: String!
}

"""A weather forecast"""
type Forecast {
  """The coordinate associated with this forecast"""
  coordinate: Coordinate!

  """
  The National Weather Service (NWS) URL where the forecast data can be read
  """
  forecastURL: String!

  """A detailed weather forecast from the National Weather Service (NWS)"""
  detailed: String!
}

"""A coordinate, consisting of a latitude and longitude"""
input InputCoordinate {
  """The latitude of this coordinate"""
  latitude: String!

  """The longitude of this coordinate"""
  longitude: String!
}

type Query {
  """Get the weather forecast for a coordinate"""
  forecast(coordinate: InputCoordinate!): Forecast

  """
  Get the weather alerts for a state
  """
  alerts(
    """The two-letter state abbreviation (e.g., 'CO' for Colorado)"""
    state: String!
  ): [Alert]
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/schema/event.rs:
--------------------------------------------------------------------------------

```rust
use super::SchemaState;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt::Result;

/// Schema events
pub enum Event {
    /// The schema was updated.
    UpdateSchema(SchemaState),

    /// There are no more updates to the schema
    NoMoreSchema,
}

impl Debug for Event {
    // The schema payload is deliberately redacted so SDL and launch ids never
    // end up in logs.
    fn fmt(&self, f: &mut Formatter) -> Result {
        let rendered = match self {
            Event::UpdateSchema(_) => "UpdateSchema(<redacted>)",
            Event::NoMoreSchema => "NoMoreSchema",
        };
        write!(f, "{rendered}")
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// `NoMoreSchema` carries no payload and is rendered verbatim.
    #[test]
    fn test_debug_event_no_more_schema() {
        let event = Event::NoMoreSchema;
        let output = format!("{:?}", event);
        assert_eq!(output, "NoMoreSchema");
    }

    /// The schema payload (SDL and launch id) must never leak into debug
    /// output.
    #[test]
    fn test_debug_redacts_update_schema() {
        let event = Event::UpdateSchema(SchemaState {
            sdl: "type Query { hello: String }".to_string(),
            launch_id: Some("test-launch-123".to_string()),
        });

        let output = format!("{:?}", event);
        assert_eq!(output, "UpdateSchema(<redacted>)");
        assert!(!output.contains("type Query"));
        assert!(!output.contains("test-launch-123"));
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/logging/log_rotation_kind.rs:
--------------------------------------------------------------------------------

```rust
use schemars::JsonSchema;
use serde::Deserialize;
use tracing_appender::rolling::Rotation;

#[derive(Debug, Deserialize, JsonSchema, Clone)]
pub enum LogRotationKind {
    #[serde(alias = "minutely", alias = "MINUTELY")]
    Minutely,
    #[serde(alias = "hourly", alias = "HOURLY")]
    Hourly,
    #[serde(alias = "daily", alias = "DAILY")]
    Daily,
    #[serde(alias = "never", alias = "NEVER")]
    Never,
}

impl From<LogRotationKind> for Rotation {
    fn from(value: LogRotationKind) -> Self {
        match value {
            LogRotationKind::Minutely => Rotation::MINUTELY,
            LogRotationKind::Hourly => Rotation::HOURLY,
            LogRotationKind::Daily => Rotation::DAILY,
            LogRotationKind::Never => Rotation::NEVER,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::LogRotationKind;
    use rstest::rstest;
    use tracing_appender::rolling::Rotation;

    /// Each `LogRotationKind` variant must convert to the corresponding
    /// `tracing_appender` `Rotation` constant.
    #[rstest]
    #[case(LogRotationKind::Minutely, Rotation::MINUTELY)]
    #[case(LogRotationKind::Hourly, Rotation::HOURLY)]
    #[case(LogRotationKind::Daily, Rotation::DAILY)]
    #[case(LogRotationKind::Never, Rotation::NEVER)]
    fn it_maps_to_rotation_correctly(
        #[case] log_rotation_kind: LogRotationKind,
        #[case] expected: Rotation,
    ) {
        let actual: Rotation = log_rotation_kind.into();
        assert_eq!(expected, actual);
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/testdata/minimal_supergraph.graphql:
--------------------------------------------------------------------------------

```graphql
schema
@link(url: "https://specs.apollo.dev/link/v1.0")
@link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) {
    query: Query
}

directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE

directive @join__field(
    graph: join__Graph
    requires: join__FieldSet
    provides: join__FieldSet
    type: String
    external: Boolean
    override: String
    usedOverridden: Boolean
) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION

directive @join__graph(name: String!, url: String!) on ENUM_VALUE

directive @join__type(
    graph: join__Graph!
    key: join__FieldSet
    extension: Boolean! = false
    resolvable: Boolean! = true
    isInterfaceObject: Boolean! = false
) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR

directive @join__unionMember(
    graph: join__Graph!
    member: String!
) repeatable on UNION

directive @link(
    url: String
    as: String
    for: link__Purpose
    import: [link__Import]
) repeatable on SCHEMA

directive @join__implements(
    graph: join__Graph!
    interface: String!
) repeatable on OBJECT | INTERFACE

scalar join__FieldSet
scalar link__Import

enum join__Graph {
    SUBGRAPH_A
    @join__graph(
        name: "subgraph-a"
        url: "http://graphql.subgraph-a.svc.cluster.local:4000"
    )
}

enum link__Purpose {
    SECURITY
    EXECUTION
}

type Query @join__type(graph: SUBGRAPH_A) {
    me: String @join__field(graph: SUBGRAPH_A)
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/json_schema.rs:
--------------------------------------------------------------------------------

```rust
/// Macro to generate a JSON schema from a type
///
/// Expands to a `serde_json` object (`Map<String, Value>`) holding the
/// draft-07 JSON schema for `$type`. Panics if the generated schema does not
/// serialize to a JSON object, which would indicate a bug in the type's
/// `JsonSchema` implementation.
#[macro_export]
macro_rules! schema_from_type {
    ($type:ty) => {{
        // Use Draft-07 for compatibility with MCP clients like VSCode/Copilot that don't support newer drafts.
        // See: https://github.com/microsoft/vscode/issues/251315
        let settings = schemars::generate::SchemaSettings::draft07();
        let generator = settings.into_generator();
        let schema = generator.into_root_schema_for::<$type>();
        match serde_json::to_value(schema) {
            Ok(Value::Object(schema)) => schema,
            _ => panic!("Failed to generate schema for {}", stringify!($type)),
        }
    }};
}

#[cfg(test)]
mod tests {
    use schemars::JsonSchema;
    use serde::Deserialize;
    use serde_json::Value;

    // Minimal input type used to exercise the macro.
    #[derive(JsonSchema, Deserialize)]
    struct TestInput {
        #[allow(dead_code)]
        field: String,
    }

    /// The macro should emit a draft-07 schema object for the given type.
    #[test]
    fn test_schema_from_type() {
        let schema = schema_from_type!(TestInput);

        assert_eq!(
            serde_json::to_value(&schema).unwrap(),
            serde_json::json!({
                "$schema": "http://json-schema.org/draft-07/schema#",
                "title": "TestInput",
                "type": "object",
                "properties": {
                    "field": {
                        "type": "string"
                    }
                },
                "required": ["field"]
            })
        );
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/endpoint.rs:
--------------------------------------------------------------------------------

```rust
//! Endpoint newtype
//!
//! This module defines a simple newtype around a Url for demarking a GraphQL
//! endpoint. This allows overlaying validation and default behaviour on top
//! of the wrapped URL.

use std::ops::Deref;

use serde::Deserialize;
use url::Url;

/// A GraphQL endpoint
#[derive(Debug)]
pub struct Endpoint(Url);

impl Endpoint {
    /// Unwrap the endpoint into its inner URL
    pub fn into_inner(self) -> Url {
        self.0
    }
}

impl Default for Endpoint {
    fn default() -> Self {
        Endpoint(defaults::endpoint())
    }
}

impl<'de> Deserialize<'de> for Endpoint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // This is a simple wrapper around URL, so we delegate to its
        // deserializer and wrap the parsed value.
        Url::deserialize(deserializer).map(Endpoint)
    }
}

impl Deref for Endpoint {
    type Target = Url;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

mod defaults {
    use url::Url;

    /// The default GraphQL endpoint: a server on the local machine, port 4000.
    pub(super) fn endpoint() -> Url {
        // SAFETY: This should always parse correctly and is considered a breaking
        // error otherwise. It is also explicitly tested in [test::default_endpoint_parses_correctly]
        #[allow(clippy::unwrap_used)]
        Url::parse("http://127.0.0.1:4000").unwrap()
    }

    #[cfg(test)]
    mod test {
        use super::endpoint;

        /// Guards the `unwrap` in `endpoint`: the hard-coded URL must parse.
        #[test]
        fn default_endpoint_parses_correctly() {
            endpoint();
        }
    }
}

```

--------------------------------------------------------------------------------
/docs/source/auth.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Authorization with Apollo MCP Server
redirectFrom:
    - /apollo-mcp-server/guides/auth
---

The Apollo MCP server supports authorizing clients (e.g., LLMs) in accordance with [the MCP specification](https://modelcontextprotocol.io/specification/2025-06-18/basic/authorization).

The current implementation passes through OAuth tokens from MCP clients directly to upstream GraphQL APIs. You can read more about [security considerations](/apollo-mcp-server/limitations#oauth-token-passthrough) when using this feature.

## Implement authorization with Apollo MCP Server

To implement authorization, you need an [OAuth 2.1-compliant](https://oauth.net/2.1/) Identity Provider (for example, your own in-house IdP or a third-party IdP such as Auth0, Okta, or Keycloak). You need the following values from your IdP:

- **URL**: The base URL of your Identity Provider, which is used to validate the JSON Web Tokens (JWTs) issued by it.
- **Audience**: Identifies the intended recipient of the token, typically a resource server or API. Represented by the `aud` claim in the JWT.
- **Scopes**: The scopes that the client will request. These scopes define the permissions granted to the client when it accesses the API.

Then, you [configure the MCP server with `auth` settings](/apollo-mcp-server/config-file#auth) and the [GraphOS Router for JWT authentication](/graphos/routing/security/jwt) using those IdP values.

For an example of how to configure Apollo MCP Server with Auth0, see [Authorization with Auth0](/apollo-mcp-server/guides/auth-auth0).
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/telemetry_attributes.rs:
--------------------------------------------------------------------------------

```rust
use crate::generated::telemetry::{ALL_ATTRS, TelemetryAttribute};
use opentelemetry::Key;
use std::collections::HashSet;

impl TelemetryAttribute {
    /// Converts this attribute into an OpenTelemetry [`Key`].
    ///
    /// Every variant's key name is its canonical string representation, so we
    /// delegate directly to `as_str` instead of enumerating each variant
    /// (the previous per-variant match arms were all identical).
    pub const fn to_key(self) -> Key {
        Key::from_static_str(self.as_str())
    }

    /// Returns all telemetry attributes except those present in `omitted`.
    pub fn included_attributes(omitted: HashSet<TelemetryAttribute>) -> Vec<TelemetryAttribute> {
        ALL_ATTRS
            .iter()
            .copied()
            .filter(|attr| !omitted.contains(attr))
            .collect()
    }
}

```

--------------------------------------------------------------------------------
/.github/renovate.json5:
--------------------------------------------------------------------------------

```
{
  // Allow for intellisense in editors
  $schema: "https://docs.renovatebot.com/renovate-schema.json",

  // List of rules to apply
  extends: [
    // Recommended best practices from renovate itself
    // See: https://docs.renovatebot.com/upgrade-best-practices/#whats-in-the-configbest-practices-preset
    "config:best-practices",

    // Apply our own internal best practices
    // See: https://github.com/apollographql/apollo-mcp-server/commits/main/.github/renovate.json5
    "github>apollographql/renovate-config-apollo-open-source:default.json5",

    // Update to the latest rust stable version as it releases.
    // See: https://github.com/Turbo87/renovate-config/blob/master/rust/updateToolchain.json
    "github>Turbo87/renovate-config//rust/updateToolchain",
  ],

  // Globally disable all automatic update PRs from renovate
  packageRules: [
    {
      enabled: false,
      matchPackageNames: ["*"],
    },
  ],

  // Automating Nix upgrades is currently in beta and opt-in only.
  // https://docs.renovatebot.com/modules/manager/nix/
  nix: {
    enabled: true,
  },

  // Globally enable vulnerability alerts
  //
  // Note: This needs extra configuration at the repository level, which is described in the link
  // below.
  //
  // See: https://docs.renovatebot.com/configuration-options/#vulnerabilityalerts
  vulnerabilityAlerts: {
    enabled: true,
  },

  // Disable the weekly lock file maintenance job that would refresh lock files to the latest versions.
  //
  // See: https://docs.renovatebot.com/configuration-options/#lockfilemaintenance
  lockFileMaintenance: {
    enabled: false,
  },
}

```

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------

```toml
[workspace]
resolver = "2"
members = [
  "crates/apollo-mcp-server",
  "crates/apollo-mcp-registry",
  "crates/apollo-schema-index",
]

[workspace.package]
authors = ["Apollo <[email protected]>"]
edition = "2024"
license-file = "LICENSE"
repository = "https://github.com/apollographql/apollo-mcp-server"
rust-version = "1.89.0"
version = "1.1.1"

[workspace.dependencies]
apollo-compiler = "1.27.0"
apollo-federation = "2.1.3"
futures = { version = "0.3.31", features = ["thread-pool"] }
insta = { version = "1.43.1", features = [
  "json",
  "redactions",
  "yaml",
  "glob",
] }
reqwest = { version = "0.12.15", default-features = false, features = [
  "gzip",
  "json",
  "native-tls-vendored",
] }
rstest = "0.25.0"
secrecy = { version = "0.10.3", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
thiserror = "2.0.12"
tokio = { version = "1.45.0", features = [
  "fs",
  "io-std",
  "macros",
  "net",
  "rt",
  "rt-multi-thread",
  "signal",
  "sync",
  "time",
] }
tokio-stream = "0.1"
tracing = "0.1.41"
tracing-core = "0.1.33"
tracing-subscriber = { version = "0.3.19", features = ["json"] }
url = { version = "2.4", features = ["serde"] }

[workspace.metadata]
crane.name = "apollo-mcp"

# This allows usage of coverage(off) attribute without causing a linting error.
# This attribute doesn't work in stable Rust yet and can be removed whenever it does.
# See https://github.com/apollographql/apollo-mcp-server/pull/372
[workspace.lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage_nightly)'] }

[workspace.lints.clippy]
exit = "deny"
expect_used = "deny"
indexing_slicing = "deny"
unwrap_used = "deny"
panic = "deny"

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/platform_api.rs:
--------------------------------------------------------------------------------

```rust
use secrecy::SecretString;
use std::fmt::Debug;
use std::time::Duration;
use url::Url;

pub mod operation_collections;

const DEFAULT_PLATFORM_API: &str = "https://graphql.api.apollographql.com/api/graphql";

/// Configuration for polling Apollo Uplink.
#[derive(Clone, Debug)]
pub struct PlatformApiConfig {
    /// The Apollo key: `<YOUR_GRAPH_API_KEY>`
    pub apollo_key: SecretString,

    /// The duration between polling
    pub poll_interval: Duration,

    /// The HTTP client timeout for each poll
    pub timeout: Duration,

    /// The URL of the Apollo registry
    pub registry_url: Url,
}

impl PlatformApiConfig {
    /// Creates a new `PlatformApiConfig`.
    ///
    /// When `registry_url` is `None`, the default Apollo Platform API endpoint
    /// is used.
    pub fn new(
        apollo_key: SecretString,
        poll_interval: Duration,
        timeout: Duration,
        registry_url: Option<Url>,
    ) -> Self {
        Self {
            apollo_key,
            poll_interval,
            timeout,
            // `unwrap_or_else` keeps the default-URL parse lazy: no work is
            // done when the caller supplies a URL (clippy: or_fun_call).
            #[allow(clippy::expect_used)]
            registry_url: registry_url.unwrap_or_else(|| {
                Url::parse(DEFAULT_PLATFORM_API).expect("default URL should be valid")
            }),
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use secrecy::{ExposeSecret, SecretString};
    use std::time::Duration;

    /// With no registry URL supplied, the config falls back to the default
    /// Platform API endpoint while preserving the other arguments verbatim.
    #[test]
    fn test_platform_api_config_with_none_endpoints() {
        let poll = Duration::from_secs(10);
        let http_timeout = Duration::from_secs(5);

        let config = PlatformApiConfig::new(
            SecretString::from("test_apollo_key"),
            poll,
            http_timeout,
            None,
        );

        assert_eq!(config.apollo_key.expose_secret(), "test_apollo_key");
        assert_eq!(config.poll_interval, poll);
        assert_eq!(config.timeout, http_timeout);
        assert_eq!(config.registry_url.as_str(), DEFAULT_PLATFORM_API);
    }
}

```

--------------------------------------------------------------------------------
/docs/source/best-practices.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Best Practices
subtitle: Guidelines for using Apollo MCP Server
---

## Writing your schema for efficient MCP tools

The schema is required for:

- **Tool Descriptions**: The schema provides type information used to generate tool descriptions. You can override these descriptions by adding comments to your operation files.
- **Input Validation**: The schema is used to translate GraphQL input types into JSON Schema, ensuring that AI models provide correctly formatted inputs.
- **Introspection Support**: If you enable the `introspection` option, the schema is used to provide information about available types and operations to AI models.

## Use contract variants to control AI access to graphs

GraphOS [contract variants](/graphos/platform/schema-management/delivery/contracts/overview) let you deliver different subsets of your graph to different consumers.

When running Apollo MCP Server with GraphOS, use contract variants whenever possible. This allows you to control which parts of your graph are accessible to AI by exposing only the necessary subsets.

In particular, we strongly recommend contract variants when using:

- [GraphOS-managed persisted queries](/apollo-mcp-server/define-tools#from-graphos-managed-persisted-queries)
- [Introspection](/apollo-mcp-server/define-tools#introspection-tools)

## Send client name header when using persisted queries

If you register a persisted query with a specific client name instead of `null`, you must configure the MCP Server to send the necessary header indicating the client name to the router.

Use [the `headers` option](/apollo-mcp-server/config-file#headers) when running the MCP Server to pass the header to the router. The default name of the header expected by the router is `apollographql-client-name`. To use a different header name, configure `telemetry.apollo.client_name_header` in router YAML configuration.

```

--------------------------------------------------------------------------------
/docs/source/health-checks.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Health Checks
---


## Health checks

Apollo MCP Server provides health check endpoints for monitoring server health and readiness. This feature is useful for load balancers, container orchestrators, and monitoring systems.

### Configuration

Health checks are only available when using the `streamable_http` transport and must be explicitly enabled:

```yaml title="Example health check configuration"
transport:
  type: streamable_http
  address: 127.0.0.1
  port: 8000
health_check:
  enabled: true
  path: /health
  readiness:
    allowed: 50
    interval:
      sampling: 10s
      unready: 30s
```

### Endpoints

The health check provides different responses based on query parameters:

| Endpoint            | Description        | Response                                                         |
| :------------------ | :----------------- | :--------------------------------------------------------------- |
| `GET /health`       | Basic health check | Always returns `{"status": "UP"}`                                |
| `GET /health?live`  | Liveness check     | Returns `{"status": "UP"}` if server is alive                    |
| `GET /health?ready` | Readiness check    | Returns `{"status": "UP"}` if server is ready to handle requests |

### Probes

The server tracks failed requests and automatically marks itself as unready if too many failures occur within a sampling interval:

- **Sampling interval**: How often the server checks the rejection count (default: 5 seconds)
- **Allowed rejections**: Maximum failures allowed before becoming unready (default: 100)
- **Recovery time**: How long to wait before attempting to recover (default: 2x sampling interval)

When the server becomes unready:

- The `/health?ready` endpoint returns HTTP 503 with `{"status": "DOWN"}`
- After the recovery period, the rejection counter resets and the server becomes ready again

This allows external systems to automatically route traffic away from unhealthy servers and back when they recover.

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/tool-tests.yaml:
--------------------------------------------------------------------------------

```yaml
tools:
  expected_tool_list: ['introspect', 'execute', 'search', 'validate', 'SearchUpcomingLaunches', 'ExploreCelestialBodies', 'GetAstronautDetails', 'GetAstronautsCurrentlyInSpace']

  tests:
    - name: 'Introspection of launches query'
      tool: 'introspect'
      params: 
        type_name: launches
        depth: 1
      expect: 
        success: true

    - name: 'Search for launches query'
      tool: 'search'
      params:
        terms: ['launches']
      expect:
        success: true
        result:
          contains: 'launches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection'

    - name: 'Validate a valid launches query'
      tool: 'validate'
      params:
        operation: >
          query GetLaunches {
            launches {
              results {
                id
                name
                launchDesignator
              }
            }
          }
      expect:
        success: true
        result:
          contains: 'Operation is valid'

    - name: 'Validates an invalid query'
      tool: 'validate'
      params:
        operation: >
          query { invalidField }
      expect:
        success: false
        error:
          contains: 'Error: type `Query` does not have a field `invalidField`'

    - name: 'Validates a launches query with an invalid field'
      tool: 'validate'
      params:
        operation: >
          query GetLaunches {
            launches {
              results {
                id
                invalid
              }
            }
          }
      expect:
        success: false
        error:
          contains: 'Error: type `Launch` does not have a field `invalid`'

    - name: 'Validates a launches query with a missing argument'
      tool: 'validate'
      params:
        operation: >
          query Agency {
            agency {
              id
            }
          }
      expect:
        success: false
        error:
          contains: 'Error: the required argument `Query.agency(id:)` is not provided'
```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/pq-manifest/tool-tests.yaml:
--------------------------------------------------------------------------------

```yaml
tools:
  expected_tool_list: ['introspect', 'execute', 'search', 'validate', 'SearchUpcomingLaunches', 'ExploreCelestialBodies', 'GetAstronautDetails', 'GetAstronautsCurrentlyInSpace']

  tests:
    - name: 'Introspection of launches query'
      tool: 'introspect'
      params: 
        type_name: launches
        depth: 1
      expect: 
        success: true

    - name: 'Search for launches query'
      tool: 'search'
      params:
        terms: ['launches']
      expect:
        success: true
        result:
          contains: 'launches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection'

    - name: 'Validate a valid launches query'
      tool: 'validate'
      params:
        operation: >
          query GetLaunches {
            launches {
              results {
                id
                name
                launchDesignator
              }
            }
          }
      expect:
        success: true
        result:
          contains: 'Operation is valid'

    - name: 'Validates an invalid query'
      tool: 'validate'
      params:
        operation: >
          query { invalidField }
      expect:
        success: false
        error:
          contains: 'Error: type `Query` does not have a field `invalidField`'

    - name: 'Validates a launches query with an invalid field'
      tool: 'validate'
      params:
        operation: >
          query GetLaunches {
            launches {
              results {
                id
                invalid
              }
            }
          }
      expect:
        success: false
        error:
          contains: 'Error: type `Launch` does not have a field `invalid`'

    - name: 'Validates a launches query with a missing argument'
      tool: 'validate'
      params:
        operation: >
          query Agency {
            agency {
              id
            }
          }
      expect:
        success: false
        error:
          contains: 'Error: the required argument `Query.agency(id:)` is not provided'

```

--------------------------------------------------------------------------------
/docs/source/limitations.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Limitations
---


## Known limitations

### OAuth token passthrough

Apollo MCP Server currently passes through OAuth tokens received from MCP clients directly to upstream GraphQL APIs.

#### Rationale 

The decision to pass through tokens stems from practical enterprise requirements that may conflict with the MCP specification. For example: 

- **Multi-user scenarios**: Enterprise GraphQL APIs often require direct user identity to perform granular access control and tenant isolation.
- **Existing enterprise patterns**: Many organizations have GraphQL APIs that depend on the original Authorization header to identify users and apply existing identity-based access controls.
- **Multi-tenant applications**: Upstream APIs frequently need to read the Authorization header to identify the tenant and apply appropriate data filtering.
- **User context propagation**: The MCP specification lacks clear guidance on how user/session/identity information should reach upstream APIs when they need to perform their own authorization logic.

#### Security implications

- Token passthrough can lead to confused deputy vulnerabilities.
- Upstream APIs might incorrectly trust tokens as if they were validated by the MCP server.
- Tokens intended for the MCP server audience might be inappropriately used with different services.
- However, if upstream APIs enforce proper audience (`aud` claim) validation, they should reject inappropriately scoped tokens.

#### Recommended workaround

- Use the MCP server only with GraphQL APIs that accept the same OAuth tokens and audiences
- Ensure your OAuth authorization server issues tokens with appropriate audience claims for both the MCP server and upstream APIs
- Verify that your upstream APIs properly validate token audiences
- Consider the security implications in your threat model, especially regarding OAuth trust boundaries

#### Future plans

We plan to address this limitation in a future release by implementing token exchange or separate authentication flows for upstream APIs, while still supporting the enterprise requirement for user identity propagation.

```

--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------

```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "node",
      "request": "launch",
      "name": "Run apollo-mcp-server [Weather][Streamable HTTP]",
      "runtimeExecutable": "cargo",
      "runtimeArgs": [
        "run",
        "--bin",
        "apollo-mcp-server",
        "--",
        "graphql/weather/config.yaml"
      ],
      "cwd": "${workspaceFolder}",
      "console": "integratedTerminal",
      "env": {
        "RUST_BACKTRACE": "1"
      }
    },
    {
      "type": "lldb",
      "request": "launch",
      "name": "Debug apollo-mcp-server [Weather][Streamable HTTP]",
      "cargo": {
        "args": ["build", "--bin=apollo-mcp-server", "--lib"],
        "filter": {
          "name": "apollo-mcp-server",
          "kind": "bin"
        }
      },
      "args": ["graphql/weather/config.yaml"],
      "cwd": "${workspaceFolder}",
      "env": {
        "RUST_BACKTRACE": "1",
        "APOLLO_MCP_LOGGING__LEVEL": "debug"
      }
    },
    {
      "type": "node",
      "request": "launch",
      "name": "Run apollo-mcp-server [TheSpaceDevs][Streamable HTTP]",
      "runtimeExecutable": "cargo",
      "runtimeArgs": [
        "run",
        "--bin",
        "apollo-mcp-server",
        "--",
        "graphql/TheSpaceDevs/config.yaml"
      ],
      "cwd": "${workspaceFolder}",
      "console": "integratedTerminal",
      "env": {
        "RUST_BACKTRACE": "1"
      }
    },
    {
      "type": "lldb",
      "request": "launch",
      "name": "Debug apollo-mcp-server [TheSpaceDevs][Streamable HTTP]",
      "cargo": {
        "args": ["build", "--bin=apollo-mcp-server", "--lib"],
        "filter": {
          "name": "apollo-mcp-server",
          "kind": "bin"
        }
      },
      "args": ["graphql/TheSpaceDevs/config.yaml"],
      "cwd": "${workspaceFolder}",
      "env": {
        "RUST_BACKTRACE": "1",
        "APOLLO_MCP_LOGGING__LEVEL": "debug"
      }
    },
    {
      "type": "node",
      "request": "launch",
      "name": "Run mcp-inspector",
      "runtimeExecutable": "npx",
      "runtimeArgs": ["@modelcontextprotocol/inspector"],
      "cwd": "${workspaceFolder}",
      "console": "integratedTerminal"
    }
  ]
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/introspection.rs:
--------------------------------------------------------------------------------

```rust
use schemars::JsonSchema;
use serde::Deserialize;

/// Introspection configuration
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(default)]
pub struct Introspection {
    /// Execution configuration for introspection
    pub execute: ExecuteConfig,

    /// Introspect configuration for allowing clients to run introspection
    pub introspect: IntrospectConfig,

    /// Search tool configuration
    pub search: SearchConfig,

    /// Validate configuration for checking operations before execution
    pub validate: ValidateConfig,
}

/// Execution-specific introspection configuration
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(default)]
pub struct ExecuteConfig {
    /// Enable introspection for execution (defaults to false)
    pub enabled: bool,
}

/// Introspect-specific introspection configuration
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(default)]
pub struct IntrospectConfig {
    /// Enable introspection requests (defaults to false)
    pub enabled: bool,

    /// Minify introspection results (defaults to false)
    pub minify: bool,
}

/// Search tool configuration
///
/// Unlike the other sections, this has a hand-written `Default` impl because
/// `index_memory_bytes` and `leaf_depth` have non-zero defaults.
#[derive(Debug, Deserialize, JsonSchema)]
#[serde(default)]
pub struct SearchConfig {
    /// Enable search tool
    pub enabled: bool,

    /// The amount of memory used for indexing (in bytes)
    pub index_memory_bytes: usize,

    /// The depth of subtype information to include from matching types
    /// (1 is just the matching type, 2 is the matching type plus the types it
    /// references, etc.). Defaults to 1.
    pub leaf_depth: usize,

    /// Minify search results
    pub minify: bool,
}

impl Default for SearchConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            index_memory_bytes: 50_000_000,
            leaf_depth: 1,
            minify: false,
        }
    }
}

/// Validation tool configuration
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(default)]
pub struct ValidateConfig {
    /// Enable validation tool (defaults to false)
    pub enabled: bool,
}

impl Introspection {
    /// Check if any introspection tools are enabled
    ///
    /// Uses logical `||` (short-circuiting) rather than bitwise `|`; the
    /// result is the same for `bool`s, but `||` is the idiomatic operator.
    pub fn any_enabled(&self) -> bool {
        self.execute.enabled
            || self.introspect.enabled
            || self.search.enabled
            || self.validate.enabled
    }
}

```

--------------------------------------------------------------------------------
/docs/source/debugging.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Debugging the MCP Server
---


## Debugging with MCP Inspector

[MCP Inspector](https://modelcontextprotocol.io/docs/tools/inspector) is a debugging tool for MCP servers.

### Debug locally over stdio transport

You can inspect a local Apollo MCP Server by running it with MCP Inspector.

1. Run the MCP Server with Inspector:

```yaml title="Example config for debugging over stdio"
operations:
  source: local
  paths:
    - <absolute path to this git repo>/graphql/weather/operations/
schema:
  source: local
  path: <absolute path to this git repo>/graphql/weather/api.graphql
transport:
  type: stdio
```

```sh
npx @modelcontextprotocol/inspector \
  target/debug/apollo-mcp-server <path to the preceding config>
```

<ExpansionPanel title="Example output">

```sh showLineNumbers=false disableCopy=true
Starting MCP inspector...
⚙️ Proxy server listening on port 6277
🔍 MCP Inspector is up and running at http://127.0.0.1:6274 🚀
```

</ExpansionPanel>

1. In a browser, go to the URL returned by Inspector, then click **Connect** and **List Tools**. You should see the tools for the operations you provided.

### Debug over the Streamable HTTP transport

When running the MCP Server over the Streamable HTTP transport, you can run MCP Inspector as follows.

1. Start the MCP Server in Streamable HTTP mode:

<Tip>

You can also deploy the server as a container using the instructions in [Deploying a Container](#deploying-a-container).

</Tip>

```yaml title="Example config for running in Streamable HTTP"
operations:
  source: local
  paths:
    - <absolute path to this git repo>/graphql/weather/operations/
schema:
  source: local
  path: <absolute path to this git repo>/graphql/weather/api.graphql
transport:
  type: streamable_http
  address: 127.0.0.1
  port: 8000
```

```sh
target/debug/apollo-mcp-server <path to the above config>
```

1. Start the MCP Inspector:

```sh
npx @modelcontextprotocol/inspector
```

1. In a browser, go to the URL returned by Inspector, then fill in the details:

   - **Transport Type**: Select `Streamable HTTP`
   - **URL**: Enter `http://127.0.0.1:8000/mcp`, where the port must match the `transport.port` option

1. Click **Connect** and **List Tools**. You should see the tools for the operations you provided.

```

--------------------------------------------------------------------------------
/.github/workflows/verify-changeset.yml:
--------------------------------------------------------------------------------

```yaml
name: Verify Changeset
on:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
    branches-ignore:
      - main
      - release/**
      - conflict/*
      - sync/*
    paths-ignore:
      - '.github/**'
      - '.cargo/**'
      - '.direnv/**'
      - '.vscode/**'
      - 'docs/**'
      - 'Cargo.*'
      - 'crates/**/Cargo.*'
      - '*.md'
  workflow_dispatch:

jobs:
  verify-changeset:
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-changeset') && !startsWith(github.head_ref, 'sync/') && !startsWith(github.head_ref, 'conflict/') && !github.event.pull_request.draft }}
    name: Verify
    runs-on: ubuntu-24.04
    permissions:
      pull-requests: write
      contents: read
    steps:
      - name: Verify changeset included
        uses: actions/github-script@v7
        with:
          script: |
            const dir = '.changesets/';
            const pr = context.payload.pull_request;
            const files = await github.paginate(
              github.rest.pulls.listFiles,
              { owner: context.repo.owner, repo: context.repo.repo, pull_number: pr.number, per_page: 100 }
            );
            const ok = files.some(f =>
              f.filename.startsWith(dir) &&
              ['added','modified','renamed'].includes(f.status)
            );
            if (!ok) {
              core.setFailed(`No changeset added to ${dir}.`);
            } else {
              core.info(`Changeset found under ${dir}.`);
            }
            core.setOutput('ok', ok ? 'true' : 'false');
      - name: Add changeset missing comment on failure
        uses: actions/github-script@v7
        if: failure()
        with:
          script: |
            const pr = context.payload.pull_request;
            await github.rest.issues.createComment({
             owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: pr.number,
              body: [
                "❌ **Changeset file missing for PR**",
                "",
                "All changes should include an associated changeset file.",
                "Please refer to [README](https://github.com/apollographql/apollo-mcp-server/blob/main/.changesets/README.md) for more information on generating changesets."
              ].join("\n")
            });

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/schema_walker/type.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::{Schema as GraphQLSchema, ast::Type as GraphQLType};
use schemars::{Schema as JSONSchema, json_schema};
use serde_json::{Map, Value};

use crate::custom_scalar_map::CustomScalarMap;

use super::name::Name;

/// Bundles a GraphQL type reference with the context needed to translate it
/// into a JSON schema (the full schema, a definition cache, and any custom
/// scalar overrides).
pub(super) struct Type<'a> {
    /// The definition cache which contains full schemas for nested types
    pub(super) cache: &'a mut Map<String, Value>,

    /// Custom scalar map for supplementing information from the GraphQL schema
    pub(super) custom_scalar_map: Option<&'a CustomScalarMap>,

    /// The optional description of the type, from comments in the schema
    pub(super) description: &'a Option<String>,

    /// The original GraphQL schema with all type information
    pub(super) schema: &'a GraphQLSchema,

    /// The actual type to translate into a JSON schema
    pub(super) r#type: &'a GraphQLType,
}

impl From<Type<'_>> for JSONSchema {
    /// Translate a GraphQL type reference into a JSON schema, recursing
    /// through list wrappers and delegating named types to [`Name`].
    fn from(value: Type) -> Self {
        let Type {
            cache,
            custom_scalar_map,
            description,
            schema,
            r#type,
        } = value;

        // JSON Schema assumes that all properties are nullable unless there is a
        // required field, so nullable and non-null forms are handled identically.
        match r#type {
            // Named types (nullable or not) are resolved via the name-based
            // translation, which handles scalars, enums, and object types.
            GraphQLType::Named(name) | GraphQLType::NonNullNamed(name) => Name {
                cache,
                custom_scalar_map,
                description,
                name,
                schema,
            }
            .into(),

            // List types wrap the translated element schema in an array schema.
            GraphQLType::List(element) | GraphQLType::NonNullList(element) => {
                let element_schema = JSONSchema::from(Type {
                    cache,
                    custom_scalar_map,
                    description,
                    schema,
                    r#type: element,
                });

                // Array items, unlike object properties, must explicitly allow
                // null when the element type is nullable.
                let items = if element.is_non_null() {
                    element_schema
                } else {
                    json_schema!({"oneOf": [
                        element_schema,
                        {"type": "null"},
                    ]})
                };

                json_schema!({
                    "type": "array",
                    "items": items,
                })
            }
        }
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/platform_api/operation_collections/operation_collections.graphql:
--------------------------------------------------------------------------------

```graphql
# Fields shared by the operation-fetching queries below: the operation name
# plus the latest revision's body, headers, and variables.
fragment OperationData on OperationCollectionEntry {
    name
    currentOperationRevision {
      body
      headers {
        name
        value
      }
      variables
    }
}

# Fetch the full contents (including operation bodies, via OperationData) of a
# specific operation collection by ID.
query OperationCollectionQuery($operationCollectionId: ID!) {
    operationCollection(id: $operationCollectionId) {
        __typename
        ... on OperationCollection {
            operations {
                lastUpdatedAt
                id
                ...OperationData
            }
        }
        ... on NotFoundError {
            message
        }
        ... on PermissionError {
            message
        }
        ... on ValidationError {
            message
        }
    }
}

# Lightweight variant of OperationCollectionQuery: only IDs and timestamps —
# presumably used to detect changes cheaply before refetching full bodies.
query OperationCollectionPollingQuery($operationCollectionId: ID!) {
    operationCollection(id: $operationCollectionId) {
        __typename
        ... on OperationCollection {
            operations {
                lastUpdatedAt
                id
            }
        }
        ... on NotFoundError {
            message
        }
        ... on PermissionError {
            message
        }
        ... on ValidationError {
            message
        }
    }
}

# Fetch the full contents of a graph variant's default MCP operation
# collection, addressed by graph ref instead of collection ID.
query OperationCollectionDefaultQuery($graphRef: ID!) {
    variant(ref: $graphRef) {
        __typename
        ... on GraphVariant {
          mcpDefaultCollection {
            __typename
            ... on OperationCollection {
              operations {
                lastUpdatedAt
                id
                ...OperationData
              }
            }
            ... on PermissionError {
              message
            }
          }
        }
        ... on InvalidRefFormat {
            message
        }
    }
}

# Lightweight variant of OperationCollectionDefaultQuery: only IDs and
# timestamps, without the OperationData bodies.
query OperationCollectionDefaultPollingQuery($graphRef: ID!) {
    variant(ref: $graphRef) {
        __typename
        ... on GraphVariant {
          mcpDefaultCollection {
            __typename
            ... on OperationCollection {
              operations {
                id
                lastUpdatedAt
              }
            }
            ... on PermissionError {
              message
            }
          }
        }
        ... on InvalidRefFormat {
            message
        }
    }
}

# Fetch specific collection entries (with full bodies) by entry ID.
query OperationCollectionEntriesQuery($collectionEntryIds: [ID!]!) {
  operationCollectionEntries(collectionEntryIds: $collectionEntryIds) {
    id
    lastUpdatedAt
    ...OperationData
  }
}
```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/run_tests.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env bash
# Run mcp-server-tester tool tests against a locally built apollo-mcp-server,
# generating the tester's server-config from a JSON template.
set -euo pipefail

usage() {
  cat <<'USAGE'
Usage: run_tests.sh <test-dir>

Runs:
  npx mcp-server-tester tools <test-dir>/tool-tests.yaml --server-config <generated-config>

where <generated-config> is produced from server-config.template.json (or
$SERVER_CONFIG_TEMPLATE, if set) by substituting the literal token "<test-dir>".

Notes:
  - <test-dir> is resolved relative to this script's directory (not the caller's cwd),
    so calling: foo/bar/run_tests.sh local-directory
    uses:       foo/bar/local-directory/tool-tests.yaml
  - If ../../target/release/apollo-mcp-server (relative to this script) doesn't exist,
    it is built from the repo root (../../) with: cargo build --release
USAGE
  exit 1
}

[[ "${1:-}" == "-h" || "${1:-}" == "--help" || $# -eq 0 ]] && usage

RAW_DIR_ARG="${1%/}"  # strip trailing slash if present
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

# If absolute path, use it as-is; otherwise, resolve relative to the script dir.
# (Fix: TEST_DIR was previously reassigned from the raw argument after this
# resolution, which discarded the absolute path and broke runs from any cwd
# other than the script's own directory.)
if [[ "$RAW_DIR_ARG" = /* ]]; then
  TEST_DIR="$RAW_DIR_ARG"
else
  TEST_DIR="$(cd -P -- "$SCRIPT_DIR/$RAW_DIR_ARG" && pwd)"
fi

TESTS="$TEST_DIR/tool-tests.yaml"
MCP_CONFIG="$TEST_DIR/config.yaml"

# Sanity checks
[[ -f "$TESTS" ]]  || { echo "✗ Missing file: $TESTS";  exit 2; }
[[ -f "$MCP_CONFIG" ]] || { echo "✗ Missing file: $MCP_CONFIG"; exit 2; }

REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
BIN_PATH="$REPO_ROOT/target/release/apollo-mcp-server"

if [[ ! -x "$BIN_PATH" ]]; then
  echo "ℹ️  Binary not found at: $BIN_PATH"
  echo "➡️  Building release binary from: $REPO_ROOT"
  (cd "$REPO_ROOT" && cargo build --release)

  # Re-check after build
  if [[ ! -x "$BIN_PATH" ]]; then
    echo "✗ Build succeeded but binary not found/executable at: $BIN_PATH"
    exit 3
  fi
fi

# Template → generated server-config
TEMPLATE_PATH="${SERVER_CONFIG_TEMPLATE:-"$SCRIPT_DIR/server-config.template.json"}"
[[ -f "$TEMPLATE_PATH" ]] || { echo "✗ Missing server-config template: $TEMPLATE_PATH"; exit 4; }

TMP_DIR="$(mktemp -d)"
cleanup() { rm -rf "$TMP_DIR"; }
trap cleanup EXIT INT TERM # cleanup before exiting
GEN_CONFIG="$TMP_DIR/server-config.generated.json"

# Safe replacement for <test-dir> with absolute path (escapes \, &, and the
# sed delimiter |)
safe_dir="${TEST_DIR//\\/\\\\}"
safe_dir="${safe_dir//&/\\&}"
safe_dir="${safe_dir//|/\\|}"

# Replace the literal token "<test-dir>" everywhere
sed "s|<test-dir>|$safe_dir|g" "$TEMPLATE_PATH" > "$GEN_CONFIG"

# Run the command
npx -y [email protected] tools "$TESTS" --server-config "$GEN_CONFIG"
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth/networked_token_validator.rs:
--------------------------------------------------------------------------------

```rust
use jwks::{Jwk, Jwks};
use tracing::warn;
use url::Url;

use super::valid_token::ValidateToken;

/// Implementation of the `ValidateToken` trait which fetches key information
/// from the network.
pub(super) struct NetworkedTokenValidator<'a> {
    /// Accepted token audiences, exposed via `ValidateToken::get_audiences`
    audiences: &'a Vec<String>,
    /// Upstream OAuth/OIDC server URLs, exposed via `ValidateToken::get_servers`
    upstreams: &'a Vec<Url>,
}

impl<'a> NetworkedTokenValidator<'a> {
    /// Create a validator over the given accepted audiences and upstream
    /// OAuth/OIDC server URLs.
    pub fn new(audiences: &'a Vec<String>, upstreams: &'a Vec<Url>) -> Self {
        Self {
            audiences,
            upstreams,
        }
    }
}

/// Constructs the OIDC discovery URL by appending the well-known path to the oauth server URL.
fn build_oidc_url(oauth_server: &Url) -> Url {
    // Normalize away any trailing slash so the joined path never contains
    // "//". This ensures Keycloak URLs like /auth/realms/<realm>/ work correctly.
    let base_path = oauth_server.path().trim_end_matches('/').to_owned();

    let mut discovery_url = oauth_server.clone();
    discovery_url.set_path(&format!(
        "{base_path}/.well-known/oauth-authorization-server"
    ));
    discovery_url
}

impl ValidateToken for NetworkedTokenValidator<'_> {
    fn get_audiences(&self) -> &Vec<String> {
        self.audiences
    }

    fn get_servers(&self) -> &Vec<Url> {
        self.upstreams
    }

    /// Fetch the JWK identified by `key_id` from the server's OIDC discovery
    /// document, returning `None` (after logging a warning) when discovery
    /// fails or the key is absent.
    async fn get_key(&self, server: &Url, key_id: &str) -> Option<Jwk> {
        match Jwks::from_oidc_url(build_oidc_url(server)).await {
            Ok(jwks) => jwks.keys.get(key_id).cloned(),
            Err(e) => {
                warn!("could not fetch OIDC information from {server}: {e}");
                None
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rstest::rstest;

    // Parameterized over real-world IdP URL shapes: with and without a
    // trailing slash, both must yield a single-slash well-known path.
    #[rstest]
    // Keycloak
    #[case(
        "https://sso.company.com/auth/realms/my-realm",
        "https://sso.company.com/auth/realms/my-realm/.well-known/oauth-authorization-server"
    )]
    #[case(
        "https://sso.company.com/auth/realms/my-realm/",
        "https://sso.company.com/auth/realms/my-realm/.well-known/oauth-authorization-server"
    )]
    // Auth0
    #[case(
        "https://dev-abc123.us.auth0.com",
        "https://dev-abc123.us.auth0.com/.well-known/oauth-authorization-server"
    )]
    // WorkOS
    #[case(
        "https://abb-123-staging.authkit.app/",
        "https://abb-123-staging.authkit.app/.well-known/oauth-authorization-server"
    )]
    fn test_build_oidc_discovery_url(#[case] input: &str, #[case] expected: &str) {
        let oauth_url = Url::parse(input).unwrap();
        let oidc_url = build_oidc_url(&oauth_url);

        assert_eq!(oidc_url.as_str(), expected);
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/telemetry/sampler.rs:
--------------------------------------------------------------------------------

```rust
use schemars::JsonSchema;
use serde::Deserialize;

/// Trace sampling configuration.
///
/// Untagged for deserialization: a bare number becomes `RatioBased`, otherwise
/// the value is tried as a [`Sampler`].
#[derive(Clone, Debug, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields, untagged)]
pub(crate) enum SamplerOption {
    /// Sample a given fraction. Fractions >= 1 will always sample.
    RatioBased(f64),
    /// A fixed always-on / always-off decision
    Always(Sampler),
}

/// A fixed sampling decision, deserialized from `always_on` / `always_off`.
#[derive(Clone, Debug, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields, rename_all = "snake_case")]
pub(crate) enum Sampler {
    /// Always sample
    AlwaysOn,
    /// Never sample
    AlwaysOff,
}

impl From<Sampler> for opentelemetry_sdk::trace::Sampler {
    /// Direct one-to-one mapping onto the OpenTelemetry SDK samplers.
    fn from(sampler: Sampler) -> Self {
        use opentelemetry_sdk::trace::Sampler as OtelSampler;
        match sampler {
            Sampler::AlwaysOn => OtelSampler::AlwaysOn,
            Sampler::AlwaysOff => OtelSampler::AlwaysOff,
        }
    }
}

impl From<SamplerOption> for opentelemetry_sdk::trace::Sampler {
    /// Convert the configured option into an SDK sampler: ratios map to
    /// `TraceIdRatioBased`, fixed decisions delegate to `From<Sampler>`.
    fn from(option: SamplerOption) -> Self {
        use opentelemetry_sdk::trace::Sampler as OtelSampler;
        match option {
            SamplerOption::RatioBased(ratio) => OtelSampler::TraceIdRatioBased(ratio),
            SamplerOption::Always(sampler) => sampler.into(),
        }
    }
}

impl Default for SamplerOption {
    /// Default to always sampling.
    fn default() -> Self {
        Self::Always(Sampler::AlwaysOn)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn sampler_always_on_maps_to_otel_always_on() {
        assert!(matches!(
            Sampler::AlwaysOn.into(),
            opentelemetry_sdk::trace::Sampler::AlwaysOn
        ));
    }

    #[test]
    fn sampler_always_off_maps_to_otel_always_off() {
        assert!(matches!(
            Sampler::AlwaysOff.into(),
            opentelemetry_sdk::trace::Sampler::AlwaysOff
        ));
    }

    #[test]
    fn sampler_option_always_on_maps_to_otel_always_on() {
        assert!(matches!(
            SamplerOption::Always(Sampler::AlwaysOn).into(),
            opentelemetry_sdk::trace::Sampler::AlwaysOn
        ));
    }

    #[test]
    fn sampler_option_always_off_maps_to_otel_always_off() {
        assert!(matches!(
            SamplerOption::Always(Sampler::AlwaysOff).into(),
            opentelemetry_sdk::trace::Sampler::AlwaysOff
        ));
    }

    // Fixed: a floating-point literal inside a pattern (`TraceIdRatioBased(0.5)`)
    // is deprecated by rustc; bind the ratio and compare in a match guard.
    // Exact comparison is safe here because the ratio is stored verbatim.
    #[allow(clippy::float_cmp)]
    #[test]
    fn sampler_option_ratio_based_maps_to_otel_ratio_based_sampler() {
        let sampler: opentelemetry_sdk::trace::Sampler = SamplerOption::RatioBased(0.5).into();
        assert!(matches!(
            sampler,
            opentelemetry_sdk::trace::Sampler::TraceIdRatioBased(ratio) if ratio == 0.5
        ));
    }

    #[test]
    fn default_sampler_option_is_always_on() {
        assert!(matches!(
            SamplerOption::default(),
            SamplerOption::Always(Sampler::AlwaysOn)
        ));
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/event.rs:
--------------------------------------------------------------------------------

```rust
use crate::operations::RawOperation;
use apollo_mcp_registry::platform_api::operation_collections::error::CollectionError;
use apollo_mcp_registry::uplink::schema::event::Event as SchemaEvent;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt::Result;
use std::io;

/// MCP Server events
pub enum Event {
    /// The schema has been updated
    SchemaUpdated(SchemaEvent),

    /// The operations have been updated
    OperationsUpdated(Vec<RawOperation>),

    /// An error occurred when loading operations.
    /// NOTE(review): the second element appears to be the path of the source
    /// that failed, when known — confirm against the emitting call sites.
    OperationError(io::Error, Option<String>),

    /// An error occurred when loading operations from collection
    CollectionError(CollectionError),

    /// The server should gracefully shut down
    Shutdown,
}

impl Debug for Event {
    /// Human-readable event labels for logging.
    fn fmt(&self, f: &mut Formatter) -> Result {
        match self {
            Event::SchemaUpdated(event) => {
                write!(f, "SchemaUpdated({event:?})")
            }
            Event::OperationsUpdated(operations) => {
                // NOTE(review): label differs from the variant name
                // ("OperationsChanged" vs `OperationsUpdated`) — left as-is in
                // case log consumers match on it; confirm before renaming.
                write!(f, "OperationsChanged({operations:?})")
            }
            Event::OperationError(e, path) => {
                write!(f, "OperationError({e:?}, {path:?})")
            }
            Event::CollectionError(e) => {
                // Fixed: this arm previously printed "OperationError", making
                // collection failures indistinguishable from operation-load
                // failures in logs.
                write!(f, "CollectionError({e:?})")
            }
            Event::Shutdown => {
                write!(f, "Shutdown")
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_debug_event_schema_updated() {
        let event = Event::SchemaUpdated(SchemaEvent::NoMoreSchema);
        let output = format!("{:?}", event);
        assert_eq!(output, "SchemaUpdated(NoMoreSchema)");
    }

    #[test]
    fn test_debug_event_operations_updated() {
        let event = Event::OperationsUpdated(vec![]);
        let output = format!("{:?}", event);
        assert_eq!(output, "OperationsChanged([])");
    }

    #[test]
    fn test_debug_event_operation_error() {
        let event = Event::OperationError(std::io::Error::other("TEST"), None);
        let output = format!("{:?}", event);
        assert_eq!(
            output,
            r#"OperationError(Custom { kind: Other, error: "TEST" }, None)"#
        );
    }

    #[test]
    fn test_debug_event_collection_error() {
        let event = Event::CollectionError(CollectionError::Response("TEST".to_string()));
        let output = format!("{:?}", event);
        // Updated alongside the fix above: collection errors now print their
        // own variant name.
        assert_eq!(output, r#"CollectionError(Response("TEST"))"#);
    }

    #[test]
    fn test_debug_event_shutdown() {
        let event = Event::Shutdown;
        let output = format!("{:?}", event);
        assert_eq!(output, "Shutdown");
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/Cargo.toml:
--------------------------------------------------------------------------------

```toml
[package]
name = "apollo-mcp-server"
authors.workspace = true
edition.workspace = true
license-file.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
build = "build.rs"

default-run = "apollo-mcp-server"

[dependencies]
anyhow = "1.0.98"
apollo-compiler.workspace = true
apollo-federation.workspace = true
apollo-mcp-registry = { path = "../apollo-mcp-registry" }
apollo-schema-index = { path = "../apollo-schema-index" }
axum = "0.8.4"
axum-extra = { version = "0.10.1", features = ["typed-header"] }
axum-otel-metrics = "0.12.0"
axum-tracing-opentelemetry = "0.29.0"
bon = "3.6.3"
clap = { version = "4.5.36", features = ["derive", "env"] }
figment = { version = "0.10.19", features = ["env", "yaml"] }
futures.workspace = true
headers = "0.4.1"
http = "1.3.1"
humantime-serde = "1.1.1"
jsonschema = "0.33.0"
jsonwebtoken = "9"
jwks = "0.4.0"
lz-str = "0.2.1"
opentelemetry = "0.30.0"
opentelemetry-appender-log = "0.30.0"
opentelemetry-otlp = { version = "0.30.0", features = [
  "grpc-tonic",
  "tonic",
  "http-proto",
  "metrics",
  "trace",
] }
opentelemetry-resource-detectors = "0.9.0"
opentelemetry-semantic-conventions = "0.30.0"
opentelemetry-stdout = "0.30.0"
opentelemetry_sdk = { version = "0.30.0", features = [
  "spec_unstable_metrics_views",
] }
regex = "1.11.1"
reqwest-middleware = "0.4.2"
reqwest-tracing = { version = "0.5.8", features = ["opentelemetry_0_30"] }
reqwest.workspace = true
rmcp = { version = "0.8", features = [
  "server",
  "transport-io",
  "transport-sse-server",
  "transport-streamable-http-server",
] }
schemars = { version = "1.0.1", features = ["url2"] }
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
tokio.workspace = true
tokio-util = "0.7.15"
tower-http = { version = "0.6.6", features = ["cors", "trace"] }
tracing-appender = "0.2.3"
tracing-core.workspace = true
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing.workspace = true
url.workspace = true
async-trait = "0.1.89"

[dev-dependencies]
chrono = { version = "0.4.41", default-features = false, features = ["now"] }
figment = { version = "0.10.19", features = ["test"] }
insta.workspace = true
mockito = "1.7.0"
opentelemetry_sdk = { version = "0.30.0", features = ["testing"] }
rstest.workspace = true
tokio.workspace = true
tower = "0.5.2"
tracing-test = "0.2.5"

[build-dependencies]
cruet = "0.15.0"
prettyplease = "0.2.37"
quote = "1.0.40"
serde.workspace = true
syn = "2.0.106"
toml = "0.9.5"

[lints]
workspace = true

[[bin]]
name = "apollo-mcp-server"
path = "src/main.rs"

[[bin]]
name = "config-schema"
path = "src/config_schema.rs"
test = false

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/uplink.graphql:
--------------------------------------------------------------------------------

```graphql
"""
Schema for requests to Apollo Uplink
"""

type Query {
    """
    Fetch schema through router configuration
    """
    routerConfig(
        """
        The reference to a graph variant, like `engine@prod` or `engine` (i.e. `engine@current`).
        """
        ref: String!,

        """
        the API key to authenticate with
        """
        apiKey: String!,

        """
        When specified and the result is not newer, `Unchanged` is returned rather than `RouterConfigResult`.
        """
        ifAfterId: ID
    ): RouterConfigResponse!

    """
    Fetch persisted queries
    """
    persistedQueries(
        """
        The reference to a graph variant, like `engine@prod` or `engine` (i.e. `engine@current`).
        """
        ref: String!

        """
        the API key to authenticate with
        """
        apiKey: String!

        """
        When specified and the result is not newer, `Unchanged` is returned rather than `PersistedQueriesResult`.
        """
        ifAfterId: ID
    ): PersistedQueriesResponse!
}

union RouterConfigResponse = RouterConfigResult | Unchanged | FetchError

type RouterConfigResult {
    "Variant-unique identifier."
    id: ID!
    "The configuration as core schema."
    supergraphSDL: String!
    "Messages that should be reported back to the operators of this router, eg through logs and/or monitoring."
    messages: [Message!]!
    "Minimum delay before the next fetch should occur, in seconds."
    minDelaySeconds: Float!
}

type Message {
    level: MessageLevel!
    body: String!
}

enum MessageLevel {
    ERROR
    WARN
    INFO
}

# Success / not-modified / error outcomes for persistedQueries.
union PersistedQueriesResponse = PersistedQueriesResult | Unchanged | FetchError

type PersistedQueriesResult {
    """
    Uniquely identifies this version. Must be passed via ifAfterId for incremental updates.
    """
    id: ID!

    """
    Minimum seconds to wait before checking again on 'unchanged'
    """
    minDelaySeconds: Float!

    """
    Chunks of operations
    """
    chunks: [PersistedQueriesResultChunks!]
}

"""
A sublist of the persisted query result which can be fetched directly from a content-addressed storage
"""
type PersistedQueriesResultChunks {
    """
    Chunk ID
    """
    id: ID!

    """
    Locations to find the operations from
    """
    urls: [String!]!
}

type Unchanged {
    """
    Uniquely identifies this version. Must be passed via ifAfterId for subsequent checks.
    """
    id: ID!

    """
    Minimum seconds to wait before checking again
    """
    minDelaySeconds: Float!
}

enum FetchErrorCode {
    AUTHENTICATION_FAILED
    ACCESS_DENIED
    UNKNOWN_REF
    RETRY_LATER
    NOT_IMPLEMENTED_ON_THIS_INSTANCE
}

type FetchError {
    code: FetchErrorCode!
    message: String!
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/operation_source.rs:
--------------------------------------------------------------------------------

```rust
use std::path::PathBuf;

use schemars::JsonSchema;
use serde::Deserialize;

/// Source for loaded operations
///
/// Deserialized from config using a `source` tag with snake_case variant names
/// (e.g. `source: local`).
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(tag = "source", rename_all = "snake_case")]
pub enum OperationSource {
    /// Load operations from a GraphOS collection
    Collection {
        /// A collection ID, or the literal string "default" (case-insensitive)
        #[schemars(with = "String")]
        id: IdOrDefault,
    },

    /// Infer where to load operations based on other configuration options.
    ///
    /// Note: This setting tries to load the operations from introspection, if enabled
    /// or from the default operation collection when APOLLO_GRAPH_REF is set.
    #[default]
    Infer,

    /// Load operations by introspecting the schema
    ///
    /// Note: Requires introspection to be enabled
    Introspect,

    /// Load operations from local GraphQL files / folders
    Local { paths: Vec<PathBuf> },

    /// Load operations from a persisted queries manifest file
    Manifest { path: PathBuf },

    /// Load operations from uplink manifest
    Uplink,
}

/// Either a custom ID or the default variant
///
/// Has a custom `Deserialize` impl below that treats the string "default"
/// (case-insensitively) as [`IdOrDefault::Default`].
#[derive(Debug, PartialEq, Eq)]
pub enum IdOrDefault {
    /// The default tools for the variant (requires APOLLO_KEY)
    Default,

    /// The specific collection ID
    Id(String),
}

impl<'de> Deserialize<'de> for IdOrDefault {
    /// Deserialize from a string, mapping "default" (case-insensitively) to
    /// [`IdOrDefault::Default`] and any other string to [`IdOrDefault::Id`].
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct StringVisitor;
        impl serde::de::Visitor<'_> for StringVisitor {
            type Value = IdOrDefault;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("a string or 'default'")
            }

            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                if value.to_lowercase() == "default" {
                    Ok(IdOrDefault::Default)
                } else {
                    Ok(IdOrDefault::Id(value.to_string()))
                }
            }
        }

        deserializer.deserialize_str(StringVisitor)
    }
}

#[cfg(test)]
mod test {
    use super::IdOrDefault;

    // A regular string deserializes to Id, preserving the value verbatim.
    #[test]
    fn id_parses() {
        let id = "something";

        let actual: IdOrDefault =
            serde_json::from_value(serde_json::Value::String(id.into())).unwrap();
        let expected = IdOrDefault::Id(id.to_string());

        assert_eq!(actual, expected);
    }

    // The sentinel "default" is matched case-insensitively.
    #[test]
    fn default_parses() {
        let id = "dEfAuLt";

        let actual: IdOrDefault =
            serde_json::from_value(serde_json::Value::String(id.into())).unwrap();
        let expected = IdOrDefault::Default;

        assert_eq!(actual, expected);
    }
}

```

--------------------------------------------------------------------------------
/graphql/weather/weather.graphql:
--------------------------------------------------------------------------------

```graphql
# Connector-based subgraph: REST calls to the National Weather Service (NWS)
# API are declared via @source/@connect instead of resolver code.
extend schema
  @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@tag"])
  @link(
    url: "https://specs.apollo.dev/connect/v0.1"
    import: ["@connect", "@source"]
  )
  @source(
    name: "NWS"
    http: {
      baseURL: "https://api.weather.gov"
      headers: [
        { name: "User-Agent", value: "weather-app/1.0" }
        { name: "Accept", value: "application/geo+json" }
      ]
    }
  )

# Root queries, each backed by an NWS endpoint via @connect.
type Query {
  """
  Get the weather forecast for a coordinate
  """
  forecast(coordinate: InputCoordinate!): Forecast
    @connect(
      source: "NWS"
      http: { GET: "/points/{$args.coordinate.latitude},{$args.coordinate.longitude}" }
      selection: """
      coordinate: {
        latitude: $args.coordinate.latitude
        longitude: $args.coordinate.longitude
      }
      forecastURL: properties.forecast
      """
      entity: true
    )

  """
  Get the weather alerts for a state, using the two-letter abbreviation for the state - for example, CO for Colorado
  """
  alerts(state: String!): [Alert]
    @tag(name: "mcp")
    @connect(
      source: "NWS"
      http: { GET: "/alerts/active/area/{$args.state}" }
      selection: """
      $.features.properties {
        severity
        description
        instruction
      }
      """
    )
}

"""
A weather forecast
"""
type Forecast {
  """
  The coordinate associated with this forecast
  """
  coordinate: Coordinate!

  """
  The National Weather Service (NWS) URL where the forecast data can be read
  """
  forecastURL: String!

  """
  A detailed weather forecast from the National Weather Service (NWS)
  """
  detailed: String!
    @connect(
      http: {
        # GET: "{$this.forecastURL->urlSafe}" # TODO: Use this when urlSafe is implemented
        GET: "https://api.weather.gov/gridpoints/FFC/51,87/forecast" # TODO: remove this hardcoded value
        headers: [
          { name: "foo", value: "{$this.forecastURL}" } # required to make composition not throw a satisfiability error
          { name: "Accept", value: "application/geo+json" }
          { name: "User-Agent", value: "weather-app/1.0" }
        ]
      }
      selection: """
      $.properties.periods->first.detailedForecast
      """
    )
}

"""
A weather alert
"""
type Alert @tag(name: "mcp") {
  """
  The severity of this alert
  """
  severity: String

  """
  A description of the alert
  """
  description: String

  """
  Information about how people should respond to the alert
  """
  instruction: String
}

"""
A coordinate, consisting of a latitude and longitude
"""
input InputCoordinate {
  """
  The latitude of this coordinate
  """
  latitude: String!

  """
  The longitude of this coordinate
  """
  longitude: String!
}


"""
A coordinate, consisting of a latitude and longitude
"""
type Coordinate {
  """
  The latitude of this coordinate
  """
  latitude: String!

  """
  The longitude of this coordinate
  """
  longitude: String!
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/testdata/supergraph.graphql:
--------------------------------------------------------------------------------

```graphql
schema
@link(url: "https://specs.apollo.dev/link/v1.0")
@link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) {
    query: Query
}

directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE

directive @join__field(
    graph: join__Graph
    requires: join__FieldSet
    provides: join__FieldSet
    type: String
    external: Boolean
    override: String
    usedOverridden: Boolean
) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION

directive @join__graph(name: String!, url: String!) on ENUM_VALUE

directive @join__implements(
    graph: join__Graph!
    interface: String!
) repeatable on OBJECT | INTERFACE

directive @join__type(
    graph: join__Graph!
    key: join__FieldSet
    extension: Boolean! = false
    resolvable: Boolean! = true
    isInterfaceObject: Boolean! = false
) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR

directive @join__unionMember(
    graph: join__Graph!
    member: String!
) repeatable on UNION

directive @link(
    url: String
    as: String
    for: link__Purpose
    import: [link__Import]
) repeatable on SCHEMA

scalar join__FieldSet

enum join__Graph {
    ACCOUNTS
    @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev/")
    INVENTORY
    @join__graph(
        name: "inventory"
        url: "https://inventory.demo.starstuff.dev/"
    )
    PRODUCTS
    @join__graph(name: "products", url: "https://products.demo.starstuff.dev/")
    REVIEWS
    @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev/")
}

scalar link__Import

enum link__Purpose {
    SECURITY
    EXECUTION
}

# Product entity, keyed by `upc`, contributed by three subgraphs.
type Product
@join__type(graph: INVENTORY, key: "upc")
@join__type(graph: PRODUCTS, key: "upc")
@join__type(graph: REVIEWS, key: "upc") {
    upc: String!
    weight: Int
    @join__field(graph: INVENTORY, external: true)
    @join__field(graph: PRODUCTS)
    price: Int
    @join__field(graph: INVENTORY, external: true)
    @join__field(graph: PRODUCTS)
    inStock: Boolean @join__field(graph: INVENTORY)
    # Computed in INVENTORY; needs price and weight resolved first (requires).
    shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight")
    name: String @join__field(graph: PRODUCTS)
    reviews: [Review] @join__field(graph: REVIEWS)
}

# Root query type; each field is routed to its owning subgraph.
type Query
@join__type(graph: ACCOUNTS)
@join__type(graph: INVENTORY)
@join__type(graph: PRODUCTS)
@join__type(graph: REVIEWS) {
    me: User @join__field(graph: ACCOUNTS)
    topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS)
}

# Review entity, owned by the REVIEWS subgraph, keyed by `id`.
type Review @join__type(graph: REVIEWS, key: "id") {
    id: ID!
    body: String
    # REVIEWS provides `username` on the returned User.
    author: User @join__field(graph: REVIEWS, provides: "username")
    product: Product @join__field(graph: REVIEWS)
}

# User entity shared between ACCOUNTS and REVIEWS, keyed by `id`.
type User
@join__type(graph: ACCOUNTS, key: "id")
@join__type(graph: REVIEWS, key: "id") {
    id: ID!
    name: String @join__field(graph: ACCOUNTS)
    username: String
    @join__field(graph: ACCOUNTS)
    @join__field(graph: REVIEWS, external: true)
    reviews: [Review] @join__field(graph: REVIEWS)
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/pq-manifest/apollo.json:
--------------------------------------------------------------------------------

```json
{
  "format": "apollo-persisted-query-manifest",
  "version": 1,
  "operations": [
    {
      "id": "1417c051c5b1ba2fa41975fc02547c9c34c619c8694bf225df74e7b527575d5f",
      "name": "ExploreCelestialBodies",
      "type": "query",
      "body": "query ExploreCelestialBodies($search: String, $limit: Int = 10, $offset: Int = 0) {\n  celestialBodies(search: $search, limit: $limit, offset: $offset) {\n    pageInfo {\n      count\n      next\n      previous\n      __typename\n    }\n    results {\n      id\n      name\n      diameter\n      mass\n      gravity\n      lengthOfDay\n      atmosphere\n      type {\n        id\n        name\n        __typename\n      }\n      image {\n        url\n        thumbnail\n        credit\n        __typename\n      }\n      description\n      wikiUrl\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "5cc5c30ad71bdf7d57e4fa5a8428c2d49ebc3e16a3d17f21efbd1ad22b4ba70b",
      "name": "GetAstronautDetails",
      "type": "query",
      "body": "query GetAstronautDetails($astronautId: ID!) {\n  astronaut(id: $astronautId) {\n    id\n    name\n    status\n    inSpace\n    age\n    dateOfBirth\n    dateOfDeath\n    firstFlight\n    lastFlight\n    timeInSpace\n    evaTime\n    agency {\n      id\n      name\n      abbrev\n      country {\n        name\n        nationalityName\n        __typename\n      }\n      __typename\n    }\n    nationality {\n      name\n      nationalityName\n      alpha2Code\n      __typename\n    }\n    image {\n      url\n      thumbnail\n      credit\n      __typename\n    }\n    bio\n    wiki\n    socialMediaLinks {\n      url\n      socialMedia {\n        name\n        url\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "83af5184f29c1eb5ce9b0d6da11285829f2f155d3815affbe66b56fa249f7603",
      "name": "GetAstronautsCurrentlyInSpace",
      "type": "query",
      "body": "query GetAstronautsCurrentlyInSpace {\n  astronauts(filters: {inSpace: true, search: \"\"}) {\n    results {\n      id\n      name\n      timeInSpace\n      lastFlight\n      agency {\n        name\n        abbrev\n        country {\n          name\n          __typename\n        }\n        __typename\n      }\n      nationality {\n        name\n        nationalityName\n        __typename\n      }\n      image {\n        thumbnail\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "824e3c8a1612c32a315450abbd5c7aedc0c402fdf6068583a54461f5b67d55be",
      "name": "SearchUpcomingLaunches",
      "type": "query",
      "body": "query SearchUpcomingLaunches($query: String!) {\n  upcomingLaunches(limit: 20, search: $query) {\n    pageInfo {\n      count\n      __typename\n    }\n    results {\n      id\n      name\n      weatherConcerns\n      rocket {\n        id\n        configuration {\n          fullName\n          __typename\n        }\n        __typename\n      }\n      mission {\n        name\n        description\n        __typename\n      }\n      webcastLive\n      provider {\n        name\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    }
  ]
}
```

--------------------------------------------------------------------------------
/graphql/TheSpaceDevs/persisted_queries/apollo.json:
--------------------------------------------------------------------------------

```json
{
  "format": "apollo-persisted-query-manifest",
  "version": 1,
  "operations": [
    {
      "id": "1417c051c5b1ba2fa41975fc02547c9c34c619c8694bf225df74e7b527575d5f",
      "name": "ExploreCelestialBodies",
      "type": "query",
      "body": "query ExploreCelestialBodies($search: String, $limit: Int = 10, $offset: Int = 0) {\n  celestialBodies(search: $search, limit: $limit, offset: $offset) {\n    pageInfo {\n      count\n      next\n      previous\n      __typename\n    }\n    results {\n      id\n      name\n      diameter\n      mass\n      gravity\n      lengthOfDay\n      atmosphere\n      type {\n        id\n        name\n        __typename\n      }\n      image {\n        url\n        thumbnail\n        credit\n        __typename\n      }\n      description\n      wikiUrl\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "5cc5c30ad71bdf7d57e4fa5a8428c2d49ebc3e16a3d17f21efbd1ad22b4ba70b",
      "name": "GetAstronautDetails",
      "type": "query",
      "body": "query GetAstronautDetails($astronautId: ID!) {\n  astronaut(id: $astronautId) {\n    id\n    name\n    status\n    inSpace\n    age\n    dateOfBirth\n    dateOfDeath\n    firstFlight\n    lastFlight\n    timeInSpace\n    evaTime\n    agency {\n      id\n      name\n      abbrev\n      country {\n        name\n        nationalityName\n        __typename\n      }\n      __typename\n    }\n    nationality {\n      name\n      nationalityName\n      alpha2Code\n      __typename\n    }\n    image {\n      url\n      thumbnail\n      credit\n      __typename\n    }\n    bio\n    wiki\n    socialMediaLinks {\n      url\n      socialMedia {\n        name\n        url\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "83af5184f29c1eb5ce9b0d6da11285829f2f155d3815affbe66b56fa249f7603",
      "name": "GetAstronautsCurrentlyInSpace",
      "type": "query",
      "body": "query GetAstronautsCurrentlyInSpace {\n  astronauts(filters: {inSpace: true, search: \"\"}) {\n    results {\n      id\n      name\n      timeInSpace\n      lastFlight\n      agency {\n        name\n        abbrev\n        country {\n          name\n          __typename\n        }\n        __typename\n      }\n      nationality {\n        name\n        nationalityName\n        __typename\n      }\n      image {\n        thumbnail\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    },
    {
      "id": "824e3c8a1612c32a315450abbd5c7aedc0c402fdf6068583a54461f5b67d55be",
      "name": "SearchUpcomingLaunches",
      "type": "query",
      "body": "query SearchUpcomingLaunches($query: String!) {\n  upcomingLaunches(limit: 20, search: $query) {\n    pageInfo {\n      count\n      __typename\n    }\n    results {\n      id\n      name\n      weatherConcerns\n      rocket {\n        id\n        configuration {\n          fullName\n          __typename\n        }\n        __typename\n      }\n      mission {\n        name\n        description\n        __typename\n      }\n      webcastLive\n      provider {\n        name\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    }
  ]
}
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/errors.rs:
--------------------------------------------------------------------------------

```rust
use crate::introspection::tools::search::IndexingError;
use apollo_compiler::{Schema, ast::Document, validation::WithErrors};
use apollo_federation::error::FederationError;
use apollo_mcp_registry::platform_api::operation_collections::error::CollectionError;
use reqwest::header::{InvalidHeaderName, InvalidHeaderValue};
use rmcp::serde_json;
use tokio::task::JoinError;
use url::ParseError;

/// An error in operation parsing
///
/// Several variants carry an optional `source_path` naming the file the
/// operation came from; when present it is prefixed to the rendered message.
#[derive(Debug, thiserror::Error)]
pub enum OperationError {
    /// The operation text could not be parsed as a GraphQL document.
    #[error("Could not parse GraphQL document: {0}")]
    GraphQLDocument(Box<WithErrors<Document>>),

    /// Catch-all for internal invariant failures.
    #[error("Internal error: {0}")]
    Internal(String),

    /// An anonymous operation was found where a named one is required.
    #[error("{0}Operation is missing its required name: {1}", .source_path.as_ref().map(|s| format!("{s}: ")).unwrap_or_default(), operation)]
    MissingName {
        source_path: Option<String>,
        operation: String,
    },

    /// The document contained no operation definitions at all.
    #[error("{0}No operations defined", .source_path.as_ref().map(|s| format!("{s}: ")).unwrap_or_default())]
    NoOperations { source_path: Option<String> },

    /// JSON (de)serialization failed.
    #[error("Invalid JSON: {0}")]
    Json(#[from] serde_json::Error),

    /// A source that must contain exactly one operation contained `count`.
    #[error("{0}Too many operations. Expected 1 but got {1}", .source_path.as_ref().map(|s| format!("{s}: ")).unwrap_or_default(), count)]
    TooManyOperations {
        source_path: Option<String>,
        count: usize,
    },

    /// I/O error while reading an operation file.
    #[error(transparent)]
    File(#[from] std::io::Error),

    /// Loading an operation collection from the platform API failed.
    #[error("Error loading collection: {0}")]
    Collection(CollectionError),
}

/// An error in server initialization
#[derive(Debug, thiserror::Error)]
pub enum ServerError {
    /// A GraphQL document failed to parse.
    #[error("Could not parse GraphQL document: {0}")]
    GraphQLDocument(Box<WithErrors<Document>>),

    /// A GraphQL schema failed to parse.
    #[error("Could not parse GraphQL schema: {0}")]
    GraphQLSchema(Box<WithErrors<Schema>>),

    /// A schema supplied as a document failed to parse.
    // NOTE(review): renders the same message as `GraphQLSchema`, making the two
    // indistinguishable in logs — consider differentiating the wording.
    #[error("Could not parse GraphQL schema: {0}")]
    GraphQLDocumentSchema(Box<WithErrors<Document>>),

    /// The schema failed federation validation/processing.
    #[error("Federation error in GraphQL schema: {0}")]
    Federation(Box<FederationError>),

    /// JSON (de)serialization failed.
    #[error("Invalid JSON: {0}")]
    Json(#[from] serde_json::Error),

    /// Wraps an [`OperationError`] raised while building a tool's operation.
    #[error("Failed to create operation: {0}")]
    Operation(#[from] OperationError),

    /// I/O error while reading a file at startup.
    #[error("Could not open file: {0}")]
    ReadFile(#[from] std::io::Error),

    /// A configured header value is not a valid HTTP header value.
    #[error("invalid header value: {0}")]
    HeaderValue(#[from] InvalidHeaderValue),

    /// A configured header name is not a valid HTTP header name.
    #[error("invalid header name: {0}")]
    HeaderName(#[from] InvalidHeaderName),

    /// A configured header entry is malformed as a whole.
    #[error("invalid header: {0}")]
    Header(String),

    /// The custom scalar config file is not valid JSON.
    #[error("invalid custom_scalar_config: {0}")]
    CustomScalarConfig(serde_json::Error),

    /// The custom scalar config parsed but is not a valid JSON schema.
    #[error("invalid json schema: {0}")]
    CustomScalarJsonSchema(String),

    /// A required environment variable (named in the payload) is unset.
    #[error("Missing environment variable: {0}")]
    EnvironmentVariable(String),

    /// No tool sources configured: neither operations nor introspection.
    #[error("You must define operations or enable introspection")]
    NoOperations,

    /// No usable schema was found from any configured source.
    #[error("No valid schema was supplied")]
    NoSchema,

    /// A background task failed while starting the server.
    #[error("Failed to start server")]
    StartupError(#[from] JoinError),

    /// The rmcp service failed to initialize.
    #[error("Failed to initialize MCP server")]
    McpInitializeError(#[from] Box<rmcp::service::ServerInitializeError>),

    /// A configured URL could not be parsed.
    #[error(transparent)]
    UrlParseError(ParseError),

    /// Building the schema search index failed.
    #[error("Failed to index schema: {0}")]
    Indexing(#[from] IndexingError),

    /// Invalid CORS configuration.
    #[error("CORS configuration error: {0}")]
    Cors(String),
}

/// An MCP tool error: an alias for the protocol-level error type from the rmcp SDK.
pub type McpError = rmcp::model::ErrorData;

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries/manifest.rs:
--------------------------------------------------------------------------------

```rust
use std::collections::HashMap;
use std::ops::Deref;
use std::ops::DerefMut;

use serde::Deserialize;
use serde::Serialize;
use tower::BoxError;

/// The full identifier for an operation in a PQ list consists of an operation
/// ID and an optional client name.
///
/// Used as the key of [`PersistedQueryManifest`]'s internal map.
#[derive(Debug, Clone, Eq, Hash, PartialEq)]
pub struct FullPersistedQueryOperationId {
    /// The operation ID (usually a hash).
    pub operation_id: String,
    /// The client name associated with the operation; if None, can be any client.
    pub client_name: Option<String>,
}

/// A single operation containing an ID and a body.
///
/// Fields (de)serialize in camelCase (e.g. `clientName`); a missing
/// `clientName` deserializes to `None` per serde's default `Option` handling.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ManifestOperation {
    /// The operation ID (usually a hash).
    pub id: String,
    /// The operation body.
    pub body: String,
    /// The client name associated with the operation. If None, can be any client.
    pub client_name: Option<String>,
}

/// The format of each persisted query chunk returned from uplink.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SignedUrlChunk {
    // Manifest format tag; `validate` requires "apollo-persisted-query-manifest".
    pub format: String,
    // Manifest schema version; `validate` requires version 1.
    pub version: u64,
    // The operations contained in this chunk.
    pub operations: Vec<ManifestOperation>,
}

impl SignedUrlChunk {
    /// Consumes the chunk and returns it unchanged when its format tag and
    /// version are the ones this server understands.
    pub fn validate(self) -> Result<Self, BoxError> {
        match (self.format.as_str(), self.version) {
            // The only accepted combination: apollo manifest format, version 1.
            ("apollo-persisted-query-manifest", 1) => Ok(self),
            ("apollo-persisted-query-manifest", _) => {
                Err("persisted query manifest chunk version is not 1".into())
            }
            // A format mismatch is reported before any version mismatch.
            _ => Err("chunk format is not 'apollo-persisted-query-manifest'".into()),
        }
    }

    /// Parses a raw JSON chunk and then runs [`Self::validate`] on the result.
    pub fn parse_and_validate(raw_chunk: &str) -> Result<Self, BoxError> {
        serde_json::from_str::<SignedUrlChunk>(raw_chunk)
            .map_err(|parse_error| -> BoxError {
                format!("Could not parse persisted query manifest chunk: {parse_error}").into()
            })
            .and_then(Self::validate)
    }
}

/// An in memory cache of persisted queries.
///
/// Wraps a map from (operation id, optional client name) to operation body,
/// exposed through the `Deref`/`DerefMut` impls below.
#[derive(Debug, Clone, Default)]
pub struct PersistedQueryManifest {
    // Keyed by operation id plus optional client name.
    inner: HashMap<FullPersistedQueryOperationId, String>,
}

impl PersistedQueryManifest {
    /// Add a chunk to the manifest.
    ///
    /// Every operation in the chunk is inserted, overwriting any existing
    /// entry with the same id/client-name pair.
    pub fn add_chunk(&mut self, chunk: &SignedUrlChunk) {
        let entries = chunk.operations.iter().map(|operation| {
            let key = FullPersistedQueryOperationId {
                operation_id: operation.id.clone(),
                client_name: operation.client_name.clone(),
            };
            (key, operation.body.clone())
        });
        self.inner.extend(entries);
    }
}

impl From<Vec<ManifestOperation>> for PersistedQueryManifest {
    fn from(operations: Vec<ManifestOperation>) -> Self {
        // Build the key/value pairs by move and let `collect` assemble the map.
        let inner = operations
            .into_iter()
            .map(|operation| {
                (
                    FullPersistedQueryOperationId {
                        operation_id: operation.id,
                        client_name: operation.client_name,
                    },
                    operation.body,
                )
            })
            .collect();
        Self { inner }
    }
}

// Read-only access to the inner map via auto-deref (e.g. `manifest.get(..)`).
impl Deref for PersistedQueryManifest {
    type Target = HashMap<FullPersistedQueryOperationId, String>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

// Mutable counterpart, enabling `insert`/`remove` directly on the manifest.
impl DerefMut for PersistedQueryManifest {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

```

--------------------------------------------------------------------------------
/CHANGELOG_SECTION.md:
--------------------------------------------------------------------------------

```markdown
# [1.1.0] - 2025-10-16

## ❗ BREAKING ❗

### Change default port from 5000 to 8000 - @DaleSeo PR #417

The default server port has been changed from `5000` to `8000` to avoid conflicts with common development tools and services that typically use port 5000 (such as macOS AirPlay, Flask development servers, and other local services).

**Migration**: If you were relying on the default port 5000, you can continue using it by explicitly setting the port in your configuration file or command line arguments.

- Before 

```yaml
transport:
  type: streamable_http
```

- After

```yaml
transport:
  type: streamable_http
  port: 5000
```

## 🚀 Features

### feat: Add configuration option for metric temporality - @swcollard PR #413

Creates a new configuration option for telemetry to set the Metric temporality to either Cumulative (default) or Delta.

* Cumulative - The metric value will be the overall value since the start of the measurement.
* Delta - The metric will be the difference in the measurement since the last time it was reported.

Some observability vendors require that one is used over the other, so we want to support this configuration in the MCP Server.

### Add support for forwarding headers from MCP clients to GraphQL APIs - @DaleSeo PR #428

Adds opt-in support for dynamic header forwarding, which enables metadata for A/B testing, feature flagging, geo information from CDNs, or internal instrumentation to be sent from MCP clients to downstream GraphQL APIs. It automatically blocks hop-by-hop headers according to the guidelines in [RFC 7230, section 6.1](https://datatracker.ietf.org/doc/html/rfc7230#section-6.1), and it only works with the Streamable HTTP transport.

You can configure using the `forward_headers` setting:

```yaml
forward_headers:
  - x-tenant-id
  - x-experiment-id
  - x-geo-country
```

Please note that this feature is not intended for passing through credentials as documented in the best practices page.

### feat: Add mcp-session-id header to HTTP request trace attributes - @swcollard PR #421

Includes the value of the [Mcp-Session-Id](https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management) HTTP header as an attribute of the trace for HTTP requests to the MCP Server

## 🐛 Fixes

### Fix compatibility issue with VSCode/Copilot - @DaleSeo PR #447

This updates Apollo MCP Server’s tool schemas from [Draft 2020-12](https://json-schema.org/draft/2020-12) to [Draft‑07](https://json-schema.org/draft-07), which is more widely supported across different validators. VSCode/Copilot still validates against Draft‑07, so it rejects Apollo MCP Server’s tools. Our JSON schemas don’t rely on newer features, so downgrading improves compatibility across MCP clients with no practical impact.

## 🛠 Maintenance

### Update rmcp sdk to version 0.8.x - @swcollard PR #433 

Bumping the Rust MCP SDK version used in this server up to 0.8.x

### chore: Only initialize a single HTTP client for graphql requests - @swcollard PR #412

Currently the MCP Server spins up a new HTTP client every time it wants to make a request to the downstream graphql endpoint. This change creates a static reqwest client that gets initialized using LazyLock and reused on each graphql request.

This change is based on the suggestion from the reqwest [documentation](https://docs.rs/reqwest/latest/reqwest/struct.Client.html)
> "The Client holds a connection pool internally, so it is advised that you create one and reuse it."


```

--------------------------------------------------------------------------------
/scripts/windows/install.ps1:
--------------------------------------------------------------------------------

```
# Licensed under the MIT license
# <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# Installs the latest version of the Apollo MCP Server.
# Specify a specific version to install with the $VERSION variable.

# Apollo MCP Server version defined in apollo-mcp-server's Cargo.toml
# Note: Change this line manually during the release steps.
$package_version = 'v1.1.1'

# Orchestrates the install: validates the environment, resolves the version to
# download, fetches the tarball, and moves the binary into the current directory.
function Install-Binary($apollo_mcp_server_install_args) {
  $old_erroractionpreference = $ErrorActionPreference
  $ErrorActionPreference = 'stop'

  Initialize-Environment

  # If the VERSION env var is set, we use it instead
  # of the version defined in Apollo MCP Server's cargo.toml
  $download_version = if (Test-Path env:VERSION) {
    $Env:VERSION
  } else {
    $package_version
  }

  $exe = Download($download_version)

  # Place the extracted binary in the caller's current directory.
  Move-Item -Path $exe -Destination .

  Write-Host "Run `".\apollo-mcp-server.exe`" to start the server"

  # Restore the caller's error-handling preference.
  $ErrorActionPreference = $old_erroractionpreference
}

# Downloads the apollo-mcp-server release tarball for the given version,
# extracts it into a temp directory, and returns the path to the executable.
function Download($version) {
  $binary_download_prefix = $env:APOLLO_ROVER_BINARY_DOWNLOAD_PREFIX
  if (-not $binary_download_prefix) {
    $binary_download_prefix = "https://github.com/apollographql/apollo-mcp-server/releases/download"
  }
  $url = "$binary_download_prefix/$version/apollo-mcp-server-$version-x86_64-pc-windows-msvc.tar.gz"

  # Remove credentials from the URL for logging
  $safe_url = $url -replace "https://[^@]+@", "https://"

  # Fixed: this log line previously said "Rover" (left over from the rover installer).
  "Downloading Apollo MCP Server from $safe_url" | Out-Host
  $tmp = New-Temp-Dir
  $dir_path = "$tmp\apollo_mcp_server.tar.gz"
  $wc = New-Object Net.Webclient
  $wc.downloadFile($url, $dir_path)
  tar -xkf $dir_path -C "$tmp"
  return "$tmp\dist\apollo-mcp-server.exe"
}

# Verifies the machine can run the installer: PowerShell >= 5, a permissive
# execution policy, TLS 1.2 support, and a `tar` command on the PATH.
function Initialize-Environment() {
  If (($PSVersionTable.PSVersion.Major) -lt 5) {
    Write-Error "PowerShell 5 or later is required to install Apollo MCP Server."
    Write-Error "Upgrade PowerShell: https://docs.microsoft.com/en-us/powershell/scripting/setup/installing-windows-powershell"
    break
  }

  # show notification to change execution policy:
  $allowedExecutionPolicy = @('Unrestricted', 'RemoteSigned', 'ByPass')
  If ((Get-ExecutionPolicy).ToString() -notin $allowedExecutionPolicy) {
    Write-Error "PowerShell requires an execution policy in [$($allowedExecutionPolicy -join ", ")] to run Apollo MCP Server."
    Write-Error "For example, to set the execution policy to 'RemoteSigned' please run :"
    Write-Error "'Set-ExecutionPolicy RemoteSigned -scope CurrentUser'"
    break
  }

  # GitHub requires TLS 1.2
  If ([System.Enum]::GetNames([System.Net.SecurityProtocolType]) -notcontains 'Tls12') {
    Write-Error "Installing Apollo MCP Server requires at least .NET Framework 4.5"
    Write-Error "Please download and install it first:"
    Write-Error "https://www.microsoft.com/net/download"
    break
  }

  If (-Not (Get-Command 'tar')) {
    Write-Error "The tar command is not installed on this machine. Please install tar before installing Apollo MCP Server"
    # don't abort if invoked with iex that would close the PS session
    If ($myinvocation.mycommand.commandtype -eq 'Script') { return } else { exit 1 }
  }
}

# Creates a uniquely-named (GUID) directory under the system temp path and
# returns it (New-Item's output is the function's return value).
function New-Temp-Dir() {
  [CmdletBinding(SupportsShouldProcess)]
  param()
  $parent = [System.IO.Path]::GetTempPath()
  [string] $name = [System.Guid]::NewGuid()
  New-Item -ItemType Directory -Path (Join-Path $parent $name)
}

Install-Binary "$Args"

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/graphos.rs:
--------------------------------------------------------------------------------

```rust
use std::{ops::Not as _, time::Duration};

use apollo_mcp_registry::{
    platform_api::PlatformApiConfig,
    uplink::{Endpoints, SecretString, UplinkConfig},
};
use apollo_mcp_server::errors::ServerError;
use schemars::JsonSchema;
use serde::de::Error;
use serde::{Deserialize, Deserializer};
use url::Url;

#[cfg(test)]
use serde::Serialize;

const APOLLO_GRAPH_REF_ENV: &str = "APOLLO_GRAPH_REF";
const APOLLO_KEY_ENV: &str = "APOLLO_KEY";

/// Deserializes the uplink endpoint list from either a YAML list of URLs or a
/// single comma-separated string of URLs.
fn apollo_uplink_endpoints_deserializer<'de, D>(deserializer: D) -> Result<Vec<Url>, D::Error>
where
    D: Deserializer<'de>,
{
    // Untagged enum lets serde try the list form first, then the string form.
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum UrlListOrString {
        List(Vec<Url>),
        String(String),
    }

    let value = UrlListOrString::deserialize(deserializer)?;
    match value {
        UrlListOrString::List(urls) => Ok(urls),
        UrlListOrString::String(joined) => joined
            .split(',')
            .map(str::trim)
            .map(|candidate| {
                Url::parse(candidate).map_err(|parse_err| {
                    D::Error::custom(format!("Could not parse uplink endpoint URL: {parse_err}"))
                })
            })
            .collect(),
    }
}

/// Credentials to use with GraphOS
#[derive(Debug, Deserialize, Default, JsonSchema)]
#[cfg_attr(test, derive(Serialize))]
#[serde(default)]
pub struct GraphOSConfig {
    /// The apollo key
    // Skipped during test serialization so the secret never lands in snapshots.
    #[schemars(with = "Option<String>")]
    #[cfg_attr(test, serde(skip_serializing))]
    apollo_key: Option<SecretString>,

    /// The graph reference
    apollo_graph_ref: Option<String>,

    /// The URL to use for Apollo's registry
    apollo_registry_url: Option<Url>,

    /// List of uplink URL overrides
    // Accepts either a YAML list or a single comma-separated string.
    #[serde(deserialize_with = "apollo_uplink_endpoints_deserializer")]
    apollo_uplink_endpoints: Vec<Url>,
}

impl GraphOSConfig {
    /// Extract the apollo graph reference from the config or from the current env
    #[allow(clippy::result_large_err)]
    pub fn graph_ref(&self) -> Result<String, ServerError> {
        self.apollo_graph_ref
            .clone()
            .ok_or_else(|| ServerError::EnvironmentVariable(APOLLO_GRAPH_REF_ENV.to_string()))
    }

    /// Extract the apollo key from the config or from the current env
    #[allow(clippy::result_large_err)]
    fn key(&self) -> Result<SecretString, ServerError> {
        self.apollo_key
            .clone()
            // Fixed: this previously reported APOLLO_GRAPH_REF as the missing
            // variable, which is misleading when the key is what is absent.
            .ok_or_else(|| ServerError::EnvironmentVariable(APOLLO_KEY_ENV.to_string()))
    }

    /// Generate an uplink config based on configuration params
    #[allow(clippy::result_large_err)]
    pub fn uplink_config(&self) -> Result<UplinkConfig, ServerError> {
        let config = UplinkConfig {
            apollo_key: self.key()?,

            apollo_graph_ref: self.graph_ref()?,
            // Only override the default endpoints when some were configured.
            endpoints: self.apollo_uplink_endpoints.is_empty().not().then_some(
                Endpoints::Fallback {
                    urls: self.apollo_uplink_endpoints.clone(),
                },
            ),
            poll_interval: Duration::from_secs(10),
            timeout: Duration::from_secs(30),
        };

        Ok(config)
    }

    /// Generate a platform API config based on configuration params
    #[allow(clippy::result_large_err)]
    pub fn platform_api_config(&self) -> Result<PlatformApiConfig, ServerError> {
        let config = PlatformApiConfig::new(
            self.apollo_key
                .clone()
                // Lazy form so the error string is only built on failure.
                .ok_or_else(|| ServerError::EnvironmentVariable(APOLLO_KEY_ENV.to_string()))?,
            Duration::from_secs(30),
            Duration::from_secs(30),
            self.apollo_registry_url.clone(),
        );

        Ok(config)
    }
}

```

--------------------------------------------------------------------------------
/.github/workflows/release-container.yml:
--------------------------------------------------------------------------------

```yaml
name: Build Release Container
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
      - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
  workflow_dispatch:
    inputs: &release_inputs
      version:
        description: Version to publish
        required: true
        type: string
  workflow_call:
    inputs: *release_inputs

env:
  REGISTRY: ghcr.io
  FQDN: ghcr.io/${{ github.repository }}
  VERSION: ${{ inputs.version || github.ref_name }}

jobs:
  # Build a container for x86_64 and aarch64 linux
  build:
    name: Release Container
    strategy:
      matrix:
        os: ["ubuntu-24.04", "ubuntu-24.04-arm"]
    runs-on: ${{ matrix.os }}
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.ref }}

      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          restore-prefixes-first-match: build-${{ runner.os }}-
          # We don't want to affect the cache when building the container
          purge: false
          save: false

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - id: build
        name: Build Container
        shell: bash
        run: |
          nix run .#streamImage | docker image load
          echo "id=`docker image ls -q | head -n1`" >> $GITHUB_OUTPUT
          echo "arch=`docker image ls --format '{{ .Tag }}' | head -n1`" >> $GITHUB_OUTPUT

      - id: deploy
        name: Tag and push the container
        env:
          TAG: ${{ env.VERSION }}-${{ steps.build.outputs.arch }}
        run: |
          docker image tag "${{ steps.build.outputs.id }}" "$FQDN:$TAG"
          docker image push "$FQDN:$TAG"
          echo "digest=`docker manifest inspect $FQDN:$TAG --verbose | nix run --inputs-from .# nixpkgs#jq -- -r .Descriptor.digest`" >> $GITHUB_OUTPUT

      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v2
        with:
          subject-name: ${{ env.FQDN }}
          subject-digest: ${{ steps.deploy.outputs.digest }}
          push-to-registry: true

  bundle:
    name: Bundle into multiarch container
    needs: build
    runs-on: ubuntu-24.04
    steps:
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create multiarch manifest
        run: |
          docker manifest create $FQDN:$VERSION $FQDN:$VERSION-amd64 $FQDN:$VERSION-arm64
          docker manifest annotate $FQDN:$VERSION $FQDN:$VERSION-amd64 --arch amd64
          docker manifest annotate $FQDN:$VERSION $FQDN:$VERSION-arm64 --arch arm64

          docker manifest create $FQDN:latest $FQDN:$VERSION-amd64 $FQDN:$VERSION-arm64
          docker manifest annotate $FQDN:latest $FQDN:$VERSION-amd64 --arch amd64
          docker manifest annotate $FQDN:latest $FQDN:$VERSION-arm64 --arch arm64
      - name: Push the multiarch manifests
        shell: bash
        run: |
          docker manifest push $FQDN:$VERSION
          
          # push :latest only if version DOES NOT start with canary OR end with -rc.<digits>
          if [[ ! "$VERSION" =~ (^canary|-rc\.[0-9]+$) ]]; then
            docker manifest push $FQDN:latest
          fi

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/logging.rs:
--------------------------------------------------------------------------------

```rust
#[macro_export]
/// This is a really simple macro to assert a snapshot of the logs.
/// To use it call `.with_subscriber(assert_snapshot_subscriber!())` in your test just before calling `await`.
/// This will assert a snapshot of the logs in pretty yaml format.
/// You can also use subscriber::with_default(assert_snapshot_subscriber!(), || { ... }) to assert the logs in non async code.
macro_rules! assert_snapshot_subscriber {
    // Default form: INFO level, no redactions.
    () => {
        $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, {})
    };

    // INFO level with caller-supplied insta redactions (a `{..}` token tree).
    ($redactions:tt) => {
        $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, $redactions)
    };

    // Custom level, no redactions.
    ($level:expr) => {
        $crate::assert_snapshot_subscriber!($level, {})
    };

    // Fully-specified form that the other arms forward to.
    ($level:expr, $redactions:tt) => {
        $crate::logging::test::SnapshotSubscriber::create_subscriber($level, |yaml| {
            insta::with_settings!({sort_maps => true}, {
                // the tests here will force maps to sort
                let mut settings = insta::Settings::clone_current();
                settings.set_snapshot_suffix("logs");
                settings.set_sort_maps(true);
                settings.bind(|| {
                    insta::assert_yaml_snapshot!(yaml, $redactions);
                });
            });
        })
    };
}

#[cfg(test)]
pub(crate) mod test {
    use std::sync::Arc;
    use std::sync::Mutex;

    use serde_json::Value;
    use tracing_core::LevelFilter;
    use tracing_core::Subscriber;
    use tracing_subscriber::layer::SubscriberExt;

    /// Collects JSON-formatted log lines in memory and, on drop, parses them
    /// and hands the result to an assertion callback (used for log snapshots).
    pub(crate) struct SnapshotSubscriber {
        buffer: Arc<Mutex<Vec<u8>>>,
        assertion: fn(Value),
    }

    impl std::io::Write for SnapshotSubscriber {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let buf_len = buf.len();
            // Fixed: previously `append(&mut buf.to_vec())`, which allocated a
            // temporary Vec on every write; `extend_from_slice` copies directly.
            self.buffer.lock().unwrap().extend_from_slice(buf);
            Ok(buf_len)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            Ok(())
        }
    }

    impl Drop for SnapshotSubscriber {
        fn drop(&mut self) {
            let log = String::from_utf8(self.buffer.lock().unwrap().to_vec()).unwrap();
            let parsed: Value = if log.is_empty() {
                serde_json::json!([])
            } else {
                // Each line is a standalone JSON object emitted by the fmt layer.
                let parsed_log: Vec<Value> = log
                    .lines()
                    .map(|line| {
                        let mut line: Value = serde_json::from_str(line).unwrap();
                        // move the message field to the top level
                        let fields = line
                            .as_object_mut()
                            .unwrap()
                            .get_mut("fields")
                            .unwrap()
                            .as_object_mut()
                            .unwrap();
                        let message = fields.remove("message").unwrap_or_default();
                        line.as_object_mut()
                            .unwrap()
                            .insert("message".to_string(), message);
                        line
                    })
                    .collect();
                serde_json::json!(parsed_log)
            };

            (self.assertion)(parsed)
        }
    }

    impl SnapshotSubscriber {
        /// Builds a subscriber that captures JSON logs at `level` and runs
        /// `assertion` on the collected output when the writer is dropped.
        pub(crate) fn create_subscriber(
            level: LevelFilter,
            assertion: fn(Value),
        ) -> impl Subscriber {
            let collector = Self {
                buffer: Arc::new(Mutex::new(Vec::new())),
                assertion,
            };

            tracing_subscriber::registry::Registry::default()
                .with(level)
                .with(
                    tracing_subscriber::fmt::Layer::default()
                        .json()
                        .without_time()
                        .with_target(false)
                        .with_file(false)
                        .with_line_number(false)
                        .with_writer(Mutex::new(collector)),
                )
        }
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/logging.rs:
--------------------------------------------------------------------------------

```rust
//! Logging config and utilities
//!
//! This module is only used by the main binary and provides logging config structures and setup
//! helper functions

mod defaults;
mod log_rotation_kind;
mod parsers;

use log_rotation_kind::LogRotationKind;
use schemars::JsonSchema;
use serde::Deserialize;
use std::path::PathBuf;
use tracing::Level;
use tracing_appender::rolling::RollingFileAppender;
use tracing_subscriber::EnvFilter;
use tracing_subscriber::fmt::Layer;
use tracing_subscriber::fmt::writer::BoxMakeWriter;

/// Logging related options
// NOTE: the `///` doc comments on the fields below double as JSON-schema
// descriptions via schemars, so reviewer notes use plain `//` comments to
// keep the generated schema stable.
#[derive(Debug, Deserialize, JsonSchema)]
pub struct Logging {
    /// The log level to use for tracing
    // Parsed from its string form (e.g. "info") via `parsers::from_str`;
    // the schema is supplied by the `level` helper since `tracing::Level`
    // does not implement `JsonSchema`.
    #[serde(
        default = "defaults::log_level",
        deserialize_with = "parsers::from_str"
    )]
    #[schemars(schema_with = "level")]
    pub level: Level,

    /// The output path to use for logging
    // When set, logs go to rotating files in this directory; otherwise to
    // stdout (see `Logging::logging_layer`).
    #[serde(default)]
    pub path: Option<PathBuf>,

    /// Log file rotation period to use when log file path provided
    /// [default: Hourly]
    #[serde(default = "defaults::default_rotation")]
    pub rotation: LogRotationKind,
}

impl Default for Logging {
    fn default() -> Self {
        Self {
            level: defaults::log_level(),
            path: None,
            rotation: defaults::default_rotation(),
        }
    }
}

/// Result of [`Logging::logging_layer`]: the configured fmt layer plus an
/// optional worker guard. The guard is `Some` only when a non-blocking file
/// appender is in use and must be kept alive so buffered log lines are flushed.
type LoggingLayerResult = (
    Layer<
        tracing_subscriber::Registry,
        tracing_subscriber::fmt::format::DefaultFields,
        tracing_subscriber::fmt::format::Format,
        BoxMakeWriter,
    >,
    Option<tracing_appender::non_blocking::WorkerGuard>,
);

impl Logging {
    /// Build the [`EnvFilter`] for the subscriber: environment-provided
    /// directives (`RUST_LOG`-style) plus the configured level.
    pub fn env_filter(logging: &Logging) -> Result<EnvFilter, anyhow::Error> {
        let mut env_filter = EnvFilter::from_default_env().add_directive(logging.level.into());

        // At the default INFO level, quiet down noisy dependencies.
        if logging.level == Level::INFO {
            env_filter = env_filter
                .add_directive("rmcp=warn".parse()?)
                .add_directive("tantivy=warn".parse()?);
        }
        Ok(env_filter)
    }

    /// Build the fmt layer (and optional worker guard) for the subscriber.
    ///
    /// With a configured path, logs are written through a non-blocking rolling
    /// file appender with ANSI colors disabled; any setup failure is reported
    /// to stderr and logging falls back to stderr. Without a path, logs go to
    /// stdout. The returned guard (when `Some`) must be held for the program's
    /// lifetime so the non-blocking writer is flushed.
    pub fn logging_layer(logging: &Logging) -> Result<LoggingLayerResult, anyhow::Error> {
        // Shared error reporter; logging isn't set up yet, so use eprintln.
        macro_rules! log_error {
            () => {
                |e| eprintln!("Failed to setup logging: {e:?}")
            };
        }

        let (writer, guard, with_ansi) = match logging.path.clone() {
            // Create the log directory, then the rolling appender; each step
            // logs its error and converts to Option so a failure anywhere in
            // the chain triggers the stderr fallback below.
            Some(path) => std::fs::create_dir_all(&path)
                .map(|_| path)
                .inspect_err(log_error!())
                .ok()
                .and_then(|path| {
                    RollingFileAppender::builder()
                        .rotation(logging.rotation.clone().into())
                        .filename_prefix("apollo_mcp_server")
                        .filename_suffix("log")
                        .build(path)
                        .inspect_err(log_error!())
                        .ok()
                })
                .map(|appender| {
                    let (non_blocking_appender, guard) = tracing_appender::non_blocking(appender);
                    (
                        BoxMakeWriter::new(non_blocking_appender),
                        Some(guard),
                        false,
                    )
                })
                .unwrap_or_else(|| {
                    eprintln!("Log file setup failed - falling back to stderr");
                    (BoxMakeWriter::new(std::io::stderr), None, true)
                }),
            None => (BoxMakeWriter::new(std::io::stdout), None, true),
        };

        Ok((
            tracing_subscriber::fmt::layer()
                .with_writer(writer)
                .with_ansi(with_ansi)
                .with_target(false),
            guard,
        ))
    }
}

/// Generate the JSON schema for the `level` field.
///
/// `tracing::Level` does not implement `JsonSchema`, so a throwaway enum
/// mirroring its variants is used purely to derive the schema (serialized as
/// lowercase strings). The enum name `Level` is kept so the schema title
/// matches the field's type.
fn level(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {
    /// Log level
    #[derive(JsonSchema)]
    #[schemars(rename_all = "lowercase")]
    // This is just an intermediate type to auto create schema information for,
    // so it is OK if it is never used
    #[allow(dead_code)]
    enum Level {
        Trace,
        Debug,
        Info,
        Warn,
        Error,
    }

    Level::json_schema(generator)
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/config.rs:
--------------------------------------------------------------------------------

```rust
use std::path::PathBuf;

use apollo_mcp_server::{
    cors::CorsConfig, headers::ForwardHeaders, health::HealthCheckConfig, server::Transport,
};
use reqwest::header::HeaderMap;
use schemars::JsonSchema;
use serde::Deserialize;
use url::Url;

use super::{
    OperationSource, SchemaSource, endpoint::Endpoint, graphos::GraphOSConfig,
    introspection::Introspection, logging::Logging, overrides::Overrides, telemetry::Telemetry,
};

/// Configuration for the MCP server
// NOTE: field doc comments double as JSON-schema descriptions via schemars,
// so reviewer notes below use plain `//` comments to keep the schema stable.
// The container-level `#[serde(default)]` makes every field optional, which
// is what lets an empty `{}` config parse (see the tests below).
#[derive(Debug, Default, Deserialize, JsonSchema)]
#[serde(default)]
pub struct Config {
    /// CORS configuration
    pub cors: CorsConfig,

    /// Path to a custom scalar map
    pub custom_scalars: Option<PathBuf>,

    /// The target GraphQL endpoint
    #[schemars(schema_with = "Url::json_schema")]
    pub endpoint: Endpoint,

    /// Apollo-specific credential overrides
    pub graphos: GraphOSConfig,

    /// List of hard-coded headers to include in all GraphQL requests
    // Header names/values are validated at deserialization time by the
    // custom parser below.
    #[serde(deserialize_with = "parsers::map_from_str")]
    #[schemars(schema_with = "super::schemas::header_map")]
    pub headers: HeaderMap,

    /// List of header names to forward from MCP client requests to GraphQL requests
    // The per-field `#[serde(default)]` attributes are redundant with the
    // container-level one, but harmless.
    #[serde(default)]
    pub forward_headers: ForwardHeaders,

    /// Health check configuration
    #[serde(default)]
    pub health_check: HealthCheckConfig,

    /// Introspection configuration
    pub introspection: Introspection,

    /// Logging configuration
    pub logging: Logging,

    /// Telemetry configuration
    pub telemetry: Telemetry,

    /// Operations
    pub operations: OperationSource,

    /// Overrides for server behaviour
    pub overrides: Overrides,

    /// The schema to load for operations
    pub schema: SchemaSource,

    /// The type of server transport to use
    pub transport: Transport,
}

mod parsers {
    use std::str::FromStr;

    use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
    use serde::Deserializer;

    /// Deserialize a [`HeaderMap`] from a plain string-to-string map,
    /// validating each entry as an HTTP header name/value pair.
    pub(super) fn map_from_str<'de, D>(deserializer: D) -> Result<HeaderMap, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct HeaderMapVisitor;
        impl<'de> serde::de::Visitor<'de> for HeaderMapVisitor {
            type Value = HeaderMap;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str("a map of header string keys and values")
            }

            fn visit_map<A>(self, mut access: A) -> Result<Self::Value, A::Error>
            where
                A: serde::de::MapAccess<'de>,
            {
                let mut headers = HeaderMap::with_capacity(access.size_hint().unwrap_or(0));

                // Validate and insert every entry; a later duplicate key
                // replaces an earlier one (HeaderMap::insert semantics).
                while let Some((raw_name, raw_value)) = access.next_entry::<String, String>()? {
                    let name = HeaderName::from_str(&raw_name)
                        .map_err(|e| serde::de::Error::custom(e.to_string()))?;
                    let value = HeaderValue::from_str(&raw_value)
                        .map_err(|e| serde::de::Error::custom(e.to_string()))?;

                    headers.insert(name, value);
                }

                Ok(headers)
            }
        }

        deserializer.deserialize_map(HeaderMapVisitor)
    }
}

#[cfg(test)]
mod test {
    use super::Config;

    /// An empty JSON object must deserialize, thanks to the container-level
    /// `#[serde(default)]` on `Config`.
    #[test]
    fn it_parses_a_minimal_config() {
        serde_json::from_str::<Config>("{}").unwrap();
    }

    #[test]
    fn it_contains_no_keys_with_double_underscore() {
        // The env functionality of the config expansion uses __ as a split key
        // when determining nested fields of any of the fields of the Config.
        // This test ensures that a field name isn't added that can no longer be
        // configured using the env extractor.
        //
        // See [runtime::read_config]
        //
        // TODO: This is a quick hack since traversing the nested (untyped) schema
        // object is probably overkill.
        let schema = schemars::schema_for!(Config).to_value().to_string();

        assert!(!schema.contains("__"))
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/validate.rs:
--------------------------------------------------------------------------------

```rust
use crate::errors::McpError;
use crate::operations::operation_defs;
use crate::schema_from_type;
use apollo_compiler::Schema;
use apollo_compiler::parser::Parser;
use apollo_compiler::validation::Valid;
use rmcp::model::CallToolResult;
use rmcp::model::Content;
use rmcp::model::{ErrorCode, Tool};
use rmcp::schemars::JsonSchema;
use rmcp::serde_json::Value;
use rmcp::{schemars, serde_json};
use serde::Deserialize;
use std::sync::Arc;
use tokio::sync::Mutex;

/// The name of the tool to validate an ad hoc GraphQL operation
pub const VALIDATE_TOOL_NAME: &str = "validate";

/// MCP tool that checks an ad hoc GraphQL operation against the schema
/// without executing it.
#[derive(Clone)]
pub struct Validate {
    // Tool metadata (name, description, input schema) advertised to MCP clients
    pub tool: Tool,
    // Schema shared behind an async lock — presumably replaced elsewhere when
    // the schema reloads; TODO confirm against the schema event handling
    schema: Arc<Mutex<Valid<Schema>>>,
}

/// Input for the validate tool
// NOTE: the field doc comment below becomes the schema description shown to
// clients (via JsonSchema derive), so it is left untouched.
#[derive(JsonSchema, Deserialize, Debug)]
pub struct Input {
    /// The GraphQL operation
    operation: String,
}

impl Validate {
    /// Create the `validate` tool backed by the given shared schema.
    pub fn new(schema: Arc<Mutex<Valid<Schema>>>) -> Self {
        Self {
            schema,
            tool: Tool::new(
                VALIDATE_TOOL_NAME,
                "Validates a GraphQL operation against the schema. \
                Use the `introspect` tool first to get information about the GraphQL schema. \
                Operations should be validated prior to calling the `execute` tool.",
                schema_from_type!(Input),
            ),
        }
    }

    /// Validates the provided GraphQL query
    ///
    /// Returns an `INVALID_PARAMS` error when the input is malformed, the
    /// operation cannot be parsed, its operation type is rejected by
    /// `operation_defs`, or it fails validation against the schema.
    #[tracing::instrument(skip(self))]
    pub async fn execute(&self, input: Value) -> Result<CallToolResult, McpError> {
        let input = serde_json::from_value::<Input>(input).map_err(|_| {
            McpError::new(ErrorCode::INVALID_PARAMS, "Invalid input".to_string(), None)
        })?;

        // Check the operation definition first; `Ok(None)` from
        // `operation_defs` is mapped to an "Invalid operation type" error.
        operation_defs(&input.operation, true, None)
            .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?
            .ok_or_else(|| {
                McpError::new(
                    ErrorCode::INVALID_PARAMS,
                    "Invalid operation type".to_string(),
                    None,
                )
            })?;

        // Parse and validate the operation against the current schema while
        // holding the lock so the schema can't change mid-validation.
        let schema_guard = self.schema.lock().await;
        Parser::new()
            .parse_executable(&schema_guard, input.operation.as_str(), "operation.graphql")
            .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?
            .validate(&schema_guard)
            .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?;
        Ok(CallToolResult {
            content: vec![Content::text("Operation is valid")],
            is_error: None,
            meta: None,

            // Note: We don't really return any meaningful content to the client here, so we can leave the
            // structured content as none.
            structured_content: None,
        })
    }
}

#[cfg(test)]
mod tests {
    use serde_json::json;

    use super::*;
    // Minimal schema shared by all tests; built once lazily.
    static SCHEMA: std::sync::LazyLock<Arc<Mutex<Valid<Schema>>>> =
        std::sync::LazyLock::new(|| {
            Arc::new(Mutex::new(
                Schema::parse_and_validate(
                    "type Query { id: ID! hello(name: String!): String! }",
                    "schema.graphql",
                )
                .unwrap(),
            ))
        });

    /// A well-formed query selecting an existing field should validate.
    #[tokio::test]
    async fn validate_valid_query() {
        let validate = Validate::new(SCHEMA.clone());
        let input = json!({
            "operation": "query Test { id }"
        });
        assert!(validate.execute(input).await.is_ok());
    }

    /// Syntactically broken GraphQL should be rejected at parse time.
    #[tokio::test]
    async fn validate_invalid_graphql_query() {
        let validate = Validate::new(SCHEMA.clone());
        let input = json!({
            "operation": "query {"
        });
        assert!(validate.execute(input).await.is_err());
    }

    /// Selecting a field not present in the schema should fail validation.
    #[tokio::test]
    async fn validate_invalid_query_field() {
        let validate = Validate::new(SCHEMA.clone());
        let input = json!({
            "operation": "query { invalidField }"
        });
        assert!(validate.execute(input).await.is_err());
    }

    /// Omitting a required argument (`name`) should fail validation.
    #[tokio::test]
    async fn validate_invalid_argument() {
        let validate = Validate::new(SCHEMA.clone());
        let input = json!({
            "operation": "query { hello }"
        });
        assert!(validate.execute(input).await.is_err());
    }
}

```

--------------------------------------------------------------------------------
/xtask/src/commands/changeset/matching_pull_request.rs:
--------------------------------------------------------------------------------

```rust
// THIS FILE IS GENERATED
// THIS FILE IS GENERATED
// THIS FILE IS GENERATED
// See the instructions in `./mod.rs` for how to regenerate it.  It is
// generated based on the operation that sits alongside it in this same file.
// Unfortunately, this comment will not be preserved and needs to be manually
// preserved if it's desired to keep it around.  Luckily, I don't think this
// operation will change very often.
// THIS FILE IS GENERATED
// THIS FILE IS GENERATED
// THIS FILE IS GENERATED

#![allow(clippy::all, warnings)]
// Marker type implementing `graphql_client::GraphQLQuery` (see impl below).
pub struct MatchingPullRequest;
// Generated module containing the query text, variables, and response types
// for the MatchingPullRequest operation (do not edit by hand — regenerate).
pub mod matching_pull_request {
    #![allow(dead_code)]
    use std::result::Result;
    pub const OPERATION_NAME: &str = "MatchingPullRequest";
    pub const QUERY : & str = "# This operation is used to generate Rust code which lives in a file directly\n# next to this with the same name but a `.rs` extension.  For instructions on\n# how to generate the code, see the top of `./mod.rs`.\nfragment PrInfo on PullRequest {\n  url\n  number\n  author {\n    __typename\n    login\n  }\n  title\n  closingIssuesReferences(last: 4) {\n    nodes {\n      url\n      number\n      repository {\n        nameWithOwner\n      }\n    }\n  }\n  body\n}\nfragment PrSearchResult on SearchResultItemConnection {\n  issueCount\n  nodes {\n    __typename\n    ...PrInfo\n  }\n }\n\nquery MatchingPullRequest($search: String!) {\n  search(\n    type: ISSUE\n    query: $search\n    first: 1\n  ) {\n    ...PrSearchResult\n  }\n}\n" ;
    use serde::Deserialize;
    use serde::Serialize;

    use super::*;
    // Aliases mapping GraphQL scalar names onto Rust types.
    #[allow(dead_code)]
    type Boolean = bool;
    #[allow(dead_code)]
    type Float = f64;
    #[allow(dead_code)]
    type Int = i64;
    #[allow(dead_code)]
    type ID = String;
    type URI = crate::commands::changeset::scalars::URI;
    #[derive(Serialize)]
    pub struct Variables {
        pub search: String,
    }
    impl Variables {}
    // Mirrors the `PrInfo` fragment in QUERY above.
    #[derive(Deserialize, Debug)]
    pub struct PrInfo {
        pub url: URI,
        pub number: Int,
        pub author: Option<PrInfoAuthor>,
        pub title: String,
        #[serde(rename = "closingIssuesReferences")]
        pub closing_issues_references: Option<PrInfoClosingIssuesReferences>,
        pub body: String,
    }
    #[derive(Deserialize, Debug)]
    pub struct PrInfoAuthor {
        pub login: String,
        #[serde(flatten)]
        pub on: PrInfoAuthorOn,
    }
    // Discriminated by the `__typename` field selected in the fragment.
    #[derive(Deserialize, Debug)]
    #[serde(tag = "__typename")]
    pub enum PrInfoAuthorOn {
        Bot,
        EnterpriseUserAccount,
        Mannequin,
        Organization,
        User,
    }
    #[derive(Deserialize, Debug)]
    pub struct PrInfoClosingIssuesReferences {
        pub nodes: Option<Vec<Option<PrInfoClosingIssuesReferencesNodes>>>,
    }
    #[derive(Deserialize, Debug)]
    pub struct PrInfoClosingIssuesReferencesNodes {
        pub url: URI,
        pub number: Int,
        pub repository: PrInfoClosingIssuesReferencesNodesRepository,
    }
    #[derive(Deserialize, Debug)]
    pub struct PrInfoClosingIssuesReferencesNodesRepository {
        #[serde(rename = "nameWithOwner")]
        pub name_with_owner: String,
    }
    #[derive(Deserialize, Debug)]
    pub struct PrSearchResult {
        #[serde(rename = "issueCount")]
        pub issue_count: Int,
        pub nodes: Option<Vec<Option<PrSearchResultNodes>>>,
    }
    // Only the PullRequest variant carries data; the rest are possible
    // `__typename` values of GitHub's SearchResultItem union.
    #[derive(Deserialize, Debug)]
    #[serde(tag = "__typename")]
    pub enum PrSearchResultNodes {
        App,
        Discussion,
        Issue,
        MarketplaceListing,
        Organization,
        PullRequest(PrSearchResultNodesOnPullRequest),
        Repository,
        User,
    }
    pub type PrSearchResultNodesOnPullRequest = PrInfo;
    #[derive(Deserialize, Debug)]
    pub struct ResponseData {
        pub search: MatchingPullRequestSearch,
    }
    pub type MatchingPullRequestSearch = PrSearchResult;
}
// Wires the generated query text and types into the graphql_client machinery.
impl graphql_client::GraphQLQuery for MatchingPullRequest {
    type Variables = matching_pull_request::Variables;
    type ResponseData = matching_pull_request::ResponseData;
    fn build_query(variables: Self::Variables) -> ::graphql_client::QueryBody<Self::Variables> {
        graphql_client::QueryBody {
            variables,
            query: matching_pull_request::QUERY,
            operation_name: matching_pull_request::OPERATION_NAME,
        }
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries.rs:
--------------------------------------------------------------------------------

```rust
use graphql_client::GraphQLQuery;

pub mod event;
mod manifest;
mod manifest_poller;

pub use manifest::FullPersistedQueryOperationId;
pub use manifest::ManifestOperation;
pub use manifest::PersistedQueryManifest;
pub use manifest::SignedUrlChunk;
pub use manifest_poller::ManifestSource;
pub use manifest_poller::PersistedQueryManifestPollerState;

use crate::uplink::UplinkRequest;
use crate::uplink::UplinkResponse;

/// Persisted query manifest query definition
// The derive generates the `persisted_queries_manifest_query` module
// (variables, response types, enums) from the .graphql files referenced here.
#[derive(GraphQLQuery)]
#[graphql(
    query_path = "src/uplink/persisted_queries/persisted_queries_manifest_query.graphql",
    schema_path = "src/uplink/uplink.graphql",
    request_derives = "Debug",
    response_derives = "PartialEq, Debug, Deserialize",
    deprecated = "warn"
)]
pub struct PersistedQueriesManifestQuery;

impl From<UplinkRequest> for persisted_queries_manifest_query::Variables {
    /// Map an [`UplinkRequest`] onto the query variables; the request id
    /// becomes the `ifAfterId` cursor.
    fn from(req: UplinkRequest) -> Self {
        Self {
            if_after_id: req.id,
            api_key: req.api_key,
            graph_ref: req.graph_ref,
        }
    }
}

/// One chunk of a persisted query manifest: its id plus the signed URLs it can
/// be fetched from.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct PersistedQueriesManifestChunk {
    pub id: String,
    pub urls: Vec<String>,
}

impl PersistedQueriesManifestChunk {
    /// Convert a raw uplink response chunk into our owned representation.
    fn from_query_chunks(
        query_chunks: &persisted_queries_manifest_query::PersistedQueriesManifestQueryPersistedQueriesOnPersistedQueriesResultChunks,
    ) -> Self {
        let id = query_chunks.id.clone();
        let urls = query_chunks.urls.clone();
        Self { id, urls }
    }
}

/// All chunks making up a persisted query manifest.
pub type PersistedQueriesManifestChunks = Vec<PersistedQueriesManifestChunk>;
/// `None` when no persisted query list is associated with the graph.
pub type MaybePersistedQueriesManifestChunks = Option<PersistedQueriesManifestChunks>;

impl From<persisted_queries_manifest_query::ResponseData>
    for UplinkResponse<MaybePersistedQueriesManifestChunks>
{
    /// Translate the raw uplink GraphQL response into an [`UplinkResponse`].
    fn from(response: persisted_queries_manifest_query::ResponseData) -> Self {
        use persisted_queries_manifest_query::FetchErrorCode;
        use persisted_queries_manifest_query::PersistedQueriesManifestQueryPersistedQueries as Pq;

        match response.persisted_queries {
            Pq::PersistedQueriesResult(result) => {
                // Absent chunks means no persisted query list is associated
                // with this graph; otherwise convert each chunk.
                let chunks = result.chunks.map(|chunks| {
                    chunks
                        .iter()
                        .map(PersistedQueriesManifestChunk::from_query_chunks)
                        .collect()
                });
                UplinkResponse::New {
                    response: chunks,
                    id: result.id,
                    // This truncates the number of seconds to under u64::MAX,
                    // which should be a large enough delay anyway.
                    delay: result.min_delay_seconds as u64,
                }
            }
            Pq::Unchanged(unchanged) => UplinkResponse::Unchanged {
                id: Some(unchanged.id),
                delay: Some(unchanged.min_delay_seconds as u64),
            },
            Pq::FetchError(err) => {
                let retry_later = err.code == FetchErrorCode::RETRY_LATER;
                let code = match err.code {
                    FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
                    FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
                    FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
                    FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
                    FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
                        "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
                    }
                    FetchErrorCode::Other(other) => other,
                };
                UplinkResponse::Error {
                    retry_later,
                    code,
                    message: err.message,
                }
            }
        }
    }
}

```

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------

```yaml
name: Nix CI
on:
  push:
    # don't run on tags, run on commits
    # https://github.com/orgs/community/discussions/25615
    tags-ignore:
      - "**"
    branches:
      - main
      - develop
  pull_request:
  workflow_dispatch:

env:
  # We want the cache to be as full as possible, so we instruct nix to keep derivations
  # and other related outputs around in its cache
  nix_conf: |
    keep-env-derivations = true
    keep-outputs = true

jobs:
  # Cache the nix store so that subsequent runs are almost instantaneous
  # See https://github.com/marketplace/actions/restore-and-save-nix-store#inputs
  cache:
    name: Cache nix store
    runs-on: ubuntu-24.04
    permissions:
      actions: write
      contents: read
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          restore-prefixes-first-match: build-${{ runner.os }}-
          purge: true
          purge-prefixes: build-${{ runner.os }}-
          purge-created: 0
          purge-primary-key: never
          gc-max-store-size: 5G
      - name: Save flake attributes from garbage collection
        run: nix profile install .#saveFromGC

  check:
    name: Run checks
    runs-on: ubuntu-24.04
    needs: cache
    permissions:
      actions: write
      contents: read
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          purge: true
          purge-prefixes: build-${{ runner.os }}-
          purge-created: 0
          purge-primary-key: never
          gc-max-store-size: 5G
      - name: Run checks
        run: nix flake check

  build:
    name: Build
    runs-on: ubuntu-24.04
    needs: cache
    permissions:
      actions: write
      contents: read
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          purge: true
          purge-prefixes: build-${{ runner.os }}-
          purge-created: 0
          purge-primary-key: never
          gc-max-store-size: 5G
      - name: Build
        run: nix build .#

  test:
    name: Run Tests
    runs-on: ubuntu-24.04
    needs: cache
    permissions:
      actions: write
      contents: read
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          purge: true
          purge-prefixes: build-${{ runner.os }}-
          purge-created: 0
          purge-primary-key: never
          gc-max-store-size: 5G
      - name: Run Tests
        run: 'nix develop --command bash -c "cargo test"'

  coverage:
    name: Run Coverage
    runs-on: ubuntu-24.04
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: taiki-e/install-action@cargo-llvm-cov
      - name: Generate code coverage
        run: cargo llvm-cov --all-features --workspace --codecov --output-path codecov.json
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
          files: codecov.json
          fail_ci_if_error: true

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/raw_operation.rs:
--------------------------------------------------------------------------------

```rust
use std::{collections::HashMap, str::FromStr as _};

use apollo_compiler::validation::Valid;
use apollo_mcp_registry::platform_api::operation_collections::{
    collection_poller::OperationData, error::CollectionError,
};
use http::{HeaderMap, HeaderName, HeaderValue};
use serde_json::Value;

use crate::{custom_scalar_map::CustomScalarMap, errors::OperationError};

use super::{MutationMode, operation::Operation};

/// An operation as loaded from its source, before being parsed and validated
/// into an [`Operation`].
#[derive(Debug, Clone)]
pub struct RawOperation {
    // Raw GraphQL source text of the operation
    pub(super) source_text: String,
    // Persisted query id, when sourced from a persisted query manifest
    pub(super) persisted_query_id: Option<String>,
    // Headers to attach to requests, when sourced from an operation collection
    pub(super) headers: Option<HeaderMap<HeaderValue>>,
    // Default variable values, when sourced from an operation collection
    pub(super) variables: Option<HashMap<String, Value>>,
    // Path of the source file, when loaded from disk
    pub(super) source_path: Option<String>,
}

impl RawOperation {
    /// Parse this raw operation into a fully-formed [`Operation`].
    ///
    /// Thin wrapper around [`Operation::from_document`]; `Ok(None)` appears to
    /// mean the document yields no usable operation — TODO confirm against
    /// `Operation::from_document`.
    pub(crate) fn into_operation(
        self,
        schema: &Valid<apollo_compiler::Schema>,
        custom_scalars: Option<&CustomScalarMap>,
        mutation_mode: MutationMode,
        disable_type_description: bool,
        disable_schema_description: bool,
    ) -> Result<Option<Operation>, OperationError> {
        Operation::from_document(
            self,
            schema,
            custom_scalars,
            mutation_mode,
            disable_type_description,
            disable_schema_description,
        )
    }
}

impl From<(String, Option<String>)> for RawOperation {
    /// Build a raw operation from source text plus an optional source file path.
    fn from((source_text, source_path): (String, Option<String>)) -> Self {
        Self {
            source_text,
            source_path,
            persisted_query_id: None,
            headers: None,
            variables: None,
        }
    }
}

impl From<(String, String)> for RawOperation {
    /// Build a raw operation from a persisted query id and its source text.
    fn from((persisted_query_id, source_text): (String, String)) -> Self {
        Self {
            source_text,
            persisted_query_id: Some(persisted_query_id),
            headers: None,
            variables: None,
            source_path: None,
        }
    }
}

impl TryFrom<&OperationData> for RawOperation {
    type Error = CollectionError;

    fn try_from(operation_data: &OperationData) -> Result<Self, Self::Error> {
        let variables = if let Some(variables) = operation_data.variables.as_ref() {
            if variables.trim().is_empty() {
                Some(HashMap::new())
            } else {
                Some(
                    serde_json::from_str::<HashMap<String, Value>>(variables)
                        .map_err(|_| CollectionError::InvalidVariables(variables.clone()))?,
                )
            }
        } else {
            None
        };

        let headers = if let Some(headers) = operation_data.headers.as_ref() {
            let mut header_map = HeaderMap::new();
            for header in headers {
                header_map.insert(
                    HeaderName::from_str(&header.0).map_err(CollectionError::HeaderName)?,
                    HeaderValue::from_str(&header.1).map_err(CollectionError::HeaderValue)?,
                );
            }
            Some(header_map)
        } else {
            None
        };

        Ok(Self {
            persisted_query_id: None,
            source_text: operation_data.source_text.clone(),
            headers,
            variables,
            source_path: None,
        })
    }
}

// TODO: This can be greatly simplified by using `serde::serialize_with` on the specific field that does not
// implement `Serialize`.
// Custom Serialize implementation for RawOperation
// This is needed because http HeaderMap/HeaderValue/HeaderName don't derive Serialize
impl serde::Serialize for RawOperation {
    /// Serialize the operation as a struct, emitting optional fields only when
    /// present. Headers are flattened into a single "name: value" lines string
    /// because `HeaderMap` does not implement `Serialize`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        use serde::ser::SerializeStruct;

        // Report the actual number of fields that will be written. The
        // previous hard-coded hint of 4 was wrong whenever the number of
        // present optional fields differed; serde_json ignores the hint, but
        // length-strict serializers do not.
        let len = 1
            + usize::from(self.persisted_query_id.is_some())
            + usize::from(self.variables.is_some())
            + usize::from(self.headers.is_some())
            + usize::from(self.source_path.is_some());
        let mut state = serializer.serialize_struct("RawOperation", len)?;
        state.serialize_field("source_text", &self.source_text)?;
        if let Some(ref id) = self.persisted_query_id {
            state.serialize_field("persisted_query_id", id)?;
        }
        if let Some(ref variables) = self.variables {
            state.serialize_field("variables", variables)?;
        }
        if let Some(ref headers) = self.headers {
            // Non-UTF-8 header values serialize as an empty string rather
            // than failing the whole serialization.
            state.serialize_field(
                "headers",
                headers
                    .iter()
                    .map(|(name, value)| {
                        format!("{}: {}", name, value.to_str().unwrap_or_default())
                    })
                    .collect::<Vec<_>>()
                    .join("\n")
                    .as_str(),
            )?;
        }
        if let Some(ref path) = self.source_path {
            state.serialize_field("source_path", path)?;
        }

        state.end()
    }
}

```
Page 1/6FirstPrevNextLast