This is page 2 of 8. Use http://codebase.md/apollographql/apollo-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cargo
│   └── config.toml
├── .changesets
│   └── README.md
├── .envrc
├── .github
│   ├── CODEOWNERS
│   ├── renovate.json5
│   └── workflows
│       ├── canary-release.yml
│       ├── ci.yml
│       ├── prep-release.yml
│       ├── release-bins.yml
│       ├── release-container.yml
│       ├── sync-develop.yml
│       └── verify-changeset.yml
├── .gitignore
├── .idea
│   └── runConfigurations
│       ├── clippy.xml
│       ├── format___test___clippy.xml
│       ├── format.xml
│       ├── Run_spacedevs.xml
│       └── Test_apollo_mcp_server.xml
├── .vscode
│   ├── extensions.json
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── apollo.config.json
├── Cargo.lock
├── Cargo.toml
├── CHANGELOG_SECTION.md
├── CHANGELOG.md
├── clippy.toml
├── codecov.yml
├── CONTRIBUTING.md
├── crates
│   ├── apollo-mcp-registry
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── files.rs
│   │       ├── lib.rs
│   │       ├── logging.rs
│   │       ├── platform_api
│   │       │   ├── operation_collections
│   │       │   │   ├── collection_poller.rs
│   │       │   │   ├── error.rs
│   │       │   │   ├── event.rs
│   │       │   │   └── operation_collections.graphql
│   │       │   ├── operation_collections.rs
│   │       │   └── platform-api.graphql
│   │       ├── platform_api.rs
│   │       ├── testdata
│   │       │   ├── minimal_supergraph.graphql
│   │       │   └── supergraph.graphql
│   │       ├── uplink
│   │       │   ├── persisted_queries
│   │       │   │   ├── event.rs
│   │       │   │   ├── manifest_poller.rs
│   │       │   │   ├── manifest.rs
│   │       │   │   └── persisted_queries_manifest_query.graphql
│   │       │   ├── persisted_queries.rs
│   │       │   ├── schema
│   │       │   │   ├── event.rs
│   │       │   │   ├── schema_query.graphql
│   │       │   │   └── schema_stream.rs
│   │       │   ├── schema.rs
│   │       │   ├── snapshots
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_all_fail@logs.snap
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_fallback@logs.snap
│   │       │   │   └── apollo_mcp_registry__uplink__schema__tests__schema_by_url@logs.snap
│   │       │   └── uplink.graphql
│   │       └── uplink.rs
│   ├── apollo-mcp-server
│   │   ├── build.rs
│   │   ├── Cargo.toml
│   │   ├── src
│   │   │   ├── auth
│   │   │   │   ├── networked_token_validator.rs
│   │   │   │   ├── protected_resource.rs
│   │   │   │   ├── valid_token.rs
│   │   │   │   └── www_authenticate.rs
│   │   │   ├── auth.rs
│   │   │   ├── config_schema.rs
│   │   │   ├── cors.rs
│   │   │   ├── custom_scalar_map.rs
│   │   │   ├── errors.rs
│   │   │   ├── event.rs
│   │   │   ├── explorer.rs
│   │   │   ├── graphql.rs
│   │   │   ├── headers.rs
│   │   │   ├── health.rs
│   │   │   ├── introspection
│   │   │   │   ├── minify.rs
│   │   │   │   ├── snapshots
│   │   │   │   │   └── apollo_mcp_server__introspection__minify__tests__minify_schema.snap
│   │   │   │   ├── tools
│   │   │   │   │   ├── execute.rs
│   │   │   │   │   ├── introspect.rs
│   │   │   │   │   ├── search.rs
│   │   │   │   │   ├── snapshots
│   │   │   │   │   │   └── apollo_mcp_server__introspection__tools__search__tests__search_tool.snap
│   │   │   │   │   ├── testdata
│   │   │   │   │   │   └── schema.graphql
│   │   │   │   │   └── validate.rs
│   │   │   │   └── tools.rs
│   │   │   ├── introspection.rs
│   │   │   ├── json_schema.rs
│   │   │   ├── lib.rs
│   │   │   ├── main.rs
│   │   │   ├── meter.rs
│   │   │   ├── operations
│   │   │   │   ├── mutation_mode.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── operation.rs
│   │   │   │   ├── raw_operation.rs
│   │   │   │   ├── schema_walker
│   │   │   │   │   ├── name.rs
│   │   │   │   │   └── type.rs
│   │   │   │   └── schema_walker.rs
│   │   │   ├── operations.rs
│   │   │   ├── runtime
│   │   │   │   ├── config.rs
│   │   │   │   ├── endpoint.rs
│   │   │   │   ├── filtering_exporter.rs
│   │   │   │   ├── graphos.rs
│   │   │   │   ├── introspection.rs
│   │   │   │   ├── logging
│   │   │   │   │   ├── defaults.rs
│   │   │   │   │   ├── log_rotation_kind.rs
│   │   │   │   │   └── parsers.rs
│   │   │   │   ├── logging.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── overrides.rs
│   │   │   │   ├── schema_source.rs
│   │   │   │   ├── schemas.rs
│   │   │   │   ├── telemetry
│   │   │   │   │   └── sampler.rs
│   │   │   │   └── telemetry.rs
│   │   │   ├── runtime.rs
│   │   │   ├── sanitize.rs
│   │   │   ├── schema_tree_shake.rs
│   │   │   ├── server
│   │   │   │   ├── states
│   │   │   │   │   ├── configuring.rs
│   │   │   │   │   ├── operations_configured.rs
│   │   │   │   │   ├── running.rs
│   │   │   │   │   ├── schema_configured.rs
│   │   │   │   │   └── starting.rs
│   │   │   │   └── states.rs
│   │   │   ├── server.rs
│   │   │   └── telemetry_attributes.rs
│   │   └── telemetry.toml
│   └── apollo-schema-index
│       ├── Cargo.toml
│       └── src
│           ├── error.rs
│           ├── lib.rs
│           ├── path.rs
│           ├── snapshots
│           │   ├── apollo_schema_index__tests__search.snap
│           │   └── apollo_schema_index__traverse__tests__schema_traverse.snap
│           ├── testdata
│           │   └── schema.graphql
│           └── traverse.rs
├── docs
│   └── source
│       ├── _sidebar.yaml
│       ├── auth.mdx
│       ├── best-practices.mdx
│       ├── config-file.mdx
│       ├── cors.mdx
│       ├── custom-scalars.mdx
│       ├── debugging.mdx
│       ├── define-tools.mdx
│       ├── deploy.mdx
│       ├── guides
│       │   └── auth-auth0.mdx
│       ├── health-checks.mdx
│       ├── images
│       │   ├── auth0-permissions-enable.png
│       │   ├── mcp-getstarted-inspector-http.jpg
│       │   └── mcp-getstarted-inspector-stdio.jpg
│       ├── index.mdx
│       ├── licensing.mdx
│       ├── limitations.mdx
│       ├── quickstart.mdx
│       ├── run.mdx
│       └── telemetry.mdx
├── e2e
│   └── mcp-server-tester
│       ├── local-operations
│       │   ├── api.graphql
│       │   ├── config.yaml
│       │   ├── operations
│       │   │   ├── ExploreCelestialBodies.graphql
│       │   │   ├── GetAstronautDetails.graphql
│       │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│       │   │   └── SearchUpcomingLaunches.graphql
│       │   └── tool-tests.yaml
│       ├── pq-manifest
│       │   ├── api.graphql
│       │   ├── apollo.json
│       │   ├── config.yaml
│       │   └── tool-tests.yaml
│       ├── run_tests.sh
│       └── server-config.template.json
├── flake.lock
├── flake.nix
├── graphql
│   ├── TheSpaceDevs
│   │   ├── .vscode
│   │   │   ├── extensions.json
│   │   │   └── tasks.json
│   │   ├── api.graphql
│   │   ├── apollo.config.json
│   │   ├── config.yaml
│   │   ├── operations
│   │   │   ├── ExploreCelestialBodies.graphql
│   │   │   ├── GetAstronautDetails.graphql
│   │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│   │   │   └── SearchUpcomingLaunches.graphql
│   │   ├── persisted_queries
│   │   │   └── apollo.json
│   │   ├── persisted_queries.config.json
│   │   ├── README.md
│   │   └── supergraph.yaml
│   └── weather
│       ├── api.graphql
│       ├── config.yaml
│       ├── operations
│       │   ├── alerts.graphql
│       │   ├── all.graphql
│       │   └── forecast.graphql
│       ├── persisted_queries
│       │   └── apollo.json
│       ├── supergraph.graphql
│       ├── supergraph.yaml
│       └── weather.graphql
├── LICENSE
├── macos-entitlements.plist
├── nix
│   ├── apollo-mcp.nix
│   ├── cargo-zigbuild.patch
│   ├── mcp-server-tools
│   │   ├── default.nix
│   │   ├── node-generated
│   │   │   ├── default.nix
│   │   │   ├── node-env.nix
│   │   │   └── node-packages.nix
│   │   ├── node-mcp-servers.json
│   │   └── README.md
│   └── mcphost.nix
├── README.md
├── rust-toolchain.toml
├── scripts
│   ├── nix
│   │   └── install.sh
│   └── windows
│       └── install.ps1
└── xtask
    ├── Cargo.lock
    ├── Cargo.toml
    └── src
        ├── commands
        │   ├── changeset
        │   │   ├── matching_pull_request.graphql
        │   │   ├── matching_pull_request.rs
        │   │   ├── mod.rs
        │   │   ├── scalars.rs
        │   │   └── snapshots
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title_and_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_neither_issues_or_prs.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_prs_in_title_when_empty_issues.snap
        │   │       └── xtask__commands__changeset__tests__it_templatizes_without_prs_in_title_when_issues_present.snap
        │   └── mod.rs
        ├── lib.rs
        └── main.rs
```

# Files

--------------------------------------------------------------------------------
/CHANGELOG_SECTION.md:
--------------------------------------------------------------------------------

```markdown
 1 | # [1.1.0] - 2025-10-16
 2 | 
 3 | ## ❗ BREAKING ❗
 4 | 
 5 | ### Change default port from 5000 to 8000 - @DaleSeo PR #417
 6 | 
 7 | The default server port has been changed from `5000` to `8000` to avoid conflicts with common development tools and services that typically use port 5000 (such as macOS AirPlay, Flask development servers, and other local services).
 8 | 
 9 | **Migration**: If you were relying on the default port 5000, you can continue using it by explicitly setting the port in your configuration file or command line arguments.
10 | 
11 | - Before 
12 | 
13 | ```yaml
14 | transport:
15 |   type: streamable_http
16 | ```
17 | 
18 | - After
19 | 
20 | ```yaml
21 | transport:
22 |   type: streamable_http
23 |   port: 5000
24 | ```
25 | 
26 | ## 🚀 Features
27 | 
28 | ### feat: Add configuration option for metric temporality - @swcollard PR #413
29 | 
30 | Creates a new configuration option for telemetry to set the Metric temporality to either Cumulative (default) or Delta.
31 | 
32 | * Cumulative - The metric value will be the overall value since the start of the measurement.
33 | * Delta - The metric will be the difference in the measurement since the last time it was reported.
34 | 
35 | Some observability vendors require one temporality over the other, so the MCP Server supports configuring it.
36 | 
37 | ### Add support for forwarding headers from MCP clients to GraphQL APIs - @DaleSeo PR #428
38 | 
39 | Adds opt-in support for dynamic header forwarding, which enables metadata for A/B testing, feature flagging, geo information from CDNs, or internal instrumentation to be sent from MCP clients to downstream GraphQL APIs. It automatically blocks hop-by-hop headers according to the guidelines in [RFC 7230, section 6.1](https://datatracker.ietf.org/doc/html/rfc7230#section-6.1), and it only works with the Streamable HTTP transport.
40 | 
41 | You can configure it using the `forward_headers` setting:
42 | 
43 | ```yaml
44 | forward_headers:
45 |   - x-tenant-id
46 |   - x-experiment-id
47 |   - x-geo-country
48 | ```
49 | 
50 | Please note that this feature is not intended for passing through credentials, as documented in the best practices page.
51 | 
52 | ### feat: Add mcp-session-id header to HTTP request trace attributes - @swcollard PR #421
53 | 
54 | Includes the value of the [Mcp-Session-Id](https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management) HTTP header as an attribute of the trace for HTTP requests to the MCP Server.
55 | 
56 | ## 🐛 Fixes
57 | 
58 | ### Fix compatibility issue with VSCode/Copilot - @DaleSeo PR #447
59 | 
60 | This updates Apollo MCP Server’s tool schemas from [Draft 2020-12](https://json-schema.org/draft/2020-12) to [Draft-07](https://json-schema.org/draft-07), which is more widely supported across validators. VSCode/Copilot still validate against Draft-07, so they rejected Apollo MCP Server’s tools. Our JSON schemas don’t rely on newer features, so downgrading improves compatibility across MCP clients with no practical impact.
61 | 
62 | ## 🛠 Maintenance
63 | 
64 | ### Update rmcp sdk to version 0.8.x - @swcollard PR #433 
65 | 
66 | Bumps the Rust MCP SDK version used in this server to 0.8.x.
67 | 
68 | ### chore: Only initialize a single HTTP client for graphql requests - @swcollard PR #412
69 | 
70 | Previously, the MCP Server spun up a new HTTP client for every request to the downstream GraphQL endpoint. This change creates a static reqwest client that is initialized via LazyLock and reused for each GraphQL request.
71 | 
72 | This change is based on the suggestion from the reqwest [documentation](https://docs.rs/reqwest/latest/reqwest/struct.Client.html)
73 | > "The Client holds a connection pool internally, so it is advised that you create one and reuse it."
74 | 
75 | 
```
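
The Draft-07 fix above ships without code in the changelog; as a hedged illustration, here is one way to emit Draft-07 schemas with schemars (which rmcp re-exports). This is a sketch assuming the schemars 0.8 `gen` API (paths differ in schemars 1.0); `Input` stands in for any tool input type.

```rust
use schemars::{JsonSchema, gen::SchemaSettings};

/// Stand-in for a tool input type (illustrative only).
#[derive(JsonSchema)]
struct Input {
    operation: String,
}

fn main() {
    // Generate a Draft-07 root schema instead of the default Draft 2020-12.
    let schema = SchemaSettings::draft07()
        .into_generator()
        .into_root_schema_for::<Input>();
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}
```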
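
The shared-client maintenance note also maps to a small amount of code. A minimal sketch of the pattern, assuming only `std::sync::LazyLock` and reqwest with its `json` feature enabled (the function and parameter names are illustrative):

```rust
use std::sync::LazyLock;

// One client for the whole process; reqwest::Client holds a connection
// pool internally, so reusing it avoids per-request setup costs.
static HTTP_CLIENT: LazyLock<reqwest::Client> = LazyLock::new(reqwest::Client::new);

async fn post_graphql(
    endpoint: &str,
    body: &serde_json::Value,
) -> reqwest::Result<reqwest::Response> {
    // Every call reuses the same client instead of constructing a new one.
    HTTP_CLIENT.post(endpoint).json(body).send().await
}
```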

--------------------------------------------------------------------------------
/scripts/windows/install.ps1:
--------------------------------------------------------------------------------

```
 1 | # Licensed under the MIT license
 2 | # <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
 3 | # option. This file may not be copied, modified, or distributed
 4 | # except according to those terms.
 5 | 
 6 | # Installs the latest version of the Apollo MCP Server.
 7 | # Specify a specific version to install with the $VERSION variable.
 8 | 
 9 | # Apollo MCP Server version defined in apollo-mcp-server's Cargo.toml
10 | # Note: Change this line manually during the release steps.
11 | $package_version = 'v1.1.1'
12 | 
13 | function Install-Binary($apollo_mcp_server_install_args) {
14 |   $old_erroractionpreference = $ErrorActionPreference
15 |   $ErrorActionPreference = 'stop'
16 | 
17 |   Initialize-Environment
18 | 
19 |   # If the VERSION env var is set, we use it instead
20 |   # of the version defined in Apollo MCP Server's cargo.toml
21 |   $download_version = if (Test-Path env:VERSION) {
22 |     $Env:VERSION
23 |   } else {
24 |     $package_version
25 |   }
26 | 
27 |   $exe = Download($download_version)
28 | 
29 |   Move-Item -Path $exe -Destination .
30 | 
31 |   Write-Host "Run `".\apollo-mcp-server.exe`" to start the server"
32 | 
33 |   $ErrorActionPreference = $old_erroractionpreference
34 | }
35 | 
36 | function Download($version) {
37 |   $binary_download_prefix = $env:APOLLO_ROVER_BINARY_DOWNLOAD_PREFIX
38 |   if (-not $binary_download_prefix) {
39 |     $binary_download_prefix = "https://github.com/apollographql/apollo-mcp-server/releases/download"
40 |   }
41 |   $url = "$binary_download_prefix/$version/apollo-mcp-server-$version-x86_64-pc-windows-msvc.tar.gz"
42 | 
43 |   # Remove credentials from the URL for logging
44 |   $safe_url = $url -replace "https://[^@]+@", "https://"
45 | 
 46 |   "Downloading Apollo MCP Server from $safe_url" | Out-Host
47 |   $tmp = New-Temp-Dir
48 |   $dir_path = "$tmp\apollo_mcp_server.tar.gz"
49 |   $wc = New-Object Net.Webclient
50 |   $wc.downloadFile($url, $dir_path)
51 |   tar -xkf $dir_path -C "$tmp"
52 |   return "$tmp\dist\apollo-mcp-server.exe"
53 | }
54 | 
55 | function Initialize-Environment() {
56 |   If (($PSVersionTable.PSVersion.Major) -lt 5) {
57 |     Write-Error "PowerShell 5 or later is required to install Apollo MCP Server."
58 |     Write-Error "Upgrade PowerShell: https://docs.microsoft.com/en-us/powershell/scripting/setup/installing-windows-powershell"
59 |     break
60 |   }
61 | 
62 |   # show notification to change execution policy:
63 |   $allowedExecutionPolicy = @('Unrestricted', 'RemoteSigned', 'ByPass')
64 |   If ((Get-ExecutionPolicy).ToString() -notin $allowedExecutionPolicy) {
65 |     Write-Error "PowerShell requires an execution policy in [$($allowedExecutionPolicy -join ", ")] to run Apollo MCP Server."
66 |     Write-Error "For example, to set the execution policy to 'RemoteSigned' please run :"
67 |     Write-Error "'Set-ExecutionPolicy RemoteSigned -scope CurrentUser'"
68 |     break
69 |   }
70 | 
71 |   # GitHub requires TLS 1.2
72 |   If ([System.Enum]::GetNames([System.Net.SecurityProtocolType]) -notcontains 'Tls12') {
73 |     Write-Error "Installing Apollo MCP Server requires at least .NET Framework 4.5"
74 |     Write-Error "Please download and install it first:"
75 |     Write-Error "https://www.microsoft.com/net/download"
76 |     break
77 |   }
78 | 
79 |   If (-Not (Get-Command 'tar')) {
80 |     Write-Error "The tar command is not installed on this machine. Please install tar before installing Apollo MCP Server"
81 |     # don't abort if invoked with iex that would close the PS session
82 |     If ($myinvocation.mycommand.commandtype -eq 'Script') { return } else { exit 1 }
83 |   }
84 | }
85 | 
86 | function New-Temp-Dir() {
87 |   [CmdletBinding(SupportsShouldProcess)]
88 |   param()
89 |   $parent = [System.IO.Path]::GetTempPath()
90 |   [string] $name = [System.Guid]::NewGuid()
91 |   New-Item -ItemType Directory -Path (Join-Path $parent $name)
92 | }
93 | 
94 | Install-Binary "$Args"
95 | 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/graphos.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::{ops::Not as _, time::Duration};
  2 | 
  3 | use apollo_mcp_registry::{
  4 |     platform_api::PlatformApiConfig,
  5 |     uplink::{Endpoints, SecretString, UplinkConfig},
  6 | };
  7 | use apollo_mcp_server::errors::ServerError;
  8 | use schemars::JsonSchema;
  9 | use serde::de::Error;
 10 | use serde::{Deserialize, Deserializer};
 11 | use url::Url;
 12 | 
 13 | #[cfg(test)]
 14 | use serde::Serialize;
 15 | 
 16 | const APOLLO_GRAPH_REF_ENV: &str = "APOLLO_GRAPH_REF";
 17 | const APOLLO_KEY_ENV: &str = "APOLLO_KEY";
 18 | 
 19 | fn apollo_uplink_endpoints_deserializer<'de, D>(deserializer: D) -> Result<Vec<Url>, D::Error>
 20 | where
 21 |     D: Deserializer<'de>,
 22 | {
 23 |     #[derive(Deserialize)]
 24 |     #[serde(untagged)]
 25 |     enum UrlListOrString {
 26 |         List(Vec<Url>),
 27 |         String(String),
 28 |     }
 29 | 
 30 |     match UrlListOrString::deserialize(deserializer)? {
 31 |         UrlListOrString::List(urls) => Ok(urls),
 32 |         UrlListOrString::String(s) => s
 33 |             .split(',')
 34 |             .map(|v| {
 35 |                 Url::parse(v.trim()).map_err(|e| {
 36 |                     D::Error::custom(format!("Could not parse uplink endpoint URL: {e}"))
 37 |                 })
 38 |             })
 39 |             .collect(),
 40 |     }
 41 | }
 42 | 
 43 | /// Credentials to use with GraphOS
 44 | #[derive(Debug, Deserialize, Default, JsonSchema)]
 45 | #[cfg_attr(test, derive(Serialize))]
 46 | #[serde(default)]
 47 | pub struct GraphOSConfig {
 48 |     /// The apollo key
 49 |     #[schemars(with = "Option<String>")]
 50 |     #[cfg_attr(test, serde(skip_serializing))]
 51 |     apollo_key: Option<SecretString>,
 52 | 
 53 |     /// The graph reference
 54 |     apollo_graph_ref: Option<String>,
 55 | 
 56 |     /// The URL to use for Apollo's registry
 57 |     apollo_registry_url: Option<Url>,
 58 | 
 59 |     /// List of uplink URL overrides
 60 |     #[serde(deserialize_with = "apollo_uplink_endpoints_deserializer")]
 61 |     apollo_uplink_endpoints: Vec<Url>,
 62 | }
 63 | 
 64 | impl GraphOSConfig {
 65 |     /// Extract the apollo graph reference from the config or from the current env
 66 |     #[allow(clippy::result_large_err)]
 67 |     pub fn graph_ref(&self) -> Result<String, ServerError> {
 68 |         self.apollo_graph_ref
 69 |             .clone()
 70 |             .ok_or_else(|| ServerError::EnvironmentVariable(APOLLO_GRAPH_REF_ENV.to_string()))
 71 |     }
 72 | 
 73 |     /// Extract the apollo key from the config or from the current env
 74 |     #[allow(clippy::result_large_err)]
 75 |     fn key(&self) -> Result<SecretString, ServerError> {
 76 |         self.apollo_key
 77 |             .clone()
 78 |             .ok_or_else(|| ServerError::EnvironmentVariable(APOLLO_KEY_ENV.to_string()))
 79 |     }
 80 | 
 81 |     /// Generate an uplink config based on configuration params
 82 |     #[allow(clippy::result_large_err)]
 83 |     pub fn uplink_config(&self) -> Result<UplinkConfig, ServerError> {
 84 |         let config = UplinkConfig {
 85 |             apollo_key: self.key()?,
 86 | 
 87 |             apollo_graph_ref: self.graph_ref()?,
 88 |             endpoints: self.apollo_uplink_endpoints.is_empty().not().then_some(
 89 |                 Endpoints::Fallback {
 90 |                     urls: self.apollo_uplink_endpoints.clone(),
 91 |                 },
 92 |             ),
 93 |             poll_interval: Duration::from_secs(10),
 94 |             timeout: Duration::from_secs(30),
 95 |         };
 96 | 
 97 |         Ok(config)
 98 |     }
 99 | 
100 |     /// Generate a platform API config based on configuration params
101 |     #[allow(clippy::result_large_err)]
102 |     pub fn platform_api_config(&self) -> Result<PlatformApiConfig, ServerError> {
103 |         let config = PlatformApiConfig::new(
104 |             self.apollo_key
105 |                 .clone()
106 |                 .ok_or(ServerError::EnvironmentVariable(APOLLO_KEY_ENV.to_string()))?,
107 |             Duration::from_secs(30),
108 |             Duration::from_secs(30),
109 |             self.apollo_registry_url.clone(),
110 |         );
111 | 
112 |         Ok(config)
113 |     }
114 | }
115 | 
```
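
The list-or-string deserializer above is the most reusable piece of this file. A self-contained restatement of the pattern (using serde_json for brevity and assuming url's `serde` feature), showing that a list and a comma-separated string produce the same result:

```rust
use serde::{Deserialize, Deserializer, de::Error as _};
use url::Url;

// Accept either a list of URLs or one comma-separated string of URLs.
fn urls_from_list_or_string<'de, D>(deserializer: D) -> Result<Vec<Url>, D::Error>
where
    D: Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum UrlListOrString {
        List(Vec<Url>),
        String(String),
    }

    match UrlListOrString::deserialize(deserializer)? {
        UrlListOrString::List(urls) => Ok(urls),
        UrlListOrString::String(s) => s
            .split(',')
            .map(|v| Url::parse(v.trim()).map_err(D::Error::custom))
            .collect(),
    }
}

#[derive(Deserialize)]
struct Endpoints {
    #[serde(deserialize_with = "urls_from_list_or_string")]
    urls: Vec<Url>,
}

fn main() {
    let from_list: Endpoints =
        serde_json::from_str(r#"{"urls": ["https://a.example/", "https://b.example/"]}"#).unwrap();
    let from_string: Endpoints =
        serde_json::from_str(r#"{"urls": "https://a.example/, https://b.example/"}"#).unwrap();
    assert_eq!(from_list.urls, from_string.urls);
}
```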

--------------------------------------------------------------------------------
/.github/workflows/release-container.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Build Release Container
  2 | on:
  3 |   push:
  4 |     tags:
  5 |       - "v[0-9]+.[0-9]+.[0-9]+"
  6 |       - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
  7 |   workflow_dispatch:
  8 |     inputs: &release_inputs
  9 |       version:
 10 |         description: Version to publish
 11 |         required: true
 12 |         type: string
 13 |   workflow_call:
 14 |     inputs: *release_inputs
 15 | 
 16 | env:
 17 |   REGISTRY: ghcr.io
 18 |   FQDN: ghcr.io/${{ github.repository }}
 19 |   VERSION: ${{ inputs.version || github.ref_name }}
 20 | 
 21 | jobs:
 22 |   # Build a container for x86_64 and aarch64 linux
 23 |   build:
 24 |     name: Release Container
 25 |     strategy:
 26 |       matrix:
 27 |         os: ["ubuntu-24.04", "ubuntu-24.04-arm"]
 28 |     runs-on: ${{ matrix.os }}
 29 |     permissions:
 30 |       contents: read
 31 |       packages: write
 32 |       attestations: write
 33 |       id-token: write
 34 |     steps:
 35 |       - uses: actions/checkout@v5
 36 |         with:
 37 |           ref: ${{ github.ref }}
 38 | 
 39 |       - uses: nixbuild/nix-quick-install-action@v30
 40 |         with:
 41 |           nix_conf: ${{ env.nix_conf }}
 42 |       - name: Restore and save Nix store
 43 |         uses: nix-community/cache-nix-action@v6
 44 |         with:
 45 |           primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
 46 |           restore-prefixes-first-match: build-${{ runner.os }}-
 47 |           # We don't want to affect the cache when building the container
 48 |           purge: false
 49 |           save: false
 50 | 
 51 |       - name: Log in to the Container registry
 52 |         uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
 53 |         with:
 54 |           registry: ${{ env.REGISTRY }}
 55 |           username: ${{ github.actor }}
 56 |           password: ${{ secrets.GITHUB_TOKEN }}
 57 | 
 58 |       - id: build
 59 |         name: Build Container
 60 |         shell: bash
 61 |         run: |
 62 |           nix run .#streamImage | docker image load
 63 |           echo "id=`docker image ls -q | head -n1`" >> $GITHUB_OUTPUT
 64 |           echo "arch=`docker image ls --format '{{ .Tag }}' | head -n1`" >> $GITHUB_OUTPUT
 65 | 
 66 |       - id: deploy
 67 |         name: Tag and push the container
 68 |         env:
 69 |           TAG: ${{ env.VERSION }}-${{ steps.build.outputs.arch }}
 70 |         run: |
 71 |           docker image tag "${{ steps.build.outputs.id }}" "$FQDN:$TAG"
 72 |           docker image push "$FQDN:$TAG"
 73 |           echo "digest=`docker manifest inspect $FQDN:$TAG --verbose | nix run --inputs-from .# nixpkgs#jq -- -r .Descriptor.digest`" >> $GITHUB_OUTPUT
 74 | 
 75 |       - name: Generate artifact attestation
 76 |         uses: actions/attest-build-provenance@v2
 77 |         with:
 78 |           subject-name: ${{ env.FQDN }}
 79 |           subject-digest: ${{ steps.deploy.outputs.digest }}
 80 |           push-to-registry: true
 81 | 
 82 |   bundle:
 83 |     name: Bundle into multiarch container
 84 |     needs: build
 85 |     runs-on: ubuntu-24.04
 86 |     steps:
 87 |       - name: Log in to the Container registry
 88 |         uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
 89 |         with:
 90 |           registry: ${{ env.REGISTRY }}
 91 |           username: ${{ github.actor }}
 92 |           password: ${{ secrets.GITHUB_TOKEN }}
 93 |       - name: Create multiarch manifest
 94 |         run: |
 95 |           docker manifest create $FQDN:$VERSION $FQDN:$VERSION-amd64 $FQDN:$VERSION-arm64
 96 |           docker manifest annotate $FQDN:$VERSION $FQDN:$VERSION-amd64 --arch amd64
 97 |           docker manifest annotate $FQDN:$VERSION $FQDN:$VERSION-arm64 --arch arm64
 98 | 
 99 |           docker manifest create $FQDN:latest $FQDN:$VERSION-amd64 $FQDN:$VERSION-arm64
100 |           docker manifest annotate $FQDN:latest $FQDN:$VERSION-amd64 --arch amd64
101 |           docker manifest annotate $FQDN:latest $FQDN:$VERSION-arm64 --arch arm64
102 |       - name: Push the multiarch manifests
103 |         shell: bash
104 |         run: |
105 |           docker manifest push $FQDN:$VERSION
106 |           
107 |           # push :latest only if version DOES NOT start with canary OR end with -rc.<digits>
108 |           if [[ ! "$VERSION" =~ (^canary|-rc\.[0-9]+$) ]]; then
109 |             docker manifest push $FQDN:latest
110 |           fi
111 | 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/logging.rs:
--------------------------------------------------------------------------------

```rust
  1 | #[macro_export]
  2 | /// This is a really simple macro to assert a snapshot of the logs.
  3 | /// To use it, call `.with_subscriber(assert_snapshot_subscriber!())` in your test just before calling `await`.
  4 | /// This will assert a snapshot of the logs in pretty YAML format.
  5 | /// You can also use `subscriber::with_default(assert_snapshot_subscriber!(), || { ... })` to assert the logs in non-async code.
  6 | macro_rules! assert_snapshot_subscriber {
  7 |     () => {
  8 |         $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, {})
  9 |     };
 10 | 
 11 |     ($redactions:tt) => {
 12 |         $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, $redactions)
 13 |     };
 14 | 
 15 |     ($level:expr) => {
 16 |         $crate::assert_snapshot_subscriber!($level, {})
 17 |     };
 18 | 
 19 |     ($level:expr, $redactions:tt) => {
 20 |         $crate::logging::test::SnapshotSubscriber::create_subscriber($level, |yaml| {
 21 |             insta::with_settings!({sort_maps => true}, {
 22 |                 // the tests here will force maps to sort
 23 |                 let mut settings = insta::Settings::clone_current();
 24 |                 settings.set_snapshot_suffix("logs");
 25 |                 settings.set_sort_maps(true);
 26 |                 settings.bind(|| {
 27 |                     insta::assert_yaml_snapshot!(yaml, $redactions);
 28 |                 });
 29 |             });
 30 |         })
 31 |     };
 32 | }
 33 | 
 34 | #[cfg(test)]
 35 | pub(crate) mod test {
 36 |     use std::sync::Arc;
 37 |     use std::sync::Mutex;
 38 | 
 39 |     use serde_json::Value;
 40 |     use tracing_core::LevelFilter;
 41 |     use tracing_core::Subscriber;
 42 |     use tracing_subscriber::layer::SubscriberExt;
 43 | 
 44 |     pub(crate) struct SnapshotSubscriber {
 45 |         buffer: Arc<Mutex<Vec<u8>>>,
 46 |         assertion: fn(Value),
 47 |     }
 48 | 
 49 |     impl std::io::Write for SnapshotSubscriber {
 50 |         fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
 51 |             let buf_len = buf.len();
 52 |             self.buffer.lock().unwrap().append(&mut buf.to_vec());
 53 |             Ok(buf_len)
 54 |         }
 55 | 
 56 |         fn flush(&mut self) -> std::io::Result<()> {
 57 |             Ok(())
 58 |         }
 59 |     }
 60 | 
 61 |     impl Drop for SnapshotSubscriber {
 62 |         fn drop(&mut self) {
 63 |             let log = String::from_utf8(self.buffer.lock().unwrap().to_vec()).unwrap();
 64 |             let parsed: Value = if log.is_empty() {
 65 |                 serde_json::json!([])
 66 |             } else {
 67 |                 let parsed_log: Vec<Value> = log
 68 |                     .lines()
 69 |                     .map(|line| {
 70 |                         let mut line: Value = serde_json::from_str(line).unwrap();
 71 |                         // move the message field to the top level
 72 |                         let fields = line
 73 |                             .as_object_mut()
 74 |                             .unwrap()
 75 |                             .get_mut("fields")
 76 |                             .unwrap()
 77 |                             .as_object_mut()
 78 |                             .unwrap();
 79 |                         let message = fields.remove("message").unwrap_or_default();
 80 |                         line.as_object_mut()
 81 |                             .unwrap()
 82 |                             .insert("message".to_string(), message);
 83 |                         line
 84 |                     })
 85 |                     .collect();
 86 |                 serde_json::json!(parsed_log)
 87 |             };
 88 | 
 89 |             (self.assertion)(parsed)
 90 |         }
 91 |     }
 92 | 
 93 |     impl SnapshotSubscriber {
 94 |         pub(crate) fn create_subscriber(
 95 |             level: LevelFilter,
 96 |             assertion: fn(Value),
 97 |         ) -> impl Subscriber {
 98 |             let collector = Self {
 99 |                 buffer: Arc::new(Mutex::new(Vec::new())),
100 |                 assertion,
101 |             };
102 | 
103 |             tracing_subscriber::registry::Registry::default()
104 |                 .with(level)
105 |                 .with(
106 |                     tracing_subscriber::fmt::Layer::default()
107 |                         .json()
108 |                         .without_time()
109 |                         .with_target(false)
110 |                         .with_file(false)
111 |                         .with_line_number(false)
112 |                         .with_writer(Mutex::new(collector)),
113 |                 )
114 |         }
115 |     }
116 | }
117 | 
```
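
The macro's doc comment describes two call styles; a sketch of the synchronous one, assuming the `tracing` crate is available and the test lives inside this crate where `SnapshotSubscriber` is visible (the log message is illustrative):

```rust
#[test]
fn snapshot_of_logs() {
    // Runs the closure with the snapshot subscriber installed; the
    // subscriber's Drop impl then asserts the YAML snapshot of captured logs.
    tracing::subscriber::with_default(assert_snapshot_subscriber!(), || {
        tracing::info!("fetched schema from uplink");
    });
}
```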

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/logging.rs:
--------------------------------------------------------------------------------

```rust
  1 | //! Logging config and utilities
  2 | //!
  3 | //! This module is only used by the main binary and provides logging config structures and setup
  4 | //! helper functions
  5 | 
  6 | mod defaults;
  7 | mod log_rotation_kind;
  8 | mod parsers;
  9 | 
 10 | use log_rotation_kind::LogRotationKind;
 11 | use schemars::JsonSchema;
 12 | use serde::Deserialize;
 13 | use std::path::PathBuf;
 14 | use tracing::Level;
 15 | use tracing_appender::rolling::RollingFileAppender;
 16 | use tracing_subscriber::EnvFilter;
 17 | use tracing_subscriber::fmt::Layer;
 18 | use tracing_subscriber::fmt::writer::BoxMakeWriter;
 19 | 
 20 | /// Logging related options
 21 | #[derive(Debug, Deserialize, JsonSchema)]
 22 | pub struct Logging {
 23 |     /// The log level to use for tracing
 24 |     #[serde(
 25 |         default = "defaults::log_level",
 26 |         deserialize_with = "parsers::from_str"
 27 |     )]
 28 |     #[schemars(schema_with = "level")]
 29 |     pub level: Level,
 30 | 
 31 |     /// The output path to use for logging
 32 |     #[serde(default)]
 33 |     pub path: Option<PathBuf>,
 34 | 
 35 |     /// Log file rotation period to use when log file path provided
 36 |     /// [default: Hourly]
 37 |     #[serde(default = "defaults::default_rotation")]
 38 |     pub rotation: LogRotationKind,
 39 | }
 40 | 
 41 | impl Default for Logging {
 42 |     fn default() -> Self {
 43 |         Self {
 44 |             level: defaults::log_level(),
 45 |             path: None,
 46 |             rotation: defaults::default_rotation(),
 47 |         }
 48 |     }
 49 | }
 50 | 
 51 | type LoggingLayerResult = (
 52 |     Layer<
 53 |         tracing_subscriber::Registry,
 54 |         tracing_subscriber::fmt::format::DefaultFields,
 55 |         tracing_subscriber::fmt::format::Format,
 56 |         BoxMakeWriter,
 57 |     >,
 58 |     Option<tracing_appender::non_blocking::WorkerGuard>,
 59 | );
 60 | 
 61 | impl Logging {
 62 |     pub fn env_filter(logging: &Logging) -> Result<EnvFilter, anyhow::Error> {
 63 |         let mut env_filter = EnvFilter::from_default_env().add_directive(logging.level.into());
 64 | 
 65 |         if logging.level == Level::INFO {
 66 |             env_filter = env_filter
 67 |                 .add_directive("rmcp=warn".parse()?)
 68 |                 .add_directive("tantivy=warn".parse()?);
 69 |         }
 70 |         Ok(env_filter)
 71 |     }
 72 | 
 73 |     pub fn logging_layer(logging: &Logging) -> Result<LoggingLayerResult, anyhow::Error> {
 74 |         macro_rules! log_error {
 75 |             () => {
 76 |                 |e| eprintln!("Failed to setup logging: {e:?}")
 77 |             };
 78 |         }
 79 | 
 80 |         let (writer, guard, with_ansi) = match logging.path.clone() {
 81 |             Some(path) => std::fs::create_dir_all(&path)
 82 |                 .map(|_| path)
 83 |                 .inspect_err(log_error!())
 84 |                 .ok()
 85 |                 .and_then(|path| {
 86 |                     RollingFileAppender::builder()
 87 |                         .rotation(logging.rotation.clone().into())
 88 |                         .filename_prefix("apollo_mcp_server")
 89 |                         .filename_suffix("log")
 90 |                         .build(path)
 91 |                         .inspect_err(log_error!())
 92 |                         .ok()
 93 |                 })
 94 |                 .map(|appender| {
 95 |                     let (non_blocking_appender, guard) = tracing_appender::non_blocking(appender);
 96 |                     (
 97 |                         BoxMakeWriter::new(non_blocking_appender),
 98 |                         Some(guard),
 99 |                         false,
100 |                     )
101 |                 })
102 |                 .unwrap_or_else(|| {
103 |                     eprintln!("Log file setup failed - falling back to stderr");
104 |                     (BoxMakeWriter::new(std::io::stderr), None, true)
105 |                 }),
106 |             None => (BoxMakeWriter::new(std::io::stdout), None, true),
107 |         };
108 | 
109 |         Ok((
110 |             tracing_subscriber::fmt::layer()
111 |                 .with_writer(writer)
112 |                 .with_ansi(with_ansi)
113 |                 .with_target(false),
114 |             guard,
115 |         ))
116 |     }
117 | }
118 | 
119 | fn level(generator: &mut schemars::SchemaGenerator) -> schemars::Schema {
120 |     /// Log level
121 |     #[derive(JsonSchema)]
122 |     #[schemars(rename_all = "lowercase")]
123 |     // This is just an intermediate type to auto create schema information for,
124 |     // so it is OK if it is never used
125 |     #[allow(dead_code)]
126 |     enum Level {
127 |         Trace,
128 |         Debug,
129 |         Info,
130 |         Warn,
131 |         Error,
132 |     }
133 | 
134 |     Level::json_schema(generator)
135 | }
136 | 
```
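
Neither helper installs anything globally. A hypothetical wiring sketch under that assumption (the real binary's setup may differ), composing the filter and layer into a subscriber and keeping the worker guard alive:

```rust
use tracing_subscriber::prelude::*;

fn init_logging(
    logging: &Logging,
) -> Result<Option<tracing_appender::non_blocking::WorkerGuard>, anyhow::Error> {
    let (layer, guard) = Logging::logging_layer(logging)?;
    tracing_subscriber::registry()
        .with(layer) // fmt layer writing to stdout, stderr, or a rolling file
        .with(Logging::env_filter(logging)?) // level plus rmcp/tantivy directives
        .init();
    // The guard must be held for the life of the program, or buffered log
    // lines may be dropped when the non-blocking file writer is in use.
    Ok(guard)
}
```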

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/config.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::path::PathBuf;
  2 | 
  3 | use apollo_mcp_server::{
  4 |     cors::CorsConfig, headers::ForwardHeaders, health::HealthCheckConfig, server::Transport,
  5 | };
  6 | use reqwest::header::HeaderMap;
  7 | use schemars::JsonSchema;
  8 | use serde::Deserialize;
  9 | use url::Url;
 10 | 
 11 | use super::{
 12 |     OperationSource, SchemaSource, endpoint::Endpoint, graphos::GraphOSConfig,
 13 |     introspection::Introspection, logging::Logging, overrides::Overrides, telemetry::Telemetry,
 14 | };
 15 | 
 16 | /// Configuration for the MCP server
 17 | #[derive(Debug, Default, Deserialize, JsonSchema)]
 18 | #[serde(default)]
 19 | pub struct Config {
 20 |     /// CORS configuration
 21 |     pub cors: CorsConfig,
 22 | 
 23 |     /// Path to a custom scalar map
 24 |     pub custom_scalars: Option<PathBuf>,
 25 | 
 26 |     /// The target GraphQL endpoint
 27 |     #[schemars(schema_with = "Url::json_schema")]
 28 |     pub endpoint: Endpoint,
 29 | 
 30 |     /// Apollo-specific credential overrides
 31 |     pub graphos: GraphOSConfig,
 32 | 
 33 |     /// List of hard-coded headers to include in all GraphQL requests
 34 |     #[serde(deserialize_with = "parsers::map_from_str")]
 35 |     #[schemars(schema_with = "super::schemas::header_map")]
 36 |     pub headers: HeaderMap,
 37 | 
 38 |     /// List of header names to forward from MCP client requests to GraphQL requests
 39 |     #[serde(default)]
 40 |     pub forward_headers: ForwardHeaders,
 41 | 
 42 |     /// Health check configuration
 43 |     #[serde(default)]
 44 |     pub health_check: HealthCheckConfig,
 45 | 
 46 |     /// Introspection configuration
 47 |     pub introspection: Introspection,
 48 | 
 49 |     /// Logging configuration
 50 |     pub logging: Logging,
 51 | 
 52 |     /// Telemetry configuration
 53 |     pub telemetry: Telemetry,
 54 | 
 55 |     /// Operations
 56 |     pub operations: OperationSource,
 57 | 
 58 |     /// Overrides for server behaviour
 59 |     pub overrides: Overrides,
 60 | 
 61 |     /// The schema to load for operations
 62 |     pub schema: SchemaSource,
 63 | 
 64 |     /// The type of server transport to use
 65 |     pub transport: Transport,
 66 | }
 67 | 
 68 | mod parsers {
 69 |     use std::str::FromStr;
 70 | 
 71 |     use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
 72 |     use serde::Deserializer;
 73 | 
 74 |     pub(super) fn map_from_str<'de, D>(deserializer: D) -> Result<HeaderMap, D::Error>
 75 |     where
 76 |         D: Deserializer<'de>,
 77 |     {
 78 |         struct MapFromStrVisitor;
 79 |         impl<'de> serde::de::Visitor<'de> for MapFromStrVisitor {
 80 |             type Value = HeaderMap;
 81 | 
 82 |             fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
 83 |                 formatter.write_str("a map of header string keys and values")
 84 |             }
 85 | 
 86 |             fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
 87 |             where
 88 |                 A: serde::de::MapAccess<'de>,
 89 |             {
 90 |                 let mut parsed = HeaderMap::with_capacity(map.size_hint().unwrap_or(0));
 91 | 
 92 |                 // While there are entries remaining in the input, add them
 93 |                 // into our map.
 94 |                 while let Some((key, value)) = map.next_entry::<String, String>()? {
 95 |                     let key = HeaderName::from_str(&key)
 96 |                         .map_err(|e| serde::de::Error::custom(e.to_string()))?;
 97 |                     let value = HeaderValue::from_str(&value)
 98 |                         .map_err(|e| serde::de::Error::custom(e.to_string()))?;
 99 | 
100 |                     parsed.insert(key, value);
101 |                 }
102 | 
103 |                 Ok(parsed)
104 |             }
105 |         }
106 | 
107 |         deserializer.deserialize_map(MapFromStrVisitor)
108 |     }
109 | }
110 | 
111 | #[cfg(test)]
112 | mod test {
113 |     use super::Config;
114 | 
115 |     #[test]
116 |     fn it_parses_a_minimal_config() {
117 |         serde_json::from_str::<Config>("{}").unwrap();
118 |     }
119 | 
120 |     #[test]
121 |     fn it_contains_no_keys_with_double_underscore() {
122 |         // The env functionality of the config expansion uses __ as a split key
123 |         // when determining nested fields of any of the fields of the Config.
124 |         // This test ensures that a field name isn't added that can no longer be
125 |         // configured using the env extractor.
126 |         //
127 |         // See [runtime::read_config]
128 |         //
129 |         // TODO: This is a quick hack since traversing the nested (untyped) schema
130 |         // object is probably overkill.
131 |         let schema = schemars::schema_for!(Config).to_value().to_string();
132 | 
133 |         assert!(!schema.contains("__"))
134 |     }
135 | }
136 | 
```
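
A hypothetical companion to the tests above, exercising the custom header-map deserializer (the header name and value are illustrative):

```rust
#[test]
fn it_parses_headers() {
    let config: Config =
        serde_json::from_str(r#"{"headers": {"x-service-token": "abc123"}}"#).unwrap();

    // map_from_str parsed the string pair into a HeaderName/HeaderValue entry.
    let value = config.headers.get("x-service-token").unwrap();
    assert_eq!(value.to_str().unwrap(), "abc123");
}
```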

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/validate.rs:
--------------------------------------------------------------------------------

```rust
  1 | use crate::errors::McpError;
  2 | use crate::operations::operation_defs;
  3 | use crate::schema_from_type;
  4 | use apollo_compiler::Schema;
  5 | use apollo_compiler::parser::Parser;
  6 | use apollo_compiler::validation::Valid;
  7 | use rmcp::model::CallToolResult;
  8 | use rmcp::model::Content;
  9 | use rmcp::model::{ErrorCode, Tool};
 10 | use rmcp::schemars::JsonSchema;
 11 | use rmcp::serde_json::Value;
 12 | use rmcp::{schemars, serde_json};
 13 | use serde::Deserialize;
 14 | use std::sync::Arc;
 15 | use tokio::sync::Mutex;
 16 | 
 17 | /// The name of the tool to validate an ad hoc GraphQL operation
 18 | pub const VALIDATE_TOOL_NAME: &str = "validate";
 19 | 
 20 | #[derive(Clone)]
 21 | pub struct Validate {
 22 |     pub tool: Tool,
 23 |     schema: Arc<Mutex<Valid<Schema>>>,
 24 | }
 25 | 
 26 | /// Input for the validate tool
 27 | #[derive(JsonSchema, Deserialize, Debug)]
 28 | pub struct Input {
 29 |     /// The GraphQL operation
 30 |     operation: String,
 31 | }
 32 | 
 33 | impl Validate {
 34 |     pub fn new(schema: Arc<Mutex<Valid<Schema>>>) -> Self {
 35 |         Self {
 36 |             schema,
 37 |             tool: Tool::new(
 38 |                 VALIDATE_TOOL_NAME,
 39 |                 "Validates a GraphQL operation against the schema. \
 40 |                 Use the `introspect` tool first to get information about the GraphQL schema. \
 41 |                 Operations should be validated prior to calling the `execute` tool.",
 42 |                 schema_from_type!(Input),
 43 |             ),
 44 |         }
 45 |     }
 46 | 
 47 |     /// Validates the provided GraphQL query
 48 |     #[tracing::instrument(skip(self))]
 49 |     pub async fn execute(&self, input: Value) -> Result<CallToolResult, McpError> {
 50 |         let input = serde_json::from_value::<Input>(input).map_err(|_| {
 51 |             McpError::new(ErrorCode::INVALID_PARAMS, "Invalid input".to_string(), None)
 52 |         })?;
 53 | 
 54 |         operation_defs(&input.operation, true, None)
 55 |             .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?
 56 |             .ok_or_else(|| {
 57 |                 McpError::new(
 58 |                     ErrorCode::INVALID_PARAMS,
 59 |                     "Invalid operation type".to_string(),
 60 |                     None,
 61 |                 )
 62 |             })?;
 63 | 
 64 |         let schema_guard = self.schema.lock().await;
 65 |         Parser::new()
 66 |             .parse_executable(&schema_guard, input.operation.as_str(), "operation.graphql")
 67 |             .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?
 68 |             .validate(&schema_guard)
 69 |             .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?;
 70 |         Ok(CallToolResult {
 71 |             content: vec![Content::text("Operation is valid")],
 72 |             is_error: None,
 73 |             meta: None,
 74 | 
 75 |             // Note: We don't really return any meaningful content to the client here, so we can leave the
 76 |             // structured content as none.
 77 |             structured_content: None,
 78 |         })
 79 |     }
 80 | }
 81 | 
 82 | #[cfg(test)]
 83 | mod tests {
 84 |     use serde_json::json;
 85 | 
 86 |     use super::*;
 87 |     static SCHEMA: std::sync::LazyLock<Arc<Mutex<Valid<Schema>>>> =
 88 |         std::sync::LazyLock::new(|| {
 89 |             Arc::new(Mutex::new(
 90 |                 Schema::parse_and_validate(
 91 |                     "type Query { id: ID! hello(name: String!): String! }",
 92 |                     "schema.graphql",
 93 |                 )
 94 |                 .unwrap(),
 95 |             ))
 96 |         });
 97 | 
 98 |     #[tokio::test]
 99 |     async fn validate_valid_query() {
100 |         let validate = Validate::new(SCHEMA.clone());
101 |         let input = json!({
102 |             "operation": "query Test { id }"
103 |         });
104 |         assert!(validate.execute(input).await.is_ok());
105 |     }
106 | 
107 |     #[tokio::test]
108 |     async fn validate_invalid_graphql_query() {
109 |         let validate = Validate::new(SCHEMA.clone());
110 |         let input = json!({
111 |             "operation": "query {"
112 |         });
113 |         assert!(validate.execute(input).await.is_err());
114 |     }
115 | 
116 |     #[tokio::test]
117 |     async fn validate_invalid_query_field() {
118 |         let validate = Validate::new(SCHEMA.clone());
119 |         let input = json!({
120 |             "operation": "query { invalidField }"
121 |         });
122 |         assert!(validate.execute(input).await.is_err());
123 |     }
124 | 
125 |     #[tokio::test]
126 |     async fn validate_invalid_argument() {
127 |         let validate = Validate::new(SCHEMA.clone());
128 |         let input = json!({
129 |             "operation": "query { hello }"
130 |         });
131 |         assert!(validate.execute(input).await.is_err());
132 |     }
133 | }
134 | 
```
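
A standalone sketch of the two apollo-compiler calls the tool chains together (the schema and operation text are illustrative): parsing succeeds, but validation rejects the missing required argument.

```rust
use apollo_compiler::{Schema, parser::Parser};

fn main() {
    let schema = Schema::parse_and_validate(
        "type Query { id: ID! hello(name: String!): String! }",
        "schema.graphql",
    )
    .expect("schema is valid");

    // `hello` is missing its required `name` argument, so this parses
    // but fails validation, mirroring the tool's INVALID_PARAMS path.
    let result = Parser::new()
        .parse_executable(&schema, "query { hello }", "operation.graphql")
        .expect("operation parses")
        .validate(&schema);
    assert!(result.is_err());
}
```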

--------------------------------------------------------------------------------
/xtask/src/commands/changeset/matching_pull_request.rs:
--------------------------------------------------------------------------------

```rust
  1 | // THIS FILE IS GENERATED
  2 | // THIS FILE IS GENERATED
  3 | // THIS FILE IS GENERATED
  4 | // See the instructions in `./mod.rs` for how to regenerate it.  It is
  5 | // generated based on the operation that sits alongside it in this same file.
  6 | // Unfortunately, this comment will not be preserved and needs to be manually
  7 | // preserved if it's desired to keep it around.  Luckily, I don't think this
  8 | // operation will change very often.
  9 | // THIS FILE IS GENERATED
 10 | // THIS FILE IS GENERATED
 11 | // THIS FILE IS GENERATED
 12 | 
 13 | #![allow(clippy::all, warnings)]
 14 | pub struct MatchingPullRequest;
 15 | pub mod matching_pull_request {
 16 |     #![allow(dead_code)]
 17 |     use std::result::Result;
 18 |     pub const OPERATION_NAME: &str = "MatchingPullRequest";
 19 |     pub const QUERY : & str = "# This operation is used to generate Rust code which lives in a file directly\n# next to this with the same name but a `.rs` extension.  For instructions on\n# how to generate the code, see the top of `./mod.rs`.\nfragment PrInfo on PullRequest {\n  url\n  number\n  author {\n    __typename\n    login\n  }\n  title\n  closingIssuesReferences(last: 4) {\n    nodes {\n      url\n      number\n      repository {\n        nameWithOwner\n      }\n    }\n  }\n  body\n}\nfragment PrSearchResult on SearchResultItemConnection {\n  issueCount\n  nodes {\n    __typename\n    ...PrInfo\n  }\n }\n\nquery MatchingPullRequest($search: String!) {\n  search(\n    type: ISSUE\n    query: $search\n    first: 1\n  ) {\n    ...PrSearchResult\n  }\n}\n" ;
 20 |     use serde::Deserialize;
 21 |     use serde::Serialize;
 22 | 
 23 |     use super::*;
 24 |     #[allow(dead_code)]
 25 |     type Boolean = bool;
 26 |     #[allow(dead_code)]
 27 |     type Float = f64;
 28 |     #[allow(dead_code)]
 29 |     type Int = i64;
 30 |     #[allow(dead_code)]
 31 |     type ID = String;
 32 |     type URI = crate::commands::changeset::scalars::URI;
 33 |     #[derive(Serialize)]
 34 |     pub struct Variables {
 35 |         pub search: String,
 36 |     }
 37 |     impl Variables {}
 38 |     #[derive(Deserialize, Debug)]
 39 |     pub struct PrInfo {
 40 |         pub url: URI,
 41 |         pub number: Int,
 42 |         pub author: Option<PrInfoAuthor>,
 43 |         pub title: String,
 44 |         #[serde(rename = "closingIssuesReferences")]
 45 |         pub closing_issues_references: Option<PrInfoClosingIssuesReferences>,
 46 |         pub body: String,
 47 |     }
 48 |     #[derive(Deserialize, Debug)]
 49 |     pub struct PrInfoAuthor {
 50 |         pub login: String,
 51 |         #[serde(flatten)]
 52 |         pub on: PrInfoAuthorOn,
 53 |     }
 54 |     #[derive(Deserialize, Debug)]
 55 |     #[serde(tag = "__typename")]
 56 |     pub enum PrInfoAuthorOn {
 57 |         Bot,
 58 |         EnterpriseUserAccount,
 59 |         Mannequin,
 60 |         Organization,
 61 |         User,
 62 |     }
 63 |     #[derive(Deserialize, Debug)]
 64 |     pub struct PrInfoClosingIssuesReferences {
 65 |         pub nodes: Option<Vec<Option<PrInfoClosingIssuesReferencesNodes>>>,
 66 |     }
 67 |     #[derive(Deserialize, Debug)]
 68 |     pub struct PrInfoClosingIssuesReferencesNodes {
 69 |         pub url: URI,
 70 |         pub number: Int,
 71 |         pub repository: PrInfoClosingIssuesReferencesNodesRepository,
 72 |     }
 73 |     #[derive(Deserialize, Debug)]
 74 |     pub struct PrInfoClosingIssuesReferencesNodesRepository {
 75 |         #[serde(rename = "nameWithOwner")]
 76 |         pub name_with_owner: String,
 77 |     }
 78 |     #[derive(Deserialize, Debug)]
 79 |     pub struct PrSearchResult {
 80 |         #[serde(rename = "issueCount")]
 81 |         pub issue_count: Int,
 82 |         pub nodes: Option<Vec<Option<PrSearchResultNodes>>>,
 83 |     }
 84 |     #[derive(Deserialize, Debug)]
 85 |     #[serde(tag = "__typename")]
 86 |     pub enum PrSearchResultNodes {
 87 |         App,
 88 |         Discussion,
 89 |         Issue,
 90 |         MarketplaceListing,
 91 |         Organization,
 92 |         PullRequest(PrSearchResultNodesOnPullRequest),
 93 |         Repository,
 94 |         User,
 95 |     }
 96 |     pub type PrSearchResultNodesOnPullRequest = PrInfo;
 97 |     #[derive(Deserialize, Debug)]
 98 |     pub struct ResponseData {
 99 |         pub search: MatchingPullRequestSearch,
100 |     }
101 |     pub type MatchingPullRequestSearch = PrSearchResult;
102 | }
103 | impl graphql_client::GraphQLQuery for MatchingPullRequest {
104 |     type Variables = matching_pull_request::Variables;
105 |     type ResponseData = matching_pull_request::ResponseData;
106 |     fn build_query(variables: Self::Variables) -> ::graphql_client::QueryBody<Self::Variables> {
107 |         graphql_client::QueryBody {
108 |             variables,
109 |             query: matching_pull_request::QUERY,
110 |             operation_name: matching_pull_request::OPERATION_NAME,
111 |         }
112 |     }
113 | }
114 | 
```
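
A hedged usage sketch for the generated types: `build_query` produces the `QueryBody` that a caller would serialize and POST to the GitHub GraphQL API (the search string here is illustrative, not the real caller's query).

```rust
use graphql_client::GraphQLQuery;

fn request_body() -> graphql_client::QueryBody<matching_pull_request::Variables> {
    MatchingPullRequest::build_query(matching_pull_request::Variables {
        // Hypothetical search; the real caller builds this string itself.
        search: "repo:apollographql/apollo-mcp-server is:pr is:merged abc1234".to_string(),
    })
}
```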

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries.rs:
--------------------------------------------------------------------------------

```rust
  1 | use graphql_client::GraphQLQuery;
  2 | 
  3 | pub mod event;
  4 | mod manifest;
  5 | mod manifest_poller;
  6 | 
  7 | pub use manifest::FullPersistedQueryOperationId;
  8 | pub use manifest::ManifestOperation;
  9 | pub use manifest::PersistedQueryManifest;
 10 | pub use manifest::SignedUrlChunk;
 11 | pub use manifest_poller::ManifestSource;
 12 | pub use manifest_poller::PersistedQueryManifestPollerState;
 13 | 
 14 | use crate::uplink::UplinkRequest;
 15 | use crate::uplink::UplinkResponse;
 16 | 
 17 | /// Persisted query manifest query definition
 18 | #[derive(GraphQLQuery)]
 19 | #[graphql(
 20 |     query_path = "src/uplink/persisted_queries/persisted_queries_manifest_query.graphql",
 21 |     schema_path = "src/uplink/uplink.graphql",
 22 |     request_derives = "Debug",
 23 |     response_derives = "PartialEq, Debug, Deserialize",
 24 |     deprecated = "warn"
 25 | )]
 26 | pub struct PersistedQueriesManifestQuery;
 27 | 
 28 | impl From<UplinkRequest> for persisted_queries_manifest_query::Variables {
 29 |     fn from(req: UplinkRequest) -> Self {
 30 |         persisted_queries_manifest_query::Variables {
 31 |             api_key: req.api_key,
 32 |             graph_ref: req.graph_ref,
 33 |             if_after_id: req.id,
 34 |         }
 35 |     }
 36 | }
 37 | 
 38 | #[derive(Debug, Clone, Eq, PartialEq)]
 39 | pub struct PersistedQueriesManifestChunk {
 40 |     pub id: String,
 41 |     pub urls: Vec<String>,
 42 | }
 43 | 
 44 | impl PersistedQueriesManifestChunk {
 45 |     fn from_query_chunks(
 46 |         query_chunks: &persisted_queries_manifest_query::PersistedQueriesManifestQueryPersistedQueriesOnPersistedQueriesResultChunks,
 47 |     ) -> Self {
 48 |         Self {
 49 |             id: query_chunks.id.clone(),
 50 |             urls: query_chunks.urls.clone(),
 51 |         }
 52 |     }
 53 | }
 54 | 
 55 | pub type PersistedQueriesManifestChunks = Vec<PersistedQueriesManifestChunk>;
 56 | pub type MaybePersistedQueriesManifestChunks = Option<PersistedQueriesManifestChunks>;
 57 | 
 58 | impl From<persisted_queries_manifest_query::ResponseData>
 59 |     for UplinkResponse<MaybePersistedQueriesManifestChunks>
 60 | {
 61 |     fn from(response: persisted_queries_manifest_query::ResponseData) -> Self {
 62 |         use persisted_queries_manifest_query::FetchErrorCode;
 63 |         use persisted_queries_manifest_query::PersistedQueriesManifestQueryPersistedQueries;
 64 | 
 65 |         match response.persisted_queries {
 66 |             PersistedQueriesManifestQueryPersistedQueries::PersistedQueriesResult(response) => {
 67 |                 if let Some(chunks) = response.chunks {
 68 |                     let chunks = chunks
 69 |                         .iter()
 70 |                         .map(PersistedQueriesManifestChunk::from_query_chunks)
 71 |                         .collect();
 72 |                     UplinkResponse::New {
 73 |                         response: Some(chunks),
 74 |                         id: response.id,
 75 |                         // this will truncate the number of seconds to under u64::MAX, which should be
 76 |                         // a large enough delay anyway
 77 |                         delay: response.min_delay_seconds as u64,
 78 |                     }
 79 |                 } else {
 80 |                     UplinkResponse::New {
 81 |                         // no persisted query list is associated with this variant
 82 |                         response: None,
 83 |                         id: response.id,
 84 |                         delay: response.min_delay_seconds as u64,
 85 |                     }
 86 |                 }
 87 |             }
 88 |             PersistedQueriesManifestQueryPersistedQueries::Unchanged(response) => {
 89 |                 UplinkResponse::Unchanged {
 90 |                     id: Some(response.id),
 91 |                     delay: Some(response.min_delay_seconds as u64),
 92 |                 }
 93 |             }
 94 |             PersistedQueriesManifestQueryPersistedQueries::FetchError(err) => {
 95 |                 UplinkResponse::Error {
 96 |                     retry_later: err.code == FetchErrorCode::RETRY_LATER,
 97 |                     code: match err.code {
 98 |                         FetchErrorCode::AUTHENTICATION_FAILED => {
 99 |                             "AUTHENTICATION_FAILED".to_string()
100 |                         }
101 |                         FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
102 |                         FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
103 |                         FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
104 |                         FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
105 |                             "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
106 |                         }
107 |                         FetchErrorCode::Other(other) => other,
108 |                     },
109 |                     message: err.message,
110 |                 }
111 |             }
112 |         }
113 |     }
114 | }
115 | 
```
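
A note on the shape above: the `From` conversions are what keep the generic uplink poller agnostic of the persisted-query payload. It builds `Variables` from an `UplinkRequest` and then branches on the resulting `UplinkResponse`. The sketch below illustrates that consuming side with a simplified local mirror of the enum (the real poller lives in `manifest_poller.rs`; the doubling backoff is an illustrative choice, not the shipped behavior):

```rust
// Simplified local mirror of UplinkResponse<T>; shown only to illustrate the
// polling control flow implied by the conversions above.
enum UplinkResponse<T> {
    New { response: T, id: String, delay: u64 },
    Unchanged { id: Option<String>, delay: Option<u64> },
    Error { retry_later: bool, code: String, message: String },
}

/// Pick the delay before the next poll, given the current cadence.
fn next_poll_delay<T>(resp: &UplinkResponse<T>, current: u64) -> u64 {
    match resp {
        // A new manifest carries the server-provided minimum delay
        UplinkResponse::New { delay, .. } => *delay,
        // An unchanged response may still update the delay
        UplinkResponse::Unchanged { delay, .. } => delay.unwrap_or(current),
        // RETRY_LATER backs off; other errors keep the current cadence
        UplinkResponse::Error { retry_later, .. } => {
            if *retry_later { current.saturating_mul(2) } else { current }
        }
    }
}
```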

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Nix CI
  2 | on:
  3 |   push:
  4 |     # don't run on tags, run on commits
  5 |     # https://github.com/orgs/community/discussions/25615
  6 |     tags-ignore:
  7 |       - "**"
  8 |     branches:
  9 |       - main
 10 |       - develop
 11 |   pull_request:
 12 |   workflow_dispatch:
 13 | 
 14 | env:
 15 |   # We want the cache to be as full as possible, so we instruct nix to keep derivations
 16 |   # and other related outputs around in its cache
 17 |   nix_conf: |
 18 |     keep-env-derivations = true
 19 |     keep-outputs = true
 20 | 
 21 | jobs:
 22 |   # Cache the nix store so that subsequent runs are almost instantaneous
 23 |   # See https://github.com/marketplace/actions/restore-and-save-nix-store#inputs
 24 |   cache:
 25 |     name: Cache nix store
 26 |     runs-on: ubuntu-24.04
 27 |     permissions:
 28 |       actions: write
 29 |       contents: read
 30 |     steps:
 31 |       - uses: actions/checkout@v5
 32 |         with:
 33 |           ref: ${{ github.event.pull_request.head.sha }}
 34 |       - uses: nixbuild/nix-quick-install-action@v30
 35 |         with:
 36 |           nix_conf: ${{ env.nix_conf }}
 37 |       - name: Restore and save Nix store
 38 |         uses: nix-community/cache-nix-action@v6
 39 |         with:
 40 |           primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
 41 |           restore-prefixes-first-match: build-${{ runner.os }}-
 42 |           purge: true
 43 |           purge-prefixes: build-${{ runner.os }}-
 44 |           purge-created: 0
 45 |           purge-primary-key: never
 46 |           gc-max-store-size: 5G
 47 |       - name: Save flake attributes from garbage collection
 48 |         run: nix profile install .#saveFromGC
 49 | 
 50 |   check:
 51 |     name: Run checks
 52 |     runs-on: ubuntu-24.04
 53 |     needs: cache
 54 |     permissions:
 55 |       actions: write
 56 |       contents: read
 57 |     steps:
 58 |       - uses: actions/checkout@v5
 59 |         with:
 60 |           ref: ${{ github.event.pull_request.head.sha }}
 61 |       - uses: nixbuild/nix-quick-install-action@v30
 62 |         with:
 63 |           nix_conf: ${{ env.nix_conf }}
 64 |       - name: Restore and save Nix store
 65 |         uses: nix-community/cache-nix-action@v6
 66 |         with:
 67 |           primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
 68 |           purge: true
 69 |           purge-prefixes: build-${{ runner.os }}-
 70 |           purge-created: 0
 71 |           purge-primary-key: never
 72 |           gc-max-store-size: 5G
 73 |       - name: Run checks
 74 |         run: nix flake check
 75 | 
 76 |   build:
 77 |     name: Build
 78 |     runs-on: ubuntu-24.04
 79 |     needs: cache
 80 |     permissions:
 81 |       actions: write
 82 |       contents: read
 83 |     steps:
 84 |       - uses: actions/checkout@v5
 85 |         with:
 86 |           ref: ${{ github.event.pull_request.head.sha }}
 87 |       - uses: nixbuild/nix-quick-install-action@v30
 88 |         with:
 89 |           nix_conf: ${{ env.nix_conf }}
 90 |       - name: Restore and save Nix store
 91 |         uses: nix-community/cache-nix-action@v6
 92 |         with:
 93 |           primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
 94 |           purge: true
 95 |           purge-prefixes: build-${{ runner.os }}-
 96 |           purge-created: 0
 97 |           purge-primary-key: never
 98 |           gc-max-store-size: 5G
 99 |       - name: Build
100 |         run: nix build .#
101 | 
102 |   test:
103 |     name: Run Tests
104 |     runs-on: ubuntu-24.04
105 |     needs: cache
106 |     permissions:
107 |       actions: write
108 |       contents: read
109 |     steps:
110 |       - uses: actions/checkout@v5
111 |         with:
112 |           ref: ${{ github.event.pull_request.head.sha }}
113 |       - uses: nixbuild/nix-quick-install-action@v30
114 |         with:
115 |           nix_conf: ${{ env.nix_conf }}
116 |       - name: Restore and save Nix store
117 |         uses: nix-community/cache-nix-action@v6
118 |         with:
119 |           primary-key: build-${{ runner.os }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
120 |           purge: true
121 |           purge-prefixes: build-${{ runner.os }}-
122 |           purge-created: 0
123 |           purge-primary-key: never
124 |           gc-max-store-size: 5G
125 |       - name: Run Tests
126 |         run: 'nix develop --command bash -c "cargo test"'
127 | 
128 |   coverage:
129 |     name: Run Coverage
130 |     runs-on: ubuntu-24.04
131 |     permissions:
132 |       contents: read
133 |     steps:
134 |       - uses: actions/checkout@v5
135 |         with:
136 |           ref: ${{ github.event.pull_request.head.sha }}
137 |       - uses: taiki-e/install-action@cargo-llvm-cov
138 |       - name: Generate code coverage
139 |         run: cargo llvm-cov --all-features --workspace --codecov --output-path codecov.json
140 |       - name: Upload coverage to Codecov
141 |         uses: codecov/codecov-action@v5
142 |         with:
143 |           token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
144 |           files: codecov.json
145 |           fail_ci_if_error: true
146 | 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/raw_operation.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::{collections::HashMap, str::FromStr as _};
  2 | 
  3 | use apollo_compiler::validation::Valid;
  4 | use apollo_mcp_registry::platform_api::operation_collections::{
  5 |     collection_poller::OperationData, error::CollectionError,
  6 | };
  7 | use http::{HeaderMap, HeaderName, HeaderValue};
  8 | use serde_json::Value;
  9 | 
 10 | use crate::{custom_scalar_map::CustomScalarMap, errors::OperationError};
 11 | 
 12 | use super::{MutationMode, operation::Operation};
 13 | 
 14 | #[derive(Debug, Clone)]
 15 | pub struct RawOperation {
 16 |     pub(super) source_text: String,
 17 |     pub(super) persisted_query_id: Option<String>,
 18 |     pub(super) headers: Option<HeaderMap<HeaderValue>>,
 19 |     pub(super) variables: Option<HashMap<String, Value>>,
 20 |     pub(super) source_path: Option<String>,
 21 | }
 22 | 
 23 | impl RawOperation {
 24 |     pub(crate) fn into_operation(
 25 |         self,
 26 |         schema: &Valid<apollo_compiler::Schema>,
 27 |         custom_scalars: Option<&CustomScalarMap>,
 28 |         mutation_mode: MutationMode,
 29 |         disable_type_description: bool,
 30 |         disable_schema_description: bool,
 31 |     ) -> Result<Option<Operation>, OperationError> {
 32 |         Operation::from_document(
 33 |             self,
 34 |             schema,
 35 |             custom_scalars,
 36 |             mutation_mode,
 37 |             disable_type_description,
 38 |             disable_schema_description,
 39 |         )
 40 |     }
 41 | }
 42 | 
 43 | impl From<(String, Option<String>)> for RawOperation {
 44 |     fn from((source_text, source_path): (String, Option<String>)) -> Self {
 45 |         Self {
 46 |             persisted_query_id: None,
 47 |             source_text,
 48 |             headers: None,
 49 |             variables: None,
 50 |             source_path,
 51 |         }
 52 |     }
 53 | }
 54 | 
 55 | impl From<(String, String)> for RawOperation {
 56 |     fn from((persisted_query_id, source_text): (String, String)) -> Self {
 57 |         Self {
 58 |             persisted_query_id: Some(persisted_query_id),
 59 |             source_text,
 60 |             headers: None,
 61 |             variables: None,
 62 |             source_path: None,
 63 |         }
 64 |     }
 65 | }
 66 | 
 67 | impl TryFrom<&OperationData> for RawOperation {
 68 |     type Error = CollectionError;
 69 | 
 70 |     fn try_from(operation_data: &OperationData) -> Result<Self, Self::Error> {
 71 |         let variables = if let Some(variables) = operation_data.variables.as_ref() {
 72 |             if variables.trim().is_empty() {
 73 |                 Some(HashMap::new())
 74 |             } else {
 75 |                 Some(
 76 |                     serde_json::from_str::<HashMap<String, Value>>(variables)
 77 |                         .map_err(|_| CollectionError::InvalidVariables(variables.clone()))?,
 78 |                 )
 79 |             }
 80 |         } else {
 81 |             None
 82 |         };
 83 | 
 84 |         let headers = if let Some(headers) = operation_data.headers.as_ref() {
 85 |             let mut header_map = HeaderMap::new();
 86 |             for header in headers {
 87 |                 header_map.insert(
 88 |                     HeaderName::from_str(&header.0).map_err(CollectionError::HeaderName)?,
 89 |                     HeaderValue::from_str(&header.1).map_err(CollectionError::HeaderValue)?,
 90 |                 );
 91 |             }
 92 |             Some(header_map)
 93 |         } else {
 94 |             None
 95 |         };
 96 | 
 97 |         Ok(Self {
 98 |             persisted_query_id: None,
 99 |             source_text: operation_data.source_text.clone(),
100 |             headers,
101 |             variables,
102 |             source_path: None,
103 |         })
104 |     }
105 | }
106 | 
107 | // TODO: This can be greatly simplified by using `#[serde(serialize_with = "...")]` on the
108 | // specific field that does not implement `Serialize`.
109 | // Custom Serialize implementation for RawOperation.
110 | // This is needed because the http crate's HeaderMap/HeaderName/HeaderValue do not implement Serialize
111 | impl serde::Serialize for RawOperation {
112 |     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
113 |     where
114 |         S: serde::Serializer,
115 |     {
116 |         use serde::ser::SerializeStruct;
117 |         let mut state = serializer.serialize_struct("RawOperation", 4)?;
118 |         state.serialize_field("source_text", &self.source_text)?;
119 |         if let Some(ref id) = self.persisted_query_id {
120 |             state.serialize_field("persisted_query_id", id)?;
121 |         }
122 |         if let Some(ref variables) = self.variables {
123 |             state.serialize_field("variables", variables)?;
124 |         }
125 |         if let Some(ref headers) = self.headers {
126 |             state.serialize_field(
127 |                 "headers",
128 |                 headers
129 |                     .iter()
130 |                     .map(|(name, value)| {
131 |                         format!("{}: {}", name, value.to_str().unwrap_or_default())
132 |                     })
133 |                     .collect::<Vec<_>>()
134 |                     .join("\n")
135 |                     .as_str(),
136 |             )?;
137 |         }
138 |         if let Some(ref path) = self.source_path {
139 |             state.serialize_field("source_path", path)?;
140 |         }
141 | 
142 |         state.end()
143 |     }
144 | }
145 | 
```
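
One encoding detail worth noting from the `Serialize` impl: headers are flattened to newline-joined `name: value` pairs rather than serialized as a map. A self-contained sketch of exactly that transformation, using only the `http` crate already imported above:

```rust
use http::{HeaderMap, HeaderValue};

// Mirrors RawOperation::serialize: each header becomes "name: value", values
// that are not valid UTF-8 degrade to an empty string, and the pairs are
// joined with newlines.
fn headers_to_string(headers: &HeaderMap<HeaderValue>) -> String {
    headers
        .iter()
        .map(|(name, value)| format!("{}: {}", name, value.to_str().unwrap_or_default()))
        .collect::<Vec<_>>()
        .join("\n")
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert("x-tenant", HeaderValue::from_static("acme"));
    headers.insert("accept", HeaderValue::from_static("application/json"));
    // Note: HeaderMap iteration order is not guaranteed to match insertion order.
    println!("{}", headers_to_string(&headers));
}
```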

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::net::{IpAddr, Ipv4Addr};
  2 | 
  3 | use apollo_mcp_registry::uplink::schema::SchemaSource;
  4 | use bon::bon;
  5 | use reqwest::header::{CONTENT_TYPE, HeaderMap, HeaderValue};
  6 | use schemars::JsonSchema;
  7 | use serde::Deserialize;
  8 | use url::Url;
  9 | 
 10 | use crate::auth;
 11 | use crate::cors::CorsConfig;
 12 | use crate::custom_scalar_map::CustomScalarMap;
 13 | use crate::errors::ServerError;
 14 | use crate::event::Event as ServerEvent;
 15 | use crate::headers::ForwardHeaders;
 16 | use crate::health::HealthCheckConfig;
 17 | use crate::operations::{MutationMode, OperationSource};
 18 | 
 19 | mod states;
 20 | 
 21 | use states::StateMachine;
 22 | 
 23 | /// An Apollo MCP Server
 24 | pub struct Server {
 25 |     transport: Transport,
 26 |     schema_source: SchemaSource,
 27 |     operation_source: OperationSource,
 28 |     endpoint: Url,
 29 |     headers: HeaderMap,
 30 |     forward_headers: ForwardHeaders,
 31 |     execute_introspection: bool,
 32 |     validate_introspection: bool,
 33 |     introspect_introspection: bool,
 34 |     introspect_minify: bool,
 35 |     search_minify: bool,
 36 |     search_introspection: bool,
 37 |     explorer_graph_ref: Option<String>,
 38 |     custom_scalar_map: Option<CustomScalarMap>,
 39 |     mutation_mode: MutationMode,
 40 |     disable_type_description: bool,
 41 |     disable_schema_description: bool,
 42 |     disable_auth_token_passthrough: bool,
 43 |     search_leaf_depth: usize,
 44 |     index_memory_bytes: usize,
 45 |     health_check: HealthCheckConfig,
 46 |     cors: CorsConfig,
 47 | }
 48 | 
 49 | #[derive(Debug, Clone, Deserialize, Default, JsonSchema)]
 50 | #[serde(tag = "type", rename_all = "snake_case")]
 51 | pub enum Transport {
 52 |     /// Use standard IO for server <> client communication
 53 |     #[default]
 54 |     Stdio,
 55 | 
 56 |     /// Host the MCP server on the supplied configuration, using SSE for communication
 57 |     ///
 58 |     /// Note: This is deprecated in favor of HTTP streams.
 59 |     #[serde(rename = "sse")]
 60 |     SSE {
 61 |         /// Authentication configuration
 62 |         #[serde(default)]
 63 |         auth: Option<auth::Config>,
 64 | 
 65 |         /// The IP address to bind to
 66 |         #[serde(default = "Transport::default_address")]
 67 |         address: IpAddr,
 68 | 
 69 |         /// The port to bind to
 70 |         #[serde(default = "Transport::default_port")]
 71 |         port: u16,
 72 |     },
 73 | 
 74 |     /// Host the MCP server on the supplied configuration, using streamable HTTP messages.
 75 |     StreamableHttp {
 76 |         /// Authentication configuration
 77 |         #[serde(default)]
 78 |         auth: Option<auth::Config>,
 79 | 
 80 |         /// The IP address to bind to
 81 |         #[serde(default = "Transport::default_address")]
 82 |         address: IpAddr,
 83 | 
 84 |         /// The port to bind to
 85 |         #[serde(default = "Transport::default_port")]
 86 |         port: u16,
 87 | 
 88 |         #[serde(default = "Transport::default_stateful_mode")]
 89 |         stateful_mode: bool,
 90 |     },
 91 | }
 92 | 
 93 | impl Transport {
 94 |     fn default_address() -> IpAddr {
 95 |         IpAddr::V4(Ipv4Addr::LOCALHOST)
 96 |     }
 97 | 
 98 |     fn default_port() -> u16 {
 99 |         8000
100 |     }
101 | 
102 |     fn default_stateful_mode() -> bool {
103 |         true
104 |     }
105 | }
106 | 
107 | #[bon]
108 | impl Server {
109 |     #[builder]
110 |     pub fn new(
111 |         transport: Transport,
112 |         schema_source: SchemaSource,
113 |         operation_source: OperationSource,
114 |         endpoint: Url,
115 |         headers: HeaderMap,
116 |         forward_headers: ForwardHeaders,
117 |         execute_introspection: bool,
118 |         validate_introspection: bool,
119 |         introspect_introspection: bool,
120 |         search_introspection: bool,
121 |         introspect_minify: bool,
122 |         search_minify: bool,
123 |         explorer_graph_ref: Option<String>,
124 |         #[builder(required)] custom_scalar_map: Option<CustomScalarMap>,
125 |         mutation_mode: MutationMode,
126 |         disable_type_description: bool,
127 |         disable_schema_description: bool,
128 |         disable_auth_token_passthrough: bool,
129 |         search_leaf_depth: usize,
130 |         index_memory_bytes: usize,
131 |         health_check: HealthCheckConfig,
132 |         cors: CorsConfig,
133 |     ) -> Self {
134 |         let headers = {
135 |             let mut headers = headers.clone();
136 |             headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
137 |             headers
138 |         };
139 |         Self {
140 |             transport,
141 |             schema_source,
142 |             operation_source,
143 |             endpoint,
144 |             headers,
145 |             forward_headers,
146 |             execute_introspection,
147 |             validate_introspection,
148 |             introspect_introspection,
149 |             search_introspection,
150 |             introspect_minify,
151 |             search_minify,
152 |             explorer_graph_ref,
153 |             custom_scalar_map,
154 |             mutation_mode,
155 |             disable_type_description,
156 |             disable_schema_description,
157 |             disable_auth_token_passthrough,
158 |             search_leaf_depth,
159 |             index_memory_bytes,
160 |             health_check,
161 |             cors,
162 |         }
163 |     }
164 | 
165 |     pub async fn start(self) -> Result<(), ServerError> {
166 |         StateMachine {}.start(self).await
167 |     }
168 | }
169 | 
```
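
Because `Transport` is deserialized with `tag = "type"` and `rename_all = "snake_case"`, a config file selects a transport via `type: stdio`, `type: sse`, or `type: streamable_http`, with address and port defaulting to `127.0.0.1:8000` and `stateful_mode` to `true`. A minimal sketch, assuming a YAML config and a `serde_yaml` dependency (neither is shown in this dump):

```rust
// Hypothetical import path for this sketch; within the crate it is `crate::server::Transport`.
use apollo_mcp_server::server::Transport;

fn main() -> Result<(), serde_yaml::Error> {
    // Only `type` is required; omitted fields take the defaults above.
    let transport: Transport = serde_yaml::from_str("type: streamable_http\nport: 5000\n")?;
    // Prints: StreamableHttp { auth: None, address: 127.0.0.1, port: 5000, stateful_mode: true }
    println!("{transport:?}");
    Ok(())
}
```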

--------------------------------------------------------------------------------
/graphql/weather/supergraph.graphql:
--------------------------------------------------------------------------------

```graphql
  1 | schema
  2 |   @link(url: "https://specs.apollo.dev/link/v1.0")
  3 |   @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION)
  4 |   @link(url: "https://specs.apollo.dev/tag/v0.3")
  5 |   @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION)
  6 |   @join__directive(graphs: [WEATHER], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]})
  7 |   @join__directive(graphs: [WEATHER], name: "source", args: {name: "NWS", http: {baseURL: "https://api.weather.gov", headers: [{name: "User-Agent", value: "weather-app/1.0"}, {name: "Accept", value: "application/geo+json"}]}})
  8 | {
  9 |   query: Query
 10 | }
 11 | 
 12 | directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION
 13 | 
 14 | directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
 15 | 
 16 | directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
 17 | 
 18 | directive @join__graph(name: String!, url: String!) on ENUM_VALUE
 19 | 
 20 | directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE
 21 | 
 22 | directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
 23 | 
 24 | directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION
 25 | 
 26 | directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA
 27 | 
 28 | directive @tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA
 29 | 
 30 | """A weather alert"""
 31 | type Alert
 32 |   @join__type(graph: WEATHER)
 33 |   @tag(name: "mcp")
 34 | {
 35 |   """The severity of this alert"""
 36 |   severity: String
 37 | 
 38 |   """A description of the alert"""
 39 |   description: String
 40 | 
 41 |   """Information about how people should respond to the alert"""
 42 |   instruction: String
 43 | }
 44 | 
 45 | """A coordinate, consisting of a latitude and longitude"""
 46 | type Coordinate
 47 |   @join__type(graph: WEATHER)
 48 | {
 49 |   """The latitude of this coordinate"""
 50 |   latitude: String!
 51 | 
 52 |   """The longitude of this coordinate"""
 53 |   longitude: String!
 54 | }
 55 | 
 56 | """A weather forecast"""
 57 | type Forecast
 58 |   @join__type(graph: WEATHER)
 59 | {
 60 |   """The coordinate associated with this forecast"""
 61 |   coordinate: Coordinate!
 62 | 
 63 |   """
 64 |   The National Weather Service (NWS) URL where the forecast data can be read
 65 |   """
 66 |   forecastURL: String!
 67 | 
 68 |   """A detailed weather forecast from the National Weather Service (NWS)"""
 69 |   detailed: String! @join__directive(graphs: [WEATHER], name: "connect", args: {http: {GET: "https://api.weather.gov/gridpoints/FFC/51,87/forecast", headers: [{name: "foo", value: "{$this.forecastURL}"}, {name: "Accept", value: "application/geo+json"}, {name: "User-Agent", value: "weather-app/1.0"}]}, selection: "$.properties.periods->first.detailedForecast"})
 70 | }
 71 | 
 72 | """A coordinate, consisting of a latitude and longitude"""
 73 | input InputCoordinate
 74 |   @join__type(graph: WEATHER)
 75 | {
 76 |   """The latitude of this coordinate"""
 77 |   latitude: String!
 78 | 
 79 |   """The longitude of this coordinate"""
 80 |   longitude: String!
 81 | }
 82 | 
 83 | input join__ContextArgument {
 84 |   name: String!
 85 |   type: String!
 86 |   context: String!
 87 |   selection: join__FieldValue!
 88 | }
 89 | 
 90 | scalar join__DirectiveArguments
 91 | 
 92 | scalar join__FieldSet
 93 | 
 94 | scalar join__FieldValue
 95 | 
 96 | enum join__Graph {
 97 |   WEATHER @join__graph(name: "weather", url: "http://localhost")
 98 | }
 99 | 
100 | scalar link__Import
101 | 
102 | enum link__Purpose {
103 |   """
104 |   `SECURITY` features provide metadata necessary to securely resolve fields.
105 |   """
106 |   SECURITY
107 | 
108 |   """
109 |   `EXECUTION` features provide metadata necessary for operation execution.
110 |   """
111 |   EXECUTION
112 | }
113 | 
114 | type Query
115 |   @join__type(graph: WEATHER)
116 | {
117 |   """Get the weather forecast for a coordinate"""
118 |   forecast(coordinate: InputCoordinate!): Forecast @join__directive(graphs: [WEATHER], name: "connect", args: {source: "NWS", http: {GET: "/points/{$args.coordinate.latitude},{$args.coordinate.longitude}"}, selection: "coordinate: {\n  latitude: $args.coordinate.latitude\n  longitude: $args.coordinate.longitude\n}\nforecastURL: properties.forecast", entity: true})
119 | 
120 |   """
121 |   Get the weather alerts for a state, using the two-letter abbreviation for the state - for example, CO for Colorado
122 |   """
123 |   alerts(state: String!): [Alert] @join__directive(graphs: [WEATHER], name: "connect", args: {source: "NWS", http: {GET: "/alerts/active/area/{$args.state}"}, selection: "$.features.properties {\n  severity\n  description\n  instruction\n}"}) @tag(name: "mcp")
124 | }
125 | 
```
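
This example supergraph is what the server ultimately validates operations against. A rough sketch of loading it with `apollo_compiler` (treat `parse_and_validate` as an assumption about the apollo-compiler 1.x API used elsewhere in this workspace):

```rust
use apollo_compiler::Schema;

fn main() {
    let sdl = std::fs::read_to_string("graphql/weather/supergraph.graphql")
        .expect("supergraph file should be readable");
    // Assumption: apollo-compiler 1.x, where parse_and_validate returns
    // Result<Valid<Schema>, WithErrors<Schema>>.
    match Schema::parse_and_validate(&sdl, "supergraph.graphql") {
        Ok(schema) => println!("parsed {} type definitions", schema.types.len()),
        Err(with_errors) => eprintln!("invalid schema:\n{}", with_errors.errors),
    }
}
```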

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/custom_scalar_map.rs:
--------------------------------------------------------------------------------

```rust
  1 | use crate::errors::ServerError;
  2 | use rmcp::serde_json;
  3 | use schemars::Schema;
  4 | use std::{collections::HashMap, path::PathBuf, str::FromStr};
  5 | 
  6 | impl FromStr for CustomScalarMap {
  7 |     type Err = ServerError;
  8 | 
  9 |     fn from_str(string_custom_scalar_file: &str) -> Result<Self, Self::Err> {
 10 |         // Parse the string into an initial map of serde_json::Values
 11 |         let parsed_custom_scalar_file: serde_json::Map<String, serde_json::Value> =
 12 |             serde_json::from_str(string_custom_scalar_file)
 13 |                 .map_err(ServerError::CustomScalarConfig)?;
 14 | 
 15 |         // Try to parse each as a schema
 16 |         let custom_scalar_map = parsed_custom_scalar_file
 17 |             .into_iter()
 18 |             .map(|(key, value)| {
 19 |                 // The schemars crate does not enforce schema validation anymore, so we use jsonschema
 20 |                 // to ensure that the supplied schema is valid.
 21 |                 if let Err(e) = jsonschema::meta::validate(&value) {
 22 |                     return Err(ServerError::CustomScalarJsonSchema(e.to_string()));
 23 |                 }
 24 | 
 25 |                 Schema::try_from(value.clone())
 26 |                     .map(|schema| (key, schema))
 27 |                     .map_err(|e| ServerError::CustomScalarJsonSchema(e.to_string()))
 28 |             })
 29 |             .collect::<Result<_, _>>()?;
 30 | 
 31 |         Ok(CustomScalarMap(custom_scalar_map))
 32 |     }
 33 | }
 34 | 
 35 | impl TryFrom<&PathBuf> for CustomScalarMap {
 36 |     type Error = ServerError;
 37 | 
 38 |     fn try_from(file_path_buf: &PathBuf) -> Result<Self, Self::Error> {
 39 |         let custom_scalars_config_path = file_path_buf.as_path();
 40 |         tracing::debug!(custom_scalars_config=?custom_scalars_config_path, "Loading custom_scalars_config");
 41 |         let string_custom_scalar_file = std::fs::read_to_string(custom_scalars_config_path)?;
 42 |         CustomScalarMap::from_str(string_custom_scalar_file.as_str())
 43 |     }
 44 | }
 45 | 
 46 | #[derive(Debug, Clone)]
 47 | pub struct CustomScalarMap(HashMap<String, Schema>);
 48 | 
 49 | impl CustomScalarMap {
 50 |     pub fn get(&self, key: &str) -> Option<&Schema> {
 51 |         self.0.get(key)
 52 |     }
 53 | }
 54 | 
 55 | #[cfg(test)]
 56 | mod tests {
 57 |     use std::{collections::HashMap, str::FromStr};
 58 | 
 59 |     use schemars::json_schema;
 60 | 
 61 |     use crate::custom_scalar_map::CustomScalarMap;
 62 | 
 63 |     #[test]
 64 |     fn empty_file() {
 65 |         let result = CustomScalarMap::from_str("").err().unwrap();
 66 | 
 67 |         insta::assert_debug_snapshot!(result, @r#"
 68 |         CustomScalarConfig(
 69 |             Error("EOF while parsing a value", line: 1, column: 0),
 70 |         )
 71 |         "#)
 72 |     }
 73 | 
 74 |     #[test]
 75 |     fn only_spaces() {
 76 |         let result = CustomScalarMap::from_str("    ")
 77 |             .expect_err("whitespace-only input should fail to parse");
 78 | 
 79 |         insta::assert_debug_snapshot!(result, @r#"
 80 |         CustomScalarConfig(
 81 |             Error("EOF while parsing a value", line: 1, column: 4),
 82 |         )
 83 |         "#)
 84 |     }
 85 | 
 86 |     #[test]
 87 |     fn invalid_json() {
 88 |         let result = CustomScalarMap::from_str("Hello: }").err().unwrap();
 89 | 
 90 |         insta::assert_debug_snapshot!(result, @r#"
 91 |         CustomScalarConfig(
 92 |             Error("expected value", line: 1, column: 1),
 93 |         )
 94 |         "#)
 95 |     }
 96 | 
 97 |     #[test]
 98 |     fn invalid_simple_schema() {
 99 |         let result = CustomScalarMap::from_str(
100 |             r###"{
101 |                 "custom": {
102 |                     "type": "bool"
103 |                 }
104 |             }"###,
105 |         )
106 |         .expect_err("schema should have been invalid");
107 | 
108 |         insta::assert_debug_snapshot!(result, @r###"
109 |         CustomScalarJsonSchema(
110 |             "\"bool\" is not valid under any of the schemas listed in the 'anyOf' keyword",
111 |         )
112 |         "###)
113 |     }
114 | 
115 |     #[test]
116 |     fn invalid_complex_schema() {
117 |         let result = CustomScalarMap::from_str(
118 |             r###"{
119 |                 "custom": {
120 |                     "type": "object",
121 |                     "properties": {
122 |                         "test": {
123 |                             "type": "obbbject"
124 |                         }
125 |                     }
126 |                 }
127 |             }"###,
128 |         )
129 |         .expect_err("schema should have been invalid");
130 | 
131 |         insta::assert_debug_snapshot!(result, @r#"
132 |         CustomScalarJsonSchema(
133 |             "\"obbbject\" is not valid under any of the schemas listed in the 'anyOf' keyword",
134 |         )
135 |         "#)
136 |     }
137 | 
138 |     #[test]
139 |     fn valid_schema() {
140 |         let result = CustomScalarMap::from_str(
141 |             r###"
142 |         {
143 |             "simple": {
144 |                 "type": "string"
145 |             },
146 |             "complex": {
147 |                 "type": "object",
148 |                 "properties": { "name": { "type": "string" } }
149 |             }
150 |         }
151 |         "###,
152 |         )
153 |         .unwrap()
154 |         .0;
155 | 
156 |         let expected_data = HashMap::from_iter([
157 |             (
158 |                 "simple".to_string(),
159 |                 json_schema!({
160 |                     "type": "string",
161 |                 }),
162 |             ),
163 |             (
164 |                 "complex".to_string(),
165 |                 json_schema!({
166 |                     "type": "object",
167 |                     "properties": {
168 |                         "name": {
169 |                             "type": "string"
170 |                         }
171 |                     }
172 |                 }),
173 |             ),
174 |         ]);
175 | 
176 |         assert_eq!(result, expected_data);
177 |     }
178 | }
179 | 
```
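
Putting the `FromStr` impl and `get` together: the config file is a single JSON object mapping custom scalar names to JSON Schemas, and lookups key off the GraphQL scalar name. A small sketch (the `DateTime` mapping is illustrative, not a shipped default):

```rust
use std::str::FromStr;

use crate::custom_scalar_map::CustomScalarMap; // crate-internal path, as in the tests above

fn demo() {
    let map = CustomScalarMap::from_str(
        r#"{ "DateTime": { "type": "string", "format": "date-time" } }"#,
    )
    .expect("config should parse and validate as JSON Schema");

    // Scalars present in the config resolve to a concrete schemars::Schema...
    assert!(map.get("DateTime").is_some());
    // ...anything else is simply absent, leaving the caller to fall back.
    assert!(map.get("Upload").is_none());
}
```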

--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/testdata/schema.graphql:
--------------------------------------------------------------------------------

```graphql
  1 | scalar DateTime
  2 | scalar JSON
  3 | scalar Upload
  4 | 
  5 | enum UserRole {
  6 |   ADMIN
  7 |   MODERATOR
  8 |   USER
  9 |   GUEST
 10 | }
 11 | 
 12 | enum ContentStatus {
 13 |   DRAFT
 14 |   PUBLISHED
 15 |   ARCHIVED
 16 |   DELETED
 17 | }
 18 | 
 19 | enum NotificationPriority {
 20 |   LOW
 21 |   MEDIUM
 22 |   HIGH
 23 |   URGENT
 24 | }
 25 | 
 26 | enum MediaType {
 27 |   IMAGE
 28 |   VIDEO
 29 |   AUDIO
 30 |   DOCUMENT
 31 | }
 32 | 
 33 | interface Node {
 34 |   id: ID!
 35 |   createdAt: DateTime!
 36 |   updatedAt: DateTime!
 37 | }
 38 | 
 39 | interface Content {
 40 |   id: ID!
 41 |   title: String!
 42 |   status: ContentStatus!
 43 |   author: User!
 44 |   metadata: JSON
 45 | }
 46 | 
 47 | type User implements Node {
 48 |   id: ID!
 49 |   createdAt: DateTime!
 50 |   updatedAt: DateTime!
 51 |   username: String!
 52 |   email: String!
 53 |   role: UserRole!
 54 |   profile: UserProfile
 55 |   posts: [Post!]!
 56 |   comments: [Comment!]!
 57 |   notifications: [Notification!]!
 58 |   preferences: UserPreferences!
 59 | }
 60 | 
 61 | type UserProfile {
 62 |   firstName: String
 63 |   lastName: String
 64 |   bio: String
 65 |   avatar: Media
 66 |   socialLinks: [SocialLink!]!
 67 |   location: Location
 68 | }
 69 | 
 70 | type Location {
 71 |   country: String!
 72 |   city: String
 73 |   coordinates: Coordinates
 74 | }
 75 | 
 76 | type Coordinates {
 77 |   latitude: Float!
 78 |   longitude: Float!
 79 | }
 80 | 
 81 | type SocialLink {
 82 |   platform: String!
 83 |   url: String!
 84 |   verified: Boolean!
 85 | }
 86 | 
 87 | type Post implements Node & Content {
 88 |   id: ID!
 89 |   createdAt: DateTime!
 90 |   updatedAt: DateTime!
 91 |   title: String!
 92 |   content: String!
 93 |   status: ContentStatus!
 94 |   author: User!
 95 |   metadata: JSON
 96 |   comments: [Comment!]!
 97 |   media: [Media!]!
 98 |   tags: [Tag!]!
 99 |   analytics: PostAnalytics!
100 | }
101 | 
102 | type Comment implements Node {
103 |   id: ID!
104 |   createdAt: DateTime!
105 |   updatedAt: DateTime!
106 |   content: String!
107 |   author: User!
108 |   post: Post!
109 |   parentComment: Comment
110 |   replies: [Comment!]!
111 |   reactions: [Reaction!]!
112 | }
113 | 
114 | type Media {
115 |   id: ID!
116 |   type: MediaType!
117 |   url: String!
118 |   thumbnail: String
119 |   metadata: MediaMetadata!
120 |   uploader: User!
121 | }
122 | 
123 | type MediaMetadata {
124 |   size: Int!
125 |   format: String!
126 |   dimensions: Dimensions
127 |   duration: Int
128 | }
129 | 
130 | type Dimensions {
131 |   width: Int!
132 |   height: Int!
133 | }
134 | 
135 | type Tag {
136 |   id: ID!
137 |   name: String!
138 |   slug: String!
139 |   description: String
140 |   posts: [Post!]!
141 | }
142 | 
143 | type Reaction {
144 |   id: ID!
145 |   type: String!
146 |   user: User!
147 |   comment: Comment!
148 |   createdAt: DateTime!
149 | }
150 | 
151 | type Notification {
152 |   id: ID!
153 |   type: String!
154 |   priority: NotificationPriority!
155 |   message: String!
156 |   recipient: User!
157 |   read: Boolean!
158 |   createdAt: DateTime!
159 |   metadata: JSON
160 | }
161 | 
162 | type PostAnalytics {
163 |   views: Int!
164 |   likes: Int!
165 |   shares: Int!
166 |   comments: Int!
167 |   engagement: Float!
168 |   demographics: Demographics!
169 | }
170 | 
171 | type Demographics {
172 |   ageGroups: [AgeGroup!]!
173 |   locations: [LocationStats!]!
174 |   devices: [DeviceStats!]!
175 | }
176 | 
177 | type AgeGroup {
178 |   range: String!
179 |   percentage: Float!
180 | }
181 | 
182 | type LocationStats {
183 |   country: String!
184 |   count: Int!
185 | }
186 | 
187 | type DeviceStats {
188 |   type: String!
189 |   count: Int!
190 | }
191 | 
192 | type UserPreferences {
193 |   theme: String!
194 |   language: String!
195 |   notifications: NotificationPreferences!
196 |   privacy: PrivacySettings!
197 | }
198 | 
199 | type NotificationPreferences {
200 |   email: Boolean!
201 |   push: Boolean!
202 |   sms: Boolean!
203 |   frequency: String!
204 | }
205 | 
206 | type PrivacySettings {
207 |   profileVisibility: String!
208 |   showEmail: Boolean!
209 |   showLocation: Boolean!
210 | }
211 | 
212 | input CreateUserInput {
213 |   username: String!
214 |   email: String!
215 |   password: String!
216 |   role: UserRole = USER
217 |   profile: CreateUserProfileInput
218 | }
219 | 
220 | input CreateUserProfileInput {
221 |   firstName: String
222 |   lastName: String
223 |   bio: String
224 |   location: CreateLocationInput
225 | }
226 | 
227 | input CreateLocationInput {
228 |   country: String!
229 |   city: String
230 |   coordinates: CreateCoordinatesInput
231 | }
232 | 
233 | input CreateCoordinatesInput {
234 |   latitude: Float!
235 |   longitude: Float!
236 | }
237 | 
238 | input CreatePostInput {
239 |   title: String!
240 |   content: String!
241 |   status: ContentStatus = DRAFT
242 |   tags: [String!]
243 |   media: [Upload!]
244 | }
245 | 
246 | input UpdatePostInput {
247 |   title: String
248 |   content: String
249 |   status: ContentStatus
250 |   tags: [String!]
251 | }
252 | 
253 | input CreateCommentInput {
254 |   content: String!
255 |   postId: ID!
256 |   parentCommentId: ID
257 | }
258 | 
259 | input NotificationFilter {
260 |   priority: NotificationPriority
261 |   read: Boolean
262 |   type: String
263 |   startDate: DateTime
264 |   endDate: DateTime
265 | }
266 | 
267 | type Query {
268 |   node(id: ID!): Node
269 |   user(id: ID!): User
270 |   post(id: ID!): Post
271 |   posts(filter: PostFilter): [Post!]!
272 |   comments(postId: ID!): [Comment!]!
273 |   notifications(filter: NotificationFilter): [Notification!]!
274 |   search(query: String!): SearchResult!
275 | }
276 | 
277 | type Mutation {
278 |   createUser(input: CreateUserInput!): User!
279 |   createPost(input: CreatePostInput!): Post!
280 |   updatePost(id: ID!, input: UpdatePostInput!): Post!
281 |   createComment(input: CreateCommentInput!): Comment!
282 |   deletePost(id: ID!): Boolean!
283 |   uploadMedia(file: Upload!): Media!
284 |   updateUserPreferences(id: ID!, preferences: UserPreferencesInput!): UserPreferences!
285 | }
286 | 
287 | type Subscription {
288 |   postUpdated(id: ID!): Post!
289 |   newComment(postId: ID!): Comment!
290 |   notificationReceived(userId: ID!): Notification!
291 | }
292 | 
293 | union SearchResult = User | Post | Comment | Tag
294 | 
295 | input PostFilter {
296 |   status: ContentStatus
297 |   authorId: ID
298 |   tags: [String!]
299 |   dateRange: DateRangeInput
300 | }
301 | 
302 | input DateRangeInput {
303 |   start: DateTime!
304 |   end: DateTime!
305 | }
306 | 
307 | input UserPreferencesInput {
308 |   theme: String
309 |   language: String
310 |   notifications: NotificationPreferencesInput
311 |   privacy: PrivacySettingsInput
312 | }
313 | 
314 | input NotificationPreferencesInput {
315 |   email: Boolean
316 |   push: Boolean
317 |   sms: Boolean
318 |   frequency: String
319 | }
320 | 
321 | input PrivacySettingsInput {
322 |   profileVisibility: String
323 |   showEmail: Boolean
324 |   showLocation: Boolean
325 | }
326 | 
327 | directive @auth(requires: UserRole!) on FIELD_DEFINITION
328 | directive @cache(ttl: Int!) on FIELD_DEFINITION
329 | directive @deprecated(reason: String) on FIELD_DEFINITION 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/testdata/schema.graphql:
--------------------------------------------------------------------------------

```graphql
  1 | scalar DateTime
  2 | scalar JSON
  3 | scalar Upload
  4 | 
  5 | enum UserRole {
  6 |   ADMIN
  7 |   MODERATOR
  8 |   USER
  9 |   GUEST
 10 | }
 11 | 
 12 | enum ContentStatus {
 13 |   DRAFT
 14 |   PUBLISHED
 15 |   ARCHIVED
 16 |   DELETED
 17 | }
 18 | 
 19 | enum NotificationPriority {
 20 |   LOW
 21 |   MEDIUM
 22 |   HIGH
 23 |   URGENT
 24 | }
 25 | 
 26 | enum MediaType {
 27 |   IMAGE
 28 |   VIDEO
 29 |   AUDIO
 30 |   DOCUMENT
 31 | }
 32 | 
 33 | interface Node {
 34 |   id: ID!
 35 |   createdAt: DateTime!
 36 |   updatedAt: DateTime!
 37 | }
 38 | 
 39 | interface Content {
 40 |   id: ID!
 41 |   title: String!
 42 |   status: ContentStatus!
 43 |   author: User!
 44 |   metadata: JSON
 45 | }
 46 | 
 47 | type User implements Node {
 48 |   id: ID!
 49 |   createdAt: DateTime!
 50 |   updatedAt: DateTime!
 51 |   username: String!
 52 |   email: String!
 53 |   role: UserRole!
 54 |   profile: UserProfile
 55 |   posts: [Post!]!
 56 |   comments: [Comment!]!
 57 |   notifications: [Notification!]!
 58 |   preferences: UserPreferences!
 59 | }
 60 | 
 61 | type UserProfile {
 62 |   firstName: String
 63 |   lastName: String
 64 |   bio: String
 65 |   avatar: Media
 66 |   socialLinks: [SocialLink!]!
 67 |   location: Location
 68 | }
 69 | 
 70 | type Location {
 71 |   country: String!
 72 |   city: String
 73 |   coordinates: Coordinates
 74 | }
 75 | 
 76 | type Coordinates {
 77 |   latitude: Float!
 78 |   longitude: Float!
 79 | }
 80 | 
 81 | type SocialLink {
 82 |   platform: String!
 83 |   url: String!
 84 |   verified: Boolean!
 85 | }
 86 | 
 87 | type Post implements Node & Content {
 88 |   id: ID!
 89 |   createdAt: DateTime!
 90 |   updatedAt: DateTime!
 91 |   title: String!
 92 |   content: String!
 93 |   status: ContentStatus!
 94 |   author: User!
 95 |   metadata: JSON
 96 |   comments: [Comment!]!
 97 |   media: [Media!]!
 98 |   tags: [Tag!]!
 99 |   analytics: PostAnalytics!
100 | }
101 | 
102 | type Comment implements Node {
103 |   id: ID!
104 |   createdAt: DateTime!
105 |   updatedAt: DateTime!
106 |   content: String!
107 |   author: User!
108 |   post: Post!
109 |   parentComment: Comment
110 |   replies: [Comment!]!
111 |   reactions: [Reaction!]!
112 | }
113 | 
114 | type Media {
115 |   id: ID!
116 |   type: MediaType!
117 |   url: String!
118 |   thumbnail: String
119 |   metadata: MediaMetadata!
120 |   uploader: User!
121 | }
122 | 
123 | type MediaMetadata {
124 |   size: Int!
125 |   format: String!
126 |   dimensions: Dimensions
127 |   duration: Int
128 | }
129 | 
130 | type Dimensions {
131 |   width: Int!
132 |   height: Int!
133 | }
134 | 
135 | type Tag {
136 |   id: ID!
137 |   name: String!
138 |   slug: String!
139 |   description: String
140 |   posts: [Post!]!
141 | }
142 | 
143 | type Reaction {
144 |   id: ID!
145 |   type: String!
146 |   user: User!
147 |   comment: Comment!
148 |   createdAt: DateTime!
149 | }
150 | 
151 | type Notification {
152 |   id: ID!
153 |   type: String!
154 |   priority: NotificationPriority!
155 |   message: String!
156 |   recipient: User!
157 |   read: Boolean!
158 |   createdAt: DateTime!
159 |   metadata: JSON
160 | }
161 | 
162 | type PostAnalytics {
163 |   views: Int!
164 |   likes: Int!
165 |   shares: Int!
166 |   comments: Int!
167 |   engagement: Float!
168 |   demographics: Demographics!
169 | }
170 | 
171 | type Demographics {
172 |   ageGroups: [AgeGroup!]!
173 |   locations: [LocationStats!]!
174 |   devices: [DeviceStats!]!
175 | }
176 | 
177 | type AgeGroup {
178 |   range: String!
179 |   percentage: Float!
180 | }
181 | 
182 | type LocationStats {
183 |   country: String!
184 |   count: Int!
185 | }
186 | 
187 | type DeviceStats {
188 |   type: String!
189 |   count: Int!
190 | }
191 | 
192 | type UserPreferences {
193 |   theme: String!
194 |   oldTheme: String @deprecated
195 |   language: String!
196 |   notifications: NotificationPreferences!
197 |   privacy: PrivacySettings!
198 | }
199 | 
200 | type NotificationPreferences {
201 |   email: Boolean!
202 |   push: Boolean!
203 |   sms: Boolean!
204 |   frequency: String!
205 | }
206 | 
207 | type PrivacySettings {
208 |   profileVisibility: String!
209 |   showEmail: Boolean!
210 |   showLocation: Boolean!
211 | }
212 | 
213 | input CreateUserInput {
214 |   username: String!
215 |   email: String!
216 |   password: String!
217 |   role: UserRole = USER
218 |   profile: CreateUserProfileInput
219 | }
220 | 
221 | input CreateUserProfileInput {
222 |   firstName: String
223 |   lastName: String
224 |   bio: String
225 |   location: CreateLocationInput
226 | }
227 | 
228 | input CreateLocationInput {
229 |   country: String!
230 |   city: String
231 |   coordinates: CreateCoordinatesInput
232 | }
233 | 
234 | input CreateCoordinatesInput {
235 |   latitude: Float!
236 |   longitude: Float!
237 | }
238 | 
239 | input CreatePostInput {
240 |   title: String!
241 |   content: String!
242 |   status: ContentStatus = DRAFT
243 |   tags: [String!]
244 |   media: [Upload!]
245 | }
246 | 
247 | input UpdatePostInput {
248 |   title: String
249 |   content: String
250 |   status: ContentStatus
251 |   tags: [String!]
252 | }
253 | 
254 | input CreateCommentInput {
255 |   content: String!
256 |   postId: ID!
257 |   parentCommentId: ID
258 | }
259 | 
260 | input NotificationFilter {
261 |   priority: NotificationPriority
262 |   read: Boolean
263 |   type: String
264 |   startDate: DateTime
265 |   endDate: DateTime
266 | }
267 | 
268 | type Query {
269 |   node(id: ID!): Node
270 |   user(id: ID!): User
271 |   post(id: ID!): Post
272 |   postsOld(filter: [ID!]) : [Post!]! @deprecated(reason: "Use posts instead")
273 |   posts(filter: PostFilter): [Post!]!
274 |   comments(postId: ID!): [Comment!]!
275 |   notifications(filter: NotificationFilter): [Notification!]!
276 |   search(query: String!): SearchResult!
277 | }
278 | 
279 | type Mutation {
280 |   createUser(input: CreateUserInput!): User!
281 |   createPost(input: CreatePostInput!): Post!
282 |   updatePost(id: ID!, input: UpdatePostInput!): Post!
283 |   createComment(input: CreateCommentInput!): Comment!
284 |   deletePost(id: ID!): Boolean!
285 |   uploadMedia(file: Upload!): Media!
286 |   updateUserPreferences(id: ID!, preferences: UserPreferencesInput!): UserPreferences!
287 | }
288 | 
289 | type Subscription {
290 |   postUpdated(id: ID!): Post!
291 |   newComment(postId: ID!): Comment!
292 |   notificationReceived(userId: ID!): Notification!
293 | }
294 | 
295 | union SearchResult = User | Post | Comment | Tag
296 | 
297 | input PostFilter {
298 |   status: ContentStatus
299 |   authorId: ID
300 |   tags: [String!]
301 |   dateRange: DateRangeInput
302 | }
303 | 
304 | input DateRangeInput {
305 |   start: DateTime!
306 |   end: DateTime!
307 | }
308 | 
309 | input UserPreferencesInput {
310 |   theme: String
311 |   language: String
312 |   notifications: NotificationPreferencesInput
313 |   privacy: PrivacySettingsInput
314 | }
315 | 
316 | input NotificationPreferencesInput {
317 |   email: Boolean
318 |   push: Boolean
319 |   sms: Boolean
320 |   frequency: String
321 | }
322 | 
323 | input PrivacySettingsInput {
324 |   profileVisibility: String
325 |   showEmail: Boolean
326 |   showLocation: Boolean
327 | }
328 | 
329 | directive @auth(requires: UserRole!) on FIELD_DEFINITION
330 | directive @cache(ttl: Int!) on FIELD_DEFINITION
331 | directive @deprecated(reason: String) on FIELD_DEFINITION 
```
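
Relative to the near-identical fixture in `apollo-schema-index/src/testdata`, this copy adds deprecated members (`UserPreferences.oldTheme` and `Query.postsOld`), presumably so the introspection tools can be exercised against `@deprecated` schema elements.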

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/explorer.rs:
--------------------------------------------------------------------------------

```rust
  1 | use crate::errors::McpError;
  2 | use crate::schema_from_type;
  3 | use rmcp::model::{CallToolResult, Content, ErrorCode, Tool};
  4 | use rmcp::schemars::JsonSchema;
  5 | use rmcp::serde_json::Value;
  6 | use rmcp::{schemars, serde_json};
  7 | use serde::{Deserialize, Serialize};
  8 | use tracing::debug;
  9 | use tracing::log::Level::Debug;
 10 | use tracing::log::log_enabled;
 11 | 
 12 | pub(crate) const EXPLORER_TOOL_NAME: &str = "explorer";
 13 | 
 14 | #[derive(Clone)]
 15 | pub struct Explorer {
 16 |     graph_id: String,
 17 |     variant: String,
 18 |     pub tool: Tool,
 19 | }
 20 | 
 21 | #[derive(JsonSchema, Deserialize, Serialize)]
 22 | pub struct Input {
 23 |     /// The GraphQL document
 24 |     #[serde(default = "default_input")]
 25 |     document: String,
 26 | 
 27 |     /// Any variables used in the document
 28 |     #[serde(default = "default_input")]
 29 |     variables: String,
 30 | 
 31 |     /// Headers to be sent with the operation
 32 |     #[serde(default = "default_input")]
 33 |     headers: String,
 34 | }
 35 | 
 36 | fn default_input() -> String {
 37 |     "{}".to_string()
 38 | }
 39 | 
 40 | impl Explorer {
 41 |     pub fn new(graph_ref: String) -> Self {
 42 |         let (graph_id, variant) = match graph_ref.split_once('@') {
 43 |             Some((graph_id, variant)) => (graph_id.to_string(), variant.to_string()),
 44 |             None => (graph_ref, String::from("current")),
 45 |         };
 46 |         Self {
 47 |             graph_id,
 48 |             variant,
 49 |             tool: Tool::new(
 50 |                 EXPLORER_TOOL_NAME,
 51 |                 "Get the URL to open a GraphQL operation in Apollo Explorer",
 52 |                 schema_from_type!(Input),
 53 |             ),
 54 |         }
 55 |     }
 56 | 
 57 |     fn create_explorer_url(&self, input: Input) -> Result<String, McpError> {
 58 |         serde_json::to_string(&input)
 59 |             .map(|serialized| lz_str::compress_to_encoded_uri_component(serialized.as_str()))
 60 |             .map(|compressed| {
 61 |                 format!(
 62 |                     "https://studio.apollographql.com/graph/{graph_id}/variant/{variant}/explorer?explorerURLState={compressed}",
 63 |                     graph_id = self.graph_id,
 64 |                     variant = self.variant,
 65 |                 )
 66 |             })
 67 |             .map_err(|e| {
 68 |                 McpError::new(
 69 |                     ErrorCode::INTERNAL_ERROR,
 70 |                     format!("Unable to serialize input: {e}"),
 71 |                     None,
 72 |                 )
 73 |             })
 74 |     }
 75 | 
 76 |     pub async fn execute(&self, input: Input) -> Result<CallToolResult, McpError> {
 77 |         let pretty = if log_enabled!(Debug) {
 78 |             Some(serde_json::to_string_pretty(&input).unwrap_or("<unable to serialize>".into()))
 79 |         } else {
 80 |             None
 81 |         };
 82 |         let url = self.create_explorer_url(input)?;
 83 |         debug!(?url, input=?pretty, "Created URL to open operation in Apollo Explorer");
 84 |         Ok(CallToolResult {
 85 |             content: vec![Content::text(url.clone())],
 86 |             meta: None,
 87 |             is_error: None,
 88 |             structured_content: Some(Value::Array(vec![url.into()])),
 89 |         })
 90 |     }
 91 | }
 92 | 
 93 | #[cfg(test)]
 94 | mod tests {
 95 |     use super::*;
 96 |     use insta::assert_snapshot;
 97 |     use rmcp::serde_json::json;
 98 |     use rstest::rstest;
 99 | 
100 |     #[test]
101 |     fn test_create_explorer_url() {
102 |         let explorer = Explorer::new(String::from("mcp-example@mcp"));
103 |         let input = json!({
104 |             "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
105 |             "variables": "{\"state\": \"CO\"}",
106 |             "headers": "{\"x-foo\": \"bar\"}",
107 |         });
108 | 
109 |         let input: Input = serde_json::from_value(input).unwrap();
110 | 
111 |         let url = explorer.create_explorer_url(input).unwrap();
112 |         assert_snapshot!(
113 |             url,
114 |             @"https://studio.apollographql.com/graph/mcp-example/variant/mcp/explorer?explorerURLState=N4IgJg9gxgrgtgUwHYBcQC4QEcYIE4CeABAOIIoDqCAhigBb4CCANvigM4AUAJOyrQnREAyijwBLJAHMAhAEoiwADpIiRaqzwdOfAUN78UCBctVqi7BADd84lARXmiYBOygSADinEQkj85J8eDBQ3r7+AL4qESAANCBW1BLUAEas7BggyiC6RkoYRPkAwgDy+THxDNQueBmY2QAeALQAZhAQ+UL5KUnlIBFAA"
115 |         );
116 |     }
117 | 
118 |     #[tokio::test]
119 |     #[rstest]
120 |     #[case(json!({
121 |         "variables": "{\"state\": \"CA\"}",
122 |         "headers": "{}"
123 |     }), json!({
124 |         "document": "{}",
125 |         "variables": "{\"state\": \"CA\"}",
126 |         "headers": "{}"
127 |     }))]
128 |     #[case(json!({
129 |         "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
130 |         "headers": "{}"
131 |     }), json!({
132 |         "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
133 |         "variables": "{}",
134 |         "headers": "{}"
135 |     }))]
136 |     #[case(json!({
137 |         "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
138 |         "variables": "{\"state\": \"CA\"}",
139 |     }), json!({
140 |         "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
141 |         "variables": "{\"state\": \"CA\"}",
142 |         "headers": "{}"
143 |     }))]
144 |     async fn test_input_missing_fields(#[case] input: Value, #[case] input_with_default: Value) {
145 |         let input = serde_json::from_value::<Input>(input).unwrap();
146 |         let input_with_default = serde_json::from_value::<Input>(input_with_default).unwrap();
147 |         let explorer = Explorer::new(String::from("mcp-example@mcp"));
148 |         assert_eq!(
149 |             explorer.create_explorer_url(input),
150 |             explorer.create_explorer_url(input_with_default)
151 |         );
152 |     }
153 | }
154 | 
```
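
The Explorer URL is nothing more than the tool input serialized to JSON and LZ-compressed into a URI component, so the encoding is easy to reproduce standalone. A minimal sketch using `lz_str` directly (the payload and graph ref are illustrative):

```rust
fn main() {
    // Mirrors Explorer::create_explorer_url: serialize the input, compress it,
    // and splice it into the Studio Explorer URL.
    let input = r#"{"document":"{ __typename }","variables":"{}","headers":"{}"}"#;
    let compressed = lz_str::compress_to_encoded_uri_component(input);
    println!(
        "https://studio.apollographql.com/graph/{graph_id}/variant/{variant}/explorer?explorerURLState={compressed}",
        graph_id = "mcp-example",
        variant = "current",
    );
}
```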

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/build.rs:
--------------------------------------------------------------------------------

```rust
  1 | #![allow(clippy::unwrap_used)]
  2 | #![allow(clippy::expect_used)]
  3 | #![allow(clippy::panic)]
  4 | 
  5 | //! Build Script for the Apollo MCP Server
  6 | //!
  7 | //! This mostly compiles all the available telemetry attributes
  8 | use quote::__private::TokenStream;
  9 | use quote::quote;
 10 | use serde::Deserialize;
 11 | use std::io::Write;
 12 | use std::{collections::VecDeque, io::Read as _};
 13 | use syn::{Ident, parse2};
 14 | 
 15 | #[derive(Deserialize)]
 16 | struct TelemetryTomlData {
 17 |     attributes: toml::Table,
 18 |     metrics: toml::Table,
 19 | }
 20 | 
 21 | #[derive(Eq, PartialEq, Debug, Clone)]
 22 | struct TelemetryData {
 23 |     name: String,
 24 |     alias: String,
 25 |     value: String,
 26 |     description: String,
 27 | }
 28 | 
 29 | fn flatten(table: toml::Table) -> Vec<TelemetryData> {
 30 |     let mut to_visit = VecDeque::from_iter(table.into_iter().map(|(key, val)| (vec![key], val)));
 31 |     let mut telemetry_data = Vec::new();
 32 | 
 33 |     while let Some((key, value)) = to_visit.pop_front() {
 34 |         match value {
 35 |             toml::Value::String(val) => {
 36 |                 let last_key = key.last().unwrap().clone();
 37 |                 telemetry_data.push(TelemetryData {
 38 |                     name: cruet::to_pascal_case(last_key.as_str()),
 39 |                     alias: last_key,
 40 |                     value: key.join("."),
 41 |                     description: val,
 42 |                 });
 43 |             }
 44 |             toml::Value::Table(map) => to_visit.extend(
 45 |                 map.into_iter()
 46 |                     .map(|(nested_key, value)| ([key.clone(), vec![nested_key]].concat(), value)),
 47 |             ),
 48 | 
 49 |             _ => panic!("telemetry values should be string descriptions"),
 50 |         };
 51 |     }
 52 | 
 53 |     telemetry_data
 54 | }
 55 | 
 56 | fn generate_enum(telemetry_data: &[TelemetryData]) -> Vec<TokenStream> {
 57 |     telemetry_data
 58 |         .iter()
 59 |         .map(|t| {
 60 |             let enum_value_ident = quote::format_ident!("{}", &t.name);
 61 |             let alias = &t.alias;
 62 |             let doc_message = &t.description;
 63 |             quote! {
 64 |                 #[doc = #doc_message]
 65 |                 #[serde(alias = #alias)]
 66 |                 #enum_value_ident
 67 |             }
 68 |         })
 69 |         .collect::<Vec<_>>()
 70 | }
 71 | 
 72 | fn generate_enum_as_str_matches(
 73 |     telemetry_data: &[TelemetryData],
 74 |     enum_ident: Ident,
 75 | ) -> Vec<TokenStream> {
 76 |     telemetry_data
 77 |         .iter()
 78 |         .map(|t| {
 79 |             let name_ident = quote::format_ident!("{}", &t.name);
 80 |             let value = &t.value;
 81 |             quote! {
 82 |                 #enum_ident::#name_ident => #value
 83 |             }
 84 |         })
 85 |         .collect::<Vec<_>>()
 86 | }
 87 | 
 88 | fn main() {
 89 |     // Parse the telemetry file
 90 |     let telemetry: TelemetryTomlData = {
 91 |         let mut raw = String::new();
 92 |         std::fs::File::open("telemetry.toml")
 93 |             .expect("could not open telemetry file")
 94 |             .read_to_string(&mut raw)
 95 |             .expect("could not read telemetry file");
 96 | 
 97 |         toml::from_str(&raw).expect("could not parse telemetry file")
 98 |     };
 99 | 
100 |     // Generate the keys
101 |     let telemetry_attribute_data = flatten(telemetry.attributes);
102 |     let telemetry_metrics_data = flatten(telemetry.metrics);
103 | 
104 |     // Write out the generated keys
105 |     let out_dir = std::env::var_os("OUT_DIR").expect("could not retrieve output directory");
106 |     let dest_path = std::path::Path::new(&out_dir).join("telemetry_attributes.rs");
107 |     let mut generated_file =
108 |         std::fs::File::create(&dest_path).expect("could not create generated code file");
109 | 
110 |     let attribute_keys_len = telemetry_attribute_data.len();
111 |     let attribute_enum_keys = generate_enum(&telemetry_attribute_data);
112 |     let all_attribute_enum_values = telemetry_attribute_data
113 |         .iter()
114 |         .map(|t| quote::format_ident!("{}", t.name))
115 |         .collect::<Vec<_>>();
116 |     let attribute_enum_name = quote::format_ident!("{}", "TelemetryAttribute");
117 |     let attribute_enum_as_str_matches =
118 |         generate_enum_as_str_matches(&telemetry_attribute_data, attribute_enum_name.clone());
119 | 
120 |     let metric_enum_name = quote::format_ident!("{}", "TelemetryMetric");
121 |     let metric_enum_keys = generate_enum(&telemetry_metrics_data);
122 |     let metric_enum_as_str_matches =
123 |         generate_enum_as_str_matches(&telemetry_metrics_data, metric_enum_name.clone());
124 | 
125 |     let tokens = quote! {
126 |         /// All TelemetryAttribute values
127 |         pub const ALL_ATTRS: &[TelemetryAttribute; #attribute_keys_len] = &[#(TelemetryAttribute::#all_attribute_enum_values),*];
128 | 
129 |         /// Supported telemetry attribute (tags) values
130 |         #[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema, Clone, Eq, PartialEq, Hash, Copy)]
131 |         pub enum #attribute_enum_name {
132 |             #(#attribute_enum_keys),*
133 |         }
134 | 
135 |         impl #attribute_enum_name {
136 |             /// Converts TelemetryAttribute to &str
137 |             pub const fn as_str(&self) -> &'static str {
138 |                 match self {
139 |                    #(#attribute_enum_as_str_matches),*
140 |                 }
141 |             }
142 |         }
143 | 
144 |         /// Supported telemetry metrics
145 |         #[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema, Clone, Eq, PartialEq, Hash, Copy)]
146 |         pub enum #metric_enum_name {
147 |             #(#metric_enum_keys),*
148 |         }
149 | 
150 |         impl #metric_enum_name {
151 |             /// Converts TelemetryMetric to &str
152 |             pub const fn as_str(&self) -> &'static str {
153 |                 match self {
154 |                    #(#metric_enum_as_str_matches),*
155 |                 }
156 |             }
157 |         }
158 |     };
159 | 
160 |     let file = parse2(tokens).expect("Could not parse TokenStream");
161 |     let code = prettyplease::unparse(&file);
162 | 
163 |     write!(generated_file, "{}", code).expect("Failed to write generated code");
164 | 
165 |     // Inform cargo that this build script should only rerun when this file or
166 |     // the telemetry definitions change.
167 |     println!("cargo::rerun-if-changed=build.rs");
168 |     println!("cargo::rerun-if-changed=telemetry.toml");
169 | }
170 | 
```
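
For reference, here is the shape of the code the build script above generates. The `telemetry.toml` entry is hypothetical (names invented for illustration), and the serde/schemars derives emitted by the real template are omitted so the sketch stands alone:

```rust
// Hypothetical telemetry.toml input:
//
//   [attributes.apollo.mcp]
//   tool_name = "The name of the invoked tool"
//
// flatten() yields TelemetryData { name: "ToolName", alias: "tool_name",
// value: "apollo.mcp.tool_name", description: "The name of the invoked tool" },
// and the quote! template expands to roughly:

/// Supported telemetry attribute (tags) values
pub enum TelemetryAttribute {
    /// The name of the invoked tool
    ToolName,
}

impl TelemetryAttribute {
    /// Converts TelemetryAttribute to &str
    pub const fn as_str(&self) -> &'static str {
        match self {
            TelemetryAttribute::ToolName => "apollo.mcp.tool_name",
        }
    }
}
```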

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/main.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::path::PathBuf;
  2 | 
  3 | use apollo_mcp_registry::platform_api::operation_collections::collection_poller::CollectionSource;
  4 | use apollo_mcp_registry::uplink::persisted_queries::ManifestSource;
  5 | use apollo_mcp_registry::uplink::schema::SchemaSource;
  6 | use apollo_mcp_server::custom_scalar_map::CustomScalarMap;
  7 | use apollo_mcp_server::errors::ServerError;
  8 | use apollo_mcp_server::operations::OperationSource;
  9 | use apollo_mcp_server::server::Server;
 10 | use clap::Parser;
 11 | use clap::builder::Styles;
 12 | use clap::builder::styling::{AnsiColor, Effects};
 13 | use runtime::IdOrDefault;
 14 | use tracing::{info, warn};
 15 | 
 16 | mod runtime;
 17 | 
 18 | /// Clap styling
 19 | const STYLES: Styles = Styles::styled()
 20 |     .header(AnsiColor::Green.on_default().effects(Effects::BOLD))
 21 |     .usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
 22 |     .literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
 23 |     .placeholder(AnsiColor::Cyan.on_default());
 24 | 
 25 | /// Arguments to the MCP server
 26 | #[derive(Debug, Parser)]
 27 | #[command(
 28 |     version,
 29 |     styles = STYLES,
 30 |     about = "Apollo MCP Server - invoke GraphQL operations from an AI agent",
 31 | )]
 32 | struct Args {
 33 |     /// Path to the config file
 34 |     config: Option<PathBuf>,
 35 | }
 36 | 
 37 | #[tokio::main]
 38 | async fn main() -> anyhow::Result<()> {
 39 |     let config: runtime::Config = match Args::parse().config {
 40 |         Some(config_path) => runtime::read_config(config_path)?,
 41 |         None => runtime::read_config_from_env().unwrap_or_default(),
 42 |     };
 43 | 
 44 |     let _guard = runtime::telemetry::init_tracing_subscriber(&config)?;
 45 | 
 46 |     info!(
 47 |         "Apollo MCP Server v{} // (c) Apollo Graph, Inc. // Licensed under MIT",
 48 |         env!("CARGO_PKG_VERSION")
 49 |     );
 50 | 
 51 |     let schema_source = match config.schema {
 52 |         runtime::SchemaSource::Local { path } => SchemaSource::File { path, watch: true },
 53 |         runtime::SchemaSource::Uplink => SchemaSource::Registry(config.graphos.uplink_config()?),
 54 |     };
 55 | 
 56 |     let operation_source = match config.operations {
 57 |         // Default collection is special and requires other information
 58 |         runtime::OperationSource::Collection {
 59 |             id: IdOrDefault::Default,
 60 |         } => OperationSource::Collection(CollectionSource::Default(
 61 |             config.graphos.graph_ref()?,
 62 |             config.graphos.platform_api_config()?,
 63 |         )),
 64 | 
 65 |         runtime::OperationSource::Collection {
 66 |             id: IdOrDefault::Id(collection_id),
 67 |         } => OperationSource::Collection(CollectionSource::Id(
 68 |             collection_id,
 69 |             config.graphos.platform_api_config()?,
 70 |         )),
 71 |         runtime::OperationSource::Introspect => OperationSource::None,
 72 |         runtime::OperationSource::Local { paths } if !paths.is_empty() => {
 73 |             OperationSource::from(paths)
 74 |         }
 75 |         runtime::OperationSource::Manifest { path } => {
 76 |             OperationSource::from(ManifestSource::LocalHotReload(vec![path]))
 77 |         }
 78 |         runtime::OperationSource::Uplink => {
 79 |             OperationSource::from(ManifestSource::Uplink(config.graphos.uplink_config()?))
 80 |         }
 81 | 
 82 |         // TODO: Inference requires many different combinations and preferences
 83 |         // TODO: We should maybe make this more explicit.
 84 |         runtime::OperationSource::Local { .. } | runtime::OperationSource::Infer => {
 85 |             if config.introspection.any_enabled() {
 86 |                 warn!("No operations specified, falling back to introspection");
 87 |                 OperationSource::None
 88 |             } else if let Ok(graph_ref) = config.graphos.graph_ref() {
 89 |                 warn!(
 90 |                     "No operations specified, falling back to the default collection in {}",
 91 |                     graph_ref
 92 |                 );
 93 |                 OperationSource::Collection(CollectionSource::Default(
 94 |                     graph_ref,
 95 |                     config.graphos.platform_api_config()?,
 96 |                 ))
 97 |             } else {
 98 |                 anyhow::bail!(ServerError::NoOperations)
 99 |             }
100 |         }
101 |     };
102 | 
103 |     let explorer_graph_ref = config
104 |         .overrides
105 |         .enable_explorer
106 |         .then(|| config.graphos.graph_ref())
107 |         .transpose()?;
108 | 
109 |     let transport = config.transport.clone();
110 | 
111 |     Ok(Server::builder()
112 |         .transport(config.transport)
113 |         .schema_source(schema_source)
114 |         .operation_source(operation_source)
115 |         .endpoint(config.endpoint.into_inner())
116 |         .maybe_explorer_graph_ref(explorer_graph_ref)
117 |         .headers(config.headers)
118 |         .forward_headers(config.forward_headers)
119 |         .execute_introspection(config.introspection.execute.enabled)
120 |         .validate_introspection(config.introspection.validate.enabled)
121 |         .introspect_introspection(config.introspection.introspect.enabled)
122 |         .introspect_minify(config.introspection.introspect.minify)
123 |         .search_minify(config.introspection.search.minify)
124 |         .search_introspection(config.introspection.search.enabled)
125 |         .mutation_mode(config.overrides.mutation_mode)
126 |         .disable_type_description(config.overrides.disable_type_description)
127 |         .disable_schema_description(config.overrides.disable_schema_description)
128 |         .disable_auth_token_passthrough(match transport {
129 |             apollo_mcp_server::server::Transport::Stdio => false,
130 |             apollo_mcp_server::server::Transport::SSE { auth, .. } => auth
131 |                 .map(|a| a.disable_auth_token_passthrough)
132 |                 .unwrap_or(false),
133 |             apollo_mcp_server::server::Transport::StreamableHttp { auth, .. } => auth
134 |                 .map(|a| a.disable_auth_token_passthrough)
135 |                 .unwrap_or(false),
136 |         })
137 |         .custom_scalar_map(
138 |             config
139 |                 .custom_scalars
140 |                 .map(|custom_scalars_config| CustomScalarMap::try_from(&custom_scalars_config))
141 |                 .transpose()?,
142 |         )
143 |         .search_leaf_depth(config.introspection.search.leaf_depth)
144 |         .index_memory_bytes(config.introspection.search.index_memory_bytes)
145 |         .health_check(config.health_check)
146 |         .cors(config.cors)
147 |         .build()
148 |         .start()
149 |         .await?)
150 | }
151 | 
```
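
The `match` on `config.operations` above corresponds to config-file entries along these lines. This is a hedged sketch: the key names are inferred from the `runtime::OperationSource` variants, and the authoritative YAML shape is defined in the `runtime` module:

```yaml
# Operations from local .graphql files
operations:
  source: local
  paths:
    - operations/

# Or from a persisted query manifest (hot-reloaded):
# operations:
#   source: manifest
#   path: persisted-queries.json

# Or from a GraphOS operation collection (the default collection is
# resolved from the configured graph ref):
# operations:
#   source: collection
#   id: <collection-id>
```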

--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/traverse.rs:
--------------------------------------------------------------------------------

```rust
  1 | //! Provides an extension trait for traversing GraphQL schemas, using a depth-first traversal
  2 | //! starting at the specified root operation types (query, mutation, subscription).
  3 | 
  4 | use crate::OperationType;
  5 | use crate::path::PathNode;
  6 | use apollo_compiler::Schema;
  7 | use apollo_compiler::ast::NamedType;
  8 | use apollo_compiler::schema::ExtendedType;
  9 | use enumset::EnumSet;
 10 | use std::collections::HashMap;
 11 | use std::collections::hash_map::Entry;
 12 | 
 13 | /// Extension trait to allow traversing a schema
 14 | pub trait SchemaExt {
 15 |     /// Traverse the type hierarchy in the schema in depth-first order, starting with the specified
 16 |     /// root operation types
 17 |     fn traverse(
 18 |         &self,
 19 |         root_types: EnumSet<OperationType>,
 20 |     ) -> Box<dyn Iterator<Item = (&ExtendedType, PathNode)> + '_>;
 21 | }
 22 | 
 23 | impl SchemaExt for Schema {
 24 |     fn traverse(
 25 |         &self,
 26 |         root_types: EnumSet<OperationType>,
 27 |     ) -> Box<dyn Iterator<Item = (&ExtendedType, PathNode)> + '_> {
 28 |         let mut stack = vec![];
 29 |         let mut references: HashMap<&NamedType, Vec<NamedType>> = HashMap::default();
 30 |         for root_type in root_types
 31 |             .iter()
 32 |             .rev()
 33 |             .filter_map(|rt| self.root_operation(rt.into()))
 34 |         {
 35 |             stack.push((root_type, PathNode::new(root_type.clone())));
 36 |         }
 37 |         Box::new(std::iter::from_fn(move || {
 38 |             while let Some((named_type, current_path)) = stack.pop() {
 39 |                 if current_path.has_cycle() {
 40 |                     continue;
 41 |                 }
 42 |                 let references = references.entry(named_type);
 43 | 
 44 |                 // Only traverse the children of a type the first time we visit it.
 45 |                 // After that, we still visit unique paths to the type, but not the child paths.
 46 |                 let traverse_children: bool = matches!(references, Entry::Vacant(_));
 47 | 
 48 |                 references.or_insert(
 49 |                     current_path
 50 |                         .referencing_type()
 51 |                         .map(|(t, _, _)| vec![t.clone()])
 52 |                         .unwrap_or_default(),
 53 |                 );
 54 | 
 55 |                 let cloned = current_path.clone();
 56 |                 if let Some(extended_type) = self.types.get(named_type)
 57 |                     && !extended_type.is_built_in()
 58 |                 {
 59 |                     if traverse_children {
 60 |                         match extended_type {
 61 |                             ExtendedType::Object(obj) => {
 62 |                                 stack.extend(obj.fields.values().map(|field| {
 63 |                                     let field_type = field.ty.inner_named_type();
 64 |                                     let field_args = field
 65 |                                         .arguments
 66 |                                         .iter()
 67 |                                         .map(|arg| arg.ty.inner_named_type().clone())
 68 |                                         .collect::<Vec<_>>();
 69 |                                     (
 70 |                                         field_type,
 71 |                                         current_path.clone().add_child(
 72 |                                             Some(field.name.clone()),
 73 |                                             field_args,
 74 |                                             field_type.clone(),
 75 |                                         ),
 76 |                                     )
 77 |                                 }));
 78 |                             }
 79 |                             ExtendedType::Interface(interface) => {
 80 |                                 stack.extend(interface.fields.values().map(|field| {
 81 |                                     let field_type = field.ty.inner_named_type();
 82 |                                     let field_args = field
 83 |                                         .arguments
 84 |                                         .iter()
 85 |                                         .map(|arg| arg.ty.inner_named_type().clone())
 86 |                                         .collect::<Vec<_>>();
 87 |                                     (
 88 |                                         field_type,
 89 |                                         current_path.clone().add_child(
 90 |                                             Some(field.name.clone()),
 91 |                                             field_args,
 92 |                                             field_type.clone(),
 93 |                                         ),
 94 |                                     )
 95 |                                 }));
 96 |                             }
 97 |                             ExtendedType::Union(union) => {
 98 |                                 stack.extend(union.members.iter().map(|member| &member.name).map(
 99 |                                     |next_type| {
100 |                                         (
101 |                                             next_type,
102 |                                             current_path.clone().add_child(
103 |                                                 None,
104 |                                                 vec![],
105 |                                                 next_type.clone(),
106 |                                             ),
107 |                                         )
108 |                                     },
109 |                                 ));
110 |                             }
111 |                             _ => {}
112 |                         }
113 |                     }
114 |                     return Some((extended_type, cloned));
115 |                 }
116 |             }
117 |             None
118 |         }))
119 |     }
120 | }
121 | 
122 | #[cfg(test)]
123 | mod tests {
124 |     use super::*;
125 |     use apollo_compiler::validation::Valid;
126 |     use rstest::{fixture, rstest};
127 | 
128 |     const TEST_SCHEMA: &str = include_str!("testdata/schema.graphql");
129 | 
130 |     #[fixture]
131 |     fn schema() -> Valid<Schema> {
132 |         Schema::parse(TEST_SCHEMA, "schema.graphql")
133 |             .expect("Failed to parse test schema")
134 |             .validate()
135 |             .expect("Failed to validate test schema")
136 |     }
137 | 
138 |     #[rstest]
139 |     fn test_schema_traverse(schema: Valid<Schema>) {
140 |         let mut paths = vec![];
141 |         for (_extended_type, path) in schema
142 |             .traverse(OperationType::Query | OperationType::Mutation | OperationType::Subscription)
143 |         {
144 |             paths.push(path.to_string());
145 |         }
146 |         insta::assert_debug_snapshot!(paths);
147 |     }
148 | }
149 | 
```
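
Usage mirrors the test above: given a parsed and validated schema, `traverse` yields each non-built-in type together with the path that reached it. A minimal sketch, assuming `OperationType` and the `traverse` module are exported at these paths:

```rust
use apollo_compiler::Schema;
use apollo_schema_index::OperationType;
use apollo_schema_index::traverse::SchemaExt;

fn print_reachable_types(sdl: &str) {
    let schema = Schema::parse(sdl, "schema.graphql")
        .expect("failed to parse schema")
        .validate()
        .expect("failed to validate schema");

    // Depth-first traversal; each non-built-in type is yielded once per
    // unique path, and cyclic paths are skipped.
    for (extended_type, path) in schema.traverse(OperationType::Query | OperationType::Mutation) {
        println!("{} reached via {}", extended_type.name(), path);
    }
}
```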

--------------------------------------------------------------------------------
/docs/source/run.mdx:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | title: Running the Apollo MCP Server
  3 | ---
  4 | 
  5 | There are multiple ways to run the Apollo MCP server.
  6 | 
  7 | - If you have an existing GraphQL API deployed, use the standalone MCP server binary to get started quickly.
  8 | 
  9 | - If you use Docker in your developer workflow, use the Apollo MCP Server Docker image.
 10 | 
 11 | - If you are running your GraphQL API locally with Rover, you can use the Rover CLI's `rover dev` command to run the MCP server alongside your local graph.
 12 | 
 13 | - If you are using the Apollo Runtime Container, you can use the container to run both the MCP server and the Apollo Router in a single container.
 14 | 
 15 | ## With the Rover CLI
 16 | 
 17 | The Rover CLI is a tool for working with GraphQL APIs locally.
 18 | 
 19 | You can use the [`rover dev`](/rover/commands/dev) command of the Rover CLI (`v0.35` or later) to run an Apollo MCP Server instance alongside your local graph. Use the `--mcp` flag to start an MCP server and optionally provide a configuration file.
 20 | 
 21 | ```sh
 22 | rover dev --mcp <PATH/TO/CONFIG> [...other rover dev flags]
 23 | ```
 24 | 
 25 | For more information, see the [Rover CLI documentation](/rover).
 26 | 
 27 | ## Standalone MCP server binary
 28 | 
 29 | To install or upgrade to the **latest release** of Apollo MCP Server:
 30 | 
 31 | <Tabs>
 32 | 
 33 |     <Tab label="Linux / MacOS">
 34 | 
 35 |     ```terminal showLineNumbers=false
 36 |     curl -sSL https://mcp.apollo.dev/download/nix/latest | sh
 37 |     ```
 38 | 
 39 |     </Tab>
 40 | 
 41 |     <Tab label="Windows">
 42 | 
 43 |     ```terminal showLineNumbers=false
 44 |     iwr 'https://mcp.apollo.dev/download/win/latest' | iex  
 45 |     ```
 46 |     </Tab>
 47 | 
 48 | </Tabs>
 49 | 
 50 | To install or upgrade to a **specific version** of Apollo MCP Server (recommended for CI environments to ensure predictable behavior):
 51 | 
 52 | <Tabs>
 53 | 
 54 |     <Tab label="Linux / MacOS">
 55 | 
 56 |     ```terminal showLineNumbers=false
 57 |     # Note the `v` prefixing the version number
 58 |     curl -sSL https://mcp.apollo.dev/download/nix/v1.1.1 | sh
 59 |     ```
 60 | 
 61 |     </Tab>
 62 | 
 63 |     <Tab label="Windows">
 64 | 
 65 |     ```terminal showLineNumbers=false
 66 |     # Note the `v` prefixing the version number
 67 |     iwr 'https://mcp.apollo.dev/download/win/v1.1.1' | iex
 68 |     ```
 69 |     </Tab>
 70 | 
 71 | </Tabs>
 72 | 
 73 | To install or upgrade to a specific version of Apollo MCP Server that is a **release candidate** (recommended for those who want to test early builds):
 74 | 
 75 | <Tabs>
 76 | 
 77 |     <Tab label="Linux / MacOS">
 78 | 
 79 |     ```terminal showLineNumbers=false
 80 |     # Note the `v` prefixing the version number and the `-rc` suffix
 81 |     curl -sSL https://mcp.apollo.dev/download/nix/v1.1.1-rc.1 | sh
 82 |     ```
 83 | 
 84 |     </Tab>
 85 | 
 86 |     <Tab label="Windows">
 87 | 
 88 |     ```terminal showLineNumbers=false
 89 |     # Note the `v` prefixing the version number and the `-rc` suffix
 90 |     iwr 'https://mcp.apollo.dev/download/win/v1.1.1-rc.1' | iex
 91 |     ```
 92 |     </Tab>
 93 | 
 94 | </Tabs>
 95 | 
 96 | You can configure the Apollo MCP server using a [YAML configuration file](/apollo-mcp-server/config-file).
 97 | 
 98 | If the file is not provided, environment variables for your Apollo graph credentials (`APOLLO_GRAPH_REF` and `APOLLO_KEY`) are required for the server to run.
 99 | 
100 | After installing the MCP server, you can run it using the following command:
101 | 
102 | ```sh showLineNumbers=false
103 | ./apollo-mcp-server [OPTIONS] <PATH/TO/CONFIG/FILE>
104 | ```
105 | 
106 | ### CLI options
107 | 
108 | | Option          | Description               |
109 | | :-------------- | :------------------------ |
110 | | `-h, --help`    | Print help information    |
111 | | `-V, --version` | Print version information |
112 | 
113 | ## With Docker
114 | 
115 | A container image for Apollo MCP Server is built with every release and published at `ghcr.io/apollographql/apollo-mcp-server`.
116 | 
117 | To download the **latest release** Docker container of Apollo MCP Server:
118 | 
119 | ```bash
120 | docker image pull ghcr.io/apollographql/apollo-mcp-server:latest
121 | ```
122 | 
123 | To download a **specific version** of Apollo MCP Server (recommended for CI environments to ensure predictable behavior):
124 | 
125 | ```bash
126 | # Note the `v` prefixing the version number
127 | docker image pull ghcr.io/apollographql/apollo-mcp-server:v1.1.1
128 | ```
129 | 
130 | To download a specific version of Apollo MCP Server that is a release candidate:
131 | 
132 | ```bash
133 | # Note the `v` prefixing the version number and the `-rc` suffix
134 | docker image pull ghcr.io/apollographql/apollo-mcp-server:v1.1.1-rc.1
135 | ```
136 | 
137 | <Note>
138 | 
139 | The container sets a few defaults for ease of use:
140 | 
141 | - **Working Directory is `/data`**: Make sure to mount static schemas / operations to this location
142 |   using the volume flag when running [(`-v` / `--volume`)](https://docs.docker.com/reference/cli/docker/container/run/#volume).
143 | - **Streamable HTTP transport on port 8000**: Make sure to publish container port 8000 for Streamable HTTP connections to
144 |   the MCP server using the publish flag when running [(`-p` / `--publish`)](https://docs.docker.com/reference/cli/docker/container/run/#publish).
145 | 
146 | </Note>
147 | 
148 | Run the following Docker command to start the MCP Server, replacing the values for the paths to the config file and project root with your own:
149 | 
150 | ```sh showLineNumbers=false
151 | docker run \
152 |   -it --rm \
153 |   --name apollo-mcp-server \
154 |   -p 8000:8000 \
155 |   -v <PATH/TO/CONFIG/FILE>:/config.yaml \
156 |   -v <PATH/TO/PROJECT/ROOT>:/data \
157 |   --pull always \
158 |   ghcr.io/apollographql/apollo-mcp-server:latest /config.yaml
159 | ```
160 | 
161 | This command:
162 | 
163 | - Starts an MCP Server in a Docker container
164 | - Maps configuration files into the proper place for the Apollo MCP Server container
165 | - Forwards port 8000 for accessing the MCP Server
166 | 
167 | ## With the Apollo Runtime Container
168 | 
169 | The Apollo Runtime Container runs both the MCP Server and the [Apollo Router](https://www.apollographql.com/docs/graphos/routing) in a single container. It's useful for local development, testing, and production deployments.
170 | 
171 | The Apollo Runtime container includes all services necessary to serve GraphQL and MCP requests, including the Router and MCP Server. It is the easiest way to operate a GraphQL API with MCP support.
172 | 
173 | To serve both MCP and GraphQL requests, ports `4000` and `8000` both need to be exposed. An example command that retrieves the schema from Uplink:
174 | 
175 | ```bash title="Docker" {3, 6}
176 | docker run \
177 |   -p 4000:4000 \
178 |   -p 8000:8000 \
179 |   --env APOLLO_GRAPH_REF="<your-graph-ref>" \
180 |   --env APOLLO_KEY="<your-graph-api-key>" \
181 |   --env MCP_ENABLE=1 \
182 |   --rm \
183 |   ghcr.io/apollographql/apollo-runtime:latest
184 | ```
185 | 
186 | To learn more, review the [Apollo Runtime container documentation](/graphos/routing/self-hosted/containerization/docker).
187 | 
```
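
As noted above, when no config file is passed the server falls back to environment variables for graph credentials. For example (placeholder values):

```sh showLineNumbers=false
# Run against GraphOS using credentials from the environment instead of a config file
export APOLLO_GRAPH_REF="<your-graph-ref>"
export APOLLO_KEY="<your-graph-api-key>"
./apollo-mcp-server
```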

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/files.rs:
--------------------------------------------------------------------------------

```rust
  1 | use std::path::{Path, PathBuf};
  2 | use std::time::Duration;
  3 | 
  4 | use futures::prelude::*;
  5 | use notify::Config;
  6 | use notify::EventKind;
  7 | use notify::PollWatcher;
  8 | use notify::RecursiveMode;
  9 | use notify::Watcher;
 10 | use notify::event::DataChange;
 11 | use notify::event::MetadataKind;
 12 | use notify::event::ModifyKind;
 13 | use tokio::sync::mpsc;
 14 | use tokio::sync::mpsc::error::TrySendError;
 15 | 
 16 | #[cfg(not(test))]
 17 | const DEFAULT_WATCH_DURATION: Duration = Duration::from_secs(3);
 18 | 
 19 | #[cfg(test)]
 20 | const DEFAULT_WATCH_DURATION: Duration = Duration::from_millis(100);
 21 | 
 22 | /// Creates a stream of events emitted whenever the file at the path changes. The stream never
 23 | /// terminates and must be dropped to finish watching.
 24 | ///
 25 | /// # Arguments
 26 | ///
 27 | /// * `path`: The file to watch
 28 | ///
 29 | /// returns: impl Stream<Item=()>
 30 | ///
 31 | pub fn watch(path: &Path) -> impl Stream<Item = ()> + use<> {
 32 |     watch_with_duration(path, DEFAULT_WATCH_DURATION)
 33 | }
 34 | 
 35 | #[allow(clippy::panic)] // TODO: code copied from router contained existing panics
 36 | fn watch_with_duration(path: &Path, duration: Duration) -> impl Stream<Item = ()> + use<> {
 37 |     let path = PathBuf::from(path);
 38 |     let is_dir = path.is_dir();
 39 |     let watched_path = path.clone();
 40 | 
 41 |     let (watch_sender, watch_receiver) = mpsc::channel(1);
 42 |     let watch_receiver_stream = tokio_stream::wrappers::ReceiverStream::new(watch_receiver);
 43 |     // We can't use the recommended watcher, because there's just too much variation across
 44 |     // platforms and file systems. We use the Poll Watcher, which is implemented consistently
 45 |     // across all platforms. Less reactive than other mechanisms, but at least it's predictable
 46 |     // across all environments. We compare contents as well, which reduces false positives with
 47 |     // some additional processing burden.
 48 |     let config = Config::default()
 49 |         .with_poll_interval(duration)
 50 |         .with_compare_contents(true);
 51 |     let mut watcher = PollWatcher::new(
 52 |         move |res: Result<notify::Event, notify::Error>| match res {
 53 |             Ok(event) => {
 54 |                 // Events of interest are writes to the timestamp of a watched file or directory,
 55 |                 // changes to the data of a watched file, and the addition or removal of a file.
 56 |                 if matches!(
 57 |                     event.kind,
 58 |                     EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime))
 59 |                         | EventKind::Modify(ModifyKind::Data(DataChange::Any))
 60 |                         | EventKind::Create(_)
 61 |                         | EventKind::Remove(_)
 62 |                 ) {
 63 |                     if !(event.paths.contains(&watched_path)
 64 |                         || (is_dir && event.paths.iter().any(|p| p.starts_with(&watched_path)))) {
 65 |                         tracing::trace!(
 66 |                             "Ignoring change event with paths {:?} and kind {:?} - watched paths are {:?}",
 67 |                             event.paths,
 68 |                             event.kind,
 69 |                             watched_path
 70 |                         );
 71 |                     } else {
 72 |                         loop {
 73 |                             match watch_sender.try_send(()) {
 74 |                                 Ok(_) => break,
 75 |                                 Err(err) => {
 76 |                                     tracing::warn!(
 77 |                                         "could not process file watch notification. {}",
 78 |                                         err.to_string()
 79 |                                     );
 80 |                                     if matches!(err, TrySendError::Full(_)) {
 81 |                                         std::thread::sleep(Duration::from_millis(50));
 82 |                                     } else {
 83 |                                         panic!("event channel failed: {err}");
 84 |                                     }
 85 |                                 }
 86 |                             }
 87 |                         }
 88 |                     }
 89 |                 }
 90 |             }
 91 |             Err(e) => tracing::error!("event error: {:?}", e),
 92 |         },
 93 |         config,
 94 |     )
 95 |     .unwrap_or_else(|_| panic!("could not create watch on: {path:?}"));
 96 |     watcher
 97 |         .watch(&path, RecursiveMode::NonRecursive)
 98 |         .unwrap_or_else(|_| panic!("could not watch: {path:?}"));
 99 |     // Tell watchers to read the file once up front,
100 |     // then listen to fs events.
101 |     stream::once(future::ready(()))
102 |         .chain(watch_receiver_stream)
103 |         .chain(stream::once(async move {
104 |             // This exists to give the stream ownership of the watcher.
105 |             // Without it the watcher would be dropped and the stream would terminate.
106 |             // This code never actually gets run.
107 |             // Ideally the watcher would implement a stream itself, so that
108 |             // this hackery wouldn't be needed.
109 |             drop(watcher);
110 |         }))
111 |         .boxed()
112 | }
113 | 
114 | #[cfg(test)]
115 | pub(crate) mod tests {
116 |     use std::env::temp_dir;
117 |     use std::fs::File;
118 |     use std::io::Seek;
119 |     use std::io::Write;
120 |     use std::path::PathBuf;
121 | 
122 |     use test_log::test;
123 | 
124 |     use super::*;
125 | 
126 |     #[test(tokio::test)]
127 |     async fn basic_watch() {
128 |         let (path, mut file) = create_temp_file();
129 |         let mut watch = watch_with_duration(&path, Duration::from_millis(100));
130 |         // This test can be very racy. Without synchronisation, all
131 |         // we can hope is that if we wait long enough between each
132 |         // write/flush then the future will become ready.
133 |         // The stream emits an initial item, signalling readiness
134 |         assert!(futures::poll!(watch.next()).is_ready());
135 |         write_and_flush(&mut file, "Some data 1").await;
136 |         assert!(futures::poll!(watch.next()).is_ready());
137 |         write_and_flush(&mut file, "Some data 2").await;
138 |         assert!(futures::poll!(watch.next()).is_ready())
139 |     }
140 | 
141 |     pub(crate) fn create_temp_file() -> (PathBuf, File) {
142 |         let path = temp_dir().join(format!("{}", uuid::Uuid::new_v4()));
143 |         let file = File::create(&path).unwrap();
144 |         (path, file)
145 |     }
146 | 
147 |     pub(crate) async fn write_and_flush(file: &mut File, contents: &str) {
148 |         file.rewind().unwrap();
149 |         file.set_len(0).unwrap();
150 |         file.write_all(contents.as_bytes()).unwrap();
151 |         file.flush().unwrap();
152 |         tokio::time::sleep(Duration::from_millis(500)).await;
153 |     }
154 | }
155 | 
```
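
Callers treat the returned stream as an endless change signal. A minimal consumption sketch, assuming the `files` module is public at this path:

```rust
use std::path::Path;
use futures::StreamExt;

async fn reload_on_change() {
    // The first item arrives immediately (the initial "read the file" signal),
    // then one item per detected change.
    let mut changes = apollo_mcp_registry::files::watch(Path::new("schema.graphql"));
    while let Some(()) = changes.next().await {
        println!("schema.graphql changed; re-reading");
    }
}
```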

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth.rs:
--------------------------------------------------------------------------------

```rust
  1 | use axum::{
  2 |     Json, Router,
  3 |     extract::{Request, State},
  4 |     http::StatusCode,
  5 |     middleware::Next,
  6 |     response::Response,
  7 |     routing::get,
  8 | };
  9 | use axum_extra::{
 10 |     TypedHeader,
 11 |     headers::{Authorization, authorization::Bearer},
 12 | };
 13 | use http::Method;
 14 | use networked_token_validator::NetworkedTokenValidator;
 15 | use schemars::JsonSchema;
 16 | use serde::Deserialize;
 17 | use tower_http::cors::{Any, CorsLayer};
 18 | use url::Url;
 19 | 
 20 | mod networked_token_validator;
 21 | mod protected_resource;
 22 | mod valid_token;
 23 | mod www_authenticate;
 24 | 
 25 | use protected_resource::ProtectedResource;
 26 | pub(crate) use valid_token::ValidToken;
 27 | use valid_token::ValidateToken;
 28 | use www_authenticate::WwwAuthenticate;
 29 | 
 30 | /// Auth configuration options
 31 | #[derive(Debug, Clone, Deserialize, JsonSchema)]
 32 | pub struct Config {
 33 |     /// List of upstream OAuth servers to delegate auth to
 34 |     pub servers: Vec<Url>,
 35 | 
 36 |     /// List of accepted audiences for the OAuth tokens
 37 |     pub audiences: Vec<String>,
 38 | 
 39 |     /// The resource to protect.
 40 |     ///
 41 |     /// Note: This is usually the publicly accessible URL of this running MCP server
 42 |     pub resource: Url,
 43 | 
 44 |     /// Link to documentation related to the protected resource
 45 |     pub resource_documentation: Option<Url>,
 46 | 
 47 |     /// Supported OAuth scopes by this resource server
 48 |     pub scopes: Vec<String>,
 49 | 
 50 |     /// Whether to disable the auth token passthrough to upstream API
 51 |     #[serde(default)]
 52 |     pub disable_auth_token_passthrough: bool,
 53 | }
 54 | 
 55 | impl Config {
 56 |     pub fn enable_middleware(&self, router: Router) -> Router {
 57 |         /// Simple handler to encode our config into the desired OAuth 2.1 protected
 58 |         /// resource format
 59 |         async fn protected_resource(State(auth_config): State<Config>) -> Json<ProtectedResource> {
 60 |             Json(auth_config.into())
 61 |         }
 62 | 
 63 |         // Set up auth routes. NOTE: CORS needs to allow GET requests to the
 64 |         // metadata information paths.
 65 |         let cors = CorsLayer::new()
 66 |             .allow_methods([Method::GET])
 67 |             .allow_origin(Any);
 68 |         let auth_router = Router::new()
 69 |             .route(
 70 |                 "/.well-known/oauth-protected-resource",
 71 |                 get(protected_resource),
 72 |             )
 73 |             .with_state(self.clone())
 74 |             .layer(cors);
 75 | 
 76 |         // Merge with MCP server routes
 77 |         Router::new()
 78 |             .merge(auth_router)
 79 |             .merge(router.layer(axum::middleware::from_fn_with_state(
 80 |                 self.clone(),
 81 |                 oauth_validate,
 82 |             )))
 83 |     }
 84 | }
 85 | 
 86 | /// Validate that incoming requests carry a corresponding bearer JWT token
 87 | #[tracing::instrument(skip_all, fields(status_code, reason))]
 88 | async fn oauth_validate(
 89 |     State(auth_config): State<Config>,
 90 |     token: Option<TypedHeader<Authorization<Bearer>>>,
 91 |     mut request: Request,
 92 |     next: Next,
 93 | ) -> Result<Response, (StatusCode, TypedHeader<WwwAuthenticate>)> {
 94 |     // Consolidated unauthorized error for use with any fallible step in this process
 95 |     let unauthorized_error = || {
 96 |         let mut resource = auth_config.resource.clone();
 97 |         resource.set_path("/.well-known/oauth-protected-resource");
 98 | 
 99 |         (
100 |             StatusCode::UNAUTHORIZED,
101 |             TypedHeader(WwwAuthenticate::Bearer {
102 |                 resource_metadata: resource,
103 |             }),
104 |         )
105 |     };
106 | 
107 |     let validator = NetworkedTokenValidator::new(&auth_config.audiences, &auth_config.servers);
108 |     let token = token.ok_or_else(|| {
109 |         tracing::Span::current().record("reason", "missing_token");
110 |         tracing::Span::current().record("status_code", StatusCode::UNAUTHORIZED.as_u16());
111 |         unauthorized_error()
112 |     })?;
113 | 
114 |     let valid_token = validator.validate(token.0).await.ok_or_else(|| {
115 |         tracing::Span::current().record("reason", "invalid_token");
116 |         tracing::Span::current().record("status_code", StatusCode::UNAUTHORIZED.as_u16());
117 |         unauthorized_error()
118 |     })?;
119 | 
120 |     // Insert new context to ensure that handlers only use our enforced token verification
121 |     // for propagation
122 |     request.extensions_mut().insert(valid_token);
123 | 
124 |     let response = next.run(request).await;
125 |     tracing::Span::current().record("status_code", response.status().as_u16());
126 |     Ok(response)
127 | }
128 | 
129 | #[cfg(test)]
130 | mod tests {
131 |     use super::*;
132 |     use axum::middleware::from_fn_with_state;
133 |     use axum::routing::get;
134 |     use axum::{
135 |         Router,
136 |         body::Body,
137 |         http::{Request, StatusCode},
138 |     };
139 |     use http::header::{AUTHORIZATION, WWW_AUTHENTICATE};
140 |     use tower::ServiceExt; // for .oneshot()
141 |     use url::Url;
142 | 
143 |     fn test_config() -> Config {
144 |         Config {
145 |             servers: vec![Url::parse("http://localhost:1234").unwrap()],
146 |             audiences: vec!["test-audience".to_string()],
147 |             resource: Url::parse("http://localhost:4000").unwrap(),
148 |             resource_documentation: None,
149 |             scopes: vec!["read".to_string()],
150 |             disable_auth_token_passthrough: false,
151 |         }
152 |     }
153 | 
154 |     fn test_router(config: Config) -> Router {
155 |         Router::new()
156 |             .route("/test", get(|| async { "ok" }))
157 |             .layer(from_fn_with_state(config, oauth_validate))
158 |     }
159 | 
160 |     #[tokio::test]
161 |     async fn missing_token_returns_unauthorized() {
162 |         let config = test_config();
163 |         let app = test_router(config.clone());
164 |         let req = Request::builder().uri("/test").body(Body::empty()).unwrap();
165 |         let res = app.oneshot(req).await.unwrap();
166 |         assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
167 |         let headers = res.headers();
168 |         let www_auth = headers.get(WWW_AUTHENTICATE).unwrap().to_str().unwrap();
169 |         assert!(www_auth.contains("Bearer"));
170 |         assert!(www_auth.contains("resource_metadata"));
171 |     }
172 | 
173 |     #[tokio::test]
174 |     async fn invalid_token_returns_unauthorized() {
175 |         let config = test_config();
176 |         let app = test_router(config.clone());
177 |         let req = Request::builder()
178 |             .uri("/test")
179 |             .header(AUTHORIZATION, "Bearer invalidtoken")
180 |             .body(Body::empty())
181 |             .unwrap();
182 |         let res = app.oneshot(req).await.unwrap();
183 |         assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
184 |         let headers = res.headers();
185 |         let www_auth = headers.get(WWW_AUTHENTICATE).unwrap().to_str().unwrap();
186 |         assert!(www_auth.contains("Bearer"));
187 |         assert!(www_auth.contains("resource_metadata"));
188 |     }
189 | }
190 | 
```
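
Wired into the server, this `Config` deserializes from the YAML config file. A hedged sketch of the corresponding section, with the nesting under `transport.auth` inferred from the `Transport` variants in `main.rs` and all values as placeholders:

```yaml
transport:
  type: streamable_http
  auth:
    servers:
      - https://auth.example.com     # upstream OAuth server
    audiences:
      - mcp-users
    resource: http://localhost:8000  # publicly accessible URL of this MCP server
    scopes:
      - read
    disable_auth_token_passthrough: false
```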

--------------------------------------------------------------------------------
/docs/source/deploy.mdx:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | title: Deploy the MCP Server
  3 | subtitle: Deployment using Docker containers, when to choose which option, and production considerations
  4 | ---
  5 | 
  6 | To deploy Apollo MCP Server in your production environment, use the recommended [Apollo Runtime Container](#apollo-runtime-container-recommended). You can also use a [standalone Apollo MCP Server container](#standalone-apollo-mcp-server-container) if needed.
  7 | 
  8 | ## Apollo Runtime Container (Recommended)
  9 | 
 10 | For most production deployments, use the all-in-one [Apollo Runtime Container](/graphos/routing/self-hosted/containerization/docker). It includes everything you need to serve both GraphQL and MCP requests in a single, optimized container.
 11 | 
 12 | ### Why choose the Apollo Runtime Container?
 13 | 
 14 | - **Simplified operations**: Single container to deploy and manage
 15 | - **Optimized performance**: Apollo Router and Apollo MCP Server are co-located
 16 | - **Built-in best practices**: Pre-configured for production use
 17 | - **Easier scaling**: Scale both GraphQL and MCP together
 18 | - **Unified monitoring**: Single service to monitor and debug
 19 | 
 20 | ### Deploy the Apollo Runtime Container
 21 | 
 22 | The Apollo Runtime Container includes all services necessary to serve GraphQL and MCP requests, including Apollo Router and Apollo MCP Server. Both port `4000` (GraphQL) and `8000` (MCP) are exposed.
 23 | 
 24 | ```bash title="Deploy with GraphOS (Recommended)"
 25 | docker run \
 26 |   -p 4000:4000 \
 27 |   -p 8000:8000 \
 28 |   --env APOLLO_GRAPH_REF="<your-graph-ref>" \
 29 |   --env APOLLO_KEY="<your-graph-api-key>" \
 30 |   --env MCP_ENABLE=1 \
 31 |   -v /path/to/config:/config/mcp_config.yaml \
 32 |   --rm \
 33 |   ghcr.io/apollographql/apollo-runtime:latest
 34 | ```
 35 | 
 36 | When you run this, it will:
 37 | 
 38 | - Fetch your schema from GraphOS using your graph credentials (`APOLLO_GRAPH_REF` and `APOLLO_KEY`)
 39 | - Start the Apollo Router with your graph configuration
 40 | - Provide a configuration file for the MCP server by mounting it to `/config/mcp_config.yaml`
 41 | - Enable the Apollo MCP Server endpoint at `/mcp`
 42 | 
 43 | This command uses GraphOS-managed persisted queries for MCP tools. You'll need to publish your operations to the [GraphOS-managed persisted queries list](/apollo-mcp-server/define-tools#from-graphos-managed-persisted-queries). If you want to use other methods for defining MCP tools, see the [Define MCP Tools](/apollo-mcp-server/define-tools) page.
 44 | 
 45 | To learn more, see the [Apollo Runtime Container documentation](/graphos/routing/self-hosted/containerization/docker).
 46 | 
 47 | ## Standalone Apollo MCP Server container
 48 | 
 49 | Use the standalone Apollo MCP Server container if you already have a GraphQL API running elsewhere and want to add MCP capabilities to it.
 50 | 
 51 | ### Deploy standalone Apollo MCP Server container
 52 | 
 53 | Apollo MCP Server is available as a standalone Docker container. Container images are downloadable using the image `ghcr.io/apollographql/apollo-mcp-server`.
 54 | 
 55 | By default, the container expects all schema and operation files to be present in the `/data` directory within the container and that clients use Streamable HTTP transport on container port `8000`.
 56 | 
 57 | Here's an example `docker run` command that runs Apollo MCP Server for an example using [TheSpaceDevs graph](https://thespacedevs-production.up.railway.app/):
 58 | 
 59 | ```yaml title="mcp_config.yaml"
 60 | endpoint: https://thespacedevs-production.up.railway.app/
 61 | operations:
 62 |   source: local
 63 |   paths:
 64 |     - /data/operations/
 65 | schema:
 66 |   source: local
 67 |   path: /data/api.graphql
 68 | ```
 69 | 
 70 | ```sh showLineNumbers=false
 71 | docker run \
 72 |   -it --rm \
 73 |   --name apollo-mcp-server \
 74 |   -p 8000:8000 \
 75 |   -v <path/to>/mcp_config.yaml:/config.yaml \
 76 |   -v $PWD/graphql/TheSpaceDevs:/data \
 77 |   --pull always \
 78 |   ghcr.io/apollographql/apollo-mcp-server:latest /config.yaml
 79 | ```
 80 | 
 81 | ## When to choose which option?
 82 | 
 83 | | Scenario                            | Recommended Option           | Why                                                                                                       |
 84 | | ----------------------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------------- |
 85 | | New GraphQL + MCP deployment        | Apollo Runtime Container     | Single container, easier to manage, optimized performance                                                 |
 86 | | GraphOS-managed graph               | Apollo Runtime Container     | Automatic sync for schema and persisted queries, unified telemetry                                        |
 87 | | Kubernetes/orchestrated environment | Apollo Runtime Container     | Fewer moving parts, simpler networking                                                                    |
 88 | | Adding MCP to existing GraphQL API  | Standalone Apollo MCP Server | Connect to your existing GraphQL endpoint                                                                 |
 89 | | Local development                   | `rover dev`                  | [Run `rover dev`](/apollo-mcp-server/run#with-the-rover-cli) to develop locally with both GraphQL and MCP |
 90 |           
 91 | ## Production Considerations
 92 | 
 93 | ### Load Balancing & Session Affinity
 94 | 
 95 | MCP is a stateful protocol that requires session affinity (sticky sessions).
 96 | 
 97 | When an MCP client initializes a session with Apollo MCP Server, it receives a session identifier unique to that server instance through the `mcp-session-id` header. You must enable session affinity in your load balancer so that all requests sharing the same `mcp-session-id` are routed to the same backend instance.
 98 | 
 99 | Most cloud load balancers (ALB, GCP LB) don't support header-based session affinity. Use Nginx, HAProxy, or Envoy/Istio for proper session routing.
100 | 
101 | #### Stateless mode
102 | 
103 | Although MCP is a stateful protocol by default, the Streamable HTTP transport supports operating in a stateless mode.
104 | This means that no session ID is passed back and forth between the client and server, and each request made to the MCP server happens in its own HTTP POST.
105 | Because session state is no longer held in memory on a single host, the server can scale horizontally, though this could cause issues if your MCP client depends on sticky sessions.
106 | 
107 | You can configure stateless mode in the transport config section:
108 | 
109 | ```yaml
110 | transport:
111 |   type: streamable_http
112 |   stateful_mode: false
113 | ```
114 | 
115 | ### Scaling Recommendations
116 | 
117 | For the Apollo Runtime Container:
118 | 
119 | - Scale both GraphQL and MCP together as a single unit
120 | - Simpler horizontal scaling
121 | - Consistent performance characteristics
122 | 
123 | For the standalone Apollo MCP Server container:
124 | 
125 | - Scale Apollo MCP Server independently of your GraphQL API
126 | - More complex but enables fine-tuned resource allocation
127 | 
128 | ### Next steps
129 | 
130 | After you deploy, configure:
131 | 
132 | 1. [Health checks](/apollo-mcp-server/health-checks) for monitoring
133 | 2. [CORS settings](/apollo-mcp-server/cors) for browser clients
134 | 3. [Authorization](/apollo-mcp-server/auth) for production security
135 | 
```
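
For the session-affinity requirement described above, one option is consistent hashing on the `mcp-session-id` header. A minimal Nginx sketch (backend hostnames are placeholders):

```nginx
upstream mcp_backend {
    # Route all requests carrying the same mcp-session-id to the same instance
    hash $http_mcp_session_id consistent;
    server mcp-1:8000;
    server mcp-2:8000;
}

server {
    listen 80;
    location /mcp {
        proxy_pass http://mcp_backend;
    }
}
```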

--------------------------------------------------------------------------------
/scripts/nix/install.sh:
--------------------------------------------------------------------------------

```bash
  1 | #!/bin/bash
  2 | #
  3 | # Licensed under the MIT license
  4 | # <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
  5 | # option. This file may not be copied, modified, or distributed
  6 | # except according to those terms.
  7 | 
  8 | # Installs the latest version of the Apollo MCP Server.
  9 | # Specify a specific version to install with the $VERSION variable.
 10 | 
 11 | set -u
 12 | 
 13 | BINARY_DOWNLOAD_PREFIX="${APOLLO_MCP_SERVER_BINARY_DOWNLOAD_PREFIX:="https://github.com/apollographql/apollo-mcp-server/releases/download"}"
 14 | 
 15 | # Apollo MCP Server version defined in apollo-mcp-server's Cargo.toml
 16 | # Note: Change this line manually during the release steps.
 17 | PACKAGE_VERSION="v1.1.1"
 18 | 
 19 | download_binary_and_run_installer() {
 20 |     downloader --check
 21 |     need_cmd mktemp
 22 |     need_cmd chmod
 23 |     need_cmd mkdir
 24 |     need_cmd rm
 25 |     need_cmd rmdir
 26 |     need_cmd tar
 27 |     need_cmd which
 28 |     need_cmd dirname
 29 |     need_cmd awk
 30 |     need_cmd cut
 31 | 
 32 |     # if $VERSION isn't provided or has 0 length, use the version from
 33 |     # apollo-mcp-server's Cargo.toml. ${VERSION:-} expands to the default after
 34 |     # the :- (here, the empty string) when VERSION is unset. -z checks for empty str
 35 |     if [ -z "${VERSION:-}" ]; then
 36 |         # VERSION is either not set or empty
 37 |         DOWNLOAD_VERSION=$PACKAGE_VERSION
 38 |     else
 39 |         # VERSION set and not empty
 40 |         DOWNLOAD_VERSION=$VERSION
 41 |     fi
 42 | 
 43 | 
 44 |     get_architecture || return 1
 45 |     local _arch="$RETVAL"
 46 |     assert_nz "$_arch" "arch"
 47 | 
 48 |     local _ext=""
 49 |     case "$_arch" in
 50 |         *windows*)
 51 |             _ext=".exe"
 52 |             ;;
 53 |     esac
 54 | 
 55 |     local _tardir="apollo-mcp-server-$DOWNLOAD_VERSION-${_arch}"
 56 |     local _url="$BINARY_DOWNLOAD_PREFIX/$DOWNLOAD_VERSION/${_tardir}.tar.gz"
 57 |     local _dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t apollo-mcp-server)"
 58 |     local _file="$_dir/input.tar.gz"
 59 |     local _apollo_mcp_server="$_dir/apollo-mcp-server$_ext"
 60 |     local _safe_url
 61 | 
 62 |     # Remove credentials from the URL for logging
 63 |     _safe_url=$(echo "$_url" | awk '{sub("https://[^@]+@","https://");}1')
 64 |     say "downloading apollo-mcp-server from $_safe_url" 1>&2
 65 | 
 66 |     ensure mkdir -p "$_dir"
 67 |     downloader "$_url" "$_file"
 68 |     if [ $? != 0 ]; then
 69 |       say "failed to download $_safe_url"
 70 |       say "this may be a standard network error, but it may also indicate"
 71 |       say "that the MCP Server's release process is not working. When in doubt"
 72 |       say "please feel free to open an issue!"
 73 |       say "https://github.com/apollographql/apollo-mcp-server/issues/new/choose"
 74 |       exit 1
 75 |     fi
 76 | 
 77 |     ensure tar xf "$_file" --strip-components 1 -C "$_dir"
 78 | 
 79 |     outfile="./apollo-mcp-server"
 80 | 
 81 |     say "Moving $_apollo_mcp_server to $outfile ..."
 82 |     mv "$_apollo_mcp_server" "$outfile"
 83 | 
 84 |     local _retval=$?
 85 | 
 86 |     say ""
 87 |     say "You can now run the Apollo MCP Server using '$outfile'"
 88 | 
 89 |     ignore rm -rf "$_dir"
 90 | 
 91 |     return "$_retval"
 92 | }
 93 | 
 94 | get_architecture() {
 95 |     local _ostype="$(uname -s)"
 96 |     local _cputype="$(uname -m)"
 97 | 
 98 |     if [ "$_ostype" = Darwin -a "$_cputype" = i386 ]; then
 99 |         # Darwin `uname -s` lies
100 |         if sysctl hw.optional.x86_64 | grep -q ': 1'; then
101 |             local _cputype=x86_64
102 |         fi
103 |     fi
104 | 
105 |     if [ "$_ostype" = Darwin -a "$_cputype" = arm64 ]; then
106 |         # Darwin `uname -s` doesn't seem to lie on Big Sur
107 |         # but the cputype we want is called aarch64, not arm64 (they are equivalent)
108 |         local _cputype=aarch64
109 |     fi
110 | 
111 |     case "$_ostype" in
112 |         Linux)
113 |             if has_required_glibc; then
114 |                 local _ostype=unknown-linux-gnu
115 |             else
116 |                 local _ostype=unknown-linux-musl
117 | 
118 |                 # We do not currently release builds for aarch64-unknown-linux-musl
119 |                 if [ "$_cputype" = aarch64 ]; then
120 |                     err "Unsupported platform: aarch64-$_ostype"
121 |                 fi
122 | 
123 |                 say "Downloading musl binary"
124 |             fi
125 |             ;;
126 | 
127 |         Darwin)
128 |             local _ostype=apple-darwin
129 |             ;;
130 | 
131 |         MINGW* | MSYS* | CYGWIN*)
132 |             local _ostype=pc-windows-msvc
133 |             ;;
134 | 
135 |         *)
136 |             err "no precompiled binaries available for OS: $_ostype"
137 |             ;;
138 |     esac
139 | 
140 |     case "$_cputype" in
141 |         # these are the only two acceptable values for cputype
142 |         x86_64 | aarch64 )
143 |             ;;
144 |         *)
145 |             err "no precompiled binaries available for CPU architecture: $_cputype"
146 | 
147 |     esac
148 | 
149 |     local _arch="$_cputype-$_ostype"
150 | 
151 |     RETVAL="$_arch"
152 | }
153 | 
154 | say() {
155 |     local green=`tput setaf 2 2>/dev/null || echo ''`
156 |     local reset=`tput sgr0 2>/dev/null || echo ''`
157 |     echo "$1"
158 | }
159 | 
160 | err() {
161 |     local red=`tput setaf 1 2>/dev/null || echo ''`
162 |     local reset=`tput sgr0 2>/dev/null || echo ''`
163 |     say "${red}ERROR${reset}: $1" >&2
164 |     exit 1
165 | }
166 | 
167 | has_required_glibc() {
168 |     local _ldd_version="$(ldd --version 2>&1 | head -n1)"
169 |     # glibc version string is inconsistent across distributions
170 |     # instead check if the string does not contain musl (case insensitive)
171 |     if echo "${_ldd_version}" | grep -iv musl >/dev/null; then
172 |         local _glibc_version=$(echo "${_ldd_version}" | awk 'NR==1 { print $NF }')
173 |         local _glibc_major_version=$(echo "${_glibc_version}" | cut -d. -f1)
174 |         local _glibc_min_version=$(echo "${_glibc_version}" | cut -d. -f2)
175 |         local _min_major_version=2
176 |         local _min_minor_version=17
177 |         if [ "${_glibc_major_version}" -gt "${_min_major_version}" ] \
178 |             || { [ "${_glibc_major_version}" -eq "${_min_major_version}" ] \
179 |             && [ "${_glibc_min_version}" -ge "${_min_minor_version}" ]; }; then
180 |             return 0
181 |         else
182 |             say "This operating system needs glibc >= ${_min_major_version}.${_min_minor_version}, but only has ${_libc_version} installed."
183 |         fi
184 |     else
185 |         say "This operating system does not support dynamic linking to glibc."
186 |     fi
187 | 
188 |     return 1
189 | }
190 | 
191 | need_cmd() {
192 |     if ! check_cmd "$1"
193 |     then err "need '$1' (command not found)"
194 |     fi
195 | }
196 | 
197 | check_cmd() {
198 |     command -v "$1" > /dev/null 2>&1
199 |     return $?
200 | }
201 | 
202 | need_ok() {
203 |     if [ $? != 0 ]; then err "$1"; fi
204 | }
205 | 
206 | assert_nz() {
207 |     if [ -z "$1" ]; then err "assert_nz $2"; fi
208 | }
209 | 
210 | # Run a command that should never fail. If the command fails execution
211 | # will immediately terminate with an error showing the failing
212 | # command.
213 | ensure() {
214 |     "$@"
215 |     need_ok "command failed: $*"
216 | }
217 | 
218 | # This is just for indicating that commands' results are being
219 | # intentionally ignored. Usually, because it's being executed
220 | # as part of error handling.
221 | ignore() {
222 |     "$@"
223 | }
224 | 
225 | # This wraps curl or wget. Try curl first, if not installed,
226 | # use wget instead.
227 | downloader() {
228 |     if check_cmd curl
229 |     then _dld=curl
230 |     elif check_cmd wget
231 |     then _dld=wget
232 |     else _dld='curl or wget' # to be used in error message of need_cmd
233 |     fi
234 | 
235 |     if [ "$1" = --check ]
236 |     then need_cmd "$_dld"
237 |     elif [ "$_dld" = curl ]
238 |     then curl -sSfL "$1" -o "$2"
239 |     elif [ "$_dld" = wget ]
240 |     then wget "$1" -O "$2"
241 |     else err "Unknown downloader"   # should not reach here
242 |     fi
243 | }
244 | 
245 | download_binary_and_run_installer "$@" || exit 1
246 | 
```
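
Per the header comment, the `$VERSION` variable overrides the pinned `PACKAGE_VERSION`. For example (the versioned download URLs shown in the docs are the primary mechanism; this is an alternative):

```sh
# Pin the version the installer downloads by setting VERSION for the script
curl -sSL https://mcp.apollo.dev/download/nix/latest | VERSION=v1.1.1 sh
```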

--------------------------------------------------------------------------------
/docs/source/index.mdx:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | title: Apollo MCP Server
  3 | subtitle: Enable graph-based API orchestration with AI
  4 | redirectFrom:
  5 |     - /apollo-mcp-server/user-guide
  6 |     - /apollo-mcp-server/guides
  7 | ---
  8 | 
  9 | Apollo MCP Server provides a standard way for AI models to access and orchestrate your APIs running with Apollo.
 10 | 
 11 | ## What is MCP?
 12 | 
 13 | [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) is an open protocol that standardizes how applications provide context to AI models such as Large Language Models (LLMs). MCP enables LLMs and AI agents to fetch data indirectly from external sources.
 14 | 
 15 | MCP follows a client-server architecture. MCP servers expose functions, called _tools_, that MCP clients can invoke.
 16 | 
 17 | ## What is Apollo MCP Server?
 18 | 
 19 | Apollo MCP Server is an implementation of an MCP server. It makes GraphQL API operations available to AI clients as MCP tools. You can use Apollo MCP Server with any GraphQL API.
 20 | 
 21 | The GraphQL operations can be configured from persisted queries: predefined, pre-approved lists of operations that are registered with and maintained by a graph. Operations can also be discovered by AI through introspection of your graph schema.
 22 | 
 23 | Apollo MCP Server is deployable in local environments via Apollo's Rover CLI or in containerized services in your cloud infrastructure. It can expose an MCP endpoint using Streamable HTTP for communication with AI clients.
 24 | 
 25 | ## How Apollo MCP Server works
 26 | 
 27 | Apollo MCP Server bridges AI applications and your GraphQL APIs, translating GraphQL operations into MCP tools that AI models can discover and use.
 28 | 
 29 | ```mermaid
 30 | graph LR
 31 |     %% Nodes
 32 |     AI["AI Application\n(Claude, ChatGPT, etc.)"]
 33 |     MCPClient["MCP Client\n(Built into AI app)"]
 34 |     MCPServer["Apollo MCP Server"]
 35 |     GraphQL["GraphQL API\n(Your Graph)"]
 36 |     Data["Your Data Sources\n(Databases, APIs, etc.)"]
 37 | 
 38 |     %% Connections
 39 |     AI <-->|"Natural Language\nRequests"| MCPClient
 40 |     MCPClient <-->|"MCP Protocol\n(stdio/Streamable HTTP)"| MCPServer
 41 |     MCPServer <-->|"GraphQL\nOperations"| GraphQL
 42 |     GraphQL <-->|"Data\nQueries"| Data
 43 | 
 44 |     %% Tool Generation
 45 |     subgraph ToolGeneration[Tool Generation]
 46 |         direction TB
 47 |         OpFiles["Operation Files\n(.graphql)"]
 48 |         PQM["Persisted Query\nManifests"]
 49 |         Introspection["Schema\nIntrospection"]
 50 |         Tools["MCP Tools"]
 51 |         
 52 |         OpFiles --> Tools
 53 |         PQM --> Tools
 54 |         Introspection --> Tools
 55 |     end
 56 | 
 57 |     MCPServer -.->|"Exposes"| Tools
 58 |     Tools -.->|"Available to"| MCPClient
 59 | 
 60 |     %% Styling
 61 |     classDef default stroke-width:1px
 62 |     classDef aiClient stroke-width:2px
 63 |     classDef mcpComponent stroke-width:2px
 64 |     classDef apolloComponent stroke-width:2px
 65 |     classDef apiComponent stroke-width:2px
 66 |     classDef dataComponent stroke-width:2px
 67 | 
 68 |     class AI aiClient
 69 |     class MCPClient mcpComponent
 70 |     class MCPServer apolloComponent
 71 |     class GraphQL apiComponent
 72 |     class Data dataComponent
 73 |     class OpFiles,PQM,Introspection apolloComponent
 74 |     class Tools mcpComponent
 75 | ```
 76 | 
 77 | The architecture enables intelligent API orchestration through these components:
 78 | 
 79 | * AI Applications: Tools like Claude Desktop or ChatGPT connect to Apollo MCP Server through their built-in MCP clients, making requests in natural language.
 80 | * Transport Options: Communication happens over stdio for local development or over Streamable HTTP for networked deployments.
 81 | * Tool Generation: Apollo MCP Server creates MCP tools from your GraphQL operations using:
 82 |     * Operation Files: Individual `.graphql` files for specific queries or mutations
 83 |     * Persisted Query Manifests: Pre-approved operation lists from Apollo GraphOS
 84 |     * Schema Introspection: Dynamic operation discovery for flexible AI exploration
 85 | 
 86 | * Secure Execution: When invoked, the server executes GraphQL operations against your API endpoint, respecting all existing authentication, headers, and security policies.
 87 | * Existing Infrastructure: Your GraphQL API handles requests normally, with Apollo MCP Server acting as a controlled gateway rather than requiring any changes to your graph.
 88 | 
 89 | This design lets you expose precise GraphQL capabilities to AI while maintaining complete control over data access and security.
 90 | 
 91 | ### Example usage
 92 | 
 93 | Once configured, AI applications can use your GraphQL operations naturally:
 94 | 
 95 | > User: "Show me the astronauts currently in space"
 96 | >
 97 | > Claude: *Uses GetAstronautsCurrentlyInSpace tool to query your GraphQL API*
 98 | >
 99 | > "There are currently 7 astronauts aboard the ISS..."
100 | 
101 | ## Why GraphQL for AI?
102 | 
103 | GraphQL's architecture provides unique advantages for AI-powered API orchestration:
104 | 
105 | **🎯 Deterministic Execution**: GraphQL's built-in relationship handling and query structure eliminate guesswork for AI models. The graph defines clear paths between data types, ensuring AI agents execute operations in the correct sequence without complex prompt engineering or error-prone orchestration logic.
106 | 
107 | **🛡️ Policy Enforcement**: Security policies and access controls apply consistently across all services within a single GraphQL query context. This unified enforcement model ensures AI operations respect organizational boundaries, even when spanning multiple underlying APIs or microservices.
108 | 
109 | **⚡ Efficiency**: AI agents can request precisely the data needed in a single GraphQL query, reducing API calls, network overhead, and token usage. This focused approach delivers faster responses and lower operational costs compared to orchestrating multiple REST endpoints.
110 | 
111 | **🔄 Agility**: The pace of AI development demands infrastructure that can evolve daily. GraphQL's declarative approach lets teams rapidly create, modify, and deploy new AI capabilities through self-service tooling. Product teams can wire up new MCP tools without waiting for custom development, keeping pace with AI's unprecedented velocity.
112 | 
113 | With Apollo MCP Server, these GraphQL advantages become immediately accessible to AI applications through standardized MCP tools.
114 | 
115 | ## Benefits of Apollo MCP Server
116 | 
117 | - **🤖 Enable AI-driven API orchestration**. With Apollo MCP Server, AI models can act as intelligent orchestrators of your GraphQL API operations. By exposing GraphQL operations as distinct MCP tools, AI clients can dynamically chain them together, in combination with other MCP servers and tools, to execute complex workflows and automate multi-step processes.
118 | 
119 | - **🚀 Connect AI to GraphQL in minutes**. Developers can expose existing or new GraphQL API operations to AI clients without building complex custom integrations. By translating GraphQL operations into standardized MCP tools, Apollo MCP Server significantly reduces the effort needed to connect AI to diverse data sources.
120 | 
121 | - **🔒 Maintain full security control**. By using predefined, pre-approved persisted queries, developers maintain precise governance over which data and operations AI clients can access. This ensures that AI access flows through existing security protocols and data access policies.
122 | 
123 | ## Prerequisites
124 | 
125 | - A GraphQL API
126 | - An MCP Client
127 | 
128 | ## Getting started
129 | 
130 | Ready to connect AI to your GraphQL API? Follow our [5-minute quickstart](/apollo-mcp-server/quickstart) to see Apollo MCP Server in action, or explore the [config file reference](/apollo-mcp-server/config-file) for detailed configuration options.
131 | 
```
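
The Tool Generation flow described in this page maps each GraphQL operation to an MCP tool with a name and description the model can reason about. As a conceptual illustration only, using hypothetical types and a deliberately naive name extraction that are NOT Apollo MCP Server's actual implementation, the mapping looks roughly like this:

```rust
// Conceptual sketch of "operation file -> MCP tool"; all names here are
// illustrative assumptions, not the server's real types or logic.
#[derive(Debug)]
struct McpTool {
    name: String,        // what the AI client sees in the tool list
    description: String, // guides the model's tool selection
    operation: String,   // the GraphQL document executed on invocation
}

fn tool_from_operation(graphql: &str) -> McpTool {
    // A real implementation would use a GraphQL parser; this sketch
    // just grabs the word after `query` or `mutation`.
    let name = graphql
        .split_whitespace()
        .skip_while(|w| *w != "query" && *w != "mutation")
        .nth(1)
        .unwrap_or("anonymous")
        .trim_end_matches(|c: char| !c.is_alphanumeric())
        .to_string();
    McpTool {
        description: format!("Executes the {name} GraphQL operation"),
        operation: graphql.to_string(),
        name,
    }
}

fn main() {
    let op = "query GetAstronautsCurrentlyInSpace { astronauts { name } }";
    println!("{:#?}", tool_from_operation(op));
}
```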

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/schema/schema_stream.rs:
--------------------------------------------------------------------------------

```rust
  1 | // tonic does not derive `Eq` for the gRPC message types, which causes a warning from Clippy. The
  2 | // current suggestion is to explicitly allow the lint in the module that imports the protos.
  3 | // Read more: https://github.com/hyperium/tonic/issues/1056
  4 | #![allow(clippy::derive_partial_eq_without_eq)]
  5 | 
  6 | use crate::uplink::UplinkRequest;
  7 | use crate::uplink::UplinkResponse;
  8 | use crate::uplink::schema::SchemaState;
  9 | use crate::uplink::schema::schema_stream::supergraph_sdl_query::FetchErrorCode;
 10 | use crate::uplink::schema::schema_stream::supergraph_sdl_query::SupergraphSdlQueryRouterConfig;
 11 | use graphql_client::GraphQLQuery;
 12 | 
 13 | #[derive(GraphQLQuery)]
 14 | #[graphql(
 15 |     query_path = "src/uplink/schema/schema_query.graphql",
 16 |     schema_path = "src/uplink/uplink.graphql",
 17 |     request_derives = "Debug",
 18 |     response_derives = "PartialEq, Debug, Deserialize",
 19 |     deprecated = "warn"
 20 | )]
 21 | pub(crate) struct SupergraphSdlQuery;
 22 | 
 23 | impl From<UplinkRequest> for supergraph_sdl_query::Variables {
 24 |     fn from(req: UplinkRequest) -> Self {
 25 |         supergraph_sdl_query::Variables {
 26 |             api_key: req.api_key,
 27 |             graph_ref: req.graph_ref,
 28 |             if_after_id: req.id,
 29 |         }
 30 |     }
 31 | }
 32 | 
 33 | impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<String> {
 34 |     fn from(response: supergraph_sdl_query::ResponseData) -> Self {
 35 |         match response.router_config {
 36 |             SupergraphSdlQueryRouterConfig::RouterConfigResult(result) => UplinkResponse::New {
 37 |                 response: result.supergraph_sdl,
 38 |                 id: result.id,
 39 |                 // this will truncate the number of seconds to under u64::MAX, which should be
 40 |                 // a large enough delay anyway
 41 |                 delay: result.min_delay_seconds as u64,
 42 |             },
 43 |             SupergraphSdlQueryRouterConfig::Unchanged(response) => UplinkResponse::Unchanged {
 44 |                 id: Some(response.id),
 45 |                 delay: Some(response.min_delay_seconds as u64),
 46 |             },
 47 |             SupergraphSdlQueryRouterConfig::FetchError(err) => UplinkResponse::Error {
 48 |                 retry_later: err.code == FetchErrorCode::RETRY_LATER,
 49 |                 code: match err.code {
 50 |                     FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
 51 |                     FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
 52 |                     FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
 53 |                     FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
 54 |                     FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
 55 |                         "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
 56 |                     }
 57 |                     FetchErrorCode::Other(other) => other,
 58 |                 },
 59 |                 message: err.message,
 60 |             },
 61 |         }
 62 |     }
 63 | }
 64 | 
 65 | impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<SchemaState> {
 66 |     fn from(response: supergraph_sdl_query::ResponseData) -> Self {
 67 |         match response.router_config {
 68 |             SupergraphSdlQueryRouterConfig::RouterConfigResult(result) => UplinkResponse::New {
 69 |                 response: SchemaState {
 70 |                     sdl: result.supergraph_sdl,
 71 |                     launch_id: Some(result.id.clone()),
 72 |                 },
 73 |                 id: result.id,
 74 |                 // this will truncate the number of seconds to under u64::MAX, which should be
 75 |                 // a large enough delay anyway
 76 |                 delay: result.min_delay_seconds as u64,
 77 |             },
 78 |             SupergraphSdlQueryRouterConfig::Unchanged(response) => UplinkResponse::Unchanged {
 79 |                 id: Some(response.id),
 80 |                 delay: Some(response.min_delay_seconds as u64),
 81 |             },
 82 |             SupergraphSdlQueryRouterConfig::FetchError(err) => UplinkResponse::Error {
 83 |                 retry_later: err.code == FetchErrorCode::RETRY_LATER,
 84 |                 code: match err.code {
 85 |                     FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
 86 |                     FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
 87 |                     FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
 88 |                     FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
 89 |                     FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
 90 |                         "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
 91 |                     }
 92 |                     FetchErrorCode::Other(other) => other,
 93 |                 },
 94 |                 message: err.message,
 95 |             },
 96 |         }
 97 |     }
 98 | }
 99 | 
100 | #[cfg(test)]
101 | mod test {
102 |     use super::*;
103 | 
104 |     #[test]
105 |     fn test_uplink_request_to_graphql_variables() {
106 |         let request = UplinkRequest {
107 |             api_key: "test_key".to_string(),
108 |             graph_ref: "test_ref".to_string(),
109 |             id: Some("test_id".to_string()),
110 |         };
111 | 
112 |         let variables: supergraph_sdl_query::Variables = request.into();
113 | 
114 |         assert_eq!(variables.api_key, "test_key");
115 |         assert_eq!(variables.graph_ref, "test_ref");
116 |         assert_eq!(variables.if_after_id, Some("test_id".to_string()));
117 |     }
118 | 
119 |     #[test]
120 |     fn test_graphql_response_to_uplink_response_new() {
121 |         let response = supergraph_sdl_query::ResponseData {
122 |             router_config: SupergraphSdlQueryRouterConfig::RouterConfigResult(
123 |                 supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnRouterConfigResult {
124 |                     supergraph_sdl: "test_sdl".to_string(),
125 |                     id: "result_id".to_string(),
126 |                     min_delay_seconds: 42.0,
127 |                 },
128 |             ),
129 |         };
130 | 
131 |         let uplink_response: UplinkResponse<String> = response.into();
132 | 
133 |         assert!(matches!(
134 |             uplink_response,
135 |             UplinkResponse::New { response, id, delay }
136 |             if response == "test_sdl" && id == "result_id" && delay == 42
137 |         ));
138 |     }
139 | 
140 |     #[test]
141 |     fn test_graphql_response_to_uplink_response_unchanged() {
142 |         let response = supergraph_sdl_query::ResponseData {
143 |             router_config: SupergraphSdlQueryRouterConfig::Unchanged(
144 |                 supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnUnchanged {
145 |                     id: "unchanged_id".to_string(),
146 |                     min_delay_seconds: 30.0,
147 |                 },
148 |             ),
149 |         };
150 | 
151 |         let uplink_response: UplinkResponse<String> = response.into();
152 | 
153 |         assert!(matches!(
154 |             uplink_response,
155 |             UplinkResponse::Unchanged { id, delay }
156 |             if id == Some("unchanged_id".to_string()) && delay == Some(30)
157 |         ));
158 |     }
159 | 
160 |     #[test]
161 |     fn test_graphql_response_to_uplink_response_error() {
162 |         let response = supergraph_sdl_query::ResponseData {
163 |             router_config: SupergraphSdlQueryRouterConfig::FetchError(
164 |                 supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnFetchError {
165 |                     code: FetchErrorCode::RETRY_LATER,
166 |                     message: "Try again later".to_string(),
167 |                 },
168 |             ),
169 |         };
170 | 
171 |         let uplink_response: UplinkResponse<String> = response.into();
172 | 
173 |         assert!(matches!(
174 |             uplink_response,
175 |             UplinkResponse::Error { retry_later, code, message }
176 |             if retry_later && code == "RETRY_LATER" && message == "Try again later"
177 |         ));
178 |     }
179 | }
180 | 
```
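
One observation on the file above: the `FetchErrorCode` match is duplicated verbatim between the `UplinkResponse<String>` and `UplinkResponse<SchemaState>` impls. A minimal sketch of factoring it into a shared helper (illustrative, not repository code; it assumes the generated `FetchErrorCode` type in scope above):

```rust
// Shared mapping used by both From impls; consumes `code` by value so
// the `Other(String)` variant can be returned without cloning.
fn fetch_error_code_to_string(code: FetchErrorCode) -> String {
    match code {
        FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
        FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
        FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
        FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
        FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
            "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
        }
        FetchErrorCode::Other(other) => other,
    }
}

// Each impl would then compute `retry_later` before moving `err.code`:
//
// UplinkResponse::Error {
//     retry_later: err.code == FetchErrorCode::RETRY_LATER,
//     code: fetch_error_code_to_string(err.code),
//     message: err.message,
// }
```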