# apollographql/apollo-mcp-server
This is page 2 of 6. Use http://codebase.md/apollographql/apollo-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .cargo
│   └── config.toml
├── .changesets
│   └── README.md
├── .envrc
├── .github
│   ├── CODEOWNERS
│   ├── renovate.json5
│   └── workflows
│       ├── canary-release.yml
│       ├── ci.yml
│       ├── prep-release.yml
│       ├── release-bins.yml
│       ├── release-container.yml
│       ├── sync-develop.yml
│       └── verify-changeset.yml
├── .gitignore
├── .idea
│   └── runConfigurations
│       ├── clippy.xml
│       ├── format___test___clippy.xml
│       ├── format.xml
│       ├── Run_spacedevs.xml
│       └── Test_apollo_mcp_server.xml
├── .vscode
│   ├── extensions.json
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── apollo.config.json
├── Cargo.lock
├── Cargo.toml
├── CHANGELOG_SECTION.md
├── CHANGELOG.md
├── clippy.toml
├── codecov.yml
├── CONTRIBUTING.md
├── crates
│   ├── apollo-mcp-registry
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── files.rs
│   │       ├── lib.rs
│   │       ├── logging.rs
│   │       ├── platform_api
│   │       │   ├── operation_collections
│   │       │   │   ├── collection_poller.rs
│   │       │   │   ├── error.rs
│   │       │   │   ├── event.rs
│   │       │   │   └── operation_collections.graphql
│   │       │   ├── operation_collections.rs
│   │       │   └── platform-api.graphql
│   │       ├── platform_api.rs
│   │       ├── testdata
│   │       │   ├── minimal_supergraph.graphql
│   │       │   └── supergraph.graphql
│   │       ├── uplink
│   │       │   ├── persisted_queries
│   │       │   │   ├── event.rs
│   │       │   │   ├── manifest_poller.rs
│   │       │   │   ├── manifest.rs
│   │       │   │   └── persisted_queries_manifest_query.graphql
│   │       │   ├── persisted_queries.rs
│   │       │   ├── schema
│   │       │   │   ├── event.rs
│   │       │   │   ├── schema_query.graphql
│   │       │   │   └── schema_stream.rs
│   │       │   ├── schema.rs
│   │       │   ├── snapshots
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_all_fail@logs.snap
│   │       │   │   ├── apollo_mcp_registry__uplink__schema__tests__schema_by_url_fallback@logs.snap
│   │       │   │   └── apollo_mcp_registry__uplink__schema__tests__schema_by_url@logs.snap
│   │       │   └── uplink.graphql
│   │       └── uplink.rs
│   ├── apollo-mcp-server
│   │   ├── build.rs
│   │   ├── Cargo.toml
│   │   ├── src
│   │   │   ├── auth
│   │   │   │   ├── networked_token_validator.rs
│   │   │   │   ├── protected_resource.rs
│   │   │   │   ├── valid_token.rs
│   │   │   │   └── www_authenticate.rs
│   │   │   ├── auth.rs
│   │   │   ├── config_schema.rs
│   │   │   ├── cors.rs
│   │   │   ├── custom_scalar_map.rs
│   │   │   ├── errors.rs
│   │   │   ├── event.rs
│   │   │   ├── explorer.rs
│   │   │   ├── graphql.rs
│   │   │   ├── headers.rs
│   │   │   ├── health.rs
│   │   │   ├── introspection
│   │   │   │   ├── minify.rs
│   │   │   │   ├── snapshots
│   │   │   │   │   └── apollo_mcp_server__introspection__minify__tests__minify_schema.snap
│   │   │   │   ├── tools
│   │   │   │   │   ├── execute.rs
│   │   │   │   │   ├── introspect.rs
│   │   │   │   │   ├── search.rs
│   │   │   │   │   ├── snapshots
│   │   │   │   │   │   └── apollo_mcp_server__introspection__tools__search__tests__search_tool.snap
│   │   │   │   │   ├── testdata
│   │   │   │   │   │   └── schema.graphql
│   │   │   │   │   └── validate.rs
│   │   │   │   └── tools.rs
│   │   │   ├── introspection.rs
│   │   │   ├── json_schema.rs
│   │   │   ├── lib.rs
│   │   │   ├── main.rs
│   │   │   ├── meter.rs
│   │   │   ├── operations
│   │   │   │   ├── mutation_mode.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── operation.rs
│   │   │   │   ├── raw_operation.rs
│   │   │   │   ├── schema_walker
│   │   │   │   │   ├── name.rs
│   │   │   │   │   └── type.rs
│   │   │   │   └── schema_walker.rs
│   │   │   ├── operations.rs
│   │   │   ├── runtime
│   │   │   │   ├── config.rs
│   │   │   │   ├── endpoint.rs
│   │   │   │   ├── filtering_exporter.rs
│   │   │   │   ├── graphos.rs
│   │   │   │   ├── introspection.rs
│   │   │   │   ├── logging
│   │   │   │   │   ├── defaults.rs
│   │   │   │   │   ├── log_rotation_kind.rs
│   │   │   │   │   └── parsers.rs
│   │   │   │   ├── logging.rs
│   │   │   │   ├── operation_source.rs
│   │   │   │   ├── overrides.rs
│   │   │   │   ├── schema_source.rs
│   │   │   │   ├── schemas.rs
│   │   │   │   ├── telemetry
│   │   │   │   │   └── sampler.rs
│   │   │   │   └── telemetry.rs
│   │   │   ├── runtime.rs
│   │   │   ├── sanitize.rs
│   │   │   ├── schema_tree_shake.rs
│   │   │   ├── server
│   │   │   │   ├── states
│   │   │   │   │   ├── configuring.rs
│   │   │   │   │   ├── operations_configured.rs
│   │   │   │   │   ├── running.rs
│   │   │   │   │   ├── schema_configured.rs
│   │   │   │   │   └── starting.rs
│   │   │   │   └── states.rs
│   │   │   ├── server.rs
│   │   │   └── telemetry_attributes.rs
│   │   └── telemetry.toml
│   └── apollo-schema-index
│       ├── Cargo.toml
│       └── src
│           ├── error.rs
│           ├── lib.rs
│           ├── path.rs
│           ├── snapshots
│           │   ├── apollo_schema_index__tests__search.snap
│           │   └── apollo_schema_index__traverse__tests__schema_traverse.snap
│           ├── testdata
│           │   └── schema.graphql
│           └── traverse.rs
├── docs
│   └── source
│       ├── _sidebar.yaml
│       ├── auth.mdx
│       ├── best-practices.mdx
│       ├── config-file.mdx
│       ├── cors.mdx
│       ├── custom-scalars.mdx
│       ├── debugging.mdx
│       ├── define-tools.mdx
│       ├── deploy.mdx
│       ├── guides
│       │   └── auth-auth0.mdx
│       ├── health-checks.mdx
│       ├── images
│       │   ├── auth0-permissions-enable.png
│       │   ├── mcp-getstarted-inspector-http.jpg
│       │   └── mcp-getstarted-inspector-stdio.jpg
│       ├── index.mdx
│       ├── licensing.mdx
│       ├── limitations.mdx
│       ├── quickstart.mdx
│       ├── run.mdx
│       └── telemetry.mdx
├── e2e
│   └── mcp-server-tester
│       ├── local-operations
│       │   ├── api.graphql
│       │   ├── config.yaml
│       │   ├── operations
│       │   │   ├── ExploreCelestialBodies.graphql
│       │   │   ├── GetAstronautDetails.graphql
│       │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│       │   │   └── SearchUpcomingLaunches.graphql
│       │   └── tool-tests.yaml
│       ├── pq-manifest
│       │   ├── api.graphql
│       │   ├── apollo.json
│       │   ├── config.yaml
│       │   └── tool-tests.yaml
│       ├── run_tests.sh
│       └── server-config.template.json
├── flake.lock
├── flake.nix
├── graphql
│   ├── TheSpaceDevs
│   │   ├── .vscode
│   │   │   ├── extensions.json
│   │   │   └── tasks.json
│   │   ├── api.graphql
│   │   ├── apollo.config.json
│   │   ├── config.yaml
│   │   ├── operations
│   │   │   ├── ExploreCelestialBodies.graphql
│   │   │   ├── GetAstronautDetails.graphql
│   │   │   ├── GetAstronautsCurrentlyInSpace.graphql
│   │   │   └── SearchUpcomingLaunches.graphql
│   │   ├── persisted_queries
│   │   │   └── apollo.json
│   │   ├── persisted_queries.config.json
│   │   ├── README.md
│   │   └── supergraph.yaml
│   └── weather
│       ├── api.graphql
│       ├── config.yaml
│       ├── operations
│       │   ├── alerts.graphql
│       │   ├── all.graphql
│       │   └── forecast.graphql
│       ├── persisted_queries
│       │   └── apollo.json
│       ├── supergraph.graphql
│       ├── supergraph.yaml
│       └── weather.graphql
├── LICENSE
├── macos-entitlements.plist
├── nix
│   ├── apollo-mcp.nix
│   ├── cargo-zigbuild.patch
│   ├── mcp-server-tools
│   │   ├── default.nix
│   │   ├── node-generated
│   │   │   ├── default.nix
│   │   │   ├── node-env.nix
│   │   │   └── node-packages.nix
│   │   ├── node-mcp-servers.json
│   │   └── README.md
│   └── mcphost.nix
├── README.md
├── rust-toolchain.toml
├── scripts
│   ├── nix
│   │   └── install.sh
│   └── windows
│       └── install.ps1
└── xtask
    ├── Cargo.lock
    ├── Cargo.toml
    └── src
        ├── commands
        │   ├── changeset
        │   │   ├── matching_pull_request.graphql
        │   │   ├── matching_pull_request.rs
        │   │   ├── mod.rs
        │   │   ├── scalars.rs
        │   │   └── snapshots
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title_and_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_issues_in_title.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_multiple_prs_in_footer.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_neither_issues_or_prs.snap
        │   │       ├── xtask__commands__changeset__tests__it_templatizes_with_prs_in_title_when_empty_issues.snap
        │   │       └── xtask__commands__changeset__tests__it_templatizes_without_prs_in_title_when_issues_present.snap
        │   └── mod.rs
        ├── lib.rs
        └── main.rs
```

# Files

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/server.rs:
--------------------------------------------------------------------------------

```rust
use std::net::{IpAddr, Ipv4Addr};

use apollo_mcp_registry::uplink::schema::SchemaSource;
use bon::bon;
use reqwest::header::{CONTENT_TYPE, HeaderMap, HeaderValue};
use schemars::JsonSchema;
use serde::Deserialize;
use url::Url;

use crate::auth;
use crate::cors::CorsConfig;
use crate::custom_scalar_map::CustomScalarMap;
use crate::errors::ServerError;
use crate::event::Event as ServerEvent;
use crate::headers::ForwardHeaders;
use crate::health::HealthCheckConfig;
use crate::operations::{MutationMode, OperationSource};

mod states;

use states::StateMachine;

/// An Apollo MCP Server
pub struct Server {
    transport: Transport,
    schema_source: SchemaSource,
    operation_source: OperationSource,
    endpoint: Url,
    headers: HeaderMap,
    forward_headers: ForwardHeaders,
    execute_introspection: bool,
    validate_introspection: bool,
    introspect_introspection: bool,
    introspect_minify: bool,
    search_minify: bool,
    search_introspection: bool,
    explorer_graph_ref: Option<String>,
    custom_scalar_map: Option<CustomScalarMap>,
    mutation_mode: MutationMode,
    disable_type_description: bool,
    disable_schema_description: bool,
    disable_auth_token_passthrough: bool,
    search_leaf_depth: usize,
    index_memory_bytes: usize,
    health_check: HealthCheckConfig,
    cors: CorsConfig,
}

#[derive(Debug, Clone, Deserialize, Default, JsonSchema)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Transport {
    /// Use standard IO for server <> client communication
    #[default]
    Stdio,

    /// Host the MCP server on the supplied configuration, using SSE for communication
    ///
    /// Note: This is deprecated in favor of HTTP streams.
    #[serde(rename = "sse")]
    SSE {
        /// Authentication configuration
        #[serde(default)]
        auth: Option<auth::Config>,

        /// The IP address to bind to
        #[serde(default = "Transport::default_address")]
        address: IpAddr,

        /// The port to bind to
        #[serde(default = "Transport::default_port")]
        port: u16,
    },

    /// Host the MCP server on the supplied configuration, using streamable HTTP messages
    StreamableHttp {
        /// Authentication configuration
        #[serde(default)]
        auth: Option<auth::Config>,

        /// The IP address to bind to
        #[serde(default = "Transport::default_address")]
        address: IpAddr,

        /// The port to bind to
        #[serde(default = "Transport::default_port")]
        port: u16,

        #[serde(default = "Transport::default_stateful_mode")]
        stateful_mode: bool,
    },
}

impl Transport {
    fn default_address() -> IpAddr {
        IpAddr::V4(Ipv4Addr::LOCALHOST)
    }

    fn default_port() -> u16 {
        8000
    }

    fn default_stateful_mode() -> bool {
        true
    }
}

#[bon]
impl Server {
    #[builder]
    pub fn new(
        transport: Transport,
        schema_source: SchemaSource,
        operation_source: OperationSource,
        endpoint: Url,
        headers: HeaderMap,
        forward_headers: ForwardHeaders,
        execute_introspection: bool,
        validate_introspection: bool,
        introspect_introspection: bool,
        search_introspection: bool,
        introspect_minify: bool,
        search_minify: bool,
        explorer_graph_ref: Option<String>,
        #[builder(required)] custom_scalar_map: Option<CustomScalarMap>,
        mutation_mode: MutationMode,
        disable_type_description: bool,
        disable_schema_description: bool,
        disable_auth_token_passthrough: bool,
        search_leaf_depth: usize,
        index_memory_bytes: usize,
        health_check: HealthCheckConfig,
        cors: CorsConfig,
    ) -> Self {
        let headers = {
            let mut headers = headers.clone();
            headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
            headers
        };
        Self {
            transport,
            schema_source,
            operation_source,
            endpoint,
            headers,
            forward_headers,
            execute_introspection,
            validate_introspection,
            introspect_introspection,
            search_introspection,
            introspect_minify,
            search_minify,
            explorer_graph_ref,
            custom_scalar_map,
            mutation_mode,
            disable_type_description,
            disable_schema_description,
            disable_auth_token_passthrough,
            search_leaf_depth,
            index_memory_bytes,
            health_check,
            cors,
        }
    }

    pub async fn start(self) -> Result<(), ServerError> {
        StateMachine {}.start(self).await
    }
}

```
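
Because `Transport` is internally tagged on `type` with snake_case variant names and per-field defaults, a config section only needs to name the transport and any overrides. A minimal sketch of deserializing it, assuming `serde_json` (already used elsewhere in this crate) as the deserializer:

```rust
// Minimal sketch (not from the repo): deserializing a `Transport` using the
// serde attributes declared above.
fn parse_transport() -> Result<Transport, serde_json::Error> {
    // Omitted fields fall back to Transport::default_address, default_port,
    // and default_stateful_mode via the #[serde(default = "...")] attributes,
    // and `auth` defaults to None.
    let raw = r#"{ "type": "streamable_http", "port": 9000 }"#;
    serde_json::from_str(raw)
}
```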

--------------------------------------------------------------------------------
/graphql/weather/supergraph.graphql:
--------------------------------------------------------------------------------

```graphql
schema
  @link(url: "https://specs.apollo.dev/link/v1.0")
  @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION)
  @link(url: "https://specs.apollo.dev/tag/v0.3")
  @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION)
  @join__directive(graphs: [WEATHER], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]})
  @join__directive(graphs: [WEATHER], name: "source", args: {name: "NWS", http: {baseURL: "https://api.weather.gov", headers: [{name: "User-Agent", value: "weather-app/1.0"}, {name: "Accept", value: "application/geo+json"}]}})
{
  query: Query
}

directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION

directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE

directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION

directive @join__graph(name: String!, url: String!) on ENUM_VALUE

directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE

directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR

directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION

directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA

directive @tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA

"""A weather alert"""
type Alert
  @join__type(graph: WEATHER)
  @tag(name: "mcp")
{
  """The severity of this alert"""
  severity: String

  """A description of the alert"""
  description: String

  """Information about how people should respond to the alert"""
  instruction: String
}

"""A coordinate, consisting of a latitude and longitude"""
type Coordinate
  @join__type(graph: WEATHER)
{
  """The latitude of this coordinate"""
  latitude: String!

  """The longitude of this coordinate"""
  longitude: String!
}

"""A weather forecast"""
type Forecast
  @join__type(graph: WEATHER)
{
  """The coordinate associated with this forecast"""
  coordinate: Coordinate!

  """
  The National Weather Service (NWS) URL where the forecast data can be read
  """
  forecastURL: String!

  """A detailed weather forecast from the National Weather Service (NWS)"""
  detailed: String! @join__directive(graphs: [WEATHER], name: "connect", args: {http: {GET: "https://api.weather.gov/gridpoints/FFC/51,87/forecast", headers: [{name: "foo", value: "{$this.forecastURL}"}, {name: "Accept", value: "application/geo+json"}, {name: "User-Agent", value: "weather-app/1.0"}]}, selection: "$.properties.periods->first.detailedForecast"})
}

"""A coordinate, consisting of a latitude and longitude"""
input InputCoordinate
  @join__type(graph: WEATHER)
{
  """The latitude of this coordinate"""
  latitude: String!

  """The longitude of this coordinate"""
  longitude: String!
}

input join__ContextArgument {
  name: String!
  type: String!
  context: String!
  selection: join__FieldValue!
}

scalar join__DirectiveArguments

scalar join__FieldSet

scalar join__FieldValue

enum join__Graph {
  WEATHER @join__graph(name: "weather", url: "http://localhost")
}

scalar link__Import

enum link__Purpose {
  """
  `SECURITY` features provide metadata necessary to securely resolve fields.
  """
  SECURITY

  """
  `EXECUTION` features provide metadata necessary for operation execution.
  """
  EXECUTION
}

type Query
  @join__type(graph: WEATHER)
{
  """Get the weather forecast for a coordinate"""
  forecast(coordinate: InputCoordinate!): Forecast @join__directive(graphs: [WEATHER], name: "connect", args: {source: "NWS", http: {GET: "/points/{$args.coordinate.latitude},{$args.coordinate.longitude}"}, selection: "coordinate: {\n  latitude: $args.coordinate.latitude\n  longitude: $args.coordinate.longitude\n}\nforecastURL: properties.forecast", entity: true})

  """
  Get the weather alerts for a state, using the two-letter abbreviation for the state - for example, CO for Colorado
  """
  alerts(state: String!): [Alert] @join__directive(graphs: [WEATHER], name: "connect", args: {source: "NWS", http: {GET: "/alerts/active/area/{$args.state}"}, selection: "$.features.properties {\n  severity\n  description\n  instruction\n}"}) @tag(name: "mcp")
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/custom_scalar_map.rs:
--------------------------------------------------------------------------------

```rust
use crate::errors::ServerError;
use rmcp::serde_json;
use schemars::Schema;
use std::{collections::HashMap, path::PathBuf, str::FromStr};

impl FromStr for CustomScalarMap {
    type Err = ServerError;

    fn from_str(string_custom_scalar_file: &str) -> Result<Self, Self::Err> {
        // Parse the string into an initial map of serde_json::Values
        let parsed_custom_scalar_file: serde_json::Map<String, serde_json::Value> =
            serde_json::from_str(string_custom_scalar_file)
                .map_err(ServerError::CustomScalarConfig)?;

        // Try to parse each as a schema
        let custom_scalar_map = parsed_custom_scalar_file
            .into_iter()
            .map(|(key, value)| {
                // The schemars crate does not enforce schema validation anymore, so we use jsonschema
                // to ensure that the supplied schema is valid.
                if let Err(e) = jsonschema::meta::validate(&value) {
                    return Err(ServerError::CustomScalarJsonSchema(e.to_string()));
                }

                Schema::try_from(value.clone())
                    .map(|schema| (key, schema))
                    .map_err(|e| ServerError::CustomScalarJsonSchema(e.to_string()))
            })
            .collect::<Result<_, _>>()?;

        Ok(CustomScalarMap(custom_scalar_map))
    }
}

impl TryFrom<&PathBuf> for CustomScalarMap {
    type Error = ServerError;

    fn try_from(file_path_buf: &PathBuf) -> Result<Self, Self::Error> {
        let custom_scalars_config_path = file_path_buf.as_path();
        tracing::debug!(custom_scalars_config=?custom_scalars_config_path, "Loading custom_scalars_config");
        let string_custom_scalar_file = std::fs::read_to_string(custom_scalars_config_path)?;
        CustomScalarMap::from_str(string_custom_scalar_file.as_str())
    }
}

#[derive(Debug, Clone)]
pub struct CustomScalarMap(HashMap<String, Schema>);

impl CustomScalarMap {
    pub fn get(&self, key: &str) -> Option<&Schema> {
        self.0.get(key)
    }
}

#[cfg(test)]
mod tests {
    use std::{collections::HashMap, str::FromStr};

    use schemars::json_schema;

    use crate::custom_scalar_map::CustomScalarMap;

    #[test]
    fn empty_file() {
        let result = CustomScalarMap::from_str("").err().unwrap();

        insta::assert_debug_snapshot!(result, @r#"
        CustomScalarConfig(
            Error("EOF while parsing a value", line: 1, column: 0),
        )
        "#)
    }

    #[test]
    fn only_spaces() {
        let result =
            CustomScalarMap::from_str("    ").expect_err("empty space should be valid schema");

        insta::assert_debug_snapshot!(result, @r#"
        CustomScalarConfig(
            Error("EOF while parsing a value", line: 1, column: 4),
        )
        "#)
    }

    #[test]
    fn invalid_json() {
        let result = CustomScalarMap::from_str("Hello: }").err().unwrap();

        insta::assert_debug_snapshot!(result, @r#"
        CustomScalarConfig(
            Error("expected value", line: 1, column: 1),
        )
        "#)
    }

    #[test]
    fn invalid_simple_schema() {
        let result = CustomScalarMap::from_str(
            r###"{
                "custom": {
                    "type": "bool"
                }
            }"###,
        )
        .expect_err("schema should have been invalid");

        insta::assert_debug_snapshot!(result, @r###"
        CustomScalarJsonSchema(
            "\"bool\" is not valid under any of the schemas listed in the 'anyOf' keyword",
        )
        "###)
    }

    #[test]
    fn invalid_complex_schema() {
        let result = CustomScalarMap::from_str(
            r###"{
                "custom": {
                    "type": "object",
                    "properties": {
                        "test": {
                            "type": "obbbject"
                        }
                    }
                }
            }"###,
        )
        .expect_err("schema should have been invalid");

        insta::assert_debug_snapshot!(result, @r#"
        CustomScalarJsonSchema(
            "\"obbbject\" is not valid under any of the schemas listed in the 'anyOf' keyword",
        )
        "#)
    }

    #[test]
    fn valid_schema() {
        let result = CustomScalarMap::from_str(
            r###"
        {
            "simple": {
                "type": "string"
            },
            "complex": {
                "type": "object",
                "properties": { "name": { "type": "string" } }
            }
        }
        "###,
        )
        .unwrap()
        .0;

        let expected_data = HashMap::from_iter([
            (
                "simple".to_string(),
                json_schema!({
                    "type": "string",
                }),
            ),
            (
                "complex".to_string(),
                json_schema!({
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string"
                        }
                    }
                }),
            ),
        ]);

        assert_eq!(result, expected_data);
    }
}

```
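
The `TryFrom<&PathBuf>` impl above is the typical entry point for callers: it reads a JSON file whose top-level keys are custom scalar names and whose values are JSON Schemas. A minimal sketch, with an illustrative file name:

```rust
// Minimal sketch (file name is illustrative, not from the repo): loading a
// custom scalar map from disk via the TryFrom<&PathBuf> impl above.
use std::path::PathBuf;

fn load_custom_scalars() -> Result<CustomScalarMap, ServerError> {
    let path = PathBuf::from("custom_scalars.json");
    let map = CustomScalarMap::try_from(&path)?;
    // Look up the JSON Schema registered for a scalar named "DateTime", if any.
    let _date_time_schema = map.get("DateTime");
    Ok(map)
}
```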

--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/testdata/schema.graphql:
--------------------------------------------------------------------------------

```graphql
scalar DateTime
scalar JSON
scalar Upload

enum UserRole {
  ADMIN
  MODERATOR
  USER
  GUEST
}

enum ContentStatus {
  DRAFT
  PUBLISHED
  ARCHIVED
  DELETED
}

enum NotificationPriority {
  LOW
  MEDIUM
  HIGH
  URGENT
}

enum MediaType {
  IMAGE
  VIDEO
  AUDIO
  DOCUMENT
}

interface Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
}

interface Content {
  id: ID!
  title: String!
  status: ContentStatus!
  author: User!
  metadata: JSON
}

type User implements Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  username: String!
  email: String!
  role: UserRole!
  profile: UserProfile
  posts: [Post!]!
  comments: [Comment!]!
  notifications: [Notification!]!
  preferences: UserPreferences!
}

type UserProfile {
  firstName: String
  lastName: String
  bio: String
  avatar: Media
  socialLinks: [SocialLink!]!
  location: Location
}

type Location {
  country: String!
  city: String
  coordinates: Coordinates
}

type Coordinates {
  latitude: Float!
  longitude: Float!
}

type SocialLink {
  platform: String!
  url: String!
  verified: Boolean!
}

type Post implements Node & Content {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  title: String!
  content: String!
  status: ContentStatus!
  author: User!
  metadata: JSON
  comments: [Comment!]!
  media: [Media!]!
  tags: [Tag!]!
  analytics: PostAnalytics!
}

type Comment implements Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  content: String!
  author: User!
  post: Post!
  parentComment: Comment
  replies: [Comment!]!
  reactions: [Reaction!]!
}

type Media {
  id: ID!
  type: MediaType!
  url: String!
  thumbnail: String
  metadata: MediaMetadata!
  uploader: User!
}

type MediaMetadata {
  size: Int!
  format: String!
  dimensions: Dimensions
  duration: Int
}

type Dimensions {
  width: Int!
  height: Int!
}

type Tag {
  id: ID!
  name: String!
  slug: String!
  description: String
  posts: [Post!]!
}

type Reaction {
  id: ID!
  type: String!
  user: User!
  comment: Comment!
  createdAt: DateTime!
}

type Notification {
  id: ID!
  type: String!
  priority: NotificationPriority!
  message: String!
  recipient: User!
  read: Boolean!
  createdAt: DateTime!
  metadata: JSON
}

type PostAnalytics {
  views: Int!
  likes: Int!
  shares: Int!
  comments: Int!
  engagement: Float!
  demographics: Demographics!
}

type Demographics {
  ageGroups: [AgeGroup!]!
  locations: [LocationStats!]!
  devices: [DeviceStats!]!
}

type AgeGroup {
  range: String!
  percentage: Float!
}

type LocationStats {
  country: String!
  count: Int!
}

type DeviceStats {
  type: String!
  count: Int!
}

type UserPreferences {
  theme: String!
  language: String!
  notifications: NotificationPreferences!
  privacy: PrivacySettings!
}

type NotificationPreferences {
  email: Boolean!
  push: Boolean!
  sms: Boolean!
  frequency: String!
}

type PrivacySettings {
  profileVisibility: String!
  showEmail: Boolean!
  showLocation: Boolean!
}

input CreateUserInput {
  username: String!
  email: String!
  password: String!
  role: UserRole = USER
  profile: CreateUserProfileInput
}

input CreateUserProfileInput {
  firstName: String
  lastName: String
  bio: String
  location: CreateLocationInput
}

input CreateLocationInput {
  country: String!
  city: String
  coordinates: CreateCoordinatesInput
}

input CreateCoordinatesInput {
  latitude: Float!
  longitude: Float!
}

input CreatePostInput {
  title: String!
  content: String!
  status: ContentStatus = DRAFT
  tags: [String!]
  media: [Upload!]
}

input UpdatePostInput {
  title: String
  content: String
  status: ContentStatus
  tags: [String!]
}

input CreateCommentInput {
  content: String!
  postId: ID!
  parentCommentId: ID
}

input NotificationFilter {
  priority: NotificationPriority
  read: Boolean
  type: String
  startDate: DateTime
  endDate: DateTime
}

type Query {
  node(id: ID!): Node
  user(id: ID!): User
  post(id: ID!): Post
  posts(filter: PostFilter): [Post!]!
  comments(postId: ID!): [Comment!]!
  notifications(filter: NotificationFilter): [Notification!]!
  search(query: String!): SearchResult!
}

type Mutation {
  createUser(input: CreateUserInput!): User!
  createPost(input: CreatePostInput!): Post!
  updatePost(id: ID!, input: UpdatePostInput!): Post!
  createComment(input: CreateCommentInput!): Comment!
  deletePost(id: ID!): Boolean!
  uploadMedia(file: Upload!): Media!
  updateUserPreferences(id: ID!, preferences: UserPreferencesInput!): UserPreferences!
}

type Subscription {
  postUpdated(id: ID!): Post!
  newComment(postId: ID!): Comment!
  notificationReceived(userId: ID!): Notification!
}

union SearchResult = User | Post | Comment | Tag

input PostFilter {
  status: ContentStatus
  authorId: ID
  tags: [String!]
  dateRange: DateRangeInput
}

input DateRangeInput {
  start: DateTime!
  end: DateTime!
}

input UserPreferencesInput {
  theme: String
  language: String
  notifications: NotificationPreferencesInput
  privacy: PrivacySettingsInput
}

input NotificationPreferencesInput {
  email: Boolean
  push: Boolean
  sms: Boolean
  frequency: String
}

input PrivacySettingsInput {
  profileVisibility: String
  showEmail: Boolean
  showLocation: Boolean
}

directive @auth(requires: UserRole!) on FIELD_DEFINITION
directive @cache(ttl: Int!) on FIELD_DEFINITION
directive @deprecated(reason: String) on FIELD_DEFINITION 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/testdata/schema.graphql:
--------------------------------------------------------------------------------

```graphql
scalar DateTime
scalar JSON
scalar Upload

enum UserRole {
  ADMIN
  MODERATOR
  USER
  GUEST
}

enum ContentStatus {
  DRAFT
  PUBLISHED
  ARCHIVED
  DELETED
}

enum NotificationPriority {
  LOW
  MEDIUM
  HIGH
  URGENT
}

enum MediaType {
  IMAGE
  VIDEO
  AUDIO
  DOCUMENT
}

interface Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
}

interface Content {
  id: ID!
  title: String!
  status: ContentStatus!
  author: User!
  metadata: JSON
}

type User implements Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  username: String!
  email: String!
  role: UserRole!
  profile: UserProfile
  posts: [Post!]!
  comments: [Comment!]!
  notifications: [Notification!]!
  preferences: UserPreferences!
}

type UserProfile {
  firstName: String
  lastName: String
  bio: String
  avatar: Media
  socialLinks: [SocialLink!]!
  location: Location
}

type Location {
  country: String!
  city: String
  coordinates: Coordinates
}

type Coordinates {
  latitude: Float!
  longitude: Float!
}

type SocialLink {
  platform: String!
  url: String!
  verified: Boolean!
}

type Post implements Node & Content {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  title: String!
  content: String!
  status: ContentStatus!
  author: User!
  metadata: JSON
  comments: [Comment!]!
  media: [Media!]!
  tags: [Tag!]!
  analytics: PostAnalytics!
}

type Comment implements Node {
  id: ID!
  createdAt: DateTime!
  updatedAt: DateTime!
  content: String!
  author: User!
  post: Post!
  parentComment: Comment
  replies: [Comment!]!
  reactions: [Reaction!]!
}

type Media {
  id: ID!
  type: MediaType!
  url: String!
  thumbnail: String
  metadata: MediaMetadata!
  uploader: User!
}

type MediaMetadata {
  size: Int!
  format: String!
  dimensions: Dimensions
  duration: Int
}

type Dimensions {
  width: Int!
  height: Int!
}

type Tag {
  id: ID!
  name: String!
  slug: String!
  description: String
  posts: [Post!]!
}

type Reaction {
  id: ID!
  type: String!
  user: User!
  comment: Comment!
  createdAt: DateTime!
}

type Notification {
  id: ID!
  type: String!
  priority: NotificationPriority!
  message: String!
  recipient: User!
  read: Boolean!
  createdAt: DateTime!
  metadata: JSON
}

type PostAnalytics {
  views: Int!
  likes: Int!
  shares: Int!
  comments: Int!
  engagement: Float!
  demographics: Demographics!
}

type Demographics {
  ageGroups: [AgeGroup!]!
  locations: [LocationStats!]!
  devices: [DeviceStats!]!
}

type AgeGroup {
  range: String!
  percentage: Float!
}

type LocationStats {
  country: String!
  count: Int!
}

type DeviceStats {
  type: String!
  count: Int!
}

type UserPreferences {
  theme: String!
  oldTheme: String @deprecated
  language: String!
  notifications: NotificationPreferences!
  privacy: PrivacySettings!
}

type NotificationPreferences {
  email: Boolean!
  push: Boolean!
  sms: Boolean!
  frequency: String!
}

type PrivacySettings {
  profileVisibility: String!
  showEmail: Boolean!
  showLocation: Boolean!
}

input CreateUserInput {
  username: String!
  email: String!
  password: String!
  role: UserRole = USER
  profile: CreateUserProfileInput
}

input CreateUserProfileInput {
  firstName: String
  lastName: String
  bio: String
  location: CreateLocationInput
}

input CreateLocationInput {
  country: String!
  city: String
  coordinates: CreateCoordinatesInput
}

input CreateCoordinatesInput {
  latitude: Float!
  longitude: Float!
}

input CreatePostInput {
  title: String!
  content: String!
  status: ContentStatus = DRAFT
  tags: [String!]
  media: [Upload!]
}

input UpdatePostInput {
  title: String
  content: String
  status: ContentStatus
  tags: [String!]
}

input CreateCommentInput {
  content: String!
  postId: ID!
  parentCommentId: ID
}

input NotificationFilter {
  priority: NotificationPriority
  read: Boolean
  type: String
  startDate: DateTime
  endDate: DateTime
}

type Query {
  node(id: ID!): Node
  user(id: ID!): User
  post(id: ID!): Post
  postsOld(filter: [ID!]) : [Post!]! @deprecated(reason: "Use posts instead")
  posts(filter: PostFilter): [Post!]!
  comments(postId: ID!): [Comment!]!
  notifications(filter: NotificationFilter): [Notification!]!
  search(query: String!): SearchResult!
}

type Mutation {
  createUser(input: CreateUserInput!): User!
  createPost(input: CreatePostInput!): Post!
  updatePost(id: ID!, input: UpdatePostInput!): Post!
  createComment(input: CreateCommentInput!): Comment!
  deletePost(id: ID!): Boolean!
  uploadMedia(file: Upload!): Media!
  updateUserPreferences(id: ID!, preferences: UserPreferencesInput!): UserPreferences!
}

type Subscription {
  postUpdated(id: ID!): Post!
  newComment(postId: ID!): Comment!
  notificationReceived(userId: ID!): Notification!
}

union SearchResult = User | Post | Comment | Tag

input PostFilter {
  status: ContentStatus
  authorId: ID
  tags: [String!]
  dateRange: DateRangeInput
}

input DateRangeInput {
  start: DateTime!
  end: DateTime!
}

input UserPreferencesInput {
  theme: String
  language: String
  notifications: NotificationPreferencesInput
  privacy: PrivacySettingsInput
}

input NotificationPreferencesInput {
  email: Boolean
  push: Boolean
  sms: Boolean
  frequency: String
}

input PrivacySettingsInput {
  profileVisibility: String
  showEmail: Boolean
  showLocation: Boolean
}

directive @auth(requires: UserRole!) on FIELD_DEFINITION
directive @cache(ttl: Int!) on FIELD_DEFINITION
directive @deprecated(reason: String) on FIELD_DEFINITION 
```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/explorer.rs:
--------------------------------------------------------------------------------

```rust
use crate::errors::McpError;
use crate::schema_from_type;
use rmcp::model::{CallToolResult, Content, ErrorCode, Tool};
use rmcp::schemars::JsonSchema;
use rmcp::serde_json::Value;
use rmcp::{schemars, serde_json};
use serde::{Deserialize, Serialize};
use tracing::debug;
use tracing::log::Level::Debug;
use tracing::log::log_enabled;

pub(crate) const EXPLORER_TOOL_NAME: &str = "explorer";

#[derive(Clone)]
pub struct Explorer {
    graph_id: String,
    variant: String,
    pub tool: Tool,
}

#[derive(JsonSchema, Deserialize, Serialize)]
pub struct Input {
    /// The GraphQL document
    #[serde(default = "default_input")]
    document: String,

    /// Any variables used in the document
    #[serde(default = "default_input")]
    variables: String,

    /// Headers to be sent with the operation
    #[serde(default = "default_input")]
    headers: String,
}

fn default_input() -> String {
    "{}".to_string()
}

impl Explorer {
    pub fn new(graph_ref: String) -> Self {
        let (graph_id, variant) = match graph_ref.split_once('@') {
            Some((graph_id, variant)) => (graph_id.to_string(), variant.to_string()),
            None => (graph_ref, String::from("current")),
        };
        Self {
            graph_id,
            variant,
            tool: Tool::new(
                EXPLORER_TOOL_NAME,
                "Get the URL to open a GraphQL operation in Apollo Explorer",
                schema_from_type!(Input),
            ),
        }
    }

    fn create_explorer_url(&self, input: Input) -> Result<String, McpError> {
        serde_json::to_string(&input)
            .map(|serialized| lz_str::compress_to_encoded_uri_component(serialized.as_str()))
            .map(|compressed| {
                format!(
                    "https://studio.apollographql.com/graph/{graph_id}/variant/{variant}/explorer?explorerURLState={compressed}",
                    graph_id = self.graph_id,
                    variant = self.variant,
                )
            })
            .map_err(|e| {
                McpError::new(
                    ErrorCode::INTERNAL_ERROR,
                    format!("Unable to serialize input: {e}"),
                    None,
                )
            })
    }

    pub async fn execute(&self, input: Input) -> Result<CallToolResult, McpError> {
        let pretty = if log_enabled!(Debug) {
            Some(serde_json::to_string_pretty(&input).unwrap_or("<unable to serialize>".into()))
        } else {
            None
        };
        let url = self.create_explorer_url(input)?;
        debug!(?url, input=?pretty, "Created URL to open operation in Apollo Explorer");
        Ok(CallToolResult {
            content: vec![Content::text(url.clone())],
            meta: None,
            is_error: None,
            structured_content: Some(Value::Array(vec![url.into()])),
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use insta::assert_snapshot;
    use rmcp::serde_json::json;
    use rstest::rstest;

    #[test]
    fn test_create_explorer_url() {
        let explorer = Explorer::new(String::from("mcp-example@mcp"));
        let input = json!({
            "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
            "variables": "{\"state\": \"CO\"}",
            "headers": "{\"x-foo\": \"bar\"}",
        });

        let input: Input = serde_json::from_value(input).unwrap();

        let url = explorer.create_explorer_url(input).unwrap();
        assert_snapshot!(
            url,
            @"https://studio.apollographql.com/graph/mcp-example/variant/mcp/explorer?explorerURLState=N4IgJg9gxgrgtgUwHYBcQC4QEcYIE4CeABAOIIoDqCAhigBb4CCANvigM4AUAJOyrQnREAyijwBLJAHMAhAEoiwADpIiRaqzwdOfAUN78UCBctVqi7BADd84lARXmiYBOygSADinEQkj85J8eDBQ3r7+AL4qESAANCBW1BLUAEas7BggyiC6RkoYRPkAwgDy+THxDNQueBmY2QAeALQAZhAQ+UL5KUnlIBFAA"
        );
    }

    #[tokio::test]
    #[rstest]
    #[case(json!({
        "variables": "{\"state\": \"CA\"}",
        "headers": "{}"
    }), json!({
        "document": "{}",
        "variables": "{\"state\": \"CA\"}",
        "headers": "{}"
    }))]
    #[case(json!({
        "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
        "headers": "{}"
    }), json!({
        "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
        "variables": "{}",
        "headers": "{}"
    }))]
    #[case(json!({
        "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
        "variables": "{\"state\": \"CA\"}",
    }), json!({
        "document": "query GetWeatherAlerts($state: String!) {\n  alerts(state: $state) {\n    severity\n    description\n    instruction\n  }\n}",
        "variables": "{\"state\": \"CA\"}",
        "headers": "{}"
    }))]
    async fn test_input_missing_fields(#[case] input: Value, #[case] input_with_default: Value) {
        let input = serde_json::from_value::<Input>(input).unwrap();
        let input_with_default = serde_json::from_value::<Input>(input_with_default).unwrap();
        let explorer = Explorer::new(String::from("mcp-example@mcp"));
        assert_eq!(
            explorer.create_explorer_url(input),
            explorer.create_explorer_url(input_with_default)
        );
    }
}

```
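
A minimal usage sketch, mirroring the tests above; `Input` has private fields, so it is constructed by deserializing JSON arguments, as the MCP layer would:

```rust
// Minimal sketch (not from the repo): invoking the explorer tool end to end.
// The graph ref and operation mirror the tests above.
async fn open_in_explorer() -> Result<CallToolResult, McpError> {
    let explorer = Explorer::new(String::from("mcp-example@mcp"));
    let input: Input = serde_json::from_value(serde_json::json!({
        "document": "query GetWeatherAlerts($state: String!) { alerts(state: $state) { severity } }",
        "variables": "{\"state\": \"CO\"}",
        "headers": "{}"
    }))
    .expect("input should deserialize");
    // The returned CallToolResult carries the Explorer URL as text content.
    explorer.execute(input).await
}
```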

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/build.rs:
--------------------------------------------------------------------------------

```rust
#![allow(clippy::unwrap_used)]
#![allow(clippy::expect_used)]
#![allow(clippy::panic)]

//! Build Script for the Apollo MCP Server
//!
//! This mostly compiles all the available telemetry attributes
use quote::__private::TokenStream;
use quote::quote;
use serde::Deserialize;
use std::io::Write;
use std::{collections::VecDeque, io::Read as _};
use syn::{Ident, parse2};

#[derive(Deserialize)]
struct TelemetryTomlData {
    attributes: toml::Table,
    metrics: toml::Table,
}

#[derive(Eq, PartialEq, Debug, Clone)]
struct TelemetryData {
    name: String,
    alias: String,
    value: String,
    description: String,
}

fn flatten(table: toml::Table) -> Vec<TelemetryData> {
    let mut to_visit = VecDeque::from_iter(table.into_iter().map(|(key, val)| (vec![key], val)));
    let mut telemetry_data = Vec::new();

    while let Some((key, value)) = to_visit.pop_front() {
        match value {
            toml::Value::String(val) => {
                let last_key = key.last().unwrap().clone();
                telemetry_data.push(TelemetryData {
                    name: cruet::to_pascal_case(last_key.as_str()),
                    alias: last_key,
                    value: key.join("."),
                    description: val,
                });
            }
            toml::Value::Table(map) => to_visit.extend(
                map.into_iter()
                    .map(|(nested_key, value)| ([key.clone(), vec![nested_key]].concat(), value)),
            ),

            _ => panic!("telemetry values should be string descriptions"),
        };
    }

    telemetry_data
}

fn generate_enum(telemetry_data: &[TelemetryData]) -> Vec<TokenStream> {
    telemetry_data
        .iter()
        .map(|t| {
            let enum_value_ident = quote::format_ident!("{}", &t.name);
            let alias = &t.alias;
            let doc_message = &t.description;
            quote! {
                #[doc = #doc_message]
                #[serde(alias = #alias)]
                #enum_value_ident
            }
        })
        .collect::<Vec<_>>()
}

fn generate_enum_as_str_matches(
    telemetry_data: &[TelemetryData],
    enum_ident: Ident,
) -> Vec<TokenStream> {
    telemetry_data
        .iter()
        .map(|t| {
            let name_ident = quote::format_ident!("{}", &t.name);
            let value = &t.value;
            quote! {
                #enum_ident::#name_ident => #value
            }
        })
        .collect::<Vec<_>>()
}

fn main() {
    // Parse the telemetry file
    let telemetry: TelemetryTomlData = {
        let mut raw = String::new();
        std::fs::File::open("telemetry.toml")
            .expect("could not open telemetry file")
            .read_to_string(&mut raw)
            .expect("could not read telemetry file");

        toml::from_str(&raw).expect("could not parse telemetry file")
    };

    // Generate the keys
    let telemetry_attribute_data = flatten(telemetry.attributes);
    let telemetry_metrics_data = flatten(telemetry.metrics);

    // Write out the generated keys
    let out_dir = std::env::var_os("OUT_DIR").expect("could not retrieve output directory");
    let dest_path = std::path::Path::new(&out_dir).join("telemetry_attributes.rs");
    let mut generated_file =
        std::fs::File::create(&dest_path).expect("could not create generated code file");

    let attribute_keys_len = telemetry_attribute_data.len();
    let attribute_enum_keys = generate_enum(&telemetry_attribute_data);
    let all_attribute_enum_values = &telemetry_attribute_data
        .iter()
        .map(|t| quote::format_ident!("{}", t.name));
    let all_attribute_enum_values = (*all_attribute_enum_values).clone();
    let attribute_enum_name = quote::format_ident!("{}", "TelemetryAttribute");
    let attribute_enum_as_str_matches =
        generate_enum_as_str_matches(&telemetry_attribute_data, attribute_enum_name.clone());

    let metric_enum_name = quote::format_ident!("{}", "TelemetryMetric");
    let metric_enum_keys = generate_enum(&telemetry_metrics_data);
    let metric_enum_as_str_matches =
        generate_enum_as_str_matches(&telemetry_metrics_data, metric_enum_name.clone());

    let tokens = quote! {
        /// All TelemetryAttribute values
        pub const ALL_ATTRS: &[TelemetryAttribute; #attribute_keys_len] = &[#(TelemetryAttribute::#all_attribute_enum_values),*];

        /// Supported telemetry attribute (tags) values
        #[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema, Clone, Eq, PartialEq, Hash, Copy)]
        pub enum #attribute_enum_name {
            #(#attribute_enum_keys),*
        }

        impl #attribute_enum_name {
            /// Converts TelemetryAttribute to &str
            pub const fn as_str(&self) -> &'static str {
                match self {
                   #(#attribute_enum_as_str_matches),*
                }
            }
        }

        /// Supported telemetry metrics
        #[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema, Clone, Eq, PartialEq, Hash, Copy)]
        pub enum #metric_enum_name {
            #(#metric_enum_keys),*
        }

        impl #metric_enum_name {
            /// Converts TelemetryMetric to &str
            pub const fn as_str(&self) -> &'static str {
                match self {
                   #(#metric_enum_as_str_matches),*
                }
            }
        }
    };

    let file = parse2(tokens).expect("Could not parse TokenStream");
    let code = prettyplease::unparse(&file);

    write!(generated_file, "{}", code).expect("Failed to write generated code");

    // Inform cargo that we only want this to run when either this file or the telemetry
    // one changes.
    println!("cargo::rerun-if-changed=build.rs");
    println!("cargo::rerun-if-changed=telemetry.toml");
}

```
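
To make the codegen concrete, this is roughly the attribute half of the output for a single hypothetical `telemetry.toml` entry (the real `telemetry.toml` is not shown on this page):

```rust
// Given a hypothetical telemetry.toml entry:
//
//   [attributes.apollo.mcp]
//   tool_name = "The name of the tool being invoked"
//
// `flatten` yields name = "ToolName", alias = "tool_name",
// value = "apollo.mcp.tool_name", and the quote! template above would emit
// code along these lines into OUT_DIR/telemetry_attributes.rs:
pub const ALL_ATTRS: &[TelemetryAttribute; 1] = &[TelemetryAttribute::ToolName];

/// Supported telemetry attribute (tags) values
#[derive(Debug, ::serde::Deserialize, ::schemars::JsonSchema, Clone, Eq, PartialEq, Hash, Copy)]
pub enum TelemetryAttribute {
    /// The name of the tool being invoked
    #[serde(alias = "tool_name")]
    ToolName,
}

impl TelemetryAttribute {
    /// Converts TelemetryAttribute to &str
    pub const fn as_str(&self) -> &'static str {
        match self {
            TelemetryAttribute::ToolName => "apollo.mcp.tool_name",
        }
    }
}
```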

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/main.rs:
--------------------------------------------------------------------------------

```rust
use std::path::PathBuf;

use apollo_mcp_registry::platform_api::operation_collections::collection_poller::CollectionSource;
use apollo_mcp_registry::uplink::persisted_queries::ManifestSource;
use apollo_mcp_registry::uplink::schema::SchemaSource;
use apollo_mcp_server::custom_scalar_map::CustomScalarMap;
use apollo_mcp_server::errors::ServerError;
use apollo_mcp_server::operations::OperationSource;
use apollo_mcp_server::server::Server;
use clap::Parser;
use clap::builder::Styles;
use clap::builder::styling::{AnsiColor, Effects};
use runtime::IdOrDefault;
use tracing::{info, warn};

mod runtime;

/// Clap styling
const STYLES: Styles = Styles::styled()
    .header(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
    .literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
    .placeholder(AnsiColor::Cyan.on_default());

/// Arguments to the MCP server
#[derive(Debug, Parser)]
#[command(
    version,
    styles = STYLES,
    about = "Apollo MCP Server - invoke GraphQL operations from an AI agent",
)]
struct Args {
    /// Path to the config file
    config: Option<PathBuf>,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let config: runtime::Config = match Args::parse().config {
        Some(config_path) => runtime::read_config(config_path)?,
        None => runtime::read_config_from_env().unwrap_or_default(),
    };

    let _guard = runtime::telemetry::init_tracing_subscriber(&config)?;

    info!(
        "Apollo MCP Server v{} // (c) Apollo Graph, Inc. // Licensed under MIT",
        env!("CARGO_PKG_VERSION")
    );

    let schema_source = match config.schema {
        runtime::SchemaSource::Local { path } => SchemaSource::File { path, watch: true },
        runtime::SchemaSource::Uplink => SchemaSource::Registry(config.graphos.uplink_config()?),
    };

    let operation_source = match config.operations {
        // Default collection is special and requires other information
        runtime::OperationSource::Collection {
            id: IdOrDefault::Default,
        } => OperationSource::Collection(CollectionSource::Default(
            config.graphos.graph_ref()?,
            config.graphos.platform_api_config()?,
        )),

        runtime::OperationSource::Collection {
            id: IdOrDefault::Id(collection_id),
        } => OperationSource::Collection(CollectionSource::Id(
            collection_id,
            config.graphos.platform_api_config()?,
        )),
        runtime::OperationSource::Introspect => OperationSource::None,
        runtime::OperationSource::Local { paths } if !paths.is_empty() => {
            OperationSource::from(paths)
        }
        runtime::OperationSource::Manifest { path } => {
            OperationSource::from(ManifestSource::LocalHotReload(vec![path]))
        }
        runtime::OperationSource::Uplink => {
            OperationSource::from(ManifestSource::Uplink(config.graphos.uplink_config()?))
        }

        // TODO: Inference requires many different combinations and preferences
        // TODO: We should maybe make this more explicit.
        runtime::OperationSource::Local { .. } | runtime::OperationSource::Infer => {
            if config.introspection.any_enabled() {
                warn!("No operations specified, falling back to introspection");
                OperationSource::None
            } else if let Ok(graph_ref) = config.graphos.graph_ref() {
                warn!(
                    "No operations specified, falling back to the default collection in {}",
                    graph_ref
                );
                OperationSource::Collection(CollectionSource::Default(
                    graph_ref,
                    config.graphos.platform_api_config()?,
                ))
            } else {
                anyhow::bail!(ServerError::NoOperations)
            }
        }
    };

    let explorer_graph_ref = config
        .overrides
        .enable_explorer
        .then(|| config.graphos.graph_ref())
        .transpose()?;

    let transport = config.transport.clone();

    Ok(Server::builder()
        .transport(config.transport)
        .schema_source(schema_source)
        .operation_source(operation_source)
        .endpoint(config.endpoint.into_inner())
        .maybe_explorer_graph_ref(explorer_graph_ref)
        .headers(config.headers)
        .forward_headers(config.forward_headers)
        .execute_introspection(config.introspection.execute.enabled)
        .validate_introspection(config.introspection.validate.enabled)
        .introspect_introspection(config.introspection.introspect.enabled)
        .introspect_minify(config.introspection.introspect.minify)
        .search_minify(config.introspection.search.minify)
        .search_introspection(config.introspection.search.enabled)
        .mutation_mode(config.overrides.mutation_mode)
        .disable_type_description(config.overrides.disable_type_description)
        .disable_schema_description(config.overrides.disable_schema_description)
        .disable_auth_token_passthrough(match transport {
            apollo_mcp_server::server::Transport::Stdio => false,
            apollo_mcp_server::server::Transport::SSE { auth, .. } => auth
                .map(|a| a.disable_auth_token_passthrough)
                .unwrap_or(false),
            apollo_mcp_server::server::Transport::StreamableHttp { auth, .. } => auth
                .map(|a| a.disable_auth_token_passthrough)
                .unwrap_or(false),
        })
        .custom_scalar_map(
            config
                .custom_scalars
                .map(|custom_scalars_config| CustomScalarMap::try_from(&custom_scalars_config))
                .transpose()?,
        )
        .search_leaf_depth(config.introspection.search.leaf_depth)
        .index_memory_bytes(config.introspection.search.index_memory_bytes)
        .health_check(config.health_check)
        .cors(config.cors)
        .build()
        .start()
        .await?)
}

```

--------------------------------------------------------------------------------
/crates/apollo-schema-index/src/traverse.rs:
--------------------------------------------------------------------------------

```rust
//! Provides an extension trait for traversing GraphQL schemas, using a depth-first traversal
//! starting at the specified root operation types (query, mutation, subscription).

use crate::OperationType;
use crate::path::PathNode;
use apollo_compiler::Schema;
use apollo_compiler::ast::NamedType;
use apollo_compiler::schema::ExtendedType;
use enumset::EnumSet;
use std::collections::HashMap;
use std::collections::hash_map::Entry;

/// Extension trait to allow traversing a schema
pub trait SchemaExt {
    /// Traverse the type hierarchy in the schema in depth-first order, starting with the specified
    /// root operation types
    fn traverse(
        &self,
        root_types: EnumSet<OperationType>,
    ) -> Box<dyn Iterator<Item = (&ExtendedType, PathNode)> + '_>;
}

impl SchemaExt for Schema {
    fn traverse(
        &self,
        root_types: EnumSet<OperationType>,
    ) -> Box<dyn Iterator<Item = (&ExtendedType, PathNode)> + '_> {
        let mut stack = vec![];
        let mut references: HashMap<&NamedType, Vec<NamedType>> = HashMap::default();
        for root_type in root_types
            .iter()
            .rev()
            .filter_map(|rt| self.root_operation(rt.into()))
        {
            stack.push((root_type, PathNode::new(root_type.clone())));
        }
        Box::new(std::iter::from_fn(move || {
            while let Some((named_type, current_path)) = stack.pop() {
                if current_path.has_cycle() {
                    continue;
                }
                let references = references.entry(named_type);

                // Only traverse the children of a type the first time we visit it.
                // After that, we still visit unique paths to the type, but not the child paths.
                let traverse_children: bool = matches!(references, Entry::Vacant(_));

                references.or_insert(
                    current_path
                        .referencing_type()
                        .map(|(t, _, _)| vec![t.clone()])
                        .unwrap_or_default(),
                );

                let cloned = current_path.clone();
                if let Some(extended_type) = self.types.get(named_type)
                    && !extended_type.is_built_in()
                {
                    if traverse_children {
                        match extended_type {
                            ExtendedType::Object(obj) => {
                                stack.extend(obj.fields.values().map(|field| {
                                    let field_type = field.ty.inner_named_type();
                                    let field_args = field
                                        .arguments
                                        .iter()
                                        .map(|arg| arg.ty.inner_named_type().clone())
                                        .collect::<Vec<_>>();
                                    (
                                        field_type,
                                        current_path.clone().add_child(
                                            Some(field.name.clone()),
                                            field_args,
                                            field_type.clone(),
                                        ),
                                    )
                                }));
                            }
                            ExtendedType::Interface(interface) => {
                                stack.extend(interface.fields.values().map(|field| {
                                    let field_type = field.ty.inner_named_type();
                                    let field_args = field
                                        .arguments
                                        .iter()
                                        .map(|arg| arg.ty.inner_named_type().clone())
                                        .collect::<Vec<_>>();
                                    (
                                        field_type,
                                        current_path.clone().add_child(
                                            Some(field.name.clone()),
                                            field_args,
                                            field_type.clone(),
                                        ),
                                    )
                                }));
                            }
                            ExtendedType::Union(union) => {
                                stack.extend(union.members.iter().map(|member| &member.name).map(
                                    |next_type| {
                                        (
                                            next_type,
                                            current_path.clone().add_child(
                                                None,
                                                vec![],
                                                next_type.clone(),
                                            ),
                                        )
                                    },
                                ));
                            }
                            _ => {}
                        }
                    }
                    return Some((extended_type, cloned));
                }
            }
            None
        }))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use apollo_compiler::validation::Valid;
    use rstest::{fixture, rstest};

    const TEST_SCHEMA: &str = include_str!("testdata/schema.graphql");

    #[fixture]
    fn schema() -> Valid<Schema> {
        Schema::parse(TEST_SCHEMA, "schema.graphql")
            .expect("Failed to parse test schema")
            .validate()
            .expect("Failed to validate test schema")
    }

    #[rstest]
    fn test_schema_traverse(schema: Valid<Schema>) {
        let mut paths = vec![];
        for (_extended_type, path) in schema
            .traverse(OperationType::Query | OperationType::Mutation | OperationType::Subscription)
        {
            paths.push(path.to_string());
        }
        insta::assert_debug_snapshot!(paths);
    }
}

```

--------------------------------------------------------------------------------
/docs/source/run.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Running the Apollo MCP Server
---

There are multiple ways to run the Apollo MCP server.

- If you have an existing GraphQL API deployed, use the standalone MCP server binary to get started quickly.

- If you use Docker in your developer workflow, use the Apollo MCP Server Docker image.

- If you are running your GraphQL API locally with Rover, you can use the Rover CLI's `rover dev` command to run the MCP server alongside your local graph.

- If you are using the Apollo Runtime Container, you can use the container to run both the MCP server and the Apollo Router in a single container.

## With the Rover CLI

The Rover CLI is a tool for working with GraphQL APIs locally.

You can use the [`rover dev`](/rover/commands/dev) command of Rover CLI `v0.35` or later to run an Apollo MCP Server instance alongside your local graph. Use the `--mcp` flag to start an MCP server and provide an optional configuration file.

```sh
rover dev --mcp <PATH/TO/CONFIG> [...other rover dev flags]
```

For more information, see the [Rover CLI documentation](/rover).

## Standalone MCP server binary

To install or upgrade to the **latest release** of Apollo MCP Server:

<Tabs>

    <Tab label="Linux / MacOS">

    ```terminal showLineNumbers=false
    curl -sSL https://mcp.apollo.dev/download/nix/latest | sh
    ```

    </Tab>

    <Tab label="Windows">

    ```terminal showLineNumbers=false
    iwr 'https://mcp.apollo.dev/download/win/latest' | iex  
    ```
    </Tab>

</Tabs>

To install or upgrade to a **specific version** of Apollo MCP Server (recommended for CI environments to ensure predictable behavior):

<Tabs>

    <Tab label="Linux / MacOS">

    ```terminal showLineNumbers=false
    # Note the `v` prefixing the version number
    curl -sSL https://mcp.apollo.dev/download/nix/v1.1.1 | sh
    ```

    </Tab>

    <Tab label="Windows">

    ```terminal showLineNumbers=false
    # Note the `v` prefixing the version number
    iwr 'https://mcp.apollo.dev/download/win/v1.1.1' | iex
    ```
    </Tab>

</Tabs>

To install or upgrade to a specific version of Apollo MCP Server that is a **release candidate** (recommended for those that want to test early builds):

<Tabs>

    <Tab label="Linux / MacOS">

    ```terminal showLineNumbers=false
    # Note the `v` prefixing the version number and the `-rc` suffix
    curl -sSL https://mcp.apollo.dev/download/nix/v1.1.1-rc.1 | sh
    ```

    </Tab>

    <Tab label="Windows">

    ```terminal showLineNumbers=false
    # Note the `v` prefixing the version number and the `-rc` suffix
    iwr 'https://mcp.apollo.dev/download/win/v1.1.1-rc.1' | iex
    ```
    </Tab>

</Tabs>

You can configure the Apollo MCP server using a [YAML configuration file](/apollo-mcp-server/config-file).

If the file is not provided, environment variables for your Apollo graph credentials (`APOLLO_GRAPH_REF` and `APOLLO_KEY`) are required for the server to run.
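
For example, a minimal configuration file pointing the server at your GraphQL endpoint and a local schema might look like the following sketch (the endpoint and paths are placeholders; see the config file reference for all options):

```yaml title="config.yaml"
endpoint: https://your-graphql-api.example.com/
schema:
  source: local
  path: ./api.graphql
operations:
  source: local
  paths:
    - ./operations/
```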

After installing the MCP server, you can run it using the following command:

```sh showLineNumbers=false
./apollo-mcp-server [OPTIONS] <PATH/TO/CONFIG/FILE>
```

### CLI options

| Option          | Description               |
| :-------------- | :------------------------ |
| `-h, --help`    | Print help information    |
| `-V, --version` | Print version information |

## With Docker

A container is built for the Apollo MCP Server with every release at `ghcr.io/apollographql/apollo-mcp-server`.

To download the **latest release** Docker container of Apollo MCP Server:

```bash
docker image pull ghcr.io/apollographql/apollo-mcp-server:latest
```

To download a **specific version** of Apollo MCP Server (recommended for CI environments to ensure predictable behavior):

```bash
# Note the `v` prefixing the version number
docker image pull ghcr.io/apollographql/apollo-mcp-server:v1.1.1
```

To download a specific version of Apollo MCP Server that is a release candidate:

```bash
# Note the `v` prefixing the version number and the `-rc` suffix
docker image pull ghcr.io/apollographql/apollo-mcp-server:v1.1.1-rc.1
```

<Note>

The container sets a few defaults for ease of use:

- **Working Directory is `/data`**: Make sure to mount static schemas / operations to this location
  using the volume flag when running [(`-v` / `--volume`)](https://docs.docker.com/reference/cli/docker/container/run/#volume).
- **Streamable HTTP transport on port 8000**: Make sure to publish container port 8000 for Streamable HTTP connections to
  the MCP server using the publish flag when running [(`-p` / `--publish`)](https://docs.docker.com/reference/cli/docker/container/run/#publish).

</Note>

Run the following Docker command to start the MCP Server, replacing the values for the paths to the config file and project root with your own:

```sh showLineNumbers=false
docker run \
  -it --rm \
  --name apollo-mcp-server \
  -p 8000:8000 \
  -v <PATH/TO/CONFIG/FILE>:/config.yaml \
  -v <PATH/TO/PROJECT/ROOT>:/data \
  --pull always \
  ghcr.io/apollographql/apollo-mcp-server:latest /config.yaml
```

This command:

- Starts an MCP Server in a Docker container
- Maps configuration files into the proper place for the Apollo MCP Server container
- Forwards port 8000 for accessing the MCP Server

## With the Apollo Runtime Container

The Apollo Runtime Container runs both the MCP Server and the [Apollo Router](https://www.apollographql.com/docs/graphos/routing) in a single container. It's useful for local development, testing, and production deployments.

The Apollo Runtime container includes all services necessary to serve GraphQL and MCP requests, including the Router and MCP Server. It is the easiest way to operate a GraphQL API with MCP support.

To serve both MCP and GraphQL requests, both ports `4000` and `8000` need to be exposed. An example command that retrieves the schema from Uplink is:

```bash title="Docker" {3, 6}
docker run \
  -p 4000:4000 \
  -p 8000:8000 \
  --env APOLLO_GRAPH_REF="<your-graph-ref>" \
  --env APOLLO_KEY="<your-graph-api-key>" \
  --env MCP_ENABLE=1 \
  --rm \
  ghcr.io/apollographql/apollo-runtime:latest
```

To learn more, review the [Apollo Runtime container documentation](/graphos/routing/self-hosted/containerization/docker).

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/files.rs:
--------------------------------------------------------------------------------

```rust
use std::path::{Path, PathBuf};
use std::time::Duration;

use futures::prelude::*;
use notify::Config;
use notify::EventKind;
use notify::PollWatcher;
use notify::RecursiveMode;
use notify::Watcher;
use notify::event::DataChange;
use notify::event::MetadataKind;
use notify::event::ModifyKind;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;

#[cfg(not(test))]
const DEFAULT_WATCH_DURATION: Duration = Duration::from_secs(3);

#[cfg(test)]
const DEFAULT_WATCH_DURATION: Duration = Duration::from_millis(100);

/// Creates a stream of events that emits whenever the file at the path changes. The stream never
/// terminates and must be dropped to finish watching.
///
/// # Arguments
///
/// * `path`: The file to watch
///
/// returns: impl Stream<Item=()>
///
pub fn watch(path: &Path) -> impl Stream<Item = ()> + use<> {
    watch_with_duration(path, DEFAULT_WATCH_DURATION)
}

#[allow(clippy::panic)] // TODO: code copied from router contained existing panics
fn watch_with_duration(path: &Path, duration: Duration) -> impl Stream<Item = ()> + use<> {
    let path = PathBuf::from(path);
    let is_dir = path.is_dir();
    let watched_path = path.clone();

    let (watch_sender, watch_receiver) = mpsc::channel(1);
    let watch_receiver_stream = tokio_stream::wrappers::ReceiverStream::new(watch_receiver);
    // We can't use the recommended watcher, because there's just too much variation across
    // platforms and file systems. We use the Poll Watcher, which is implemented consistently
    // across all platforms. Less reactive than other mechanisms, but at least it's predictable
    // across all environments. We compare contents as well, which reduces false positives with
    // some additional processing burden.
    let config = Config::default()
        .with_poll_interval(duration)
        .with_compare_contents(true);
    let mut watcher = PollWatcher::new(
        move |res: Result<notify::Event, notify::Error>| match res {
            Ok(event) => {
                // Events of interest are writes to the timestamp of a watched file or directory,
                // changes to the data of a watched file, and the addition or removal of a file.
                if matches!(
                    event.kind,
                    EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime))
                        | EventKind::Modify(ModifyKind::Data(DataChange::Any))
                        | EventKind::Create(_)
                        | EventKind::Remove(_)
                ) {
                    if !(event.paths.contains(&watched_path)
                        || (is_dir && event.paths.iter().any(|p| p.starts_with(&watched_path)))) {
                        tracing::trace!(
                            "Ignoring change event with paths {:?} and kind {:?} - watched paths are {:?}",
                            event.paths,
                            event.kind,
                            watched_path
                        );
                    } else {
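                        // The channel is bounded with capacity 1; if the receiver hasn't consumed
                        // the previous notification yet, back off briefly and retry.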
                        loop {
                            match watch_sender.try_send(()) {
                                Ok(_) => break,
                                Err(err) => {
                                    tracing::warn!(
                                        "could not process file watch notification. {}",
                                        err.to_string()
                                    );
                                    if matches!(err, TrySendError::Full(_)) {
                                        std::thread::sleep(Duration::from_millis(50));
                                    } else {
                                        panic!("event channel failed: {err}");
                                    }
                                }
                            }
                        }
                    }
                }
            }
            Err(e) => tracing::error!("event error: {:?}", e),
        },
        config,
    )
    .unwrap_or_else(|_| panic!("could not create watch on: {path:?}"));
    watcher
        .watch(&path, RecursiveMode::NonRecursive)
        .unwrap_or_else(|_| panic!("could not watch: {path:?}"));
    // Tell watchers that they should read the file once,
    // then listen to fs events.
    stream::once(future::ready(()))
        .chain(watch_receiver_stream)
        .chain(stream::once(async move {
            // This exists to give the stream ownership of the hotwatcher.
            // Without it hotwatch will get dropped and the stream will terminate.
            // This code never actually gets run.
            // The ideal would be that hotwatch implements a stream, and
            // therefore we don't need this hackery.
            drop(watcher);
        }))
        .boxed()
}

#[cfg(test)]
pub(crate) mod tests {
    use std::env::temp_dir;
    use std::fs::File;
    use std::io::Seek;
    use std::io::Write;
    use std::path::PathBuf;

    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn basic_watch() {
        let (path, mut file) = create_temp_file();
        let mut watch = watch_with_duration(&path, Duration::from_millis(100));
        // This test can be very racy. Without synchronisation, all
        // we can hope is that if we wait long enough between each
        // write/flush then the future will become ready.
        // Signal telling us we are ready
        assert!(futures::poll!(watch.next()).is_ready());
        write_and_flush(&mut file, "Some data 1").await;
        assert!(futures::poll!(watch.next()).is_ready());
        write_and_flush(&mut file, "Some data 2").await;
        assert!(futures::poll!(watch.next()).is_ready())
    }

    pub(crate) fn create_temp_file() -> (PathBuf, File) {
        let path = temp_dir().join(format!("{}", uuid::Uuid::new_v4()));
        let file = File::create(&path).unwrap();
        (path, file)
    }

    pub(crate) async fn write_and_flush(file: &mut File, contents: &str) {
        file.rewind().unwrap();
        file.set_len(0).unwrap();
        file.write_all(contents.as_bytes()).unwrap();
        file.flush().unwrap();
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/auth.rs:
--------------------------------------------------------------------------------

```rust
use axum::{
    Json, Router,
    extract::{Request, State},
    http::StatusCode,
    middleware::Next,
    response::Response,
    routing::get,
};
use axum_extra::{
    TypedHeader,
    headers::{Authorization, authorization::Bearer},
};
use http::Method;
use networked_token_validator::NetworkedTokenValidator;
use schemars::JsonSchema;
use serde::Deserialize;
use tower_http::cors::{Any, CorsLayer};
use url::Url;

mod networked_token_validator;
mod protected_resource;
mod valid_token;
mod www_authenticate;

use protected_resource::ProtectedResource;
pub(crate) use valid_token::ValidToken;
use valid_token::ValidateToken;
use www_authenticate::WwwAuthenticate;

/// Auth configuration options
#[derive(Debug, Clone, Deserialize, JsonSchema)]
pub struct Config {
    /// List of upstream OAuth servers to delegate auth to
    pub servers: Vec<Url>,

    /// List of accepted audiences for the OAuth tokens
    pub audiences: Vec<String>,

    /// The resource to protect.
    ///
    /// Note: This is usually the publicly accessible URL of this running MCP server
    pub resource: Url,

    /// Link to documentation related to the protected resource
    pub resource_documentation: Option<Url>,

    /// Supported OAuth scopes by this resource server
    pub scopes: Vec<String>,

    /// Whether to disable the auth token passthrough to upstream API
    #[serde(default)]
    pub disable_auth_token_passthrough: bool,
}

impl Config {
    pub fn enable_middleware(&self, router: Router) -> Router {
        /// Simple handler to encode our config into the desired OAuth 2.1 protected
        /// resource format
        async fn protected_resource(State(auth_config): State<Config>) -> Json<ProtectedResource> {
            Json(auth_config.into())
        }

        // Set up auth routes. NOTE: CORS needs to allow GET requests to the
        // metadata information paths.
        let cors = CorsLayer::new()
            .allow_methods([Method::GET])
            .allow_origin(Any);
        let auth_router = Router::new()
            .route(
                "/.well-known/oauth-protected-resource",
                get(protected_resource),
            )
            .with_state(self.clone())
            .layer(cors);

        // Merge with MCP server routes
        Router::new()
            .merge(auth_router)
            .merge(router.layer(axum::middleware::from_fn_with_state(
                self.clone(),
                oauth_validate,
            )))
    }
}

/// Validate that requests made have a corresponding bearer JWT token
#[tracing::instrument(skip_all, fields(status_code, reason))]
async fn oauth_validate(
    State(auth_config): State<Config>,
    token: Option<TypedHeader<Authorization<Bearer>>>,
    mut request: Request,
    next: Next,
) -> Result<Response, (StatusCode, TypedHeader<WwwAuthenticate>)> {
    // Consolidated unauthorized error for use with any fallible step in this process
    let unauthorized_error = || {
        let mut resource = auth_config.resource.clone();
        resource.set_path("/.well-known/oauth-protected-resource");

        (
            StatusCode::UNAUTHORIZED,
            TypedHeader(WwwAuthenticate::Bearer {
                resource_metadata: resource,
            }),
        )
    };

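    // Validate the bearer token against the configured upstream OAuth servers and accepted audiences.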
    let validator = NetworkedTokenValidator::new(&auth_config.audiences, &auth_config.servers);
    let token = token.ok_or_else(|| {
        tracing::Span::current().record("reason", "missing_token");
        tracing::Span::current().record("status_code", StatusCode::UNAUTHORIZED.as_u16());
        unauthorized_error()
    })?;

    let valid_token = validator.validate(token.0).await.ok_or_else(|| {
        tracing::Span::current().record("reason", "invalid_token");
        tracing::Span::current().record("status_code", StatusCode::UNAUTHORIZED.as_u16());
        unauthorized_error()
    })?;

    // Insert new context to ensure that handlers only use our enforced token verification
    // for propagation
    request.extensions_mut().insert(valid_token);

    let response = next.run(request).await;
    tracing::Span::current().record("status_code", response.status().as_u16());
    Ok(response)
}

#[cfg(test)]
mod tests {
    use super::*;
    use axum::middleware::from_fn_with_state;
    use axum::routing::get;
    use axum::{
        Router,
        body::Body,
        http::{Request, StatusCode},
    };
    use http::header::{AUTHORIZATION, WWW_AUTHENTICATE};
    use tower::ServiceExt; // for .oneshot()
    use url::Url;

    fn test_config() -> Config {
        Config {
            servers: vec![Url::parse("http://localhost:1234").unwrap()],
            audiences: vec!["test-audience".to_string()],
            resource: Url::parse("http://localhost:4000").unwrap(),
            resource_documentation: None,
            scopes: vec!["read".to_string()],
            disable_auth_token_passthrough: false,
        }
    }

    fn test_router(config: Config) -> Router {
        Router::new()
            .route("/test", get(|| async { "ok" }))
            .layer(from_fn_with_state(config, oauth_validate))
    }

    #[tokio::test]
    async fn missing_token_returns_unauthorized() {
        let config = test_config();
        let app = test_router(config.clone());
        let req = Request::builder().uri("/test").body(Body::empty()).unwrap();
        let res = app.oneshot(req).await.unwrap();
        assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
        let headers = res.headers();
        let www_auth = headers.get(WWW_AUTHENTICATE).unwrap().to_str().unwrap();
        assert!(www_auth.contains("Bearer"));
        assert!(www_auth.contains("resource_metadata"));
    }

    #[tokio::test]
    async fn invalid_token_returns_unauthorized() {
        let config = test_config();
        let app = test_router(config.clone());
        let req = Request::builder()
            .uri("/test")
            .header(AUTHORIZATION, "Bearer invalidtoken")
            .body(Body::empty())
            .unwrap();
        let res = app.oneshot(req).await.unwrap();
        assert_eq!(res.status(), StatusCode::UNAUTHORIZED);
        let headers = res.headers();
        let www_auth = headers.get(WWW_AUTHENTICATE).unwrap().to_str().unwrap();
        assert!(www_auth.contains("Bearer"));
        assert!(www_auth.contains("resource_metadata"));
    }
}

```

--------------------------------------------------------------------------------
/docs/source/deploy.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Deploy the MCP Server
subtitle: Deployment using Docker containers, when to choose which option, and production considerations
---

To deploy Apollo MCP Server in your production environment, use the recommended [Apollo Runtime Container](#apollo-runtime-container-recommended). You can also use a [standalone Apollo MCP Server container](#standalone-apollo-mcp-server-container) if needed.

## Apollo Runtime Container (Recommended)

For most production deployments, use the all-in-one [Apollo Runtime Container](/graphos/routing/self-hosted/containerization/docker). It includes everything you need to serve both GraphQL and MCP requests in a single, optimized container.

### Why choose the Apollo Runtime Container?

- **Simplified operations**: Single container to deploy and manage
- **Optimized performance**: Apollo Router and Apollo MCP Server are co-located
- **Built-in best practices**: Pre-configured for production use
- **Easier scaling**: Scale both GraphQL and MCP together
- **Unified monitoring**: Single service to monitor and debug

### Deploy the Apollo Runtime Container

The Apollo Runtime Container includes all services necessary to serve GraphQL and MCP requests, including Apollo Router and Apollo MCP Server. Both port `4000` (GraphQL) and `8000` (MCP) are exposed.

```bash title="Deploy with GraphOS (Recommended)"
docker run \
  -p 4000:4000 \
  -p 8000:8000 \
  --env APOLLO_GRAPH_REF="<your-graph-ref>" \
  --env APOLLO_KEY="<your-graph-api-key>" \
  --env MCP_ENABLE=1 \
  -v /path/to/config:/config/mcp_config.yaml \
  --rm \
  ghcr.io/apollographql/apollo-runtime:latest
```

When you run this, it will:

- Fetch your schema from GraphOS using your graph credentials (`APOLLO_GRAPH_REF` and `APOLLO_KEY`)
- Start the Apollo Router with your graph configuration
- Provide a configuration file to the MCP server by mounting it at `/config/mcp_config.yaml`
- Enable the Apollo MCP Server endpoint at `/mcp`

This command uses GraphOS-managed persisted queries for MCP tools. You'll need to publish your operations to the [GraphOS-managed persisted queries list](/apollo-mcp-server/define-tools#from-graphos-managed-persisted-queries). If you want to use other methods for defining MCP tools, see the [Define MCP Tools](/apollo-mcp-server/define-tools) page.

To learn more, see the [Apollo Runtime Container documentation](/graphos/routing/self-hosted/containerization/docker).

## Standalone Apollo MCP Server container

Use the standalone Apollo MCP Server container if you already have a GraphQL API running elsewhere and want to add MCP capabilities to it.

### Deploy standalone Apollo MCP Server container

Apollo MCP Server is available as a standalone Docker container. Container images are downloadable using the image `ghcr.io/apollographql/apollo-mcp-server`.

By default, the container expects all schema and operation files to be present in the `/data` directory within the container and that clients use Streamable HTTP transport on container port `8000`.

Here's an example `docker run` command that runs Apollo MCP Server for an example using [TheSpaceDevs graph](https://thespacedevs-production.up.railway.app/):

```yaml title="mcp_config.yaml"
endpoint: https://thespacedevs-production.up.railway.app/
operations:
  source: local
  paths:
    - /data/operations/
schema:
  source: local
  path: /data/api.graphql
```

```sh showLineNumbers=false
docker run \
  -it --rm \
  --name apollo-mcp-server \
  -p 8000:8000 \
  -v <path/to>/mcp_config.yaml:/config.yaml \
  -v $PWD/graphql/TheSpaceDevs:/data \
  --pull always \
  ghcr.io/apollographql/apollo-mcp-server:latest /config.yaml
```

## When to choose which option?

| Scenario                            | Recommended Option           | Why                                                                                                       |
| ----------------------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------------- |
| New GraphQL + MCP deployment        | Apollo Runtime Container     | Single container, easier to manage, optimized performance                                                 |
| GraphOS-managed graph               | Apollo Runtime Container     | Automatic sync for schema and persisted queries, unified telemetry                                        |
| Kubernetes/orchestrated environment | Apollo Runtime Container     | Fewer moving parts, simpler networking                                                                    |
| Adding MCP to existing GraphQL API  | Standalone Apollo MCP Server | Connect to your existing GraphQL endpoint                                                                 |
| Local development                   | `rover dev`                  | [Run `rover dev`](/apollo-mcp-server/run#with-the-rover-cli) to develop locally with both GraphQL and MCP |
          
## Production Considerations

### Load Balancing & Session Affinity

MCP is a stateful protocol that requires session affinity (sticky sessions).

When an MCP client initializes a session with Apollo MCP Server, it receives a session identifier unique to that server instance through the `mcp-session-id` header. You must enable session affinity in your load balancer so that all requests sharing the same `mcp-session-id` are routed to the same backend instance.

Most cloud load balancers (ALB, GCP LB) don't support header-based session affinity. Use Nginx, HAProxy, or Envoy/Istio for proper session routing.
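
For example, if you route traffic through an Istio service mesh, you can hash requests on the session header with a `DestinationRule` (a sketch, assuming Istio; the name and host are placeholders for your MCP server's Service):

```yaml
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: apollo-mcp-server
spec:
  host: apollo-mcp-server.default.svc.cluster.local
  trafficPolicy:
    loadBalancer:
      consistentHash:
        httpHeaderName: mcp-session-id
```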

#### Stateless mode

Although MCP is a stateful protocol by default, the Streamable HTTP transport supports operating in a stateless mode.
In stateless mode, no session ID is passed back and forth between the client and server, and each request to the MCP server happens in its own HTTP POST.
Because no session state is held in memory by a single host, the server can be scaled horizontally, though this could cause issues if your MCP client depends on sticky sessions.

You can configure stateless mode in the transport config section:

```yaml
transport:
  type: streamable_http
  stateful_mode: false
```

### Scaling Recommendations

For the Apollo Runtime Container:

- Scale both GraphQL and MCP together as a single unit
- Simpler horizontal scaling
- Consistent performance characteristics

For the standalone Apollo MCP Server container:

- Scale Apollo MCP Server independently of your GraphQL API
- More complex but enables fine-tuned resource allocation

### Next steps

After you deploy, configure:

1. [Health checks](/apollo-mcp-server/health-checks) for monitoring
2. [CORS settings](/apollo-mcp-server/cors) for browser clients
3. [Authorization](/apollo-mcp-server/auth) for production security

```

--------------------------------------------------------------------------------
/scripts/nix/install.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
#
# Licensed under the MIT license
# <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# Installs the latest version of the Apollo MCP Server.
# Specify a specific version to install with the $VERSION variable.

set -u

BINARY_DOWNLOAD_PREFIX="${APOLLO_MCP_SERVER_BINARY_DOWNLOAD_PREFIX:="https://github.com/apollographql/apollo-mcp-server/releases/download"}"

# Apollo MCP Server version defined in apollo-mcp-server's Cargo.toml
# Note: Change this line manually during the release steps.
PACKAGE_VERSION="v1.1.1"

download_binary_and_run_installer() {
    downloader --check
    need_cmd mktemp
    need_cmd chmod
    need_cmd mkdir
    need_cmd rm
    need_cmd rmdir
    need_cmd tar
    need_cmd which
    need_cmd dirname
    need_cmd awk
    need_cmd cut

    # if $VERSION isn't provided or has 0 length, use version from apollo-mcp-server's Cargo.toml
    # ${VERSION:-} checks if version exists, and if doesn't uses the default
    # which is after the :-, which in this case is empty. -z checks for empty str
    if [ -z ${VERSION:-} ]; then
        # VERSION is either not set or empty
        DOWNLOAD_VERSION=$PACKAGE_VERSION
    else
        # VERSION set and not empty
        DOWNLOAD_VERSION=$VERSION
    fi


    get_architecture || return 1
    local _arch="$RETVAL"
    assert_nz "$_arch" "arch"

    local _ext=""
    case "$_arch" in
        *windows*)
            _ext=".exe"
            ;;
    esac

    local _tardir="apollo-mcp-server-$DOWNLOAD_VERSION-${_arch}"
    local _url="$BINARY_DOWNLOAD_PREFIX/$DOWNLOAD_VERSION/${_tardir}.tar.gz"
    local _dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t apollo-mcp-server)"
    local _file="$_dir/input.tar.gz"
    local _apollo_mcp_server="$_dir/apollo-mcp-server$_ext"
    local _safe_url

    # Remove credentials from the URL for logging
    _safe_url=$(echo "$_url" | awk '{sub("https://[^@]+@","https://");}1')
    say "downloading apollo-mcp-server from $_safe_url" 1>&2

    ensure mkdir -p "$_dir"
    downloader "$_url" "$_file"
    if [ $? != 0 ]; then
      say "failed to download $_safe_url"
      say "this may be a standard network error, but it may also indicate"
      say "that the MCP Server's release process is not working. When in doubt"
      say "please feel free to open an issue!"
      say "https://github.com/apollographql/apollo-mcp-server/issues/new/choose"
      exit 1
    fi

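    # Extract the tarball, stripping the top-level directory so the binary lands directly in $_dir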
    ensure tar xf "$_file" --strip-components 1 -C "$_dir"

    outfile="./apollo-mcp-server"

    say "Moving $_apollo_mcp_server to $outfile ..."
    mv "$_apollo_mcp_server" "$outfile"

    local _retval=$?

    say ""
    say "You can now run the Apollo MCP Server using '$outfile'"

    ignore rm -rf "$_dir"

    return "$_retval"
}

get_architecture() {
    local _ostype="$(uname -s)"
    local _cputype="$(uname -m)"

    if [ "$_ostype" = Darwin -a "$_cputype" = i386 ]; then
        # Darwin `uname -s` lies
        if sysctl hw.optional.x86_64 | grep -q ': 1'; then
            local _cputype=x86_64
        fi
    fi

    if [ "$_ostype" = Darwin -a "$_cputype" = arm64 ]; then
        # Darwin `uname -s` doesn't seem to lie on Big Sur
        # but the cputype we want is called aarch64, not arm64 (they are equivalent)
        local _cputype=aarch64
    fi

    case "$_ostype" in
        Linux)
            if has_required_glibc; then
                local _ostype=unknown-linux-gnu
            else
                local _ostype=unknown-linux-musl

                # We do not currently release builds for aarch64-unknown-linux-musl
                if [ "$_cputype" = aarch64 ]; then
                    err "Unsupported platform: aarch64-$_ostype"
                fi

                say "Downloading musl binary"
            fi
            ;;

        Darwin)
            local _ostype=apple-darwin
            ;;

        MINGW* | MSYS* | CYGWIN*)
            local _ostype=pc-windows-msvc
            ;;

        *)
            err "no precompiled binaries available for OS: $_ostype"
            ;;
    esac

    case "$_cputype" in
        # these are the only two acceptable values for cputype
        x86_64 | aarch64 )
            ;;
        *)
            err "no precompiled binaries available for CPU architecture: $_cputype"

    esac

    local _arch="$_cputype-$_ostype"

    RETVAL="$_arch"
}

say() {
    local green=`tput setaf 2 2>/dev/null || echo ''`
    local reset=`tput sgr0 2>/dev/null || echo ''`
    echo "$1"
}

err() {
    local red=`tput setaf 1 2>/dev/null || echo ''`
    local reset=`tput sgr0 2>/dev/null || echo ''`
    say "${red}ERROR${reset}: $1" >&2
    exit 1
}

has_required_glibc() {
    local _ldd_version="$(ldd --version 2>&1 | head -n1)"
    # glibc version string is inconsistent across distributions
    # instead check if the string does not contain musl (case insensitive)
    if echo "${_ldd_version}" | grep -iv musl >/dev/null; then
        local _glibc_version=$(echo "${_ldd_version}" | awk 'NR==1 { print $NF }')
        local _glibc_major_version=$(echo "${_glibc_version}" | cut -d. -f1)
        local _glibc_min_version=$(echo "${_glibc_version}" | cut -d. -f2)
        local _min_major_version=2
        local _min_minor_version=17
        if [ "${_glibc_major_version}" -gt "${_min_major_version}" ] \
            || { [ "${_glibc_major_version}" -eq "${_min_major_version}" ] \
            && [ "${_glibc_min_version}" -ge "${_min_minor_version}" ]; }; then
            return 0
        else
            say "This operating system needs glibc >= ${_min_major_version}.${_min_minor_version}, but only has ${_libc_version} installed."
        fi
    else
        say "This operating system does not support dynamic linking to glibc."
    fi

    return 1
}

need_cmd() {
    if ! check_cmd "$1"
    then err "need '$1' (command not found)"
    fi
}

check_cmd() {
    command -v "$1" > /dev/null 2>&1
    return $?
}

need_ok() {
    if [ $? != 0 ]; then err "$1"; fi
}

assert_nz() {
    if [ -z "$1" ]; then err "assert_nz $2"; fi
}

# Run a command that should never fail. If the command fails execution
# will immediately terminate with an error showing the failing
# command.
ensure() {
    "$@"
    need_ok "command failed: $*"
}

# This is just for indicating that commands' results are being
# intentionally ignored. Usually, because it's being executed
# as part of error handling.
ignore() {
    "$@"
}

# This wraps curl or wget. Try curl first, if not installed,
# use wget instead.
downloader() {
    if check_cmd curl
    then _dld=curl
    elif check_cmd wget
    then _dld=wget
    else _dld='curl or wget' # to be used in error message of need_cmd
    fi

    if [ "$1" = --check ]
    then need_cmd "$_dld"
    elif [ "$_dld" = curl ]
    then curl -sSfL "$1" -o "$2"
    elif [ "$_dld" = wget ]
    then wget "$1" -O "$2"
    else err "Unknown downloader"   # should not reach here
    fi
}

download_binary_and_run_installer "$@" || exit 1

```

--------------------------------------------------------------------------------
/docs/source/index.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Apollo MCP Server
subtitle: Enable graph-based API orchestration with AI
redirectFrom:
    - /apollo-mcp-server/user-guide
    - /apollo-mcp-server/guides
---

Apollo MCP Server provides a standard way for AI models to access and orchestrate your APIs running with Apollo.

## What is MCP?

[Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) is an open protocol that standardizes how applications provide context to AI models like Large Language Models (LLMs). MCP enables LLMs and AI agents to indirectly fetch data from external sources.

MCP follows a client-server architecture. MCP servers expose functions, called _tools_, that MCP clients can invoke.

## What is Apollo MCP Server? 

Apollo MCP Server is an implementation of an MCP server. It makes GraphQL API operations available to AI clients as MCP tools. You can use Apollo MCP Server with any GraphQL API.

The GraphQL operations can be configured from persisted queries, which are predefined, approved lists of operations that are registered with and maintained by a graph. The operations can also be determined by AI introspecting your graph schema.

Apollo MCP Server is deployable in local environments via Apollo's Rover CLI or in containerized services in your cloud infrastructure. It can expose an MCP endpoint using Streamable HTTP for communication with AI clients.

## How Apollo MCP Server works

Apollo MCP Server bridges AI applications and your GraphQL APIs, translating GraphQL operations into MCP tools that AI models can discover and use.

```mermaid
graph LR
    %% Nodes
    AI["AI Application\n(Claude, ChatGPT, etc.)"]
    MCPClient["MCP Client\n(Built into AI app)"]
    MCPServer["Apollo MCP Server"]
    GraphQL["GraphQL API\n(Your Graph)"]
    Data["Your Data Sources\n(Databases, APIs, etc.)"]

    %% Connections
    AI <-->|"Natural Language\nRequests"| MCPClient
    MCPClient <-->|"MCP Protocol\n(stdio/Streamable HTTP)"| MCPServer
    MCPServer <-->|"GraphQL\nOperations"| GraphQL
    GraphQL <-->|"Data\nQueries"| Data

    %% Tool Generation
    subgraph ToolGeneration[Tool Generation]
        direction TB
        OpFiles["Operation Files\n(.graphql)"]
        PQM["Persisted Query\nManifests"]
        Introspection["Schema\nIntrospection"]
        Tools["MCP Tools"]
        
        OpFiles --> Tools
        PQM --> Tools
        Introspection --> Tools
    end

    MCPServer -.->|"Exposes"| Tools
    Tools -.->|"Available to"| MCPClient

    %% Styling
    classDef default stroke-width:1px
    classDef aiClient stroke-width:2px
    classDef mcpComponent stroke-width:2px
    classDef apolloComponent stroke-width:2px
    classDef apiComponent stroke-width:2px
    classDef dataComponent stroke-width:2px

    class AI aiClient
    class MCPClient mcpComponent
    class MCPServer apolloComponent
    class GraphQL apiComponent
    class Data dataComponent
    class OpFiles,PQM,Introspection apolloComponent
    class Tools mcpComponent
```

The architecture enables intelligent API orchestration through these components:

* AI Applications: Tools like Claude Desktop or ChatGPT connect to Apollo MCP Server through their built-in MCP clients, making requests in natural language.
* Transport Options: Communication happens over stdio for local development or Streamable HTTP. 
* Tool Generation: Apollo MCP Server creates MCP tools from your GraphQL operations using:
    * Operation Files: Individual `.graphql` files for specific queries or mutations
    * Persisted Query Manifests: Pre-approved operation lists from Apollo GraphOS
    * Schema Introspection: Dynamic operation discovery for flexible AI exploration

* Secure Execution: When invoked, the server executes GraphQL operations against your API endpoint, respecting all existing authentication, headers, and security policies.
* Existing Infrastructure: Your GraphQL API handles requests normally, with Apollo MCP Server acting as a controlled gateway rather than requiring any changes to your graph.

This design lets you expose precise GraphQL capabilities to AI while maintaining complete control over data access and security.

### Example usage

Once configured, AI applications can use your GraphQL operations naturally:

> User: "Show me the astronauts currently in space"
>
> Claude: *Uses GetAstronautsCurrentlyInSpace tool to query your GraphQL API*
>
> "There are currently 7 astronauts aboard the ISS..."

## Why GraphQL for AI?

GraphQL's architecture provides unique advantages for AI-powered API orchestration:

**🎯 Deterministic Execution**: GraphQL's built-in relationship handling and query structure eliminate guesswork for AI models. The graph defines clear paths between data types, ensuring AI agents execute operations in the correct sequence without complex prompt engineering or error-prone orchestration logic.

**🛡️ Policy Enforcement**: Security policies and access controls apply consistently across all services within a single GraphQL query context. This unified enforcement model ensures AI operations respect organizational boundaries, even when spanning multiple underlying APIs or microservices.

**⚡ Efficiency**: AI agents can request precisely the data needed in a single GraphQL query, reducing API calls, network overhead, and token usage. This focused approach delivers faster responses and lower operational costs compared to orchestrating multiple REST endpoints.

**🔄 Agility**: The pace of AI development demands infrastructure that can evolve daily. GraphQL's declarative approach lets teams rapidly create, modify, and deploy new AI capabilities through self-service tooling. Product teams can wire up new MCP tools without waiting for custom development, keeping pace with AI's unprecedented velocity.

With Apollo MCP Server, these GraphQL advantages become immediately accessible to AI applications through standardized MCP tools.

## Benefits of Apollo MCP Server

- **🤖 Enable AI-driven API orchestration**. With Apollo MCP Server, AI models can act as intelligent orchestrators of your GraphQL API operations. Because each GraphQL operation is exposed as a distinct MCP tool, AI clients can dynamically chain these operations together, in combination with other MCP servers and tools, to execute complex workflows and automate multi-step processes.

- **🚀 Connect AI to GraphQL in Minutes**. Developers can expose existing or new GraphQL API operations to AI clients without building complex custom integrations. By translating GraphQL functionalities into standardized MCP tools, Apollo MCP Server can significantly reduce the effort needed to connect AI to diverse data sources.

- **🔒 Maintain Full Security Control**. By using pre-defined, pre-approved persisted queries, developers can maintain precise governance over which data and operations AI clients can access. This ensures that AI uses existing security protocols and data access policies.

## Prerequisites

- A GraphQL API
- An MCP Client

## Getting started

Ready to connect AI to your GraphQL API? Follow our [5-minute quickstart](/apollo-mcp-server/quickstart) to see Apollo MCP Server in action, or explore the [config file reference](/apollo-mcp-server/config-file) for detailed configuration options.

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/schema/schema_stream.rs:
--------------------------------------------------------------------------------

```rust
// tonic does not derive `Eq` for the gRPC message types, which causes a warning from Clippy. The
// current suggestion is to explicitly allow the lint in the module that imports the protos.
// Read more: https://github.com/hyperium/tonic/issues/1056
#![allow(clippy::derive_partial_eq_without_eq)]

use crate::uplink::UplinkRequest;
use crate::uplink::UplinkResponse;
use crate::uplink::schema::SchemaState;
use crate::uplink::schema::schema_stream::supergraph_sdl_query::FetchErrorCode;
use crate::uplink::schema::schema_stream::supergraph_sdl_query::SupergraphSdlQueryRouterConfig;
use graphql_client::GraphQLQuery;

#[derive(GraphQLQuery)]
#[graphql(
    query_path = "src/uplink/schema/schema_query.graphql",
    schema_path = "src/uplink/uplink.graphql",
    request_derives = "Debug",
    response_derives = "PartialEq, Debug, Deserialize",
    deprecated = "warn"
)]
pub(crate) struct SupergraphSdlQuery;

impl From<UplinkRequest> for supergraph_sdl_query::Variables {
    fn from(req: UplinkRequest) -> Self {
        supergraph_sdl_query::Variables {
            api_key: req.api_key,
            graph_ref: req.graph_ref,
            if_after_id: req.id,
        }
    }
}

impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<String> {
    fn from(response: supergraph_sdl_query::ResponseData) -> Self {
        match response.router_config {
            SupergraphSdlQueryRouterConfig::RouterConfigResult(result) => UplinkResponse::New {
                response: result.supergraph_sdl,
                id: result.id,
                // this will truncate the number of seconds to under u64::MAX, which should be
                // a large enough delay anyway
                delay: result.min_delay_seconds as u64,
            },
            SupergraphSdlQueryRouterConfig::Unchanged(response) => UplinkResponse::Unchanged {
                id: Some(response.id),
                delay: Some(response.min_delay_seconds as u64),
            },
            SupergraphSdlQueryRouterConfig::FetchError(err) => UplinkResponse::Error {
                retry_later: err.code == FetchErrorCode::RETRY_LATER,
                code: match err.code {
                    FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
                    FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
                    FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
                    FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
                    FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
                        "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
                    }
                    FetchErrorCode::Other(other) => other,
                },
                message: err.message,
            },
        }
    }
}

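// Same conversion as above, but wraps the SDL in a `SchemaState` that also records the launch ID.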
impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<SchemaState> {
    fn from(response: supergraph_sdl_query::ResponseData) -> Self {
        match response.router_config {
            SupergraphSdlQueryRouterConfig::RouterConfigResult(result) => UplinkResponse::New {
                response: SchemaState {
                    sdl: result.supergraph_sdl,
                    launch_id: Some(result.id.clone()),
                },
                id: result.id,
                // this will truncate the number of seconds to under u64::MAX, which should be
                // a large enough delay anyway
                delay: result.min_delay_seconds as u64,
            },
            SupergraphSdlQueryRouterConfig::Unchanged(response) => UplinkResponse::Unchanged {
                id: Some(response.id),
                delay: Some(response.min_delay_seconds as u64),
            },
            SupergraphSdlQueryRouterConfig::FetchError(err) => UplinkResponse::Error {
                retry_later: err.code == FetchErrorCode::RETRY_LATER,
                code: match err.code {
                    FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
                    FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
                    FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
                    FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
                    FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
                        "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
                    }
                    FetchErrorCode::Other(other) => other,
                },
                message: err.message,
            },
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_uplink_request_to_graphql_variables() {
        let request = UplinkRequest {
            api_key: "test_key".to_string(),
            graph_ref: "test_ref".to_string(),
            id: Some("test_id".to_string()),
        };

        let variables: supergraph_sdl_query::Variables = request.into();

        assert_eq!(variables.api_key, "test_key");
        assert_eq!(variables.graph_ref, "test_ref");
        assert_eq!(variables.if_after_id, Some("test_id".to_string()));
    }

    #[test]
    fn test_graphql_response_to_uplink_response_new() {
        let response = supergraph_sdl_query::ResponseData {
            router_config: SupergraphSdlQueryRouterConfig::RouterConfigResult(
                supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnRouterConfigResult {
                    supergraph_sdl: "test_sdl".to_string(),
                    id: "result_id".to_string(),
                    min_delay_seconds: 42.0,
                },
            ),
        };

        let uplink_response: UplinkResponse<String> = response.into();

        assert!(matches!(
            uplink_response,
            UplinkResponse::New { response, id, delay }
            if response == "test_sdl" && id == "result_id" && delay == 42
        ));
    }

    #[test]
    fn test_graphql_response_to_uplink_response_unchanged() {
        let response = supergraph_sdl_query::ResponseData {
            router_config: SupergraphSdlQueryRouterConfig::Unchanged(
                supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnUnchanged {
                    id: "unchanged_id".to_string(),
                    min_delay_seconds: 30.0,
                },
            ),
        };

        let uplink_response: UplinkResponse<String> = response.into();

        assert!(matches!(
            uplink_response,
            UplinkResponse::Unchanged { id, delay }
            if id == Some("unchanged_id".to_string()) && delay == Some(30)
        ));
    }

    #[test]
    fn test_graphql_response_to_uplink_response_error() {
        let response = supergraph_sdl_query::ResponseData {
            router_config: SupergraphSdlQueryRouterConfig::FetchError(
                supergraph_sdl_query::SupergraphSdlQueryRouterConfigOnFetchError {
                    code: FetchErrorCode::RETRY_LATER,
                    message: "Try again later".to_string(),
                },
            ),
        };

        let uplink_response: UplinkResponse<String> = response.into();

        assert!(matches!(
            uplink_response,
            UplinkResponse::Error { retry_later, code, message }
            if retry_later && code == "RETRY_LATER" && message == "Try again later"
        ));
    }
}

```

--------------------------------------------------------------------------------
/docs/source/cors.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: Configuring CORS
subtitle: Control browser access to your MCP server
---

**This article describes CORS configuration that's specific to Apollo MCP Server**. For a more general introduction to CORS and common considerations, see [MDN's CORS documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS).

By default, Apollo MCP Server has CORS disabled. If your MCP server serves tools to browser-based applications, you need to enable CORS and configure one of the following in the `cors` section of your server's YAML config file:

- Add the origins of those web applications to the server's list of allowed `origins`.
  - Use this option if there is a known, finite list of web applications that consume your MCP server.
- Add a regex that matches the origins of those web applications to the server's list of allowed `match_origins`.
  - This option comes in handy if you want to match origins against a pattern; see the example below, which matches subdomains of a specific domain.
- Enable the `allow_any_origin` option.
  - Use this option if your MCP server is a public API with arbitrarily many web app consumers.
  - With this option enabled, the server sends the wildcard (\*) value for the `Access-Control-Allow-Origin` header. This enables _any_ website to initiate browser connections to it (but they can't provide cookies or other credentials).
- If clients need to authenticate their requests with cookies, you _must_ use either `origins`, `match_origins`, or the combination of both options. When using both options, note that `origins` is evaluated before `match_origins`.

The following snippet includes an example of each option (use either `allow_any_origin`, or `origins` and/or `match_origins`):

```yaml title="mcp.yaml"
transport:
  type: streamable_http
  port: 8000

cors:
  # Enable CORS support
  enabled: true

  # Set to true to allow any origin
  # (Defaults to false)
  allow_any_origin: true

  # List of accepted origins
  # (Ignored if allow_any_origin is true)
  #
  # An origin is a combination of scheme, hostname and port.
  # It does not have any path section, so no trailing slash.
  origins:
    - https://www.your-app.example.com

  # List of origin patterns (regex matching)
  match_origins:
    - "^https://([a-z0-9]+[.])*api[.]example[.]com$" # any host that uses https and ends with .api.example.com
```

You can also disable CORS entirely by setting `enabled` to `false` or omitting the `cors` section:

```yaml title="mcp.yaml"
cors:
  enabled: false
```

If your MCP server serves exclusively _non_-browser-based clients, you probably don't need to enable CORS configuration.

### Passing credentials

If your MCP server requires requests to include a user's credentials (e.g., via cookies), you need to modify your CORS configuration to tell the browser those credentials are allowed.

You can enable credentials with CORS by setting the `Access-Control-Allow-Credentials` HTTP header to `true`.

To allow browsers to pass credentials to the server, set `allow_credentials` to `true`, like so:

```yaml title="mcp.yaml"
cors:
  enabled: true
  origins:
    - https://www.your-app.example.com
  allow_credentials: true
```

**To support credentialed requests, your server's config file must specify individual `origins` or `match_origins`**. If your server enables `allow_any_origin`, your browser will refuse to send credentials.

### All `cors` options

The following snippet shows all CORS configuration defaults for Apollo MCP Server:

```yaml title="mcp.yaml"
#
# CORS (Cross Origin Resource Sharing)
#
cors:
  # Enable CORS support
  enabled: false

  # Set to true to allow any origin
  allow_any_origin: false

  # List of accepted origins
  # (Ignored if allow_any_origin is set to true)
  #
  # An origin is a combination of scheme, hostname and port.
  # It does not have any path section, so no trailing slash.
  origins: []

  # List of origin patterns (regex matching)
  # Useful for matching dynamic ports or subdomains
  match_origins: []

  # Set to true to add the `Access-Control-Allow-Credentials` header
  allow_credentials: false

  # Allowed request methods
  allow_methods:
    - GET
    - POST

  # The headers to allow.
  # These are the default headers required for MCP protocol and trace context
  allow_headers:
    - accept
    - content-type
    - mcp-protocol-version
    - mcp-session-id
    - traceparent # W3C Trace Context
    - tracestate # W3C Trace Context

  # Which response headers are available to scripts running in the
  # browser in response to a cross-origin request.
  # The mcp-session-id header should be exposed for MCP session management.
  # Trace context headers are exposed for distributed tracing.
  expose_headers:
    - mcp-session-id
    - traceparent # W3C Trace Context
    - tracestate # W3C Trace Context

  # Adds the Access-Control-Max-Age header
  # Maximum age (in seconds) for preflight cache
  max_age: 7200 # 2 hours
```

### Origin matching

Apollo MCP Server supports two types of origin matching:

#### Exact origins

Use the `origins` array for exact origin matches:

```yaml
cors:
  enabled: true
  origins:
    - http://localhost:3000
    - https://myapp.example.com
```

#### Pattern matching

Use the `match_origins` array for regex pattern matching:

```yaml
cors:
  enabled: true
  match_origins:
    - "^https://localhost:[0-9]+$" # Any localhost HTTPS port
    - "^http://localhost:[0-9]+$" # Any localhost HTTP port
    - "^https://.*\\.example\\.com$" # Any subdomain of example.com
```

### Common configurations

#### Development setup

For local development with hot reloading and various ports:

```yaml title="mcp.yaml"
cors:
  enabled: true
  match_origins:
    - "^http://localhost:[0-9]+$"
  allow_credentials: true
```

#### Production setup

For production with specific known origins:

```yaml title="mcp.yaml"
cors:
  enabled: true
  origins:
    - https://myapp.example.com
  allow_credentials: true
  max_age: 86400 # 24 hours
```

#### Public API setup

For public APIs that don't require credentials:

```yaml title="mcp.yaml"
cors:
  enabled: true
  allow_any_origin: true
  allow_credentials: false # Cannot use credentials with any origin
```

### Browser integration example

Here's a simple example of connecting to Apollo MCP Server from a browser:

```javascript
async function connectToMCP() {
  const response = await fetch("http://127.0.0.1:8000/mcp", {
    method: "POST",
    headers: {
      Accept: "application/json, text/event-stream",
      "Content-Type": "application/json",
      "MCP-Protocol-Version": "2025-06-18",
    },
    body: JSON.stringify({
      jsonrpc: "2.0",
      method: "initialize",
      params: {
        protocolVersion: "2025-06-18",
        capabilities: {},
        clientInfo: { name: "Browser Client", version: "1.0" },
      },
      id: 1,
    }),
  });

  // Extract session ID from response headers (automatically exposed)
  const sessionId = response.headers.get("mcp-session-id");

  // Handle SSE format response (starts with "data: ")
  const responseText = await response.text();
  const jsonData = responseText.startsWith("data: ")
    ? responseText.slice(6) // Remove "data: " prefix
    : responseText;

  const result = JSON.parse(jsonData);
  console.log("Connected:", result);
  console.log("Session ID:", sessionId);
}

connectToMCP();
```

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/runtime/filtering_exporter.rs:
--------------------------------------------------------------------------------

```rust
use opentelemetry::{Key, KeyValue};
use opentelemetry_sdk::Resource;
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::trace::{SpanData, SpanExporter};
use std::collections::HashSet;
use std::fmt::Debug;

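/// Wraps a [`SpanExporter`] and strips configured `apollo.*` attributes from each
/// span before forwarding the batch to the inner exporter.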
#[derive(Debug)]
pub struct FilteringExporter<E> {
    inner: E,
    omitted: HashSet<Key>,
}

impl<E> FilteringExporter<E> {
    pub fn new(inner: E, omitted: impl IntoIterator<Item = Key>) -> Self {
        Self {
            inner,
            omitted: omitted.into_iter().collect(),
        }
    }
}

impl<E> SpanExporter for FilteringExporter<E>
where
    E: SpanExporter + Send + Sync,
{
    fn export(&self, mut batch: Vec<SpanData>) -> impl Future<Output = OTelSdkResult> + Send {
        for span in &mut batch {
            span.attributes
                .retain(|kv| filter_omitted_apollo_attributes(kv, &self.omitted));
        }

        self.inner.export(batch)
    }

    fn shutdown(&mut self) -> OTelSdkResult {
        self.inner.shutdown()
    }
    fn force_flush(&mut self) -> OTelSdkResult {
        self.inner.force_flush()
    }
    fn set_resource(&mut self, r: &Resource) {
        self.inner.set_resource(r)
    }
}

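/// Keep an attribute unless its key starts with `apollo.` and is in the omitted set.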
fn filter_omitted_apollo_attributes(kv: &KeyValue, omitted_attributes: &HashSet<Key>) -> bool {
    !kv.key.as_str().starts_with("apollo.") || !omitted_attributes.contains(&kv.key)
}

#[cfg(test)]
mod tests {
    use crate::runtime::filtering_exporter::FilteringExporter;
    use opentelemetry::trace::{SpanContext, SpanKind, Status, TraceState};
    use opentelemetry::{InstrumentationScope, Key, KeyValue, SpanId, TraceFlags, TraceId};
    use opentelemetry_sdk::Resource;
    use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
    use opentelemetry_sdk::trace::{SpanData, SpanEvents, SpanExporter, SpanLinks};
    use std::collections::HashSet;
    use std::fmt::Debug;
    use std::future::ready;
    use std::time::SystemTime;

    #[cfg_attr(coverage_nightly, coverage(off))]
    fn create_mock_span_data() -> SpanData {
        let span_context: SpanContext = SpanContext::new(
            TraceId::from_u128(1),
            SpanId::from_u64(12345),
            TraceFlags::default(),
            true, // is_remote
            TraceState::default(),
        );

        SpanData {
            span_context,
            parent_span_id: SpanId::from_u64(54321),
            span_kind: SpanKind::Internal,
            name: "test-span".into(),
            start_time: SystemTime::UNIX_EPOCH,
            end_time: SystemTime::UNIX_EPOCH,
            attributes: vec![
                KeyValue::new("http.method", "GET"),
                KeyValue::new("apollo.mock", "mock"),
            ],
            dropped_attributes_count: 0,
            events: SpanEvents::default(),
            links: SpanLinks::default(),
            status: Status::Ok,
            instrumentation_scope: InstrumentationScope::builder("test-service")
                .with_version("1.0.0")
                .build(),
        }
    }

    #[tokio::test]
    async fn filtering_exporter_filters_omitted_apollo_attributes() {
        #[derive(Debug)]
        struct TestExporter {}

        #[cfg_attr(coverage_nightly, coverage(off))]
        impl SpanExporter for TestExporter {
            fn export(&self, batch: Vec<SpanData>) -> impl Future<Output = OTelSdkResult> + Send {
                batch.into_iter().for_each(|span| {
                    if span
                        .attributes
                        .iter()
                        .any(|kv| kv.key.as_str().starts_with("apollo."))
                    {
                        panic!("Omitted attributes were not filtered");
                    }
                });

                ready(Ok(()))
            }

            fn shutdown(&mut self) -> OTelSdkResult {
                Ok(())
            }

            fn force_flush(&mut self) -> OTelSdkResult {
                Ok(())
            }

            fn set_resource(&mut self, _resource: &Resource) {}
        }

        let mut omitted = HashSet::new();
        omitted.insert(Key::from_static_str("apollo.mock"));
        let mock_exporter = TestExporter {};
        let mock_span_data = create_mock_span_data();

        let filtering_exporter = FilteringExporter::new(mock_exporter, omitted);
        filtering_exporter
            .export(vec![mock_span_data])
            .await
            .expect("Export error");
    }

    #[tokio::test]
    async fn filtering_exporter_calls_inner_exporter_on_shutdown() {
        #[derive(Debug)]
        struct TestExporter {}

        #[cfg_attr(coverage_nightly, coverage(off))]
        impl SpanExporter for TestExporter {
            fn export(&self, _batch: Vec<SpanData>) -> impl Future<Output = OTelSdkResult> + Send {
                ready(Err(OTelSdkError::InternalFailure(
                    "unexpected call".to_string(),
                )))
            }

            fn shutdown(&mut self) -> OTelSdkResult {
                Ok(())
            }

            fn force_flush(&mut self) -> OTelSdkResult {
                Err(OTelSdkError::InternalFailure("unexpected call".to_string()))
            }

            fn set_resource(&mut self, _resource: &Resource) {
                unreachable!("should not be called");
            }
        }

        let mock_exporter = TestExporter {};

        let mut filtering_exporter = FilteringExporter::new(mock_exporter, HashSet::new());
        assert!(filtering_exporter.shutdown().is_ok());
    }

    #[tokio::test]
    async fn filtering_exporter_calls_inner_exporter_on_force_flush() {
        #[derive(Debug)]
        struct TestExporter {}

        #[cfg_attr(coverage_nightly, coverage(off))]
        impl SpanExporter for TestExporter {
            fn export(&self, _batch: Vec<SpanData>) -> impl Future<Output = OTelSdkResult> + Send {
                ready(Err(OTelSdkError::InternalFailure(
                    "unexpected call".to_string(),
                )))
            }

            fn shutdown(&mut self) -> OTelSdkResult {
                Err(OTelSdkError::InternalFailure("unexpected call".to_string()))
            }

            fn force_flush(&mut self) -> OTelSdkResult {
                Ok(())
            }

            fn set_resource(&mut self, _resource: &Resource) {
                unreachable!("should not be called");
            }
        }

        let mock_exporter = TestExporter {};

        let mut filtering_exporter = FilteringExporter::new(mock_exporter, HashSet::new());
        assert!(filtering_exporter.force_flush().is_ok());
    }

    #[tokio::test]
    async fn filtering_exporter_calls_inner_exporter_on_set_resource() {
        #[derive(Debug)]
        struct TestExporter {}

        #[cfg_attr(coverage_nightly, coverage(off))]
        impl SpanExporter for TestExporter {
            fn export(&self, _batch: Vec<SpanData>) -> impl Future<Output = OTelSdkResult> + Send {
                ready(Err(OTelSdkError::InternalFailure(
                    "unexpected call".to_string(),
                )))
            }

            fn shutdown(&mut self) -> OTelSdkResult {
                Err(OTelSdkError::InternalFailure("unexpected call".to_string()))
            }

            fn force_flush(&mut self) -> OTelSdkResult {
                Err(OTelSdkError::InternalFailure("unexpected call".to_string()))
            }

            fn set_resource(&mut self, _resource: &Resource) {}
        }

        let mock_exporter = TestExporter {};

        let mut filtering_exporter = FilteringExporter::new(mock_exporter, HashSet::new());
        filtering_exporter.set_resource(&Resource::builder_empty().build());
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/operation_source.rs:
--------------------------------------------------------------------------------

```rust
use std::{
    collections::HashMap,
    fs,
    path::PathBuf,
    sync::{Arc, Mutex},
};

use apollo_mcp_registry::{
    files,
    platform_api::operation_collections::{
        collection_poller::CollectionSource, event::CollectionEvent,
    },
    uplink::persisted_queries::{ManifestSource, event::Event as ManifestEvent},
};
use futures::{Stream, StreamExt as _};
use tracing::warn;

use crate::event::Event;

use super::RawOperation;

const OPERATION_DOCUMENT_EXTENSION: &str = "graphql";

/// The source of the operations exposed as MCP tools
#[derive(Clone, Debug)]
pub enum OperationSource {
    /// GraphQL document files
    Files(Vec<PathBuf>),

    /// Persisted Query manifest
    Manifest(ManifestSource),

    /// Operation collection
    Collection(CollectionSource),

    /// No operations provided
    None,
}

impl OperationSource {
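    /// Convert this operation source into a stream of [`Event`]s describing the
    /// current set of operations (or any errors encountered while loading them).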
    #[tracing::instrument(skip_all, fields(operation_source = ?self))]
    pub async fn into_stream(self) -> impl Stream<Item = Event> {
        match self {
            OperationSource::Files(paths) => Self::stream_file_changes(paths).boxed(),
            OperationSource::Manifest(manifest_source) => manifest_source
                .into_stream()
                .await
                .map(|event| {
                    let ManifestEvent::UpdateManifest(operations) = event;
                    Event::OperationsUpdated(
                        operations.into_iter().map(RawOperation::from).collect(),
                    )
                })
                .boxed(),
            OperationSource::Collection(collection_source) => collection_source
                .into_stream()
                .map(|event| match event {
                    CollectionEvent::UpdateOperationCollection(operations) => {
                        match operations
                            .iter()
                            .map(RawOperation::try_from)
                            .collect::<Result<Vec<_>, _>>()
                        {
                            Ok(operations) => Event::OperationsUpdated(operations),
                            Err(e) => Event::CollectionError(e),
                        }
                    }
                    CollectionEvent::CollectionError(error) => Event::CollectionError(error),
                })
                .boxed(),
            OperationSource::None => {
                futures::stream::once(async { Event::OperationsUpdated(vec![]) }).boxed()
            }
        }
    }

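    /// Watch each path (a single operation file or a directory of `.graphql`
    /// documents) and emit a combined `OperationsUpdated` event with the
    /// operations from all paths whenever any watched path changes.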
    #[tracing::instrument]
    fn stream_file_changes(paths: Vec<PathBuf>) -> impl Stream<Item = Event> {
        let path_count = paths.len();
        let state = Arc::new(Mutex::new(HashMap::<PathBuf, Vec<RawOperation>>::new()));
        futures::stream::select_all(paths.into_iter().map(|path| {
            let state = Arc::clone(&state);
            files::watch(path.as_ref())
                .filter_map(move |_| {
                    let path = path.clone();
                    let state = Arc::clone(&state);
                    async move {
                        let mut operations = Vec::new();
                        if path.is_dir() {
                            // Handle a directory
                            if let Ok(entries) = fs::read_dir(&path) {
                                for entry in entries.flatten() {
                                    let entry_path = entry.path();
                                    if entry_path.extension().and_then(|e| e.to_str())
                                        == Some(OPERATION_DOCUMENT_EXTENSION)
                                    {
                                        match fs::read_to_string(&entry_path) {
                                            Ok(content) => {
                                                // Be forgiving of empty files in the directory case.
                                                // It likely means a new file was created in an editor,
                                                // but the operation hasn't been written yet.
                                                if !content.trim().is_empty() {
                                                    operations.push(RawOperation::from((
                                                        content,
                                                        entry_path.to_str().map(|s| s.to_string()),
                                                    )));
                                                }
                                            }
                                            Err(e) => {
                                                return Some(Event::OperationError(
                                                    e,
                                                    path.to_str().map(|s| s.to_string()),
                                                ));
                                            }
                                        }
                                    }
                                }
                            }
                        } else {
                            // Handle a single file
                            match fs::read_to_string(&path) {
                                Ok(content) => {
                                    if !content.trim().is_empty() {
                                        operations.push(RawOperation::from((
                                            content,
                                            path.to_str().map(|s| s.to_string()),
                                        )));
                                    } else {
                                        warn!(?path, "Empty operation file");
                                    }
                                }
                                Err(e) => {
                                    return Some(Event::OperationError(
                                        e,
                                        path.to_str().map(|s| s.to_string()),
                                    ));
                                }
                            }
                        }
                        match state.lock() {
                            Ok(mut state) => {
                                state.insert(path.clone(), operations);
                                // All paths send an initial event on startup. To avoid repeated
                                // operation events on startup, wait until all paths have been
                                // loaded, then send a single event with the operations for all
                                // paths.
                                if state.len() == path_count {
                                    Some(Event::OperationsUpdated(
                                        state.values().flatten().cloned().collect::<Vec<_>>(),
                                    ))
                                } else {
                                    None
                                }
                            }
                            Err(_) => Some(Event::OperationError(
                                std::io::Error::other("State mutex poisoned"),
                                path.to_str().map(|s| s.to_string()),
                            )),
                        }
                    }
                })
                .boxed()
        }))
        .boxed()
    }
}

impl From<ManifestSource> for OperationSource {
    fn from(manifest_source: ManifestSource) -> Self {
        OperationSource::Manifest(manifest_source)
    }
}

impl From<Vec<PathBuf>> for OperationSource {
    fn from(paths: Vec<PathBuf>) -> Self {
        OperationSource::Files(paths)
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/health.rs:
--------------------------------------------------------------------------------

```rust
//! Health Check module for Apollo MCP Server
//!
//! Provides liveness and readiness checks for the MCP server, inspired by Apollo Router's health check implementation.
//!
//! The health check is exposed via HTTP endpoints and can be used by load balancers, container orchestrators, and monitoring systems to determine server health.

use std::{
    sync::{
        Arc,
        atomic::{AtomicBool, AtomicUsize, Ordering},
    },
    time::Duration,
};

use axum::http::StatusCode;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::debug;

/// Health status enumeration
#[derive(Debug, Serialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum HealthStatus {
    Up,
    Down,
}

/// Health response structure
#[derive(Debug, Serialize)]
pub struct Health {
    status: HealthStatus,
}

/// Configuration options for the readiness health interval sub-component.
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
#[serde(deny_unknown_fields)]
#[serde(default)]
pub struct ReadinessIntervalConfig {
    #[serde(deserialize_with = "humantime_serde::deserialize", default)]
    #[serde(serialize_with = "humantime_serde::serialize")]
    #[schemars(with = "Option<String>", default)]
    /// The sampling interval (default: 5s)
    pub sampling: Duration,

    #[serde(deserialize_with = "humantime_serde::deserialize")]
    #[serde(serialize_with = "humantime_serde::serialize")]
    #[schemars(with = "Option<String>")]
    /// The unready interval (default: 2 * sampling interval)
    pub unready: Option<Duration>,
}

impl Default for ReadinessIntervalConfig {
    fn default() -> Self {
        Self {
            sampling: Duration::from_secs(5),
            unready: None,
        }
    }
}

/// Configuration options for the readiness health sub-component.
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
#[serde(deny_unknown_fields)]
#[serde(default)]
pub struct ReadinessConfig {
    /// The readiness interval configuration
    pub interval: ReadinessIntervalConfig,

    /// How many rejections are allowed in an interval (default: 100)
    /// If this number is exceeded, the server will start to report unready.
    pub allowed: usize,
}

impl Default for ReadinessConfig {
    fn default() -> Self {
        Self {
            interval: Default::default(),
            allowed: 100,
        }
    }
}

/// Configuration options for the health check component.
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
#[serde(deny_unknown_fields)]
#[serde(default)]
pub struct HealthCheckConfig {
    /// Set to false to disable the health check
    pub enabled: bool,

    /// Optionally set a custom healthcheck path
    /// Defaults to /health
    pub path: String,

    /// Optionally specify readiness configuration
    pub readiness: ReadinessConfig,
}

impl Default for HealthCheckConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            path: "/health".to_string(),
            readiness: Default::default(),
        }
    }
}

#[derive(Clone)]
pub struct HealthCheck {
    config: HealthCheckConfig,
    live: Arc<AtomicBool>,
    ready: Arc<AtomicBool>,
    rejected: Arc<AtomicUsize>,
    ticker: Arc<tokio::task::JoinHandle<()>>,
}

impl HealthCheck {
    pub fn new(config: HealthCheckConfig) -> Self {
        let live = Arc::new(AtomicBool::new(true)); // Start as live
        let ready = Arc::new(AtomicBool::new(true)); // Start as ready
        let rejected = Arc::new(AtomicUsize::new(0));

        let allowed = config.readiness.allowed;
        let sampling_interval = config.readiness.interval.sampling;
        let recovery_interval = config
            .readiness
            .interval
            .unready
            .unwrap_or(2 * sampling_interval);

        let my_rejected = rejected.clone();
        let my_ready = ready.clone();

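        // Background readiness sampler: every sampling interval, check whether the
        // rejection count has exceeded the allowed threshold. If so, mark the server
        // unready, wait for the recovery interval, then reset the counter and mark
        // it ready again.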
        let ticker = tokio::spawn(async move {
            loop {
                let start = Instant::now() + sampling_interval;
                let mut interval = tokio::time::interval_at(start, sampling_interval);
                loop {
                    interval.tick().await;
                    if my_rejected.load(Ordering::Relaxed) > allowed {
                        debug!("Health check readiness threshold exceeded, marking as unready");
                        my_ready.store(false, Ordering::SeqCst);
                        tokio::time::sleep(recovery_interval).await;
                        my_rejected.store(0, Ordering::Relaxed);
                        my_ready.store(true, Ordering::SeqCst);
                        debug!("Health check readiness restored");
                        break;
                    }
                }
            }
        });

        Self {
            config,
            live,
            ready,
            rejected,
            ticker: Arc::new(ticker),
        }
    }

    pub fn record_rejection(&self) {
        self.rejected.fetch_add(1, Ordering::Relaxed);
    }

    pub fn config(&self) -> &HealthCheckConfig {
        &self.config
    }

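    /// Report health based on the query string: `ready` checks readiness, `live`
    /// checks liveness, and anything else (or no query) reports `UP`. Returns the
    /// health payload along with the matching HTTP status code.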
    pub fn get_health_state(&self, query: Option<&str>) -> (Health, StatusCode) {
        let mut status_code = StatusCode::OK;

        let health = if let Some(query) = query {
            let query_upper = query.to_ascii_uppercase();

            if query_upper.starts_with("READY") {
                let status = if self.ready.load(Ordering::SeqCst) {
                    HealthStatus::Up
                } else {
                    status_code = StatusCode::SERVICE_UNAVAILABLE;
                    HealthStatus::Down
                };
                Health { status }
            } else if query_upper.starts_with("LIVE") {
                let status = if self.live.load(Ordering::SeqCst) {
                    HealthStatus::Up
                } else {
                    status_code = StatusCode::SERVICE_UNAVAILABLE;
                    HealthStatus::Down
                };
                Health { status }
            } else {
                Health {
                    status: HealthStatus::Up,
                }
            }
        } else {
            Health {
                status: HealthStatus::Up,
            }
        };

        (health, status_code)
    }
}

impl Drop for HealthCheck {
    fn drop(&mut self) {
        self.ticker.abort();
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::{Duration, sleep};

    #[test]
    fn test_health_check_default_config() {
        let config = HealthCheckConfig::default();
        assert!(!config.enabled);
        assert_eq!(config.path, "/health");
        assert_eq!(config.readiness.allowed, 100);
        assert_eq!(config.readiness.interval.sampling, Duration::from_secs(5));
        assert!(config.readiness.interval.unready.is_none());
    }

    #[tokio::test]
    async fn test_health_check_rejection_tracking() {
        let mut config = HealthCheckConfig::default();
        config.readiness.allowed = 2;
        config.readiness.interval.sampling = Duration::from_millis(50);
        config.readiness.interval.unready = Some(Duration::from_millis(100));

        let health_check = HealthCheck::new(config);

        // Should be live and ready initially
        assert!(health_check.live.load(Ordering::SeqCst));
        assert!(health_check.ready.load(Ordering::SeqCst));

        // Record rejections beyond threshold
        for _ in 0..5 {
            health_check.record_rejection();
        }

        // Wait for the ticker to process
        sleep(Duration::from_millis(100)).await;

        // Should be still live but unready now
        assert!(health_check.live.load(Ordering::SeqCst));
        assert!(!health_check.ready.load(Ordering::SeqCst));
    }
}

```

--------------------------------------------------------------------------------
/.github/workflows/release-bins.yml:
--------------------------------------------------------------------------------

```yaml
name: Build Release Binaries
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
      - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
  workflow_dispatch:
    inputs:
      version:
        description: Version to publish
        required: true
        type: string

env:
  VERSION: ${{ inputs.version || github.ref_name }}

jobs:
  build:
    name: Release binaries
    strategy:
      matrix:
        include:
          # Linux compiles itself
          - os: ubuntu-24.04
            bundle: linux
            targets: cross-aarch64-unknown-linux-gnu cross-aarch64-unknown-linux-musl cross-x86_64-unknown-linux-gnu cross-x86_64-unknown-linux-musl

          # We can compile the windows target from linux
          - os: ubuntu-24.04
            bundle: windows
            targets: cross-aarch64-pc-windows-gnullvm cross-x86_64-pc-windows-gnullvm

          # Apple SDK does not allow us to cross compile from non-apple-branded
          # machines, so we run that bundle on a macOS runner
          - os: macos-latest
            bundle: darwin
            targets: cross-aarch64-apple-darwin cross-x86_64-apple-darwin
    runs-on: ${{ matrix.os }}
    permissions:
      contents: write
      packages: write
      attestations: write
      id-token: write
    steps:
      - uses: actions/checkout@v5
        with:
          ref: ${{ github.ref }}
      - uses: nixbuild/nix-quick-install-action@v30
        with:
          nix_conf: ${{ env.nix_conf }}
      - name: Restore and save Nix store
        uses: nix-community/cache-nix-action@v6
        with:
          primary-key: release-${{ matrix.bundle }}-${{ hashFiles('Cargo.lock', '**/Cargo.toml', 'flake.nix', 'flake.lock', 'rust-toolchain.toml') }}
          restore-prefixes-first-match: |
            release-${{ matrix.bundle }}-
            build-${{ runner.os }}-
          purge: true
          purge-prefixes: release-${{ matrix.bundle }}-
          purge-created: 0
          purge-primary-key: never
          gc-max-store-size: 5G

      - name: Build binaries
        run: |
          mkdir release
          for BUILD_TARGET in ${{ matrix.targets }}; do
            TARGET=${BUILD_TARGET#"cross-"}

            echo "Scaffolding release for $TARGET..."
            mkdir -p "release/$TARGET/dist"
            cp README.md LICENSE "release/$TARGET/dist"

            echo "Building release for $TARGET..."
            nix build .#$BUILD_TARGET
            cp result/bin/* "release/$TARGET/dist/"
          done

      - name: Sign Apple Binary
        if: ${{ runner.os == 'macOS' }}
        env:
          MACOS_CERT_BUNDLE_PASSWORD: ${{ secrets.MACOS_CERT_BUNDLE_PASSWORD }}
          MACOS_CERT_BUNDLE_BASE64: ${{ secrets.MACOS_CERT_BUNDLE_BASE64 }}
          MACOS_KEYCHAIN_PASSWORD: ${{ secrets.MACOS_KEYCHAIN_PASSWORD }}

          APPLE_NOTARIZATION_PASSWORD: ${{ secrets.APPLE_NOTARIZATION_PASSWORD }}
          APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
          APPLE_USERNAME: ${{ secrets.APPLE_USERNAME }}

          KEYCHAIN_NAME: "apollo-mcp-server-keychain"
          ENTITLEMENTS_PATH: "macos-entitlements.plist"
        run: |
          echo "Pre-check: Valid Codesigning Identify"
          security find-identity -v -p codesigning
          echo "Pre-check: Codesigning Identify"
          security find-identity -p codesigning
          echo "Pre-check: Any Identify"
          security find-identity

          echo "|||||||||||||||||||||||||||||||||||||||||||||"

          # Create a temporary keychain
          EPHEMERAL_KEYCHAIN=`mktemp -d`

          echo "Creating keychain..."
          security create-keychain -p "${MACOS_KEYCHAIN_PASSWORD}" $KEYCHAIN_NAME
          echo "Removing relock timeout on keychain..."
          security set-keychain-settings $KEYCHAIN_NAME

          echo "Decoding certificate bundle..."
          echo "${MACOS_CERT_BUNDLE_BASE64}" | base64 --decode > $EPHEMERAL_KEYCHAIN/certificate.p12

          echo "Importing codesigning certificate to build keychain..."
          security import $EPHEMERAL_KEYCHAIN/certificate.p12 -k $KEYCHAIN_NAME -P "${MACOS_CERT_BUNDLE_PASSWORD}" -T /usr/bin/codesign

          echo "Adding the codesign tool to the security partition-list..."
          security set-key-partition-list -S "apple-tool:,apple:,codesign:" -s -k "${MACOS_KEYCHAIN_PASSWORD}" $KEYCHAIN_NAME

          echo "Setting default keychain..."
          security default-keychain -d user -s $KEYCHAIN_NAME

          echo "Unlocking keychain..."
          security unlock-keychain -p "${MACOS_KEYCHAIN_PASSWORD}" $KEYCHAIN_NAME

          echo "Verifying keychain is set up correctly..."
          security find-identity -v -p codesigning

          echo "|||||||||||||||||||||||||||||||||||||||||||||"

          echo "Post-check: Valid Codesigning Identify"
          security find-identity -v -p codesigning
          echo "Post-check: Codesigning Identify"
          security find-identity -p codesigning
          echo "Post-check: Any Identify"
          security find-identity

          echo "|||||||||||||||||||||||||||||||||||||||||||||"
          # Sign each binary
          for RELEASE in release/*/; do
            RELEASE=${RELEASE%/}
            RELEASE=${RELEASE#"release/"}

            BINARY_PATH="release/$RELEASE/dist/apollo-mcp-server"
            echo "Starting code signing for $RELEASE..."

            echo "> Signing code (step 1)..."
            codesign --sign "$APPLE_TEAM_ID" --options runtime --entitlements $ENTITLEMENTS_PATH --force --timestamp "$BINARY_PATH" -v

            echo "> Signing code (step 2)..."
            codesign -vvv --deep --strict "$BINARY_PATH"

            echo "> Zipping dist..."
            TMP_DIST=`mktemp -d`
            mkdir $TMP_DIST/dist
            cp "$BINARY_PATH" "$TMP_DIST/dist/"
            zip -r "$TMP_DIST/apollo-mcp-server-$VERSION.zip" "$TMP_DIST/dist"

            echo "> Beginning notarization process (might take up to 20m)..."
            xcrun notarytool submit "$TMP_DIST/apollo-mcp-server-$VERSION.zip" \
              --apple-id "$APPLE_USERNAME" \
              --password "$APPLE_NOTARIZATION_PASSWORD" \
              --team-id "$APPLE_TEAM_ID" \
              --wait \
              --timeout 20m

            echo "> Cleaning up release..."
            rm -rf $TMP_DIST
          done

          echo "Cleaning up ephemeral keychain..."
          rm -rf $EPHEMERAL_KEYCHAIN/

      - name: Create release bundles
        run: |
          mkdir artifacts
          for RELEASE in release/*/; do
            # Remove trailing slash and leading parent
            RELEASE=${RELEASE%/}
            RELEASE=${RELEASE#"release/"}
            RENAMED=${RELEASE/x86_64-pc-windows-gnullvm/x86_64-pc-windows-msvc}
            RENAMED=${RENAMED/aarch64-pc-windows-gnullvm/aarch64-pc-windows-msvc}

            echo "Creating an artifact for $RELEASE"
            tar -C release/$RELEASE -cf - dist/ | gzip -9 > artifacts/apollo-mcp-server-$VERSION-$RENAMED.tar.gz
          done

      # We only need to generate the config schema for a release once, so we do it
      # on the linux host since it is the cheapest.
      - name: Generate config schema
        if: ${{ matrix.bundle == 'linux' }}
        run: |
          ./release/x86_64-unknown-linux-musl/dist/config-schema > artifacts/config.schema.json

      - name: Upload release artifacts
        uses: softprops/action-gh-release@v2
        with:
          files: artifacts/*
          prerelease: ${{ contains(env.VERSION, '-rc.') }}
          make_latest: false # this runs for each combination in the matrix - don't mark as latest until all are done

      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v2
        with:
          subject-path: "artifacts/*"

  publish:
    name: Publish the release
    needs: build
    runs-on: ubuntu-24.04
    steps:
      - name: Make latest
        uses: softprops/action-gh-release@v2
        with:
          prerelease: ${{ contains(env.VERSION, '-rc.') }}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-registry/src/uplink/persisted_queries/manifest_poller.rs:
--------------------------------------------------------------------------------

```rust
use std::collections::HashMap;
use std::path::PathBuf;
use std::pin::Pin;

use super::event::Event;
use crate::uplink::UplinkConfig;
use crate::uplink::persisted_queries::manifest::PersistedQueryManifest;
use crate::uplink::persisted_queries::manifest::SignedUrlChunk;
use crate::uplink::persisted_queries::{
    MaybePersistedQueriesManifestChunks, PersistedQueriesManifestChunk,
    PersistedQueriesManifestQuery,
};
use crate::uplink::stream_from_uplink_transforming_new_response;
use futures::prelude::*;
use reqwest::Client;
use tokio::fs::read_to_string;
use tower::BoxError;

/// Holds the current state of persisted queries
#[derive(Debug)]
pub struct PersistedQueryManifestPollerState {
    /// The current persisted query manifest
    pub persisted_query_manifest: PersistedQueryManifest,
}

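/// Where the persisted query manifest comes from: local files loaded once, local
/// files watched for changes, or Apollo Uplink.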
#[derive(Clone, Debug)]
pub enum ManifestSource {
    LocalStatic(Vec<PathBuf>),
    LocalHotReload(Vec<PathBuf>),
    Uplink(UplinkConfig),
}

impl ManifestSource {
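    /// Convert this manifest source into a stream of manifest update [`Event`]s.
    /// Errors from the underlying stream are logged and mapped to an empty manifest.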
    pub async fn into_stream(self) -> impl Stream<Item = Event> {
        match create_manifest_stream(self).await {
            Ok(stream) => stream
                .map(|result| match result {
                    Ok(manifest) => Event::UpdateManifest(
                        manifest
                            .iter()
                            .map(|(k, v)| (k.operation_id.clone(), v.clone()))
                            .collect(),
                    ),
                    Err(e) => {
                        tracing::error!("error from manifest stream: {}", e);
                        Event::UpdateManifest(vec![])
                    }
                })
                .boxed(),
            Err(e) => {
                tracing::error!("failed to create manifest stream: {}", e);
                futures::stream::empty().boxed()
            }
        }
    }
}

async fn manifest_from_uplink_chunks(
    new_chunks: Vec<PersistedQueriesManifestChunk>,
    http_client: Client,
) -> Result<PersistedQueryManifest, BoxError> {
    let mut new_persisted_query_manifest = PersistedQueryManifest::default();
    tracing::debug!("ingesting new persisted queries: {:?}", &new_chunks);
    // TODO: consider doing these fetches in parallel
    for new_chunk in new_chunks {
        fetch_chunk_into_manifest(
            new_chunk,
            &mut new_persisted_query_manifest,
            http_client.clone(),
        )
        .await?
    }

    tracing::debug!(
        "Loaded {} persisted queries.",
        new_persisted_query_manifest.len()
    );

    Ok(new_persisted_query_manifest)
}

async fn fetch_chunk_into_manifest(
    chunk: PersistedQueriesManifestChunk,
    manifest: &mut PersistedQueryManifest,
    http_client: Client,
) -> Result<(), BoxError> {
    let mut it = chunk.urls.iter().peekable();
    while let Some(chunk_url) = it.next() {
        match fetch_chunk(http_client.clone(), chunk_url).await {
            Ok(chunk) => {
                manifest.add_chunk(&chunk);
                return Ok(());
            }
            Err(e) => {
                if it.peek().is_some() {
                    // There's another URL to try, so log as debug and move on.
                    tracing::debug!(
                        "failed to fetch persisted query list chunk from {}: {}. \
                         Other endpoints will be tried",
                        chunk_url,
                        e
                    );
                    continue;
                } else {
                    // No more URLs; fail the function.
                    return Err(e);
                }
            }
        }
    }
    // The loop always returns unless there's another iteration after it, so the
    // only way we can fall off the loop is if we never entered it.
    Err("persisted query chunk did not include any URLs to fetch operations from".into())
}

async fn fetch_chunk(http_client: Client, chunk_url: &String) -> Result<SignedUrlChunk, BoxError> {
    let chunk = http_client
        .get(chunk_url.clone())
        .send()
        .await
        .and_then(|r| r.error_for_status())
        .map_err(|e| -> BoxError {
            format!("error fetching persisted queries manifest chunk from {chunk_url}: {e}").into()
        })?
        .json::<SignedUrlChunk>()
        .await
        .map_err(|e| -> BoxError {
            format!("error reading body of persisted queries manifest chunk from {chunk_url}: {e}")
                .into()
        })?;

    chunk.validate()
}

/// A stream of manifest updates
type ManifestStream = dyn Stream<Item = Result<PersistedQueryManifest, BoxError>> + Send + 'static;

async fn create_manifest_stream(
    source: ManifestSource,
) -> Result<Pin<Box<ManifestStream>>, BoxError> {
    match source {
        ManifestSource::LocalStatic(paths) => Ok(stream::once(load_local_manifests(paths)).boxed()),
        ManifestSource::LocalHotReload(paths) => Ok(create_hot_reload_stream(paths).boxed()),
        ManifestSource::Uplink(uplink_config) => {
            let client = Client::builder()
                .timeout(uplink_config.timeout)
                .gzip(true)
                .build()?;
            Ok(create_uplink_stream(uplink_config, client).boxed())
        }
    }
}

async fn load_local_manifests(paths: Vec<PathBuf>) -> Result<PersistedQueryManifest, BoxError> {
    let mut complete_manifest = PersistedQueryManifest::default();

    for path in paths.iter() {
        let raw_file_contents = read_to_string(path).await.map_err(|e| -> BoxError {
            format!(
                "Failed to read persisted query list file at path: {}, {}",
                path.to_string_lossy(),
                e
            )
            .into()
        })?;

        let chunk = SignedUrlChunk::parse_and_validate(&raw_file_contents)?;
        complete_manifest.add_chunk(&chunk);
    }

    tracing::debug!(
        "Loaded {} persisted queries from local files.",
        complete_manifest.len()
    );

    Ok(complete_manifest)
}

fn create_uplink_stream(
    uplink_config: UplinkConfig,
    http_client: Client,
) -> impl Stream<Item = Result<PersistedQueryManifest, BoxError>> {
    stream_from_uplink_transforming_new_response::<
        PersistedQueriesManifestQuery,
        MaybePersistedQueriesManifestChunks,
        Option<PersistedQueryManifest>,
    >(uplink_config, move |response| {
        let http_client = http_client.clone();
        Box::new(Box::pin(async move {
            match response {
                Some(chunks) => manifest_from_uplink_chunks(chunks, http_client)
                    .await
                    .map(Some)
                    .map_err(|e| -> BoxError { e }),
                None => Ok(None),
            }
        }))
    })
    .filter_map(|result| async move {
        match result {
            Ok(Some(manifest)) => Some(Ok(manifest)),
            Ok(None) => Some(Ok(PersistedQueryManifest::default())),
            Err(e) => Some(Err(e.into())),
        }
    })
}

fn create_hot_reload_stream(
    paths: Vec<PathBuf>,
) -> impl Stream<Item = Result<PersistedQueryManifest, BoxError>> {
    // Create file watchers for each path
    let file_watchers = paths.into_iter().map(|raw_path| {
        crate::files::watch(raw_path.as_ref()).then(move |_| {
            let path = raw_path.clone();
            async move {
                match read_to_string(&path).await {
                    Ok(raw_file_contents) => {
                        match SignedUrlChunk::parse_and_validate(&raw_file_contents) {
                            Ok(chunk) => Ok((path, chunk)),
                            Err(e) => Err(e),
                        }
                    }
                    Err(e) => Err(e.into()),
                }
            }
            .boxed()
        })
    });

    // We need to keep track of the local manifest chunks so we can replace them when
    // they change.
    let mut chunks: HashMap<String, SignedUrlChunk> = HashMap::new();

    // Combine all watchers into a single stream
    stream::select_all(file_watchers).map(move |result| {
        result.map(|(path, chunk)| {
            tracing::debug!(
                "hot reloading persisted query manifest file at path: {}",
                path.to_string_lossy()
            );
            chunks.insert(path.to_string_lossy().to_string(), chunk);

            let mut manifest = PersistedQueryManifest::default();
            for chunk in chunks.values() {
                manifest.add_chunk(chunk);
            }

            manifest
        })
    })
}

```

--------------------------------------------------------------------------------
/.github/workflows/sync-develop.yml:
--------------------------------------------------------------------------------

```yaml
name: Sync main → develop

on:
  pull_request:
    types: [closed]
    branches: [main]
  workflow_dispatch:
    inputs:
      head_branch:
        description: "Branch to merge FROM (default: main)."
        required: false
        default: "main"
      base_branch:
        description: "Branch to merge INTO (default: develop)."
        required: false
        default: "develop"
      source_pr_number:
        description: "If testing, the PR number to comment on (optional)."
        required: false
      test_mode:
        description: "Bypass PR/push guards for manual testing"
        required: false
        default: "true"

permissions:
  contents: write
  pull-requests: write
  issues: write

concurrency:
  group: sync-main-into-develop
  cancel-in-progress: false

jobs:
  open-sync-pr:
    if: |
      github.actor != 'github-actions[bot]' && (
        (
            github.event_name == 'pull_request' && github.event.pull_request.merged == true
        ) || (
            github.event_name == 'workflow_dispatch' && (inputs.test_mode == 'true')
        )
      )
    runs-on: ubuntu-latest

    env:
      # Use inputs for dispatch (testing), defaults for normal triggers
      HEAD_BRANCH: ${{ (github.event_name == 'workflow_dispatch' && inputs.head_branch) || 'main' }}
      BASE_BRANCH: ${{ (github.event_name == 'workflow_dispatch' && inputs.base_branch) || 'develop' }}
      SOURCE_PR: ${{ (github.event_name == 'pull_request' && github.event.pull_request.number) || inputs.source_pr_number || '' }}
      GH_TOKEN: ${{ secrets.GH_PAT }}

    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          token: ${{ secrets.GH_PAT }}

      - name: Configure git author
        run: |
          git config --local user.name "Apollo Bot"
          git config --local user.email "[email protected]"

      # Generate branch name from PR# when available, otherwise use first 7 commit SHA characters
      - name: Compute branch/name metadata
        id: meta
        run: |
          pr=${{ github.event.pull_request.number }}
          echo "sync_branch=sync/main-into-develop-pr-${pr}" >> $GITHUB_OUTPUT
          echo "sync_title=Sync main → develop (PR #${pr})" >> $GITHUB_OUTPUT
          echo "sync_body=Auto-opened after merging \`${{ github.event.pull_request.head.ref }}\` into \`main\`. Source PR: #${pr}." >> $GITHUB_OUTPUT
          echo "conflict_branch=conflict/main-into-develop-pr-${pr}" >> $GITHUB_OUTPUT
          echo "conflict_title=Sync main → develop (resolve conflicts)" >> $GITHUB_OUTPUT
          echo "conflict_body=Opened from a copy of \`main\` so conflicts can be resolved without pushing to a protected branch." >> $GITHUB_OUTPUT

      # Short-lived sync branch from develop and merge main into it (do NOT rebase)
      # use +e to stop errors from short-circuiting the script
      - name: Prepare sync branch
        id: prep
        run: |
          set -e
          git fetch origin "${BASE_BRANCH}" "${HEAD_BRANCH}"
          git switch -c "${{ steps.meta.outputs.sync_branch }}" "origin/${BASE_BRANCH}"
          set +e
          git merge --no-ff "origin/${HEAD_BRANCH}"
          rc=$?
          set -e
          git add -A || true
          git commit -m "WIP: merge ${HEAD_BRANCH} into ${BASE_BRANCH} via ${{ steps.meta.outputs.sync_branch }}" || true
          git push origin HEAD
          
          right=$(git rev-list --count --right-only "origin/${BASE_BRANCH}...HEAD")
          
          echo "merge_status=$rc" >> "$GITHUB_OUTPUT"
          echo "sync_right=$right" >> "$GITHUB_OUTPUT"
          echo "Merge exit=$rc, sync branch ahead-by=$right"

      # If no merge conflicts and there are changes, open the PR targeting develop
      - name: Open clean PR to develop
        id: sync_pr
        if: ${{ steps.prep.outputs.merge_status == '0' && steps.prep.outputs.sync_right != '0' }}
        run: |
          # Avoid duplicate PRs
          existing=$(gh pr list --base "${BASE_BRANCH}" --head "${{ steps.meta.outputs.sync_branch }}" --state open --json number --jq '.[0].number' || true)
          if [ -n "$existing" ] && [ "$existing" != "null" ]; then
            echo "pr_number=$existing" >> "$GITHUB_OUTPUT"
            url=$(gh pr view "$existing" --json url --jq .url)
            echo "pr_url=$url" >> "$GITHUB_OUTPUT"
            exit 0
          fi

          gh pr create \
            --base "${BASE_BRANCH}" \
            --head "${{ steps.meta.outputs.sync_branch }}" \
            --title "${{ steps.meta.outputs.sync_title }}" \
            --body  "${{ steps.meta.outputs.sync_body }} (created via gh CLI)" \
            --label back-merge \
            --label skip-changeset \
            --label automation

          # Fetch the newly created PR number, then its URL so that we display in a PR comment
          num=$(gh pr list --base "${BASE_BRANCH}" --head "${{ steps.meta.outputs.sync_branch }}" --state open --json number --jq '.[0].number')
          url=$(gh pr view "$num" --json url --jq .url)
          echo "pr_number=$num" >> "$GITHUB_OUTPUT"
          echo "pr_url=$url" >> "$GITHUB_OUTPUT"

      # If the merge hit conflicts, open a DIRECT PR: HEAD_BRANCH -> BASE_BRANCH so conflicts can be resolved prior to merge
      - name: Open conflict PR
        id: conflict_pr
        if: ${{ steps.prep.outputs.merge_status != '0' }}
        run: |
          set -e
          git fetch origin "${HEAD_BRANCH}" "${BASE_BRANCH}"

          git switch -c "${{ steps.meta.outputs.conflict_branch }}" "origin/${HEAD_BRANCH}"
          git push -u origin HEAD
          
          # Skip if no diff between conflict branch and base (should be unlikely)
          right=$(git rev-list --right-only --count "origin/${BASE_BRANCH}...origin/${{ steps.meta.outputs.conflict_branch }}")
          if [ "$right" -eq 0 ]; then
            echo "No diff between ${HEAD_BRANCH} and ${BASE_BRANCH}; nothing to open."
            exit 0
          fi
          
          # Reuse existing open PR if present
          existing=$(gh pr list --base "${BASE_BRANCH}" --head "${{ steps.meta.outputs.conflict_branch }}" --state open --json number --jq '.[0].number' || true)
          if [ -n "$existing" ] && [ "$existing" != "null" ]; then
            echo "pr_number=$existing" >> "$GITHUB_OUTPUT"
            url=$(gh pr view "$existing" --json url --jq .url)
            echo "pr_url=$url" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          
          gh pr create \
            --base "${BASE_BRANCH}" \
            --head "${{ steps.meta.outputs.conflict_branch }}" \
            --title "${{ steps.meta.outputs.conflict_title }}" \
            --body  "${{ steps.meta.outputs.conflict_body }}" \
            --label back-merge \
            --label automation \
            --label skip-changeset \
            --label conflicts
          
          # Fetch the newly created conflict PR number, then its URL so that we display in a PR comment
          num=$(gh pr list --base "${BASE_BRANCH}" --head "${{ steps.meta.outputs.conflict_branch }}" --state open --json number --jq '.[0].number')
          url=$(gh pr view "$num" --json url --jq .url)
          echo "pr_number=$num" >> "$GITHUB_OUTPUT"
          echo "pr_url=$url" >> "$GITHUB_OUTPUT"

      # Comment back on the ORIGINAL merged PR with a link to the sync PR
      - name: Comment on source PR with sync PR link
        if: ${{ env.SOURCE_PR != '' && (steps.sync_pr.outputs.pr_number != '' || steps.conflict_pr.outputs.pr_number != '') }}
        uses: actions/github-script@v7
        with:
          script: |
            const owner = context.repo.owner;
            const repo = context.repo.repo;
            const issue_number = Number(process.env.SOURCE_PR);

            const hadConflicts = '${{ steps.prep.outputs.merge_status }}' !== '0';
            const syncUrl = '${{ steps.sync_pr.outputs.pr_url || steps.conflict_pr.outputs.pr_url }}';
            const head = process.env.HEAD_BRANCH;
            const base = process.env.BASE_BRANCH;

            const status = hadConflicts ? 'conflicts ❗' : 'clean ✅';
            const note = hadConflicts
              ? 'Opened from a copy of main so conflicts can be resolved safely.'
              : 'Opened from a sync branch created off develop.';

            const body = [
            `Opened sync PR **${head} → ${base}**: ${syncUrl}`,
            ``,
            `Merge status: **${status}**`,
            note
            ].join('\n');
            
            await github.rest.issues.createComment({ owner, repo, issue_number, body });
```

--------------------------------------------------------------------------------
/docs/source/telemetry.mdx:
--------------------------------------------------------------------------------

```markdown
---
title: OpenTelemetry Integration
---

AI agents create unpredictable usage patterns and complex request flows that are hard to monitor with traditional methods. The Apollo MCP Server's OpenTelemetry integration provides the visibility you need to run a reliable service for AI agents.

## What you can monitor

- **Agent behavior**: Which tools and operations are used most frequently
- **Performance**: Response times and bottlenecks across tool executions and GraphQL operations  
- **Reliability**: Error rates, failed operations, and request success patterns
- **Distributed request flows**: Complete traces from agent request through your Apollo Router and subgraphs, with automatic trace context propagation

## How it works

The server exports metrics, traces, and events using the OpenTelemetry Protocol (OTLP), ensuring compatibility with your existing observability stack and seamless integration with other instrumented Apollo services.

## Usage guide

### Quick start: Local development

The fastest way to see Apollo MCP Server telemetry in action is with a local setup that requires only Docker.

#### 5-minute setup
1. Start a local observability stack: `docker run -p 3000:3000 -p 4317:4317 -p 4318:4318 --rm -ti grafana/otel-lgtm`
1. Add telemetry config to your `config.yaml`:
   ```yaml
   telemetry:
     exporters:
       metrics:
         otlp:
           endpoint: "http://localhost:4318/v1/metrics"
           protocol: "http/protobuf"
       tracing:
         otlp:
           endpoint: "http://localhost:4318/v1/traces" 
           protocol: "http/protobuf"
   ```
1. Restart your MCP server with the updated config
1. Open Grafana at `http://localhost:3000` and explore your telemetry data. Default credentials are username `admin` with password `admin`.

### Production deployment

For production environments, configure your MCP server to send telemetry to any OTLP-compatible backend. The Apollo MCP Server uses standard OpenTelemetry protocols, ensuring compatibility with all major observability platforms.

#### Configuration example

```yaml
telemetry:
  service_name: "mcp-server-prod"      # Custom service name
  exporters:
    metrics:
      otlp:
        endpoint: "https://your-metrics-endpoint"
        protocol: "http/protobuf"       # or "grpc"
    tracing:
      otlp:
        endpoint: "https://your-traces-endpoint" 
        protocol: "http/protobuf"
```

#### Observability platform integration

The MCP server works with any OTLP-compatible backend. Consult your provider's documentation for specific endpoint URLs and authentication:

- [Datadog OTLP Integration](https://docs.datadoghq.com/opentelemetry/setup/otlp_ingest_in_the_agent/) - Native OTLP support
- [New Relic OpenTelemetry](https://docs.newrelic.com/docs/opentelemetry/best-practices/opentelemetry-otlp/) - Direct OTLP ingestion
- [AWS Observability](https://aws-otel.github.io/docs/introduction) - Via AWS Distro for OpenTelemetry
- [Grafana Cloud](https://grafana.com/docs/grafana-cloud/send-data/otlp/) - Hosted Grafana with OTLP
- [Honeycomb](https://docs.honeycomb.io/getting-data-in/opentelemetry/) - OpenTelemetry-native platform
- [Jaeger](https://www.jaegertracing.io/docs/1.50/deployment/) - Self-hosted tracing
- [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/deployment/) - Self-hosted with flexible routing

#### Production configuration best practices

##### Environment and security
```yaml
# Set the deployment environment via a shell environment variable, for example:
#   export ENVIRONMENT=production

telemetry:
  service_name: "apollo-mcp-server"
  version: "1.0.0"                     # Version for correlation
  exporters:
    metrics:
      otlp:
        endpoint: "https://secure-endpoint"  # Always use HTTPS
        protocol: "http/protobuf"           # Generally more reliable than gRPC
```

##### Performance considerations
- **Protocol choice**: `http/protobuf` is often more reliable through firewalls and load balancers than `grpc`
- **Batch export**: OpenTelemetry automatically batches telemetry data for efficiency
- **Network timeouts**: Default timeouts are usually appropriate, but monitor for network issues

##### Resource correlation
- The `ENVIRONMENT` variable automatically tags all telemetry with `deployment.environment.name`
- Use consistent `service_name` across all your Apollo infrastructure (Router, subgraphs, MCP server)
- Set `version` to track releases and correlate issues with deployments

#### Troubleshooting

##### Common issues
- **Connection refused**: Verify endpoint URL and network connectivity
- **Authentication errors**: Check if your provider requires API keys or special headers
- **Missing data**: Confirm your observability platform supports OTLP and is configured to receive data
- **High memory usage**: Monitor telemetry export frequency and consider sampling for high-volume environments

##### Verification
```bash
# Check that your OTLP endpoint is reachable from the server host
curl -v https://your-endpoint/v1/metrics

# Monitor server logs for OpenTelemetry export errors
./apollo-mcp-server --config config.yaml 2>&1 | grep -i "otel\|telemetry"
```

## Configuration Reference

The OpenTelemetry integration is configured via the `telemetry` section of the config file. See the [configuration reference](/apollo-mcp-server/config-file#telemetry) for all available options.

## Emitted Metrics

The server emits the following metrics, which you can use for monitoring and alerting. All duration metrics are reported in milliseconds.

| Metric Name | Type | Description | Attributes |
|---|---|---|---|
| `apollo.mcp.initialize.count` | Counter | Incremented for each `initialize` request. | (none) |
| `apollo.mcp.list_tools.count` | Counter | Incremented for each `list_tools` request. | (none) |
| `apollo.mcp.get_info.count` | Counter | Incremented for each `get_info` request. | (none) |
| `apollo.mcp.tool.count` | Counter | Incremented for each tool call. | `tool_name`, `success` (bool) |
| `apollo.mcp.tool.duration` | Histogram | Measures the execution duration of each tool call. | `tool_name`, `success` (bool) |
| `apollo.mcp.operation.count`| Counter | Incremented for each downstream GraphQL operation executed by a tool. | `operation.id`, `operation.type` ("persisted_query" or "operation"), `success` (bool) |
| `apollo.mcp.operation.duration`| Histogram | Measures the round-trip duration of each downstream GraphQL operation. | `operation.id`, `operation.type`, `success` (bool) |

In addition to these metrics, the server also emits standard [HTTP server metrics](https://opentelemetry.io/docs/specs/semconv/http/http-metrics/) (e.g., `http.server.duration`, `http.server.active_requests`) courtesy of the `axum-otel-metrics` library.


## Emitted Traces

Spans are generated for the following actions:

-   **Incoming HTTP Requests**: A root span is created for every HTTP request to the MCP server.
-   **MCP Handler Methods**: Nested spans are created for each of the main MCP protocol methods (`initialize`, `call_tool`, `list_tools`).
-   **Tool Execution**: `call_tool` spans contain nested spans for the specific tool being executed (e.g., `introspect`, `search`, or a custom GraphQL operation).
-   **Downstream GraphQL Calls**: The `execute` tool and custom operation tools create child spans for their outgoing `reqwest` HTTP calls, capturing the duration of the downstream request. The `traceparent` and `tracestate` headers are propagated automatically, enabling distributed traces.

### Cardinality Control

High-cardinality metrics can occur in MCP servers with a large number of tools, or when clients are allowed to generate freeform operations.
To prevent performance issues and reduce costs, the Apollo MCP Server provides two mechanisms for controlling metric cardinality: trace sampling and attribute filtering.

#### Trace Sampling

Configure the Apollo MCP Server to sample traces sent to your OpenTelemetry Collector using the `sampler` field in the `telemetry.tracing` configuration:

- **always_on** - Send every trace
- **always_off** - Disable trace collection entirely
- **0.0-1.0** - Send the specified fraction of traces
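
As a minimal sketch (the key placement follows the `telemetry.tracing` wording above and is illustrative; confirm the exact nesting against the telemetry configuration reference), a ratio sampler that keeps roughly 25% of traces could look like:

```yaml
telemetry:
  tracing:
    sampler: 0.25   # ratio sampler; "always_on" and "always_off" are the other accepted values
```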

#### Attribute Filtering

The Apollo MCP Server configuration also allows for omitting attributes such as `tool_name` or `operation_id` that can often lead to high cardinality metrics in systems that treat each collected attribute value as a new metric.
Both traces and metrics have an `omitted_attributes` option that takes a list of strings. Any attribute name in the list will be filtered out and not sent to the collector.
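For example, a hedged sketch (attribute names and key placement are illustrative, not authoritative) that omits the `tool_name` attribute from both metrics and traces:

```yaml
telemetry:
  exporters:
    metrics:
      omitted_attributes:
        - "tool_name"
    tracing:
      omitted_attributes:
        - "tool_name"
```
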
For detailed configuration options, see the [telemetry configuration reference](/apollo-mcp-server/config-file#telemetry).

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/minify.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::schema::{ExtendedType, Type};
use regex::Regex;
use std::{collections::HashMap, sync::OnceLock};

pub trait MinifyExt {
    /// Serialize in minified form
    fn minify(&self) -> String;
}

impl MinifyExt for ExtendedType {
    fn minify(&self) -> String {
        match self {
            ExtendedType::Scalar(scalar_type) => minify_scalar(scalar_type),
            ExtendedType::Object(object_type) => minify_object(object_type),
            ExtendedType::Interface(interface_type) => minify_interface(interface_type),
            ExtendedType::Union(union_type) => minify_union(union_type),
            ExtendedType::Enum(enum_type) => minify_enum(enum_type),
            ExtendedType::InputObject(input_object_type) => minify_input_object(input_object_type),
        }
    }
}

fn minify_scalar(scalar_type: &apollo_compiler::schema::ScalarType) -> String {
    shorten_scalar_names(scalar_type.name.as_str()).to_string()
}

fn minify_object(object_type: &apollo_compiler::schema::ObjectType) -> String {
    let fields = minify_fields(&object_type.fields);
    let type_name = format_type_name_with_description(&object_type.name, &object_type.description);
    let interfaces = format_interfaces(&object_type.implements_interfaces);

    if interfaces.is_empty() {
        format!("T:{type_name}:{fields}")
    } else {
        format!("T:{type_name}<{interfaces}>:{fields}")
    }
}

fn minify_interface(interface_type: &apollo_compiler::schema::InterfaceType) -> String {
    let fields = minify_fields(&interface_type.fields);
    let type_name =
        format_type_name_with_description(&interface_type.name, &interface_type.description);
    format!("F:{type_name}:{fields}")
}

fn minify_union(union_type: &apollo_compiler::schema::UnionType) -> String {
    let member_types = union_type
        .members
        .iter()
        .map(|member| member.as_str())
        .collect::<Vec<&str>>()
        .join(",");
    let type_name = format_type_name_with_description(&union_type.name, &union_type.description);
    format!("U:{type_name}:{member_types}")
}

fn minify_enum(enum_type: &apollo_compiler::schema::EnumType) -> String {
    let values = enum_type
        .values
        .keys()
        .map(|value| value.as_str())
        .collect::<Vec<&str>>()
        .join(",");
    let type_name = format_type_name_with_description(&enum_type.name, &enum_type.description);
    format!("E:{type_name}:{values}")
}

fn minify_input_object(input_object_type: &apollo_compiler::schema::InputObjectType) -> String {
    let fields = minify_input_fields(&input_object_type.fields);
    let type_name =
        format_type_name_with_description(&input_object_type.name, &input_object_type.description);
    format!("I:{type_name}:{fields}")
}

// We only minify directives that assist the LLM in understanding the schema. This currently includes @deprecated
fn minify_directives(directives: &apollo_compiler::ast::DirectiveList) -> String {
    let mut result = String::new();

    static DIRECTIVES_TO_MINIFY: OnceLock<HashMap<&str, &str>> = OnceLock::new();
    let directives_to_minify =
        DIRECTIVES_TO_MINIFY.get_or_init(|| HashMap::from([("deprecated", "D")]));

    for directive in directives.iter() {
        if let Some(minified_name) = directives_to_minify.get(directive.name.as_str()) {
            // Since we're only handling @deprecated right now we can just add the reason and minify it.
            // We should handle this more generically in the future.
            if !directive.arguments.is_empty()
                && let Some(reason) = directive
                    .arguments
                    .iter()
                    .find(|a| a.name == "reason")
                    .and_then(|a| a.value.as_str())
            {
                result.push_str(&format!(
                    "@{}(\"{}\")",
                    minified_name,
                    normalize_description(reason)
                ));
            } else {
                result.push_str(&format!("@{}", minified_name));
            }
        }
    }
    result
}

fn minify_fields(
    fields: &apollo_compiler::collections::IndexMap<
        apollo_compiler::Name,
        apollo_compiler::schema::Component<apollo_compiler::ast::FieldDefinition>,
    >,
) -> String {
    let mut result = String::new();

    for (field_name, field) in fields.iter() {
        // Add description if present
        if let Some(desc) = field.description.as_ref() {
            result.push_str(&format!("\"{}\"", normalize_description(desc)));
        }

        // Add field name
        result.push_str(field_name.as_str());

        // Add arguments if present
        if !field.arguments.is_empty() {
            result.push('(');
            result.push_str(&minify_arguments(&field.arguments));
            result.push(')');
        }

        // Add field type
        result.push(':');
        result.push_str(&type_name(&field.ty));
        result.push_str(&minify_directives(&field.directives));

        result.push(',');
    }

    // Remove trailing comma
    if !result.is_empty() {
        result.pop();
    }

    result
}

fn minify_input_fields(
    fields: &apollo_compiler::collections::IndexMap<
        apollo_compiler::Name,
        apollo_compiler::schema::Component<apollo_compiler::ast::InputValueDefinition>,
    >,
) -> String {
    let mut result = String::new();

    for (field_name, field) in fields.iter() {
        // Add description if present
        if let Some(desc) = field.description.as_ref() {
            result.push_str(&format!("\"{}\"", normalize_description(desc)));
        }

        // Add field name and type
        result.push_str(field_name.as_str());
        result.push(':');
        result.push_str(&type_name(&field.ty));
        result.push_str(&minify_directives(&field.directives));
        result.push(',');
    }

    // Remove trailing comma
    if !result.is_empty() {
        result.pop();
    }

    result
}

fn minify_arguments(
    arguments: &[apollo_compiler::Node<apollo_compiler::ast::InputValueDefinition>],
) -> String {
    arguments
        .iter()
        .map(|arg| {
            if let Some(desc) = arg.description.as_ref() {
                format!(
                    "\"{}\"{}:{}{}",
                    normalize_description(desc),
                    arg.name.as_str(),
                    type_name(&arg.ty),
                    minify_directives(&arg.directives)
                )
            } else {
                format!(
                    "{}:{}{}",
                    arg.name.as_str(),
                    type_name(&arg.ty),
                    minify_directives(&arg.directives)
                )
            }
        })
        .collect::<Vec<String>>()
        .join(",")
}

fn format_type_name_with_description(
    name: &apollo_compiler::Name,
    description: &Option<apollo_compiler::Node<str>>,
) -> String {
    if let Some(desc) = description.as_ref() {
        format!("\"{}\"{}", normalize_description(desc), name)
    } else {
        name.to_string()
    }
}

fn format_interfaces(
    interfaces: &apollo_compiler::collections::IndexSet<apollo_compiler::schema::ComponentName>,
) -> String {
    interfaces
        .iter()
        .map(|interface| interface.as_str())
        .collect::<Vec<&str>>()
        .join(",")
}

fn type_name(ty: &Type) -> String {
    let name = shorten_scalar_names(ty.inner_named_type().as_str());
    if ty.is_list() {
        format!("[{name}]")
    } else if ty.is_non_null() {
        format!("{name}!")
    } else {
        name.to_string()
    }
}

fn shorten_scalar_names(name: &str) -> &str {
    match name {
        "String" => "s",
        "Int" => "i",
        "Float" => "f",
        "Boolean" => "b",
        "ID" => "d",
        _ => name,
    }
}

/// Normalize description formatting
#[allow(clippy::expect_used)]
fn normalize_description(desc: &str) -> String {
    // LLMs can typically process descriptions just fine without whitespace
    static WHITESPACE_PATTERN: OnceLock<Regex> = OnceLock::new();
    let re = WHITESPACE_PATTERN.get_or_init(|| Regex::new(r"\s+").expect("regex pattern compiles"));
    re.replace_all(desc, "").to_string()
}

#[cfg(test)]
mod tests {
    use super::*;

    const TEST_SCHEMA: &str = include_str!("tools/testdata/schema.graphql");

    #[test]
    fn test_minify_schema() {
        let schema = apollo_compiler::schema::Schema::parse(TEST_SCHEMA, "schema.graphql")
            .expect("Failed to parse schema")
            .validate()
            .expect("Failed to validate schema");

        let minified = schema
            .types
            .iter()
            .map(|(_, type_)| format!("{}: {}", type_.name().as_str(), type_.minify()))
            .collect::<Vec<String>>()
            .join("\n");

        insta::assert_snapshot!(minified);
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/operations/schema_walker/name.rs:
--------------------------------------------------------------------------------

```rust
use apollo_compiler::{Name as GraphQLName, Node, Schema as GraphQLSchema, schema::ExtendedType};
use schemars::{Schema as JSONSchema, json_schema};
use serde_json::{Map, Value};
use tracing::warn;

use crate::custom_scalar_map::CustomScalarMap;

use super::{r#type::Type, with_desc};

/// A GraphQL Named Walker
pub(super) struct Name<'a> {
    /// The definition cache which contains full schemas for nested types
    pub(super) cache: &'a mut Map<String, Value>,

    /// Custom scalar map for supplementing information from the GraphQL schema
    pub(super) custom_scalar_map: Option<&'a CustomScalarMap>,

    /// The optional description of the named type, from comments in the schema
    pub(super) description: &'a Option<String>,

    /// The actual named type to translate into a JSON schema
    pub(super) name: &'a GraphQLName,

    /// The original GraphQL schema with all type information
    pub(super) schema: &'a GraphQLSchema,
}

impl From<Name<'_>> for JSONSchema {
    fn from(
        Name {
            cache,
            custom_scalar_map,
            description,
            name,
            schema,
        }: Name,
    ) -> Self {
        let unknown_type = json_schema!({});

        let result = match name.as_str() {
            // Basic types map nicely
            "String" | "ID" => json_schema!({"type": "string"}),
            "Int" | "Float" => json_schema!({"type": "number"}),
            "Boolean" => json_schema!({"type": "boolean"}),

            // If we've already cached it, then return the reference immediately
            cached if cache.contains_key(cached) => {
                JSONSchema::new_ref(format!("#/definitions/{cached}"))
            }

            // Otherwise generate the dependent type
            other => match schema.types.get(other) {
                // Enums need to collect descriptions per field while also enumerating
                // all possible values
                Some(ExtendedType::Enum(r#enum)) => {
                    // Collect all values such that each is shown as
                    // <Enum value>: <Description>
                    let values = r#enum
                        .values
                        .iter()
                        .map(|(name, value)| {
                            format!(
                                "{}: {}",
                                name,
                                value
                                    .description
                                    .as_ref()
                                    .map(|d| d.to_string())
                                    .unwrap_or_default()
                            )
                        })
                        .collect::<Vec<_>>()
                        .join("\n");

                    // Consolidate all of the values such that we get a high-level
                    // description (from the schema) followed by its values
                    let description = format!(
                        "{}\n\nValues:\n{}",
                        r#enum
                            .description
                            .as_ref()
                            .map(Node::as_str)
                            .unwrap_or_default(),
                        values
                    );

                    cache.insert(
                        other.to_string(),
                        with_desc(json_schema!({
                                "type": "string",
                                "enum": r#enum.values.iter().map(|(_, value)| serde_json::json!(value.value)).collect::<Vec<_>>(),
                            }),
                            &Some(description),
                        ).into(),
                    );
                    JSONSchema::new_ref(format!("#/definitions/{other}"))
                }

                // Input types need to be traversed over their fields to ensure that they copy over
                // nested structure.
                Some(ExtendedType::InputObject(input)) => {
                    // Insert temporary value into map so any recursive references will not try to also create it.
                    cache.insert(other.to_string(), Default::default());

                    let mut input_schema = with_desc(
                        json_schema!({"type": "object", "properties": {}}),
                        &input.description.as_ref().map(Node::to_string),
                    );
                    for (name, field) in input.fields.iter() {
                        let field_description = field.description.as_ref().map(|n| n.to_string());
                        input_schema
                            .ensure_object()
                            .entry("properties")
                            .or_insert(Value::Object(Default::default()))
                            .as_object_mut()
                            .get_or_insert(&mut Map::default())
                            .insert(
                                name.to_string(),
                                JSONSchema::from(Type {
                                    cache,
                                    custom_scalar_map,
                                    description: &field_description,
                                    schema,
                                    r#type: &field.ty,
                                })
                                .into(),
                            );

                        // Mark any non-nullable fields as being required
                        if field.is_required() {
                            input_schema
                                .ensure_object()
                                .entry("required")
                                .or_insert(Value::Array(Default::default()))
                                .as_array_mut()
                                .get_or_insert(&mut Vec::default())
                                .push(name.to_string().into());
                        }
                    }

                    cache.insert(other.to_string(), input_schema.into());
                    JSONSchema::new_ref(format!("#/definitions/{other}"))
                }

                // Custom scalars need to be opaquely copied over as types with no further processing
                Some(ExtendedType::Scalar(scalar)) => {
                    // The default scalar description should always be from the scalar in the schema itself
                    let default_scalar_description =
                        scalar.description.as_ref().map(Node::to_string);

                    if let Some(custom_scalar_map) = custom_scalar_map {
                        if let Some(custom_scalar_schema_object) = custom_scalar_map.get(other) {
                            // The custom scalar schema might have an override for the description, so we extract it here.
                            let mut scalar_schema = custom_scalar_schema_object.clone();
                            let description = scalar_schema
                                .ensure_object()
                                .get("description")
                                .and_then(Value::as_str)
                                .map(str::to_string);

                            cache.insert(
                                other.to_string(),
                                with_desc(
                                    custom_scalar_schema_object.clone(),
                                    // The description could have been overridden by the custom schema, so we prioritize it here
                                    &description.or(default_scalar_description),
                                )
                                .into(),
                            );
                        } else {
                            warn!(name=?other, "custom scalar missing from custom_scalar_map");
                            cache.insert(
                                other.to_string(),
                                with_desc(JSONSchema::default(), &default_scalar_description)
                                    .into(),
                            );
                        }
                    } else {
                        warn!(name=?other, "custom scalars aren't currently supported without a custom_scalar_map");
                        cache.insert(
                            other.to_string(),
                            with_desc(JSONSchema::default(), &default_scalar_description).into(),
                        );
                    }

                    JSONSchema::new_ref(format!("#/definitions/{other}"))
                }

                // Anything else is unhandled
                _ => {
                    warn!(name=?other, "Type not found in schema");
                    unknown_type
                }
            },
        };

        with_desc(result, description)
    }
}

```

--------------------------------------------------------------------------------
/crates/apollo-mcp-server/src/introspection/tools/execute.rs:
--------------------------------------------------------------------------------

```rust
use crate::errors::McpError;
use crate::operations::{MutationMode, operation_defs, operation_name};
use crate::{
    graphql::{self, OperationDetails},
    schema_from_type,
};
use reqwest::header::{HeaderMap, HeaderValue};
use rmcp::model::{ErrorCode, Tool};
use rmcp::schemars::JsonSchema;
use rmcp::serde_json::Value;
use rmcp::{schemars, serde_json};
use serde::Deserialize;

/// The name of the tool to execute an ad hoc GraphQL operation
pub const EXECUTE_TOOL_NAME: &str = "execute";

#[derive(Clone)]
pub struct Execute {
    pub tool: Tool,
    mutation_mode: MutationMode,
}

/// Input for the execute tool.
#[derive(JsonSchema, Deserialize)]
pub struct Input {
    /// The GraphQL operation
    query: String,

    /// The variable values represented as JSON
    #[schemars(schema_with = "String::json_schema", default)]
    variables: Option<Value>,
}

impl Execute {
    pub fn new(mutation_mode: MutationMode) -> Self {
        Self {
            mutation_mode,
            tool: Tool::new(
                EXECUTE_TOOL_NAME,
                "Execute a GraphQL operation. Use the `introspect` tool to get information about the GraphQL schema. Always use the schema to create operations - do not try arbitrary operations. If available, first use the `validate` tool to validate operations. DO NOT try to execute introspection queries.",
                schema_from_type!(Input),
            ),
        }
    }
}

impl graphql::Executable for Execute {
    fn persisted_query_id(&self) -> Option<String> {
        None
    }

    fn operation(&self, input: Value) -> Result<OperationDetails, McpError> {
        let input = serde_json::from_value::<Input>(input).map_err(|_| {
            McpError::new(ErrorCode::INVALID_PARAMS, "Invalid input".to_string(), None)
        })?;

        let (_, operation_def, source_path) =
            operation_defs(&input.query, self.mutation_mode == MutationMode::All, None)
                .map_err(|e| McpError::new(ErrorCode::INVALID_PARAMS, e.to_string(), None))?
                .ok_or_else(|| {
                    McpError::new(
                        ErrorCode::INVALID_PARAMS,
                        "Invalid operation type".to_string(),
                        None,
                    )
                })?;

        Ok(OperationDetails {
            query: input.query,
            operation_name: operation_name(&operation_def, source_path).ok(),
        })
    }

    fn variables(&self, input: Value) -> Result<Value, McpError> {
        let input = serde_json::from_value::<Input>(input).map_err(|_| {
            McpError::new(ErrorCode::INVALID_PARAMS, "Invalid input".to_string(), None)
        })?;
        match input.variables {
            None => Ok(Value::Null),
            Some(Value::Null) => Ok(Value::Null),
            Some(Value::String(s)) => serde_json::from_str(&s).map_err(|_| {
                McpError::new(ErrorCode::INVALID_PARAMS, "Invalid input".to_string(), None)
            }),
            Some(obj) if obj.is_object() => Ok(obj),
            _ => Err(McpError::new(
                ErrorCode::INVALID_PARAMS,
                "Invalid input".to_string(),
                None,
            )),
        }
    }

    fn headers(&self, default_headers: &HeaderMap<HeaderValue>) -> HeaderMap<HeaderValue> {
        default_headers.clone()
    }
}

#[cfg(test)]
mod tests {
    use crate::errors::McpError;
    use crate::graphql::{Executable, OperationDetails};
    use crate::introspection::tools::execute::Execute;
    use crate::operations::MutationMode;
    use rmcp::model::ErrorCode;
    use rmcp::serde_json::{Value, json};

    #[test]
    fn execute_query_with_variables_as_string() {
        let execute = Execute::new(MutationMode::None);

        let query = "query GetUser($id: ID!) { user(id: $id) { id name } }";
        let variables = json!({ "id": "123" });

        let input = json!({
            "query": query,
            "variables": variables.to_string()
        });

        assert_eq!(
            Executable::operation(&execute, input.clone()),
            Ok(OperationDetails {
                query: query.to_string(),
                operation_name: Some("GetUser".to_string()),
            })
        );
        assert_eq!(Executable::variables(&execute, input), Ok(variables));
    }

    #[test]
    fn execute_query_with_variables_as_json() {
        let execute = Execute::new(MutationMode::None);

        let query = "query GetUser($id: ID!) { user(id: $id) { id name } }";
        let variables = json!({ "id": "123" });

        let input = json!({
            "query": query,
            "variables": variables
        });

        assert_eq!(
            Executable::operation(&execute, input.clone()),
            Ok(OperationDetails {
                query: query.to_string(),
                operation_name: Some("GetUser".to_string()),
            })
        );
        assert_eq!(Executable::variables(&execute, input), Ok(variables));
    }

    #[test]
    fn execute_query_without_variables() {
        let execute = Execute::new(MutationMode::None);

        let query = "query GetUser($id: ID!) { user(id: $id) { id name } }";

        let input = json!({
            "query": query,
        });

        assert_eq!(
            Executable::operation(&execute, input.clone()),
            Ok(OperationDetails {
                query: query.to_string(),
                operation_name: Some("GetUser".to_string()),
            })
        );
        assert_eq!(Executable::variables(&execute, input), Ok(Value::Null));
    }

    #[test]
    fn execute_query_anonymous_operation() {
        let execute = Execute::new(MutationMode::None);

        let query = "{ user(id: \"123\") { id name } }";
        let input = json!({
            "query": query,
        });

        assert_eq!(
            Executable::operation(&execute, input.clone()),
            Ok(OperationDetails {
                query: query.to_string(),
                operation_name: None,
            })
        );
    }

    #[test]
    fn execute_query_err_with_mutation_when_mutation_mode_is_none() {
        let execute = Execute::new(MutationMode::None);

        let query = "mutation MutationName { id }".to_string();
        let input = json!({
            "query": query,
        });

        assert_eq!(
            Executable::operation(&execute, input),
            Err(McpError::new(
                ErrorCode::INVALID_PARAMS,
                "Invalid operation type".to_string(),
                None
            ))
        );
    }

    #[test]
    fn execute_query_ok_with_mutation_when_mutation_mode_is_all() {
        let execute = Execute::new(MutationMode::All);

        let query = "mutation MutationName { id }".to_string();
        let input = json!({
            "query": query,
        });

        assert_eq!(
            Executable::operation(&execute, input),
            Ok(OperationDetails {
                query: query.to_string(),
                operation_name: Some("MutationName".to_string()),
            })
        );
    }

    #[test]
    fn execute_query_err_with_subscription_regardless_of_mutation_mode() {
        for mutation_mode in [
            MutationMode::None,
            MutationMode::Explicit,
            MutationMode::All,
        ] {
            let execute = Execute::new(mutation_mode);

            let input = json!({
                "query": "subscription SubscriptionName { id }",
            });

            assert_eq!(
                Executable::operation(&execute, input),
                Err(McpError::new(
                    ErrorCode::INVALID_PARAMS,
                    "Invalid operation type".to_string(),
                    None
                ))
            );
        }
    }

    #[test]
    fn execute_query_invalid_input() {
        let execute = Execute::new(MutationMode::None);

        let input = json!({
            "nonsense": "whatever",
        });

        assert_eq!(
            Executable::operation(&execute, input.clone()),
            Err(McpError::new(
                ErrorCode::INVALID_PARAMS,
                "Invalid input".to_string(),
                None
            ))
        );
        assert_eq!(
            Executable::variables(&execute, input),
            Err(McpError::new(
                ErrorCode::INVALID_PARAMS,
                "Invalid input".to_string(),
                None
            ))
        );
    }

    #[test]
    fn execute_query_invalid_variables() {
        let execute = Execute::new(MutationMode::None);

        let input = json!({
            "query": "query GetUser($id: ID!) { user(id: $id) { id name } }",
            "variables": "garbage",
        });

        assert_eq!(
            Executable::variables(&execute, input),
            Err(McpError::new(
                ErrorCode::INVALID_PARAMS,
                "Invalid input".to_string(),
                None
            ))
        );
    }
}

```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/local-operations/api.graphql:
--------------------------------------------------------------------------------

```graphql
type Agency {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: [Country]
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
  totalLaunchCount: Int
  consecutiveSuccessfulLaunches: Int
  successfulLaunches: Int
  failedLaunches: Int
  pendingLaunches: Int
  consecutiveSuccessfulLandings: Int
  successfulLandings: Int
  failedLandings: Int
  attemptedLandings: Int
  successfulLandingsSpacecraft: Int
  failedLandingsSpacecraft: Int
  attemptedLandingsSpacecraft: Int
  successfulLandingsPayload: Int
  failedLandingsPayload: Int
  attemptedLandingsPayload: Int
  infoUrl: String
  wikiUrl: String
  socialMediaLinks: [SocialMediaLink]
}

type AgencyConnection {
  pageInfo: PageInfo
  results: [Agency]
}

type ApiThrottle {
  yourRequestLimit: Int
  limitFrequencySecs: Int
  currentUse: Int
  nextUseSecs: Int
  ident: String
}

type Astronaut {
  id: ID!
  name: String
  status: String
  agency: Agency
  image: Image
  type: String
  inSpace: Boolean
  timeInSpace: String
  evaTime: String
  age: Int
  dateOfBirth: String
  dateOfDeath: String
  nationality: Country
  bio: String
  wiki: String
  lastFlight: String
  firstFlight: String
  socialMediaLinks: [SocialMediaLink]
}

type AstronautConnection {
  pageInfo: PageInfo
  results: [Astronaut]
}

input AstronautFilters {
  search: String
  inSpace: Boolean
}

type CelestialBody {
  id: ID!
  name: String
  type: CelestialType
  diameter: Float
  mass: Float
  gravity: Float
  lengthOfDay: String
  atmosphere: Boolean
  image: Image
  description: String
  wikiUrl: String
}

type CelestialBodyConnection {
  pageInfo: PageInfo
  results: [CelestialBody]
}

type CelestialType {
  id: ID!
  name: String
}

type Country {
  id: ID!
  name: String
  alpha2Code: String
  alpha3Code: String
  nationalityName: String
  nationalityNameComposed: String
}

type DockingEvent {
  id: ID!
  docking: String
  departure: String
  dockingLocation: DockingLocation
  spaceStationTarget: SpaceStationTarget
  flightVehicleTarget: FlightVehicleTarget
  payloadFlightTarget: PayloadFlightTarget
  flightVehicleChaser: FlightVehicleChaser
  spaceStationChaser: SpaceStationChaser
  payloadFlightChaser: PayloadFlightChaser
}

type DockingEventConnection {
  pageInfo: PageInfo
  results: [DockingEvent]
}

type DockingLocation {
  id: ID!
  name: String
  spacestation: SpaceStation
  spacecraft: Spacecraft
  payload: Payload
}

type FlightVehicleChaser {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
  launch: Launch
  landing: Landing
}

type FlightVehicleTarget {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
}

type Image {
  id: ID!
  name: String
  url: String
  thumbnail: String
  credit: String
  singleUse: Boolean
  license: ImageLicense
}

type ImageLicense {
  name: String
  link: String
}

type InfoUrl {
  priority: Int
  source: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
}

type Landing {
  id: ID!
  type: LandingType
  attempt: Boolean
  success: Boolean
  description: String
  downrangeDistance: String
  landingLocation: LandingLocation
}

type LandingLocation {
  id: ID!
  name: String
  active: Boolean
  abbrev: String
  description: String
  location: Location
  longitude: String
  latitude: String
  image: Image
  landings: SuccessCount
  celestialBody: CelestialBody
}

type LandingType {
  id: ID!
  name: String
  abbrev: String
  description: String
}

type Language {
  id: ID!
  name: String
  code: String
}

type Launch {
  id: ID!
  name: String
  launchDesignator: String
  status: LaunchStatus
  lastUpdated: String
  net: String
  netPrecision: String
  window: LaunchWindow
  image: Image
  infographic: String
  probability: Float
  weatherConcerns: String
  failreason: String
  hashtag: String
  provider: Agency
  rocket: Rocket
  mission: Mission
  pad: Pad
  webcastLive: Boolean
  program: Program
  orbitalLaunchAttemps: Int
  locationLaunchAttemps: Int
  padLaunchAttemps: Int
  agencyLaunchAttemps: Int
  orbitalLaunchAttempsYear: Int
  locationLaunchAttempsYear: Int
  padLaunchAttempsYear: Int
  agencyLaunchAttempsYear: Int
}

type LaunchConnection {
  pageInfo: PageInfo
  results: [Launch]
}

type LaunchStatus {
  id: ID!
  name: String
  abbrev: String
  description: String
}

type LaunchWindow {
  start: String
  end: String
}

type Location {
  id: ID!
  name: String
  active: Boolean
  country: Country
  image: Image
  mapImage: String
  longitude: String
  latitude: String
  totalLaunchCount: Int
  totalLandingCount: Int
  description: String
  timezone: String
}

type Manufacturer {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: Country
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
}

type Mission {
  id: ID!
  name: String
  type: String
  description: String
  image: Image
  orbit: Orbit
  agencies: [Agency]
  infoUrls: [InfoUrl]
  vidUrls: [VideoUrl]
}

type MissionPatch {
  id: ID!
  name: String
  priority: Int
  imageUrl: String
  agency: Agency
}

type Orbit {
  id: ID!
  name: String
  abbrev: String
  celestialBody: CelestialBody
}

type Pad {
  id: ID!
  active: Boolean
  agencies: [Agency]
  name: String
  image: Image
  description: String
  infoUrl: String
  wikiUrl: String
  mapUrl: String
  latitude: Float
  longitude: Float
  country: Country
  mapImage: String
  launchTotalCount: Int
  orbitalLaunchAttemptCount: Int
  fastestTurnaround: String
  location: Location
}

type PageInfo {
  count: Int
  next: String
  previous: String
}

type Payload {
  id: ID!
  name: String
  type: String
  manufacturer: Manufacturer
  operator: Agency
  image: Image
  wikiLink: String
  infoLink: String
  program: Program
  cost: Float
  mass: Float
  description: String
}

type PayloadFlightChaser {
  id: ID!
  url: String
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}

type PayloadFlightTarget {
  id: ID!
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}

type Program {
  id: ID!
  name: String
  image: Image
  infoUrl: String
  wikiUrl: String
  description: String
  agencies: [Agency]
  startDate: String
  endDate: String
  missionPatches: [MissionPatch]
}

type Query {
  agency(id: ID!): Agency
  agencies(search: String, offset: Int = 0, limit: Int = 20): AgencyConnection
  apiThrottle: ApiThrottle
  astronaut(id: ID!): Astronaut
  astronauts(filters: AstronautFilters, offset: Int = 0, limit: Int = 20): AstronautConnection
  celestialBody(id: ID!): CelestialBody
  celestialBodies(search: String, offset: Int = 0, limit: Int = 20): CelestialBodyConnection
  dockingEvent(id: ID!): DockingEvent
  dockingEvents(search: String, offset: Int = 0, limit: Int = 20): DockingEventConnection
  launch(id: ID!): Launch
  launches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  previousLaunces(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  upcomingLaunches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
}

type Rocket {
  id: ID!
  configuration: RocketLaunchConfigurations
}

type RocketFamily {
  id: ID!
  name: String
}

type RocketLaunchConfigurations {
  id: ID!
  name: String
  fullName: String
  variant: String
  families: [RocketFamily]
}

type SocialMedia {
  id: ID!
  name: String
  url: String
  logo: Image
}

type SocialMediaLink {
  id: ID!
  url: String
  socialMedia: SocialMedia
}

type Spacecraft {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  serialNumber: String
  isPlaceholder: Boolean
  image: Image
  inSpace: Boolean
  timeInSpace: String
  timeDocked: String
  flightsCount: Int
  missionEndsCount: Int
  status: String
  description: String
  spacecraftConfig: SpacecraftConfig
  fastestTurnaround: String
}

type SpacecraftConfig {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  image: Image
}

type SpacecraftFamily {
  id: ID!
  name: String
  description: String
  manufacturer: Manufacturer
  maidenFlight: String
}

type SpaceStation {
  id: ID!
  name: String
  image: Image
}

type SpaceStationChaser {
  id: ID!
  name: String
  image: Image
  status: String
  founded: String
  deorbited: String
  description: String
  orbit: String
  type: String
}

type SpaceStationTarget {
  id: ID!
  name: String
  image: Image
}

type SuccessCount {
  total: Int
  successful: Int
  failed: Int
}

type VideoUrl {
  priority: Int
  source: String
  publisher: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
  startTime: String
  endTime: String
  live: Boolean
}
```

--------------------------------------------------------------------------------
/e2e/mcp-server-tester/pq-manifest/api.graphql:
--------------------------------------------------------------------------------

```graphql
type Agency {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: [Country]
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
  totalLaunchCount: Int
  consecutiveSuccessfulLaunches: Int
  successfulLaunches: Int
  failedLaunches: Int
  pendingLaunches: Int
  consecutiveSuccessfulLandings: Int
  successfulLandings: Int
  failedLandings: Int
  attemptedLandings: Int
  successfulLandingsSpacecraft: Int
  failedLandingsSpacecraft: Int
  attemptedLandingsSpacecraft: Int
  successfulLandingsPayload: Int
  failedLandingsPayload: Int
  attemptedLandingsPayload: Int
  infoUrl: String
  wikiUrl: String
  socialMediaLinks: [SocialMediaLink]
}

type AgencyConnection {
  pageInfo: PageInfo
  results: [Agency]
}

type ApiThrottle {
  yourRequestLimit: Int
  limitFrequencySecs: Int
  currentUse: Int
  nextUseSecs: Int
  ident: String
}

type Astronaut {
  id: ID!
  name: String
  status: String
  agency: Agency
  image: Image
  type: String
  inSpace: Boolean
  timeInSpace: String
  evaTime: String
  age: Int
  dateOfBirth: String
  dateOfDeath: String
  nationality: Country
  bio: String
  wiki: String
  lastFlight: String
  firstFlight: String
  socialMediaLinks: [SocialMediaLink]
}

type AstronautConnection {
  pageInfo: PageInfo
  results: [Astronaut]
}

input AstronautFilters {
  search: String
  inSpace: Boolean
}

type CelestialBody {
  id: ID!
  name: String
  type: CelestialType
  diameter: Float
  mass: Float
  gravity: Float
  lengthOfDay: String
  atmosphere: Boolean
  image: Image
  description: String
  wikiUrl: String
}

type CelestialBodyConnection {
  pageInfo: PageInfo
  results: [CelestialBody]
}

type CelestialType {
  id: ID!
  name: String
}

type Country {
  id: ID!
  name: String
  alpha2Code: String
  alpha3Code: String
  nationalityName: String
  nationalityNameComposed: String
}

type DockingEvent {
  id: ID!
  docking: String
  departure: String
  dockingLocation: DockingLocation
  spaceStationTarget: SpaceStationTarget
  flightVehicleTarget: FlightVehicleTarget
  payloadFlightTarget: PayloadFlightTarget
  flightVehicleChaser: FlightVehicleChaser
  spaceStationChaser: SpaceStationChaser
  payloadFlightChaser: PayloadFlightChaser
}

type DockingEventConnection {
  pageInfo: PageInfo
  results: [DockingEvent]
}

type DockingLocation {
  id: ID!
  name: String
  spacestation: SpaceStation
  spacecraft: Spacecraft
  payload: Payload
}

type FlightVehicleChaser {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
  launch: Launch
  landing: Landing
}

type FlightVehicleTarget {
  id: ID!
  destination: String
  missionEnd: String
  spacecraft: Spacecraft
}

type Image {
  id: ID!
  name: String
  url: String
  thumbnail: String
  credit: String
  singleUse: Boolean
  license: ImageLicense
}

type ImageLicense {
  name: String
  link: String
}

type InfoUrl {
  priority: Int
  source: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
}

type Landing {
  id: ID!
  type: LandingType
  attempt: Boolean
  success: Boolean
  description: String
  downrangeDistance: String
  landingLocation: LandingLocation
}

type LandingLocation {
  id: ID!
  name: String
  active: Boolean
  abbrev: String
  description: String
  location: Location
  longitude: String
  latitude: String
  image: Image
  landings: SuccessCount
  celestialBody: CelestialBody
}

type LandingType {
  id: ID!
  name: String
  abbrev: String
  description: String
}

type Language {
  id: ID!
  name: String
  code: String
}

type Launch {
  id: ID!
  name: String
  launchDesignator: String
  status: LaunchStatus
  lastUpdated: String
  net: String
  netPrecision: String
  window: LaunchWindow
  image: Image
  infographic: String
  probability: Float
  weatherConcerns: String
  failreason: String
  hashtag: String
  provider: Agency
  rocket: Rocket
  mission: Mission
  pad: Pad
  webcastLive: Boolean
  program: Program
  orbitalLaunchAttemps: Int
  locationLaunchAttemps: Int
  padLaunchAttemps: Int
  agencyLaunchAttemps: Int
  orbitalLaunchAttempsYear: Int
  locationLaunchAttempsYear: Int
  padLaunchAttempsYear: Int
  agencyLaunchAttempsYear: Int
}

type LaunchConnection {
  pageInfo: PageInfo
  results: [Launch]
}

type LaunchStatus {
  id: ID!
  name: String
  abbrev: String
  description: String
}

type LaunchWindow {
  start: String
  end: String
}

type Location {
  id: ID!
  name: String
  active: Boolean
  country: Country
  image: Image
  mapImage: String
  longitude: String
  latitude: String
  totalLaunchCount: Int
  totalLandingCount: Int
  description: String
  timezone: String
}

type Manufacturer {
  id: ID!
  name: String
  abbrev: String
  type: String
  featured: Boolean
  country: Country
  description: String
  administrator: String
  foundingYear: Int
  spacecraft: String
  image: Image
  logo: Image
  socialLogo: Image
}

type Mission {
  id: ID!
  name: String
  type: String
  description: String
  image: Image
  orbit: Orbit
  agencies: [Agency]
  infoUrls: [InfoUrl]
  vidUrls: [VideoUrl]
}

type MissionPatch {
  id: ID!
  name: String
  priority: Int
  imageUrl: String
  agency: Agency
}

type Orbit {
  id: ID!
  name: String
  abbrev: String
  celestialBody: CelestialBody
}

type Pad {
  id: ID!
  active: Boolean
  agencies: [Agency]
  name: String
  image: Image
  description: String
  infoUrl: String
  wikiUrl: String
  mapUrl: String
  latitude: Float
  longitude: Float
  country: Country
  mapImage: String
  launchTotalCount: Int
  orbitalLaunchAttemptCount: Int
  fastestTurnaround: String
  location: Location
}

type PageInfo {
  count: Int
  next: String
  previous: String
}

type Payload {
  id: ID!
  name: String
  type: String
  manufacturer: Manufacturer
  operator: Agency
  image: Image
  wikiLink: String
  infoLink: String
  program: Program
  cost: Float
  mass: Float
  description: String
}

type PayloadFlightChaser {
  id: ID!
  url: String
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}

type PayloadFlightTarget {
  id: ID!
  destination: String
  amount: String
  payload: Payload
  launch: Launch
  landing: Landing
}

type Program {
  id: ID!
  name: String
  image: Image
  infoUrl: String
  wikiUrl: String
  description: String
  agencies: [Agency]
  startDate: String
  endDate: String
  missionPatches: [MissionPatch]
}

type Query {
  agency(id: ID!): Agency
  agencies(search: String, offset: Int = 0, limit: Int = 20): AgencyConnection
  apiThrottle: ApiThrottle
  astronaut(id: ID!): Astronaut
  astronauts(filters: AstronautFilters, offset: Int = 0, limit: Int = 20): AstronautConnection
  celestialBody(id: ID!): CelestialBody
  celestialBodies(search: String, offset: Int = 0, limit: Int = 20): CelestialBodyConnection
  dockingEvent(id: ID!): DockingEvent
  dockingEvents(search: String, offset: Int = 0, limit: Int = 20): DockingEventConnection
  launch(id: ID!): Launch
  launches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  previousLaunces(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
  upcomingLaunches(search: String, limit: Int = 5, offset: Int = 0): LaunchConnection
}

type Rocket {
  id: ID!
  configuration: RocketLaunchConfigurations
}

type RocketFamily {
  id: ID!
  name: String
}

type RocketLaunchConfigurations {
  id: ID!
  name: String
  fullName: String
  variant: String
  families: [RocketFamily]
}

type SocialMedia {
  id: ID!
  name: String
  url: String
  logo: Image
}

type SocialMediaLink {
  id: ID!
  url: String
  socialMedia: SocialMedia
}

type Spacecraft {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  serialNumber: String
  isPlaceholder: Boolean
  image: Image
  inSpace: Boolean
  timeInSpace: String
  timeDocked: String
  flightsCount: Int
  missionEndsCount: Int
  status: String
  description: String
  spacecraftConfig: SpacecraftConfig
  fastestTurnaround: String
}

type SpacecraftConfig {
  id: ID!
  name: String
  type: String
  agency: Agency
  family: SpacecraftFamily
  inUse: Boolean
  image: Image
}

type SpacecraftFamily {
  id: ID!
  name: String
  description: String
  manufacturer: Manufacturer
  maidenFlight: String
}

type SpaceStation {
  id: ID!
  name: String
  image: Image
}

type SpaceStationChaser {
  id: ID!
  name: String
  image: Image
  status: String
  founded: String
  deorbited: String
  description: String
  orbit: String
  type: String
}

type SpaceStationTarget {
  id: ID!
  name: String
  image: Image
}

type SuccessCount {
  total: Int
  successful: Int
  failed: Int
}

type VideoUrl {
  priority: Int
  source: String
  publisher: String
  title: String
  description: String
  featureImage: String
  url: String
  type: String
  language: Language
  startTime: String
  endTime: String
  live: Boolean
}
```