# Directory Structure
```
├── .cursor
│ └── mcp.json
├── .env.template
├── .gitignore
├── bun.lock
├── docker-compose.yml
├── index.ts
├── LICENSE
├── package.json
├── README.md
├── scripts
│ └── jwt.ts
├── src
│ ├── app.stateful.ts
│ ├── app.stateless.ts
│ ├── lib
│ │ ├── errors.ts
│ │ ├── extended-oauth-proxy-provider.ts
│ │ ├── storage
│ │ │ ├── in-memory.ts
│ │ │ └── redis.ts
│ │ └── types.ts
│ └── mcp-server.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/.env.template:
--------------------------------------------------------------------------------
```
OAUTH_CLIENT_ID=<your Auth0 client ID>
OAUTH_CLIENT_SECRET=<your Auth0 client secret>
OAUTH_REGISTRATION_URL=https://<your auth0 tenant domain>/oidc/register
OAUTH_ISSUER_URL=https://<your auth0 tenant domain>
OAUTH_AUTHORIZATION_URL=https://<your auth0 tenant domain>/authorize
OAUTH_TOKEN_URL=https://<your auth0 tenant domain>/oauth/token
#OAUTH_REVOCATION_URL=
# the host & port this server is reachable at; used to build the OAuth callback URL and to register it with Auth0
THIS_HOSTNAME="http://localhost:5050"
LOG_LEVEL=debug
DEBUG=index.ts
TOKEN_STORAGE_STRATEGY='memory' # 'memory' for in-memory storage (local development only); 'redis' for Redis-backed storage (recommended)
REDIS_DSN=redis://127.0.0.1:6379
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# vitepress build output
**/.vitepress/dist
# vitepress cache directory
**/.vitepress/cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
*.env
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# 🌊 HTTP + SSE MCP Server w/ OAuth
## Introduction
This repo provides a reference implementation for creating a remote MCP server that supports the Streamable HTTP & SSE Transports, authorized with OAuth based on the MCP specification.
Note that the MCP server in this repo is logically separate from the application that handles the SSE + HTTP transports and the OAuth flow.
As a result, you can easily fork this repo, and plug in your own MCP server and OAuth credentials for a working SSE/HTTP + OAuth MCP server with your own functionality.
> **But, why?**
Great question! The MCP specification added OAuth-based authorization on March 25, 2025. As of May 1, 2025:
- The TypeScript SDK contains many of the building blocks needed for an OAuth-authorized MCP server with streamable HTTP, **but there is no documentation or tutorial** on how to build such a server
- The Python SDK contains neither an implementation of the streamable HTTP transport nor the OAuth building blocks that are present in the TypeScript SDK
- The streamable HTTP transport is broadly unsupported by MCP host applications such as Cursor and Claude Desktop, though it can be used directly from agents written in JavaScript/TypeScript via the SDK's `StreamableHTTPClientTransport` class
At [Naptha AI](https://naptha.ai), we really wanted to build an OAuth-authorized MCP server on the streamable HTTP transport, and couldn't find any reference implementations, so we decided to build one ourselves!
## Dependencies
[Bun](https://bun.sh), a fast all-in-one JavaScript runtime, is the recommended runtime and package manager for this repository. Limited compatibility testing has been done with `npm` + `tsc`.
## Overview
This repository provides the following:
1. An MCP server, which you can easily replace with your own
2. An express.js application that manages _both_ the SSE and Streamable HTTP transports _and_ OAuth authorization.
This express application is what you plug your credentials and MCP server into.
Note that while this express app implements the required OAuth endpoints including `/authorize` and the Authorization Server Metadata endpoint ([RFC8414](https://datatracker.ietf.org/doc/html/rfc8414)), _it does not implement an OAuth authorization server!_
This example proxies OAuth to an upstream OAuth server which supports dynamic client registration ([RFC7591](https://datatracker.ietf.org/doc/html/rfc7591)). To use this example, you will need to bring your own authorization server. We recommend using [Auth0](https://auth0.com); see the ["Setting up OAuth" Section](https://github.com/NapthaAI/http-oauth-mcp-server?tab=readme-ov-file#setting-up-oauth) below.
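If you want to see how the pieces connect before reading the source files, here is a condensed, simplified sketch of what `src/app.stateless.ts` wires together. Logging, error handling, and the Redis storage option are omitted, so treat it as an orientation aid rather than a drop-in replacement:
```typescript
import express from "express";
import { mcpAuthRouter } from "@modelcontextprotocol/sdk/server/auth/router.js";
import { requireBearerAuth } from "@modelcontextprotocol/sdk/server/auth/middleware/bearerAuth.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { ExtendedProxyOAuthServerProvider } from "./lib/extended-oauth-proxy-provider";
import InMemoryStorage from "./lib/storage/in-memory";
import { server } from "./mcp-server"; // swap in your own MCP server here

const app = express();
app.use(express.json());

// Proxy /authorize, /token and /register to the upstream authorization server
const provider = new ExtendedProxyOAuthServerProvider({
  endpoints: {
    authorizationUrl: process.env.OAUTH_AUTHORIZATION_URL!,
    tokenUrl: process.env.OAUTH_TOKEN_URL!,
    registrationUrl: process.env.OAUTH_REGISTRATION_URL!,
  },
  storageManager: InMemoryStorage,
});

// Mounts the OAuth endpoints plus the RFC8414 metadata endpoint
app.use(
  mcpAuthRouter({
    provider,
    issuerUrl: new URL(process.env.OAUTH_ISSUER_URL!),
    baseUrl: new URL(process.env.THIS_HOSTNAME!),
  }),
);

// The MCP endpoint itself is protected by the bearer tokens this proxy issues
app.post(
  "/mcp",
  requireBearerAuth({ provider, requiredScopes: [] }),
  async (req, res) => {
    const transport = new StreamableHTTPServerTransport({
      sessionIdGenerator: undefined, // stateless: no sessions
    });
    await server.connect(transport);
    await transport.handleRequest(req, res, req.body);
  },
);

app.listen(5050);
```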
## Configuring your server
### Notes on OAuth & Dynamic Client Registration
To use this example, you need an OAuth authorization server. _Do not implement this yourself!_ For the purposes of creating our demo, we used [Auth0](https://auth0.com) -- this is a great option, though there are many others.
The MCP specification requires support for an uncommon OAuth feature, specifically [RFC7591](https://datatracker.ietf.org/doc/html/rfc7591), Dynamic Client Registration. The [MCP specification](https://modelcontextprotocol.io/specification/2025-03-26) specifies that MCP clients and servers should support the dynamic client registration protocol, so that MCP clients (wherever your client transport lives) can obtain client IDs without manual registration steps. This allows new clients (agents, apps, etc.) to automatically register with new servers. More details on this can be found [in the authorization section of the MCP specification](https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-4-dynamic-client-registration), but this means that, unfortunately, you cannot simply proxy directly to a provider like Google or GitHub, which do not support dynamic client registration (they require you to register clients in their UI).
This leaves you with two options:
1. Pick an upstream OAuth provider like Auth0 which allows you to use OIDC IDPs like Google and GitHub for authentication, and which _does_ support dynamic client registration, or
2. implement dynamic client registration in the application yourself (i.e., the express application becomes not just a simple OAuth proxy but a complete or partially-complete OAuth server). Cloudflare implemented something like this for their Workers OAuth MCP servers, which we may extend this project with later. You can find that [here](https://github.com/cloudflare/workers-oauth-provider).
For simplicity, we have opted for the former option using Auth0.
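To make the registration flow concrete, here is a rough, hypothetical sketch of the kind of RFC7591 registration request that gets forwarded upstream. The field values are illustrative, and your authorization server may accept or require different metadata; in this repo, the equivalent logic lives in the `clientsStore` of `src/lib/extended-oauth-proxy-provider.ts`:
```typescript
// Hypothetical RFC7591 dynamic client registration call; all values are illustrative.
const registrationUrl = "https://<your auth0 tenant domain>/oidc/register";

const response = await fetch(registrationUrl, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    client_name: "my-mcp-client",
    redirect_uris: ["https://<your-mcp-client>/callback"],
    grant_types: ["authorization_code"],
    response_types: ["code"],
    token_endpoint_auth_method: "client_secret_post",
  }),
});

// On success, the authorization server mints credentials for the new client,
// e.g. { client_id: "...", client_secret: "...", redirect_uris: [...] }
const registeredClient = await response.json();
console.log(registeredClient.client_id);
```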
> [!NOTE]
> Since this implementation proxies the upstream OAuth server, the default approach of forwarding the access token from the OAuth server to the client would expose the user's upstream access token to the downstream client & MCP host. This is not suitable for many use-cases, so this approach re-implements some `@modelcontextprotocol/typescript-sdk` classes to fix this issue.
Note that while we are proxying the upstream authorization server, we are _not_ returning the end-user's auth token to the MCP client / host - instead, we are issuing our own, and allowing the client / host to use that token to authorize with our server. This prevents a malicious client or host from abusing the token, or from it being abused if it's leaked.
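In code, the heart of that token swap is small. Below is a minimal sketch, assuming a plain in-memory `Map` and hypothetical helper names (`issueLocalToken`, `verifyLocalToken`); the real implementations are the storage managers in `src/lib/storage/` together with `exchangeAuthorizationCode` in the proxy provider:
```typescript
import { randomBytes } from "node:crypto";

// Upstream token data, keyed by the opaque token *we* issue to the MCP client.
const issuedTokens = new Map<string, { upstreamAccessToken: string; clientId: string }>();

// Called after the upstream /oauth/token exchange succeeds: keep the upstream
// token server-side and hand the client a locally issued token instead.
function issueLocalToken(upstreamAccessToken: string, clientId: string): string {
  const localToken = randomBytes(64).toString("hex");
  issuedTokens.set(localToken, { upstreamAccessToken, clientId });
  return localToken;
}

// Called by the bearer-auth middleware on every MCP request.
// An unknown token yields undefined, which becomes a 401.
function verifyLocalToken(localToken: string) {
  return issuedTokens.get(localToken);
}
```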
### Setting up OAuth with Auth0
To get started with Auth0:
1. Create an Auth0 account at [Auth0.com](https://auth0.com/).
2. Create at least one connection to an IDP such as Google or GitHub. You can [learn how to do this here](https://auth0.com/docs/authenticate/identity-providers).
3. Promote the connection to a _domain-level connection_. Since new OAuth clients are registered by each MCP client, you can't configure your IDP connections on a per-application/client basis. This means your connections need to be available for all apps in your domain. You can [learn how to do this here](https://auth0.com/docs/authenticate/identity-providers/promote-connections-to-domain-level).
4. Enable Dynamic Client Registration (Auth0 also calls this "Dynamic Application Registration"). You can [learn how to do this here](https://auth0.com/docs/get-started/applications/dynamic-client-registration).
Once all of this has been set up, you will need the following information:
* your Auth0 client ID
* your Auth0 client secret
* your Auth0 tenant domain
Copy `.env.template` to `.env`, then fill in these values along with the rest of your configuration & secrets.
## Running the server
This repository includes two separate stand-alone servers:
- a **stateless** implementation of the streamable HTTP server at `src/app.stateless.ts`. This only supports the streamable HTTP transport, and is (theoretically) suitable for serverless deployment
- a **stateful** implementation of both SSE and streamable HTTP at `src/app.stateful.ts`. This app offers both transports, but maintains in-memory state even when using the `redis` storage strategy (connections must be persisted in-memory), so it is not suitable for serverless deployment or trivial horizontal scaling.
You can run either of them with `bun`:
```shell
bun run src/app.stateless.ts
# or,
bun run src/app.stateful.ts
```
## Putting it All Together
To test out our MCP server with streamable HTTP and OAuth support, you have a couple options.
As noted above, the Python MCP SDK does not support these features, so currently you can either plug our remote server into an MCP host like Cursor or Claude Desktop, or into a TypeScript/JavaScript application directly - but not into a Python one.
### Plugging your server into your MCP Host (Cursor / Claude)
Since most MCP hosts don't support either streamable HTTP (which is superior to SSE in a number of ways) _or_ OAuth, we recommend using the `mcp-remote` npm package, which handles the OAuth authorization and bridges the remote transport into a STDIO transport for your host.
The command will look like this:
```shell
bunx mcp-remote --transport http-first https://some-domain.server.com/mcp
# or,
npx mcp-remote --transport http-first https://some-domain.server.com/mcp
```
You have a couple of options for the `--transport` option:
- `http-first` (default): Tries HTTP transport first, falls back to SSE if HTTP fails with a 404 error
- `sse-first`: Tries SSE transport first, falls back to HTTP if SSE fails with a 405 error
- `http-only`: Only uses HTTP transport, fails if the server doesn't support it
- `sse-only`: Only uses SSE transport, fails if the server doesn't support it
> [!NOTE]
> If you launch the _stateless_ version of the server with `src/app.stateless.ts`, the SSE transport is not available, so you should use `--transport http-only`. SSE transport should not be expected to work if you use this entrypoint.
### Plugging your server into your agent
You can plug your Streamable HTTP server into an agent in JS/TS using `StreamableHTTPClientTransport`. However, this alone will not complete the OAuth flow for an OAuth-protected server. Instead, set the `Authorization` header on the client side to a valid access token that the server recognizes.
You can implement this with client credentials, API keys or something else. That pattern is not supported in this repository, but it would look like this using the [Vercel AI SDK](https://ai-sdk.dev/cookbook/node/mcp-tools#mcp-tools):
```typescript
import { openai } from '@ai-sdk/openai';
import { experimental_createMCPClient as createMcpClient, generateText } from 'ai';
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
const mcpClient = await createMcpClient({
transport: new StreamableHTTPClientTransport(
new URL("http://localhost:5050/mcp"), {
requestInit: {
headers: {
Authorization: "Bearer YOUR TOKEN HERE",
},
},
// TODO add OAuth client provider if you want
authProvider: undefined,
}),
});
const tools = await mcpClient.tools();
await generateText({
model: openai("gpt-4o"),
prompt: "Hello, world!",
  tools,
});
```
```
--------------------------------------------------------------------------------
/index.ts:
--------------------------------------------------------------------------------
```typescript
console.log("Hello via Bun!");
```
--------------------------------------------------------------------------------
/src/lib/errors.ts:
--------------------------------------------------------------------------------
```typescript
export class InvalidAccessTokenError extends Error {}
```
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
services:
valkey:
image: valkey/valkey:latest
ports:
- 127.0.0.1:6379:6379
```
--------------------------------------------------------------------------------
/.cursor/mcp.json:
--------------------------------------------------------------------------------
```json
{
"mcpServers": {
"streamable": {
"command": "bunx",
"args": [
"mcp-remote",
"http://localhost:5050/mcp",
"--transport",
"http-first"
]
}
}
}
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
"name": "http-oauth-mcp-server",
"module": "index.ts",
"type": "module",
"private": true,
"devDependencies": {
"@types/bun": "latest",
"@types/jsonwebtoken": "^9.0.9"
},
"peerDependencies": {
"typescript": "^5"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.11.0",
"dotenv": "^16.5.0",
"express": "^5.1.0",
"ioredis": "^5.6.1",
"jsonwebtoken": "^9.0.2",
"jwks-rsa": "^3.2.0",
"logging": "^3.3.0"
}
}
```
--------------------------------------------------------------------------------
/src/mcp-server.ts:
--------------------------------------------------------------------------------
```typescript
#!/usr/bin/env bun
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";
export const server = new McpServer({
name: "Math-MCP-Server",
version: "1.0.0",
});
server.tool(
"add",
"Add two numbers",
{ l: z.number(), r: z.number() },
async ({ l, r }) => ({
content: [
{
type: "text",
text: String(l + r),
},
],
}),
);
server.tool(
"divide",
"Divide two numbers",
{ l: z.number(), r: z.number() },
async ({ l, r }) => ({
content: [
{
type: "text",
text: String(l / r),
},
],
}),
);
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
"compilerOptions": {
// Environment setup & latest features
"lib": [
"ESNext"
],
"target": "ESNext",
"module": "ESNext",
"moduleDetection": "force",
"jsx": "react-jsx",
"allowJs": true,
// Bundler mode
"moduleResolution": "bundler",
"allowImportingTsExtensions": false,
"verbatimModuleSyntax": true,
"noEmit": false,
// Best practices
"strict": true,
"skipLibCheck": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
// Some stricter flags (disabled by default)
"noUnusedLocals": false,
"noUnusedParameters": false,
"noPropertyAccessFromIndexSignature": false
}
}
```
--------------------------------------------------------------------------------
/src/lib/types.ts:
--------------------------------------------------------------------------------
```typescript
import type { OAuthClientInformationFull } from "@modelcontextprotocol/sdk/shared/auth.js";
// Define JSON types
export type JsonValue =
| string
| number
| boolean
| null
| JsonValue[]
| { [key: string]: JsonValue };
export type JsonObject = { [key: string]: JsonValue };
// Define the type for the save function
export type SaveClientInfoFunction = (
clientId: string,
data: OAuthClientInformationFull,
) => Promise<void>;
export type GetClientInfoFunction = (
clientId: string,
) => Promise<OAuthClientInformationFull | undefined>;
export type SaveAccessTokenFunction = (
{
accessToken,
idToken,
refreshToken,
clientId,
scope,
}: {
accessToken: string;
idToken?: string;
refreshToken?: string;
clientId: string;
scope: string;
},
expiresInSeconds: number,
) => Promise<string>;
export type GetAccessTokenFunction = (accessToken: string) => Promise<
| {
scopes: Array<string>;
clientId: string;
accessToken: string;
idToken?: string;
refreshToken?: string;
expiresInSeconds: number;
}
| undefined
>;
export type OAuthProxyStorageManager = {
saveClient: SaveClientInfoFunction;
getClient: GetClientInfoFunction;
saveAccessToken: SaveAccessTokenFunction;
getAccessToken: GetAccessTokenFunction;
};
```
--------------------------------------------------------------------------------
/scripts/jwt.ts:
--------------------------------------------------------------------------------
```typescript
import jwt from "jsonwebtoken";
import jwksClient from "jwks-rsa";
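// Debug helper: decode a hard-coded JWT from the upstream Auth0 tenant and verify its
// signature against the tenant's published JWKS.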
const client = jwksClient({
jwksUri: "https://naptha.jp.auth0.com/.well-known/jwks.json",
timeout: 30_000, // 30 seconds
});
const jwtToken =
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6InoxVmx1eHV5Wk5IUHNvbXA0WkxEVCJ9.eyJuaWNrbmFtZSI6IkstTWlzdGVsZSIsIm5hbWUiOiJLeWxlIE1pc3RlbGUiLCJwaWN0dXJlIjoiaHR0cHM6Ly9hdmF0YXJzLmdpdGh1YnVzZXJjb250ZW50LmNvbS91LzE4NDMwNTU1P3Y9NCIsInVwZGF0ZWRfYXQiOiIyMDI1LTA1LTAyVDAzOjAzOjIyLjAyOVoiLCJlbWFpbCI6Imt5bGVAbWlzdGVsZS5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwiaXNzIjoiaHR0cHM6Ly9uYXB0aGEuanAuYXV0aDAuY29tLyIsImF1ZCI6IndsYkp4R25CTjRUZ015SXh4MVNwRWQySlJ0TDlmY3FwIiwic3ViIjoiZ2l0aHVifDE4NDMwNTU1IiwiaWF0IjoxNzQ2MjA2NTczLCJleHAiOjE3NDYyNDI1NzMsInNpZCI6Iks1ZmpLWEJnU1hZV1QtMW85a0RscmxERTE3WnhxT2EzIn0.MBxl6VuFZKzwfGBRep4aWyYKu1f6kY0ZT15yC2Ba66gQVzmVWF88aA4IgLwuGkakLACTlHXu49N_lZykl-1JuuDl62d5eWkJDD642D_5iiYMVuK_0ac50ZXpQOiX25PBfXwDR1a4FE7YaL87fLhUaQtF8WXBHiSXMzKkAI-JjAQzC7EuHbtXVK-NP3aW4QncLEEqKhhSulB1oTGX4Y1lvm0kJTutPsvIVjPmkU2m95UxFEg9e0xL1E87rMMX35BvTwzgbEbRNjttzGN_W9fBqJSqYGFctt-0GcH8op2n5EwfnYoQo4iEI90CtYPa3AfMhb7MwBQnsUSS0hCX4pSzPw";
const tokenInfo = jwt.decode(jwtToken, { complete: true });
console.log(tokenInfo);
const key = await client.getSigningKey(tokenInfo?.header.kid);
const signingKey = key.getPublicKey();
const verified = jwt.verify(jwtToken, signingKey);
console.log("verified info:", verified);
```
--------------------------------------------------------------------------------
/src/lib/storage/in-memory.ts:
--------------------------------------------------------------------------------
```typescript
import { randomBytes } from "node:crypto";
/**
 * This file provides a simple in-memory storage implementation for the OAuth proxy. It is useful for local debugging
 * or when you don't want to set up a database. Do not use this in production.
*/
import type { OAuthClientInformationFull } from "@modelcontextprotocol/sdk/shared/auth.js";
import type {
GetAccessTokenFunction,
OAuthProxyStorageManager,
} from "../types";
// Local storage for the OAuth Proxy
const clients: Record<string, OAuthClientInformationFull> = {};
const accessTokens: Record<
string,
Awaited<ReturnType<GetAccessTokenFunction>>
> = {};
export const InMemoryStorage: OAuthProxyStorageManager = {
saveClient: async (clientId: string, data: OAuthClientInformationFull) => {
clients[clientId] = data;
},
getClient: async (clientId: string) => {
return clients[clientId];
},
  saveAccessToken: async (
    { accessToken, idToken, refreshToken, clientId, scope },
    expiresInSeconds: number,
  ) => {
    const locallyIssuedAccessToken = randomBytes(64).toString("hex");
    // save the real access token and other information under the "proxied" access token
    accessTokens[locallyIssuedAccessToken] = {
      scopes: scope?.split(" ") ?? [],
      clientId,
      idToken: idToken ?? "",
      refreshToken,
      accessToken: accessToken,
      expiresInSeconds,
    };
setTimeout(() => {
delete accessTokens[locallyIssuedAccessToken];
}, expiresInSeconds * 1000);
return locallyIssuedAccessToken;
},
getAccessToken: async (locallyIssuedAccessToken: string) => {
return accessTokens[locallyIssuedAccessToken];
},
};
export default InMemoryStorage;
```
--------------------------------------------------------------------------------
/src/lib/storage/redis.ts:
--------------------------------------------------------------------------------
```typescript
/**
* This file defines a Redis-based storage implementation for the OAuth proxy.
* This is useful for production use, as it allows the OAuth proxy to be horizontally scalable (if you can solve the transport in-memory issue...)
*
*/
import type { OAuthClientInformationFull } from "@modelcontextprotocol/sdk/shared/auth.js";
import Redis from "ioredis";
import createLogger from "logging";
import { randomBytes } from "node:crypto";
import type { OAuthProxyStorageManager } from "../types";
const logger = createLogger("RedisStorage", {
debugFunction: (...args) => console.log(...args),
});
const redis = new Redis(process.env.REDIS_DSN ?? "redis://localhost:6379");
redis.on("connecting", () => logger.debug("Redis connecting..."));
redis.on("connect", () => logger.info("Redis connected!"));
redis.on("error", (err) => logger.error("Redis error", err));
redis.on("close", () => logger.info("Redis closed!"));
export const RedisStorage: OAuthProxyStorageManager = {
saveClient: async (clientId: string, data: OAuthClientInformationFull) => {
await redis.set(clientId, JSON.stringify(data));
},
getClient: async (clientId: string) => {
const data = await redis.get(clientId);
return data ? JSON.parse(data) : undefined;
},
saveAccessToken: async (
{ accessToken, idToken, refreshToken, clientId, scope },
expiresInSeconds: number,
) => {
const locallyIssuedAccessToken = randomBytes(64).toString("hex");
await redis.setex(
locallyIssuedAccessToken,
expiresInSeconds,
      JSON.stringify({
        idToken,
        refreshToken,
        clientId,
        scopes: scope.split(" "),
        accessToken,
        // keep the payload consistent with GetAccessTokenFunction's return type
        expiresInSeconds,
      }),
);
return locallyIssuedAccessToken;
},
getAccessToken: async (locallyIssuedAccessToken: string) => {
const data = await redis.get(locallyIssuedAccessToken);
return data ? JSON.parse(data) : undefined;
},
};
export default RedisStorage;
```
--------------------------------------------------------------------------------
/src/app.stateless.ts:
--------------------------------------------------------------------------------
```typescript
import { requireBearerAuth } from "@modelcontextprotocol/sdk/server/auth/middleware/bearerAuth.js";
import { mcpAuthRouter } from "@modelcontextprotocol/sdk/server/auth/router.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { config } from "dotenv";
import express, {
type NextFunction,
type Request,
type Response,
} from "express";
import createLogger from "logging";
import { InvalidAccessTokenError } from "./lib/errors";
import { ExtendedProxyOAuthServerProvider } from "./lib/extended-oauth-proxy-provider";
import InMemoryStorage from "./lib/storage/in-memory";
import { RedisStorage } from "./lib/storage/redis";
import type { OAuthProxyStorageManager } from "./lib/types";
import { server } from "./mcp-server";
config();
const logger = createLogger(__filename.split("/").pop() ?? "", {
debugFunction: (...args) => {
console.log(...args);
},
});
const {
OAUTH_ISSUER_URL,
OAUTH_AUTHORIZATION_URL,
OAUTH_TOKEN_URL,
OAUTH_REVOCATION_URL,
OAUTH_REGISTRATION_URL,
THIS_HOSTNAME,
} = process.env;
if (
!OAUTH_ISSUER_URL ||
!OAUTH_AUTHORIZATION_URL ||
!OAUTH_TOKEN_URL ||
!OAUTH_REGISTRATION_URL ||
!THIS_HOSTNAME
) {
throw new Error("Missing environment variables");
}
// NOTE ideally we don't do this in memory since it's not horizontally scalable easily
// but these are stateful objects with connections from the client so they can't just
// be written to a database.
const transports: {
streamable: { [sessionId: string]: StreamableHTTPServerTransport };
} = {
streamable: {},
};
let storageStrategy: OAuthProxyStorageManager;
if (process.env.TOKEN_STORAGE_STRATEGY === "redis") {
logger.info("Using redis storage strategy!");
storageStrategy = RedisStorage;
} else {
logger.warn(
"Using in-memory storage strategy. DO NOT USE THIS IN PRODUCTION!",
);
storageStrategy = InMemoryStorage;
}
const app = express();
app.use(express.json());
app.use(express.urlencoded({ extended: false }));
// Set up the OAuth Proxy provider; configured in .env to use Naptha's Auth0 tenant
const proxyProvider = new ExtendedProxyOAuthServerProvider({
endpoints: {
authorizationUrl: `${OAUTH_AUTHORIZATION_URL}`,
tokenUrl: `${OAUTH_TOKEN_URL}`,
revocationUrl: OAUTH_REVOCATION_URL,
registrationUrl: `${OAUTH_REGISTRATION_URL}`,
},
storageManager: storageStrategy, // configure with process.env.TOKEN_STORAGE_STRATEGY
});
// Set up the middleware that verifies the issued bearer tokens. Note that these are NOT
// the auth tokens from the upstream IDP.
const bearerAuthMiddleware = requireBearerAuth({
provider: proxyProvider,
requiredScopes: [],
});
// Mount the router that handles the OAuth Proxy's endpoints, discovery, etc.
app.use(
mcpAuthRouter({
provider: proxyProvider,
issuerUrl: new URL(`${OAUTH_ISSUER_URL}`), // address of issuer, auth0
baseUrl: new URL(`${THIS_HOSTNAME}`), // address of local server
}),
);
/**
* Set up the streamable HTTP MCP router
*/
app.use("/", async (req, res, next) => {
logger.debug(req.method, req.url, req.headers, req.body);
await next();
logger.debug(res.headersSent, res.statusCode);
});
app.post("/mcp", bearerAuthMiddleware, async (req: Request, res: Response, next: NextFunction) => {
logger.debug("POST /mcp");
const transport: StreamableHTTPServerTransport =
new StreamableHTTPServerTransport({
sessionIdGenerator: undefined, // explicitly disable session ID generation since stateless
});
await server.connect(transport);
await transport.handleRequest(req, res, req.body);
res.on("close", () => {
console.log("Closing connection");
transport.close();
server.close();
});
});
app.use("/mcp", async (req: Request, res: Response, next: NextFunction) => {
  if (req.method === "GET" || req.method === "DELETE") {
    console.log(`Unsupported ${req.method} ${req.url} to stateless server`);
    res.status(405).json({
      jsonrpc: "2.0",
      error: {
        code: -32000,
        message: "Method not allowed.",
      },
      id: null,
    });
    return;
  }
  return next();
});
app.use((error: Error, req: Request, res: Response, next: NextFunction) => {
logger.info("Error", error);
if (!res.headersSent) {
if (error instanceof InvalidAccessTokenError) {
res.status(401).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "Invalid access token",
},
id: null,
});
} else {
res.status(500).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "Internal server error",
},
id: null,
});
}
} else {
logger.warn("headers already sent so no response sent");
}
});
const httpServer = app.listen(process.env.PORT ?? 5050, () => {
logger.info(`Server is running on port ${process.env.PORT ?? 5050}`);
});
//httpServer.setTimeout(1_000 * 60 * 60 * 6); // 6 hours
```
--------------------------------------------------------------------------------
/src/lib/extended-oauth-proxy-provider.ts:
--------------------------------------------------------------------------------
```typescript
import type { OAuthRegisteredClientsStore } from "@modelcontextprotocol/sdk/server/auth/clients.js";
import {
InvalidTokenError,
ServerError,
} from "@modelcontextprotocol/sdk/server/auth/errors.js";
import type { AuthorizationParams } from "@modelcontextprotocol/sdk/server/auth/provider.js";
import {
ProxyOAuthServerProvider,
type ProxyOptions,
} from "@modelcontextprotocol/sdk/server/auth/providers/proxyProvider.js";
import {
type OAuthClientInformationFull,
OAuthClientInformationFullSchema,
type OAuthTokens,
OAuthTokensSchema,
} from "@modelcontextprotocol/sdk/shared/auth.js";
import type { Response } from "express";
import createLogger from "logging";
import type { OAuthProxyStorageManager } from "./types";
const logger = createLogger(__filename.split("/").pop() ?? "", {
debugFunction: (...args) => {
console.log(...args);
},
});
export type ExtendedOAuthTokens = OAuthTokens & {
id_token?: string;
};
/**
* This type extends the ProxyOptions to add a saveClient method.
* This can be provided by the server implementation for storing client information.
*/
export type ExtendedProxyOptions = Omit<
ProxyOptions,
"getClient" | "verifyAccessToken"
> & {
storageManager: OAuthProxyStorageManager;
};
/**
* This class extends the ProxyOAuthServerProvider to add a saveClient method.
* That can be provided by the server implementation for storing client information.
*
* This way we don't have to hard-code return values like in the example
*/
export class ExtendedProxyOAuthServerProvider extends ProxyOAuthServerProvider {
public readonly storageManager: OAuthProxyStorageManager;
constructor(options: ExtendedProxyOptions) {
// call the super constructor, but instead of having the user specify a custom getClient function like in the middleware,
// we'll use the storageManager.getClient function
super({
...options,
getClient: options.storageManager.getClient,
verifyAccessToken: async (locallyIssuedAccessToken: string) => {
const data = await this.storageManager.getAccessToken(
locallyIssuedAccessToken,
);
if (!data) {
// This will return a 401 to the client, resulting in auth
throw new InvalidTokenError("Invalid access token");
}
return {
token: locallyIssuedAccessToken, // NOT the upstream IDP token.
scopes: data.scopes,
clientId: data.clientId,
expiresInSeconds: data.expiresInSeconds,
};
},
});
this.storageManager = options.storageManager;
}
public override get clientsStore(): OAuthRegisteredClientsStore {
const registrationUrl = this._endpoints.registrationUrl;
return {
getClient: this.storageManager.getClient,
...(registrationUrl && {
registerClient: async (client: OAuthClientInformationFull) => {
const response = await fetch(registrationUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(client),
});
if (!response.ok) {
throw new ServerError(
`Client registration failed: ${response.status}`,
);
}
const data = await response.json();
const parsedClient = OAuthClientInformationFullSchema.parse(data);
/**
* NOTE this is the only change to this function from the original implementation
 * There's nowhere else that this information can be accessed.
*
* See @file{src/server/auth/handlers/register.ts}
*/
await this.storageManager.saveClient(
parsedClient.client_id,
parsedClient,
);
return parsedClient;
},
}),
};
}
/**
* Using this overridden method so we can do some logging and stuff
*/
public override async exchangeAuthorizationCode(
client: OAuthClientInformationFull,
authorizationCode: string,
codeVerifier?: string,
): Promise<OAuthTokens> {
const redirectUri = client.redirect_uris[0];
if (redirectUri) {
logger.debug(
"Exchanging authorization code with client redirect URI: ",
redirectUri,
authorizationCode,
codeVerifier,
);
} else {
logger.error(
"No redirect URI found for client",
client.client_id,
client,
);
throw new ServerError("No redirect URI found for client");
}
const params = new URLSearchParams({
grant_type: "authorization_code",
client_id: client.client_id,
redirect_uri: redirectUri,
code: authorizationCode,
});
if (client.client_secret) {
params.append("client_secret", client.client_secret);
}
if (codeVerifier) {
params.append("code_verifier", codeVerifier);
}
const response = await fetch(this._endpoints.tokenUrl, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
},
body: params.toString(),
});
if (!response.ok) {
logger.error(
"Token exchange failed",
response.status,
response.statusText,
);
logger.error("JSON:", await response.json());
throw new ServerError(`Token exchange failed: ${response.status}`);
}
const data = (await response.json()) as ExtendedOAuthTokens;
logger.debug("Saving access token", data.access_token);
const locallyIssuedAccessToken = await this.storageManager.saveAccessToken(
{
accessToken: data.access_token,
idToken: data.id_token,
refreshToken: data.refresh_token,
clientId: client.client_id,
scope: data.scope ?? "",
},
data.expires_in ?? 86400, // default to 1 day
);
return OAuthTokensSchema.parse({
...data,
access_token: locallyIssuedAccessToken,
});
}
public override async authorize(
client: OAuthClientInformationFull,
params: AuthorizationParams,
res: Response,
): Promise<void> {
// Start with required OAuth parameters
const targetUrl = new URL(this._endpoints.authorizationUrl);
const searchParams = new URLSearchParams({
client_id: client.client_id,
response_type: "code",
redirect_uri: params.redirectUri,
code_challenge: params.codeChallenge,
code_challenge_method: "S256",
});
logger.debug("authorize", {
client,
params,
targetUrl,
searchParams,
});
// Add optional standard OAuth parameters
if (params.state) searchParams.set("state", params.state);
searchParams.set(
"scope",
params.scopes?.length
? params.scopes.join(" ")
: ["email", "profile", "openid"].join(" "),
);
targetUrl.search = searchParams.toString();
res.redirect(targetUrl.toString());
}
}
```
--------------------------------------------------------------------------------
/src/app.stateful.ts:
--------------------------------------------------------------------------------
```typescript
import { requireBearerAuth } from "@modelcontextprotocol/sdk/server/auth/middleware/bearerAuth.js";
import { mcpAuthRouter } from "@modelcontextprotocol/sdk/server/auth/router.js";
import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { isInitializeRequest } from "@modelcontextprotocol/sdk/types.js";
import { config } from "dotenv";
import express, {
type NextFunction,
type Request,
type Response,
} from "express";
import createLogger from "logging";
import { randomUUID } from "node:crypto";
import { InvalidAccessTokenError } from "./lib/errors";
import { ExtendedProxyOAuthServerProvider } from "./lib/extended-oauth-proxy-provider";
import InMemoryStorage from "./lib/storage/in-memory";
import { RedisStorage } from "./lib/storage/redis";
import type { OAuthProxyStorageManager } from "./lib/types";
import { server } from "./mcp-server";
config();
const logger = createLogger(__filename.split("/").pop() ?? "", {
debugFunction: (...args) => {
console.log(...args);
},
});
const {
OAUTH_ISSUER_URL,
OAUTH_AUTHORIZATION_URL,
OAUTH_TOKEN_URL,
OAUTH_REVOCATION_URL,
OAUTH_REGISTRATION_URL,
THIS_HOSTNAME,
} = process.env;
if (
!OAUTH_ISSUER_URL ||
!OAUTH_AUTHORIZATION_URL ||
!OAUTH_TOKEN_URL ||
!OAUTH_REGISTRATION_URL ||
!THIS_HOSTNAME
) {
throw new Error("Missing environment variables");
}
// NOTE ideally we don't do this in memory since it's not horizontally scalable easily
// but these are stateful objects with connections from the client so they can't just
// be written to a database.
const transports: {
sse: { [sessionId: string]: SSEServerTransport };
streamable: { [sessionId: string]: StreamableHTTPServerTransport };
} = {
sse: {},
streamable: {},
};
let storageStrategy: OAuthProxyStorageManager;
if (process.env.TOKEN_STORAGE_STRATEGY === "redis") {
logger.info("Using redis storage strategy!");
storageStrategy = RedisStorage;
} else {
logger.warn(
"Using in-memory storage strategy. DO NOT USE THIS IN PRODUCTION!",
);
storageStrategy = InMemoryStorage;
}
const app = express();
app.use(express.json());
app.use(express.urlencoded({ extended: false }));
// Set up the OAuth Proxy provider; configured in .env to use Naptha's Auth0 tenant
const proxyProvider = new ExtendedProxyOAuthServerProvider({
endpoints: {
authorizationUrl: `${OAUTH_AUTHORIZATION_URL}`,
tokenUrl: `${OAUTH_TOKEN_URL}`,
revocationUrl: OAUTH_REVOCATION_URL,
registrationUrl: `${OAUTH_REGISTRATION_URL}`,
},
storageManager: storageStrategy, // configure with process.env.TOKEN_STORAGE_STRATEGY
});
// Set up the middleware that verifies the issued bearer tokens. Note that these are NOT
// the auth tokens from the upstream IDP.
const bearerAuthMiddleware = requireBearerAuth({
provider: proxyProvider,
requiredScopes: [],
});
// Mount the router that handles the OAuth Proxy's endpoints, discovery, etc.
app.use(
mcpAuthRouter({
provider: proxyProvider,
issuerUrl: new URL(`${OAUTH_ISSUER_URL}`), // address of issuer, auth0
baseUrl: new URL(`${THIS_HOSTNAME}`), // address of local server
}),
);
/**
* Set up the SSE MCP router
*/
app.get("/sse", bearerAuthMiddleware, async (req, res) => {
logger.debug("SSE headers:", req.headers);
logger.debug("SSE body:", req.body);
const transport = new SSEServerTransport("/messages", res);
transports.sse[transport.sessionId] = transport;
res.setTimeout(1_000 * 60 * 60 * 6); // 6 hours
res.on("close", () => {
delete transports.sse[transport.sessionId];
});
await server.connect(transport);
});
// Legacy message endpoint for older clients
app.post("/messages", bearerAuthMiddleware, async (req, res) => {
const sessionId = req.query.sessionId as string;
logger.debug("SSE", sessionId, "Received message");
const transport = transports.sse[sessionId];
if (transport) {
logger.debug("SSE", sessionId, "Transport found for sessionId");
await transport.handlePostMessage(req, res, req.body);
logger.debug(
"SSE",
sessionId,
"Message handled by transport for sessionId",
);
} else {
logger.warn("SSE", sessionId, "No transport found for sessionId");
res.status(400).send("No transport found for sessionId");
}
});
/**
* Set up the streamable HTTP MCP router
*/
app.use("/", async (req, res, next) => {
logger.debug(req.method, req.url, req.headers, req.body);
await next();
logger.debug(res.headersSent, res.statusCode);
});
app.post("/mcp", bearerAuthMiddleware, async (req, res, next) => {
const sessionId = req.headers["mcp-session-id"] as string | undefined;
logger.info("Streamable", "Received message for session", sessionId);
logger.debug(req.body);
logger.debug(
"Streamable",
"is initialize request?",
isInitializeRequest(req.body),
);
let transport: StreamableHTTPServerTransport;
// If the sessionID is set and it's associated with a transport, use it
if (sessionId && transports.streamable[sessionId]) {
transport = transports.streamable[sessionId];
logger.info("Streamable", "Transport found for sessionId", sessionId);
// if the session id IS NOT available and it's an initialize request, set up a new one
} else if (!sessionId && isInitializeRequest(req.body)) {
logger.info("Streamable", "Setting up a new transport");
    // Create a new transport with a UUID as session ID, saving it to the transports object
transport = new StreamableHTTPServerTransport({
sessionIdGenerator: randomUUID,
onsessioninitialized(sessionId) {
transports.streamable[sessionId] = transport;
},
});
transport.onclose = () => {
if (transport.sessionId)
delete transports.streamable[transport.sessionId];
};
logger.info("Streamable", transport.sessionId, "Transport constructed");
// connect to the new server
await server.connect(transport);
logger.info(
"Streamable",
transport.sessionId,
"Server connected to transport",
);
} else {
logger.warn("Streamable", sessionId, "No transport found for sessionId");
res.status(400).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "No transport found for sessionId",
},
id: null,
});
return next();
}
await transport.handleRequest(req, res, req.body);
logger.info(
"Streamable",
"Message handled by transport for session",
sessionId,
);
});
// Reusable handler for GET and delete requests
const handleSessionRequest = async (
req: Request,
res: Response,
next: NextFunction,
) => {
const sessionId = req.headers["mcp-session-id"] as string | undefined;
if (!sessionId || !transports.streamable[sessionId]) {
logger.warn("Streamable", sessionId, "No transport found for sessionId");
res.status(400).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "No transport found for sessionId",
},
id: null,
});
return next();
}
const transport = transports.streamable[sessionId];
await transport.handleRequest(req, res);
};
app.get("/mcp", handleSessionRequest);
app.delete("/mcp", handleSessionRequest);
app.use((error: Error, req: Request, res: Response, next: NextFunction) => {
logger.info("Error", error);
if (!res.headersSent) {
if (error instanceof InvalidAccessTokenError) {
res.status(401).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "Invalid access token",
},
id: null,
});
} else {
res.status(500).json({
jsonrpc: "2.0",
error: {
code: -32_000,
message: "Internal server error",
},
id: null,
});
}
} else {
logger.warn("headers already sent so no response sent");
}
});
const httpServer = app.listen(process.env.PORT ?? 5050, () => {
logger.info(`Server is running on port ${process.env.PORT ?? 5050}`);
});
//httpServer.setTimeout(1_000 * 60 * 60 * 6); // 6 hours
```