# calvernaz/alphavantage
This is page 1 of 4. Use http://codebase.md/calvernaz/alphavantage?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .bumpversion.cfg
├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── publish.yml
├── .gitignore
├── .python-version
├── CONTRIBUTING.md
├── deploy
│   └── aws-stateless-mcp-lambda
│       ├── .aws-sam
│       │   └── build.toml
│       ├── deploy.sh
│       ├── lambda_function.py
│       ├── README.md
│       ├── requirements.txt
│       └── template.yaml
├── DEVELOPMENT.md
├── Dockerfile
├── LICENSE
├── pyproject.toml
├── pytest.ini
├── README.md
├── scripts
│   └── publish.py
├── smithery.yaml
├── src
│   ├── alphavantage_mcp_client
│   │   └── client.py
│   └── alphavantage_mcp_server
│       ├── __init__.py
│       ├── api.py
│       ├── oauth.py
│       ├── prompts.py
│       ├── response_utils.py
│       ├── server.py
│       ├── telemetry_bootstrap.py
│       ├── telemetry_instrument.py
│       └── tools.py
├── tests
│   ├── test_api.py
│   ├── test_http_mcp_client.py
│   ├── test_http_transport.py
│   ├── test_integration.py
│   ├── test_stdio_transport.py
│   └── test_telemetry.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
1 | 3.12.7
2 | 
```

--------------------------------------------------------------------------------
/.bumpversion.cfg:
--------------------------------------------------------------------------------

```
1 | [bumpversion]
2 | current_version = 0.3.24
3 | commit = True
4 | tag = True
5 | 
6 | [bumpversion:file:pyproject.toml]
7 | search = version = "{current_version}"
8 | replace = version = "{new_version}"
9 | 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
  1 | .idea
  2 | aws-chalice
  3 | # Logs
  4 | logs
  5 | *.log
  6 | npm-debug.log*
  7 | yarn-debug.log*
  8 | yarn-error.log*
  9 | lerna-debug.log*
 10 | .pnpm-debug.log*
 11 | 
 12 | # Diagnostic reports (https://nodejs.org/api/report.html)
 13 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
 14 | 
 15 | # Runtime data
 16 | pids
 17 | *.pid
 18 | *.seed
 19 | *.pid.lock
 20 | 
 21 | # Directory for instrumented libs generated by jscoverage/JSCover
 22 | lib-cov
 23 | 
 24 | # Coverage directory used by tools like istanbul
 25 | coverage
 26 | *.lcov
 27 | 
 28 | # nyc test coverage
 29 | .nyc_output
 30 | 
 31 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
 32 | .grunt
 33 | 
 34 | # Bower dependency directory (https://bower.io/)
 35 | bower_components
 36 | 
 37 | # node-waf configuration
 38 | .lock-wscript
 39 | 
 40 | # Compiled binary addons (https://nodejs.org/api/addons.html)
 41 | build/Release
 42 | 
 43 | # Dependency directories
 44 | node_modules/
 45 | jspm_packages/
 46 | 
 47 | # Snowpack dependency directory (https://snowpack.dev/)
 48 | web_modules/
 49 | 
 50 | # TypeScript cache
 51 | *.tsbuildinfo
 52 | 
 53 | # Optional npm cache directory
 54 | .npm
 55 | 
 56 | # Optional eslint cache
 57 | .eslintcache
 58 | 
 59 | # Optional stylelint cache
 60 | .stylelintcache
 61 | 
 62 | # Microbundle cache
 63 | .rpt2_cache/
 64 | .rts2_cache_cjs/
 65 | .rts2_cache_es/
 66 | .rts2_cache_umd/
 67 | 
 68 | # Optional REPL history
 69 | .node_repl_history
 70 | 
 71 | # Output of 'npm pack'
 72 | *.tgz
 73 | 
 74 | # Yarn Integrity file
 75 | .yarn-integrity
 76 | 
 77 | # dotenv environment variable files
 78 | .env
 79 | .env.development.local
 80 | .env.test.local
 81 | .env.production.local
 82 | .env.local
 83 | .env
 84 | 
 85 | # parcel-bundler cache (https://parceljs.org/)
 86 | .cache
 87 | .parcel-cache
 88 | 
 89 | # Next.js build output
 90 | .next
 91 | out
 92 | 
 93 | # Nuxt.js build / generate output
 94 | .nuxt
 95 | dist
 96 | 
 97 | # Gatsby files
 98 | .cache/
 99 | # Comment in the public line in if your project uses Gatsby and not Next.js
100 | # https://nextjs.org/blog/next-9-1#public-directory-support
101 | # public
102 | 
103 | # vuepress build output
104 | .vuepress/dist
105 | 
106 | # vuepress v2.x temp and cache directory
107 | .temp
108 | .cache
109 | 
110 | # Docusaurus cache and generated files
111 | .docusaurus
112 | 
113 | # Serverless directories
114 | .serverless/
115 | 
116 | # FuseBox cache
117 | .fusebox/
118 | 
119 | # DynamoDB Local files
120 | .dynamodb/
121 | 
122 | # TernJS port file
123 | .tern-port
124 | 
125 | # Stores VSCode versions used for testing VSCode extensions
126 | .vscode-test
127 | 
128 | # yarn v2
129 | .yarn/cache
130 | .yarn/unplugged
131 | .yarn/build-state.yml
132 | .yarn/install-state.gz
133 | .pnp.*
134 | 
135 | build/
136 | 
137 | gcp-oauth.keys.json
138 | .*-server-credentials.json
139 | 
140 | # Byte-compiled / optimized / DLL files
141 | __pycache__/
142 | *.py[cod]
143 | *$py.class
144 | 
145 | # C extensions
146 | *.so
147 | 
148 | # Distribution / packaging
149 | .Python
150 | build/
151 | develop-eggs/
152 | dist/
153 | downloads/
154 | eggs/
155 | .eggs/
156 | lib/
157 | lib64/
158 | parts/
159 | sdist/
160 | var/
161 | wheels/
162 | share/python-wheels/
163 | *.egg-info/
164 | .installed.cfg
165 | *.egg
166 | MANIFEST
167 | 
168 | # PyInstaller
169 | #  Usually these files are written by a python script from a template
170 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
171 | *.manifest
172 | *.spec
173 | 
174 | # Installer logs
175 | pip-log.txt
176 | pip-delete-this-directory.txt
177 | 
178 | # Unit test / coverage reports
179 | htmlcov/
180 | .tox/
181 | .nox/
182 | .coverage
183 | .coverage.*
184 | .cache
185 | nosetests.xml
186 | coverage.xml
187 | *.cover
188 | *.py,cover
189 | .hypothesis/
190 | .pytest_cache/
191 | cover/
192 | 
193 | # Translations
194 | *.mo
195 | *.pot
196 | 
197 | # Django stuff:
198 | *.log
199 | local_settings.py
200 | db.sqlite3
201 | db.sqlite3-journal
202 | 
203 | # Flask stuff:
204 | instance/
205 | .webassets-cache
206 | 
207 | # Scrapy stuff:
208 | .scrapy
209 | 
210 | # Sphinx documentation
211 | docs/_build/
212 | 
213 | # PyBuilder
214 | .pybuilder/
215 | target/
216 | 
217 | # Jupyter Notebook
218 | .ipynb_checkpoints
219 | 
220 | # IPython
221 | profile_default/
222 | ipython_config.py
223 | 
224 | # pyenv
225 | #   For a library or package, you might want to ignore these files since the code is
226 | #   intended to run in multiple environments; otherwise, check them in:
227 | # .python-version
228 | 
229 | # pipenv
230 | #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
231 | #   However, in case of collaboration, if having platform-specific dependencies or dependencies
232 | #   having no cross-platform support, pipenv may install dependencies that don't work, or not
233 | #   install all needed dependencies.
234 | #Pipfile.lock
235 | 
236 | # poetry
237 | #   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
238 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
239 | #   commonly ignored for libraries.
240 | #   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
241 | #poetry.lock
242 | 
243 | # pdm
244 | #   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
245 | #pdm.lock
246 | #   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
247 | #   in version control.
248 | #   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
249 | .pdm.toml
250 | .pdm-python
251 | .pdm-build/
252 | 
253 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
254 | __pypackages__/
255 | 
256 | # Celery stuff
257 | celerybeat-schedule
258 | celerybeat.pid
259 | 
260 | # SageMath parsed files
261 | *.sage.py
262 | 
263 | # Environments
264 | .env
265 | .venv
266 | env/
267 | venv/
268 | ENV/
269 | env.bak/
270 | venv.bak/
271 | 
272 | # Spyder project settings
273 | .spyderproject
274 | .spyproject
275 | 
276 | # Rope project settings
277 | .ropeproject
278 | 
279 | # mkdocs documentation
280 | /site
281 | 
282 | # mypy
283 | .mypy_cache/
284 | .dmypy.json
285 | dmypy.json
286 | 
287 | # Pyre type checker
288 | .pyre/
289 | 
290 | # pytype static type analyzer
291 | .pytype/
292 | 
293 | # Cython debug symbols
294 | cython_debug/
295 | 
296 | .DS_Store
297 | 
298 | # PyCharm
299 | #  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
300 | #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
301 | #  and can be added to the global gitignore or merged into this file.  For a more nuclear
302 | #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
303 | #.idea/
304 | windsurfrules.md
305 | 
306 | 
307 | deploy/aws-chalice/vendor
308 | deploy/aws-chalice/.chalice
```

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # AWS Stateless MCP Lambda Deployment
  2 | 
  3 | This deployment uses the **stateless MCP pattern** from [aws-samples/sample-serverless-mcp-servers](https://github.com/aws-samples/sample-serverless-mcp-servers/tree/main/stateless-mcp-on-lambda-python) to deploy the AlphaVantage MCP Server on AWS Lambda.
  4 | 
  5 | ## 🎯 Why Stateless MCP?
  6 | 
  7 | Unlike our previous attempts with Chalice and Lambda Web Adapter, this approach is specifically designed for **stateless MCP servers** that work perfectly with Lambda's execution model:
  8 | 
  9 | - ✅ **No session state management** - Each request is independent
 10 | - ✅ **Perfect for Lambda** - Stateless execution model matches Lambda
 11 | - ✅ **Horizontal scaling** - Seamless elasticity and load distribution
 12 | - ✅ **AWS-recommended pattern** - Based on official AWS samples
 13 | 
 14 | ## 🏗️ Architecture
 15 | 
 16 | ```
 17 | Internet → API Gateway → Lambda Function → AlphaVantage MCP Server → AlphaVantage API
 18 | ```
 19 | 
 20 | Each Lambda invocation (see the sketch after this list):
 21 | 1. Receives MCP JSON-RPC request via API Gateway
 22 | 2. Calls appropriate AlphaVantage MCP server function directly
 23 | 3. Returns MCP-compliant JSON response
 24 | 4. No persistent connections or session state required
 25 | 
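The sketch below illustrates this flow. It is not the repository's `lambda_function.py` (that file is not shown on this page): it assumes an API Gateway proxy event with the JSON-RPC request in `body`, and a placeholder dispatcher stands in for the real AlphaVantage tool calls.

```python
# Illustrative sketch of the stateless pattern only; the real handler lives in
# lambda_function.py and dispatches into the AlphaVantage MCP server instead of
# the placeholder _handle() below.
import json


def _handle(method: str, params: dict) -> dict:
    """Placeholder dispatch for a single JSON-RPC method."""
    if method == "initialize":
        return {"protocolVersion": "2024-11-05", "capabilities": {}}
    if method == "tools/list":
        return {"tools": []}  # the real server returns its tool definitions here
    raise ValueError(f"Method not found: {method}")


def lambda_handler(event, context):
    request = json.loads(event.get("body") or "{}")
    try:
        body = {
            "jsonrpc": "2.0",
            "id": request.get("id"),
            "result": _handle(request.get("method", ""), request.get("params") or {}),
        }
    except Exception as exc:
        body = {
            "jsonrpc": "2.0",
            "id": request.get("id"),
            "error": {"code": -32601, "message": str(exc)},
        }
    # Nothing is kept between invocations: each request is parsed, answered, and forgotten.
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(body),
    }
```
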
 26 | ## 🚀 Quick Start
 27 | 
 28 | ### Prerequisites
 29 | 
 30 | ```bash
 31 | # Install AWS CLI
 32 | pip install awscli
 33 | 
 34 | # Install AWS SAM CLI
 35 | pip install aws-sam-cli
 36 | 
 37 | # Configure AWS credentials
 38 | aws configure
 39 | ```
 40 | 
 41 | ### Deploy
 42 | 
 43 | ```bash
 44 | # Set your AlphaVantage API key
 45 | export ALPHAVANTAGE_API_KEY=your_api_key_here
 46 | 
 47 | # Optional: Enable OAuth 2.1
 48 | export OAUTH_ENABLED=true
 49 | export OAUTH_AUTHORIZATION_SERVER_URL=https://your-oauth-server.com
 50 | 
 51 | # Deploy
 52 | cd deploy/aws-stateless-mcp-lambda
 53 | chmod +x deploy.sh
 54 | ./deploy.sh
 55 | ```
 56 | 
 57 | ## 🧪 Testing
 58 | 
 59 | After deployment, test with these commands:
 60 | 
 61 | ### 1. Initialize MCP Session
 62 | ```bash
 63 | curl -X POST 'https://your-api-id.execute-api.region.amazonaws.com/prod/mcp' \
 64 |   -H 'Content-Type: application/json' \
 65 |   -H 'Accept: application/json' \
 66 |   -d '{
 67 |     "jsonrpc": "2.0",
 68 |     "id": 1,
 69 |     "method": "initialize",
 70 |     "params": {
 71 |       "protocolVersion": "2024-11-05",
 72 |       "capabilities": {},
 73 |       "clientInfo": {"name": "test-client", "version": "1.0.0"}
 74 |     }
 75 |   }'
 76 | ```
 77 | 
 78 | ### 2. List Available Tools
 79 | ```bash
 80 | curl -X POST 'https://your-api-id.execute-api.region.amazonaws.com/prod/mcp' \
 81 |   -H 'Content-Type: application/json' \
 82 |   -H 'Accept: application/json' \
 83 |   -d '{
 84 |     "jsonrpc": "2.0",
 85 |     "id": 2,
 86 |     "method": "tools/list"
 87 |   }'
 88 | ```
 89 | 
 90 | ### 3. Call a Tool
 91 | ```bash
 92 | curl -X POST 'https://your-api-id.execute-api.region.amazonaws.com/prod/mcp' \
 93 |   -H 'Content-Type: application/json' \
 94 |   -H 'Accept: application/json' \
 95 |   -d '{
 96 |     "jsonrpc": "2.0",
 97 |     "id": 3,
 98 |     "method": "tools/call",
 99 |     "params": {
100 |       "name": "stock_quote",
101 |       "arguments": {"symbol": "AAPL"}
102 |     }
103 |   }'
104 | ```
105 | 
106 | ## 🔐 OAuth 2.1 Support
107 | 
108 | Enable OAuth authentication by setting environment variables:
109 | 
110 | ```bash
111 | export OAUTH_ENABLED=true
112 | export OAUTH_AUTHORIZATION_SERVER_URL=https://your-oauth-server.com
113 | export OAUTH_CLIENT_ID=your_client_id
114 | export OAUTH_CLIENT_SECRET=your_client_secret
115 | ```
116 | 
117 | When OAuth is enabled, include Bearer token in requests:
118 | 
119 | ```bash
120 | curl -X POST 'https://your-api-id.execute-api.region.amazonaws.com/prod/mcp' \
121 |   -H 'Content-Type: application/json' \
122 |   -H 'Authorization: Bearer your_access_token' \
123 |   -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}'
124 | ```
125 | 
126 | ## 📊 Available Tools
127 | 
128 | The AlphaVantage MCP Server provides 50+ financial data tools:
129 | 
130 | ### Stock Data
131 | - `get_stock_quote` - Real-time stock quotes
132 | - `get_intraday_data` - Intraday time series
133 | - `get_daily_data` - Daily time series
134 | - `get_weekly_data` - Weekly time series
135 | - `get_monthly_data` - Monthly time series
136 | 
137 | ### Technical Indicators
138 | - `get_sma` - Simple Moving Average
139 | - `get_ema` - Exponential Moving Average
140 | - `get_rsi` - Relative Strength Index
141 | - `get_macd` - MACD indicator
142 | - And 30+ more technical indicators
143 | 
144 | ### Fundamental Data
145 | - `get_company_overview` - Company fundamentals
146 | - `get_income_statement` - Income statements
147 | - `get_balance_sheet` - Balance sheets
148 | - `get_cash_flow` - Cash flow statements
149 | 
150 | ### Economic Data
151 | - `get_gdp` - GDP data
152 | - `get_inflation` - Inflation rates
153 | - `get_unemployment` - Unemployment rates
154 | - And more economic indicators
155 | 
156 | ## 🔍 Monitoring
157 | 
158 | ### CloudWatch Logs
159 | ```bash
160 | # Follow Lambda logs
161 | aws logs tail /aws/lambda/alphavantage-stateless-mcp-alphavantage-mcp --follow
162 | 
163 | # Get function metrics
164 | aws lambda get-function --function-name alphavantage-stateless-mcp-alphavantage-mcp
165 | ```
166 | 
167 | ### API Gateway Metrics
168 | - Monitor request count, latency, and errors in CloudWatch
169 | - Set up alarms for high error rates or latency
170 | 
171 | ## 🛠️ Troubleshooting
172 | 
173 | ### Common Issues
174 | 
175 | **1. Import Errors**
176 | ```
177 | ModuleNotFoundError: No module named 'alphavantage_mcp_server'
178 | ```
179 | - **Solution**: Ensure the Lambda layer is properly built with source code
180 | 
181 | **2. API Key Errors**
182 | ```
183 | {"error": "API key required"}
184 | ```
185 | - **Solution**: Verify `ALPHAVANTAGE_API_KEY` environment variable is set
186 | 
187 | **3. Tool Not Found**
188 | ```
189 | {"error": {"code": -32601, "message": "Method not found"}}
190 | ```
191 | - **Solution**: Check tool name spelling and availability with `tools/list`
192 | 
193 | ### Debug Mode
194 | 
195 | Enable debug logging by setting environment variable:
196 | ```bash
197 | export DEBUG=true
198 | ```
199 | 
200 | ## 💰 Cost Estimation
201 | 
202 | ### Lambda Costs
203 | - **Requests**: $0.20 per 1M requests
204 | - **Duration**: $0.0000166667 per GB-second
205 | - **Example**: 10,000 requests/month ≈ $2-5/month
206 | 
207 | ### API Gateway Costs
208 | - **REST API**: $3.50 per million API calls
209 | - **Data transfer**: $0.09 per GB
210 | 
211 | ### Total Estimated Cost
212 | - **Light usage** (1K requests/month): ~$1/month
213 | - **Moderate usage** (10K requests/month): ~$5/month
214 | - **Heavy usage** (100K requests/month): ~$40/month
215 | 
216 | ## 🧹 Cleanup
217 | 
218 | Remove all AWS resources:
219 | 
220 | ```bash
221 | aws cloudformation delete-stack --stack-name alphavantage-stateless-mcp
222 | ```
223 | 
224 | 
225 | ## 📚 References
226 | 
227 | - [AWS Sample Serverless MCP Servers](https://github.com/aws-samples/sample-serverless-mcp-servers)
228 | - [MCP Specification](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports)
229 | - [AlphaVantage API Documentation](https://www.alphavantage.co/documentation/)
230 | - [AWS Lambda Documentation](https://docs.aws.amazon.com/lambda/)
231 | 
232 | ## 🤝 Contributing
233 | 
234 | This deployment is based on the official AWS sample pattern. For improvements:
235 | 
236 | 1. Test changes locally with SAM
237 | 2. Update the Lambda function code
238 | 3. Redeploy with `./deploy.sh`
239 | 4. Verify with test commands
240 | 
241 | ## 📄 License
242 | 
243 | This deployment follows the same MIT-0 license as the AWS sample repository.
244 | 
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # ✅ Official Alpha Vantage MCP Server
  2 | 
  3 | [![smithery badge](https://smithery.ai/badge/@calvernaz/alphavantage)](https://smithery.ai/server/@calvernaz/alphavantage)
  4 | [![Verified on MseeP](https://mseep.ai/badge.svg)](https://mseep.ai/app/b76d0966-edd1-46fd-9cfb-b29a6d8cb563)
  5 | 
  6 | An MCP server for the Alpha Vantage stock market data API.
  7 | 
  8 | **MCP Server URL**: https://mcp.alphavantage.co
  9 | 
 10 | **PyPI**: https://pypi.org/project/alphavantage-mcp/
 11 | 
 12 | ## Configuration
 13 | 
 14 | ### Getting an API Key
 15 | 1. Sign up for a [Free Alphavantage API key](https://www.alphavantage.co/support/#api-key)
 16 | 2. Add the API key to your environment variables as `ALPHAVANTAGE_API_KEY`
 17 | 
 18 | 
 19 | ## Installation
 20 | 
 21 | ### Option 1: Using uvx (Recommended)
 22 | 
 23 | The easiest way to use the AlphaVantage MCP server is with `uvx`:
 24 | 
 25 | ```bash
 26 | # Run directly without installation
 27 | uvx alphavantage-mcp
 28 | 
 29 | # Or with specific arguments
 30 | uvx alphavantage-mcp --server http --port 8080
 31 | ```
 32 | 
 33 | ### Option 2: Using pip
 34 | 
 35 | ```bash
 36 | pip install alphavantage-mcp
 37 | alphavantage-mcp
 38 | ```
 39 | 
 40 | ### Option 3: From source
 41 | 
 42 | ```bash
 43 | git clone https://github.com/calvernaz/alphavantage.git
 44 | cd alphavantage
 45 | uv run alphavantage
 46 | ```
 47 | 
 48 | ## Server Modes
 49 | 
 50 | The AlphaVantage server can run in two different modes:
 51 | 
 52 | ### Stdio Server (Default)
 53 | This is the standard MCP server mode used for tools like Claude Desktop.
 54 | 
 55 | ```bash
 56 | alphavantage
 57 | # or explicitly:
 58 | alphavantage --server stdio
 59 | ```
 60 | 
 61 | ### Streamable HTTP Server
 62 | This mode provides real-time updates via HTTP streaming.
 63 | 
 64 | ```bash
 65 | alphavantage --server http --port 8080
 66 | ```
 67 | 
 68 | ### Streamable HTTP Server with OAuth 2.1 Authentication
 69 | This mode adds OAuth 2.1 authentication to the HTTP server, following the MCP specification for secure access.
 70 | 
 71 | ```bash
 72 | alphavantage --server http --port 8080 --oauth
 73 | ```
 74 | 
 75 | #### OAuth Configuration
 76 | 
 77 | When using the `--oauth` flag, the server requires OAuth 2.1 configuration via environment variables:
 78 | 
 79 | **Required Environment Variables:**
 80 | ```bash
 81 | export OAUTH_AUTHORIZATION_SERVER_URL="https://your-auth-server.com/realms/your-realm"
 82 | export OAUTH_RESOURCE_SERVER_URI="https://your-mcp-server.com"
 83 | ```
 84 | 
 85 | **Optional Environment Variables:**
 86 | ```bash
 87 | # Token validation method (default: jwt)
 88 | export OAUTH_TOKEN_VALIDATION_METHOD="jwt"  # or "introspection"
 89 | 
 90 | # For JWT validation
 91 | export OAUTH_JWT_PUBLIC_KEY="-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"
 92 | export OAUTH_JWT_ALGORITHM="RS256"  # default
 93 | 
 94 | # For token introspection validation
 95 | export OAUTH_INTROSPECTION_ENDPOINT="https://your-auth-server.com/realms/your-realm/protocol/openid-connect/token/introspect"
 96 | export OAUTH_INTROSPECTION_CLIENT_ID="your-client-id"
 97 | export OAUTH_INTROSPECTION_CLIENT_SECRET="your-client-secret"
 98 | 
 99 | # Optional: Required scopes (space-separated)
100 | export OAUTH_REQUIRED_SCOPES="mcp:access mcp:read"
101 | 
102 | # Optional: Enable session binding for additional security (default: true)
103 | export OAUTH_SESSION_BINDING_ENABLED="true"
104 | ```
105 | 
106 | #### OAuth Features
107 | 
108 | The OAuth implementation provides:
109 | 
110 | - **OAuth 2.0 Protected Resource Metadata** endpoint (`/.well-known/oauth-protected-resource`)
111 | - **Bearer token authentication** for all MCP requests
112 | - **JWT and Token Introspection** validation methods
113 | - **MCP Security Best Practices** compliance:
114 |   - Token audience validation (prevents token passthrough attacks)
115 |   - Session hijacking prevention with secure session IDs
116 |   - User-bound sessions for additional security
117 |   - Proper WWW-Authenticate headers for 401 responses
118 | 
119 | #### Example: Keycloak Configuration
120 | 
121 | For testing with Keycloak:
122 | 
123 | ```bash
124 | # Keycloak OAuth configuration
125 | export OAUTH_AUTHORIZATION_SERVER_URL="https://keycloak.example.com/realms/mcp-realm"
126 | export OAUTH_RESOURCE_SERVER_URI="https://mcp.example.com"
127 | export OAUTH_TOKEN_VALIDATION_METHOD="introspection"
128 | export OAUTH_INTROSPECTION_ENDPOINT="https://keycloak.example.com/realms/mcp-realm/protocol/openid-connect/token/introspect"
129 | export OAUTH_INTROSPECTION_CLIENT_ID="mcp-server"
130 | export OAUTH_INTROSPECTION_CLIENT_SECRET="your-keycloak-client-secret"
131 | export OAUTH_REQUIRED_SCOPES="mcp:access"
132 | 
133 | # Start server with OAuth
134 | alphavantage --server http --port 8080 --oauth
135 | ```
136 | 
137 | #### OAuth Client Flow
138 | 
139 | When OAuth is enabled, MCP clients must do the following (a minimal client sketch follows the list):
140 | 
141 | 1. **Discover** the authorization server via `GET /.well-known/oauth-protected-resource`
142 | 2. **Register** with the authorization server (if using Dynamic Client Registration)
143 | 3. **Obtain access tokens** from the authorization server
144 | 4. **Include tokens** in requests: `Authorization: Bearer <access-token>`
145 | 5. **Handle 401/403 responses** and refresh tokens as needed
146 | 
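A minimal sketch of steps 1, 4, and 5 is shown below. It assumes the server was started locally with `alphavantage --server http --port 8080 --oauth`, that the MCP endpoint is served at `/mcp` (as in the AWS deployment example), and that an access token was already obtained from the authorization server (steps 2-3 are provider-specific).

```python
# Minimal OAuth client sketch (steps 1, 4, 5); endpoint path and field names are assumptions.
import httpx

BASE_URL = "http://localhost:8080"   # assumption: local HTTP server with --oauth
ACCESS_TOKEN = "<access-token>"      # obtained from the authorization server (steps 2-3)

# Step 1: discover the authorization server from the protected-resource metadata.
metadata = httpx.get(f"{BASE_URL}/.well-known/oauth-protected-resource").json()
print("authorization servers:", metadata.get("authorization_servers"))

# Steps 4-5: send a Bearer token with the MCP request and handle 401/403 responses.
response = httpx.post(
    f"{BASE_URL}/mcp",  # assumption: MCP endpoint path
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"jsonrpc": "2.0", "id": 1, "method": "tools/list"},
)
if response.status_code in (401, 403):
    print("token rejected:", response.headers.get("WWW-Authenticate"))
else:
    print(response.json())
```
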
147 | Options:
148 | - `--server`: Choose between `stdio` (default) or `http` server mode
149 | - `--port`: Specify the port for the Streamable HTTP server (default: 8080)
150 | - `--oauth`: Enable OAuth 2.1 authentication (requires `--server http`)
151 | 
152 | ## 📊 Telemetry
153 | 
154 | The AlphaVantage MCP server includes optional Prometheus metrics for monitoring and observability.
155 | 
156 | ### Enabling Telemetry
157 | 
158 | Set the following environment variables to enable telemetry:
159 | 
160 | ```bash
161 | # Enable telemetry (default: true)
162 | export MCP_TELEMETRY_ENABLED=true
163 | 
164 | # Server identification (optional)
165 | export MCP_SERVER_NAME=alphavantage
166 | export MCP_SERVER_VERSION=1.0.0
167 | 
168 | # Metrics server port (default: 9464)
169 | export MCP_METRICS_PORT=9464
170 | ```
171 | 
172 | ### Metrics Endpoint
173 | 
174 | When telemetry is enabled, Prometheus metrics are available at:
175 | 
176 | ```
177 | http://localhost:9464/metrics
178 | ```
179 | 
180 | ### Available Metrics
181 | 
182 | The server collects the following metrics for each tool call (a programmatic example follows the list):
183 | 
184 | - **`mcp_tool_calls_total`** - Total number of tool calls (labeled by tool and outcome)
185 | - **`mcp_tool_latency_seconds`** - Tool execution latency histogram
186 | - **`mcp_tool_request_bytes`** - Request payload size histogram
187 | - **`mcp_tool_response_bytes`** - Response payload size histogram
188 | - **`mcp_tool_active_concurrency`** - Active concurrent tool calls gauge
189 | - **`mcp_tool_errors_total`** - Total errors by type (timeout, bad_input, connection, unknown)
190 | 
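Beyond scraping with Prometheus, the endpoint can also be read programmatically. The short sketch below uses the parser shipped with `prometheus-client` (already a project dependency) against the metrics URL and metric names listed above; it assumes a locally running server with telemetry enabled.

```python
# Print the MCP tool metrics exposed at http://localhost:9464/metrics
# (assumes a local server with telemetry enabled).
import httpx
from prometheus_client.parser import text_string_to_metric_families

metrics_text = httpx.get("http://localhost:9464/metrics").text
for family in text_string_to_metric_families(metrics_text):
    if family.name.startswith("mcp_tool"):
        for sample in family.samples:
            print(sample.name, sample.labels, sample.value)
```
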
191 | ### Example Usage with Telemetry
192 | 
193 | ```bash
194 | # Start server with telemetry enabled
195 | export MCP_TELEMETRY_ENABLED=true
196 | export MCP_SERVER_NAME=alphavantage-prod
197 | export ALPHAVANTAGE_API_KEY=your_api_key
198 | alphavantage --server http --port 8080
199 | 
200 | # View metrics
201 | curl http://localhost:9464/metrics
202 | ```
203 | 
204 | ## 🚀 AWS Serverless Deployment
205 | 
206 | Deploy the AlphaVantage MCP Server on AWS Lambda using the stateless MCP pattern for production-ready, scalable deployment.
207 | 
208 | ### Quick AWS Deployment
209 | 
210 | ```bash
211 | cd deploy/aws-stateless-mcp-lambda
212 | export ALPHAVANTAGE_API_KEY=your_api_key_here
213 | ./deploy.sh
214 | ```
215 | 
216 | **Features:**
217 | - ✅ **Stateless MCP pattern** - Perfect for Lambda's execution model
218 | - ✅ **Auto-scaling** - Handles any load with AWS Lambda + API Gateway
219 | - ✅ **Cost-effective** - Pay only for requests (~$1-5/month for typical usage)
220 | - ✅ **Production-ready** - Based on AWS official sample patterns
221 | - ✅ **OAuth 2.1 support** - Optional authentication for secure access
222 | 
223 | **📖 Full Documentation:** See [AWS Deployment Guide](deploy/aws-stateless-mcp-lambda/README.md) for complete setup instructions, testing, monitoring, and troubleshooting.
224 | 
225 | ### Usage with Claude Desktop
226 | 
227 | #### Option 1: Using uvx (Recommended)
228 | Add this to your `claude_desktop_config.json`:
229 | 
230 | ```json
231 | {
232 |   "mcpServers": {
233 |     "alphavantage": {
234 |       "command": "uvx",
235 |       "args": ["alphavantage-mcp"],
236 |       "env": {
237 |         "ALPHAVANTAGE_API_KEY": "YOUR_API_KEY_HERE"
238 |       }
239 |     }
240 |   }
241 | }
242 | ```
243 | 
244 | #### Option 2: From source
245 | If you cloned the repository, use this configuration:
246 | 
247 | ```json
248 | {
249 |   "mcpServers": {
250 |     "alphavantage": {
251 |       "command": "uv",
252 |       "args": [
253 |         "--directory",
254 |         "<DIRECTORY-OF-CLONED-PROJECT>/alphavantage",
255 |         "run",
256 |         "alphavantage"
257 |       ],
258 |       "env": {
259 |         "ALPHAVANTAGE_API_KEY": "YOUR_API_KEY_HERE"
260 |       }
261 |     }
262 |   }
263 | }
264 | ```
265 | #### Running the Server in Streamable HTTP Mode
266 | 
267 | **Using uvx:**
268 | ```json
269 | {
270 |   "mcpServers": {
271 |     "alphavantage": {
272 |       "command": "uvx",
273 |       "args": ["alphavantage-mcp", "--server", "http", "--port", "8080"],
274 |       "env": {
275 |         "ALPHAVANTAGE_API_KEY": "YOUR_API_KEY_HERE"
276 |       }
277 |     }
278 |   }
279 | }
280 | ```
281 | 
282 | **From source:**
283 | ```json
284 | {
285 |   "mcpServers": {
286 |     "alphavantage": {
287 |       "command": "uv",
288 |       "args": [
289 |         "--directory",
290 |         "<DIRECTORY-OF-CLONED-PROJECT>/alphavantage",
291 |         "run",
292 |         "alphavantage",
293 |         "--server",
294 |         "http",
295 |         "--port",
296 |         "8080"
297 |       ],
298 |       "env": {
299 |         "ALPHAVANTAGE_API_KEY": "YOUR_API_KEY_HERE"
300 |       }
301 |     }
302 |   }
303 | }
304 | ```
305 | 
306 | 
307 | ## 📺 Demo Video
308 | 
309 | Watch a quick demonstration of the Alpha Vantage MCP Server in action:
310 | 
311 | [![Alpha Vantage MCP Server Demo](https://github.com/user-attachments/assets/bc9ecffb-eab6-4a4d-bbf6-9fc8178f15c3)](https://github.com/user-attachments/assets/bc9ecffb-eab6-4a4d-bbf6-9fc8178f15c3)
312 | 
313 | 
314 | ## 🔧 Development & Publishing
315 | 
316 | ### Publishing to PyPI
317 | 
318 | This project includes scripts for publishing to PyPI and TestPyPI:
319 | 
320 | ```bash
321 | # Publish to TestPyPI (for testing)
322 | python scripts/publish.py --test
323 | 
324 | # Publish to PyPI (production)
325 | python scripts/publish.py
326 | 
327 | # Use uv publish instead of twine
328 | python scripts/publish.py --test --use-uv
329 | ```
330 | 
331 | The script uses `twine` by default (recommended) but can also use `uv publish` with the `--use-uv` flag.
332 | 
333 | ### GitHub Actions
334 | 
335 | The repository includes a GitHub Actions workflow for automated publishing:
336 | 
337 | - **Trusted Publishing**: Uses PyPA's official publish action with OpenID Connect
338 | - **Manual Trigger**: Can be triggered manually with options for TestPyPI vs PyPI
339 | - **Twine Fallback**: Supports both trusted publishing and twine-based publishing
340 | 
341 | To set up publishing:
342 | 
343 | 1. **For Trusted Publishing** (recommended):
344 |    - Configure trusted publishing on PyPI/TestPyPI with your GitHub repository
345 |    - No secrets needed - uses OpenID Connect
346 | 
347 | 2. **For Token-based Publishing**:
348 |    - Add `PYPI_API_TOKEN` and `TEST_PYPI_API_TOKEN` secrets to your repository
349 |    - Use the "Use twine" option in the workflow dispatch
350 | 
351 | ## 🤝 Contributing
352 | 
353 | We welcome contributions from the community! To get started, check out our [contributing guide](CONTRIBUTING.md) for setup instructions,
354 | development tips, and guidelines.
355 | 
```

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------

```markdown
 1 | # Contributing to AlphaVantage MCP Server
 2 | 
 3 | Thanks for your interest in contributing! 🎉  
 4 | This project is the official [MCP (Model Context Protocol)](https://github.com/modelcontextprotocol/servers) server for [Alpha Vantage](https://www.alphavantage.co). Here's how to get started with local development and testing.
 5 | 
 6 | ---
 7 | 
 8 | ## 🚀 Getting Started
 9 | 
10 | ### 1. Clone the repo
11 | 
12 | ```bash
13 | git clone https://github.com/calvernaz/alphavantage.git
14 | cd alphavantage
15 | ```
16 | ### 2. Set up your environment
17 | 
18 | You'll need an [Alpha Vantage API key](https://www.alphavantage.co/support/#api-key).
19 | 
20 | Create a .env file in the project root:
21 | 
22 | ```bash
23 | touch .env
24 | ```
25 | 
26 | Add your API key to the .env file:
27 | 
28 | ```bash
29 | ALPHAVANTAGE_API_KEY=your_api_key_here
30 | ```
31 | 
32 | Alternatively, you can export it directly in your terminal:
33 | ```bash
34 | export ALPHAVANTAGE_API_KEY=your_api_key_here
35 | ```
36 | 
37 | ## 🧪 Running Locally with Inspector
38 | 
39 | Use the MCP Inspector to run and test your server locally with hot reload.
40 | 
41 | ```bash
42 | npm install -g @modelcontextprotocol/inspector
43 | ```
44 | 
45 | Then, run the server:
46 | 
47 | ```bash
48 | npx @modelcontextprotocol/inspector uv --directory ~/alphavantage run alphavantage
49 | ```
 50 | > Replace `~/alphavantage` with the actual path to your clone of this repo.
51 | 
52 | 
```

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------

```yaml
1 | github: [calvernaz]
2 | 
```

--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------

```
1 | [pytest]
2 | asyncio_mode=auto
3 | asyncio_default_fixture_loop_scope="function"
```

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/requirements.txt:
--------------------------------------------------------------------------------

```
 1 | # Core MCP dependencies
 2 | mcp>=1.0.0
 3 | 
 4 | # AlphaVantage MCP server dependencies  
 5 | requests>=2.31.0
 6 | aiohttp>=3.9.0
 7 | pydantic>=2.5.0
 8 | toml>=0.10.2
 9 | uvicorn>=0.24.0
10 | starlette>=0.27.0
11 | 
12 | # OAuth 2.1 support
13 | PyJWT>=2.8.0
14 | httpx>=0.25.0
15 | cryptography>=41.0.0
16 | 
17 | # AWS Lambda runtime
18 | boto3>=1.34.0
19 | 
```

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
 2 | 
 3 | startCommand:
 4 |   type: http
 5 |   configSchema:
 6 |     # JSON Schema defining the configuration options for the MCP.
 7 |     type: object
 8 |     required:
 9 |       - alphavantageApiKey
10 |     properties:
11 |       alphavantageApiKey:
12 |         type: string
13 |         description: The API key for the Alphavantage server.
14 |   commandFunction:
15 |     # A function that produces the CLI command to start the MCP on stdio.
16 |     |-
17 |     config => ({ command: 'alphavantage', env: { ALPHAVANTAGE_API_KEY: config.alphavantageApiKey } })
18 | 
```

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/.aws-sam/build.toml:
--------------------------------------------------------------------------------

```toml
 1 | # This file is auto generated by SAM CLI build command
 2 | 
 3 | [function_build_definitions.f642707c-4652-4f92-8ce7-4478f43aac20]
 4 | codeuri = "/Users/medusa/code/alphavantage/deploy/aws-stateless-mcp-lambda"
 5 | runtime = "python3.12"
 6 | architecture = "x86_64"
 7 | handler = "lambda_function.lambda_handler"
 8 | manifest_hash = ""
 9 | packagetype = "Zip"
10 | functions = ["AlphaVantageMCPFunction"]
11 | 
12 | [layer_build_definitions.02ea97b0-d24e-4cf4-b3e6-7682870a9300]
13 | layer_name = "AlphaVantageMCPLayer"
14 | codeuri = "/Users/medusa/code/alphavantage/src"
15 | build_method = "python3.12"
16 | compatible_runtimes = ["python3.12"]
17 | architecture = "x86_64"
18 | manifest_hash = ""
19 | layer = "AlphaVantageMCPLayer"
20 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | # Use a Python image with uv pre-installed
 2 | FROM ghcr.io/astral-sh/uv:python3.12-alpine
 3 | 
 4 | # Install the project into `/app`
 5 | WORKDIR /app
 6 | 
 7 | # Enable bytecode compilation
 8 | ENV UV_COMPILE_BYTECODE=1
 9 | 
10 | # Copy from the cache instead of linking since it's a mounted volume
11 | ENV UV_LINK_MODE=copy
12 | 
13 | # Install the project's dependencies using the lockfile and settings
14 | RUN --mount=type=cache,target=/root/.cache/uv \
15 |     --mount=type=bind,source=uv.lock,target=uv.lock \
16 |     --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
17 |     uv sync --locked --no-install-project --no-dev --python /usr/local/bin/python3
18 | 
19 | # Then, add the rest of the project source code and install it
20 | # Installing separately from its dependencies allows optimal layer caching
21 | COPY . /app
22 | 
23 | # Remove any existing virtual environment that might have been copied from host
24 | RUN rm -rf .venv
25 | 
26 | RUN --mount=type=cache,target=/root/.cache/uv \
27 |     uv sync --locked --no-dev --python /usr/local/bin/python3
28 | 
29 | # Place executables in the environment at the front of the path
30 | ENV PATH="/app/.venv/bin:$PATH"
31 | 
32 | # Set transport mode to HTTP and port to 8080 as required by Smithery proxy
33 | ENV TRANSPORT=http
34 | ENV PORT=8080
35 | EXPOSE 8080
36 | 
37 | # Reset the entrypoint, don't invoke `uv`
38 | ENTRYPOINT []
39 | 
40 | # Run the application directly using the venv Python
41 | CMD ["alphavantage"]
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
 1 | [project]
 2 | name = "alphavantage-mcp"
 3 | version = "0.3.24"
 4 | description = "AlphaVantage MCP server - Financial data tools for Model Context Protocol"
 5 | readme = "README.md"
 6 | requires-python = ">=3.12"
 7 | keywords = ["mcp", "alphavantage", "financial", "stocks", "api", "server"]
 8 | license = {text = "MIT"}
 9 | classifiers = [
10 |     "Development Status :: 4 - Beta",
11 |     "Intended Audience :: Developers",
12 |     "License :: OSI Approved :: MIT License",
13 |     "Programming Language :: Python :: 3",
14 |     "Programming Language :: Python :: 3.12",
15 |     "Topic :: Office/Business :: Financial",
16 |     "Topic :: Software Development :: Libraries :: Python Modules",
17 | ]
18 | dependencies = [
19 |     "mcp>=1.9.4",
20 |     "httpx>=0.28.1",
21 |     "starlette>=0.47.2",
22 |     "uvicorn>=0.32.1",
23 |     "PyJWT>=2.10.1",
24 |     "prometheus-client>=0.20.0",
25 |     "toml>=0.10.2",
26 |     "packaging>=21.0",
27 | ]
28 | [[project.authors]]
29 | name = "Cesar Alvernaz"
30 | email = "[email protected]"
31 | 
32 | [project.urls]
33 | Homepage = "https://github.com/calvernaz/alphavantage"
34 | Repository = "https://github.com/calvernaz/alphavantage"
35 | Issues = "https://github.com/calvernaz/alphavantage/issues"
36 | Documentation = "https://github.com/calvernaz/alphavantage#readme"
37 | 
38 | 
39 | [build-system]
40 | requires = [ "hatchling",]
41 | build-backend = "hatchling.build"
42 | 
43 | [tool.hatch.build.targets.wheel]
44 | packages = ["src/alphavantage_mcp_server"]
45 | 
46 | [dependency-groups]
47 | dev = [
48 |     "pytest>=8.4.1",
49 |     "pytest-asyncio>=0.24.0",
50 |     "ruff>=0.9.9",
51 |     "build>=1.0.0",
52 |     "twine>=4.0.0",
53 | ]
54 | 
55 | [project.scripts]
56 | alphavantage-mcp = "alphavantage_mcp_server:main"
57 | 
```

--------------------------------------------------------------------------------
/src/alphavantage_mcp_server/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import argparse
 3 | import os
 4 | from . import server
 5 | 
 6 | 
 7 | def main():
 8 |     """Main entry point for the package."""
 9 |     parser = argparse.ArgumentParser(description="AlphaVantage MCP Server")
10 |     parser.add_argument(
11 |         "--server",
12 |         type=str,
13 |         choices=["stdio", "http"],
14 |         help="Server type: stdio or http (default: stdio, or from TRANSPORT env var)",
15 |     )
16 |     parser.add_argument(
17 |         "--port",
18 |         type=int,
19 |         help="Port for HTTP server (default: 8080, or from PORT env var)",
20 |     )
21 |     parser.add_argument(
22 |         "--oauth",
23 |         action="store_true",
24 |         help="Enable OAuth 2.1 authentication for HTTP server (requires --server http)",
25 |     )
26 | 
27 |     args = parser.parse_args()
28 | 
29 |     # Determine server type: command line arg takes precedence, then env var, then default to stdio
30 |     server_type = args.server
31 |     if server_type is None:
32 |         transport_env = os.getenv("TRANSPORT", "").lower()
33 |         if transport_env == "http":
34 |             server_type = "http"
35 |         else:
36 |             server_type = "stdio"
37 | 
38 |     # Determine port: command line arg takes precedence, then env var, then default to 8080
39 |     port = args.port
40 |     if port is None:
41 |         try:
42 |             port = int(os.getenv("PORT", "8080"))
43 |         except ValueError:
44 |             port = 8080
45 | 
46 |     # Validate OAuth flag usage
47 |     if args.oauth and server_type != "http":
48 |         parser.error(
49 |             "--oauth flag can only be used with --server http or TRANSPORT=http"
50 |         )
51 | 
52 |     # Use the patched server.main function directly
53 |     asyncio.run(
54 |         server.main(server_type=server_type, port=port, oauth_enabled=args.oauth)
55 |     )
56 | 
57 | 
58 | if __name__ == "__main__":
59 |     main()
60 | 
61 | 
62 | __all__ = ["main", "server"]
63 | 
```

--------------------------------------------------------------------------------
/tests/test_telemetry.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Unit tests for telemetry modules.
 3 | """
 4 | 
 5 | import sys
 6 | from unittest.mock import patch, MagicMock
 7 | 
 8 | import pytest
 9 | 
10 | # Mock the external dependencies
11 | sys.modules["prometheus_client"] = MagicMock()
12 | 
13 | 
14 | class TestTelemetryInstrumentation:
15 |     """Test telemetry instrumentation functionality."""
16 | 
17 |     def test_instrument_tool_decorator_disabled(self):
18 |         """Test that decorator does nothing when telemetry is disabled."""
19 |         with patch(
20 |             "src.alphavantage_mcp_server.telemetry_instrument.is_telemetry_enabled",
21 |             return_value=False,
22 |         ):
23 |             from src.alphavantage_mcp_server.telemetry_instrument import instrument_tool
24 | 
25 |             @instrument_tool("test_tool")
26 |             def test_function(x, y):
27 |                 return x + y
28 | 
29 |             result = test_function(2, 3)
30 |             assert result == 5
31 | 
32 |     def test_error_classification(self):
33 |         """Test error classification logic."""
34 |         from src.alphavantage_mcp_server.telemetry_instrument import _classify_error
35 | 
36 |         assert _classify_error(TimeoutError()) == "timeout"
37 |         assert _classify_error(ValueError()) == "bad_input"
38 |         assert _classify_error(TypeError()) == "bad_input"
39 |         assert _classify_error(KeyError()) == "bad_input"
40 |         assert _classify_error(ConnectionError()) == "connection"
41 |         assert _classify_error(RuntimeError()) == "unknown"
42 | 
43 |     def test_size_calculation(self):
44 |         """Test size calculation function."""
45 |         from src.alphavantage_mcp_server.telemetry_instrument import _get_size_bytes
46 | 
47 |         assert _get_size_bytes("hello") == 5
48 |         assert _get_size_bytes(b"hello") == 5
49 |         assert _get_size_bytes(None) == 0
50 |         assert _get_size_bytes(123) > 0
51 | 
52 | 
53 | if __name__ == "__main__":
54 |     pytest.main([__file__])
55 | 
```

--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------

```yaml
 1 | name: Publish to PyPI
 2 | 
 3 | on:
 4 |   release:
 5 |     types: [published]
 6 |   workflow_dispatch:
 7 |     inputs:
 8 |       test_pypi:
 9 |         description: 'Publish to TestPyPI instead of PyPI'
10 |         required: false
11 |         default: false
12 |         type: boolean
13 |       use_twine:
14 |         description: 'Use twine instead of trusted publishing'
15 |         required: false
16 |         default: false
17 |         type: boolean
18 | 
19 | jobs:
20 |   publish:
21 |     runs-on: ubuntu-latest
22 |     environment:
23 |       name: ${{ github.event.inputs.test_pypi == 'true' && 'testpypi' || 'pypi' }}
24 |     permissions:
25 |       id-token: write  # For trusted publishing
26 |       contents: read
27 | 
28 |     steps:
29 |     - uses: actions/checkout@v4
30 | 
31 |     - name: Install uv
32 |       uses: astral-sh/setup-uv@v4
33 |       with:
34 |         version: "latest"
35 | 
36 |     - name: Set up Python
37 |       run: uv python install 3.12
38 | 
39 |     - name: Install dependencies
40 |       run: uv sync --dev
41 | 
42 |     - name: Build package
43 |       run: uv build
44 | 
45 |     - name: Publish to TestPyPI (trusted publishing)
46 |       if: github.event.inputs.test_pypi == 'true' && github.event.inputs.use_twine != 'true'
47 |       uses: pypa/gh-action-pypi-publish@release/v1
48 |       with:
49 |         repository-url: https://test.pypi.org/legacy/
50 | 
51 |     - name: Publish to PyPI (trusted publishing)
52 |       if: github.event.inputs.test_pypi != 'true' && github.event.inputs.use_twine != 'true'
53 |       uses: pypa/gh-action-pypi-publish@release/v1
54 | 
55 |     - name: Publish to TestPyPI (twine)
56 |       if: github.event.inputs.test_pypi == 'true' && github.event.inputs.use_twine == 'true'
57 |       env:
58 |         TWINE_USERNAME: __token__
59 |         TWINE_PASSWORD: ${{ secrets.TEST_PYPI_API_TOKEN }}
60 |       run: uv run twine upload --repository testpypi dist/*
61 | 
62 |     - name: Publish to PyPI (twine)
63 |       if: github.event.inputs.test_pypi != 'true' && github.event.inputs.use_twine == 'true'
64 |       env:
65 |         TWINE_USERNAME: __token__
66 |         TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
67 |       run: uv run twine upload dist/*
68 | 
```

--------------------------------------------------------------------------------
/DEVELOPMENT.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Run the server
  2 | 
  3 | Set the environment variable `ALPHAVANTAGE_API_KEY` to your Alphavantage API key.
  4 | 
  5 | ```bash
  6 | uv --directory ~/code/alphavantage run alphavantage
  7 | ```
  8 | 
  9 | ### Response Limiting Utilities
 10 | 
 11 | #### 1. Modify API Functions
 12 | Add `max_data_points` parameter to technical indicator functions:
 13 | 
 14 | ```python
 15 | async def fetch_sma(
 16 |     symbol: str,
 17 |     interval: str = None,
 18 |     month: str = None,
 19 |     time_period: int = None,
 20 |     series_type: str = None,
 21 |     datatype: str = "json",
 22 |     max_data_points: int = 100,  # NEW PARAMETER
 23 | ) -> dict[str, str] | str:
 24 | ```
 25 | 
 26 | #### 2. Apply Response Limiting Logic
 27 | ```python
 28 | # In fetch_sma and other technical indicator functions
 29 | if datatype == "csv":
 30 |     return response.text
 31 |     
 32 | # For JSON responses, apply response limiting
 33 | full_response = response.json()
 34 | 
 35 | from .response_utils import limit_time_series_response, should_limit_response
 36 | 
 37 | if should_limit_response(full_response):
 38 |     return limit_time_series_response(full_response, max_data_points)
 39 | 
 40 | return full_response
 41 | ```
 42 | 
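For orientation, here is a minimal sketch of what these two helpers could look like. It is an assumption, not the contents of `response_utils.py`: it supposes the response holds one large date-keyed time-series dict next to a small metadata block, and keeps only the most recent entries.

```python
# Hypothetical sketch of the helpers imported above; the real implementations live in
# src/alphavantage_mcp_server/response_utils.py and may differ.
def should_limit_response(payload: dict, threshold: int = 100) -> bool:
    # Only limit when some nested object (the time series) exceeds the threshold.
    return any(
        isinstance(value, dict) and len(value) > threshold for value in payload.values()
    )


def limit_time_series_response(payload: dict, max_data_points: int) -> dict:
    limited = {}
    for key, value in payload.items():
        if isinstance(value, dict) and len(value) > max_data_points:
            # Time series are keyed by date strings, so a reverse sort keeps the newest entries.
            recent = sorted(value, reverse=True)[:max_data_points]
            limited[key] = {k: value[k] for k in recent}
        else:
            limited[key] = value
    return limited
```
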
 43 | #### 3. Update Tool Definitions
 44 | Add `max_data_points` parameter to tool schemas:
 45 | 
 46 | ```python
 47 | types.Tool(
 48 |     name=AlphavantageTools.SMA.value,
 49 |     description="Fetch simple moving average",
 50 |     inputSchema={
 51 |         "type": "object",
 52 |         "properties": {
 53 |             "symbol": {"type": "string"},
 54 |             "interval": {"type": "string"},
 55 |             "month": {"type": "string"},
 56 |             "time_period": {"type": "number"},
 57 |             "series_type": {"type": "string"},
 58 |             "datatype": {"type": "string"},
 59 |             "max_data_points": {
 60 |                 "type": "number", 
 61 |                 "description": "Maximum number of data points to return (default: 100)",
 62 |                 "default": 100
 63 |             },
 64 |         },
 65 |         "required": ["symbol", "interval", "time_period", "series_type"],
 66 |     },
 67 | ),
 68 | ```
 69 | 
 70 | #### 4. Update Tool Handlers
 71 | Pass `max_data_points` parameter to API functions:
 72 | 
 73 | ```python
 74 | case AlphavantageTools.SMA.value:
 75 |     symbol = arguments.get("symbol")
 76 |     interval = arguments.get("interval")
 77 |     month = arguments.get("month")
 78 |     time_period = arguments.get("time_period")
 79 |     series_type = arguments.get("series_type")
 80 |     datatype = arguments.get("datatype", "json")
 81 |     max_data_points = arguments.get("max_data_points", 100)  # NEW
 82 | 
 83 |     if not symbol or not interval or not time_period or not series_type:
 84 |         raise ValueError(
 85 |             "Missing required arguments: symbol, interval, time_period, series_type"
 86 |         )
 87 | 
 88 |     result = await fetch_sma(
 89 |         symbol, interval, month, time_period, series_type, datatype, max_data_points
 90 |     )
 91 | ```
 92 | 
 93 | # Format
 94 | 
 95 | ```bash
 96 | ruff check src/alphavantage_mcp_server/  --fix
 97 | ```
 98 | 
 99 | # Run Tests
100 | 
101 | ```bash
102 | pytest tests/*.py
103 | ```
104 | 
105 | # Versioning
106 | 
107 | ```bash
108 | bumpversion patch
109 | ```
```

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/template.yaml:
--------------------------------------------------------------------------------

```yaml
  1 | AWSTemplateFormatVersion: '2010-09-09'
  2 | Transform: AWS::Serverless-2016-10-31
  3 | Description: >
  4 |   Stateless AlphaVantage MCP Server on AWS Lambda
  5 |   Based on aws-samples/sample-serverless-mcp-servers pattern
  6 | 
  7 | Parameters:
  8 |   AlphaVantageApiKey:
  9 |     Type: String
 10 |     Description: AlphaVantage API Key
 11 |     NoEcho: true
 12 |   
 13 |   OAuthEnabled:
 14 |     Type: String
 15 |     Default: 'false'
 16 |     AllowedValues: ['true', 'false']
 17 |     Description: Enable OAuth 2.1 authentication
 18 |   
 19 |   OAuthAuthorizationServerUrl:
 20 |     Type: String
 21 |     Default: ''
 22 |     Description: OAuth Authorization Server URL (optional)
 23 | 
 24 | Globals:
 25 |   Function:
 26 |     Timeout: 30
 27 |     MemorySize: 512
 28 |     Runtime: python3.12
 29 |     Environment:
 30 |       Variables:
 31 |         ALPHAVANTAGE_API_KEY: !Ref AlphaVantageApiKey
 32 |         OAUTH_ENABLED: !Ref OAuthEnabled
 33 |         OAUTH_AUTHORIZATION_SERVER_URL: !Ref OAuthAuthorizationServerUrl
 34 | 
 35 | Resources:
 36 |   AlphaVantageMCPFunction:
 37 |     Type: AWS::Serverless::Function
 38 |     Properties:
 39 |       FunctionName: !Sub "${AWS::StackName}-alphavantage-mcp"
 40 |       CodeUri: .
 41 |       Handler: lambda_function.lambda_handler
 42 |       Description: Stateless AlphaVantage MCP Server
 43 |       Layers:
 44 |         - !Ref AlphaVantageMCPLayer
 45 |       Events:
 46 |         McpApiPost:
 47 |           Type: Api
 48 |           Properties:
 49 |             Path: /mcp
 50 |             Method: post
 51 |         McpApiOptions:
 52 |           Type: Api
 53 |           Properties:
 54 |             Path: /mcp
 55 |             Method: options
 56 |       Environment:
 57 |         Variables:
 58 |           PYTHONPATH: /var/task:/opt/python
 59 | 
 60 |   # Lambda Layer for AlphaVantage MCP Server source code
 61 |   AlphaVantageMCPLayer:
 62 |     Type: AWS::Serverless::LayerVersion
 63 |     Properties:
 64 |       LayerName: !Sub "${AWS::StackName}-alphavantage-mcp-layer"
 65 |       Description: AlphaVantage MCP Server source code
 66 |       ContentUri: ../../src/
 67 |       CompatibleRuntimes:
 68 |         - python3.12
 69 |     Metadata:
 70 |       BuildMethod: python3.12
 71 | 
 72 | Outputs:
 73 |   McpApiUrl:
 74 |     Description: "API Gateway endpoint URL for MCP requests"
 75 |     Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mcp"
 76 |     Export:
 77 |       Name: !Sub "${AWS::StackName}-McpApiUrl"
 78 |   
 79 |   FunctionName:
 80 |     Description: "Lambda Function Name"
 81 |     Value: !Ref AlphaVantageMCPFunction
 82 |     Export:
 83 |       Name: !Sub "${AWS::StackName}-FunctionName"
 84 |   
 85 |   TestCommands:
 86 |     Description: "Test commands for the deployed MCP server"
 87 |     Value: !Sub |
 88 |       # Initialize MCP session
 89 |       curl -X POST https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mcp \
 90 |         -H 'Content-Type: application/json' \
 91 |         -H 'Accept: application/json' \
 92 |         -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"test-client","version":"1.0.0"}}}'
 93 |       
 94 |       # List available tools
 95 |       curl -X POST https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mcp \
 96 |         -H 'Content-Type: application/json' \
 97 |         -H 'Accept: application/json' \
 98 |         -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}'
 99 |       
100 |       # Call a tool (example: get stock quote)
101 |       curl -X POST https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mcp \
102 |         -H 'Content-Type: application/json' \
103 |         -H 'Accept: application/json' \
104 |         -d '{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"get_stock_quote","arguments":{"symbol":"AAPL"}}}'
105 | 
```

--------------------------------------------------------------------------------
/tests/test_stdio_transport.py:
--------------------------------------------------------------------------------

```python
  1 | import json
  2 | import sys
  3 | 
  4 | import pytest
  5 | from mcp import ClientSession
  6 | from mcp.client.stdio import stdio_client, StdioServerParameters
  7 | 
  8 | 
  9 | @pytest.mark.asyncio
 10 | async def test_stdio_stock_quote():
 11 |     """Test stdio transport with stock_quote tool"""
 12 | 
 13 |     # Start the MCP server as a subprocess
 14 |     server_params = StdioServerParameters(
 15 |         command=sys.executable,
 16 |         args=[
 17 |             "-c",
 18 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 19 |             "--server",
 20 |             "stdio",
 21 |         ],
 22 |     )
 23 | 
 24 |     # Connect to the server using stdio client
 25 |     client = stdio_client(server_params)
 26 |     async with client as (read_stream, write_stream):
 27 |         async with ClientSession(read_stream, write_stream) as session:
 28 |             # Initialize the session
 29 |             await session.initialize()
 30 | 
 31 |             # List available tools
 32 |             response = await session.list_tools()
 33 |             tools = response.tools
 34 |             tool_names = [tool.name for tool in tools]
 35 | 
 36 |             # Verify stock_quote tool is available
 37 |             assert "stock_quote" in tool_names, (
 38 |                 f"stock_quote not found in tools: {tool_names}"
 39 |             )
 40 | 
 41 |             # Find the stock_quote tool
 42 |             next(tool for tool in tools if tool.name == "stock_quote")
 43 | 
 44 |             # Test calling the stock_quote tool
 45 |             result = await session.call_tool("stock_quote", {"symbol": "AAPL"})
 46 | 
 47 |             # Verify we got a result
 48 |             assert result is not None
 49 |             assert hasattr(result, "content")
 50 |             assert len(result.content) > 0
 51 | 
 52 |             # Parse the JSON result to verify it contains stock data
 53 |             content_text = result.content[0].text
 54 |             stock_data = json.loads(content_text)
 55 | 
 56 |             # Verify the response contains expected stock quote fields
 57 |             assert "Global Quote" in stock_data or "Error Message" in stock_data
 58 | 
 59 |             if "Global Quote" in stock_data:
 60 |                 global_quote = stock_data["Global Quote"]
 61 |                 assert "01. symbol" in global_quote
 62 |                 assert "05. price" in global_quote
 63 |                 assert global_quote["01. symbol"] == "AAPL"
 64 | 
 65 | 
 66 | @pytest.mark.asyncio
 67 | async def test_stdio_tool_list():
 68 |     """Test that stdio transport can list all available tools"""
 69 | 
 70 |     server_params = StdioServerParameters(
 71 |         command=sys.executable,
 72 |         args=[
 73 |             "-c",
 74 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 75 |             "--server",
 76 |             "stdio",
 77 |         ],
 78 |     )
 79 | 
 80 |     client = stdio_client(server_params)
 81 |     async with client as (read_stream, write_stream):
 82 |         async with ClientSession(read_stream, write_stream) as session:
 83 |             await session.initialize()
 84 | 
 85 |             response = await session.list_tools()
 86 |             tools = response.tools
 87 | 
 88 |             # Verify we have tools
 89 |             assert len(tools) > 0
 90 | 
 91 |             # Verify essential tools are present
 92 |             tool_names = [tool.name for tool in tools]
 93 |             expected_tools = ["stock_quote", "time_series_daily"]
 94 | 
 95 |             for expected_tool in expected_tools:
 96 |                 assert expected_tool in tool_names, (
 97 |                     f"{expected_tool} not found in tools"
 98 |                 )
 99 | 
100 |             # Verify each tool has required attributes
101 |             for tool in tools:
102 |                 assert hasattr(tool, "name")
103 |                 assert hasattr(tool, "description")
104 |                 assert hasattr(tool, "inputSchema")
105 |                 assert tool.name is not None
106 |                 assert tool.description is not None
107 |                 assert tool.inputSchema is not None
108 | 
```
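
The same stdio wiring used in these tests can be driven from a plain script. Below is a minimal sketch (not part of the repository) that lists the server's tools over stdio; it assumes it is run from the repo root with `ALPHAVANTAGE_API_KEY` exported.

```python
import asyncio
import sys

from mcp import ClientSession
from mcp.client.stdio import stdio_client, StdioServerParameters


async def list_stdio_tools() -> list[str]:
    # Same launch command the tests use: run the server from src/ in stdio mode
    params = StdioServerParameters(
        command=sys.executable,
        args=[
            "-c",
            "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
            "--server",
            "stdio",
        ],
    )
    async with stdio_client(params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            response = await session.list_tools()
            return [tool.name for tool in response.tools]


if __name__ == "__main__":
    print(asyncio.run(list_stdio_tools()))
```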

--------------------------------------------------------------------------------
/scripts/publish.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Script to build and publish the alphavantage-mcp package to PyPI.
  4 | 
  5 | Usage:
  6 |     python scripts/publish.py --test           # Publish to TestPyPI using twine
  7 |     python scripts/publish.py                  # Publish to PyPI using twine
  8 |     python scripts/publish.py --test --use-uv  # Publish to TestPyPI using uv publish
  9 |     python scripts/publish.py --use-uv         # Publish to PyPI using uv publish
 10 | """
 11 | 
 12 | import argparse
 13 | import subprocess
 14 | import sys
 15 | from pathlib import Path
 16 | 
 17 | 
 18 | def run_command(cmd: list[str], description: str) -> bool:
 19 |     """Run a command and return True if successful."""
 20 |     print(f"🔄 {description}...")
 21 |     try:
 22 |         result = subprocess.run(cmd, check=True, capture_output=True, text=True)
 23 |         print(f"✅ {description} completed successfully")
 24 |         if result.stdout:
 25 |             print(f"   Output: {result.stdout.strip()}")
 26 |         return True
 27 |     except subprocess.CalledProcessError as e:
 28 |         print(f"❌ {description} failed")
 29 |         print(f"   Error: {e.stderr.strip()}")
 30 |         return False
 31 | 
 32 | 
 33 | def main():
 34 |     parser = argparse.ArgumentParser(description="Build and publish alphavantage-mcp package")
 35 |     parser.add_argument(
 36 |         "--test", 
 37 |         action="store_true", 
 38 |         help="Publish to TestPyPI instead of PyPI"
 39 |     )
 40 |     parser.add_argument(
 41 |         "--skip-build",
 42 |         action="store_true",
 43 |         help="Skip building and only upload existing dist files"
 44 |     )
 45 |     parser.add_argument(
 46 |         "--use-uv",
 47 |         action="store_true",
 48 |         help="Use uv publish instead of twine (default: use twine)"
 49 |     )
 50 |     args = parser.parse_args()
 51 | 
 52 |     # Change to project root
 53 |     project_root = Path(__file__).parent.parent
 54 |     print(f"📁 Working in: {project_root}")
 55 |     
 56 |     if not args.skip_build:
 57 |         # Clean previous builds
 58 |         dist_dir = project_root / "dist"
 59 |         if dist_dir.exists():
 60 |             print("🧹 Cleaning previous builds...")
 61 |             import shutil
 62 |             shutil.rmtree(dist_dir)
 63 | 
 64 |         # Build the package
 65 |         if not run_command(
 66 |             ["uv", "build"], 
 67 |             "Building package"
 68 |         ):
 69 |             sys.exit(1)
 70 | 
 71 |     # Check if dist files exist
 72 |     dist_dir = project_root / "dist"
 73 |     if not dist_dir.exists() or not list(dist_dir.glob("*.whl")):
 74 |         print("❌ No built packages found in dist/")
 75 |         print("   Run without --skip-build to build the package first")
 76 |         sys.exit(1)
 77 | 
 78 |     # Upload to PyPI or TestPyPI
 79 |     if args.test:
 80 |         repository_name = "testpypi"
 81 |         print("🧪 Publishing to TestPyPI...")
 82 |         print("   You can install with: pip install -i https://test.pypi.org/simple/ alphavantage-mcp")
 83 |     else:
 84 |         repository_name = "pypi"
 85 |         print("🚀 Publishing to PyPI...")
 86 |         print("   After publishing, users can install with: uvx alphavantage-mcp")
 87 | 
 88 |     # Choose upload method
 89 |     if args.use_uv:
 90 |         # Use uv publish
 91 |         repository_url = "https://test.pypi.org/legacy/" if args.test else "https://upload.pypi.org/legacy/"
 92 |         upload_cmd = [
 93 |             "uv", "publish",
 94 |             "--publish-url", repository_url,
 95 |             *sorted(str(p) for p in dist_dir.glob("*")),  # expanded here; subprocess does no shell globbing
 96 |         ]
 97 |         auth_help = "uv publish --help"
 98 |     else:
 99 |         # Use twine (default)
100 |         upload_cmd = [
101 |             "uv", "run", "twine", "upload",
102 |             "--repository", repository_name,
103 |             *sorted(str(p) for p in dist_dir.glob("*")),  # expanded here; subprocess does no shell globbing
104 |         ]
105 |         auth_help = "~/.pypirc file or environment variables"
106 | 
107 |     if not run_command(upload_cmd, f"Uploading to {repository_name}"):
108 |         print(f"\n💡 If authentication failed, make sure you have:")
109 |         print(f"   1. Created an account on {repository_name}")
110 |         print(f"   2. Generated an API token")
111 |         print(f"   3. Set up authentication with: {auth_help}")
112 |         sys.exit(1)
113 | 
114 |     print(f"\n🎉 Package successfully published to {repository_name.upper()}!")
115 | 
116 |     if args.test:
117 |         print("\n📋 Next steps:")
118 |         print("   1. Test the installation: pip install -i https://test.pypi.org/simple/ alphavantage-mcp")
119 |         print("   2. If everything works, publish to PyPI: python scripts/publish.py")
120 |     else:
121 |         print("\n📋 Users can now install with:")
122 |         print("   uvx alphavantage-mcp")
123 |         print("   or")
124 |         print("   pip install alphavantage-mcp")
125 | 
126 | 
127 | if __name__ == "__main__":
128 |     main()
129 | 
```
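
Before uploading, the built artifacts can be validated with `twine check` using the same `uv run` pattern the script already relies on. A hedged sketch (hypothetical helper, not part of `scripts/publish.py`):

```python
import subprocess
from pathlib import Path


def check_dist(project_root: Path) -> bool:
    """Run `twine check` on everything in dist/ and report whether it passed."""
    dist_files = [str(p) for p in (project_root / "dist").glob("*")]
    if not dist_files:
        print("No artifacts found in dist/; run `uv build` first")
        return False
    result = subprocess.run(
        ["uv", "run", "twine", "check", *dist_files],
        capture_output=True,
        text=True,
    )
    print(result.stdout.strip())
    return result.returncode == 0
```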

--------------------------------------------------------------------------------
/src/alphavantage_mcp_client/client.py:
--------------------------------------------------------------------------------

```python
  1 | import os
  2 | import sys
  3 | import json
  4 | import asyncio
  5 | from typing import Optional, Literal
  6 | from contextlib import AsyncExitStack
  7 | 
  8 | from mcp import ClientSession
  9 | from mcp.client.sse import sse_client
 10 | from mcp.client.streamable_http import streamablehttp_client
 11 | 
 12 | from openai import OpenAI
 13 | 
 14 | BASE_URL = "http://localhost:8080/"
 15 | 
 16 | 
 17 | class MCPClient:
 18 |     def __init__(self):
 19 |         # Initialize session and client objects
 20 |         self.session: Optional[ClientSession] = None
 21 |         self.exit_stack = AsyncExitStack()
 22 |         self.client = OpenAI(
 23 |             base_url=os.getenv("OPENAI_BASE_URL"),
 24 |             api_key=os.getenv("OPENAI_API_KEY"),
 25 |         )
 26 | 
 27 |     async def connect_to_server(
 28 |         self, server_protocol: Literal["sse", "streamable-http"]
 29 |     ):
 30 |         """Connect to an MCP server"""
 31 |         if server_protocol == "sse":
 32 |             client = sse_client(BASE_URL + "sse")
 33 |             rs, ws = await self.exit_stack.enter_async_context(client)
 34 |         elif server_protocol == "streamable-http":
 35 |             client = streamablehttp_client(BASE_URL + "mcp")
 36 |             rs, ws, _ = await self.exit_stack.enter_async_context(client)
 37 |         else:
 38 |             raise Exception("Unknown transport protocol")
 39 |         self.session = await self.exit_stack.enter_async_context(ClientSession(rs, ws))
 40 | 
 41 |         await self.session.initialize()
 42 | 
 43 |         # List available tools
 44 |         response = await self.session.list_tools()
 45 |         tools = response.tools
 46 |         print("\nConnected to server with tools:", [tool.name for tool in tools])
 47 | 
 48 |     async def process_query(self, query: str) -> str:
 49 |         """Process a query using LLM and available tools"""
 50 |         messages = [{"role": "user", "content": query}]
 51 | 
 52 |         response = await self.session.list_tools()
 53 |         available_tools = [
 54 |             {
 55 |                 "type": "function",
 56 |                 "function": {
 57 |                     "name": tool.name,
 58 |                     "description": tool.description,
 59 |                     "parameters": tool.inputSchema,
 60 |                 },
 61 |             }
 62 |             for tool in response.tools
 63 |         ]
 64 | 
 65 |         # Initial LLM API call
 66 |         response = self.client.chat.completions.create(
 67 |             model=self.client.models.list().data[0].id,
 68 |             messages=messages,
 69 |             tools=available_tools,
 70 |             tool_choice="auto",
 71 |         )
 72 | 
 73 |         # Process response and handle tool calls
 74 |         final_text = []
 75 | 
 76 |         content = response.choices[0].message.content
 77 |         final_text.append(content)
 78 | 
 79 |         if response.choices[0].message.tool_calls:
 80 |             for tc in response.choices[0].message.tool_calls:
 81 |                 f = tc.function
 82 |                 tool_name = f.name
 83 |                 tool_args = f.arguments
 84 | 
 85 |                 # Execute tool call
 86 |                 result = await self.session.call_tool(tool_name, json.loads(tool_args))
 87 |                 final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")
 88 | 
 89 |                 # Continue conversation with tool results
 90 |                 messages.append({"role": "assistant", "content": content})
 91 |                 messages.append({"role": "user", "content": result.content[0].text})
 92 | 
 93 |             # Get next response from LLM
 94 |             response = self.client.chat.completions.create(
 95 |                 model=self.client.models.list().data[0].id,
 96 |                 messages=messages,
 97 |             )
 98 | 
 99 |             final_text.append(response.choices[0].message.content)
100 | 
101 |         return "\n".join(final_text)
102 | 
103 |     async def chat_loop(self):
104 |         """Run an interactive chat loop"""
105 |         print("\nMCP Client Started!")
106 |         print("Type your queries or 'quit' to exit.")
107 | 
108 |         while True:
109 |             try:
110 |                 query = input("\nQuery: ").strip()
111 | 
112 |                 if query.lower() == "quit":
113 |                     break
114 | 
115 |                 response = await self.process_query(query)
116 |                 print("\n" + response)
117 | 
118 |             except Exception as e:
119 |                 # Surface the error and keep the chat loop running
120 |                 print(f"\nError: {str(e)}")
121 | 
122 |     async def cleanup(self):
123 |         """Clean up resources"""
124 |         await self.exit_stack.aclose()
125 | 
126 | 
127 | async def start():
128 |     if len(sys.argv) == 2 and sys.argv[1] in ("sse", "streamable-http"):
129 |         client = MCPClient()
130 |         try:
131 |             await client.connect_to_server(sys.argv[1])
132 |             await client.chat_loop()
133 |         finally:
134 |             await client.cleanup()
135 |     else:
136 |         raise Exception("Usage: uv run client sse|streamable-http")
137 | 
138 | 
139 | def main():
140 |     asyncio.run(start())
141 | 
142 | 
143 | if __name__ == "__main__":
144 |     main()
145 | 
```
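
The interactive loop is the intended entry point (`uv run client sse|streamable-http`), but `MCPClient` can also be driven programmatically. A minimal sketch, assuming the MCP server is already listening on `localhost:8080` and `OPENAI_BASE_URL` / `OPENAI_API_KEY` are set:

```python
import asyncio

from alphavantage_mcp_client.client import MCPClient


async def one_shot(query: str) -> str:
    """Run a single query through the client instead of the interactive chat loop."""
    client = MCPClient()
    try:
        await client.connect_to_server("streamable-http")
        return await client.process_query(query)
    finally:
        await client.cleanup()


if __name__ == "__main__":
    print(asyncio.run(one_shot("What is the latest quote for AAPL?")))
```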

--------------------------------------------------------------------------------
/src/alphavantage_mcp_server/telemetry_bootstrap.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Telemetry Bootstrap Module
  3 | 
  4 | This module initializes Prometheus metrics for the AlphaVantage MCP server.
  5 | It provides centralized configuration and setup for telemetry components.
  6 | """
  7 | 
  8 | import os
  9 | import logging
 10 | import threading
 11 | from typing import Optional
 12 | from prometheus_client import Counter, Histogram, Gauge, start_http_server
 13 | 
 14 | logger = logging.getLogger(__name__)
 15 | 
 16 | # Environment variable configuration
 17 | MCP_TELEMETRY_ENABLED = os.getenv("MCP_TELEMETRY_ENABLED", "true").lower() == "true"
 18 | MCP_SERVER_NAME = os.getenv("MCP_SERVER_NAME", "alphavantage")
 19 | MCP_SERVER_VERSION = os.getenv("MCP_SERVER_VERSION", "dev")
 20 | MCP_METRICS_PORT = int(os.getenv("MCP_METRICS_PORT", "9464"))
 21 | 
 22 | # Global telemetry state
 23 | _telemetry_initialized = False
 24 | _metrics_server_started = False
 25 | _metrics_server_thread: Optional[threading.Thread] = None
 26 | 
 27 | # Prometheus metrics - these will be initialized in init_telemetry()
 28 | MCP_CALLS: Optional[Counter] = None
 29 | MCP_ERRS: Optional[Counter] = None
 30 | MCP_LAT: Optional[Histogram] = None
 31 | MCP_REQ_B: Optional[Histogram] = None
 32 | MCP_RES_B: Optional[Histogram] = None
 33 | MCP_CONC: Optional[Gauge] = None
 34 | 
 35 | 
 36 | def _create_prometheus_metrics():
 37 |     """Create the module-level Prometheus metrics objects (assigned to the globals above)."""
 38 |     global MCP_CALLS, MCP_ERRS, MCP_LAT, MCP_REQ_B, MCP_RES_B, MCP_CONC
 39 | 
 40 |     MCP_CALLS = Counter(
 41 |         "mcp_tool_calls_total",
 42 |         "Total number of MCP tool calls",
 43 |         ["tool", "server", "version", "outcome"],
 44 |     )
 45 | 
 46 |     MCP_ERRS = Counter(
 47 |         "mcp_tool_errors_total",
 48 |         "Total number of MCP tool errors",
 49 |         ["tool", "error_kind"],
 50 |     )
 51 | 
 52 |     MCP_LAT = Histogram(
 53 |         "mcp_tool_latency_seconds",
 54 |         "MCP tool call latency in seconds",
 55 |         ["tool", "server", "version"],
 56 |         buckets=[0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0],
 57 |     )
 58 | 
 59 |     MCP_REQ_B = Histogram(
 60 |         "mcp_tool_request_bytes",
 61 |         "MCP tool request size in bytes",
 62 |         ["tool"],
 63 |         buckets=[64, 256, 1024, 4096, 16384, 65536, 262144, 1048576],
 64 |     )
 65 | 
 66 |     MCP_RES_B = Histogram(
 67 |         "mcp_tool_response_bytes",
 68 |         "MCP tool response size in bytes",
 69 |         ["tool"],
 70 |         buckets=[64, 256, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304],
 71 |     )
 72 | 
 73 |     MCP_CONC = Gauge(
 74 |         "mcp_tool_active_concurrency",
 75 |         "Number of currently active MCP tool calls",
 76 |         ["tool"],
 77 |     )
 78 | 
 79 | 
 80 | def _start_metrics_server():
 81 |     """Start the Prometheus metrics HTTP server."""
 82 |     global _metrics_server_started, _metrics_server_thread
 83 | 
 84 |     if _metrics_server_started:
 85 |         return
 86 | 
 87 |     try:
 88 | 
 89 |         def run_server():
 90 |             try:
 91 |                 start_http_server(MCP_METRICS_PORT, addr="127.0.0.1")
 92 |                 logger.info(
 93 |                     f"Prometheus metrics server started on 127.0.0.1:{MCP_METRICS_PORT}"
 94 |                 )
 95 |             except Exception as e:
 96 |                 logger.error(f"Failed to start metrics server: {e}")
 97 | 
 98 |         _metrics_server_thread = threading.Thread(target=run_server, daemon=True)
 99 |         _metrics_server_thread.start()
100 |         _metrics_server_started = True
101 | 
102 |     except Exception as e:
103 |         logger.error(f"Failed to start metrics server thread: {e}")
104 | 
105 | 
106 | def init_telemetry(start_metrics: bool = True) -> None:
107 |     """
108 |     Initialize telemetry system.
109 | 
110 |     Args:
111 |         start_metrics: Whether to start the Prometheus metrics HTTP server.
112 |                       Set to False for Lambda environments.
113 |     """
114 |     global _telemetry_initialized
115 | 
116 |     if _telemetry_initialized:
117 |         logger.debug("Telemetry already initialized")
118 |         return
119 | 
120 |     if not MCP_TELEMETRY_ENABLED:
121 |         logger.info("Telemetry disabled via MCP_TELEMETRY_ENABLED")
122 |         return
123 | 
124 |     try:
125 |         logger.info(
126 |             f"Initializing telemetry for {MCP_SERVER_NAME} v{MCP_SERVER_VERSION}"
127 |         )
128 | 
129 |         # Initialize Prometheus metrics
130 |         _create_prometheus_metrics()
131 |         logger.debug("Prometheus metrics created")
132 | 
133 |         # Start metrics server if requested
134 |         if start_metrics:
135 |             _start_metrics_server()
136 | 
137 |         _telemetry_initialized = True
138 |         logger.info("Telemetry initialization complete")
139 | 
140 |     except Exception as e:
141 |         logger.error(f"Failed to initialize telemetry: {e}")
142 |         raise
143 | 
144 | 
145 | def is_telemetry_enabled() -> bool:
146 |     """Check if telemetry is enabled and initialized."""
147 |     return MCP_TELEMETRY_ENABLED and _telemetry_initialized
148 | 
149 | 
150 | # Export the metric objects for use by other modules
151 | __all__ = [
152 |     "init_telemetry",
153 |     "is_telemetry_enabled",
154 |     "MCP_CALLS",
155 |     "MCP_ERRS",
156 |     "MCP_LAT",
157 |     "MCP_REQ_B",
158 |     "MCP_RES_B",
159 |     "MCP_CONC",
160 |     "MCP_SERVER_NAME",
161 |     "MCP_SERVER_VERSION",
162 | ]
163 | 
```
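
A quick way to confirm the bootstrap wiring locally is to initialize telemetry and poll the Prometheus endpoint. A sketch, assuming the default `MCP_METRICS_PORT=9464` and that `prometheus_client` is installed:

```python
import time
import urllib.request

from alphavantage_mcp_server.telemetry_bootstrap import init_telemetry, is_telemetry_enabled

init_telemetry(start_metrics=True)  # no-op if MCP_TELEMETRY_ENABLED=false
if is_telemetry_enabled():
    time.sleep(1)  # give the daemon thread a moment to bind the port
    with urllib.request.urlopen("http://127.0.0.1:9464/metrics") as resp:
        body = resp.read().decode()
    # The metric family names should appear in the exposition even before any tool call
    print("mcp_tool_calls_total" in body)
```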

--------------------------------------------------------------------------------
/src/alphavantage_mcp_server/response_utils.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Response utilities for handling large API responses and preventing token limit issues.
  3 | """
  4 | 
  5 | import json
  6 | from typing import Dict, Any
  7 | 
  8 | 
  9 | def limit_time_series_response(
 10 |     response: Dict[str, Any], max_data_points: int = 100, preserve_metadata: bool = True
 11 | ) -> Dict[str, Any]:
 12 |     """
 13 |     Limit the number of data points in a time series response to prevent token limit issues.
 14 | 
 15 |     Args:
 16 |         response: The full API response from AlphaVantage
 17 |         max_data_points: Maximum number of data points to include (default: 100)
 18 |         preserve_metadata: Whether to preserve metadata sections (default: True)
 19 | 
 20 |     Returns:
 21 |         Limited response with reduced data points
 22 |     """
 23 |     if not isinstance(response, dict):
 24 |         return response
 25 | 
 26 |     limited_response = {}
 27 | 
 28 |     # Preserve metadata sections (they're usually small)
 29 |     if preserve_metadata:
 30 |         for key, value in response.items():
 31 |             if not isinstance(value, dict) or len(value) < 50:
 32 |                 limited_response[key] = value
 33 | 
 34 |     # Find and limit the main time series data section
 35 |     time_series_keys = [
 36 |         key
 37 |         for key in response.keys()
 38 |         if any(
 39 |             indicator in key.lower()
 40 |             for indicator in [
 41 |                 "time series",
 42 |                 "technical analysis",
 43 |                 "sma",
 44 |                 "ema",
 45 |                 "rsi",
 46 |                 "macd",
 47 |                 "bbands",
 48 |                 "stoch",
 49 |                 "adx",
 50 |                 "aroon",
 51 |                 "cci",
 52 |                 "mom",
 53 |                 "roc",
 54 |                 "willr",
 55 |                 "ad",
 56 |                 "obv",
 57 |                 "ht_",
 58 |                 "atr",
 59 |                 "natr",
 60 |                 "trix",
 61 |                 "ultosc",
 62 |                 "dx",
 63 |                 "minus_di",
 64 |                 "plus_di",
 65 |                 "minus_dm",
 66 |                 "plus_dm",
 67 |                 "midpoint",
 68 |                 "midprice",
 69 |                 "sar",
 70 |                 "trange",
 71 |                 "adosc",
 72 |             ]
 73 |         )
 74 |     ]
 75 | 
 76 |     for ts_key in time_series_keys:
 77 |         if ts_key in response and isinstance(response[ts_key], dict):
 78 |             time_series_data = response[ts_key]
 79 | 
 80 |             # Get the most recent data points (sorted by date descending)
 81 |             sorted_dates = sorted(time_series_data.keys(), reverse=True)
 82 |             limited_dates = sorted_dates[:max_data_points]
 83 | 
 84 |             # Create limited time series with only recent data
 85 |             limited_time_series = {
 86 |                 date: time_series_data[date] for date in limited_dates
 87 |             }
 88 | 
 89 |             limited_response[ts_key] = limited_time_series
 90 | 
 91 |             # Add summary info about the limitation
 92 |             if len(sorted_dates) > max_data_points:
 93 |                 limited_response[f"{ts_key}_summary"] = {
 94 |                     "total_data_points_available": len(sorted_dates),
 95 |                     "data_points_returned": len(limited_dates),
 96 |                     "date_range_returned": {
 97 |                         "from": min(limited_dates),
 98 |                         "to": max(limited_dates),
 99 |                     },
100 |                     "note": f"Response limited to {max_data_points} most recent data points to prevent token limit issues",
101 |                 }
102 | 
103 |     return limited_response
104 | 
105 | 
106 | def estimate_response_size(response: Any) -> int:
107 |     """
108 |     Estimate the token size of a response (rough approximation).
109 | 
110 |     Args:
111 |         response: The response to estimate
112 | 
113 |     Returns:
114 |         Estimated number of tokens
115 |     """
116 |     try:
117 |         json_str = json.dumps(response, indent=2)
118 |         # Rough approximation: 1 token ≈ 4 characters
119 |         return len(json_str) // 4
120 |     except Exception:
121 |         return 0
122 | 
123 | 
124 | def should_limit_response(response: Any, max_tokens: int = 15000) -> bool:
125 |     """
126 |     Check if a response should be limited based on estimated token count.
127 | 
128 |     Args:
129 |         response: The response to check
130 |         max_tokens: Maximum allowed tokens (default: 15000)
131 | 
132 |     Returns:
133 |         True if response should be limited
134 |     """
135 |     estimated_tokens = estimate_response_size(response)
136 |     return estimated_tokens > max_tokens
137 | 
138 | 
139 | def create_response_summary(response: Dict[str, Any]) -> Dict[str, Any]:
140 |     """
141 |     Create a summary of a large response instead of returning the full data.
142 | 
143 |     Args:
144 |         response: The full response to summarize
145 | 
146 |     Returns:
147 |         Summary of the response
148 |     """
149 |     summary = {
150 |         "response_type": "summary",
151 |         "reason": "Full response too large, showing summary to prevent token limit issues",
152 |     }
153 | 
154 |     # Add metadata sections
155 |     for key, value in response.items():
156 |         if not isinstance(value, dict) or len(value) < 10:
157 |             summary[key] = value
158 | 
159 |     # Summarize large data sections
160 |     for key, value in response.items():
161 |         if isinstance(value, dict) and len(value) >= 10:
162 |             summary[f"{key}_info"] = {
163 |                 "data_points": len(value),
164 |                 "date_range": {
165 |                     "earliest": min(value.keys()) if value else None,
166 |                     "latest": max(value.keys()) if value else None,
167 |                 },
168 |                 "sample_fields": list(list(value.values())[0].keys()) if value else [],
169 |                 "note": "Use a more specific date range or limit parameter to get actual data",
170 |             }
171 | 
172 |     return summary
173 | 
```
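
A small worked example (synthetic payload, not real API data) showing how `limit_time_series_response` trims a daily series and records a summary of what was dropped:

```python
from alphavantage_mcp_server.response_utils import (
    limit_time_series_response,
    should_limit_response,
)

payload = {
    "Meta Data": {"2. Symbol": "IBM"},
    "Time Series (Daily)": {
        "2024-01-03": {"4. close": "161.00"},
        "2024-01-02": {"4. close": "160.00"},
        "2024-01-01": {"4. close": "159.00"},
    },
}

trimmed = limit_time_series_response(payload, max_data_points=2)
print(sorted(trimmed["Time Series (Daily)"]))  # ['2024-01-02', '2024-01-03']
print(trimmed["Time Series (Daily)_summary"]["data_points_returned"])  # 2
print(should_limit_response(payload))  # False: a payload this small is well under 15000 tokens
```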

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/deploy.sh:
--------------------------------------------------------------------------------

```bash
  1 | #!/bin/bash
  2 | 
  3 | # AWS Stateless MCP Lambda Deployment Script
  4 | # Based on aws-samples/sample-serverless-mcp-servers pattern
  5 | 
  6 | set -e
  7 | 
  8 | echo "🚀 AlphaVantage Stateless MCP Server Deployment"
  9 | echo "=============================================="
 10 | 
 11 | # Check prerequisites
 12 | echo "📋 Checking prerequisites..."
 13 | 
 14 | # Check if AWS CLI is installed
 15 | if ! command -v aws &> /dev/null; then
 16 |     echo "❌ AWS CLI is not installed. Please install it first."
 17 |     exit 1
 18 | fi
 19 | 
 20 | # Check if SAM CLI is installed
 21 | if ! command -v sam &> /dev/null; then
 22 |     echo "❌ AWS SAM CLI is not installed. Please install it first."
 23 |     echo "   Install with: pip install aws-sam-cli"
 24 |     exit 1
 25 | fi
 26 | 
 27 | # Check AWS credentials
 28 | if ! aws sts get-caller-identity &> /dev/null; then
 29 |     echo "❌ AWS credentials not configured. Please run 'aws configure' first."
 30 |     exit 1
 31 | fi
 32 | 
 33 | # Check required environment variables
 34 | if [ -z "$ALPHAVANTAGE_API_KEY" ]; then
 35 |     echo "❌ ALPHAVANTAGE_API_KEY environment variable is required."
 36 |     echo "   Set it with: export ALPHAVANTAGE_API_KEY=your_api_key_here"
 37 |     exit 1
 38 | fi
 39 | 
 40 | echo "✅ Prerequisites check passed"
 41 | 
 42 | # Set deployment parameters
 43 | STACK_NAME="alphavantage-stateless-mcp"
 44 | OAUTH_ENABLED="${OAUTH_ENABLED:-false}"
 45 | OAUTH_AUTHORIZATION_SERVER_URL="${OAUTH_AUTHORIZATION_SERVER_URL:-}"
 46 | 
 47 | echo ""
 48 | echo "📦 Deployment Configuration:"
 49 | echo "   Stack Name: $STACK_NAME"
 50 | echo "   OAuth Enabled: $OAUTH_ENABLED"
 51 | echo "   OAuth Server URL: ${OAUTH_AUTHORIZATION_SERVER_URL:-'(not set)'}"
 52 | echo ""
 53 | 
 54 | # Confirm deployment
 55 | read -p "🤔 Do you want to proceed with deployment? (y/N): " -n 1 -r
 56 | echo
 57 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then
 58 |     echo "❌ Deployment cancelled."
 59 |     exit 1
 60 | fi
 61 | 
 62 | # Build the SAM application
 63 | echo ""
 64 | echo "🔨 Building SAM application..."
 65 | sam build --use-container
 66 | 
 67 | if [ $? -ne 0 ]; then
 68 |     echo "❌ SAM build failed."
 69 |     exit 1
 70 | fi
 71 | 
 72 | echo "✅ Build completed successfully"
 73 | 
 74 | # Deploy the SAM application
 75 | echo ""
 76 | echo "🚀 Deploying to AWS..."
 77 | 
 78 | # Deploy with conditional parameter handling
 79 | if [ -n "$OAUTH_AUTHORIZATION_SERVER_URL" ]; then
 80 |     # Deploy with OAuth URL
 81 |     sam deploy \
 82 |         --stack-name "$STACK_NAME" \
 83 |         --capabilities CAPABILITY_IAM \
 84 |         --resolve-s3 \
 85 |         --parameter-overrides \
 86 |             "AlphaVantageApiKey=$ALPHAVANTAGE_API_KEY" \
 87 |             "OAuthEnabled=$OAUTH_ENABLED" \
 88 |             "OAuthAuthorizationServerUrl=$OAUTH_AUTHORIZATION_SERVER_URL" \
 89 |         --no-confirm-changeset \
 90 |         --no-fail-on-empty-changeset
 91 | else
 92 |     # Deploy without OAuth URL (use default empty value)
 93 |     sam deploy \
 94 |         --stack-name "$STACK_NAME" \
 95 |         --capabilities CAPABILITY_IAM \
 96 |         --resolve-s3 \
 97 |         --parameter-overrides \
 98 |             "AlphaVantageApiKey=$ALPHAVANTAGE_API_KEY" \
 99 |             "OAuthEnabled=$OAUTH_ENABLED" \
100 |         --no-confirm-changeset \
101 |         --no-fail-on-empty-changeset
102 | fi
103 | 
104 | if [ $? -ne 0 ]; then
105 |     echo "❌ Deployment failed."
106 |     exit 1
107 | fi
108 | 
109 | echo "✅ Deployment completed successfully!"
110 | 
111 | # Get the API endpoint
112 | echo ""
113 | echo "📡 Getting deployment information..."
114 | API_URL=$(aws cloudformation describe-stacks \
115 |     --stack-name "$STACK_NAME" \
116 |     --query 'Stacks[0].Outputs[?OutputKey==`McpApiUrl`].OutputValue' \
117 |     --output text)
118 | 
119 | FUNCTION_NAME=$(aws cloudformation describe-stacks \
120 |     --stack-name "$STACK_NAME" \
121 |     --query 'Stacks[0].Outputs[?OutputKey==`FunctionName`].OutputValue' \
122 |     --output text)
123 | 
124 | echo ""
125 | echo "🎉 Deployment Summary:"
126 | echo "======================"
127 | echo "   API Endpoint: $API_URL"
128 | echo "   Function Name: $FUNCTION_NAME"
129 | echo "   Stack Name: $STACK_NAME"
130 | echo ""
131 | 
132 | # Test the deployment
133 | echo "🧪 Testing the deployment..."
134 | echo ""
135 | 
136 | echo "1️⃣ Testing MCP Initialize..."
137 | INIT_RESPONSE=$(curl -s -X POST "$API_URL" \
138 |     -H 'Content-Type: application/json' \
139 |     -H 'Accept: application/json' \
140 |     -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{},"clientInfo":{"name":"test-client","version":"1.0.0"}}}')
141 | 
142 | if echo "$INIT_RESPONSE" | grep -q '"result"'; then
143 |     echo "✅ Initialize test passed"
144 | else
145 |     echo "❌ Initialize test failed"
146 |     echo "Response: $INIT_RESPONSE"
147 | fi
148 | 
149 | echo ""
150 | echo "2️⃣ Testing Tools List..."
151 | TOOLS_RESPONSE=$(curl -s -X POST "$API_URL" \
152 |     -H 'Content-Type: application/json' \
153 |     -H 'Accept: application/json' \
154 |     -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}')
155 | 
156 | if echo "$TOOLS_RESPONSE" | grep -q '"tools"'; then
157 |     TOOL_COUNT=$(echo "$TOOLS_RESPONSE" | grep -o '"name"' | wc -l)
158 |     echo "✅ Tools list test passed - Found $TOOL_COUNT tools"
159 | else
160 |     echo "❌ Tools list test failed"
161 |     echo "Response: $TOOLS_RESPONSE"
162 | fi
163 | 
164 | echo ""
165 | echo "🎯 Manual Test Commands:"
166 | echo "========================"
167 | echo ""
168 | echo "# Initialize MCP session:"
169 | echo "curl -X POST '$API_URL' \\"
170 | echo "  -H 'Content-Type: application/json' \\"
171 | echo "  -H 'Accept: application/json' \\"
172 | echo "  -d '{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"initialize\",\"params\":{\"protocolVersion\":\"2024-11-05\",\"capabilities\":{},\"clientInfo\":{\"name\":\"test-client\",\"version\":\"1.0.0\"}}}'"
173 | echo ""
174 | echo "# List available tools:"
175 | echo "curl -X POST '$API_URL' \\"
176 | echo "  -H 'Content-Type: application/json' \\"
177 | echo "  -H 'Accept: application/json' \\"
178 | echo "  -d '{\"jsonrpc\":\"2.0\",\"id\":2,\"method\":\"tools/list\"}'"
179 | echo ""
180 | echo "# Call a tool (get stock quote for AAPL):"
181 | echo "curl -X POST '$API_URL' \\"
182 | echo "  -H 'Content-Type: application/json' \\"
183 | echo "  -H 'Accept: application/json' \\"
184 | echo "  -d '{\"jsonrpc\":\"2.0\",\"id\":3,\"method\":\"tools/call\",\"params\":{\"name\":\"stock_quote\",\"arguments\":{\"symbol\":\"AAPL\"}}}'"
185 | echo ""
186 | 
187 | echo "📊 Monitoring:"
188 | echo "=============="
189 | echo "   CloudWatch Logs: aws logs tail /aws/lambda/$FUNCTION_NAME --follow"
190 | echo "   Function Metrics: aws lambda get-function --function-name $FUNCTION_NAME"
191 | echo ""
192 | 
193 | echo "🧹 Cleanup (when done testing):"
194 | echo "================================"
195 | echo "   aws cloudformation delete-stack --stack-name $STACK_NAME"
196 | echo ""
197 | 
198 | echo "✅ Stateless MCP deployment completed successfully!"
199 | echo "   Your AlphaVantage MCP server is now running serverlessly on AWS Lambda!"
200 | 
```
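
The curl smoke tests at the end of the script can also be run from Python, which is convenient in CI. A sketch using `httpx` (already a test dependency); `API_URL` is a placeholder for the `McpApiUrl` stack output printed by the script:

```python
import httpx

API_URL = "https://<api-id>.execute-api.<region>.amazonaws.com/Prod/mcp"  # placeholder: use the McpApiUrl output
HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}

init_payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "initialize",
    "params": {
        "protocolVersion": "2024-11-05",
        "capabilities": {},
        "clientInfo": {"name": "test-client", "version": "1.0.0"},
    },
}

init = httpx.post(API_URL, json=init_payload, headers=HEADERS, timeout=30.0)
print("initialize ok:", "result" in init.json())

tools = httpx.post(
    API_URL,
    json={"jsonrpc": "2.0", "id": 2, "method": "tools/list"},
    headers=HEADERS,
    timeout=30.0,
)
print("tool count:", len(tools.json().get("result", {}).get("tools", [])))
```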

--------------------------------------------------------------------------------
/src/alphavantage_mcp_server/telemetry_instrument.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Telemetry Instrumentation Module
  3 | 
  4 | This module provides the @instrument_tool decorator for wrapping MCP tool functions
  5 | with telemetry collection using Prometheus metrics.
  6 | """
  7 | 
  8 | import asyncio
  9 | import functools
 10 | import logging
 11 | import time
 12 | from typing import Any, Callable, Optional
 13 | 
 14 | from .telemetry_bootstrap import (
 15 |     MCP_CALLS,
 16 |     MCP_ERRS,
 17 |     MCP_LAT,
 18 |     MCP_REQ_B,
 19 |     MCP_RES_B,
 20 |     MCP_CONC,
 21 |     is_telemetry_enabled,
 22 | )
 23 | 
 24 | logger = logging.getLogger(__name__)
 25 | 
 26 | 
 27 | def _classify_error(error: Exception) -> str:
 28 |     """
 29 |     Classify an error into a category for metrics labeling.
 30 | 
 31 |     Args:
 32 |         error: The exception to classify
 33 | 
 34 |     Returns:
 35 |         Error category string: "timeout", "bad_input", "connection", or "unknown"
 36 |     """
 37 |     if isinstance(error, (TimeoutError, asyncio.TimeoutError)):
 38 |         return "timeout"
 39 |     elif isinstance(error, (ValueError, TypeError, KeyError, AttributeError)):
 40 |         return "bad_input"
 41 |     elif isinstance(error, (ConnectionError, OSError)):
 42 |         return "connection"
 43 |     else:
 44 |         return "unknown"
 45 | 
 46 | 
 47 | def _get_size_bytes(obj: Any) -> int:
 48 |     """
 49 |     Calculate the approximate size of an object in bytes.
 50 | 
 51 |     Args:
 52 |         obj: Object to measure
 53 | 
 54 |     Returns:
 55 |         Size in bytes (0 if measurement fails)
 56 |     """
 57 |     try:
 58 |         if obj is None:
 59 |             return 0
 60 |         elif isinstance(obj, (str, bytes)):
 61 |             return len(obj)
 62 |         else:
 63 |             # For other objects, convert to string and measure
 64 |             return len(str(obj))
 65 |     except Exception:
 66 |         return 0
 67 | 
 68 | 
 69 | def instrument_tool(tool_name: str, transport: Optional[str] = None) -> Callable:
 70 |     """
 71 |     Decorator to instrument MCP tool functions with telemetry collection.
 72 | 
 73 |     This decorator:
 74 |     - Increments/decrements active concurrency gauge
 75 |     - Measures execution duration
 76 |     - Classifies and counts errors
 77 |     - Emits calls_total metric with outcome
 78 |     - Measures request and response payload sizes
 79 | 
 80 |     Args:
 81 |         tool_name: Name of the tool for metrics labeling
 82 |         transport: Transport type ("stdio", "http", etc.) for metrics labeling
 83 | 
 84 |     Returns:
 85 |         Decorated function with telemetry instrumentation
 86 |     """
 87 | 
 88 |     def decorator(func: Callable) -> Callable:
 89 |         if not is_telemetry_enabled():
 90 |             # If telemetry is disabled, return the original function unchanged
 91 |             return func
 92 | 
 93 |         @functools.wraps(func)
 94 |         async def async_wrapper(*args, **kwargs) -> Any:
 95 |             """Async wrapper for instrumented functions."""
 96 |             start_time = time.time()
 97 |             outcome = "error"
 98 |             error_kind = None
 99 | 
100 |             # Increment active concurrency
101 |             if MCP_CONC:
102 |                 MCP_CONC.labels(tool=tool_name).inc()
103 | 
104 |             try:
105 |                 # Measure request size (approximate)
106 |                 request_data = {"args": args, "kwargs": kwargs}
107 |                 request_size = _get_size_bytes(request_data)
108 |                 if MCP_REQ_B:
109 |                     MCP_REQ_B.labels(tool=tool_name).observe(request_size)
110 | 
111 |                 # Execute the actual function
112 |                 result = await func(*args, **kwargs)
113 | 
114 |                 # Measure response size
115 |                 response_size = _get_size_bytes(result)
116 |                 if MCP_RES_B:
117 |                     MCP_RES_B.labels(tool=tool_name).observe(response_size)
118 | 
119 |                 outcome = "ok"
120 |                 return result
121 | 
122 |             except Exception as e:
123 |                 error_kind = _classify_error(e)
124 | 
125 |                 # Increment error counter
126 |                 if MCP_ERRS:
127 |                     MCP_ERRS.labels(tool=tool_name, error_kind=error_kind).inc()
128 | 
129 |                 logger.warning(f"Tool {tool_name} failed with {error_kind} error: {e}")
130 |                 raise
131 | 
132 |             finally:
133 |                 # Record execution time
134 |                 duration = time.time() - start_time
135 |                 if MCP_LAT:
136 |                     MCP_LAT.labels(tool=tool_name, server=MCP_SERVER_NAME, version=MCP_SERVER_VERSION).observe(duration)
137 | 
138 |                 # Increment total calls counter
139 |                 if MCP_CALLS:
140 |                     MCP_CALLS.labels(tool=tool_name, server=MCP_SERVER_NAME, version=MCP_SERVER_VERSION, outcome=outcome).inc()
141 | 
142 |                 # Decrement active concurrency
143 |                 if MCP_CONC:
144 |                     MCP_CONC.labels(tool=tool_name).dec()
145 | 
146 |         @functools.wraps(func)
147 |         def sync_wrapper(*args, **kwargs) -> Any:
148 |             """Sync wrapper for instrumented functions."""
149 |             start_time = time.time()
150 |             outcome = "error"
151 |             error_kind = None
152 | 
153 |             # Increment active concurrency
154 |             if MCP_CONC:
155 |                 MCP_CONC.labels(tool=tool_name).inc()
156 | 
157 |             try:
158 |                 # Measure request size (approximate)
159 |                 request_data = {"args": args, "kwargs": kwargs}
160 |                 request_size = _get_size_bytes(request_data)
161 |                 if MCP_REQ_B:
162 |                     MCP_REQ_B.labels(tool=tool_name).observe(request_size)
163 | 
164 |                 # Execute the actual function
165 |                 result = func(*args, **kwargs)
166 | 
167 |                 # Measure response size
168 |                 response_size = _get_size_bytes(result)
169 |                 if MCP_RES_B:
170 |                     MCP_RES_B.labels(tool=tool_name).observe(response_size)
171 | 
172 |                 outcome = "ok"
173 |                 return result
174 | 
175 |             except Exception as e:
176 |                 error_kind = _classify_error(e)
177 | 
178 |                 # Increment error counter
179 |                 if MCP_ERRS:
180 |                     MCP_ERRS.labels(tool=tool_name, error_kind=error_kind).inc()
181 | 
182 |                 logger.warning(f"Tool {tool_name} failed with {error_kind} error: {e}")
183 |                 raise
184 | 
185 |             finally:
186 |                 # Record execution time
187 |                 duration = time.time() - start_time
188 |                 if MCP_LAT:
189 |                     MCP_LAT.labels(tool=tool_name, server=MCP_SERVER_NAME, version=MCP_SERVER_VERSION).observe(duration)
190 | 
191 |                 # Increment total calls counter
192 |                 if MCP_CALLS:
193 |                     MCP_CALLS.labels(tool=tool_name, server=MCP_SERVER_NAME, version=MCP_SERVER_VERSION, outcome=outcome).inc()
194 | 
195 |                 # Decrement active concurrency
196 |                 if MCP_CONC:
197 |                     MCP_CONC.labels(tool=tool_name).dec()
198 | 
199 |         # Return appropriate wrapper based on whether function is async
200 |         if asyncio.iscoroutinefunction(func):
201 |             return async_wrapper
202 |         else:
203 |             return sync_wrapper
204 | 
205 |     return decorator
206 | 
207 | 
208 | # Export the decorator
209 | __all__ = ["instrument_tool"]
210 | 
```
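
Applying the decorator to a toy coroutine shows the intended flow. One subtlety grounded in the code above: the metric objects are bound when `telemetry_instrument` is imported and the enabled check runs at decoration time, so `init_telemetry()` has to happen first. A sketch:

```python
import asyncio

from alphavantage_mcp_server.telemetry_bootstrap import init_telemetry

init_telemetry(start_metrics=False)  # create the metric objects before the instrument module is imported

from alphavantage_mcp_server.telemetry_instrument import instrument_tool  # noqa: E402


@instrument_tool("demo_echo", transport="stdio")
async def demo_echo(symbol: str) -> str:
    await asyncio.sleep(0.01)
    return f"echo:{symbol}"


# Records latency, call count, payload sizes and concurrency for the "demo_echo" tool
# (assuming MCP_TELEMETRY_ENABLED is not set to "false").
print(asyncio.run(demo_echo("AAPL")))
```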

--------------------------------------------------------------------------------
/tests/test_http_mcp_client.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import json
  3 | import subprocess
  4 | import sys
  5 | 
  6 | import pytest
  7 | from mcp import ClientSession
  8 | from mcp.client.streamable_http import streamablehttp_client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_http_transport_with_mcp_client():
 13 |     """Test HTTP transport using proper MCP streamable HTTP client"""
 14 | 
 15 |     # Start the HTTP server
 16 |     server_process = subprocess.Popen(
 17 |         [
 18 |             sys.executable,
 19 |             "-c",
 20 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 21 |             "--server",
 22 |             "http",
 23 |             "--port",
 24 |             "8087",
 25 |         ],
 26 |         stdout=subprocess.PIPE,
 27 |         stderr=subprocess.PIPE,
 28 |         text=True,
 29 |     )
 30 | 
 31 |     try:
 32 |         # Give server time to start
 33 |         await asyncio.sleep(4)
 34 | 
 35 |         base_url = "http://localhost:8087/"
 36 | 
 37 |         client = streamablehttp_client(base_url + "mcp")
 38 | 
 39 |         async with client as streams:
 40 |             # Extract streams from the context
 41 |             if len(streams) == 3:
 42 |                 read_stream, write_stream, session_manager = streams
 43 |             else:
 44 |                 read_stream, write_stream = streams
 45 | 
 46 |             async with ClientSession(read_stream, write_stream) as session:
 47 |                 # Initialize the session
 48 |                 init_result = await session.initialize()
 49 |                 assert init_result is not None
 50 |                 assert init_result.serverInfo.name == "alphavantage"
 51 | 
 52 |                 # List available tools
 53 |                 response = await session.list_tools()
 54 |                 tools = response.tools
 55 |                 tool_names = [tool.name for tool in tools]
 56 | 
 57 |                 # Verify essential tools are present
 58 |                 assert "stock_quote" in tool_names
 59 |                 assert len(tools) > 0
 60 | 
 61 |                 # Test calling the stock_quote tool
 62 |                 result = await session.call_tool("stock_quote", {"symbol": "AAPL"})
 63 | 
 64 |                 # Verify we got a result
 65 |                 assert result is not None
 66 |                 assert hasattr(result, "content")
 67 |                 assert len(result.content) > 0
 68 | 
 69 |                 # Parse the JSON result
 70 |                 content_text = result.content[0].text
 71 |                 stock_data = json.loads(content_text)
 72 | 
 73 |                 assert "Global Quote" in stock_data, f"Unexpected response: {stock_data}"
 74 |                 assert stock_data["Global Quote"]["01. symbol"] == "AAPL"
 75 | 
 76 |     finally:
 77 |         # Clean up
 78 |         server_process.terminate()
 79 |         try:
 80 |             server_process.wait(timeout=5)
 81 |         except subprocess.TimeoutExpired:
 82 |             server_process.kill()
 83 | 
 84 | 
 85 | @pytest.mark.asyncio
 86 | async def test_http_transport_tool_listing():
 87 |     """Test HTTP transport tool listing functionality"""
 88 | 
 89 |     # Start the HTTP server
 90 |     server_process = subprocess.Popen(
 91 |         [
 92 |             sys.executable,
 93 |             "-c",
 94 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 95 |             "--server",
 96 |             "http",
 97 |             "--port",
 98 |             "8088",
 99 |         ],
100 |         stdout=subprocess.PIPE,
101 |         stderr=subprocess.PIPE,
102 |         text=True,
103 |     )
104 | 
105 |     try:
106 |         # Give server time to start
107 |         await asyncio.sleep(4)
108 | 
109 |         base_url = "http://localhost:8088/"
110 | 
111 |         client = streamablehttp_client(base_url + "mcp")
112 | 
113 |         async with client as streams:
114 |             if len(streams) == 3:
115 |                 read_stream, write_stream, session_manager = streams
116 |             else:
117 |                 read_stream, write_stream = streams
118 | 
119 |             async with ClientSession(read_stream, write_stream) as session:
120 |                 await session.initialize()
121 | 
122 |                 response = await session.list_tools()
123 |                 tools = response.tools
124 | 
125 |                 # Verify we have tools
126 |                 assert len(tools) > 0
127 | 
128 |                 # Verify essential tools are present
129 |                 tool_names = [tool.name for tool in tools]
130 |                 expected_tools = ["stock_quote", "time_series_daily"]
131 | 
132 |                 for expected_tool in expected_tools:
133 |                     assert expected_tool in tool_names, (
134 |                         f"{expected_tool} not found in tools"
135 |                     )
136 | 
137 |                 # Verify each tool has required attributes
138 |                 for tool in tools:
139 |                     assert hasattr(tool, "name")
140 |                     assert hasattr(tool, "description")
141 |                     assert hasattr(tool, "inputSchema")
142 |                     assert tool.name is not None
143 |                     assert tool.description is not None
144 |                     assert tool.inputSchema is not None
145 | 
146 |     finally:
147 |         # Clean up
148 |         server_process.terminate()
149 |         try:
150 |             server_process.wait(timeout=5)
151 |         except subprocess.TimeoutExpired:
152 |             server_process.kill()
153 | 
154 | 
155 | @pytest.mark.asyncio
156 | async def test_http_transport_tool_call():
157 |     """Test HTTP transport tool calling functionality"""
158 | 
159 |     # Start the HTTP server
160 |     server_process = subprocess.Popen(
161 |         [
162 |             sys.executable,
163 |             "-c",
164 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
165 |             "--server",
166 |             "http",
167 |             "--port",
168 |             "8089",
169 |         ],
170 |         stdout=subprocess.PIPE,
171 |         stderr=subprocess.PIPE,
172 |         text=True,
173 |     )
174 | 
175 |     try:
176 |         # Give server time to start
177 |         await asyncio.sleep(4)
178 | 
179 |         base_url = "http://localhost:8089/"
180 | 
181 |         client = streamablehttp_client(base_url + "mcp")
182 | 
183 |         async with client as streams:
184 |             if len(streams) == 3:
185 |                 read_stream, write_stream, session_manager = streams
186 |             else:
187 |                 read_stream, write_stream = streams
188 | 
189 |             async with ClientSession(read_stream, write_stream) as session:
190 |                 # Initialize the session
191 |                 await session.initialize()
192 | 
193 |                 # Call the stock_quote tool
194 |                 stock_result = await session.call_tool(
195 |                     "stock_quote", {"symbol": "MSFT"}
196 |                 )
197 | 
198 |                 # Verify we got a result
199 |                 assert stock_result is not None
200 |                 assert hasattr(stock_result, "content")
201 |                 assert len(stock_result.content) > 0
202 | 
203 |                 # Parse and validate the result
204 |                 stock_content = stock_result.content[0].text
205 |                 stock_data = json.loads(stock_content)
206 |                 assert "Global Quote" in stock_data
207 |                 assert stock_data["Global Quote"]["01. symbol"] == "MSFT"
208 | 
209 |                 # Call time_series_daily tool
210 |                 time_series_result = await session.call_tool(
211 |                     "time_series_daily", {"symbol": "IBM", "outputsize": "compact"}
212 |                 )
213 | 
214 |                 # Verify we got a result
215 |                 assert time_series_result is not None
216 |                 assert hasattr(time_series_result, "content")
217 |                 assert len(time_series_result.content) > 0
218 | 
219 |                 # Parse and validate the result
220 |                 ts_content = time_series_result.content[0].text
221 |                 ts_data = json.loads(ts_content)
222 |                 assert "Time Series (Daily)" in ts_data
223 |                 assert "Meta Data" in ts_data
224 |                 assert ts_data["Meta Data"]["2. Symbol"] == "IBM"
225 | 
226 |     finally:
227 |         # Clean up
228 |         server_process.terminate()
229 |         try:
230 |             server_process.wait(timeout=5)
231 |         except subprocess.TimeoutExpired:
232 |             server_process.kill()
233 | 
```
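
Each HTTP test repeats the same subprocess start-up and teardown. One possible refactor (a sketch, not in the repository) is a small context-manager helper that the tests, or a pytest fixture wrapping it, could share:

```python
import subprocess
import sys
import time
from contextlib import contextmanager


@contextmanager
def http_server(port: int):
    """Start the MCP server in HTTP mode on `port` and guarantee teardown."""
    proc = subprocess.Popen(
        [
            sys.executable,
            "-c",
            "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
            "--server",
            "http",
            "--port",
            str(port),
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    try:
        time.sleep(4)  # same startup wait the tests use today
        yield f"http://localhost:{port}/"
    finally:
        proc.terminate()
        try:
            proc.wait(timeout=5)
        except subprocess.TimeoutExpired:
            proc.kill()
```

A test would then wrap its body in `with http_server(8087) as base_url:` and connect via `streamablehttp_client(base_url + "mcp")` exactly as above.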

--------------------------------------------------------------------------------
/tests/test_http_transport.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import json
  3 | import subprocess
  4 | import sys
  5 | 
  6 | import pytest
  7 | from mcp import ClientSession
  8 | from mcp.client.streamable_http import streamablehttp_client
  9 | 
 10 | 
 11 | @pytest.mark.asyncio
 12 | async def test_http_stock_quote():
 13 |     """Test streamable-http transport with stock_quote tool"""
 14 | 
 15 |     # Start the HTTP server
 16 |     server_process = subprocess.Popen(
 17 |         [
 18 |             sys.executable,
 19 |             "-c",
 20 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 21 |             "--server",
 22 |             "http",
 23 |             "--port",
 24 |             "8091",
 25 |         ],
 26 |         stdout=subprocess.PIPE,
 27 |         stderr=subprocess.PIPE,
 28 |         text=True,
 29 |     )
 30 | 
 31 |     try:
 32 |         # Give server time to start
 33 |         await asyncio.sleep(4)
 34 | 
 35 |         base_url = "http://localhost:8091/"
 36 | 
 37 |         # Connect to the server using streamable-http client
 38 |         client = streamablehttp_client(base_url + "mcp")
 39 | 
 40 |         async with client as streams:
 41 |             # Handle different return formats from the client
 42 |             if len(streams) == 3:
 43 |                 read_stream, write_stream, session_manager = streams
 44 |             else:
 45 |                 read_stream, write_stream = streams
 46 | 
 47 |             async with ClientSession(read_stream, write_stream) as session:
 48 |                 # Initialize the session
 49 |                 await session.initialize()
 50 | 
 51 |                 # List available tools
 52 |                 response = await session.list_tools()
 53 |                 tools = response.tools
 54 |                 tool_names = [tool.name for tool in tools]
 55 | 
 56 |                 # Verify stock_quote tool is available
 57 |                 assert "stock_quote" in tool_names, (
 58 |                     f"stock_quote not found in tools: {tool_names}"
 59 |                 )
 60 | 
 61 |                 # Test calling the stock_quote tool
 62 |                 result = await session.call_tool("stock_quote", {"symbol": "AAPL"})
 63 | 
 64 |                 # Verify we got a result
 65 |                 assert result is not None
 66 |                 assert hasattr(result, "content")
 67 |                 assert len(result.content) > 0
 68 | 
 69 |                 # Parse the JSON result to verify it contains stock data
 70 |                 content_text = result.content[0].text
 71 |                 stock_data = json.loads(content_text)
 72 | 
 73 |                 # Verify the response contains expected stock quote fields
 74 |                 assert "Global Quote" in stock_data or "Error Message" in stock_data
 75 | 
 76 |                 if "Global Quote" in stock_data:
 77 |                     global_quote = stock_data["Global Quote"]
 78 |                     assert "01. symbol" in global_quote
 79 |                     assert "05. price" in global_quote
 80 |                     assert global_quote["01. symbol"] == "AAPL"
 81 | 
 82 |     finally:
 83 |         # Clean up
 84 |         server_process.terminate()
 85 |         try:
 86 |             server_process.wait(timeout=5)
 87 |         except subprocess.TimeoutExpired:
 88 |             server_process.kill()
 89 | 
 90 | 
 91 | @pytest.mark.asyncio
 92 | async def test_http_tool_list():
 93 |     """Test that streamable-http transport can list all available tools"""
 94 | 
 95 |     # Start the HTTP server
 96 |     server_process = subprocess.Popen(
 97 |         [
 98 |             sys.executable,
 99 |             "-c",
100 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
101 |             "--server",
102 |             "http",
103 |             "--port",
104 |             "8092",
105 |         ],
106 |         stdout=subprocess.PIPE,
107 |         stderr=subprocess.PIPE,
108 |         text=True,
109 |     )
110 | 
111 |     try:
112 |         # Give server time to start
113 |         await asyncio.sleep(4)
114 | 
115 |         base_url = "http://localhost:8092/"
116 | 
117 |         client = streamablehttp_client(base_url + "mcp")
118 | 
119 |         async with client as streams:
120 |             if len(streams) == 3:
121 |                 read_stream, write_stream, session_manager = streams
122 |             else:
123 |                 read_stream, write_stream = streams
124 | 
125 |             async with ClientSession(read_stream, write_stream) as session:
126 |                 await session.initialize()
127 | 
128 |                 response = await session.list_tools()
129 |                 tools = response.tools
130 | 
131 |                 # Verify we have tools
132 |                 assert len(tools) > 0
133 | 
134 |                 # Verify essential tools are present
135 |                 tool_names = [tool.name for tool in tools]
136 |                 expected_tools = ["stock_quote", "time_series_daily"]
137 | 
138 |                 for expected_tool in expected_tools:
139 |                     assert expected_tool in tool_names, (
140 |                         f"{expected_tool} not found in tools"
141 |                     )
142 | 
143 |                 # Verify each tool has required attributes
144 |                 for tool in tools:
145 |                     assert hasattr(tool, "name")
146 |                     assert hasattr(tool, "description")
147 |                     assert hasattr(tool, "inputSchema")
148 |                     assert tool.name is not None
149 |                     assert tool.description is not None
150 |                     assert tool.inputSchema is not None
151 | 
152 |     finally:
153 |         # Clean up
154 |         server_process.terminate()
155 |         try:
156 |             server_process.wait(timeout=5)
157 |         except subprocess.TimeoutExpired:
158 |             server_process.kill()
159 | 
160 | 
161 | @pytest.mark.asyncio
162 | async def test_http_multiple_calls():
163 |     """Test making multiple tool calls over streamable-http transport"""
164 | 
165 |     # Start the HTTP server
166 |     server_process = subprocess.Popen(
167 |         [
168 |             sys.executable,
169 |             "-c",
170 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
171 |             "--server",
172 |             "http",
173 |             "--port",
174 |             "8093",
175 |         ],
176 |         stdout=subprocess.PIPE,
177 |         stderr=subprocess.PIPE,
178 |         text=True,
179 |     )
180 | 
181 |     try:
182 |         # Give server time to start
183 |         await asyncio.sleep(4)
184 | 
185 |         base_url = "http://localhost:8093/"
186 | 
187 |         client = streamablehttp_client(base_url + "mcp")
188 | 
189 |         async with client as streams:
190 |             if len(streams) == 3:
191 |                 read_stream, write_stream, session_manager = streams
192 |             else:
193 |                 read_stream, write_stream = streams
194 | 
195 |             async with ClientSession(read_stream, write_stream) as session:
196 |                 await session.initialize()
197 | 
198 |                 # Make multiple calls to test session persistence
199 |                 symbols = ["AAPL", "GOOGL", "MSFT"]
200 |                 results = []
201 | 
202 |                 for symbol in symbols:
203 |                     result = await session.call_tool("stock_quote", {"symbol": symbol})
204 |                     assert result is not None
205 |                     assert hasattr(result, "content")
206 |                     assert len(result.content) > 0
207 | 
208 |                     content_text = result.content[0].text
209 |                     stock_data = json.loads(content_text)
210 |                     results.append(stock_data)
211 | 
212 |                 # Verify we got results for all symbols
213 |                 assert len(results) == len(symbols)
214 | 
215 |                 # Verify each result contains stock data or error message
216 |                 for i, result in enumerate(results):
217 |                     assert "Global Quote" in result or "Error Message" in result, (
218 |                         f"Invalid result for {symbols[i]}"
219 |                     )
220 | 
221 |     finally:
222 |         # Clean up
223 |         server_process.terminate()
224 |         try:
225 |             server_process.wait(timeout=5)
226 |         except subprocess.TimeoutExpired:
227 |             server_process.kill()
228 | 
229 | 
230 | @pytest.mark.asyncio
231 | async def test_http_server_info():
232 |     """Test retrieving server information over streamable-http transport"""
233 | 
234 |     # Start the HTTP server
235 |     server_process = subprocess.Popen(
236 |         [
237 |             sys.executable,
238 |             "-c",
239 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
240 |             "--server",
241 |             "http",
242 |             "--port",
243 |             "8094",
244 |         ],
245 |         stdout=subprocess.PIPE,
246 |         stderr=subprocess.PIPE,
247 |         text=True,
248 |     )
249 | 
250 |     try:
251 |         # Give server time to start
252 |         await asyncio.sleep(4)
253 | 
254 |         base_url = "http://localhost:8094/"
255 | 
256 |         client = streamablehttp_client(base_url + "mcp")
257 | 
258 |         async with client as streams:
259 |             if len(streams) == 3:
260 |                 read_stream, write_stream, session_manager = streams
261 |             else:
262 |                 read_stream, write_stream = streams
263 | 
264 |             async with ClientSession(read_stream, write_stream) as session:
265 |                 # Initialize and get server info
266 |                 init_result = await session.initialize()
267 | 
268 |                 # Verify server info is present
269 |                 assert hasattr(init_result, "serverInfo")
270 |                 assert init_result.serverInfo.name == "alphavantage"
271 |                 assert init_result.serverInfo.version is not None
272 | 
273 |                 # Verify protocol version
274 |                 assert init_result.protocolVersion is not None
275 | 
276 |     finally:
277 |         # Clean up
278 |         server_process.terminate()
279 |         try:
280 |             server_process.wait(timeout=5)
281 |         except subprocess.TimeoutExpired:
282 |             server_process.kill()
283 | 
```
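
For reference, the client-side pattern these transport tests exercise can be reduced to a short standalone script. The sketch below is illustrative only: it assumes a server is already running on localhost:8080 (any free port works, started the same way the tests above start it) and that `streamablehttp_client` is imported from the MCP SDK's `mcp.client.streamable_http` module; adjust the import if your SDK version places it elsewhere.

```python
# Minimal sketch (not part of the test suite): connect to an already-running
# HTTP server and list its tools. Port 8080 and the function name are examples.
import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client


async def list_alphavantage_tools(url: str = "http://localhost:8080/mcp") -> list[str]:
    async with streamablehttp_client(url) as streams:
        # Newer SDK versions yield (read, write, get_session_id); older ones just (read, write).
        read_stream, write_stream = streams[0], streams[1]
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            response = await session.list_tools()
            return [tool.name for tool in response.tools]


if __name__ == "__main__":
    print(asyncio.run(list_alphavantage_tools()))
```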

--------------------------------------------------------------------------------
/tests/test_integration.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import json
  3 | import subprocess
  4 | import sys
  5 | 
  6 | import httpx
  7 | import pytest
  8 | from mcp import ClientSession
  9 | from mcp.client.stdio import stdio_client, StdioServerParameters
 10 | 
 11 | 
 12 | @pytest.mark.asyncio
 13 | async def test_stdio_basic_connection():
 14 |     """Test basic stdio connection and tool listing"""
 15 | 
 16 |     server_params = StdioServerParameters(
 17 |         command=sys.executable,
 18 |         args=[
 19 |             "-c",
 20 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 21 |             "--server",
 22 |             "stdio",
 23 |         ],
 24 |     )
 25 | 
 26 |     client = stdio_client(server_params)
 27 |     async with client as (read_stream, write_stream):
 28 |         async with ClientSession(read_stream, write_stream) as session:
 29 |             # Initialize the session
 30 |             init_result = await session.initialize()
 31 |             assert init_result is not None
 32 |             assert init_result.serverInfo.name == "alphavantage"
 33 | 
 34 |             # List available tools
 35 |             response = await session.list_tools()
 36 |             tools = response.tools
 37 |             tool_names = [tool.name for tool in tools]
 38 | 
 39 |             # Verify essential tools are present
 40 |             assert "stock_quote" in tool_names
 41 |             assert len(tools) > 0
 42 | 
 43 | 
 44 | @pytest.mark.asyncio
 45 | async def test_http_basic_connection():
 46 |     """Test basic HTTP connection using direct HTTP requests"""
 47 | 
 48 |     # Start the HTTP server
 49 |     server_process = subprocess.Popen(
 50 |         [
 51 |             sys.executable,
 52 |             "-c",
 53 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
 54 |             "--server",
 55 |             "http",
 56 |             "--port",
 57 |             "8084",
 58 |         ],
 59 |         stdout=subprocess.PIPE,
 60 |         stderr=subprocess.PIPE,
 61 |         text=True,
 62 |     )
 63 | 
 64 |     try:
 65 |         # Give server time to start
 66 |         await asyncio.sleep(4)
 67 | 
 68 |         # Test basic HTTP endpoint
 69 |         async with httpx.AsyncClient() as client:
 70 |             # Test initialization request
 71 |             init_request = {
 72 |                 "jsonrpc": "2.0",
 73 |                 "id": 1,
 74 |                 "method": "initialize",
 75 |                 "params": {
 76 |                     "protocolVersion": "2025-06-18",
 77 |                     "capabilities": {},
 78 |                     "clientInfo": {"name": "test-client", "version": "1.0.0"},
 79 |                 },
 80 |             }
 81 | 
 82 |             response = await client.post(
 83 |                 "http://localhost:8084/mcp",
 84 |                 json=init_request,
 85 |                 headers={
 86 |                     "Content-Type": "application/json",
 87 |                     "Accept": "application/json, text/event-stream",
 88 |                     "MCP-Protocol-Version": "2025-06-18",
 89 |                 },
 90 |                 timeout=10.0,
 91 |             )
 92 | 
 93 |             assert response.status_code == 200
 94 | 
 95 |             json_data = response.json()
 96 |             assert json_data is not None
 97 |             assert "result" in json_data
 98 |             assert json_data["result"]["serverInfo"]["name"] == "alphavantage"
 99 | 
100 |             # Send initialized notification (required after initialize)
101 |             initialized_notification = {
102 |                 "jsonrpc": "2.0",
103 |                 "method": "notifications/initialized",
104 |             }
105 | 
106 |             await client.post(
107 |                 "http://localhost:8084/mcp",
108 |                 json=initialized_notification,
109 |                 headers={
110 |                     "Content-Type": "application/json",
111 |                     "Accept": "application/json, text/event-stream",
112 |                     "MCP-Protocol-Version": "2025-06-18",
113 |                 },
114 |                 timeout=10.0,
115 |             )
116 | 
117 |             # Test tools/list request
118 |             # https://modelcontextprotocol.io/specification/2025-06-18/server/tools#listing-tools
119 |             tools_request = {"jsonrpc": "2.0", "id": 2, "method": "tools/list"}
120 | 
121 |             response = await client.post(
122 |                 "http://localhost:8084/mcp",
123 |                 json=tools_request,
124 |                 headers={
125 |                     "Content-Type": "application/json",
126 |                     "Accept": "application/json, text/event-stream",
127 |                     "MCP-Protocol-Version": "2025-06-18",
128 |                 },
129 |                 timeout=10.0,
130 |             )
131 | 
132 |             assert response.status_code == 200
133 | 
134 |             json_data = response.json()
135 |             assert json_data is not None
136 |             assert "result" in json_data
137 |             assert "tools" in json_data["result"]
138 | 
139 |             tools = json_data["result"]["tools"]
140 |             tool_names = [tool["name"] for tool in tools]
141 |             assert "stock_quote" in tool_names
142 |             assert len(tools) > 0
143 | 
144 |     finally:
145 |         # Clean up
146 |         server_process.terminate()
147 |         try:
148 |             server_process.wait(timeout=5)
149 |         except subprocess.TimeoutExpired:
150 |             server_process.kill()
151 | 
152 | 
153 | @pytest.mark.asyncio
154 | async def test_stdio_stock_quote_call():
155 |     """Test calling STOCK_QUOTE tool via stdio"""
156 | 
157 |     server_params = StdioServerParameters(
158 |         command=sys.executable,
159 |         args=[
160 |             "-c",
161 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
162 |             "--server",
163 |             "stdio",
164 |         ],
165 |     )
166 | 
167 |     client = stdio_client(server_params)
168 |     async with client as (read_stream, write_stream):
169 |         async with ClientSession(read_stream, write_stream) as session:
170 |             # Initialize
171 |             await session.initialize()
172 | 
173 |             # Call stock_quote tool
174 |             result = await session.call_tool("stock_quote", {"symbol": "AAPL"})
175 | 
176 |             # Verify we got a result
177 |             assert result is not None
178 |             assert hasattr(result, "content")
179 |             assert len(result.content) > 0
180 | 
181 |             # Parse the JSON result
182 |             content_text = result.content[0].text
183 |             stock_data = json.loads(content_text)
184 | 
185 |             # Should contain either valid data or error message
186 |             assert "Global Quote" in stock_data or "Error Message" in stock_data
187 | 
188 | 
189 | @pytest.mark.asyncio
190 | async def test_http_stock_quote_call():
191 |     """Test calling STOCK_QUOTE tool via HTTP"""
192 | 
193 |     # Start the HTTP server
194 |     server_process = subprocess.Popen(
195 |         [
196 |             sys.executable,
197 |             "-c",
198 |             "import sys; sys.path.insert(0, 'src'); from alphavantage_mcp_server import main; main()",
199 |             "--server",
200 |             "http",
201 |             "--port",
202 |             "8085",
203 |         ],
204 |         stdout=subprocess.PIPE,
205 |         stderr=subprocess.PIPE,
206 |         text=True,
207 |     )
208 | 
209 |     try:
210 |         # Give server time to start
211 |         await asyncio.sleep(4)
212 | 
213 |         async with httpx.AsyncClient() as client:
214 |             # Initialize first
215 |             init_request = {
216 |                 "jsonrpc": "2.0",
217 |                 "id": 1,
218 |                 "method": "initialize",
219 |                 "params": {
220 |                     "protocolVersion": "2024-11-05",
221 |                     "capabilities": {},
222 |                     "clientInfo": {"name": "test-client", "version": "1.0.0"},
223 |                 },
224 |             }
225 | 
226 |             await client.post(
227 |                 "http://localhost:8085/mcp",
228 |                 json=init_request,
229 |                 headers={
230 |                     "Content-Type": "application/json",
231 |                     "Accept": "application/json, text/event-stream",
232 |                 },
233 |                 timeout=10.0,
234 |             )
235 | 
236 |             # Send initialized notification
237 |             initialized_notification = {
238 |                 "jsonrpc": "2.0",
239 |                 "method": "notifications/initialized",
240 |             }
241 | 
242 |             await client.post(
243 |                 "http://localhost:8085/mcp",
244 |                 json=initialized_notification,
245 |                 headers={
246 |                     "Content-Type": "application/json",
247 |                     "Accept": "application/json, text/event-stream",
248 |                 },
249 |                 timeout=10.0,
250 |             )
251 | 
252 |             # Call stock_quote tool
253 |             tool_request = {
254 |                 "jsonrpc": "2.0",
255 |                 "id": 2,
256 |                 "method": "tools/call",
257 |                 "params": {"name": "stock_quote", "arguments": {"symbol": "AAPL"}},
258 |             }
259 | 
260 |             response = await client.post(
261 |                 "http://localhost:8085/mcp",
262 |                 json=tool_request,
263 |                 headers={
264 |                     "Content-Type": "application/json",
265 |                     "Accept": "application/json, text/event-stream",
266 |                 },
267 |                 timeout=30.0,
268 |             )
269 | 
270 |             assert response.status_code == 200
271 | 
272 |             # Parse the JSON-RPC response for the tool call
273 |             json_data = response.json()
274 |             assert json_data is not None
275 |             assert "result" in json_data
276 |             assert "content" in json_data["result"]
277 | 
278 |             content = json_data["result"]["content"]
279 |             assert len(content) > 0
280 | 
281 |             # Parse the JSON result
282 |             content_text = content[0]["text"]
283 |             stock_data = json.loads(content_text)
284 | 
285 |             # Should contain either valid data or error message
286 |             assert "Global Quote" in stock_data or "Error Message" in stock_data
287 | 
288 |     finally:
289 |         # Clean up
290 |         server_process.terminate()
291 |         try:
292 |             server_process.wait(timeout=5)
293 |         except subprocess.TimeoutExpired:
294 |             server_process.kill()
295 | 
```
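
The HTTP tests above spell out the wire-level handshake: `initialize`, the mandatory `notifications/initialized` notification, then regular requests such as `tools/list` and `tools/call`. A compressed, illustrative version of that sequence is sketched below; it assumes a server is already listening on localhost:8080 at `/mcp` and that responses come back as plain JSON, as the tests expect.

```python
# Illustrative handshake only; the URL, ids, and client name are example values.
import asyncio

import httpx

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json, text/event-stream",
    "MCP-Protocol-Version": "2025-06-18",
}


async def handshake(url: str = "http://localhost:8080/mcp") -> dict:
    async with httpx.AsyncClient(timeout=10.0) as client:
        init = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2025-06-18",
                "capabilities": {},
                "clientInfo": {"name": "example-client", "version": "0.0.1"},
            },
        }
        await client.post(url, json=init, headers=HEADERS)
        # The spec requires this notification before any further requests.
        await client.post(
            url,
            json={"jsonrpc": "2.0", "method": "notifications/initialized"},
            headers=HEADERS,
        )
        tools = await client.post(
            url, json={"jsonrpc": "2.0", "id": 2, "method": "tools/list"}, headers=HEADERS
        )
        return tools.json()


if __name__ == "__main__":
    print(len(asyncio.run(handshake())["result"]["tools"]), "tools listed")
```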

--------------------------------------------------------------------------------
/tests/test_api.py:
--------------------------------------------------------------------------------

```python
  1 | import csv
  2 | import os
  3 | from io import StringIO
  4 | 
  5 | import pytest
  6 | 
  7 | from alphavantage_mcp_server.api import (
  8 |     fetch_earnings_calendar,
  9 |     fetch_earnings_call_transcript,
 10 |     fetch_sma,
 11 | )
 12 | 
 13 | 
 14 | @pytest.mark.asyncio
 15 | async def test_fetch_earnings_call_transcript():
 16 |     """Test fetching earnings call transcript with real API call."""
 17 |     data = await fetch_earnings_call_transcript(symbol="IBM", quarter="2024Q1")
 18 | 
 19 |     assert isinstance(data, dict), "API should return JSON data as a dict"
 20 | 
 21 |     assert "symbol" in data, "JSON should contain 'symbol' field"
 22 |     assert "quarter" in data, "JSON should contain 'quarter' field"
 23 |     assert "transcript" in data, "JSON should contain 'transcript' field"
 24 | 
 25 |     assert data["symbol"] == "IBM", "Should find IBM data in the response"
 26 |     assert data["transcript"], "Transcript should not be empty"
 27 | 
 28 |     first_entry = data["transcript"][0]
 29 |     required_fields = ["speaker", "title", "content", "sentiment"]
 30 |     for field in required_fields:
 31 |         assert field in first_entry, f"Field '{field}' missing from transcript entry"
 32 | 
 33 |     assert first_entry["content"], "Transcript content should not be empty"
 34 | 
 35 | 
 36 | @pytest.mark.asyncio
 37 | async def test_fetch_earnings_calendar():
 38 |     """Test fetching earnings calendar with real API call."""
 39 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
 40 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
 41 | 
 42 |     result = await fetch_earnings_calendar(symbol="AAPL", horizon="3month")
 43 | 
 44 |     assert isinstance(result, str), "API should return CSV data as string"
 45 | 
 46 |     # Parse CSV data
 47 |     csv_reader = csv.DictReader(StringIO(result))
 48 |     rows = list(csv_reader)
 49 | 
 50 |     # Basic validation of structure
 51 |     assert rows, "CSV should contain at least one row"
 52 | 
 53 |     # Check required fields in first row
 54 |     first_row = rows[0]
 55 |     required_fields = ["symbol", "name", "reportDate"]
 56 |     for field in required_fields:
 57 |         assert field in first_row, f"Field '{field}' missing from CSV data"
 58 | 
 59 |     # Check if we found AAPL data
 60 |     apple_entries = [row for row in rows if row["symbol"] == "AAPL"]
 61 |     assert apple_entries, "Should find AAPL entries in the response"
 62 | 
 63 | 
 64 | @pytest.mark.asyncio
 65 | async def test_fetch_sma():
 66 |     """Test fetching SMA (Simple Moving Average) with real API call."""
 67 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
 68 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
 69 | 
 70 |     # Test with common parameters that should work
 71 |     result = await fetch_sma(
 72 |         symbol="AAPL", interval="daily", time_period=20, series_type="close"
 73 |     )
 74 | 
 75 |     assert isinstance(result, dict), "API should return JSON data as dict"
 76 | 
 77 |     # Check for expected structure in SMA response
 78 |     assert "Meta Data" in result, "Response should contain 'Meta Data' section"
 79 | 
 80 |     # Find the technical analysis key (it varies by indicator)
 81 |     tech_analysis_key = None
 82 |     for key in result.keys():
 83 |         if "Technical Analysis" in key and "SMA" in key:
 84 |             tech_analysis_key = key
 85 |             break
 86 | 
 87 |     assert tech_analysis_key is not None, (
 88 |         "Response should contain Technical Analysis section for SMA"
 89 |     )
 90 | 
 91 |     # Validate metadata
 92 |     meta_data = result["Meta Data"]
 93 |     assert "1: Symbol" in meta_data, "Meta Data should contain symbol"
 94 |     assert "2: Indicator" in meta_data, "Meta Data should contain indicator type"
 95 |     assert "3: Last Refreshed" in meta_data, (
 96 |         "Meta Data should contain last refreshed date"
 97 |     )
 98 |     assert "4: Interval" in meta_data, "Meta Data should contain interval"
 99 |     assert "5: Time Period" in meta_data, "Meta Data should contain time period"
100 |     assert "6: Series Type" in meta_data, "Meta Data should contain series type"
101 | 
102 |     assert meta_data["1: Symbol"] == "AAPL", "Symbol should match request"
103 |     assert meta_data["2: Indicator"] == "Simple Moving Average (SMA)", (
104 |         "Indicator should be SMA"
105 |     )
106 |     assert meta_data["4: Interval"] == "daily", "Interval should match request"
107 |     assert meta_data["5: Time Period"] == 20, "Time period should match request"
108 |     assert meta_data["6: Series Type"] == "close", "Series type should match request"
109 | 
110 |     # Validate technical analysis data
111 |     sma_data = result[tech_analysis_key]
112 |     assert isinstance(sma_data, dict), "SMA data should be a dictionary"
113 |     assert len(sma_data) > 0, "SMA data should contain at least one data point"
114 | 
115 |     # Check structure of first data point
116 |     first_date = list(sma_data.keys())[0]
117 |     first_data_point = sma_data[first_date]
118 |     assert isinstance(first_data_point, dict), "Each data point should be a dictionary"
119 |     assert "SMA" in first_data_point, "Data point should contain SMA value"
120 | 
121 |     # Validate that SMA value is numeric
122 |     sma_value = first_data_point["SMA"]
123 |     assert isinstance(sma_value, str), "SMA value should be string (as returned by API)"
124 |     float(sma_value)  # Should not raise exception if valid number
125 | 
126 | 
127 | @pytest.mark.asyncio
128 | async def test_fetch_sma_with_month():
129 |     """Test fetching SMA with month parameter for intraday data."""
130 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
131 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
132 | 
133 |     # Test with intraday interval and month parameter
134 |     result = await fetch_sma(
135 |         symbol="MSFT",
136 |         interval="60min",
137 |         time_period=14,
138 |         series_type="close",
139 |         month="2024-01",
140 |     )
141 | 
142 |     assert isinstance(result, dict), "API should return JSON data as dict"
143 |     assert "Meta Data" in result, "Response should contain 'Meta Data' section"
144 | 
145 |     # Validate that month parameter was applied
146 |     meta_data = result["Meta Data"]
147 |     assert "7: Time Zone" in meta_data, "Meta Data should contain time zone"
148 |     assert meta_data["7: Time Zone"] == "US/Eastern", "Time zone should be US/Eastern"
149 | 
150 | 
151 | @pytest.mark.asyncio
152 | async def test_fetch_sma_csv_format():
153 |     """Test fetching SMA in CSV format."""
154 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
155 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
156 | 
157 |     result = await fetch_sma(
158 |         symbol="GOOGL",
159 |         interval="daily",
160 |         time_period=10,
161 |         series_type="close",
162 |         datatype="csv",
163 |     )
164 | 
165 |     assert isinstance(result, str), "CSV format should return string data"
166 |     assert len(result) > 0, "CSV data should not be empty"
167 | 
168 |     # Basic CSV validation
169 |     lines = result.strip().split("\n")
170 |     assert len(lines) > 1, "CSV should have header and at least one data row"
171 | 
172 |     # Check CSV header
173 |     header = lines[0]
174 |     assert "time" in header.lower(), "CSV should contain time column"
175 |     assert "sma" in header.lower(), "CSV should contain SMA column"
176 | 
177 | 
178 | @pytest.mark.asyncio
179 | async def test_fetch_sma_with_response_limiting():
180 |     """Test SMA response limiting functionality to prevent token limit issues."""
181 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
182 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
183 | 
184 |     # Test with a small max_data_points to demonstrate limiting
185 |     result = await fetch_sma(
186 |         symbol="NVDA",
187 |         interval="daily",
188 |         time_period=14,
189 |         series_type="close",
190 |         max_data_points=10,  # Limit to only 10 data points
191 |     )
192 | 
193 |     assert isinstance(result, dict), "API should return JSON data as dict"
194 |     assert "Meta Data" in result, "Response should contain 'Meta Data' section"
195 | 
196 |     # Find the technical analysis key
197 |     tech_analysis_key = None
198 |     for key in result.keys():
199 |         if "Technical Analysis" in key and "SMA" in key:
200 |             tech_analysis_key = key
201 |             break
202 | 
203 |     assert tech_analysis_key is not None, (
204 |         "Response should contain Technical Analysis section for SMA"
205 |     )
206 | 
207 |     # Check that response was limited
208 |     sma_data = result[tech_analysis_key]
209 |     assert len(sma_data) <= 10, (
210 |         f"Response should be limited to 10 data points, got {len(sma_data)}"
211 |     )
212 | 
213 |     # Check for summary information if response was limited
214 |     summary_key = f"{tech_analysis_key}_summary"
215 |     if summary_key in result:
216 |         summary = result[summary_key]
217 |         assert "total_data_points_available" in summary, (
218 |             "Summary should show total available data points"
219 |         )
220 |         assert "data_points_returned" in summary, (
221 |             "Summary should show returned data points"
222 |         )
223 |         assert "note" in summary, "Summary should contain explanation note"
224 |         assert summary["data_points_returned"] == len(sma_data), (
225 |             "Summary count should match actual data"
226 |         )
227 | 
228 |     # Verify dates are in descending order (most recent first)
229 |     dates = list(sma_data.keys())
230 |     sorted_dates = sorted(dates, reverse=True)
231 |     assert dates == sorted_dates, "Data points should be ordered by most recent first"
232 | 
233 | 
234 | @pytest.mark.asyncio
235 | async def test_fetch_sma_large_response_handling():
236 |     """Test SMA handling of potentially large responses."""
237 |     api_key = os.getenv("ALPHAVANTAGE_API_KEY")
238 |     assert api_key, "ALPHAVANTAGE_API_KEY must be set in environment"
239 | 
240 |     # Test with default max_data_points (100)
241 |     result = await fetch_sma(
242 |         symbol="AAPL",
243 |         interval="daily",
244 |         time_period=20,
245 |         series_type="close",
246 |         # Using default max_data_points=100
247 |     )
248 | 
249 |     assert isinstance(result, dict), "API should return JSON data as dict"
250 | 
251 |     # Find the technical analysis key
252 |     tech_analysis_key = None
253 |     for key in result.keys():
254 |         if "Technical Analysis" in key and "SMA" in key:
255 |             tech_analysis_key = key
256 |             break
257 | 
258 |     assert tech_analysis_key is not None, (
259 |         "Response should contain Technical Analysis section"
260 |     )
261 | 
262 |     # Check that response respects the default limit
263 |     sma_data = result[tech_analysis_key]
264 |     assert len(sma_data) <= 100, (
265 |         f"Response should be limited to 100 data points by default, got {len(sma_data)}"
266 |     )
267 | 
268 |     # Verify all data points have valid SMA values
269 |     for date, data_point in sma_data.items():
270 |         assert "SMA" in data_point, f"Data point for {date} should contain SMA value"
271 |         sma_value = data_point["SMA"]
272 |         assert isinstance(sma_value, str), "SMA value should be string"
273 |         float(sma_value)  # Should not raise exception if valid number
274 | 
```
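
These tests call the API helpers directly against the live AlphaVantage service. The sketch below shows the same kind of direct call outside pytest; it is illustrative only and assumes `ALPHAVANTAGE_API_KEY` is exported and that the package is importable (installed, or run from the repository root with `src` on the path).

```python
# Illustrative direct call; the symbol and indicator parameters are example values.
import asyncio
import os
import sys

sys.path.insert(0, "src")  # run from the repository root; drop if the package is installed

from alphavantage_mcp_server.api import fetch_sma


async def main() -> None:
    assert os.getenv("ALPHAVANTAGE_API_KEY"), "export ALPHAVANTAGE_API_KEY first"
    result = await fetch_sma(
        symbol="AAPL", interval="daily", time_period=20, series_type="close"
    )
    # The SMA values live under a "Technical Analysis: SMA"-style key, as the tests assert.
    tech_key = next(k for k in result if "Technical Analysis" in k)
    most_recent_date, data_point = next(iter(result[tech_key].items()))
    print(most_recent_date, data_point["SMA"])


if __name__ == "__main__":
    asyncio.run(main())
```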

--------------------------------------------------------------------------------
/deploy/aws-stateless-mcp-lambda/lambda_function.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | AWS Lambda function for stateless AlphaVantage MCP Server.
  3 | Based on aws-samples/sample-serverless-mcp-servers/stateless-mcp-on-lambda-python pattern.
  4 | """
  5 | 
  6 | import asyncio
  7 | import json
  8 | import os
  9 | import sys
 10 | from typing import Any, Dict
 11 | 
 12 | # Add the source directory to Python path for imports
 13 | sys.path.insert(0, "/opt/python")
 14 | 
 16 | # No MCP SDK imports are needed here; handler results are converted to plain dicts below
 16 | 
 17 | # Import AlphaVantage MCP server components
 18 | from alphavantage_mcp_server.server import (
 19 |     handle_list_tools,
 20 |     handle_call_tool,
 21 |     list_prompts,
 22 |     get_prompt,
 23 |     get_version,
 24 | )
 25 | from alphavantage_mcp_server.oauth import (
 26 |     OAuthResourceServer,
 27 |     create_oauth_config_from_env,
 28 | )
 29 | 
 30 | 
 31 | def lambda_handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]:
 32 |     """
 33 |     AWS Lambda handler for stateless MCP requests.
 34 |     Each request is handled independently without session state.
 35 |     """
 36 |     try:
 37 |         # Parse the incoming request
 38 |         if "body" not in event:
 39 |             return create_error_response(400, "Missing request body")
 40 | 
 41 |         # Handle both string and already-parsed JSON bodies
 42 |         if isinstance(event["body"], str):
 43 |             try:
 44 |                 request_data = json.loads(event["body"])
 45 |             except json.JSONDecodeError:
 46 |                 return create_error_response(400, "Invalid JSON in request body")
 47 |         else:
 48 |             request_data = event["body"]
 49 | 
 50 |         # Validate JSON-RPC format
 51 |         if not isinstance(request_data, dict) or "jsonrpc" not in request_data:
 52 |             return create_error_response(400, "Invalid JSON-RPC request")
 53 | 
 54 |         # Handle OAuth if enabled
 55 |         oauth_server = None
 56 |         oauth_enabled = os.environ.get("OAUTH_ENABLED", "false").lower() == "true"
 57 |         if oauth_enabled:
 58 |             oauth_config = create_oauth_config_from_env()
 59 |             if oauth_config:
 60 |                 oauth_server = OAuthResourceServer(oauth_config)
 61 | 
 62 |                 # Check authentication for non-initialize requests
 63 |                 method = request_data.get("method", "")
 64 |                 if method != "initialize":
 65 |                     auth_result = validate_oauth_request(event, oauth_server)
 66 |                     if not auth_result["authenticated"]:
 67 |                         return auth_result["response"]
 68 | 
 69 |         # Process the MCP request
 70 |         response = asyncio.run(handle_mcp_request(request_data, oauth_server))
 71 | 
 72 |         return {
 73 |             "statusCode": 200,
 74 |             "headers": {
 75 |                 "Content-Type": "application/json",
 76 |                 "Access-Control-Allow-Origin": "*",
 77 |                 "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
 78 |                 "Access-Control-Allow-Headers": "Content-Type, Authorization, X-Session-ID",
 79 |             },
 80 |             "body": json.dumps(response),
 81 |         }
 82 | 
 83 |     except Exception as e:
 84 |         print(f"Lambda handler error: {str(e)}")
 85 |         return create_error_response(500, f"Internal server error: {str(e)}")
 86 | 
 87 | 
 88 | async def handle_mcp_request(
 89 |     request_data: Dict[str, Any], oauth_server: OAuthResourceServer = None
 90 | ) -> Dict[str, Any]:
 91 |     """
 92 |     Handle MCP request in stateless mode.
 93 |     Requests are dispatched directly to the server's handler functions, with no session state kept between calls.
 94 |     """
 95 |     method = request_data.get("method", "")
 96 |     request_id = request_data.get("id", 1)
 97 |     params = request_data.get("params", {})
 98 | 
 99 |     try:
100 |         # Handle different MCP methods
101 |         if method == "initialize":
102 |             return handle_initialize(request_id, params)
103 | 
104 |         elif method == "tools/list":
105 |             return await handle_tools_list_request(request_id)
106 | 
107 |         elif method == "tools/call":
108 |             return await handle_tools_call_request(request_id, params)
109 | 
110 |         elif method == "prompts/list":
111 |             return await handle_prompts_list_request(request_id)
112 | 
113 |         elif method == "prompts/get":
114 |             return await handle_prompts_get_request(request_id, params)
115 | 
116 |         else:
117 |             return create_jsonrpc_error(
118 |                 request_id, -32601, f"Method not found: {method}"
119 |             )
120 | 
121 |     except Exception as e:
122 |         print(f"MCP request error: {str(e)}")
123 |         return create_jsonrpc_error(request_id, -32603, f"Internal error: {str(e)}")
124 | 
125 | 
126 | def handle_initialize(request_id: Any, params: Dict[str, Any]) -> Dict[str, Any]:
127 |     """Handle MCP initialize request - stateless mode"""
128 |     try:
129 |         version = get_version()
130 |     except Exception:
131 |         version = "0.3.17"  # Fallback version for Lambda
132 | 
133 |     return {
134 |         "jsonrpc": "2.0",
135 |         "id": request_id,
136 |         "result": {
137 |             "protocolVersion": "2024-11-05",
138 |             "capabilities": {
139 |                 "experimental": {},
140 |                 "prompts": {"listChanged": False},
141 |                 "tools": {"listChanged": False},
142 |             },
143 |             "serverInfo": {"name": "alphavantage", "version": version},
144 |         },
145 |     }
146 | 
147 | 
148 | async def handle_tools_list_request(request_id: Any) -> Dict[str, Any]:
149 |     """Handle tools/list request - get all available tools"""
150 |     try:
151 |         # Call the AlphaVantage server's handle_list_tools function directly
152 |         tools = await handle_list_tools()
153 | 
154 |         # Convert MCP Tool objects to JSON-serializable format
155 |         tools_json = []
156 |         for tool in tools:
157 |             tool_dict = {"name": tool.name, "description": tool.description}
158 |             if hasattr(tool, "inputSchema") and tool.inputSchema:
159 |                 tool_dict["inputSchema"] = tool.inputSchema
160 |             tools_json.append(tool_dict)
161 | 
162 |         return {"jsonrpc": "2.0", "id": request_id, "result": {"tools": tools_json}}
163 | 
164 |     except Exception as e:
165 |         print(f"Tools list error: {str(e)}")
166 |         return create_jsonrpc_error(
167 |             request_id, -32603, f"Failed to list tools: {str(e)}"
168 |         )
169 | 
170 | 
171 | async def handle_tools_call_request(
172 |     request_id: Any, params: Dict[str, Any]
173 | ) -> Dict[str, Any]:
174 |     """Handle tools/call request - execute a tool"""
175 |     try:
176 |         tool_name = params.get("name")
177 |         arguments = params.get("arguments", {})
178 | 
179 |         if not tool_name:
180 |             return create_jsonrpc_error(request_id, -32602, "Missing tool name")
181 | 
182 |         # Call the tool using the AlphaVantage server's handle_call_tool function
183 |         result = await handle_call_tool(tool_name, arguments)
184 | 
185 |         # Convert MCP result objects to JSON-serializable format
186 |         content_list = []
187 |         if isinstance(result, list):
188 |             for item in result:
189 |                 if hasattr(item, "text"):
190 |                     # TextContent object
191 |                     content_list.append({"type": "text", "text": item.text})
192 |                 elif hasattr(item, "data"):
193 |                     # ImageContent object
194 |                     content_list.append(
195 |                         {
196 |                             "type": "image",
197 |                             "data": item.data,
198 |                             "mimeType": getattr(item, "mimeType", "image/png"),
199 |                         }
200 |                     )
201 |                 elif hasattr(item, "uri"):
202 |                     # EmbeddedResource object
203 |                     content_list.append(
204 |                         {
205 |                             "type": "resource",
206 |                             "resource": {
207 |                                 "uri": item.uri,
208 |                                 "text": getattr(item, "text", ""),
209 |                                 "mimeType": getattr(item, "mimeType", "text/plain"),
210 |                             },
211 |                         }
212 |                     )
213 |                 else:
214 |                     # Fallback for unknown types
215 |                     content_list.append({"type": "text", "text": str(item)})
216 |         else:
217 |             # Single result
218 |             if hasattr(result, "text"):
219 |                 content_list.append({"type": "text", "text": result.text})
220 |             else:
221 |                 content_list.append({"type": "text", "text": str(result)})
222 | 
223 |         return {"jsonrpc": "2.0", "id": request_id, "result": {"content": content_list}}
224 | 
225 |     except Exception as e:
226 |         print(f"Tool call error: {str(e)}")
227 |         return create_jsonrpc_error(
228 |             request_id, -32603, f"Tool execution failed: {str(e)}"
229 |         )
230 | 
231 | 
232 | async def handle_prompts_list_request(request_id: Any) -> Dict[str, Any]:
233 |     """Handle prompts/list request"""
234 |     try:
235 |         # Call the AlphaVantage server's list_prompts function directly
236 |         prompts = await list_prompts()
237 | 
238 |         # Convert to JSON-serializable format
239 |         prompts_json = []
240 |         for prompt in prompts:
241 |             prompt_dict = {"name": prompt.name, "description": prompt.description}
242 |             if hasattr(prompt, "arguments") and prompt.arguments:
243 |                 prompt_dict["arguments"] = prompt.arguments
244 |             prompts_json.append(prompt_dict)
245 | 
246 |         return {"jsonrpc": "2.0", "id": request_id, "result": {"prompts": prompts_json}}
247 | 
248 |     except Exception as e:
249 |         print(f"Prompts list error: {str(e)}")
250 |         return create_jsonrpc_error(
251 |             request_id, -32603, f"Failed to list prompts: {str(e)}"
252 |         )
253 | 
254 | 
255 | async def handle_prompts_get_request(
256 |     request_id: Any, params: Dict[str, Any]
257 | ) -> Dict[str, Any]:
258 |     """Handle prompts/get request"""
259 |     try:
260 |         prompt_name = params.get("name")
261 |         arguments = params.get("arguments", {})
262 | 
263 |         if not prompt_name:
264 |             return create_jsonrpc_error(request_id, -32602, "Missing prompt name")
265 | 
266 |         # Call the prompt using the AlphaVantage server's get_prompt function
267 |         result = await get_prompt(prompt_name, arguments)
268 | 
269 |         return {"jsonrpc": "2.0", "id": request_id, "result": result}
270 | 
271 |     except Exception as e:
272 |         print(f"Prompt get error: {str(e)}")
273 |         return create_jsonrpc_error(
274 |             request_id, -32603, f"Failed to get prompt: {str(e)}"
275 |         )
276 | 
277 | 
278 | def validate_oauth_request(
279 |     event: Dict[str, Any], oauth_server: OAuthResourceServer
280 | ) -> Dict[str, Any]:
281 |     """Validate OAuth authentication for the request"""
282 |     try:
283 |         # Extract authorization header
284 |         headers = event.get("headers", {})
285 |         auth_header = headers.get("Authorization") or headers.get("authorization")
286 | 
287 |         if not auth_header or not auth_header.startswith("Bearer "):
288 |             return {
289 |                 "authenticated": False,
290 |                 "response": create_oauth_error_response(
291 |                     401, "invalid_token", "Missing or invalid authorization header"
292 |                 ),
293 |             }
294 | 
295 |         # TODO: Implement OAuth token validation using oauth_server
296 |         # For now, return authenticated=True to allow requests
297 |         return {"authenticated": True}
298 | 
299 |     except Exception as e:
300 |         return {
301 |             "authenticated": False,
302 |             "response": create_oauth_error_response(
303 |                 500, "server_error", f"OAuth validation error: {str(e)}"
304 |             ),
305 |         }
306 | 
307 | 
308 | def create_error_response(status_code: int, message: str) -> Dict[str, Any]:
309 |     """Create a standard error response"""
310 |     return {
311 |         "statusCode": status_code,
312 |         "headers": {
313 |             "Content-Type": "application/json",
314 |             "Access-Control-Allow-Origin": "*",
315 |         },
316 |         "body": json.dumps({"error": {"code": status_code, "message": message}}),
317 |     }
318 | 
319 | 
320 | def create_jsonrpc_error(request_id: Any, code: int, message: str) -> Dict[str, Any]:
321 |     """Create a JSON-RPC error response"""
322 |     return {
323 |         "jsonrpc": "2.0",
324 |         "id": request_id,
325 |         "error": {"code": code, "message": message},
326 |     }
327 | 
328 | 
329 | def create_oauth_error_response(
330 |     status_code: int, error_type: str, description: str
331 | ) -> Dict[str, Any]:
332 |     """Create an OAuth error response"""
333 |     return {
334 |         "statusCode": status_code,
335 |         "headers": {
336 |             "Content-Type": "application/json",
337 |             "WWW-Authenticate": f'Bearer error="{error_type}", error_description="{description}"',
338 |             "Access-Control-Allow-Origin": "*",
339 |         },
340 |         "body": json.dumps({"error": error_type, "error_description": description}),
341 |     }
342 | 
```
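
Because the handler above is stateless, it can be exercised locally without API Gateway by passing a synthetic proxy-style event. The sketch below is illustrative: it assumes this module and the `alphavantage_mcp_server` package are importable, and that OAuth is disabled (the default when `OAUTH_ENABLED` is unset).

```python
# Illustrative local invocation only; the event shape mirrors what lambda_handler parses.
import json

from lambda_function import lambda_handler  # assumes this file is on the import path

event = {
    "body": json.dumps({"jsonrpc": "2.0", "id": 1, "method": "tools/list"}),
    "headers": {"Content-Type": "application/json"},
}

response = lambda_handler(event, None)  # no real Lambda context is needed for this sketch
tools = json.loads(response["body"])["result"]["tools"]
print(response["statusCode"], len(tools), "tools available")
```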

--------------------------------------------------------------------------------
/src/alphavantage_mcp_server/oauth.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | OAuth 2.1 implementation for AlphaVantage MCP Server.
  3 | 
  4 | This module provides OAuth 2.1 resource server functionality as required by the
  5 | Model Context Protocol specification (2025-06-18). It supports configuration-driven
  6 | OAuth that works with any compliant OAuth 2.1 authorization server.
  7 | 
  8 | Key features:
  9 | - OAuth 2.0 Protected Resource Metadata (RFC 9728)
 10 | - Access token validation (JWT and introspection)
 11 | - WWW-Authenticate header handling
 12 | - Configuration-driven authorization server discovery
 13 | - MCP Security Best Practices compliance
 14 | 
 15 | Security Features:
 16 | - Token audience validation (prevents token passthrough attacks)
 17 | - Secure session ID generation and binding
 18 | - User-specific session binding to prevent session hijacking
 19 | - Proper error handling with OAuth-compliant responses
 20 | """
 21 | 
 22 | import logging
 23 | import secrets
 24 | from dataclasses import dataclass
 25 | from typing import Dict, List, Optional, Tuple, Union
 26 | from urllib.parse import urljoin
 27 | 
 28 | import httpx
 29 | import jwt
 30 | from starlette.requests import Request
 31 | from starlette.responses import JSONResponse, Response
 32 | 
 33 | logger = logging.getLogger(__name__)
 34 | 
 35 | 
 36 | @dataclass
 37 | class OAuthConfig:
 38 |     """Configuration for OAuth 2.1 resource server functionality."""
 39 | 
 40 |     # Authorization server configuration
 41 |     authorization_server_url: str
 42 |     """URL of the OAuth 2.1 authorization server (e.g., https://auth.example.com)"""
 43 | 
 44 |     # Resource server identity
 45 |     resource_server_uri: str
 46 |     """Canonical URI of this MCP server (e.g., https://mcp.example.com)"""
 47 | 
 48 |     # Token validation settings
 49 |     token_validation_method: str = "jwt"
 50 |     """Method for token validation: 'jwt' or 'introspection'"""
 51 | 
 52 |     jwt_public_key: Optional[str] = None
 53 |     """Public key for JWT validation (PEM format)"""
 54 | 
 55 |     jwt_algorithm: str = "RS256"
 56 |     """JWT signing algorithm"""
 57 | 
 58 |     introspection_endpoint: Optional[str] = None
 59 |     """Token introspection endpoint URL (RFC 7662)"""
 60 | 
 61 |     introspection_client_id: Optional[str] = None
 62 |     """Client ID for introspection requests"""
 63 | 
 64 |     introspection_client_secret: Optional[str] = None
 65 |     """Client secret for introspection requests"""
 66 | 
 67 |     # Metadata configuration
 68 |     resource_metadata_path: str = "/.well-known/oauth-protected-resource"
 69 |     """Path for OAuth 2.0 Protected Resource Metadata endpoint"""
 70 | 
 71 |     # Optional scopes
 72 |     required_scopes: Optional[List[str]] = None
 73 |     """List of required scopes for accessing this resource"""
 74 | 
 75 |     # Security settings
 76 |     session_binding_enabled: bool = True
 77 |     """Enable user-specific session binding for security"""
 78 | 
 79 |     def __post_init__(self):
 80 |         """Validate configuration after initialization."""
 81 |         if not self.authorization_server_url:
 82 |             raise ValueError("authorization_server_url is required")
 83 |         if not self.resource_server_uri:
 84 |             raise ValueError("resource_server_uri is required")
 85 | 
 86 |         if self.token_validation_method == "jwt" and not self.jwt_public_key:
 87 |             logger.warning("JWT validation selected but no public key provided")
 88 |         elif (
 89 |             self.token_validation_method == "introspection"
 90 |             and not self.introspection_endpoint
 91 |         ):
 92 |             raise ValueError(
 93 |                 "introspection_endpoint required for introspection validation"
 94 |             )
 95 | 
 96 |         if self.required_scopes is None:
 97 |             self.required_scopes = []
 98 | 
 99 | 
100 | class OAuthError(Exception):
101 |     """Base exception for OAuth-related errors."""
102 | 
103 |     def __init__(self, error: str, description: str = None, status_code: int = 401):
104 |         self.error = error
105 |         self.description = description
106 |         self.status_code = status_code
107 |         super().__init__(f"{error}: {description}" if description else error)
108 | 
109 | 
110 | class TokenValidationResult:
111 |     """Result of token validation."""
112 | 
113 |     def __init__(self, valid: bool, claims: Dict = None, error: str = None):
114 |         self.valid = valid
115 |         self.claims = claims or {}
116 |         self.error = error
117 | 
118 |     @property
119 |     def subject(self) -> Optional[str]:
120 |         """Get the subject (user) from token claims."""
121 |         return self.claims.get("sub")
122 | 
123 |     @property
124 |     def audience(self) -> Optional[Union[str, List[str]]]:
125 |         """Get the audience from token claims."""
126 |         return self.claims.get("aud")
127 | 
128 |     @property
129 |     def scopes(self) -> List[str]:
130 |         """Get the scopes from token claims."""
131 |         scope_claim = self.claims.get("scope", "")
132 |         if isinstance(scope_claim, str):
133 |             return scope_claim.split() if scope_claim else []
134 |         elif isinstance(scope_claim, list):
135 |             return scope_claim
136 |         return []
137 | 
138 |     @property
139 |     def user_id(self) -> Optional[str]:
140 |         """Get a unique user identifier for session binding."""
141 |         # Use subject as the primary user identifier
142 |         return self.subject
143 | 
144 | 
145 | class SecureSessionManager:
146 |     """
147 |     Secure session management following MCP security best practices.
148 | 
149 |     Implements:
150 |     - Secure, non-deterministic session IDs
151 |     - User-specific session binding
152 |     - Session validation
153 |     """
154 | 
155 |     def __init__(self):
156 |         self._sessions: Dict[str, Dict] = {}
157 | 
158 |     def generate_session_id(self, user_id: str) -> str:
159 |         """
160 |         Generate a secure session ID bound to a user.
161 | 
162 |         Format: <user_id_hash>:<secure_random_token>
163 |         This prevents session hijacking by binding sessions to users.
164 |         """
165 |         # Generate cryptographically secure random token
166 |         secure_token = secrets.token_urlsafe(32)
167 | 
168 |         # Create a hash of user_id for binding (not reversible)
169 |         import hashlib
170 | 
171 |         user_hash = hashlib.sha256(user_id.encode()).hexdigest()[:16]
172 | 
173 |         session_id = f"{user_hash}:{secure_token}"
174 | 
175 |         # Store session metadata
176 |         self._sessions[session_id] = {
177 |             "user_id": user_id,
178 |             "created_at": __import__("time").time(),
179 |         }
180 | 
181 |         logger.debug(f"Generated secure session ID for user: {user_id}")
182 |         return session_id
183 | 
184 |     def validate_session(self, session_id: str, user_id: str) -> bool:
185 |         """
186 |         Validate that a session ID belongs to the specified user.
187 | 
188 |         This prevents session hijacking attacks.
189 |         """
190 |         if not session_id or session_id not in self._sessions:
191 |             return False
192 | 
193 |         session_data = self._sessions[session_id]
194 |         return session_data.get("user_id") == user_id
195 | 
196 |     def cleanup_expired_sessions(self, max_age_seconds: int = 3600):
197 |         """Clean up expired sessions."""
198 |         current_time = __import__("time").time()
199 |         expired_sessions = [
200 |             sid
201 |             for sid, data in self._sessions.items()
202 |             if current_time - data.get("created_at", 0) > max_age_seconds
203 |         ]
204 | 
205 |         for sid in expired_sessions:
206 |             del self._sessions[sid]
207 | 
208 |         if expired_sessions:
209 |             logger.info(f"Cleaned up {len(expired_sessions)} expired sessions")
210 | 
211 | 
212 | class OAuthResourceServer:
213 |     """OAuth 2.1 Resource Server implementation for MCP."""
214 | 
215 |     def __init__(self, config: OAuthConfig):
216 |         self.config = config
217 |         self.session_manager = SecureSessionManager()
218 |         self._http_client = httpx.AsyncClient()
219 |         logger.info(
220 |             f"Initialized OAuth resource server for {config.resource_server_uri}"
221 |         )
222 | 
223 |     async def get_protected_resource_metadata(self) -> Dict:
224 |         """
225 |         Generate OAuth 2.0 Protected Resource Metadata (RFC 9728).
226 | 
227 |         Returns metadata that clients use to discover the authorization server.
228 |         """
229 |         metadata = {
230 |             "resource": self.config.resource_server_uri,
231 |             "authorization_servers": [self.config.authorization_server_url],
232 |         }
233 | 
234 |         if self.config.required_scopes:
235 |             metadata["scopes_supported"] = self.config.required_scopes
236 | 
237 |         logger.debug(f"Generated resource metadata: {metadata}")
238 |         return metadata
239 | 
240 |     async def handle_resource_metadata_request(self, request: Request) -> JSONResponse:
241 |         """Handle requests to the protected resource metadata endpoint."""
242 |         try:
243 |             metadata = await self.get_protected_resource_metadata()
244 |             return JSONResponse(
245 |                 content=metadata, headers={"Content-Type": "application/json"}
246 |             )
247 |         except Exception as e:
248 |             logger.error(f"Error serving resource metadata: {e}")
249 |             return JSONResponse(
250 |                 content={
251 |                     "error": "server_error",
252 |                     "error_description": "Failed to generate metadata",
253 |                 },
254 |                 status_code=500,
255 |             )
256 | 
257 |     def extract_bearer_token(self, request: Request) -> Optional[str]:
258 |         """Extract Bearer token from Authorization header."""
259 |         auth_header = request.headers.get("Authorization", "")
260 |         if not auth_header.startswith("Bearer "):
261 |             return None
262 |         return auth_header[7:]  # Remove "Bearer " prefix
263 | 
264 |     async def validate_jwt_token(self, token: str) -> TokenValidationResult:
265 |         """Validate JWT access token with audience validation."""
266 |         if not self.config.jwt_public_key:
267 |             return TokenValidationResult(False, error="JWT public key not configured")
268 | 
269 |         try:
270 |             # Decode and verify JWT with strict audience validation
271 |             # This prevents token passthrough attacks (MCP Security Best Practice)
272 |             claims = jwt.decode(
273 |                 token,
274 |                 self.config.jwt_public_key,
275 |                 algorithms=[self.config.jwt_algorithm],
276 |                 audience=self.config.resource_server_uri,  # Strict audience validation
277 |                 options={"verify_aud": True},  # Ensure audience is verified
278 |             )
279 | 
280 |             logger.debug(f"JWT validation successful for subject: {claims.get('sub')}")
281 |             return TokenValidationResult(True, claims)
282 | 
283 |         except jwt.ExpiredSignatureError:
284 |             logger.warning("Token validation failed: Token expired")
285 |             return TokenValidationResult(False, error="Token expired")
286 |         except jwt.InvalidAudienceError:
287 |             logger.warning(
288 |                 f"Token validation failed: Invalid audience. Expected: {self.config.resource_server_uri}"
289 |             )
290 |             return TokenValidationResult(False, error="Invalid audience")
291 |         except jwt.InvalidTokenError as e:
292 |             logger.warning(f"Token validation failed: {str(e)}")
293 |             return TokenValidationResult(False, error=f"Invalid token: {str(e)}")
294 |         except Exception as e:
295 |             logger.error(f"JWT validation error: {e}")
296 |             return TokenValidationResult(False, error="Token validation failed")
297 | 
298 |     async def validate_token_introspection(self, token: str) -> TokenValidationResult:
299 |         """Validate token using OAuth 2.0 Token Introspection (RFC 7662)."""
300 |         if not self.config.introspection_endpoint:
301 |             return TokenValidationResult(
302 |                 False, error="Introspection endpoint not configured"
303 |             )
304 | 
305 |         try:
306 |             # Prepare introspection request
307 |             auth = None
308 |             if (
309 |                 self.config.introspection_client_id
310 |                 and self.config.introspection_client_secret
311 |             ):
312 |                 auth = (
313 |                     self.config.introspection_client_id,
314 |                     self.config.introspection_client_secret,
315 |                 )
316 | 
317 |             response = await self._http_client.post(
318 |                 self.config.introspection_endpoint,
319 |                 data={"token": token},
320 |                 auth=auth,
321 |                 headers={"Content-Type": "application/x-www-form-urlencoded"},
322 |             )
323 | 
324 |             if response.status_code != 200:
325 |                 logger.warning(
326 |                     f"Introspection failed with status: {response.status_code}"
327 |                 )
328 |                 return TokenValidationResult(False, error="Introspection failed")
329 | 
330 |             introspection_result = response.json()
331 | 
332 |             # Check if token is active
333 |             if not introspection_result.get("active", False):
334 |                 return TokenValidationResult(False, error="Token inactive")
335 | 
336 |             # Validate audience (prevents token passthrough attacks)
337 |             token_audience = introspection_result.get("aud")
338 |             if token_audience:
339 |                 if isinstance(token_audience, list):
340 |                     if self.config.resource_server_uri not in token_audience:
341 |                         logger.warning(
342 |                             f"Token audience mismatch. Expected: {self.config.resource_server_uri}, Got: {token_audience}"
343 |                         )
344 |                         return TokenValidationResult(False, error="Invalid audience")
345 |                 elif token_audience != self.config.resource_server_uri:
346 |                     logger.warning(
347 |                         f"Token audience mismatch. Expected: {self.config.resource_server_uri}, Got: {token_audience}"
348 |                     )
349 |                     return TokenValidationResult(False, error="Invalid audience")
350 | 
351 |             logger.debug(
352 |                 f"Token introspection successful for subject: {introspection_result.get('sub')}"
353 |             )
354 |             return TokenValidationResult(True, introspection_result)
355 | 
356 |         except Exception as e:
357 |             logger.error(f"Token introspection error: {e}")
358 |             return TokenValidationResult(False, error="Introspection failed")
359 | 
360 |     async def validate_access_token(self, token: str) -> TokenValidationResult:
361 |         """Validate access token using configured method."""
362 |         if self.config.token_validation_method == "jwt":
363 |             result = await self.validate_jwt_token(token)
364 |         elif self.config.token_validation_method == "introspection":
365 |             result = await self.validate_token_introspection(token)
366 |         else:
367 |             return TokenValidationResult(False, error="Unknown validation method")
368 | 
369 |         # Check required scopes if token is valid
370 |         if result.valid and self.config.required_scopes:
371 |             token_scopes = result.scopes
372 |             missing_scopes = set(self.config.required_scopes) - set(token_scopes)
373 |             if missing_scopes:
374 |                 logger.warning(f"Token missing required scopes: {missing_scopes}")
375 |                 return TokenValidationResult(False, error="Insufficient scopes")
376 | 
377 |         return result
378 | 
379 |     async def authenticate_request(
380 |         self, request: Request, session_id: Optional[str] = None
381 |     ) -> Tuple[bool, Optional[TokenValidationResult]]:
382 |         """
383 |         Authenticate an incoming request with session validation.
384 | 
385 |         Implements MCP security best practices:
386 |         - Verifies all inbound requests when OAuth is enabled
387 |         - Validates session binding to prevent hijacking
388 | 
389 |         Returns:
390 |             Tuple of (is_authenticated, validation_result)
391 |         """
392 |         token = self.extract_bearer_token(request)
393 |         if not token:
394 |             logger.debug("No Bearer token found in request")
395 |             return False, None
396 | 
397 |         result = await self.validate_access_token(token)
398 |         if not result.valid:
399 |             logger.warning(f"Token validation failed: {result.error}")
400 |             return False, result
401 | 
402 |         # Additional session validation if session binding is enabled
403 |         if self.config.session_binding_enabled and session_id and result.user_id:
404 |             if not self.session_manager.validate_session(session_id, result.user_id):
405 |                 logger.warning(f"Session validation failed for user: {result.user_id}")
406 |                 return False, TokenValidationResult(False, error="Invalid session")
407 | 
408 |         logger.debug(f"Request authenticated for subject: {result.subject}")
409 |         return True, result
410 | 
411 |     def create_www_authenticate_header(self) -> str:
412 |         """Create WWW-Authenticate header for 401 responses."""
413 |         metadata_url = urljoin(
414 |             self.config.resource_server_uri, self.config.resource_metadata_path
415 |         )
416 |         return f'Bearer resource="{metadata_url}"'
417 | 
418 |     async def create_unauthorized_response(
419 |         self, error: str = "invalid_token", description: Optional[str] = None
420 |     ) -> Response:
421 |         """Create a 401 Unauthorized response with proper headers."""
422 |         www_auth = self.create_www_authenticate_header()
423 | 
424 |         error_response = {"error": error}
425 |         if description:
426 |             error_response["error_description"] = description
427 | 
428 |         return JSONResponse(
429 |             content=error_response,
430 |             status_code=401,
431 |             headers={"WWW-Authenticate": www_auth},
432 |         )
433 | 
434 |     async def create_forbidden_response(
435 |         self, error: str = "insufficient_scope", description: Optional[str] = None
436 |     ) -> Response:
437 |         """Create a 403 Forbidden response."""
438 |         error_response = {"error": error}
439 |         if description:
440 |             error_response["error_description"] = description
441 | 
442 |         return JSONResponse(content=error_response, status_code=403)
443 | 
444 |     def generate_secure_session(self, user_id: str) -> str:
445 |         """Generate a secure session ID for a user."""
446 |         return self.session_manager.generate_session_id(user_id)
447 | 
448 |     async def cleanup(self):
449 |         """Cleanup resources."""
450 |         await self._http_client.aclose()
451 |         self.session_manager.cleanup_expired_sessions()
452 | 
453 | 
454 | def create_oauth_config_from_env() -> Optional[OAuthConfig]:
455 |     """Create OAuth configuration from environment variables."""
456 |     import os
457 | 
458 |     auth_server_url = os.getenv("OAUTH_AUTHORIZATION_SERVER_URL")
459 |     resource_uri = os.getenv("OAUTH_RESOURCE_SERVER_URI")
460 | 
461 |     if not auth_server_url or not resource_uri:
462 |         logger.info("OAuth environment variables not found, OAuth disabled")
463 |         return None
464 | 
465 |     return OAuthConfig(
466 |         authorization_server_url=auth_server_url,
467 |         resource_server_uri=resource_uri,
468 |         token_validation_method=os.getenv("OAUTH_TOKEN_VALIDATION_METHOD", "jwt"),
469 |         jwt_public_key=os.getenv("OAUTH_JWT_PUBLIC_KEY"),
470 |         jwt_algorithm=os.getenv("OAUTH_JWT_ALGORITHM", "RS256"),
471 |         introspection_endpoint=os.getenv("OAUTH_INTROSPECTION_ENDPOINT"),
472 |         introspection_client_id=os.getenv("OAUTH_INTROSPECTION_CLIENT_ID"),
473 |         introspection_client_secret=os.getenv("OAUTH_INTROSPECTION_CLIENT_SECRET"),
474 |         required_scopes=os.getenv("OAUTH_REQUIRED_SCOPES", "").split()
475 |         if os.getenv("OAUTH_REQUIRED_SCOPES")
476 |         else [],
477 |         session_binding_enabled=os.getenv(
478 |             "OAUTH_SESSION_BINDING_ENABLED", "true"
479 |         ).lower()
480 |         == "true",
481 |     )
482 | 
```
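
A minimal sketch of how the environment variables consumed by `create_oauth_config_from_env()` might be set before building the config. The import path assumes the package is importable as `alphavantage_mcp_server`; the URLs, client credentials, and scope below are placeholders, not values shipped with this repository.

```
# Minimal sketch: set the environment variables read by
# create_oauth_config_from_env(), then build the OAuth config.
# All URLs, credentials, and scopes below are placeholders.
import os

from alphavantage_mcp_server.oauth import create_oauth_config_from_env

os.environ["OAUTH_AUTHORIZATION_SERVER_URL"] = "https://auth.example.com"
os.environ["OAUTH_RESOURCE_SERVER_URI"] = "https://mcp.example.com"
os.environ["OAUTH_TOKEN_VALIDATION_METHOD"] = "introspection"
os.environ["OAUTH_INTROSPECTION_ENDPOINT"] = "https://auth.example.com/oauth/introspect"
os.environ["OAUTH_INTROSPECTION_CLIENT_ID"] = "mcp-resource-server"
os.environ["OAUTH_INTROSPECTION_CLIENT_SECRET"] = "change-me"
os.environ["OAUTH_REQUIRED_SCOPES"] = "alphavantage.read"  # space-separated list

config = create_oauth_config_from_env()
if config is None:
    # Returned when OAUTH_AUTHORIZATION_SERVER_URL or OAUTH_RESOURCE_SERVER_URI
    # is missing: OAuth stays disabled.
    raise RuntimeError("OAuth disabled: required environment variables not set")

assert config.token_validation_method == "introspection"
assert config.required_scopes == ["alphavantage.read"]
```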
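
The authentication flow above can be wired into an HTTP endpoint roughly as follows. The class that owns `authenticate_request()` and `create_unauthorized_response()` is not shown on this page, so `oauth_server` is passed in as an already-constructed instance rather than instantiated here; the `/mcp` route and the `mcp-session-id` header name are illustrative assumptions.

```
# Sketch of guarding a Starlette endpoint with authenticate_request(). The
# oauth_server argument is assumed to be an instance of whatever class in
# oauth.py defines the methods shown above; the route path and session header
# name are illustrative, not taken from this repository.
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Route


def build_app(oauth_server) -> Starlette:
    async def mcp_endpoint(request: Request):
        session_id = request.headers.get("mcp-session-id")  # assumed header name
        ok, result = await oauth_server.authenticate_request(request, session_id)
        if not ok:
            # 401 carrying the WWW-Authenticate header built by
            # create_www_authenticate_header(), pointing clients at the
            # protected-resource metadata URL.
            return await oauth_server.create_unauthorized_response(
                description=result.error if result else "Missing bearer token"
            )
        return JSONResponse({"subject": result.subject})

    return Starlette(routes=[Route("/mcp", mcp_endpoint, methods=["POST"])])
```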