This is page 2 of 2. Use http://codebase.md/pab1it0/prometheus-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.template
├── .github
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── question.yml
│   ├── TRIAGE_AUTOMATION.md
│   ├── VALIDATION_SUMMARY.md
│   └── workflows
│       ├── bug-triage.yml
│       ├── ci.yml
│       ├── claude.yml
│       ├── issue-management.yml
│       ├── label-management.yml
│       ├── security.yml
│       ├── sync-version.yml
│       └── triage-metrics.yml
├── .gitignore
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── pyproject.toml
├── README.md
├── server.json
├── src
│   └── prometheus_mcp_server
│       ├── __init__.py
│       ├── logging_config.py
│       ├── main.py
│       └── server.py
├── tests
│   ├── test_docker_integration.py
│   ├── test_logging_config.py
│   ├── test_main.py
│   ├── test_mcp_2025_direct.py
│   ├── test_mcp_2025_features.py
│   ├── test_mcp_protocol_compliance.py
│   ├── test_server.py
│   └── test_tools.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/src/prometheus_mcp_server/server.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python
  2 | 
  3 | import os
  4 | import json
  5 | from typing import Any, Dict, List, Optional
  6 | from dataclasses import dataclass
  7 | import time
  8 | from datetime import datetime, timezone
  9 | from enum import Enum
 10 | 
 11 | import dotenv
 12 | import requests
 13 | from fastmcp import FastMCP, Context
 14 | from prometheus_mcp_server.logging_config import get_logger
 15 | 
 16 | dotenv.load_dotenv()
 17 | mcp = FastMCP("Prometheus MCP")
 18 | 
 19 | # Cache for metrics list to improve completion performance
 20 | _metrics_cache = {"data": None, "timestamp": 0}
 21 | _CACHE_TTL = 300  # 5 minutes
 22 | 
 23 | # Get logger instance
 24 | logger = get_logger()
 25 | 
 26 | # Health check tool for Docker containers and monitoring
 27 | @mcp.tool(
 28 |     description="Health check endpoint for container monitoring and status verification",
 29 |     annotations={
 30 |         "title": "Health Check",
 31 |         "icon": "❤️",
 32 |         "readOnlyHint": True,
 33 |         "destructiveHint": False,
 34 |         "idempotentHint": True,
 35 |         "openWorldHint": True
 36 |     }
 37 | )
 38 | async def health_check() -> Dict[str, Any]:
 39 |     """Return health status of the MCP server and Prometheus connection.
 40 | 
 41 |     Returns:
 42 |         Health status including service information, configuration, and connectivity
 43 |     """
 44 |     try:
 45 |         health_status = {
 46 |             "status": "healthy",
 47 |             "service": "prometheus-mcp-server",
 48 |             "version": "1.5.0",
 49 |             "timestamp": datetime.utcnow().isoformat(),
 50 |             "transport": config.mcp_server_config.mcp_server_transport if config.mcp_server_config else "stdio",
 51 |             "configuration": {
 52 |                 "prometheus_url_configured": bool(config.url),
 53 |                 "authentication_configured": bool(config.username or config.token),
 54 |                 "org_id_configured": bool(config.org_id)
 55 |             }
 56 |         }
 57 |         
 58 |         # Test Prometheus connectivity if configured
 59 |         if config.url:
 60 |             try:
 61 |                 # Quick connectivity test
 62 |                 make_prometheus_request("query", params={"query": "up", "time": str(int(time.time()))})
 63 |                 health_status["prometheus_connectivity"] = "healthy"
 64 |                 health_status["prometheus_url"] = config.url
 65 |             except Exception as e:
 66 |                 health_status["prometheus_connectivity"] = "unhealthy"
 67 |                 health_status["prometheus_error"] = str(e)
 68 |                 health_status["status"] = "degraded"
 69 |         else:
 70 |             health_status["status"] = "unhealthy"
 71 |             health_status["error"] = "PROMETHEUS_URL not configured"
 72 |         
 73 |         logger.info("Health check completed", status=health_status["status"])
 74 |         return health_status
 75 |         
 76 |     except Exception as e:
 77 |         logger.error("Health check failed", error=str(e))
 78 |         return {
 79 |             "status": "unhealthy",
 80 |             "service": "prometheus-mcp-server",
 81 |             "error": str(e),
 82 |             "timestamp": datetime.utcnow().isoformat()
 83 |         }
 84 | 
 85 | 
 86 | class TransportType(str, Enum):
 87 |     """Supported MCP server transport types."""
 88 | 
 89 |     STDIO = "stdio"
 90 |     HTTP = "http"
 91 |     SSE = "sse"
 92 | 
 93 |     @classmethod
 94 |     def values(cls) -> list[str]:
 95 |         """Get all valid transport values."""
 96 |         return [transport.value for transport in cls]
 97 | 
 98 | @dataclass
 99 | class MCPServerConfig:
100 |     """Global Configuration for MCP."""
101 |     mcp_server_transport: Optional[TransportType] = None
102 |     mcp_bind_host: Optional[str] = None
103 |     mcp_bind_port: Optional[int] = None
104 | 
105 |     def __post_init__(self):
106 |         """Validate MCP configuration."""
107 |         if not self.mcp_server_transport:
108 |             raise ValueError("MCP SERVER TRANSPORT is required")
109 |         if not self.mcp_bind_host:
110 |             raise ValueError("MCP BIND HOST is required")
111 |         if not self.mcp_bind_port:
112 |             raise ValueError("MCP BIND PORT is required")
113 | 
114 | @dataclass
115 | class PrometheusConfig:
116 |     url: str
117 |     url_ssl_verify: bool = True
118 |     disable_prometheus_links: bool = False
119 |     # Optional credentials
120 |     username: Optional[str] = None
121 |     password: Optional[str] = None
122 |     token: Optional[str] = None
123 |     # Optional Org ID for multi-tenant setups
124 |     org_id: Optional[str] = None
125 |     # Optional Custom MCP Server Configuration
126 |     mcp_server_config: Optional[MCPServerConfig] = None
127 |     # Optional custom headers for Prometheus requests
128 |     custom_headers: Optional[Dict[str, str]] = None
129 | 
130 | config = PrometheusConfig(
131 |     url=os.environ.get("PROMETHEUS_URL", ""),
132 |     url_ssl_verify=os.environ.get("PROMETHEUS_URL_SSL_VERIFY", "True").lower() in ("true", "1", "yes"),
133 |     disable_prometheus_links=os.environ.get("PROMETHEUS_DISABLE_LINKS", "False").lower() in ("true", "1", "yes"),
134 |     username=os.environ.get("PROMETHEUS_USERNAME", ""),
135 |     password=os.environ.get("PROMETHEUS_PASSWORD", ""),
136 |     token=os.environ.get("PROMETHEUS_TOKEN", ""),
137 |     org_id=os.environ.get("ORG_ID", ""),
138 |     mcp_server_config=MCPServerConfig(
139 |         mcp_server_transport=os.environ.get("PROMETHEUS_MCP_SERVER_TRANSPORT", "stdio").lower(),
140 |         mcp_bind_host=os.environ.get("PROMETHEUS_MCP_BIND_HOST", "127.0.0.1"),
141 |         mcp_bind_port=int(os.environ.get("PROMETHEUS_MCP_BIND_PORT", "8080"))
142 |     ),
143 |     custom_headers=json.loads(os.environ.get("PROMETHEUS_CUSTOM_HEADERS")) if os.environ.get("PROMETHEUS_CUSTOM_HEADERS") else None,
144 | )
145 | 
146 | def get_prometheus_auth():
147 |     """Get authentication for Prometheus based on provided credentials."""
148 |     if config.token:
149 |         return {"Authorization": f"Bearer {config.token}"}
150 |     elif config.username and config.password:
151 |         return requests.auth.HTTPBasicAuth(config.username, config.password)
152 |     return None
153 | 
154 | def make_prometheus_request(endpoint, params=None):
155 |     """Make a request to the Prometheus API with proper authentication and headers."""
156 |     if not config.url:
157 |         logger.error("Prometheus configuration missing", error="PROMETHEUS_URL not set")
158 |         raise ValueError("Prometheus configuration is missing. Please set PROMETHEUS_URL environment variable.")
159 |     if not config.url_ssl_verify:
160 |         logger.warning("SSL certificate verification is disabled. This is insecure and should not be used in production environments.", endpoint=endpoint)
161 | 
162 |     url = f"{config.url.rstrip('/')}/api/v1/{endpoint}"
163 |     url_ssl_verify = config.url_ssl_verify
164 |     auth = get_prometheus_auth()
165 |     headers = {}
166 | 
167 |     if isinstance(auth, dict):  # Token auth is passed via headers
168 |         headers.update(auth)
169 |         auth = None  # Clear auth for requests.get if it's already in headers
170 |     
171 |     # Add OrgID header if specified
172 |     if config.org_id:
173 |         headers["X-Scope-OrgID"] = config.org_id
174 | 
175 |     if config.custom_headers:
176 |         headers.update(config.custom_headers)
177 | 
178 |     try:
179 |         logger.debug("Making Prometheus API request", endpoint=endpoint, url=url, params=params, headers=headers)
180 | 
181 |         # Make the request with appropriate headers and auth
182 |         response = requests.get(url, params=params, auth=auth, headers=headers, verify=url_ssl_verify)
183 |         
184 |         response.raise_for_status()
185 |         result = response.json()
186 |         
187 |         if result["status"] != "success":
188 |             error_msg = result.get('error', 'Unknown error')
189 |             logger.error("Prometheus API returned error", endpoint=endpoint, error=error_msg, status=result["status"])
190 |             raise ValueError(f"Prometheus API error: {error_msg}")
191 |         
192 |         data_field = result.get("data", {})
193 |         if isinstance(data_field, dict):
194 |             result_type = data_field.get("resultType")
195 |         else:
196 |             result_type = "list"
197 |         logger.debug("Prometheus API request successful", endpoint=endpoint, result_type=result_type)
198 |         return result["data"]
199 |     
200 |     except requests.exceptions.RequestException as e:
201 |         logger.error("HTTP request to Prometheus failed", endpoint=endpoint, url=url, error=str(e), error_type=type(e).__name__)
202 |         raise
203 |     except json.JSONDecodeError as e:
204 |         logger.error("Failed to parse Prometheus response as JSON", endpoint=endpoint, url=url, error=str(e))
205 |         raise ValueError(f"Invalid JSON response from Prometheus: {str(e)}")
206 |     except Exception as e:
207 |         logger.error("Unexpected error during Prometheus request", endpoint=endpoint, url=url, error=str(e), error_type=type(e).__name__)
208 |         raise
209 | 
210 | def get_cached_metrics() -> List[str]:
211 |     """Get metrics list with caching to improve completion performance.
212 | 
213 |     This helper function is available for future completion support when
214 |     FastMCP implements the completion capability. For now, it can be used
215 |     internally to optimize repeated metric list requests.
216 |     """
217 |     current_time = time.time()
218 | 
219 |     # Check if cache is valid
220 |     if _metrics_cache["data"] is not None and (current_time - _metrics_cache["timestamp"]) < _CACHE_TTL:
221 |         logger.debug("Using cached metrics list", cache_age=current_time - _metrics_cache["timestamp"])
222 |         return _metrics_cache["data"]
223 | 
224 |     # Fetch fresh metrics
225 |     try:
226 |         data = make_prometheus_request("label/__name__/values")
227 |         _metrics_cache["data"] = data
228 |         _metrics_cache["timestamp"] = current_time
229 |         logger.debug("Refreshed metrics cache", metric_count=len(data))
230 |         return data
231 |     except Exception as e:
232 |         logger.error("Failed to fetch metrics for cache", error=str(e))
233 |         # Return cached data if available, even if expired
234 |         return _metrics_cache["data"] if _metrics_cache["data"] is not None else []
235 | 
236 | # Note: Argument completions will be added when FastMCP supports the completion
237 | # capability. The get_cached_metrics() function above is ready for that integration.
238 | 
239 | @mcp.tool(
240 |     description="Execute a PromQL instant query against Prometheus",
241 |     annotations={
242 |         "title": "Execute PromQL Query",
243 |         "icon": "📊",
244 |         "readOnlyHint": True,
245 |         "destructiveHint": False,
246 |         "idempotentHint": True,
247 |         "openWorldHint": True
248 |     }
249 | )
250 | async def execute_query(query: str, time: Optional[str] = None) -> Dict[str, Any]:
251 |     """Execute an instant query against Prometheus.
252 | 
253 |     Args:
254 |         query: PromQL query string
255 |         time: Optional RFC3339 or Unix timestamp (default: current time)
256 | 
257 |     Returns:
258 |         Query result with type (vector, matrix, scalar, string) and values
259 |     """
260 |     params = {"query": query}
261 |     if time:
262 |         params["time"] = time
263 |     
264 |     logger.info("Executing instant query", query=query, time=time)
265 |     data = make_prometheus_request("query", params=params)
266 | 
267 |     result = {
268 |         "resultType": data["resultType"],
269 |         "result": data["result"]
270 |     }
271 | 
272 |     if not config.disable_prometheus_links:
273 |         from urllib.parse import urlencode
274 |         ui_params = {"g0.expr": query, "g0.tab": "0"}
275 |         if time:
276 |             ui_params["g0.moment_input"] = time
277 |         prometheus_ui_link = f"{config.url.rstrip('/')}/graph?{urlencode(ui_params)}"
278 |         result["links"] = [{
279 |             "href": prometheus_ui_link,
280 |             "rel": "prometheus-ui",
281 |             "title": "View in Prometheus UI"
282 |         }]
283 | 
284 |     logger.info("Instant query completed",
285 |                 query=query,
286 |                 result_type=data["resultType"],
287 |                 result_count=len(data["result"]) if isinstance(data["result"], list) else 1)
288 | 
289 |     return result
290 | 
291 | @mcp.tool(
292 |     description="Execute a PromQL range query with start time, end time, and step interval",
293 |     annotations={
294 |         "title": "Execute PromQL Range Query",
295 |         "icon": "📈",
296 |         "readOnlyHint": True,
297 |         "destructiveHint": False,
298 |         "idempotentHint": True,
299 |         "openWorldHint": True
300 |     }
301 | )
302 | async def execute_range_query(query: str, start: str, end: str, step: str, ctx: Context | None = None) -> Dict[str, Any]:
303 |     """Execute a range query against Prometheus.
304 | 
305 |     Args:
306 |         query: PromQL query string
307 |         start: Start time as RFC3339 or Unix timestamp
308 |         end: End time as RFC3339 or Unix timestamp
309 |         step: Query resolution step width (e.g., '15s', '1m', '1h')
310 | 
311 |     Returns:
312 |         Range query result with type (usually matrix) and values over time
313 |     """
314 |     params = {
315 |         "query": query,
316 |         "start": start,
317 |         "end": end,
318 |         "step": step
319 |     }
320 | 
321 |     logger.info("Executing range query", query=query, start=start, end=end, step=step)
322 | 
323 |     # Report progress if context available
324 |     if ctx:
325 |         await ctx.report_progress(progress=0, total=100, message="Initiating range query...")
326 | 
327 |     data = make_prometheus_request("query_range", params=params)
328 | 
329 |     # Report progress
330 |     if ctx:
331 |         await ctx.report_progress(progress=50, total=100, message="Processing query results...")
332 | 
333 |     result = {
334 |         "resultType": data["resultType"],
335 |         "result": data["result"]
336 |     }
337 | 
338 |     if not config.disable_prometheus_links:
339 |         from urllib.parse import urlencode
340 |         ui_params = {
341 |             "g0.expr": query,
342 |             "g0.tab": "0",
343 |             "g0.range_input": f"{start} to {end}",
344 |             "g0.step_input": step
345 |         }
346 |         prometheus_ui_link = f"{config.url.rstrip('/')}/graph?{urlencode(ui_params)}"
347 |         result["links"] = [{
348 |             "href": prometheus_ui_link,
349 |             "rel": "prometheus-ui",
350 |             "title": "View in Prometheus UI"
351 |         }]
352 | 
353 |     # Report completion
354 |     if ctx:
355 |         await ctx.report_progress(progress=100, total=100, message="Range query completed")
356 | 
357 |     logger.info("Range query completed",
358 |                 query=query,
359 |                 result_type=data["resultType"],
360 |                 result_count=len(data["result"]) if isinstance(data["result"], list) else 1)
361 | 
362 |     return result
363 | 
364 | @mcp.tool(
365 |     description="List all available metrics in Prometheus with optional pagination support",
366 |     annotations={
367 |         "title": "List Available Metrics",
368 |         "icon": "📋",
369 |         "readOnlyHint": True,
370 |         "destructiveHint": False,
371 |         "idempotentHint": True,
372 |         "openWorldHint": True
373 |     }
374 | )
375 | async def list_metrics(
376 |     limit: Optional[int] = None,
377 |     offset: int = 0,
378 |     filter_pattern: Optional[str] = None,
379 |     ctx: Context | None = None
380 | ) -> Dict[str, Any]:
381 |     """Retrieve a list of all metric names available in Prometheus.
382 | 
383 |     Args:
384 |         limit: Maximum number of metrics to return (default: all metrics)
385 |         offset: Number of metrics to skip for pagination (default: 0)
386 |         filter_pattern: Optional substring to filter metric names (case-insensitive)
387 | 
388 |     Returns:
389 |         Dictionary containing:
390 |         - metrics: List of metric names
391 |         - total_count: Total number of metrics (before pagination)
392 |         - returned_count: Number of metrics returned
393 |         - offset: Current offset
394 |         - has_more: Whether more metrics are available
395 |     """
396 |     logger.info("Listing available metrics", limit=limit, offset=offset, filter_pattern=filter_pattern)
397 | 
398 |     # Report progress if context available
399 |     if ctx:
400 |         await ctx.report_progress(progress=0, total=100, message="Fetching metrics list...")
401 | 
402 |     data = make_prometheus_request("label/__name__/values")
403 | 
404 |     if ctx:
405 |         await ctx.report_progress(progress=50, total=100, message=f"Processing {len(data)} metrics...")
406 | 
407 |     # Apply filter if provided
408 |     if filter_pattern:
409 |         filtered_data = [m for m in data if filter_pattern.lower() in m.lower()]
410 |         logger.debug("Applied filter", original_count=len(data), filtered_count=len(filtered_data), pattern=filter_pattern)
411 |         data = filtered_data
412 | 
413 |     total_count = len(data)
414 | 
415 |     # Apply pagination
416 |     start_idx = offset
417 |     end_idx = offset + limit if limit is not None else len(data)
418 |     paginated_data = data[start_idx:end_idx]
419 | 
420 |     result = {
421 |         "metrics": paginated_data,
422 |         "total_count": total_count,
423 |         "returned_count": len(paginated_data),
424 |         "offset": offset,
425 |         "has_more": end_idx < total_count
426 |     }
427 | 
428 |     if ctx:
429 |         await ctx.report_progress(progress=100, total=100, message=f"Retrieved {len(paginated_data)} of {total_count} metrics")
430 | 
431 |     logger.info("Metrics list retrieved",
432 |                 total_count=total_count,
433 |                 returned_count=len(paginated_data),
434 |                 offset=offset,
435 |                 has_more=result["has_more"])
436 | 
437 |     return result
438 | 
439 | @mcp.tool(
440 |     description="Get metadata for a specific metric",
441 |     annotations={
442 |         "title": "Get Metric Metadata",
443 |         "icon": "ℹ️",
444 |         "readOnlyHint": True,
445 |         "destructiveHint": False,
446 |         "idempotentHint": True,
447 |         "openWorldHint": True
448 |     }
449 | )
450 | async def get_metric_metadata(metric: str) -> List[Dict[str, Any]]:
451 |     """Get metadata about a specific metric.
452 | 
453 |     Args:
454 |         metric: The name of the metric to retrieve metadata for
455 | 
456 |     Returns:
457 |         List of metadata entries for the metric
458 |     """
459 |     logger.info("Retrieving metric metadata", metric=metric)
460 |     endpoint = f"metadata?metric={metric}"
461 |     data = make_prometheus_request(endpoint, params=None)
462 |     if "metadata" in data:
463 |         metadata = data["metadata"]
464 |     elif "data" in data:
465 |         metadata = data["data"]
466 |     else:
467 |         metadata = data
468 |     if isinstance(metadata, dict):
469 |         metadata = [metadata]
470 |     logger.info("Metric metadata retrieved", metric=metric, metadata_count=len(metadata))
471 |     return metadata
472 | 
473 | @mcp.tool(
474 |     description="Get information about all scrape targets",
475 |     annotations={
476 |         "title": "Get Scrape Targets",
477 |         "icon": "🎯",
478 |         "readOnlyHint": True,
479 |         "destructiveHint": False,
480 |         "idempotentHint": True,
481 |         "openWorldHint": True
482 |     }
483 | )
484 | async def get_targets() -> Dict[str, List[Dict[str, Any]]]:
485 |     """Get information about all Prometheus scrape targets.
486 | 
487 |     Returns:
488 |         Dictionary with active and dropped targets information
489 |     """
490 |     logger.info("Retrieving scrape targets information")
491 |     data = make_prometheus_request("targets")
492 |     
493 |     result = {
494 |         "activeTargets": data["activeTargets"],
495 |         "droppedTargets": data["droppedTargets"]
496 |     }
497 |     
498 |     logger.info("Scrape targets retrieved", 
499 |                 active_targets=len(data["activeTargets"]), 
500 |                 dropped_targets=len(data["droppedTargets"]))
501 |     
502 |     return result
503 | 
504 | if __name__ == "__main__":
505 |     logger.info("Starting Prometheus MCP Server", mode="direct")
506 |     mcp.run()
507 | 
```
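
For orientation, here is a minimal sketch (not part of the repository) of how these tools can be exercised outside an MCP client. The `.fn` attribute, which exposes the undecorated coroutine behind each FastMCP tool, is the same access pattern used in `tests/test_mcp_2025_direct.py`. Note that `config` is built at import time, so `PROMETHEUS_URL` must be set before the module is imported; the URL below is an assumed local Prometheus instance.

```python
import asyncio
import os

# Configuration is read at import time, so set the environment first.
os.environ.setdefault("PROMETHEUS_URL", "http://localhost:9090")  # assumed local instance

from prometheus_mcp_server.server import execute_query, list_metrics

async def main():
    # Page through metric names containing "up" (case-insensitive filter).
    page = await list_metrics.fn(limit=5, filter_pattern="up")
    print(page["returned_count"], "of", page["total_count"], page["metrics"])

    # Run an instant query; the payload mirrors the Prometheus API response.
    result = await execute_query.fn(query="up")
    print(result["resultType"], len(result["result"]))

asyncio.run(main())
```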

--------------------------------------------------------------------------------
/tests/test_mcp_2025_direct.py:
--------------------------------------------------------------------------------

```python
  1 | """Direct function tests for MCP 2025 features to improve diff coverage.
  2 | 
  3 | This module tests features by calling functions directly rather than through
  4 | the MCP client, allowing us to test code paths that require direct context
  5 | passing (like progress notifications with ctx parameter).
  6 | """
  7 | 
  8 | import pytest
  9 | from unittest.mock import patch, MagicMock, AsyncMock
 10 | from datetime import datetime
 11 | from prometheus_mcp_server.server import (
 12 |     execute_query,
 13 |     execute_range_query,
 14 |     list_metrics,
 15 |     get_metric_metadata,
 16 |     get_targets,
 17 |     health_check,
 18 |     config
 19 | )
 20 | 
 21 | 
 22 | @pytest.fixture
 23 | def mock_make_request():
 24 |     """Mock the make_prometheus_request function."""
 25 |     with patch("prometheus_mcp_server.server.make_prometheus_request") as mock:
 26 |         yield mock
 27 | 
 28 | 
 29 | class TestDirectFunctionCalls:
 30 |     """Test functions called directly to cover context-dependent code paths."""
 31 | 
 32 |     @pytest.mark.asyncio
 33 |     async def test_execute_query_direct_call(self, mock_make_request):
 34 |         """Test execute_query by calling it directly."""
 35 |         mock_make_request.return_value = {
 36 |             "resultType": "vector",
 37 |             "result": [{"metric": {"__name__": "up"}, "value": [1617898448.214, "1"]}]
 38 |         }
 39 | 
 40 |         # Access the underlying function from FunctionTool
 41 |         result = await execute_query.fn(query="up", time="2023-01-01T00:00:00Z")
 42 | 
 43 |         assert "resultType" in result
 44 |         assert "result" in result
 45 |         assert "links" in result
 46 |         assert result["links"][0]["rel"] == "prometheus-ui"
 47 |         assert "up" in result["links"][0]["href"]
 48 | 
 49 |     @pytest.mark.asyncio
 50 |     async def test_execute_range_query_with_context(self, mock_make_request):
 51 |         """Test execute_range_query with context for progress reporting."""
 52 |         mock_make_request.return_value = {
 53 |             "resultType": "matrix",
 54 |             "result": [{"metric": {"__name__": "up"}, "values": [[1617898400, "1"]]}]
 55 |         }
 56 | 
 57 |         # Create mock context
 58 |         mock_ctx = AsyncMock()
 59 |         mock_ctx.report_progress = AsyncMock()
 60 | 
 61 |         result = await execute_range_query.fn(
 62 |             query="up",
 63 |             start="2023-01-01T00:00:00Z",
 64 |             end="2023-01-01T01:00:00Z",
 65 |             step="15s",
 66 |             ctx=mock_ctx
 67 |         )
 68 | 
 69 |         # Verify progress was reported
 70 |         assert mock_ctx.report_progress.call_count >= 3
 71 |         calls = mock_ctx.report_progress.call_args_list
 72 | 
 73 |         # Check initial progress
 74 |         assert calls[0].kwargs["progress"] == 0
 75 |         assert calls[0].kwargs["total"] == 100
 76 |         assert "Initiating" in calls[0].kwargs["message"]
 77 | 
 78 |         # Check completion progress
 79 |         assert calls[-1].kwargs["progress"] == 100
 80 |         assert calls[-1].kwargs["total"] == 100
 81 |         assert "completed" in calls[-1].kwargs["message"]
 82 | 
 83 |         # Verify result includes links
 84 |         assert "links" in result
 85 |         assert result["links"][0]["rel"] == "prometheus-ui"
 86 | 
 87 |     @pytest.mark.asyncio
 88 |     async def test_execute_range_query_without_context(self, mock_make_request):
 89 |         """Test execute_range_query without context (backward compatibility)."""
 90 |         mock_make_request.return_value = {
 91 |             "resultType": "matrix",
 92 |             "result": []
 93 |         }
 94 | 
 95 |         # Call without context - should not error
 96 |         result = await execute_range_query.fn(
 97 |             query="up",
 98 |             start="2023-01-01T00:00:00Z",
 99 |             end="2023-01-01T01:00:00Z",
100 |             step="15s",
101 |             ctx=None
102 |         )
103 | 
104 |         assert "resultType" in result
105 |         assert "links" in result
106 | 
107 |     @pytest.mark.asyncio
108 |     async def test_list_metrics_with_context(self, mock_make_request):
109 |         """Test list_metrics with context for progress reporting."""
110 |         mock_make_request.return_value = ["metric1", "metric2", "metric3"]
111 | 
112 |         # Create mock context
113 |         mock_ctx = AsyncMock()
114 |         mock_ctx.report_progress = AsyncMock()
115 | 
116 |         result = await list_metrics.fn(ctx=mock_ctx)
117 | 
118 |         # Verify progress was reported (start, processing, and completion stages)
119 |         assert mock_ctx.report_progress.call_count >= 3
120 |         calls = mock_ctx.report_progress.call_args_list
121 | 
122 |         # Check initial progress
123 |         assert calls[0].kwargs["progress"] == 0
124 |         assert calls[0].kwargs["total"] == 100
125 |         assert "Fetching" in calls[0].kwargs["message"]
126 | 
127 |         # Check completion progress with count
128 |         assert calls[-1].kwargs["progress"] == 100
129 |         assert calls[-1].kwargs["total"] == 100
130 |         assert "3" in calls[-1].kwargs["message"]
131 | 
132 |         # Verify result - now returns a dict with pagination info
133 |         assert isinstance(result, dict)
134 |         assert result["total_count"] == 3
135 |         assert result["returned_count"] == 3
136 |         assert "metric1" in result["metrics"]
137 | 
138 |     @pytest.mark.asyncio
139 |     async def test_list_metrics_without_context(self, mock_make_request):
140 |         """Test list_metrics without context (backward compatibility)."""
141 |         mock_make_request.return_value = ["metric1", "metric2"]
142 | 
143 |         result = await list_metrics.fn(ctx=None)
144 | 
145 |         # Now returns a dict with pagination info
146 |         assert isinstance(result, dict)
147 |         assert result["total_count"] == 2
148 |         assert result["returned_count"] == 2
149 |         assert "metric1" in result["metrics"]
150 | 
151 |     @pytest.mark.asyncio
152 |     async def test_get_metric_metadata_direct_call(self, mock_make_request):
153 |         """Test get_metric_metadata by calling it directly."""
154 |         # Test when data is in "metadata" key
155 |         mock_make_request.return_value = {
156 |             "metadata": [
157 |                 {"metric": "up", "type": "gauge", "help": "Up status", "unit": ""}
158 |             ]
159 |         }
160 | 
161 |         result = await get_metric_metadata.fn(metric="up")
162 | 
163 |         assert len(result) == 1
164 |         assert result[0]["metric"] == "up"
165 |         assert result[0]["type"] == "gauge"
166 | 
167 |     @pytest.mark.asyncio
168 |     async def test_get_metric_metadata_data_key(self, mock_make_request):
169 |         """Test get_metric_metadata when data is in 'data' key instead of 'metadata'."""
170 |         # Test when data is in "data" key (fallback path)
171 |         mock_make_request.return_value = {
172 |             "data": [
173 |                 {"metric": "http_requests", "type": "counter", "help": "HTTP requests", "unit": ""}
174 |             ]
175 |         }
176 | 
177 |         result = await get_metric_metadata.fn(metric="http_requests")
178 | 
179 |         assert len(result) == 1
180 |         assert result[0]["metric"] == "http_requests"
181 |         assert result[0]["type"] == "counter"
182 | 
183 |     @pytest.mark.asyncio
184 |     async def test_get_metric_metadata_fallback_to_raw_data(self, mock_make_request):
185 |         """Test get_metric_metadata when neither 'metadata' nor 'data' keys exist."""
186 |         # Test when data is returned directly (neither "metadata" nor "data" keys exist)
187 |         mock_make_request.return_value = [
188 |             {"metric": "cpu_usage", "type": "gauge", "help": "CPU usage", "unit": "percent"}
189 |         ]
190 | 
191 |         result = await get_metric_metadata.fn(metric="cpu_usage")
192 | 
193 |         assert len(result) == 1
194 |         assert result[0]["metric"] == "cpu_usage"
195 |         assert result[0]["type"] == "gauge"
196 | 
197 |     @pytest.mark.asyncio
198 |     async def test_get_metric_metadata_dict_to_list_conversion(self, mock_make_request):
199 |         """Test get_metric_metadata when metadata is a dict and needs conversion to list."""
200 |         # Test when metadata is a single dict that needs to be converted to a list
201 |         mock_make_request.return_value = {
202 |             "metadata": {"metric": "memory_usage", "type": "gauge", "help": "Memory usage", "unit": "bytes"}
203 |         }
204 | 
205 |         result = await get_metric_metadata.fn(metric="memory_usage")
206 | 
207 |         assert isinstance(result, list)
208 |         assert len(result) == 1
209 |         assert result[0]["metric"] == "memory_usage"
210 |         assert result[0]["type"] == "gauge"
211 | 
212 |     @pytest.mark.asyncio
213 |     async def test_get_metric_metadata_data_key_dict_to_list(self, mock_make_request):
214 |         """Test get_metric_metadata when data is in 'data' key as a dict."""
215 |         # Test when data is in "data" key as a dict that needs conversion
216 |         mock_make_request.return_value = {
217 |             "data": {"metric": "disk_usage", "type": "gauge", "help": "Disk usage", "unit": "bytes"}
218 |         }
219 | 
220 |         result = await get_metric_metadata.fn(metric="disk_usage")
221 | 
222 |         assert isinstance(result, list)
223 |         assert len(result) == 1
224 |         assert result[0]["metric"] == "disk_usage"
225 |         assert result[0]["type"] == "gauge"
226 | 
227 |     @pytest.mark.asyncio
228 |     async def test_get_metric_metadata_raw_dict_to_list(self, mock_make_request):
229 |         """Test get_metric_metadata when raw data is a dict (fallback path with dict)."""
230 |         # Test when data is returned directly as a dict (neither "metadata" nor "data" keys)
231 |         mock_make_request.return_value = {
232 |             "metric": "network_bytes", "type": "counter", "help": "Network bytes", "unit": "bytes"
233 |         }
234 | 
235 |         result = await get_metric_metadata.fn(metric="network_bytes")
236 | 
237 |         assert isinstance(result, list)
238 |         assert len(result) == 1
239 |         assert result[0]["metric"] == "network_bytes"
240 |         assert result[0]["type"] == "counter"
241 | 
242 |     @pytest.mark.asyncio
243 |     async def test_get_targets_direct_call(self, mock_make_request):
244 |         """Test get_targets by calling it directly."""
245 |         mock_make_request.return_value = {
246 |             "activeTargets": [
247 |                 {
248 |                     "discoveredLabels": {"__address__": "localhost:9090"},
249 |                     "labels": {"job": "prometheus"},
250 |                     "health": "up"
251 |                 }
252 |             ],
253 |             "droppedTargets": [
254 |                 {
255 |                     "discoveredLabels": {"__address__": "localhost:9091"}
256 |                 }
257 |             ]
258 |         }
259 | 
260 |         result = await get_targets.fn()
261 | 
262 |         assert "activeTargets" in result
263 |         assert "droppedTargets" in result
264 |         assert len(result["activeTargets"]) == 1
265 |         assert result["activeTargets"][0]["health"] == "up"
266 |         assert len(result["droppedTargets"]) == 1
267 | 
268 | 
269 | class TestHealthCheckFunction:
270 |     """Test health_check function directly to improve coverage."""
271 | 
272 |     @pytest.mark.asyncio
273 |     async def test_health_check_healthy_with_prometheus(self, mock_make_request):
274 |         """Test health_check when Prometheus is accessible."""
275 |         mock_make_request.return_value = {
276 |             "resultType": "vector",
277 |             "result": []
278 |         }
279 | 
280 |         with patch("prometheus_mcp_server.server.config") as mock_config:
281 |             mock_config.url = "http://prometheus:9090"
282 |             mock_config.username = "admin"
283 |             mock_config.password = "secret"
284 |             mock_config.org_id = None
285 |             mock_config.mcp_server_config = MagicMock()
286 |             mock_config.mcp_server_config.mcp_server_transport = "stdio"
287 | 
288 |             result = await health_check.fn()
289 | 
290 |             assert result["status"] == "healthy"
291 |             assert result["service"] == "prometheus-mcp-server"
292 |             assert "version" in result  # Version exists but value is managed by maintainers
293 |             assert "timestamp" in result
294 |             assert result["prometheus_connectivity"] == "healthy"
295 |             assert result["prometheus_url"] == "http://prometheus:9090"
296 |             assert result["configuration"]["prometheus_url_configured"] is True
297 |             assert result["configuration"]["authentication_configured"] is True
298 | 
299 |     @pytest.mark.asyncio
300 |     async def test_health_check_degraded_prometheus_error(self, mock_make_request):
301 |         """Test health_check when Prometheus is not accessible."""
302 |         mock_make_request.side_effect = Exception("Connection refused")
303 | 
304 |         with patch("prometheus_mcp_server.server.config") as mock_config:
305 |             mock_config.url = "http://prometheus:9090"
306 |             mock_config.username = None
307 |             mock_config.password = None
308 |             mock_config.token = None
309 |             mock_config.org_id = None
310 |             mock_config.mcp_server_config = MagicMock()
311 |             mock_config.mcp_server_config.mcp_server_transport = "http"
312 | 
313 |             result = await health_check.fn()
314 | 
315 |             assert result["status"] == "degraded"
316 |             assert result["prometheus_connectivity"] == "unhealthy"
317 |             assert "prometheus_error" in result
318 |             assert "Connection refused" in result["prometheus_error"]
319 | 
320 |     @pytest.mark.asyncio
321 |     async def test_health_check_unhealthy_no_url(self):
322 |         """Test health_check when PROMETHEUS_URL is not configured."""
323 |         with patch("prometheus_mcp_server.server.config") as mock_config:
324 |             mock_config.url = ""
325 |             mock_config.username = None
326 |             mock_config.password = None
327 |             mock_config.token = None
328 |             mock_config.org_id = None
329 |             mock_config.mcp_server_config = MagicMock()
330 |             mock_config.mcp_server_config.mcp_server_transport = "stdio"
331 | 
332 |             result = await health_check.fn()
333 | 
334 |             assert result["status"] == "unhealthy"
335 |             assert "error" in result
336 |             assert "PROMETHEUS_URL not configured" in result["error"]
337 |             assert result["configuration"]["prometheus_url_configured"] is False
338 | 
339 |     @pytest.mark.asyncio
340 |     async def test_health_check_with_token_auth(self, mock_make_request):
341 |         """Test health_check with token authentication."""
342 |         mock_make_request.return_value = {
343 |             "resultType": "vector",
344 |             "result": []
345 |         }
346 | 
347 |         with patch("prometheus_mcp_server.server.config") as mock_config:
348 |             mock_config.url = "http://prometheus:9090"
349 |             mock_config.username = None
350 |             mock_config.password = None
351 |             mock_config.token = "bearer-token-123"
352 |             mock_config.org_id = "org-1"
353 |             mock_config.mcp_server_config = MagicMock()
354 |             mock_config.mcp_server_config.mcp_server_transport = "sse"
355 | 
356 |             result = await health_check.fn()
357 | 
358 |             assert result["status"] == "healthy"
359 |             assert result["configuration"]["authentication_configured"] is True
360 |             assert result["configuration"]["org_id_configured"] is True
361 |             assert result["transport"] == "sse"
362 | 
363 |     @pytest.mark.asyncio
364 |     async def test_health_check_exception_handling(self):
365 |         """Test health_check handles unexpected exceptions."""
366 |         with patch("prometheus_mcp_server.server.config") as mock_config:
367 |             # Make accessing config.url raise an exception
368 |             type(mock_config).url = property(lambda self: (_ for _ in ()).throw(RuntimeError("Unexpected error")))
369 | 
370 |             result = await health_check.fn()
371 | 
372 |             assert result["status"] == "unhealthy"
373 |             assert "error" in result
374 |             assert "Unexpected error" in result["error"]
375 | 
376 |     @pytest.mark.asyncio
377 |     async def test_health_check_with_org_id(self, mock_make_request):
378 |         """Test health_check includes org_id configuration."""
379 |         mock_make_request.return_value = {
380 |             "resultType": "vector",
381 |             "result": []
382 |         }
383 | 
384 |         with patch("prometheus_mcp_server.server.config") as mock_config:
385 |             mock_config.url = "http://prometheus:9090"
386 |             mock_config.username = None
387 |             mock_config.password = None
388 |             mock_config.token = None
389 |             mock_config.org_id = "tenant-123"
390 |             mock_config.mcp_server_config = MagicMock()
391 |             mock_config.mcp_server_config.mcp_server_transport = "stdio"
392 | 
393 |             result = await health_check.fn()
394 | 
395 |             assert result["configuration"]["org_id_configured"] is True
396 | 
397 |     @pytest.mark.asyncio
398 |     async def test_health_check_no_mcp_server_config(self, mock_make_request):
399 |         """Test health_check when mcp_server_config is None."""
400 |         mock_make_request.return_value = {
401 |             "resultType": "vector",
402 |             "result": []
403 |         }
404 | 
405 |         with patch("prometheus_mcp_server.server.config") as mock_config:
406 |             mock_config.url = "http://prometheus:9090"
407 |             mock_config.username = None
408 |             mock_config.password = None
409 |             mock_config.token = None
410 |             mock_config.org_id = None
411 |             mock_config.mcp_server_config = None
412 | 
413 |             result = await health_check.fn()
414 | 
415 |             assert result["status"] == "healthy"
416 |             assert result["transport"] == "stdio"
417 | 
418 | 
419 | class TestProgressNotificationsPaths:
420 |     """Test progress notification code paths for complete coverage."""
421 | 
422 |     @pytest.mark.asyncio
423 |     async def test_range_query_progress_all_stages(self, mock_make_request):
424 |         """Test all three progress stages in execute_range_query."""
425 |         mock_make_request.return_value = {
426 |             "resultType": "matrix",
427 |             "result": []
428 |         }
429 | 
430 |         mock_ctx = AsyncMock()
431 |         mock_ctx.report_progress = AsyncMock()
432 | 
433 |         await execute_range_query.fn(
434 |             query="up",
435 |             start="2023-01-01T00:00:00Z",
436 |             end="2023-01-01T01:00:00Z",
437 |             step="15s",
438 |             ctx=mock_ctx
439 |         )
440 | 
441 |         # Verify all three stages
442 |         calls = [call.kwargs for call in mock_ctx.report_progress.call_args_list]
443 | 
444 |         # Stage 1: Initiation (0%)
445 |         assert any(c["progress"] == 0 and "Initiating" in c["message"] for c in calls)
446 | 
447 |         # Stage 2: Processing (50%)
448 |         assert any(c["progress"] == 50 and "Processing" in c["message"] for c in calls)
449 | 
450 |         # Stage 3: Completion (100%)
451 |         assert any(c["progress"] == 100 and "completed" in c["message"] for c in calls)
452 | 
453 |     @pytest.mark.asyncio
454 |     async def test_list_metrics_progress_both_stages(self, mock_make_request):
455 |         """Test both progress stages in list_metrics."""
456 |         mock_make_request.return_value = ["m1", "m2", "m3", "m4", "m5"]
457 | 
458 |         mock_ctx = AsyncMock()
459 |         mock_ctx.report_progress = AsyncMock()
460 | 
461 |         await list_metrics.fn(ctx=mock_ctx)
462 | 
463 |         calls = [call.kwargs for call in mock_ctx.report_progress.call_args_list]
464 | 
465 |         # Stage 1: Fetching (0%)
466 |         assert any(c["progress"] == 0 and "Fetching" in c["message"] for c in calls)
467 | 
468 |         # Stage 2: Completion (100%) with count
469 |         assert any(c["progress"] == 100 and "5" in c["message"] for c in calls)
470 | 
```
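
The health-check tests above repeat the same `patch("prometheus_mcp_server.server.config")` block with only one or two attributes changed per test. A sketch of a fixture that could consolidate that duplication (illustrative only; the fixture name and defaults are assumptions, not part of the repository):

```python
import pytest
from unittest.mock import MagicMock, patch

@pytest.fixture
def patched_config():
    """Baseline patched server config: URL set, no auth, stdio transport."""
    with patch("prometheus_mcp_server.server.config") as mock_config:
        mock_config.url = "http://prometheus:9090"
        mock_config.username = None
        mock_config.password = None
        mock_config.token = None
        mock_config.org_id = None
        mock_config.mcp_server_config = MagicMock()
        mock_config.mcp_server_config.mcp_server_transport = "stdio"
        yield mock_config
```

Individual tests would then override only what differs, e.g. setting `patched_config.token = "bearer-token-123"` before calling `health_check.fn()`.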

--------------------------------------------------------------------------------
/.github/workflows/triage-metrics.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Triage Metrics & Reporting
  2 | 
  3 | on:
  4 |   schedule:
  5 |     # Daily metrics at 8 AM UTC
  6 |     - cron: '0 8 * * *'
  7 |     # Weekly detailed report on Mondays at 9 AM UTC
  8 |     - cron: '0 9 * * 1'
  9 |   workflow_dispatch:
 10 |     inputs:
 11 |       report_type:
 12 |         description: 'Type of report to generate'
 13 |         required: true
 14 |         default: 'daily'
 15 |         type: choice
 16 |         options:
 17 |         - daily
 18 |         - weekly
 19 |         - monthly
 20 |         - custom
 21 |       days_back:
 22 |         description: 'Days back to analyze (for custom reports)'
 23 |         required: false
 24 |         default: '7'
 25 |         type: string
 26 | 
 27 | permissions:
 28 |   issues: read
 29 |   contents: write
 30 |   pull-requests: read
 31 | 
 32 | jobs:
 33 |   collect-metrics:
 34 |     runs-on: ubuntu-latest
 35 |     outputs:
 36 |       metrics_json: ${{ steps.calculate.outputs.metrics }}
 37 |     
 38 |     steps:
 39 |       - name: Calculate Triage Metrics
 40 |         id: calculate
 41 |         uses: actions/github-script@v7
 42 |         with:
 43 |           script: |
 44 |             const reportType = '${{ github.event.inputs.report_type }}' || ('${{ github.event.schedule }}' === '0 9 * * 1' ? 'weekly' : 'daily'); // scheduled runs carry no inputs; map the Monday cron to the weekly report
 45 |             const daysBack = parseInt('${{ github.event.inputs.days_back }}' || '7');
 46 |             
 47 |             // Determine date range based on report type
 48 |             const now = new Date();
 49 |             let startDate;
 50 |             
 51 |             switch (reportType) {
 52 |               case 'daily':
 53 |                 startDate = new Date(now.getTime() - (1 * 24 * 60 * 60 * 1000));
 54 |                 break;
 55 |               case 'weekly':
 56 |                 startDate = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
 57 |                 break;
 58 |               case 'monthly':
 59 |                 startDate = new Date(now.getTime() - (30 * 24 * 60 * 60 * 1000));
 60 |                 break;
 61 |               case 'custom':
 62 |                 startDate = new Date(now.getTime() - (daysBack * 24 * 60 * 60 * 1000));
 63 |                 break;
 64 |               default:
 65 |                 startDate = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
 66 |             }
 67 | 
 68 |             console.log(`Analyzing ${reportType} metrics from ${startDate.toISOString()} to ${now.toISOString()}`);
 69 | 
 70 |             // Fetch all issues and PRs
 71 |             const allIssues = [];
 72 |             let page = 1;
 73 |             let hasMore = true;
 74 | 
 75 |             while (hasMore && page <= 10) { // Limit to prevent excessive API calls
 76 |               const { data: pageIssues } = await github.rest.issues.listForRepo({
 77 |                 owner: context.repo.owner,
 78 |                 repo: context.repo.repo,
 79 |                 state: 'all',
 80 |                 sort: 'updated',
 81 |                 direction: 'desc',
 82 |                 per_page: 100,
 83 |                 page: page
 84 |               });
 85 | 
 86 |               allIssues.push(...pageIssues);
 87 |               
 88 |               // Check if we've gone back far enough
 89 |               const oldestInPage = new Date(Math.min(...pageIssues.map(i => new Date(i.updated_at))));
 90 |               hasMore = pageIssues.length === 100 && oldestInPage > startDate;
 91 |               page++;
 92 |             }
 93 | 
 94 |             // Initialize metrics
 95 |             const metrics = {
 96 |               period: {
 97 |                 type: reportType,
 98 |                 start: startDate.toISOString(),
 99 |                 end: now.toISOString(),
100 |                 days: Math.ceil((now - startDate) / (1000 * 60 * 60 * 24))
101 |               },
102 |               overview: {
103 |                 total_issues: 0,
104 |                 total_prs: 0,
105 |                 open_issues: 0,
106 |                 closed_issues: 0,
107 |                 new_issues: 0,
108 |                 resolved_issues: 0
109 |               },
110 |               triage: {
111 |                 needs_triage: 0,
112 |                 triaged_this_period: 0,
113 |                 avg_triage_time_hours: 0,
114 |                 overdue_triage: 0
115 |               },
116 |               labels: {
117 |                 by_priority: {},
118 |                 by_component: {},
119 |                 by_type: {},
120 |                 by_status: {}
121 |               },
122 |               response_times: {
123 |                 avg_first_response_hours: 0,
124 |                 avg_resolution_time_hours: 0,
125 |                 issues_without_response: 0
126 |               },
127 |               contributors: {
128 |                 issue_creators: new Set(),
129 |                 comment_authors: new Set(),
130 |                 assignees: new Set()
131 |               },
132 |               quality: {
133 |                 issues_with_templates: 0,
134 |                 issues_missing_info: 0,
135 |                 duplicate_issues: 0,
136 |                 stale_issues: 0
137 |               }
138 |             };
139 | 
140 |             const triageEvents = [];
141 |             const responseTimeData = [];
142 | 
143 |             // Analyze each issue
144 |             for (const issue of allIssues) {
145 |               const createdAt = new Date(issue.created_at);
146 |               const updatedAt = new Date(issue.updated_at);
147 |               const closedAt = issue.closed_at ? new Date(issue.closed_at) : null;
148 |               const isPR = !!issue.pull_request;
149 |               const isInPeriod = updatedAt >= startDate;
150 |               
151 |               if (!isInPeriod && createdAt < startDate) continue;
152 | 
153 |               // Basic counts
154 |               if (isPR) {
155 |                 metrics.overview.total_prs++;
156 |               } else {
157 |                 metrics.overview.total_issues++;
158 |                 
159 |                 if (issue.state === 'open') {
160 |                   metrics.overview.open_issues++;
161 |                 } else {
162 |                   metrics.overview.closed_issues++;
163 |                 }
164 |                 
165 |                 // New issues in period
166 |                 if (createdAt >= startDate) {
167 |                   metrics.overview.new_issues++;
168 |                   metrics.contributors.issue_creators.add(issue.user.login);
169 |                 }
170 |                 
171 |                 // Resolved issues in period
172 |                 if (closedAt && closedAt >= startDate) {
173 |                   metrics.overview.resolved_issues++;
174 |                 }
175 |               }
176 | 
177 |               if (isPR) continue; // Skip PRs for issue-specific analysis
178 | 
179 |               // Triage analysis
180 |               const hasNeedsTriageLabel = issue.labels.some(l => l.name === 'status: needs-triage');
181 |               if (hasNeedsTriageLabel) {
182 |                 metrics.triage.needs_triage++;
183 |                 const daysSinceCreated = (now - createdAt) / (1000 * 60 * 60 * 24);
184 |                 if (daysSinceCreated > 3) {
185 |                   metrics.triage.overdue_triage++;
186 |                 }
187 |               }
188 | 
189 |               // Label analysis
190 |               for (const label of issue.labels) {
191 |                 const labelName = label.name;
192 |                 
193 |                 if (labelName.startsWith('priority: ')) {
194 |                   const priority = labelName.replace('priority: ', '');
195 |                   metrics.labels.by_priority[priority] = (metrics.labels.by_priority[priority] || 0) + 1;
196 |                 }
197 |                 
198 |                 if (labelName.startsWith('component: ')) {
199 |                   const component = labelName.replace('component: ', '');
200 |                   metrics.labels.by_component[component] = (metrics.labels.by_component[component] || 0) + 1;
201 |                 }
202 |                 
203 |                 if (labelName.startsWith('type: ')) {
204 |                   const type = labelName.replace('type: ', '');
205 |                   metrics.labels.by_type[type] = (metrics.labels.by_type[type] || 0) + 1;
206 |                 }
207 |                 
208 |                 if (labelName.startsWith('status: ')) {
209 |                   const status = labelName.replace('status: ', '');
210 |                   metrics.labels.by_status[status] = (metrics.labels.by_status[status] || 0) + 1;
211 |                 }
212 |               }
213 | 
214 |               // Assignment analysis
215 |               if (issue.assignees.length > 0) {
216 |                 issue.assignees.forEach(assignee => {
217 |                   metrics.contributors.assignees.add(assignee.login);
218 |                 });
219 |               }
220 | 
221 |               // Quality analysis
222 |               const bodyLength = issue.body ? issue.body.length : 0;
223 |               if (bodyLength > 100 && issue.body.includes('###')) {
224 |                 metrics.quality.issues_with_templates++;
225 |               } else if (bodyLength < 50) {
226 |                 metrics.quality.issues_missing_info++;
227 |               }
228 | 
229 |               // Check for stale issues
230 |               const daysSinceUpdate = (now - updatedAt) / (1000 * 60 * 60 * 24);
231 |               if (issue.state === 'open' && daysSinceUpdate > 30) {
232 |                 metrics.quality.stale_issues++;
233 |               }
234 | 
235 |               // Get comments for response time analysis
236 |               if (createdAt >= startDate) {
237 |                 try {
238 |                   const { data: comments } = await github.rest.issues.listComments({
239 |                     owner: context.repo.owner,
240 |                     repo: context.repo.repo,
241 |                     issue_number: issue.number
242 |                   });
243 | 
244 |                   comments.forEach(comment => {
245 |                     metrics.contributors.comment_authors.add(comment.user.login);
246 |                   });
247 | 
248 |                   // Find first maintainer response
249 |                   const maintainerResponse = comments.find(comment => 
250 |                     comment.user.login === 'pab1it0' ||
251 |                     comment.author_association === 'OWNER' ||
252 |                     comment.author_association === 'MEMBER'
253 |                   );
254 | 
255 |                   if (maintainerResponse) {
256 |                     const responseTime = (new Date(maintainerResponse.created_at) - createdAt) / (1000 * 60 * 60);
257 |                     responseTimeData.push(responseTime);
258 |                   } else {
259 |                     metrics.response_times.issues_without_response++;
260 |                   }
261 | 
262 |                   // Check for triage events
263 |                   const events = await github.rest.issues.listEvents({
264 |                     owner: context.repo.owner,
265 |                     repo: context.repo.repo,
266 |                     issue_number: issue.number
267 |                   });
268 | 
269 |                   for (const event of events.data) {
270 |                     if (event.event === 'labeled' && event.created_at >= startDate.toISOString()) {
271 |                       const labelName = event.label?.name;
272 |                       if (labelName && !labelName.startsWith('status: needs-triage')) {
273 |                         const triageTime = (new Date(event.created_at) - createdAt) / (1000 * 60 * 60);
274 |                         triageEvents.push(triageTime);
275 |                         metrics.triage.triaged_this_period++;
276 |                         break;
277 |                       }
278 |                     }
279 |                   }
280 |                 } catch (error) {
281 |                   console.log(`Error fetching comments/events for issue #${issue.number}: ${error.message}`);
282 |                 }
283 |               }
284 |             }
285 | 
286 |             // Calculate averages
287 |             if (responseTimeData.length > 0) {
288 |               metrics.response_times.avg_first_response_hours = 
289 |                 Math.round(responseTimeData.reduce((a, b) => a + b, 0) / responseTimeData.length * 100) / 100;
290 |             }
291 | 
292 |             if (triageEvents.length > 0) {
293 |               metrics.triage.avg_triage_time_hours = 
294 |                 Math.round(triageEvents.reduce((a, b) => a + b, 0) / triageEvents.length * 100) / 100;
295 |             }
296 | 
297 |             // Convert sets to counts
298 |             metrics.contributors.unique_issue_creators = metrics.contributors.issue_creators.size;
299 |             metrics.contributors.unique_commenters = metrics.contributors.comment_authors.size;
300 |             metrics.contributors.unique_assignees = metrics.contributors.assignees.size;
301 | 
302 |             // Clean up for JSON serialization
303 |             delete metrics.contributors.issue_creators;
304 |             delete metrics.contributors.comment_authors;
305 |             delete metrics.contributors.assignees;
306 | 
307 |             console.log('Metrics calculation completed');
308 |             core.setOutput('metrics', JSON.stringify(metrics));
309 |             
310 |             return metrics;
311 | 
312 |   generate-report:
313 |     runs-on: ubuntu-latest
314 |     needs: collect-metrics
315 |     
316 |     steps:
317 |       - name: Checkout repository
318 |         uses: actions/checkout@v4
319 | 
320 |       - name: Generate Markdown Report
321 |         uses: actions/github-script@v7
322 |         with:
323 |           script: |
324 |             const metrics = JSON.parse('${{ needs.collect-metrics.outputs.metrics_json }}');
325 |             
326 |             // Generate markdown report
327 |             let report = `# 📊 Issue Triage Report\n\n`;
328 |             report += `**Period**: ${metrics.period.type} (${metrics.period.days} days)\n`;
329 |             report += `**Generated**: ${new Date().toISOString()}\n\n`;
330 | 
331 |             // Overview Section
332 |             report += `## 📈 Overview\n\n`;
333 |             report += `| Metric | Count |\n`;
334 |             report += `|--------|-------|\n`;
335 |             report += `| Total Issues | ${metrics.overview.total_issues} |\n`;
336 |             report += `| Open Issues | ${metrics.overview.open_issues} |\n`;
337 |             report += `| Closed Issues | ${metrics.overview.closed_issues} |\n`;
338 |             report += `| New Issues | ${metrics.overview.new_issues} |\n`;
339 |             report += `| Resolved Issues | ${metrics.overview.resolved_issues} |\n`;
340 |             report += `| Total PRs | ${metrics.overview.total_prs} |\n\n`;
341 | 
342 |             // Triage Section
343 |             report += `## 🏷️ Triage Status\n\n`;
344 |             report += `| Metric | Value |\n`;
345 |             report += `|--------|-------|\n`;
346 |             report += `| Issues Needing Triage | ${metrics.triage.needs_triage} |\n`;
347 |             report += `| Issues Triaged This Period | ${metrics.triage.triaged_this_period} |\n`;
348 |             report += `| Average Triage Time | ${metrics.triage.avg_triage_time_hours}h |\n`;
349 |             report += `| Overdue Triage (>3 days) | ${metrics.triage.overdue_triage} |\n\n`;
350 | 
351 |             // Response Times Section
352 |             report += `## ⏱️ Response Times\n\n`;
353 |             report += `| Metric | Value |\n`;
354 |             report += `|--------|-------|\n`;
355 |             report += `| Average First Response | ${metrics.response_times.avg_first_response_hours}h |\n`;
356 |             report += `| Issues Without Response | ${metrics.response_times.issues_without_response} |\n\n`;
357 | 
358 |             // Labels Distribution
359 |             report += `## 🏷️ Label Distribution\n\n`;
360 |             
361 |             if (Object.keys(metrics.labels.by_priority).length > 0) {
362 |               report += `### Priority Distribution\n`;
363 |               for (const [priority, count] of Object.entries(metrics.labels.by_priority)) {
364 |                 report += `- **${priority}**: ${count} issues\n`;
365 |               }
366 |               report += `\n`;
367 |             }
368 | 
369 |             if (Object.keys(metrics.labels.by_component).length > 0) {
370 |               report += `### Component Distribution\n`;
371 |               for (const [component, count] of Object.entries(metrics.labels.by_component)) {
372 |                 report += `- **${component}**: ${count} issues\n`;
373 |               }
374 |               report += `\n`;
375 |             }
376 | 
377 |             if (Object.keys(metrics.labels.by_type).length > 0) {
378 |               report += `### Type Distribution\n`;
379 |               for (const [type, count] of Object.entries(metrics.labels.by_type)) {
380 |                 report += `- **${type}**: ${count} issues\n`;
381 |               }
382 |               report += `\n`;
383 |             }
384 | 
385 |             // Contributors Section
386 |             report += `## 👥 Contributors\n\n`;
387 |             report += `| Metric | Count |\n`;
388 |             report += `|--------|-------|\n`;
389 |             report += `| Unique Issue Creators | ${metrics.contributors.unique_issue_creators} |\n`;
390 |             report += `| Unique Commenters | ${metrics.contributors.unique_commenters} |\n`;
391 |             report += `| Active Assignees | ${metrics.contributors.unique_assignees} |\n\n`;
392 | 
393 |             // Quality Metrics Section
394 |             report += `## ✅ Quality Metrics\n\n`;
395 |             report += `| Metric | Count |\n`;
396 |             report += `|--------|-------|\n`;
397 |             report += `| Issues Using Templates | ${metrics.quality.issues_with_templates} |\n`;
398 |             report += `| Issues Missing Information | ${metrics.quality.issues_missing_info} |\n`;
399 |             report += `| Stale Issues (>30 days) | ${metrics.quality.stale_issues} |\n\n`;
400 | 
401 |             // Recommendations Section
402 |             report += `## 💡 Recommendations\n\n`;
403 |             
404 |             if (metrics.triage.overdue_triage > 0) {
405 |               report += `- ⚠️ **${metrics.triage.overdue_triage} issues need immediate triage** (overdue >3 days)\n`;
406 |             }
407 |             
408 |             if (metrics.response_times.issues_without_response > 0) {
409 |               report += `- 📝 **${metrics.response_times.issues_without_response} issues lack maintainer response**\n`;
410 |             }
411 |             
412 |             if (metrics.quality.stale_issues > 5) {
413 |               report += `- 🧹 **Consider reviewing ${metrics.quality.stale_issues} stale issues** for closure\n`;
414 |             }
415 |             
416 |             if (metrics.quality.issues_missing_info > metrics.quality.issues_with_templates) {
417 |               report += `- 📋 **Improve issue template adoption** - many issues lack sufficient information\n`;
418 |             }
419 | 
420 |             const triageEfficiency = (metrics.triage.triaged_this_period + metrics.triage.needs_triage) > 0 ? metrics.triage.triaged_this_period / (metrics.triage.triaged_this_period + metrics.triage.needs_triage) * 100 : 100;
421 |             if (triageEfficiency < 80) {
422 |               report += `- ⏰ **Triage efficiency is ${Math.round(triageEfficiency)}%** - consider increasing triage frequency\n`;
423 |             }
424 | 
425 |             report += `\n---\n`;
426 |             report += `*Report generated automatically by GitHub Actions*\n`;
427 | 
428 |             // Save report as an artifact and optionally create an issue
429 |             const fs = require('fs');
430 |             const reportPath = `/tmp/triage-report-${new Date().toISOString().split('T')[0]}.md`;
431 |             fs.writeFileSync(reportPath, report);
432 |             
433 |             console.log('Generated triage report:');
434 |             console.log(report);
435 | 
436 |             // For weekly or manually dispatched runs, create an issue containing the report
437 |             if (metrics.period.type === 'weekly' || '${{ github.event_name }}' === 'workflow_dispatch') {
438 |               try {
439 |                 await github.rest.issues.create({
440 |                   owner: context.repo.owner,
441 |                   repo: context.repo.repo,
442 |                   title: `📊 Weekly Triage Report - ${new Date().toISOString().split('T')[0]}`,
443 |                   body: report,
444 |                   labels: ['type: maintenance', 'status: informational']
445 |                 });
446 |               } catch (error) {
447 |                 console.log(`Could not create issue with report: ${error.message}`);
448 |               }
449 |             }
450 | 
451 |       - name: Upload Report Artifact
452 |         uses: actions/upload-artifact@v4
453 |         with:
454 |           name: triage-report-${{ github.run_id }}
455 |           path: /tmp/triage-report-*.md
456 |           retention-days: 30
```
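
The recommendations step above computes a triage-efficiency percentage from the collected metrics, guarded so that a period with no triage activity reports 100% rather than a division-by-zero result. For reference, a minimal Python sketch (not part of this repository) of the same guarded formula, e.g. for post-processing the metrics behind a downloaded `triage-report` artifact; `sample` is a hypothetical payload containing only the fields the helper reads:

```python
import json

def triage_efficiency(metrics: dict) -> float:
    """Percentage of triage-eligible issues triaged this period.

    Mirrors the workflow's recommendation logic: when nothing was
    triaged and nothing is pending, report 100 rather than NaN.
    """
    triaged = metrics["triage"]["triaged_this_period"]
    pending = metrics["triage"]["needs_triage"]
    total = triaged + pending
    return 100.0 if total == 0 else triaged / total * 100

# Hypothetical payload with only the fields this helper reads.
sample = json.loads('{"triage": {"triaged_this_period": 8, "needs_triage": 2}}')
print(triage_efficiency(sample))  # 80.0
```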

--------------------------------------------------------------------------------
/tests/test_mcp_2025_features.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for MCP 2025 specification features (v1.4.1).
  2 | 
  3 | This module tests the following features added in v1.4.1:
  4 | - Tool annotations (readOnlyHint, destructiveHint, idempotentHint, openWorldHint)
  5 | - Tool titles for human-friendly display
  6 | - Progress notifications for long-running operations
  7 | - Resource links in query results
  8 | - Metrics caching infrastructure
  9 | """
 10 | 
 11 | import pytest
 12 | import json
 13 | import time
 14 | from unittest.mock import patch, MagicMock, AsyncMock, call
 15 | from fastmcp import Client
 16 | from prometheus_mcp_server.server import (
 17 |     mcp,
 18 |     get_cached_metrics,
 19 |     _metrics_cache,
 20 |     _CACHE_TTL
 21 | )
 22 | 
 23 | 
 24 | @pytest.fixture
 25 | def mock_make_request():
 26 |     """Mock the make_prometheus_request function."""
 27 |     with patch("prometheus_mcp_server.server.make_prometheus_request") as mock:
 28 |         yield mock
 29 | 
 30 | 
 31 | class TestToolAnnotations:
 32 |     """Tests for MCP 2025 tool annotations."""
 33 | 
 34 |     @pytest.mark.asyncio
 35 |     async def test_all_tools_have_annotations(self):
 36 |         """Verify all tools have proper MCP 2025 annotations."""
 37 |         async with Client(mcp) as client:
 38 |             tools = await client.list_tools()
 39 | 
 40 |             # All tools should have annotations
 41 |             expected_tools = [
 42 |                 "health_check",
 43 |                 "execute_query",
 44 |                 "execute_range_query",
 45 |                 "list_metrics",
 46 |                 "get_metric_metadata",
 47 |                 "get_targets"
 48 |             ]
 49 | 
 50 |             tool_names = [tool.name for tool in tools]
 51 |             for expected_tool in expected_tools:
 52 |                 assert expected_tool in tool_names, f"Tool {expected_tool} not found"
 53 | 
 54 |     @pytest.mark.asyncio
 55 |     async def test_tools_have_readonly_annotation(self):
 56 |         """Verify all tools are marked as read-only."""
 57 |         async with Client(mcp) as client:
 58 |             tools = await client.list_tools()
 59 | 
 60 |             for tool in tools:
 61 |                 # All Prometheus query tools should be read-only
 62 |                 if hasattr(tool, 'annotations') and tool.annotations:
 63 |                     assert tool.annotations.readOnlyHint is True, \
 64 |                         f"Tool {tool.name} should have readOnlyHint=True"
 65 | 
 66 |     @pytest.mark.asyncio
 67 |     async def test_tools_have_non_destructive_annotation(self):
 68 |         """Verify all tools are marked as non-destructive."""
 69 |         async with Client(mcp) as client:
 70 |             tools = await client.list_tools()
 71 | 
 72 |             for tool in tools:
 73 |                 # All Prometheus query tools should be non-destructive
 74 |                 if hasattr(tool, 'annotations') and tool.annotations:
 75 |                     assert tool.annotations.destructiveHint is False, \
 76 |                         f"Tool {tool.name} should have destructiveHint=False"
 77 | 
 78 |     @pytest.mark.asyncio
 79 |     async def test_tools_have_idempotent_annotation(self):
 80 |         """Verify all tools are marked as idempotent."""
 81 |         async with Client(mcp) as client:
 82 |             tools = await client.list_tools()
 83 | 
 84 |             for tool in tools:
 85 |                 # All Prometheus query tools should be idempotent
 86 |                 if hasattr(tool, 'annotations') and tool.annotations:
 87 |                     assert tool.annotations.idempotentHint is True, \
 88 |                         f"Tool {tool.name} should have idempotentHint=True"
 89 | 
 90 |     @pytest.mark.asyncio
 91 |     async def test_tools_have_openworld_annotation(self):
 92 |         """Verify all tools are marked as open-world (accessing external resources)."""
 93 |         async with Client(mcp) as client:
 94 |             tools = await client.list_tools()
 95 | 
 96 |             for tool in tools:
 97 |                 # All Prometheus tools access external Prometheus server
 98 |                 if hasattr(tool, 'annotations') and tool.annotations:
 99 |                     assert tool.annotations.openWorldHint is True, \
100 |                         f"Tool {tool.name} should have openWorldHint=True"
101 | 
102 | 
103 | class TestToolTitles:
104 |     """Tests for human-friendly tool titles."""
105 | 
106 |     @pytest.mark.asyncio
107 |     async def test_all_tools_have_titles(self):
108 |         """Verify all tools have human-friendly titles."""
109 |         async with Client(mcp) as client:
110 |             tools = await client.list_tools()
111 | 
112 |             expected_titles = {
113 |                 "health_check": "Health Check",
114 |                 "execute_query": "Execute PromQL Query",
115 |                 "execute_range_query": "Execute PromQL Range Query",
116 |                 "list_metrics": "List Available Metrics",
117 |                 "get_metric_metadata": "Get Metric Metadata",
118 |                 "get_targets": "Get Scrape Targets"
119 |             }
120 | 
121 |             for tool in tools:
122 |                 if tool.name in expected_titles:
123 |                     if hasattr(tool, 'annotations') and tool.annotations:
124 |                         assert hasattr(tool.annotations, 'title'), \
125 |                             f"Tool {tool.name} should have a title"
126 |                         assert tool.annotations.title == expected_titles[tool.name], \
127 |                             f"Tool {tool.name} has incorrect title"
128 | 
129 |     @pytest.mark.asyncio
130 |     async def test_tool_titles_are_descriptive(self):
131 |         """Verify tool titles are more descriptive than function names."""
132 |         async with Client(mcp) as client:
133 |             tools = await client.list_tools()
134 | 
135 |             for tool in tools:
136 |                 if hasattr(tool, 'annotations') and tool.annotations and hasattr(tool.annotations, 'title'):
137 |                     title = tool.annotations.title
138 |                     # Title should be different from function name (more readable)
139 |                     assert title != tool.name, \
140 |                         f"Tool {tool.name} title should differ from function name"
141 |                     # Title should have spaces (human-friendly)
142 |                     assert ' ' in title or len(title.split()) > 1 or title[0].isupper(), \
143 |                         f"Tool {tool.name} title should be human-friendly"
144 | 
145 | 
146 | class TestProgressNotifications:
147 |     """Tests for progress notification support.
148 | 
149 |     Note: Progress notifications are tested indirectly through the MCP client,
150 |     as they are an internal implementation detail that gets handled by FastMCP.
151 |     """
152 | 
153 |     @pytest.mark.asyncio
154 |     async def test_execute_range_query_with_progress_works(self, mock_make_request):
155 |         """Verify execute_range_query works with progress support."""
156 |         mock_make_request.return_value = {
157 |             "resultType": "matrix",
158 |             "result": [{"metric": {"__name__": "up"}, "values": [[1617898400, "1"]]}]
159 |         }
160 | 
161 |         async with Client(mcp) as client:
162 |             # Execute - should not error even though progress is implemented
163 |             result = await client.call_tool(
164 |                 "execute_range_query",
165 |                 {
166 |                     "query": "up",
167 |                     "start": "2023-01-01T00:00:00Z",
168 |                     "end": "2023-01-01T01:00:00Z",
169 |                     "step": "15s"
170 |                 }
171 |             )
172 | 
173 |             # Verify result is valid
174 |             assert result.data["resultType"] == "matrix"
175 |             assert len(result.data["result"]) == 1
176 | 
177 |     @pytest.mark.asyncio
178 |     async def test_list_metrics_with_progress_works(self, mock_make_request):
179 |         """Verify list_metrics works with progress support."""
180 |         mock_make_request.return_value = ["metric1", "metric2", "metric3"]
181 | 
182 |         async with Client(mcp) as client:
183 |             # Execute - should not error even though progress is implemented
184 |             result = await client.call_tool("list_metrics", {})
185 | 
186 |             # Verify result is valid - now returns a dict with pagination info
187 |             assert isinstance(result.data, dict)
188 |             assert result.data["total_count"] == 3
189 |             assert result.data["returned_count"] == 3
190 |             assert "metric1" in result.data["metrics"]
191 | 
192 | 
193 | class TestResourceLinks:
194 |     """Tests for resource links in query results."""
195 | 
196 |     @pytest.mark.asyncio
197 |     @pytest.mark.parametrize("disable_links,should_have_links", [
198 |         (False, True),
199 |         (True, False),
200 |     ])
201 |     async def test_execute_query_includes_prometheus_ui_link(self, mock_make_request, disable_links, should_have_links):
202 |         """Verify execute_query includes/excludes Prometheus UI link based on config."""
203 |         with patch("prometheus_mcp_server.server.config.disable_prometheus_links", disable_links):
204 |             mock_make_request.return_value = {
205 |                 "resultType": "vector",
206 |                 "result": [{"metric": {"__name__": "up"}, "value": [1617898448.214, "1"]}]
207 |             }
208 | 
209 |             async with Client(mcp) as client:
210 |                 result = await client.call_tool("execute_query", {"query": "up"})
211 | 
212 |                 if should_have_links:
213 |                     assert "links" in result.data, "Result should include links"
214 |                     assert len(result.data["links"]) > 0, "Should have at least one link"
215 | 
216 |                     # Check link structure
217 |                     link = result.data["links"][0]
218 |                     assert "href" in link, "Link should have href"
219 |                     assert "rel" in link, "Link should have rel"
220 |                     assert "title" in link, "Link should have title"
221 | 
222 |                     # Verify link points to Prometheus
223 |                     assert "/graph?" in link["href"]
224 |                     assert link["rel"] == "prometheus-ui"
225 |                     assert "up" in link["href"], "Query should be included in link"
226 |                 else:
227 |                     assert "links" not in result.data, "Result should not include links when disabled"
228 | 
229 |     @pytest.mark.asyncio
230 |     @pytest.mark.parametrize("disable_links,should_have_links", [
231 |         (False, True),
232 |         (True, False),
233 |     ])
234 |     async def test_execute_range_query_includes_prometheus_ui_link(self, mock_make_request, disable_links, should_have_links):
235 |         """Verify execute_range_query includes/excludes Prometheus UI link based on config."""
236 |         with patch("prometheus_mcp_server.server.config.disable_prometheus_links", disable_links):
237 |             mock_make_request.return_value = {
238 |                 "resultType": "matrix",
239 |                 "result": []
240 |             }
241 | 
242 |             async with Client(mcp) as client:
243 |                 result = await client.call_tool(
244 |                     "execute_range_query",
245 |                     {
246 |                         "query": "rate(http_requests_total[5m])",
247 |                         "start": "2023-01-01T00:00:00Z",
248 |                         "end": "2023-01-01T01:00:00Z",
249 |                         "step": "15s"
250 |                     }
251 |                 )
252 | 
253 |                 if should_have_links:
254 |                     assert "links" in result.data
255 |                     link = result.data["links"][0]
256 | 
257 |                     # Verify time parameters are in the link
258 |                     assert "rate" in link["href"] or "http_requests_total" in link["href"]
259 |                     assert link["rel"] == "prometheus-ui"
260 |                 else:
261 |                     assert "links" not in result.data, "Result should not include links when disabled"
262 | 
263 |     @pytest.mark.asyncio
264 |     async def test_query_link_includes_time_parameter(self, mock_make_request):
265 |         """Verify instant query link includes time parameter when provided."""
266 |         mock_make_request.return_value = {
267 |             "resultType": "vector",
268 |             "result": []
269 |         }
270 | 
271 |         async with Client(mcp) as client:
272 |             result = await client.call_tool(
273 |                 "execute_query",
274 |                 {
275 |                     "query": "up",
276 |                     "time": "2023-01-01T00:00:00Z"
277 |                 }
278 |             )
279 | 
280 |             link = result.data["links"][0]
281 |             # Link should include the time parameter
282 |             assert "2023-01-01" in link["href"] or "moment" in link["href"]
283 | 
284 |     @pytest.mark.asyncio
285 |     async def test_links_include_required_fields(self, mock_make_request):
286 |         """Verify all links have required fields."""
287 |         mock_make_request.return_value = {
288 |             "resultType": "vector",
289 |             "result": []
290 |         }
291 | 
292 |         async with Client(mcp) as client:
293 |             result = await client.call_tool("execute_query", {"query": "up"})
294 | 
295 |             link = result.data["links"][0]
296 |             assert "href" in link, "Link must have href"
297 |             assert "rel" in link, "Link must have rel"
298 |             assert "title" in link, "Link must have title"
299 |             assert link["rel"] == "prometheus-ui"
300 | 
301 | 
302 | class TestMetricsCaching:
303 |     """Tests for metrics caching infrastructure."""
304 | 
305 |     def test_get_cached_metrics_returns_list(self):
306 |         """Verify get_cached_metrics returns a list of metrics."""
307 |         with patch("prometheus_mcp_server.server.make_prometheus_request") as mock_request:
308 |             mock_request.return_value = ["metric1", "metric2", "metric3"]
309 | 
310 |             result = get_cached_metrics()
311 | 
312 |             assert isinstance(result, list)
313 |             assert len(result) == 3
314 |             assert "metric1" in result
315 | 
316 |     def test_metrics_are_cached(self):
317 |         """Verify metrics are cached and subsequent calls use cache."""
318 |         with patch("prometheus_mcp_server.server.make_prometheus_request") as mock_request:
319 |             mock_request.return_value = ["metric1", "metric2"]
320 | 
321 |             # Clear cache
322 |             _metrics_cache["data"] = None
323 |             _metrics_cache["timestamp"] = 0
324 | 
325 |             # First call should fetch from Prometheus
326 |             result1 = get_cached_metrics()
327 |             assert mock_request.call_count == 1
328 | 
329 |             # Second call should use cache
330 |             result2 = get_cached_metrics()
331 |             assert mock_request.call_count == 1  # Still 1, not called again
332 | 
333 |             assert result1 == result2
334 | 
335 |     def test_cache_expires_after_ttl(self):
336 |         """Verify cache expires after TTL and refreshes."""
337 |         with patch("prometheus_mcp_server.server.make_prometheus_request") as mock_request:
338 |             with patch("prometheus_mcp_server.server.time") as mock_time:
339 |                 mock_request.return_value = ["metric1", "metric2"]
340 | 
341 |                 # Clear cache
342 |                 _metrics_cache["data"] = None
343 |                 _metrics_cache["timestamp"] = 0
344 | 
345 |                 # First call at time 0
346 |                 mock_time.time.return_value = 0
347 |                 result1 = get_cached_metrics()
348 |                 assert mock_request.call_count == 1
349 | 
350 |                 # Call within TTL (at time 100, TTL is 300)
351 |                 mock_time.time.return_value = 100
352 |                 result2 = get_cached_metrics()
353 |                 assert mock_request.call_count == 1  # Still using cache
354 | 
355 |                 # Call after TTL (at time 400, beyond 300s TTL)
356 |                 mock_time.time.return_value = 400
357 |                 mock_request.return_value = ["metric1", "metric2", "metric3"]
358 |                 result3 = get_cached_metrics()
359 |                 assert mock_request.call_count == 2  # Cache refreshed
360 |                 assert len(result3) == 3
361 | 
362 |     def test_cache_ttl_is_5_minutes(self):
363 |         """Verify cache TTL is set to 5 minutes (300 seconds)."""
364 |         assert _CACHE_TTL == 300, "Cache TTL should be 5 minutes (300 seconds)"
365 | 
366 |     def test_cache_handles_errors_gracefully(self):
367 |         """Verify cache returns stale data on error rather than failing."""
368 |         with patch("prometheus_mcp_server.server.make_prometheus_request") as mock_request:
369 |             # First successful call
370 |             mock_request.return_value = ["metric1", "metric2"]
371 |             _metrics_cache["data"] = None
372 |             _metrics_cache["timestamp"] = 0
373 | 
374 |             result1 = get_cached_metrics()
375 |             assert len(result1) == 2
376 | 
377 |             # Expire cache and make request fail
378 |             _metrics_cache["timestamp"] = 0
379 |             mock_request.side_effect = Exception("Connection error")
380 | 
381 |             # Should return stale cache data instead of raising
382 |             result2 = get_cached_metrics()
383 |             assert result2 == ["metric1", "metric2"], \
384 |                 "Should return stale cache data on error"
385 | 
386 |     def test_cache_returns_empty_list_when_no_data(self):
387 |         """Verify cache returns empty list when no data available."""
388 |         with patch("prometheus_mcp_server.server.make_prometheus_request") as mock_request:
389 |             mock_request.side_effect = Exception("Connection error")
390 | 
391 |             # Clear cache completely
392 |             _metrics_cache["data"] = None
393 |             _metrics_cache["timestamp"] = 0
394 | 
395 |             result = get_cached_metrics()
396 |             assert result == [], "Should return empty list when no data available"
397 | 
398 | 
399 | class TestBackwardCompatibility:
400 |     """Tests to ensure new features don't break existing functionality."""
401 | 
402 |     @pytest.mark.asyncio
403 |     async def test_query_results_still_include_resulttype(self, mock_make_request):
404 |         """Verify query results still include original resultType field."""
405 |         mock_make_request.return_value = {
406 |             "resultType": "vector",
407 |             "result": []
408 |         }
409 | 
410 |         async with Client(mcp) as client:
411 |             result = await client.call_tool("execute_query", {"query": "up"})
412 | 
413 |             assert "resultType" in result.data
414 |             assert "result" in result.data
415 | 
416 |     @pytest.mark.asyncio
417 |     async def test_tools_work_via_mcp_client(self, mock_make_request):
418 |         """Verify all tools work when called via MCP client."""
419 |         mock_make_request.return_value = {
420 |             "resultType": "vector",
421 |             "result": []
422 |         }
423 | 
424 |         async with Client(mcp) as client:
425 |             # Should not raise any errors
426 |             result1 = await client.call_tool("execute_query", {"query": "up"})
427 | 
428 |             mock_make_request.return_value = {
429 |                 "resultType": "matrix",
430 |                 "result": []
431 |             }
432 | 
433 |             result2 = await client.call_tool(
434 |                 "execute_range_query",
435 |                 {
436 |                     "query": "up",
437 |                     "start": "2023-01-01T00:00:00Z",
438 |                     "end": "2023-01-01T01:00:00Z",
439 |                     "step": "15s"
440 |                 }
441 |             )
442 | 
443 |             mock_make_request.return_value = ["metric1"]
444 |             result3 = await client.call_tool("list_metrics", {})
445 | 
446 |             assert result1 is not None
447 |             assert result2 is not None
448 |             assert result3 is not None
449 | 
450 | 
451 | class TestMCP2025Integration:
452 |     """Integration tests for MCP 2025 features working together."""
453 | 
454 |     @pytest.mark.asyncio
455 |     async def test_full_query_workflow_with_all_features(self, mock_make_request):
456 |         """Test a complete query workflow using all MCP 2025 features."""
457 |         mock_make_request.return_value = {
458 |             "resultType": "vector",
459 |             "result": [{"metric": {"__name__": "up"}, "value": [1617898448, "1"]}]
460 |         }
461 | 
462 |         async with Client(mcp) as client:
463 |             # List tools and verify annotations
464 |             tools = await client.list_tools()
465 |             assert len(tools) > 0
466 | 
467 |             # Execute query and verify result includes links
468 |             result = await client.call_tool("execute_query", {"query": "up"})
469 |             result_data = result.data
470 | 
471 |             assert "resultType" in result_data
472 |             assert "result" in result_data
473 |             assert "links" in result_data
474 |             assert len(result_data["links"]) > 0
475 | 
476 |     @pytest.mark.asyncio
477 |     async def test_range_query_includes_links(self, mock_make_request):
478 |         """Test range query includes resource links."""
479 |         mock_make_request.return_value = {
480 |             "resultType": "matrix",
481 |             "result": []
482 |         }
483 | 
484 |         async with Client(mcp) as client:
485 |             result = await client.call_tool(
486 |                 "execute_range_query",
487 |                 {
488 |                     "query": "up",
489 |                     "start": "2023-01-01T00:00:00Z",
490 |                     "end": "2023-01-01T01:00:00Z",
491 |                     "step": "15s"
492 |                 }
493 |             )
494 | 
495 |             # Verify links are included
496 |             assert "links" in result.data
497 |             assert len(result.data["links"]) > 0
498 |             assert result.data["links"][0]["rel"] == "prometheus-ui"
499 | 
```
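
The caching tests above pin down a small contract for `get_cached_metrics`: a module-level dict holding `data` and `timestamp`, a 300-second TTL, stale data returned when a refresh fails, and an empty list when nothing has ever been cached. A minimal sketch of that contract, where `fetch` is a stand-in parameter for `make_prometheus_request` (the shipped implementation may differ in detail):

```python
import time

_metrics_cache = {"data": None, "timestamp": 0}
_CACHE_TTL = 300  # seconds, as asserted by test_cache_ttl_is_5_minutes

def get_cached_metrics_sketch(fetch) -> list:
    """Return metric names, refetching only after the TTL has elapsed."""
    now = time.time()
    fresh = (_metrics_cache["data"] is not None
             and now - _metrics_cache["timestamp"] < _CACHE_TTL)
    if not fresh:
        try:
            _metrics_cache["data"] = fetch("label/__name__/values")
            _metrics_cache["timestamp"] = now
        except Exception:
            # Stale data beats no data; fall through to whatever is cached.
            pass
    return _metrics_cache["data"] or []
```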

--------------------------------------------------------------------------------
/tests/test_mcp_protocol_compliance.py:
--------------------------------------------------------------------------------

```python
  1 | """Tests for MCP protocol compliance and tool functionality."""
  2 | 
  3 | import pytest
  4 | import json
  5 | import asyncio
  6 | from unittest.mock import patch, MagicMock, AsyncMock
  7 | from datetime import datetime
  8 | from prometheus_mcp_server import server
  9 | from prometheus_mcp_server.server import (
 10 |     make_prometheus_request, get_prometheus_auth, config, TransportType,
 11 |     execute_query, execute_range_query, list_metrics, get_metric_metadata, get_targets, health_check
 12 | )
 13 | 
 14 | # Test the MCP tools through async wrappers that mirror each tool's logic
 15 | async def execute_query_wrapper(query: str, time=None):
 16 |     """Wrapper to test execute_query functionality."""
 17 |     params = {"query": query}
 18 |     if time:
 19 |         params["time"] = time
 20 |     data = make_prometheus_request("query", params=params)
 21 |     return {"resultType": data["resultType"], "result": data["result"]}
 22 | 
 23 | async def execute_range_query_wrapper(query: str, start: str, end: str, step: str):
 24 |     """Wrapper to test execute_range_query functionality."""  
 25 |     params = {"query": query, "start": start, "end": end, "step": step}
 26 |     data = make_prometheus_request("query_range", params=params)
 27 |     return {"resultType": data["resultType"], "result": data["result"]}
 28 | 
 29 | async def list_metrics_wrapper():
 30 |     """Wrapper to test list_metrics functionality."""
 31 |     return make_prometheus_request("label/__name__/values")
 32 | 
 33 | async def get_metric_metadata_wrapper(metric: str):
 34 |     """Wrapper to test get_metric_metadata functionality."""
 35 |     params = {"metric": metric}
 36 |     data = make_prometheus_request("metadata", params=params)
 37 |     return data["data"][metric]
 38 | 
 39 | async def get_targets_wrapper():
 40 |     """Wrapper to test get_targets functionality."""
 41 |     data = make_prometheus_request("targets")
 42 |     return {"activeTargets": data["activeTargets"], "droppedTargets": data["droppedTargets"]}
 43 | 
 44 | async def health_check_wrapper():
 45 |     """Wrapper to test health_check functionality."""
 46 |     try:
 47 |         health_status = {
 48 |             "status": "healthy",
 49 |             "service": "prometheus-mcp-server", 
 50 |             "version": "1.2.3",
 51 |             "timestamp": datetime.utcnow().isoformat(),
 52 |             "transport": config.mcp_server_config.mcp_server_transport if config.mcp_server_config else "stdio",
 53 |             "configuration": {
 54 |                 "prometheus_url_configured": bool(config.url),
 55 |                 "authentication_configured": bool(config.username or config.token),
 56 |                 "org_id_configured": bool(config.org_id)
 57 |             }
 58 |         }
 59 |         
 60 |         if config.url:
 61 |             try:
 62 |                 make_prometheus_request("query", params={"query": "up", "time": str(int(datetime.utcnow().timestamp()))})
 63 |                 health_status["prometheus_connectivity"] = "healthy"
 64 |                 health_status["prometheus_url"] = config.url
 65 |             except Exception as e:
 66 |                 health_status["prometheus_connectivity"] = "unhealthy"
 67 |                 health_status["prometheus_error"] = str(e)
 68 |                 health_status["status"] = "degraded"
 69 |         else:
 70 |             health_status["status"] = "unhealthy"
 71 |             health_status["error"] = "PROMETHEUS_URL not configured"
 72 |         
 73 |         return health_status
 74 |     except Exception as e:
 75 |         return {
 76 |             "status": "unhealthy",
 77 |             "service": "prometheus-mcp-server",
 78 |             "error": str(e),
 79 |             "timestamp": datetime.utcnow().isoformat()
 80 |         }
 81 | 
 82 | 
 83 | @pytest.fixture
 84 | def mock_prometheus_response():
 85 |     """Mock successful Prometheus API response."""
 86 |     return {
 87 |         "status": "success",
 88 |         "data": {
 89 |             "resultType": "vector",
 90 |             "result": [
 91 |                 {
 92 |                     "metric": {"__name__": "up", "instance": "localhost:9090"},
 93 |                     "value": [1609459200, "1"]
 94 |                 }
 95 |             ]
 96 |         }
 97 |     }
 98 | 
 99 | 
100 | @pytest.fixture
101 | def mock_metrics_response():
102 |     """Mock Prometheus metrics list response."""
103 |     return {
104 |         "status": "success", 
105 |         "data": ["up", "prometheus_build_info", "prometheus_config_last_reload_successful"]
106 |     }
107 | 
108 | 
109 | @pytest.fixture
110 | def mock_metadata_response():
111 |     """Mock Prometheus metadata response."""
112 |     return {
113 |         "status": "success",
114 |         "data": {
115 |             "data": {
116 |                 "up": [
117 |                     {
118 |                         "type": "gauge",
119 |                         "help": "1 if the instance is healthy, 0 otherwise",
120 |                         "unit": ""
121 |                     }
122 |                 ]
123 |             }
124 |         }
125 |     }
126 | 
127 | 
128 | @pytest.fixture
129 | def mock_targets_response():
130 |     """Mock Prometheus targets response."""
131 |     return {
132 |         "status": "success",
133 |         "data": {
134 |             "activeTargets": [
135 |                 {
136 |                     "discoveredLabels": {"__address__": "localhost:9090"},
137 |                     "labels": {"instance": "localhost:9090", "job": "prometheus"},
138 |                     "scrapePool": "prometheus",
139 |                     "scrapeUrl": "http://localhost:9090/metrics",
140 |                     "lastError": "",
141 |                     "lastScrape": "2023-01-01T00:00:00Z",
142 |                     "lastScrapeDuration": 0.001,
143 |                     "health": "up"
144 |                 }
145 |             ],
146 |             "droppedTargets": []
147 |         }
148 |     }
149 | 
150 | 
151 | class TestMCPToolCompliance:
152 |     """Test MCP tool interface compliance."""
153 |     
154 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
155 |     @pytest.mark.asyncio  
156 |     async def test_execute_query_tool_signature(self, mock_request, mock_prometheus_response):
157 |         """Test execute_query tool has correct MCP signature."""
158 |         mock_request.return_value = mock_prometheus_response["data"]
159 |         
160 |         # Ensure config has a URL set for tests
161 |         original_url = config.url
162 |         if not config.url:
163 |             config.url = "http://test-prometheus:9090"
164 |             
165 |         try:
166 |             # Test required parameters
167 |             result = await execute_query_wrapper("up")
168 |             assert isinstance(result, dict)
169 |             assert "resultType" in result
170 |             assert "result" in result
171 |             
172 |             # Test optional parameters
173 |             result = await execute_query_wrapper("up", time="2023-01-01T00:00:00Z")
174 |             assert isinstance(result, dict)
175 |         finally:
176 |             config.url = original_url
177 |     
178 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
179 |     @pytest.mark.asyncio
180 |     async def test_execute_range_query_tool_signature(self, mock_request, mock_prometheus_response):
181 |         """Test execute_range_query tool has correct MCP signature."""
182 |         mock_request.return_value = mock_prometheus_response["data"]
183 |         
184 |         # Test all required parameters
185 |         result = await execute_range_query_wrapper(
186 |             query="up",
187 |             start="2023-01-01T00:00:00Z", 
188 |             end="2023-01-01T01:00:00Z",
189 |             step="1m"
190 |         )
191 |         assert isinstance(result, dict)
192 |         assert "resultType" in result
193 |         assert "result" in result
194 |     
195 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
196 |     @pytest.mark.asyncio
197 |     async def test_list_metrics_tool_signature(self, mock_request, mock_metrics_response):
198 |         """Test list_metrics tool has correct MCP signature."""
199 |         mock_request.return_value = mock_metrics_response["data"]
200 |         
201 |         result = await list_metrics_wrapper()
202 |         assert isinstance(result, list)
203 |         assert all(isinstance(metric, str) for metric in result)
204 |     
205 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
206 |     @pytest.mark.asyncio
207 |     async def test_get_metric_metadata_tool_signature(self, mock_request, mock_metadata_response):
208 |         """Test get_metric_metadata tool has correct MCP signature."""
209 |         mock_request.return_value = mock_metadata_response["data"]
210 |         
211 |         result = await get_metric_metadata_wrapper("up")
212 |         assert isinstance(result, list)
213 |         assert all(isinstance(metadata, dict) for metadata in result)
214 |     
215 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
216 |     @pytest.mark.asyncio
217 |     async def test_get_targets_tool_signature(self, mock_request, mock_targets_response):
218 |         """Test get_targets tool has correct MCP signature."""
219 |         mock_request.return_value = mock_targets_response["data"]
220 |         
221 |         result = await get_targets_wrapper()
222 |         assert isinstance(result, dict)
223 |         assert "activeTargets" in result
224 |         assert "droppedTargets" in result
225 |         assert isinstance(result["activeTargets"], list)
226 |         assert isinstance(result["droppedTargets"], list)
227 |     
228 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
229 |     @pytest.mark.asyncio
230 |     async def test_health_check_tool_signature(self, mock_request):
231 |         """Test health_check tool has correct MCP signature."""
232 |         # Mock successful Prometheus connectivity
233 |         mock_request.return_value = {"resultType": "vector", "result": []}
234 |         
235 |         result = await health_check_wrapper()
236 |         assert isinstance(result, dict)
237 |         assert "status" in result
238 |         assert "service" in result
239 |         assert "timestamp" in result
240 |         assert result["service"] == "prometheus-mcp-server"
241 | 
242 | 
243 | class TestMCPToolErrorHandling:
244 |     """Test MCP tool error handling compliance."""
245 |     
246 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
247 |     @pytest.mark.asyncio
248 |     async def test_execute_query_handles_prometheus_errors(self, mock_request):
249 |         """Test execute_query handles Prometheus API errors gracefully."""
250 |         mock_request.side_effect = ValueError("Prometheus API error: query timeout")
251 |         
252 |         with pytest.raises(ValueError):
253 |             await execute_query_wrapper("invalid_query{")
254 |     
255 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
256 |     @pytest.mark.asyncio
257 |     async def test_execute_range_query_handles_network_errors(self, mock_request):
258 |         """Test execute_range_query handles network errors gracefully."""
259 |         import requests
260 |         mock_request.side_effect = requests.exceptions.ConnectionError("Connection refused")
261 |         
262 |         with pytest.raises(requests.exceptions.ConnectionError):
263 |             await execute_range_query_wrapper("up", "now-1h", "now", "1m")
264 |     
265 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
266 |     @pytest.mark.asyncio
267 |     async def test_health_check_handles_configuration_errors(self, mock_request):
268 |         """Test health_check handles configuration errors gracefully."""
269 |         # Test with missing Prometheus URL
270 |         original_url = config.url
271 |         config.url = ""
272 |         
273 |         try:
274 |             result = await health_check_wrapper()
275 |             assert result["status"] == "unhealthy" 
276 |             assert "error" in result or "PROMETHEUS_URL" in str(result)
277 |         finally:
278 |             config.url = original_url
279 |     
280 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
281 |     @pytest.mark.asyncio
282 |     async def test_health_check_handles_connectivity_errors(self, mock_request):
283 |         """Test health_check handles Prometheus connectivity errors."""
284 |         mock_request.side_effect = Exception("Connection timeout")
285 |         
286 |         result = await health_check_wrapper()
287 |         assert result["status"] in ["unhealthy", "degraded"]
288 |         assert "prometheus_connectivity" in result or "error" in result
289 | 
290 | 
291 | class TestMCPDataFormats:
292 |     """Test MCP tool data format compliance."""
293 |     
294 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
295 |     @pytest.mark.asyncio
296 |     async def test_execute_query_returns_valid_json(self, mock_request, mock_prometheus_response):
297 |         """Test execute_query returns JSON-serializable data."""
298 |         mock_request.return_value = mock_prometheus_response["data"]
299 |         
300 |         result = await execute_query_wrapper("up")
301 |         
302 |         # Verify JSON serializability
303 |         json_str = json.dumps(result)
304 |         assert json_str is not None
305 |         
306 |         # Verify structure
307 |         parsed = json.loads(json_str)
308 |         assert "resultType" in parsed
309 |         assert "result" in parsed
310 |     
311 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
312 |     @pytest.mark.asyncio
313 |     async def test_all_tools_return_json_serializable_data(self, mock_request):
314 |         """Test all MCP tools return JSON-serializable data."""
315 |         # Setup various mock responses
316 |         mock_request.side_effect = [
317 |             {"resultType": "vector", "result": []},  # execute_query
318 |             {"resultType": "matrix", "result": []},  # execute_range_query
319 |             ["metric1", "metric2"],  # list_metrics
320 |             {"data": {"metric1": [{"type": "gauge", "help": "test"}]}},  # get_metric_metadata
321 |             {"activeTargets": [], "droppedTargets": []},  # get_targets
322 |         ]
323 |         
324 |         # Test all tools
325 |         tools_and_calls = [
326 |             (execute_query_wrapper, ("up",)),
327 |             (execute_range_query_wrapper, ("up", "now-1h", "now", "1m")),
328 |             (list_metrics_wrapper, ()),
329 |             (get_metric_metadata_wrapper, ("metric1",)),
330 |             (get_targets_wrapper, ()),
331 |         ]
332 |         
333 |         for tool, args in tools_and_calls:
334 |             result = await tool(*args)
335 |             
336 |             # Verify JSON serializability
337 |             try:
338 |                 json_str = json.dumps(result)
339 |                 assert json_str is not None
340 |             except (TypeError, ValueError) as e:
341 |                 pytest.fail(f"Tool {tool.__name__} returned non-JSON-serializable data: {e}")
342 | 
343 | 
344 | class TestMCPServerConfiguration:
345 |     """Test MCP server configuration compliance."""
346 |     
347 |     def test_transport_type_validation(self):
348 |         """Test transport type validation works correctly."""
349 |         # Valid transport types
350 |         valid_transports = ["stdio", "http", "sse"]
351 |         for transport in valid_transports:
352 |             assert transport in TransportType.values()
353 |         
354 |         # Invalid transport types should not be in values
355 |         invalid_transports = ["tcp", "websocket", "grpc"]
356 |         for transport in invalid_transports:
357 |             assert transport not in TransportType.values()
358 |     
359 |     def test_server_config_validation(self):
360 |         """Test server configuration validation."""
361 |         from prometheus_mcp_server.server import MCPServerConfig, PrometheusConfig
362 |         
363 |         # Valid configuration
364 |         mcp_config = MCPServerConfig(
365 |             mcp_server_transport="http",
366 |             mcp_bind_host="127.0.0.1", 
367 |             mcp_bind_port=8080
368 |         )
369 |         assert mcp_config.mcp_server_transport == "http"
370 |         
371 |         # Test Prometheus config
372 |         prometheus_config = PrometheusConfig(
373 |             url="http://prometheus:9090",
374 |             mcp_server_config=mcp_config
375 |         )
376 |         assert prometheus_config.url == "http://prometheus:9090"
377 |     
378 |     def test_authentication_configuration(self):
379 |         """Test authentication configuration options."""
380 |         from prometheus_mcp_server.server import get_prometheus_auth
381 |         
382 |         # Test with no authentication
383 |         original_config = {
384 |             'username': config.username,
385 |             'password': config.password, 
386 |             'token': config.token
387 |         }
388 |         
389 |         try:
390 |             config.username = ""
391 |             config.password = ""
392 |             config.token = ""
393 |             
394 |             auth = get_prometheus_auth()
395 |             assert auth is None
396 |             
397 |             # Test with basic auth
398 |             config.username = "testuser"
399 |             config.password = "testpass"
400 |             config.token = ""
401 |             
402 |             auth = get_prometheus_auth()
403 |             assert auth is not None
404 |             
405 |             # Test with token auth (should take precedence)
406 |             config.token = "test-token"
407 |             
408 |             auth = get_prometheus_auth()
409 |             assert auth is not None
410 |             assert "Authorization" in auth
411 |             assert "Bearer" in auth["Authorization"]
412 |             
413 |         finally:
414 |             # Restore original config
415 |             config.username = original_config['username']
416 |             config.password = original_config['password']
417 |             config.token = original_config['token']
418 | 
419 | 
420 | class TestMCPProtocolVersioning:
421 |     """Test MCP protocol versioning and capabilities."""
422 |     
423 |     def test_mcp_server_info(self):
424 |         """Test MCP server provides correct server information."""
425 |         # Test FastMCP server instantiation
426 |         from prometheus_mcp_server.server import mcp
427 |         
428 |         assert mcp is not None
429 |         # FastMCP should have a name
430 |         assert hasattr(mcp, 'name') or hasattr(mcp, '_name')
431 |     
432 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
433 |     @pytest.mark.asyncio
434 |     async def test_tool_descriptions_are_present(self, mock_request):
435 |         """Test that all MCP tools have proper descriptions."""
436 |         # All tools should be registered with descriptions
437 |         tools = [
438 |             execute_query,
439 |             execute_range_query,
440 |             list_metrics,
441 |             get_metric_metadata,
442 |             get_targets,
443 |             health_check
444 |         ]
445 |         
446 |         for tool in tools:
447 |             # Each tool should have a description (FastMCP tools have description attribute)
448 |             assert hasattr(tool, 'description')
449 |             assert tool.description is not None and tool.description.strip() != ""
450 |     
451 |     def test_server_capabilities(self):
452 |         """Test server declares proper MCP capabilities."""
453 |         # Test that the server supports the expected transports
454 |         transports = ["stdio", "http", "sse"]
455 |         
456 |         for transport in transports:
457 |             assert transport in TransportType.values()
458 |     
459 |     @pytest.mark.asyncio
460 |     async def test_error_response_format(self):
461 |         """Test that error responses follow MCP format."""
462 |         # Test with invalid configuration to trigger errors
463 |         original_url = config.url
464 |         config.url = ""
465 |         
466 |         try:
467 |             result = await health_check_wrapper()
468 |             
469 |             # Error responses should be structured
470 |             assert isinstance(result, dict)
471 |             assert "status" in result
472 |             assert result["status"] in ["unhealthy", "degraded", "error"]
473 |             
474 |         finally:
475 |             config.url = original_url
476 | 
477 | 
478 | class TestMCPConcurrencyAndPerformance:
479 |     """Test MCP tools handle concurrency and perform well."""
480 |     
481 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
482 |     @pytest.mark.asyncio
483 |     async def test_concurrent_tool_execution(self, mock_request, mock_prometheus_response):
484 |         """Test tools can handle concurrent execution."""
485 |         def mock_side_effect(endpoint, params=None):
486 |             if endpoint == "targets":
487 |                 return {"activeTargets": [], "droppedTargets": []}
488 |             elif endpoint == "label/__name__/values":
489 |                 return ["up", "prometheus_build_info"]
490 |             else:
491 |                 return mock_prometheus_response["data"]
492 |         
493 |         mock_request.side_effect = mock_side_effect
494 |         
495 |         # Create multiple concurrent tasks
496 |         tasks = [
497 |             execute_query_wrapper("up"),
498 |             execute_query_wrapper("prometheus_build_info"),
499 |             list_metrics_wrapper(),
500 |             get_targets_wrapper()
501 |         ]
502 |         
503 |         # Execute concurrently
504 |         results = await asyncio.gather(*tasks)
505 |         
506 |         # All should complete successfully
507 |         assert len(results) == 4
508 |         for result in results:
509 |             assert result is not None
510 |     
511 |     @patch('test_mcp_protocol_compliance.make_prometheus_request')
512 |     @pytest.mark.asyncio
513 |     async def test_tool_timeout_handling(self, mock_request):
514 |         """Test tools handle timeouts gracefully."""
515 |         # Simulate slow response
516 |         def slow_response(*args, **kwargs):
517 |             import time
518 |             time.sleep(0.1)
519 |             return {"resultType": "vector", "result": []}
520 |         
521 |         mock_request.side_effect = slow_response
522 |         
523 |         # This should complete (not testing actual timeout, just that it's async)
524 |         result = await execute_query_wrapper("up")
525 |         assert result is not None
```
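
`test_authentication_configuration` fixes the credential precedence the server is expected to follow: a bearer token wins over basic-auth credentials, and the absence of both yields `None`. A sketch of that contract with a hypothetical signature (the real `get_prometheus_auth` reads from `config` rather than taking arguments):

```python
from requests.auth import HTTPBasicAuth

def get_prometheus_auth_sketch(username: str = "", password: str = "", token: str = ""):
    """Token auth takes precedence; returns the three shapes the tests assert on."""
    if token:
        return {"Authorization": f"Bearer {token}"}
    if username and password:
        return HTTPBasicAuth(username, password)
    return None

assert get_prometheus_auth_sketch() is None
assert get_prometheus_auth_sketch(username="u", password="p") is not None
assert get_prometheus_auth_sketch(token="t")["Authorization"] == "Bearer t"
```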

--------------------------------------------------------------------------------
/.github/workflows/bug-triage.yml:
--------------------------------------------------------------------------------

```yaml
  1 | name: Bug Triage Automation
  2 | 
  3 | on:
  4 |   issues:
  5 |     types: [opened, edited, labeled, unlabeled, assigned, unassigned]
  6 |   issue_comment:
  7 |     types: [created, edited]
  8 |   pull_request:
  9 |     types: [opened, closed]
 10 |   schedule:
 11 |     # Run triage check every 6 hours
 12 |     - cron: '0 */6 * * *'
 13 |   workflow_dispatch:
 14 |     inputs:
 15 |       triage_all:
 16 |         description: 'Re-triage all open issues'
 17 |         required: false
 18 |         default: false
 19 |         type: boolean
 20 | 
 21 | jobs:
 22 |   auto-triage:
 23 |     runs-on: ubuntu-latest
 24 |     if: github.event_name == 'issues' || github.event_name == 'issue_comment'
 25 |     permissions:
 26 |       issues: write
 27 |       contents: read
 28 |       pull-requests: read
 29 | 
 30 |     steps:
 31 |       - name: Checkout repository
 32 |         uses: actions/checkout@v4
 33 | 
 34 |       - name: Auto-label new issues
 35 |         if: github.event.action == 'opened' && github.event_name == 'issues'
 36 |         uses: actions/github-script@v7
 37 |         with:
 38 |           script: |
 39 |             const issue = context.payload.issue;
 40 |             const title = issue.title.toLowerCase();
 41 |             const body = issue.body ? issue.body.toLowerCase() : '';
 42 |             const labels = [];
 43 | 
 44 |             // Severity-based labeling
 45 |             if (title.includes('critical') || title.includes('crash') || title.includes('data loss') || 
 46 |                 body.includes('critical') || body.includes('crash') || body.includes('data loss')) {
 47 |               labels.push('priority: critical');
 48 |             } else if (title.includes('urgent') || title.includes('blocking') || 
 49 |                       body.includes('urgent') || body.includes('blocking')) {
 50 |               labels.push('priority: high');
 51 |             } else if (title.includes('minor') || title.includes('cosmetic') ||
 52 |                       body.includes('minor') || body.includes('cosmetic')) {
 53 |               labels.push('priority: low');
 54 |             } else {
 55 |               labels.push('priority: medium');
 56 |             }
 57 | 
 58 |             // Component-based labeling
 59 |             if (title.includes('prometheus') || title.includes('metrics') || title.includes('query') ||
 60 |                 body.includes('prometheus') || body.includes('metrics') || body.includes('promql')) {
 61 |               labels.push('component: prometheus');
 62 |             }
 63 |             if (title.includes('mcp') || title.includes('server') || title.includes('transport') ||
 64 |                 body.includes('mcp') || body.includes('server') || body.includes('transport')) {
 65 |               labels.push('component: mcp-server');
 66 |             }
 67 |             if (title.includes('docker') || title.includes('container') || title.includes('deployment') ||
 68 |                 body.includes('docker') || body.includes('container') || body.includes('deployment')) {
 69 |               labels.push('component: deployment');
 70 |             }
 71 |             if (title.includes('auth') || title.includes('authentication') || title.includes('token') ||
 72 |                 body.includes('auth') || body.includes('authentication') || body.includes('token')) {
 73 |               labels.push('component: authentication');
 74 |             }
 75 | 
 76 |             // Type-based labeling
 77 |             if (title.includes('feature') || title.includes('enhancement') || title.includes('improvement') ||
 78 |                 body.includes('feature request') || body.includes('enhancement')) {
 79 |               labels.push('type: feature');
 80 |             } else if (title.includes('doc') || title.includes('documentation') ||
 81 |                       body.includes('documentation')) {
 82 |               labels.push('type: documentation');
 83 |             } else if (title.includes('test') || body.includes('test')) {
 84 |               labels.push('type: testing');
 85 |             } else if (title.includes('performance') || body.includes('performance') || 
 86 |                       title.includes('slow') || body.includes('slow')) {
 87 |               labels.push('type: performance');
 88 |             } else {
 89 |               labels.push('type: bug');
 90 |             }
 91 | 
 92 |             // Environment-based labeling
 93 |             if (body.includes('windows') || title.includes('windows')) {
 94 |               labels.push('env: windows');
 95 |             } else if (body.includes('macos') || body.includes('mac') || title.includes('macos')) {
 96 |               labels.push('env: macos');
 97 |             } else if (body.includes('linux') || title.includes('linux')) {
 98 |               labels.push('env: linux');
 99 |             }
100 | 
101 |             // Add status label
102 |             labels.push('status: needs-triage');
103 | 
104 |             if (labels.length > 0) {
105 |               await github.rest.issues.addLabels({
106 |                 owner: context.repo.owner,
107 |                 repo: context.repo.repo,
108 |                 issue_number: issue.number,
109 |                 labels: labels
110 |               });
111 |             }
112 | 
113 |       - name: Auto-assign based on component
114 |         if: github.event.action == 'labeled' && github.event_name == 'issues'
115 |         uses: actions/github-script@v7
116 |         with:
117 |           script: |
118 |             const issue = context.payload.issue;
119 |             const labelName = context.payload.label.name;
120 |             
121 |             // Define component maintainers
122 |             const componentAssignees = {
123 |               'component: prometheus': ['pab1it0'],
124 |               'component: mcp-server': ['pab1it0'],
125 |               'component: deployment': ['pab1it0'],
126 |               'component: authentication': ['pab1it0']
127 |             };
128 |             
129 |             if (componentAssignees[labelName] && issue.assignees.length === 0) {
130 |               await github.rest.issues.addAssignees({
131 |                 owner: context.repo.owner,
132 |                 repo: context.repo.repo,
133 |                 issue_number: issue.number,
134 |                 assignees: componentAssignees[labelName]
135 |               });
136 |             }
137 | 
138 |       - name: Update triage status
139 |         if: github.event.action == 'assigned' && github.event_name == 'issues'
140 |         uses: actions/github-script@v7
141 |         with:
142 |           script: |
143 |             const issue = context.payload.issue;
144 |             const hasTriageLabel = issue.labels.some(label => label.name === 'status: needs-triage');
145 |             
146 |             if (hasTriageLabel) {
147 |               await github.rest.issues.removeLabel({
148 |                 owner: context.repo.owner,
149 |                 repo: context.repo.repo,
150 |                 issue_number: issue.number,
151 |                 name: 'status: needs-triage'
152 |               });
153 |               
154 |               await github.rest.issues.addLabels({
155 |                 owner: context.repo.owner,
156 |                 repo: context.repo.repo,
157 |                 issue_number: issue.number,
158 |                 labels: ['status: in-progress']
159 |               });
160 |             }
161 | 
162 |       - name: Welcome new contributors
163 |         if: github.event.action == 'opened' && github.event_name == 'issues'
164 |         uses: actions/github-script@v7
165 |         with:
166 |           script: |
167 |             const issue = context.payload.issue;
168 |             const author = issue.user.login;
169 |             
170 |             // Check if this is the user's first issue
171 |             const issues = await github.rest.issues.listForRepo({
172 |               owner: context.repo.owner,
173 |               repo: context.repo.repo,
174 |               creator: author,
175 |               state: 'all'
176 |             });
177 |             
178 |             if (issues.data.length === 1) {
179 |               const welcomeMessage = `
180 |             👋 Welcome to the Prometheus MCP Server project, @${author}!
181 | 
182 |             Thank you for taking the time to report this issue. This project provides AI assistants with access to Prometheus metrics through the Model Context Protocol (MCP).
183 | 
184 |             To help us resolve your issue quickly:
185 |             - Please ensure you've filled out all relevant sections of the issue template
186 |             - Include your environment details (OS, Python version, Prometheus version)
187 |             - Provide steps to reproduce if applicable
188 |             - Check if this might be related to Prometheus configuration rather than the MCP server
189 | 
190 |             A maintainer will review and triage your issue soon. If you're interested in contributing a fix, please feel free to submit a pull request!
191 | 
192 |             **Useful resources:**
193 |             - [Configuration Guide](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/configuration.md)
194 |             - [Installation Guide](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/installation.md)
195 |             - [Contributing Guidelines](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/contributing.md)
196 |             `;
197 |               
198 |               await github.rest.issues.createComment({
199 |                 owner: context.repo.owner,
200 |                 repo: context.repo.repo,
201 |                 issue_number: issue.number,
202 |                 body: welcomeMessage
203 |               });
204 |             }
205 | 
206 |   scheduled-triage:
207 |     runs-on: ubuntu-latest
208 |     if: github.event_name == 'schedule' || github.event.inputs.triage_all == 'true'
209 |     permissions:
210 |       issues: write
211 |       contents: read
212 | 
213 |     steps:
214 |       - name: Checkout repository
215 |         uses: actions/checkout@v4
216 | 
217 |       - name: Triage stale issues
218 |         uses: actions/github-script@v7
219 |         with:
220 |           script: |
221 |             const { data: issues } = await github.rest.issues.listForRepo({
222 |               owner: context.repo.owner,
223 |               repo: context.repo.repo,
224 |               state: 'open',
225 |               sort: 'updated',
226 |               direction: 'asc',
227 |               per_page: 100  // pagination not handled; only the first 100 results are triaged
228 |             });
229 | 
230 |             const now = new Date();
231 |             const sevenDaysAgo = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
232 |             const thirtyDaysAgo = new Date(now.getTime() - (30 * 24 * 60 * 60 * 1000));
233 | 
234 |             for (const issue of issues) {
235 |               if (issue.pull_request) continue; // Skip PRs
236 |               
237 |               const updatedAt = new Date(issue.updated_at);
238 |               const hasNeedsTriageLabel = issue.labels.some(label => label.name === 'status: needs-triage');
239 |               const hasStaleLabel = issue.labels.some(label => label.name === 'status: stale');
240 |               const hasWaitingLabel = issue.labels.some(label => label.name === 'status: waiting-for-response');
241 | 
242 |               // Mark issues as stale if no activity for 30 days
243 |               if (updatedAt < thirtyDaysAgo && !hasStaleLabel && !hasWaitingLabel) {
244 |                 await github.rest.issues.addLabels({
245 |                   owner: context.repo.owner,
246 |                   repo: context.repo.repo,
247 |                   issue_number: issue.number,
248 |                   labels: ['status: stale']
249 |                 });
250 | 
251 |                 await github.rest.issues.createComment({
252 |                   owner: context.repo.owner,
253 |                   repo: context.repo.repo,
254 |                   issue_number: issue.number,
255 |                   body: `This issue has been automatically marked as stale because it has not had recent activity. It will be closed in 7 days if no further activity occurs. Thank you for your contributions.`
256 |                 });
257 |               }
258 | 
259 |               // Auto-close issues that have carried the stale label for 7 more days
260 |               else if (updatedAt < sevenDaysAgo && hasStaleLabel) {
261 |                 const comments = await github.rest.issues.listComments({
262 |                   owner: context.repo.owner,
263 |                   repo: context.repo.repo,
264 |                   issue_number: issue.number
265 |                 });
266 | 
267 |                 const staleComment = comments.data.find(comment => 
268 |                   comment.body.includes('automatically marked as stale')
269 |                 );
270 | 
271 |                 if (staleComment) {
272 |                   const staleCommentDate = new Date(staleComment.created_at);
273 |                   const sevenDaysAfterStale = new Date(staleCommentDate.getTime() + (7 * 24 * 60 * 60 * 1000));
274 | 
275 |                   if (now > sevenDaysAfterStale) {
276 |                     await github.rest.issues.update({
277 |                       owner: context.repo.owner,
278 |                       repo: context.repo.repo,
279 |                       issue_number: issue.number,
280 |                       state: 'closed'
281 |                     });
282 | 
283 |                     await github.rest.issues.createComment({
284 |                       owner: context.repo.owner,
285 |                       repo: context.repo.repo,
286 |                       issue_number: issue.number,
287 |                       body: `This issue has been automatically closed due to inactivity. If you believe this issue is still relevant, please reopen it with updated information.`
288 |                     });
289 |                   }
290 |                 }
291 |               }
292 | 
293 |               // Remove needs-triage once a maintainer has responded within the past week
294 |               else if (hasNeedsTriageLabel && updatedAt > sevenDaysAgo) {
295 |                 const comments = await github.rest.issues.listComments({
296 |                   owner: context.repo.owner,
297 |                   repo: context.repo.repo,
298 |                   issue_number: issue.number
299 |                 });
300 | 
301 |                 const maintainerResponse = comments.data.some(comment => 
302 |                   comment.user.login === 'pab1it0' && 
303 |                   new Date(comment.created_at) > sevenDaysAgo
304 |                 );
305 | 
306 |                 if (maintainerResponse) {
307 |                   await github.rest.issues.removeLabel({
308 |                     owner: context.repo.owner,
309 |                     repo: context.repo.repo,
310 |                     issue_number: issue.number,
311 |                     name: 'status: needs-triage'
312 |                   });
313 |                 }
314 |               }
315 |             }
316 | 
317 |   metrics-report:
318 |     runs-on: ubuntu-latest
319 |     if: github.event_name == 'schedule'
320 |     permissions:
321 |       issues: read
322 |       contents: read
323 | 
324 |     steps:
325 |       - name: Generate triage metrics
326 |         uses: actions/github-script@v7
327 |         with:
328 |           script: |
329 |             const { data: issues } = await github.rest.issues.listForRepo({
330 |               owner: context.repo.owner,
331 |               repo: context.repo.repo,
332 |               state: 'all',
333 |               per_page: 100
334 |             });
335 | 
336 |             const now = new Date();
337 |             const oneWeekAgo = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
338 |             const oneMonthAgo = new Date(now.getTime() - (30 * 24 * 60 * 60 * 1000));
339 | 
340 |             let metrics = {
341 |               total_open: 0,
342 |               needs_triage: 0,
343 |               in_progress: 0,
344 |               waiting_response: 0,
345 |               stale: 0,
346 |               new_this_week: 0,
347 |               closed_this_week: 0,
348 |               by_priority: { critical: 0, high: 0, medium: 0, low: 0 },
349 |               by_component: { prometheus: 0, 'mcp-server': 0, deployment: 0, authentication: 0 },
350 |               by_type: { bug: 0, feature: 0, documentation: 0, performance: 0 }
351 |             };
352 | 
353 |             for (const issue of issues) {
354 |               if (issue.pull_request) continue;
355 | 
356 |               const createdAt = new Date(issue.created_at);
357 |               const closedAt = issue.closed_at ? new Date(issue.closed_at) : null;
358 | 
359 |               if (issue.state === 'open') {
360 |                 metrics.total_open++;
361 | 
362 |                 // Count by status
363 |                 issue.labels.forEach(label => {
364 |                   if (label.name === 'status: needs-triage') metrics.needs_triage++;
365 |                   if (label.name === 'status: in-progress') metrics.in_progress++;
366 |                   if (label.name === 'status: waiting-for-response') metrics.waiting_response++;
367 |                   if (label.name === 'status: stale') metrics.stale++;
368 | 
369 |                   // Count by priority
370 |                   if (label.name.startsWith('priority: ')) {
371 |                     const priority = label.name.replace('priority: ', '');
372 |                     if (metrics.by_priority[priority] !== undefined) {
373 |                       metrics.by_priority[priority]++;
374 |                     }
375 |                   }
376 | 
377 |                   // Count by component
378 |                   if (label.name.startsWith('component: ')) {
379 |                     const component = label.name.replace('component: ', '');
380 |                     if (metrics.by_component[component] !== undefined) {
381 |                       metrics.by_component[component]++;
382 |                     }
383 |                   }
384 | 
385 |                   // Count by type
386 |                   if (label.name.startsWith('type: ')) {
387 |                     const type = label.name.replace('type: ', '');
388 |                     if (metrics.by_type[type] !== undefined) {
389 |                       metrics.by_type[type]++;
390 |                     }
391 |                   }
392 |                 });
393 |               }
394 | 
395 |               // Count new issues this week
396 |               if (createdAt > oneWeekAgo) {
397 |                 metrics.new_this_week++;
398 |               }
399 | 
400 |               // Count closed issues this week
401 |               if (closedAt && closedAt > oneWeekAgo) {
402 |                 metrics.closed_this_week++;
403 |               }
404 |             }
405 | 
406 |             // Log metrics (can be extended to send to external systems)
407 |             console.log('=== ISSUE TRIAGE METRICS ===');
408 |             console.log(`Total Open Issues: ${metrics.total_open}`);
409 |             console.log(`Needs Triage: ${metrics.needs_triage}`);
410 |             console.log(`In Progress: ${metrics.in_progress}`);
411 |             console.log(`Waiting for Response: ${metrics.waiting_response}`);
412 |             console.log(`Stale Issues: ${metrics.stale}`);
413 |             console.log(`New This Week: ${metrics.new_this_week}`);
414 |             console.log(`Closed This Week: ${metrics.closed_this_week}`);
415 |             console.log('Priority Distribution:', JSON.stringify(metrics.by_priority));
416 |             console.log('Component Distribution:', JSON.stringify(metrics.by_component));
417 |             console.log('Type Distribution:', JSON.stringify(metrics.by_type));
418 | 
419 |   pr-integration:
420 |     runs-on: ubuntu-latest
421 |     if: github.event_name == 'pull_request'
422 |     permissions:
423 |       issues: write
424 |       pull-requests: write
425 |       contents: read
426 | 
427 |     steps:
428 |       - name: Link PR to issues
429 |         if: github.event.action == 'opened'
430 |         uses: actions/github-script@v7
431 |         with:
432 |           script: |
433 |             const pr = context.payload.pull_request;
434 |             const body = pr.body || '';
435 |             
436 |             // Extract issue numbers from PR body
437 |             const issueMatches = body.match(/(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)/gi);
438 |             
439 |             if (issueMatches) {
440 |               for (const match of issueMatches) {
441 |                 const issueNumber = match.match(/#(\d+)/)[1];
442 |                 
443 |                 try {
444 |                   // Add a comment to the issue
445 |                   await github.rest.issues.createComment({
446 |                     owner: context.repo.owner,
447 |                     repo: context.repo.repo,
448 |                     issue_number: parseInt(issueNumber),
449 |                     body: `🔗 This issue is being addressed by PR #${pr.number}`
450 |                   });
451 |                   
452 |                   // Add in-review label to the issue
453 |                   await github.rest.issues.addLabels({
454 |                     owner: context.repo.owner,
455 |                     repo: context.repo.repo,
456 |                     issue_number: parseInt(issueNumber),
457 |                     labels: ['status: in-review']
458 |                   });
459 |                 } catch (error) {
460 |                   console.log(`Could not update issue #${issueNumber}: ${error.message}`);
461 |                 }
462 |               }
463 |             }
464 | 
465 |       - name: Update issue status on PR merge
466 |         if: github.event.action == 'closed' && github.event.pull_request.merged
467 |         uses: actions/github-script@v7
468 |         with:
469 |           script: |
470 |             const pr = context.payload.pull_request;
471 |             const body = pr.body || '';
472 |             
473 |             // Extract issue numbers from PR body
474 |             const issueMatches = body.match(/(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)/gi);
475 |             
476 |             if (issueMatches) {
477 |               for (const match of issueMatches) {
478 |                 const issueNumber = match.match(/#(\d+)/)[1];
479 |                 
480 |                 try {
481 |                   // Add a comment to the issue
482 |                   await github.rest.issues.createComment({
483 |                     owner: context.repo.owner,
484 |                     repo: context.repo.repo,
485 |                     issue_number: parseInt(issueNumber),
486 |                     body: `✅ This issue has been resolved by PR #${pr.number}, merged as commit ${pr.merge_commit_sha}`
487 |                   });
488 |                   
489 |                   // Remove in-review label
490 |                   try {
491 |                     await github.rest.issues.removeLabel({
492 |                       owner: context.repo.owner,
493 |                       repo: context.repo.repo,
494 |                       issue_number: parseInt(issueNumber),
495 |                       name: 'status: in-review'
496 |                     });
497 |                   } catch (error) {
498 |                     // Label might not exist, ignore
499 |                   }
500 |                   
501 |                 } catch (error) {
502 |                   console.log(`Could not update issue #${issueNumber}: ${error.message}`);
503 |                 }
504 |               }
505 |             }
```
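
The `pr-integration` job hinges on a single regex over the PR body. A quick way to sanity-check that pattern outside of Actions is to mirror it in Python; this is an illustration only, and nothing below ships with the repository:

```python
# Mirrors the workflow's /(close|closes|...)\s+#(\d+)/gi pattern in Python.
import re

CLOSING_KEYWORDS = re.compile(
    r"(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)",
    re.IGNORECASE,
)

def linked_issues(pr_body) -> list:
    """Return the issue numbers this PR body would link, per the workflow's rule."""
    return [int(number) for _, number in CLOSING_KEYWORDS.findall(pr_body or "")]

assert linked_issues("Fixes #12 and resolves #34") == [12, 34]
assert linked_issues(None) == []  # PR bodies can be empty
```

Note the pattern has no word boundary, so a body containing "prefixes #5" also matches on "fixes #5"; anchoring the keywords with `\b` would tighten it if that ever matters.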