This is page 2 of 2. Use http://codebase.md/pab1it0/prometheus-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.template
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── question.yml
│   ├── TRIAGE_AUTOMATION.md
│   ├── VALIDATION_SUMMARY.md
│   └── workflows
│       ├── bug-triage.yml
│       ├── ci.yml
│       ├── claude-code-review.yml
│       ├── claude.yml
│       ├── issue-management.yml
│       ├── label-management.yml
│       ├── security.yml
│       └── triage-metrics.yml
├── .gitignore
├── Dockerfile
├── docs
│   ├── api_reference.md
│   ├── configuration.md
│   ├── contributing.md
│   ├── deploying_with_toolhive.md
│   ├── docker_deployment.md
│   ├── installation.md
│   └── usage.md
├── LICENSE
├── pyproject.toml
├── README.md
├── server.json
├── src
│   └── prometheus_mcp_server
│       ├── __init__.py
│       ├── logging_config.py
│       ├── main.py
│       └── server.py
├── tests
│   ├── test_docker_integration.py
│   ├── test_logging_config.py
│   ├── test_main.py
│   ├── test_mcp_protocol_compliance.py
│   ├── test_server.py
│   └── test_tools.py
└── uv.lock
```

# Files

--------------------------------------------------------------------------------
/tests/test_mcp_protocol_compliance.py:
--------------------------------------------------------------------------------

```python
"""Tests for MCP protocol compliance and tool functionality."""

import pytest
import json
import asyncio
from unittest.mock import patch, MagicMock, AsyncMock
from datetime import datetime
from prometheus_mcp_server import server
from prometheus_mcp_server.server import (
    make_prometheus_request, get_prometheus_auth, config, TransportType,
    execute_query, execute_range_query, list_metrics, get_metric_metadata, get_targets, health_check
)

# Test the MCP tools by testing them through async wrappers
async def execute_query_wrapper(query: str, time=None):
    """Wrapper to test execute_query functionality."""
    params = {"query": query}
    if time:
        params["time"] = time
    data = make_prometheus_request("query", params=params)
    return {"resultType": data["resultType"], "result": data["result"]}

async def execute_range_query_wrapper(query: str, start: str, end: str, step: str):
    """Wrapper to test execute_range_query functionality."""  
    params = {"query": query, "start": start, "end": end, "step": step}
    data = make_prometheus_request("query_range", params=params)
    return {"resultType": data["resultType"], "result": data["result"]}

async def list_metrics_wrapper():
    """Wrapper to test list_metrics functionality."""
    return make_prometheus_request("label/__name__/values")

async def get_metric_metadata_wrapper(metric: str):
    """Wrapper to test get_metric_metadata functionality."""
    params = {"metric": metric}
    data = make_prometheus_request("metadata", params=params)
    return data

async def get_targets_wrapper():
    """Wrapper to test get_targets functionality."""
    data = make_prometheus_request("targets")
    return {"activeTargets": data["activeTargets"], "droppedTargets": data["droppedTargets"]}

async def health_check_wrapper():
    """Wrapper to test health_check functionality."""
    try:
        health_status = {
            "status": "healthy",
            "service": "prometheus-mcp-server", 
            "version": "1.2.3",
            "timestamp": datetime.utcnow().isoformat(),
            "transport": config.mcp_server_config.mcp_server_transport if config.mcp_server_config else "stdio",
            "configuration": {
                "prometheus_url_configured": bool(config.url),
                "authentication_configured": bool(config.username or config.token),
                "org_id_configured": bool(config.org_id)
            }
        }
        
        if config.url:
            try:
                make_prometheus_request("query", params={"query": "up", "time": str(int(datetime.utcnow().timestamp()))})
                health_status["prometheus_connectivity"] = "healthy"
                health_status["prometheus_url"] = config.url
            except Exception as e:
                health_status["prometheus_connectivity"] = "unhealthy"
                health_status["prometheus_error"] = str(e)
                health_status["status"] = "degraded"
        else:
            health_status["status"] = "unhealthy"
            health_status["error"] = "PROMETHEUS_URL not configured"
        
        return health_status
    except Exception as e:
        return {
            "status": "unhealthy",
            "service": "prometheus-mcp-server",
            "error": str(e),
            "timestamp": datetime.utcnow().isoformat()
        }


@pytest.fixture
def mock_prometheus_response():
    """Mock successful Prometheus API response."""
    return {
        "status": "success",
        "data": {
            "resultType": "vector",
            "result": [
                {
                    "metric": {"__name__": "up", "instance": "localhost:9090"},
                    "value": [1609459200, "1"]
                }
            ]
        }
    }


@pytest.fixture
def mock_metrics_response():
    """Mock Prometheus metrics list response."""
    return {
        "status": "success", 
        "data": ["up", "prometheus_build_info", "prometheus_config_last_reload_successful"]
    }


@pytest.fixture
def mock_metadata_response():
    """Mock Prometheus metadata response."""
    return {
        "status": "success",
        "data": {
            "up": [
                {
                    "type": "gauge",
                    "help": "1 if the instance is healthy, 0 otherwise",
                    "unit": ""
                }
            ]
        }
    }


@pytest.fixture
def mock_targets_response():
    """Mock Prometheus targets response."""
    return {
        "status": "success",
        "data": {
            "activeTargets": [
                {
                    "discoveredLabels": {"__address__": "localhost:9090"},
                    "labels": {"instance": "localhost:9090", "job": "prometheus"},
                    "scrapePool": "prometheus",
                    "scrapeUrl": "http://localhost:9090/metrics",
                    "lastError": "",
                    "lastScrape": "2023-01-01T00:00:00Z",
                    "lastScrapeDuration": 0.001,
                    "health": "up"
                }
            ],
            "droppedTargets": []
        }
    }


class TestMCPToolCompliance:
    """Test MCP tool interface compliance."""
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio  
    async def test_execute_query_tool_signature(self, mock_request, mock_prometheus_response):
        """Test execute_query tool has correct MCP signature."""
        mock_request.return_value = mock_prometheus_response["data"]
        
        # Ensure config has a URL set for tests
        original_url = config.url
        if not config.url:
            config.url = "http://test-prometheus:9090"
            
        try:
            # Test required parameters
            result = await execute_query_wrapper("up")
            assert isinstance(result, dict)
            assert "resultType" in result
            assert "result" in result
            
            # Test optional parameters
            result = await execute_query_wrapper("up", time="2023-01-01T00:00:00Z")
            assert isinstance(result, dict)
        finally:
            config.url = original_url
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_execute_range_query_tool_signature(self, mock_request, mock_prometheus_response):
        """Test execute_range_query tool has correct MCP signature."""
        mock_request.return_value = mock_prometheus_response["data"]
        
        # Test all required parameters
        result = await execute_range_query_wrapper(
            query="up",
            start="2023-01-01T00:00:00Z", 
            end="2023-01-01T01:00:00Z",
            step="1m"
        )
        assert isinstance(result, dict)
        assert "resultType" in result
        assert "result" in result
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_list_metrics_tool_signature(self, mock_request, mock_metrics_response):
        """Test list_metrics tool has correct MCP signature."""
        mock_request.return_value = mock_metrics_response["data"]
        
        result = await list_metrics_wrapper()
        assert isinstance(result, list)
        assert all(isinstance(metric, str) for metric in result)
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_get_metric_metadata_tool_signature(self, mock_request, mock_metadata_response):
        """Test get_metric_metadata tool has correct MCP signature."""
        mock_request.return_value = mock_metadata_response["data"]
        
        result = await get_metric_metadata_wrapper("up")
        assert isinstance(result, dict)
        # Check that the result contains metric names as keys and metadata lists as values
        for metric_name, metadata_list in result.items():
            assert isinstance(metric_name, str)
            assert isinstance(metadata_list, list)
            assert all(isinstance(metadata, dict) for metadata in metadata_list)
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_get_targets_tool_signature(self, mock_request, mock_targets_response):
        """Test get_targets tool has correct MCP signature."""
        mock_request.return_value = mock_targets_response["data"]
        
        result = await get_targets_wrapper()
        assert isinstance(result, dict)
        assert "activeTargets" in result
        assert "droppedTargets" in result
        assert isinstance(result["activeTargets"], list)
        assert isinstance(result["droppedTargets"], list)
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_health_check_tool_signature(self, mock_request):
        """Test health_check tool has correct MCP signature."""
        # Mock successful Prometheus connectivity
        mock_request.return_value = {"resultType": "vector", "result": []}
        
        result = await health_check_wrapper()
        assert isinstance(result, dict)
        assert "status" in result
        assert "service" in result
        assert "timestamp" in result
        assert result["service"] == "prometheus-mcp-server"


class TestMCPToolErrorHandling:
    """Test MCP tool error handling compliance."""
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_execute_query_handles_prometheus_errors(self, mock_request):
        """Test execute_query handles Prometheus API errors gracefully."""
        mock_request.side_effect = ValueError("Prometheus API error: query timeout")
        
        with pytest.raises(ValueError):
            await execute_query_wrapper("invalid_query{")
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_execute_range_query_handles_network_errors(self, mock_request):
        """Test execute_range_query handles network errors gracefully."""
        import requests
        mock_request.side_effect = requests.exceptions.ConnectionError("Connection refused")
        
        with pytest.raises(requests.exceptions.ConnectionError):
            await execute_range_query_wrapper("up", "now-1h", "now", "1m")
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_health_check_handles_configuration_errors(self, mock_request):
        """Test health_check handles configuration errors gracefully."""
        # Test with missing Prometheus URL
        original_url = config.url
        config.url = ""
        
        try:
            result = await health_check_wrapper()
            assert result["status"] == "unhealthy" 
            assert "error" in result or "PROMETHEUS_URL" in str(result)
        finally:
            config.url = original_url
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_health_check_handles_connectivity_errors(self, mock_request):
        """Test health_check handles Prometheus connectivity errors."""
        mock_request.side_effect = Exception("Connection timeout")
        
        result = await health_check_wrapper()
        assert result["status"] in ["unhealthy", "degraded"]
        assert "prometheus_connectivity" in result or "error" in result


class TestMCPDataFormats:
    """Test MCP tool data format compliance."""
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_execute_query_returns_valid_json(self, mock_request, mock_prometheus_response):
        """Test execute_query returns JSON-serializable data."""
        mock_request.return_value = mock_prometheus_response["data"]
        
        result = await execute_query_wrapper("up")
        
        # Verify JSON serializability
        json_str = json.dumps(result)
        assert json_str is not None
        
        # Verify structure
        parsed = json.loads(json_str)
        assert "resultType" in parsed
        assert "result" in parsed
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_all_tools_return_json_serializable_data(self, mock_request):
        """Test all MCP tools return JSON-serializable data."""
        # Setup various mock responses
        mock_request.side_effect = [
            {"resultType": "vector", "result": []},  # execute_query
            {"resultType": "matrix", "result": []},  # execute_range_query
            ["metric1", "metric2"],  # list_metrics
            {"metric1": [{"type": "gauge", "help": "test"}]},  # get_metric_metadata
            {"activeTargets": [], "droppedTargets": []},  # get_targets
        ]
        
        # Test all tools
        tools_and_calls = [
            (execute_query_wrapper, ("up",)),
            (execute_range_query_wrapper, ("up", "now-1h", "now", "1m")),
            (list_metrics_wrapper, ()),
            (get_metric_metadata_wrapper, ("metric1",)),
            (get_targets_wrapper, ()),
        ]
        
        for tool, args in tools_and_calls:
            result = await tool(*args)
            
            # Verify JSON serializability
            try:
                json_str = json.dumps(result)
                assert json_str is not None
            except (TypeError, ValueError) as e:
                pytest.fail(f"Tool {tool.__name__} returned non-JSON-serializable data: {e}")


class TestMCPServerConfiguration:
    """Test MCP server configuration compliance."""
    
    def test_transport_type_validation(self):
        """Test transport type validation works correctly."""
        # Valid transport types
        valid_transports = ["stdio", "http", "sse"]
        for transport in valid_transports:
            assert transport in TransportType.values()
        
        # Invalid transport types should not be in values
        invalid_transports = ["tcp", "websocket", "grpc"]
        for transport in invalid_transports:
            assert transport not in TransportType.values()
    
    def test_server_config_validation(self):
        """Test server configuration validation."""
        from prometheus_mcp_server.server import MCPServerConfig, PrometheusConfig
        
        # Valid configuration
        mcp_config = MCPServerConfig(
            mcp_server_transport="http",
            mcp_bind_host="127.0.0.1", 
            mcp_bind_port=8080
        )
        assert mcp_config.mcp_server_transport == "http"
        
        # Test Prometheus config
        prometheus_config = PrometheusConfig(
            url="http://prometheus:9090",
            mcp_server_config=mcp_config
        )
        assert prometheus_config.url == "http://prometheus:9090"
    
    def test_authentication_configuration(self):
        """Test authentication configuration options."""
        from prometheus_mcp_server.server import get_prometheus_auth
        
        # Test with no authentication
        original_config = {
            'username': config.username,
            'password': config.password, 
            'token': config.token
        }
        
        try:
            config.username = ""
            config.password = ""
            config.token = ""
            
            auth = get_prometheus_auth()
            assert auth is None
            
            # Test with basic auth
            config.username = "testuser"
            config.password = "testpass"
            config.token = ""
            
            auth = get_prometheus_auth()
            assert auth is not None
            
            # Test with token auth (should take precedence)
            config.token = "test-token"
            
            auth = get_prometheus_auth()
            assert auth is not None
            assert "Authorization" in auth
            assert "Bearer" in auth["Authorization"]
            
        finally:
            # Restore original config
            config.username = original_config['username']
            config.password = original_config['password']
            config.token = original_config['token']


class TestMCPProtocolVersioning:
    """Test MCP protocol versioning and capabilities."""
    
    def test_mcp_server_info(self):
        """Test MCP server provides correct server information."""
        # Test FastMCP server instantiation
        from prometheus_mcp_server.server import mcp
        
        assert mcp is not None
        # FastMCP should have a name
        assert hasattr(mcp, 'name') or hasattr(mcp, '_name')
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_tool_descriptions_are_present(self, mock_request):
        """Test that all MCP tools have proper descriptions."""
        # All tools should be registered with descriptions
        tools = [
            execute_query,
            execute_range_query,
            list_metrics,
            get_metric_metadata,
            get_targets,
            health_check
        ]
        
        for tool in tools:
            # Each tool should have a description (FastMCP tools have description attribute)
            assert hasattr(tool, 'description')
            assert tool.description is not None and tool.description.strip() != ""
    
    def test_server_capabilities(self):
        """Test server declares proper MCP capabilities."""
        # Test that the server supports the expected transports
        transports = ["stdio", "http", "sse"]
        
        for transport in transports:
            assert transport in TransportType.values()
    
    @pytest.mark.asyncio
    async def test_error_response_format(self):
        """Test that error responses follow MCP format."""
        # Test with invalid configuration to trigger errors
        original_url = config.url
        config.url = ""
        
        try:
            result = await health_check_wrapper()
            
            # Error responses should be structured
            assert isinstance(result, dict)
            assert "status" in result
            assert result["status"] in ["unhealthy", "degraded", "error"]
            
        finally:
            config.url = original_url


class TestMCPConcurrencyAndPerformance:
    """Test MCP tools handle concurrency and perform well."""
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_concurrent_tool_execution(self, mock_request, mock_prometheus_response):
        """Test tools can handle concurrent execution."""
        def mock_side_effect(endpoint, params=None):
            if endpoint == "targets":
                return {"activeTargets": [], "droppedTargets": []}
            elif endpoint == "label/__name__/values":
                return ["up", "prometheus_build_info"]
            else:
                return mock_prometheus_response["data"]
        
        mock_request.side_effect = mock_side_effect
        
        # Create multiple concurrent tasks
        tasks = [
            execute_query_wrapper("up"),
            execute_query_wrapper("prometheus_build_info"),
            list_metrics_wrapper(),
            get_targets_wrapper()
        ]
        
        # Execute concurrently
        results = await asyncio.gather(*tasks)
        
        # All should complete successfully
        assert len(results) == 4
        for result in results:
            assert result is not None
    
    @patch('test_mcp_protocol_compliance.make_prometheus_request')
    @pytest.mark.asyncio
    async def test_tool_timeout_handling(self, mock_request):
        """Test tools handle timeouts gracefully."""
        # Simulate slow response
        def slow_response(*args, **kwargs):
            import time
            time.sleep(0.1)
            return {"resultType": "vector", "result": []}
        
        mock_request.side_effect = slow_response
        
        # This should complete (not testing actual timeout, just that it's async)
        result = await execute_query_wrapper("up")
        assert result is not None
```
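
A quick aside (not part of the repository): the sketch below shows one way to run this module locally, assuming `pytest` and `pytest-asyncio` are installed. The `@patch('test_mcp_protocol_compliance.make_prometheus_request')` targets resolve because `make_prometheus_request` is imported into the test module's own namespace, so that name, rather than `prometheus_mcp_server.server.make_prometheus_request`, is what has to be patched.

```python
# Minimal sketch (not part of the repo) for running this test module locally.
# Assumes pytest and pytest-asyncio are installed and that the command is run
# from the project root, so the file is collected as test_mcp_protocol_compliance,
# which is the module name the @patch targets above rely on.
import sys

import pytest

if __name__ == "__main__":
    # -q keeps the output short; -x stops at the first failure.
    sys.exit(pytest.main(["-q", "-x", "tests/test_mcp_protocol_compliance.py"]))
```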

--------------------------------------------------------------------------------
/.github/workflows/bug-triage.yml:
--------------------------------------------------------------------------------

```yaml
name: Bug Triage Automation

on:
  issues:
    types: [opened, edited, labeled, unlabeled, assigned, unassigned]
  issue_comment:
    types: [created, edited]
  pull_request:
    types: [opened, closed]  # 'merged' is not a valid activity type; merges are handled below via the merged flag on 'closed'
  schedule:
    # Run triage check every 6 hours
    - cron: '0 */6 * * *'
  workflow_dispatch:
    inputs:
      triage_all:
        description: 'Re-triage all open issues'
        required: false
        default: false
        type: boolean

jobs:
  auto-triage:
    runs-on: ubuntu-latest
    if: github.event_name == 'issues' || github.event_name == 'issue_comment'
    permissions:
      issues: write
      contents: read
      pull-requests: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Auto-label new issues
        if: github.event.action == 'opened' && github.event_name == 'issues'
        uses: actions/github-script@v7
        with:
          script: |
            const issue = context.payload.issue;
            const title = issue.title.toLowerCase();
            const body = issue.body ? issue.body.toLowerCase() : '';
            const labels = [];

            // Severity-based labeling
            if (title.includes('critical') || title.includes('crash') || title.includes('data loss') || 
                body.includes('critical') || body.includes('crash') || body.includes('data loss')) {
              labels.push('priority: critical');
            } else if (title.includes('urgent') || title.includes('blocking') || 
                      body.includes('urgent') || body.includes('blocking')) {
              labels.push('priority: high');
            } else if (title.includes('minor') || title.includes('cosmetic') ||
                      body.includes('minor') || body.includes('cosmetic')) {
              labels.push('priority: low');
            } else {
              labels.push('priority: medium');
            }

            // Component-based labeling
            if (title.includes('prometheus') || title.includes('metrics') || title.includes('query') ||
                body.includes('prometheus') || body.includes('metrics') || body.includes('promql')) {
              labels.push('component: prometheus');
            }
            if (title.includes('mcp') || title.includes('server') || title.includes('transport') ||
                body.includes('mcp') || body.includes('server') || body.includes('transport')) {
              labels.push('component: mcp-server');
            }
            if (title.includes('docker') || title.includes('container') || title.includes('deployment') ||
                body.includes('docker') || body.includes('container') || body.includes('deployment')) {
              labels.push('component: deployment');
            }
            if (title.includes('auth') || title.includes('authentication') || title.includes('token') ||
                body.includes('auth') || body.includes('authentication') || body.includes('token')) {
              labels.push('component: authentication');
            }

            // Type-based labeling
            if (title.includes('feature') || title.includes('enhancement') || title.includes('improvement') ||
                body.includes('feature request') || body.includes('enhancement')) {
              labels.push('type: feature');
            } else if (title.includes('doc') || title.includes('documentation') ||
                      body.includes('documentation')) {
              labels.push('type: documentation');
            } else if (title.includes('test') || body.includes('test')) {
              labels.push('type: testing');
            } else if (title.includes('performance') || body.includes('performance') || 
                      title.includes('slow') || body.includes('slow')) {
              labels.push('type: performance');
            } else {
              labels.push('type: bug');
            }

            // Environment-based labeling
            if (body.includes('windows') || title.includes('windows')) {
              labels.push('env: windows');
            } else if (body.includes('macos') || body.includes('mac') || title.includes('macos')) {
              labels.push('env: macos');
            } else if (body.includes('linux') || title.includes('linux')) {
              labels.push('env: linux');
            }

            // Add status label
            labels.push('status: needs-triage');

            if (labels.length > 0) {
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                labels: labels
              });
            }

      - name: Auto-assign based on component
        if: github.event.action == 'labeled' && github.event_name == 'issues'
        uses: actions/github-script@v7
        with:
          script: |
            const issue = context.payload.issue;
            const labelName = context.payload.label.name;
            
            // Define component maintainers
            const componentAssignees = {
              'component: prometheus': ['pab1it0'],
              'component: mcp-server': ['pab1it0'],
              'component: deployment': ['pab1it0'],
              'component: authentication': ['pab1it0']
            };
            
            if (componentAssignees[labelName] && issue.assignees.length === 0) {
              await github.rest.issues.addAssignees({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                assignees: componentAssignees[labelName]
              });
            }

      - name: Update triage status
        if: github.event.action == 'assigned' && github.event_name == 'issues'
        uses: actions/github-script@v7
        with:
          script: |
            const issue = context.payload.issue;
            const hasTriageLabel = issue.labels.some(label => label.name === 'status: needs-triage');
            
            if (hasTriageLabel) {
              await github.rest.issues.removeLabel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                name: 'status: needs-triage'
              });
              
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                labels: ['status: in-progress']
              });
            }

      - name: Welcome new contributors
        if: github.event.action == 'opened' && github.event_name == 'issues'
        uses: actions/github-script@v7
        with:
          script: |
            const issue = context.payload.issue;
            const author = issue.user.login;
            
            // Check if this is the user's first issue
            const issues = await github.rest.issues.listForRepo({
              owner: context.repo.owner,
              repo: context.repo.repo,
              creator: author,
              state: 'all'
            });
            
            if (issues.data.length === 1) {
              const welcomeMessage = `
            👋 Welcome to the Prometheus MCP Server project, @${author}!

            Thank you for taking the time to report this issue. This project provides AI assistants with access to Prometheus metrics through the Model Context Protocol (MCP).

            To help us resolve your issue quickly:
            - Please ensure you've filled out all relevant sections of the issue template
            - Include your environment details (OS, Python version, Prometheus version)
            - Provide steps to reproduce if applicable
            - Check if this might be related to Prometheus configuration rather than the MCP server

            A maintainer will review and triage your issue soon. If you're interested in contributing a fix, please feel free to submit a pull request!

            **Useful resources:**
            - [Configuration Guide](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/configuration.md)
            - [Installation Guide](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/installation.md)
            - [Contributing Guidelines](https://github.com/pab1it0/prometheus-mcp-server/blob/main/docs/contributing.md)
            `;
              
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: issue.number,
                body: welcomeMessage
              });
            }

  scheduled-triage:
    runs-on: ubuntu-latest
    if: github.event_name == 'schedule' || github.event.inputs.triage_all == 'true'
    permissions:
      issues: write
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Triage stale issues
        uses: actions/github-script@v7
        with:
          script: |
            const { data: issues } = await github.rest.issues.listForRepo({
              owner: context.repo.owner,
              repo: context.repo.repo,
              state: 'open',
              sort: 'updated',
              direction: 'asc',
              per_page: 100
            });

            const now = new Date();
            const sevenDaysAgo = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
            const thirtyDaysAgo = new Date(now.getTime() - (30 * 24 * 60 * 60 * 1000));

            for (const issue of issues) {
              if (issue.pull_request) continue; // Skip PRs
              
              const updatedAt = new Date(issue.updated_at);
              const hasNeedsTriageLabel = issue.labels.some(label => label.name === 'status: needs-triage');
              const hasStaleLabel = issue.labels.some(label => label.name === 'status: stale');
              const hasWaitingLabel = issue.labels.some(label => label.name === 'status: waiting-for-response');

              // Mark issues as stale if no activity for 30 days
              if (updatedAt < thirtyDaysAgo && !hasStaleLabel && !hasWaitingLabel) {
                await github.rest.issues.addLabels({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: issue.number,
                  labels: ['status: stale']
                });

                await github.rest.issues.createComment({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: issue.number,
                  body: `This issue has been automatically marked as stale because it has not had recent activity. It will be closed in 7 days if no further activity occurs. Thank you for your contributions.`
                });
              }

              // Auto-close issues that have been stale for 7 days
              else if (updatedAt < thirtyDaysAgo && hasStaleLabel) {
                const comments = await github.rest.issues.listComments({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: issue.number
                });

                const staleComment = comments.data.find(comment => 
                  comment.body.includes('automatically marked as stale')
                );

                if (staleComment) {
                  const staleCommentDate = new Date(staleComment.created_at);
                  const sevenDaysAfterStale = new Date(staleCommentDate.getTime() + (7 * 24 * 60 * 60 * 1000));

                  if (now > sevenDaysAfterStale) {
                    await github.rest.issues.update({
                      owner: context.repo.owner,
                      repo: context.repo.repo,
                      issue_number: issue.number,
                      state: 'closed'
                    });

                    await github.rest.issues.createComment({
                      owner: context.repo.owner,
                      repo: context.repo.repo,
                      issue_number: issue.number,
                      body: `This issue has been automatically closed due to inactivity. If you believe this issue is still relevant, please reopen it with updated information.`
                    });
                  }
                }
              }

              // Remove needs-triage if issue has been responded to by maintainer
              else if (hasNeedsTriageLabel && updatedAt > sevenDaysAgo) {
                const comments = await github.rest.issues.listComments({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  issue_number: issue.number
                });

                const maintainerResponse = comments.data.some(comment => 
                  comment.user.login === 'pab1it0' && 
                  new Date(comment.created_at) > sevenDaysAgo
                );

                if (maintainerResponse) {
                  await github.rest.issues.removeLabel({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: issue.number,
                    name: 'status: needs-triage'
                  });
                }
              }
            }

  metrics-report:
    runs-on: ubuntu-latest
    if: github.event_name == 'schedule'
    permissions:
      issues: read
      contents: read

    steps:
      - name: Generate triage metrics
        uses: actions/github-script@v7
        with:
          script: |
            const { data: issues } = await github.rest.issues.listForRepo({
              owner: context.repo.owner,
              repo: context.repo.repo,
              state: 'all',
              per_page: 100
            });

            const now = new Date();
            const oneWeekAgo = new Date(now.getTime() - (7 * 24 * 60 * 60 * 1000));
            const oneMonthAgo = new Date(now.getTime() - (30 * 24 * 60 * 60 * 1000));

            let metrics = {
              total_open: 0,
              needs_triage: 0,
              in_progress: 0,
              waiting_response: 0,
              stale: 0,
              new_this_week: 0,
              closed_this_week: 0,
              by_priority: { critical: 0, high: 0, medium: 0, low: 0 },
              by_component: { prometheus: 0, 'mcp-server': 0, deployment: 0, authentication: 0 },
              by_type: { bug: 0, feature: 0, documentation: 0, performance: 0 }
            };

            for (const issue of issues) {
              if (issue.pull_request) continue;

              const createdAt = new Date(issue.created_at);
              const closedAt = issue.closed_at ? new Date(issue.closed_at) : null;

              if (issue.state === 'open') {
                metrics.total_open++;

                // Count by status
                issue.labels.forEach(label => {
                  if (label.name === 'status: needs-triage') metrics.needs_triage++;
                  if (label.name === 'status: in-progress') metrics.in_progress++;
                  if (label.name === 'status: waiting-for-response') metrics.waiting_response++;
                  if (label.name === 'status: stale') metrics.stale++;

                  // Count by priority
                  if (label.name.startsWith('priority: ')) {
                    const priority = label.name.replace('priority: ', '');
                    if (metrics.by_priority[priority] !== undefined) {
                      metrics.by_priority[priority]++;
                    }
                  }

                  // Count by component
                  if (label.name.startsWith('component: ')) {
                    const component = label.name.replace('component: ', '');
                    if (metrics.by_component[component] !== undefined) {
                      metrics.by_component[component]++;
                    }
                  }

                  // Count by type
                  if (label.name.startsWith('type: ')) {
                    const type = label.name.replace('type: ', '');
                    if (metrics.by_type[type] !== undefined) {
                      metrics.by_type[type]++;
                    }
                  }
                });
              }

              // Count new issues this week
              if (createdAt > oneWeekAgo) {
                metrics.new_this_week++;
              }

              // Count closed issues this week
              if (closedAt && closedAt > oneWeekAgo) {
                metrics.closed_this_week++;
              }
            }

            // Log metrics (can be extended to send to external systems)
            console.log('=== ISSUE TRIAGE METRICS ===');
            console.log(`Total Open Issues: ${metrics.total_open}`);
            console.log(`Needs Triage: ${metrics.needs_triage}`);
            console.log(`In Progress: ${metrics.in_progress}`);
            console.log(`Waiting for Response: ${metrics.waiting_response}`);
            console.log(`Stale Issues: ${metrics.stale}`);
            console.log(`New This Week: ${metrics.new_this_week}`);
            console.log(`Closed This Week: ${metrics.closed_this_week}`);
            console.log('Priority Distribution:', JSON.stringify(metrics.by_priority));
            console.log('Component Distribution:', JSON.stringify(metrics.by_component));
            console.log('Type Distribution:', JSON.stringify(metrics.by_type));

  pr-integration:
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    permissions:
      issues: write
      pull-requests: write
      contents: read

    steps:
      - name: Link PR to issues
        if: github.event.action == 'opened'
        uses: actions/github-script@v7
        with:
          script: |
            const pr = context.payload.pull_request;
            const body = pr.body || '';
            
            // Extract issue numbers from PR body
            const issueMatches = body.match(/(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)/gi);
            
            if (issueMatches) {
              for (const match of issueMatches) {
                const issueNumber = match.match(/#(\d+)/)[1];
                
                try {
                  // Add a comment to the issue
                  await github.rest.issues.createComment({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: parseInt(issueNumber),
                    body: `🔗 This issue is being addressed by PR #${pr.number}`
                  });
                  
                  // Add in-review label to the issue
                  await github.rest.issues.addLabels({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: parseInt(issueNumber),
                    labels: ['status: in-review']
                  });
                } catch (error) {
                  console.log(`Could not update issue #${issueNumber}: ${error.message}`);
                }
              }
            }

      - name: Update issue status on PR merge
        if: github.event.action == 'closed' && github.event.pull_request.merged
        uses: actions/github-script@v7
        with:
          script: |
            const pr = context.payload.pull_request;
            const body = pr.body || '';
            
            // Extract issue numbers from PR body
            const issueMatches = body.match(/(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)/gi);
            
            if (issueMatches) {
              for (const match of issueMatches) {
                const issueNumber = match.match(/#(\d+)/)[1];
                
                try {
                  // Add a comment to the issue
                  await github.rest.issues.createComment({
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    issue_number: parseInt(issueNumber),
                    body: `✅ This issue has been resolved by PR #${pr.number} which was merged in commit ${pr.merge_commit_sha}`
                  });
                  
                  // Remove in-review label
                  try {
                    await github.rest.issues.removeLabel({
                      owner: context.repo.owner,
                      repo: context.repo.repo,
                      issue_number: parseInt(issueNumber),
                      name: 'status: in-review'
                    });
                  } catch (error) {
                    // Label might not exist, ignore
                  }
                  
                } catch (error) {
                  console.log(`Could not update issue #${issueNumber}: ${error.message}`);
                }
              }
            }
```
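
For reference, a hypothetical Python sketch (not part of the workflow) that mirrors the closing-keyword regex the `pr-integration` job uses to link pull requests to issues; it can be handy for checking a PR body locally before relying on the automation.

```python
# Standalone sketch (hypothetical, not part of the workflow) that mirrors the
# closing-keyword pattern the pr-integration job applies to a PR body.
import re

# Same keywords and structure as the workflow's regex, matched case-insensitively.
ISSUE_REF = re.compile(
    r"(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved)\s+#(\d+)",
    re.IGNORECASE,
)


def extract_issue_numbers(pr_body: str) -> list[int]:
    """Return the issue numbers referenced with a closing keyword in a PR body."""
    return [int(number) for _, number in ISSUE_REF.findall(pr_body or "")]


if __name__ == "__main__":
    sample = "This PR fixes #42 and closes #7."
    print(extract_issue_numbers(sample))  # -> [42, 7]
```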