# jmeter-mcp-server
This is page 1 of 2. Use http://codebase.md/qainsights/jmeter-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .gitignore
├── .python-version
├── analyzer
│   ├── __init__.py
│   ├── analyzer.py
│   ├── bottleneck
│   │   ├── __init__.py
│   │   └── analyzer.py
│   ├── insights
│   │   ├── __init__.py
│   │   └── generator.py
│   ├── mcp
│   │   └── __init__.py
│   ├── metrics
│   │   ├── __init__.py
│   │   └── calculator.py
│   ├── models.py
│   ├── parser
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── csv_parser.py
│   │   └── xml_parser.py
│   └── visualization
│       ├── __init__.py
│       └── engine.py
├── Dockerfile
├── images
│   ├── Anthropic-MCP.png
│   ├── Cursor.png
│   └── Windsurf.png
├── jmeter_report.html
├── jmeter_server.py
├── main.py
├── mcp_config.json
├── pyproject.toml
├── README.md
├── requirements_windsurf_reader.txt
├── requirements.txt
├── sample_test.jmx
├── smithery.yaml
├── tests
│   ├── __init__.py
│   ├── test_analyzer_models.py
│   ├── test_analyzer_parser.py
│   ├── test_bottleneck_analyzer.py
│   ├── test_csv_parser.py
│   ├── test_insights_generator.py
│   ├── test_jmeter_server.py
│   ├── test_metrics_calculator.py
│   ├── test_visualization_engine.py
│   └── test_xml_parser.py
├── windsurf_db_reader_alternative.py
└── windsurf_db_reader.py
```

# Files

--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------

```
1 | 3.13
2 | 
```

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------

```
 1 | .git
 2 | .gitignore
 3 | __pycache__
 4 | *.pyc
 5 | *.pyo
 6 | *.pyd
 7 | .Python
 8 | env/
 9 | venv/
10 | .env
11 | *.log
12 | .DS_Store
13 | Dockerfile
14 | .dockerignore
15 | README.md 
```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
1 | # JMeter Configuration
2 | JMETER_HOME=/path/to/apache-jmeter-5.6.3
3 | JMETER_BIN=${JMETER_HOME}/bin/jmeter
4 | 
5 | # Optional: JMeter Java options
6 | JMETER_JAVA_OPTS="-Xms1g -Xmx2g"
7 | 
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
  1 | # Byte-compiled / optimized / DLL files
  2 | __pycache__/
  3 | *.py[cod]
  4 | *$py.class
  5 | 
  6 | # C extensions
  7 | *.so
  8 | 
  9 | # Distribution / packaging
 10 | .Python
 11 | build/
 12 | develop-eggs/
 13 | dist/
 14 | downloads/
 15 | eggs/
 16 | .eggs/
 17 | lib/
 18 | lib64/
 19 | parts/
 20 | sdist/
 21 | var/
 22 | wheels/
 23 | share/python-wheels/
 24 | *.egg-info/
 25 | .installed.cfg
 26 | *.egg
 27 | MANIFEST
 28 | 
 29 | # PyInstaller
 30 | #  Usually these files are written by a python script from a template
 31 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
 32 | *.manifest
 33 | *.spec
 34 | 
 35 | # Installer logs
 36 | pip-log.txt
 37 | pip-delete-this-directory.txt
 38 | 
 39 | # Unit test / coverage reports
 40 | htmlcov/
 41 | .tox/
 42 | .nox/
 43 | .coverage
 44 | .coverage.*
 45 | .cache
 46 | nosetests.xml
 47 | coverage.xml
 48 | *.cover
 49 | *.py,cover
 50 | .hypothesis/
 51 | .pytest_cache/
 52 | cover/
 53 | 
 54 | # Translations
 55 | *.mo
 56 | *.pot
 57 | 
 58 | # Django stuff:
 59 | *.log
 60 | local_settings.py
 61 | db.sqlite3
 62 | db.sqlite3-journal
 63 | 
 64 | # Flask stuff:
 65 | instance/
 66 | .webassets-cache
 67 | 
 68 | # Scrapy stuff:
 69 | .scrapy
 70 | 
 71 | # Sphinx documentation
 72 | docs/_build/
 73 | 
 74 | # PyBuilder
 75 | .pybuilder/
 76 | target/
 77 | 
 78 | # Jupyter Notebook
 79 | .ipynb_checkpoints
 80 | 
 81 | # IPython
 82 | profile_default/
 83 | ipython_config.py
 84 | 
 85 | # pyenv
 86 | #   For a library or package, you might want to ignore these files since the code is
 87 | #   intended to run in multiple environments; otherwise, check them in:
 88 | # .python-version
 89 | 
 90 | # pipenv
 91 | #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 92 | #   However, in case of collaboration, if having platform-specific dependencies or dependencies
 93 | #   having no cross-platform support, pipenv may install dependencies that don't work, or not
 94 | #   install all needed dependencies.
 95 | #Pipfile.lock
 96 | 
 97 | # UV
 98 | #   Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
 99 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
100 | #   commonly ignored for libraries.
101 | uv.lock
102 | 
103 | # poetry
104 | #   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105 | #   This is especially recommended for binary packages to ensure reproducibility, and is more
106 | #   commonly ignored for libraries.
107 | #   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108 | #poetry.lock
109 | 
110 | # pdm
111 | #   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | #pdm.lock
113 | #   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114 | #   in version control.
115 | #   https://pdm.fming.dev/latest/usage/project/#working-with-version-control
116 | .pdm.toml
117 | .pdm-python
118 | .pdm-build/
119 | 
120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
121 | __pypackages__/
122 | 
123 | # Celery stuff
124 | celerybeat-schedule
125 | celerybeat.pid
126 | 
127 | # SageMath parsed files
128 | *.sage.py
129 | 
130 | # Environments
131 | .env
132 | .venv
133 | env/
134 | venv/
135 | ENV/
136 | env.bak/
137 | venv.bak/
138 | 
139 | # Spyder project settings
140 | .spyderproject
141 | .spyproject
142 | 
143 | # Rope project settings
144 | .ropeproject
145 | 
146 | # mkdocs documentation
147 | /site
148 | 
149 | # mypy
150 | .mypy_cache/
151 | .dmypy.json
152 | dmypy.json
153 | 
154 | # Pyre type checker
155 | .pyre/
156 | 
157 | # pytype static type analyzer
158 | .pytype/
159 | 
160 | # Cython debug symbols
161 | cython_debug/
162 | 
163 | # PyCharm
164 | #  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
165 | #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
166 | #  and can be added to the global gitignore or merged into this file.  For a more nuclear
167 | #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
168 | #.idea/
169 | 
170 | # Ruff stuff:
171 | .ruff_cache/
172 | 
173 | # PyPI configuration file
174 | .pypirc
175 | 
176 | .DS_Store
177 | 
178 | .env
179 | *.jtl
180 | *.csv
181 | .kiro/
182 | *.zip
183 | *.chat
184 | .kiro/debug/chats/1.chat
185 | 
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
  1 | # 🚀 JMeter MCP Server
  2 | 
  3 | This is a Model Context Protocol (MCP) server that allows executing JMeter tests through MCP-compatible clients and analyzing test results.
  4 | 
  5 | > [!IMPORTANT]
  6 | > 📢 Looking for an AI Assistant inside JMeter? 🚀
  7 | > Check out [Feather Wand](https://jmeter.ai)
  8 | 
  9 | ![Anthropic](./images/Anthropic-MCP.png)
 10 | ![Cursor](./images/Cursor.png)
 11 | ![Windsurf](./images/Windsurf.png)
 12 | 
 13 | ## 📋 Features
 14 | 
 15 | ### JMeter Execution
 16 | - 📊 Execute JMeter tests in non-GUI mode
 17 | - 🖥️ Launch JMeter in GUI mode
 18 | - 📝 Capture and return execution output
 19 | - 📊 Generate JMeter report dashboard
 20 | 
 21 | ### Test Results Analysis
 22 | - 📈 Parse and analyze JMeter test results (JTL files)
 23 | - 📊 Calculate comprehensive performance metrics
 24 | - 🔍 Identify performance bottlenecks automatically
 25 | - 💡 Generate actionable insights and recommendations
 26 | - 📊 Create visualizations of test results
 27 | - 📑 Generate HTML reports with analysis results
 28 | 
 29 | ## 🛠️ Installation
 30 | 
 31 | ### Local Installation
 32 | 
 33 | 1. Install [`uv`](https://github.com/astral-sh/uv).
 34 | 
 35 | 2. Ensure JMeter is installed on your system and accessible via the command line.
 36 | 
 37 | ⚠️ **Important**: Make sure JMeter is executable. You can do this by running:
 38 | 
 39 | ```bash
 40 | chmod +x /path/to/jmeter/bin/jmeter
 41 | ```
 42 | 
 43 | 3. Install required Python dependencies:
 44 | 
 45 | ```bash
 46 | pip install numpy matplotlib
 47 | ```
 48 | 
 49 | 4. Configure the `.env` file (refer to the `.env.example` file for details):
 50 | 
 51 | ```bash
 52 | # JMeter Configuration
 53 | JMETER_HOME=/path/to/apache-jmeter-5.6.3
 54 | JMETER_BIN=${JMETER_HOME}/bin/jmeter
 55 | 
 56 | # Optional: JMeter Java options
 57 | JMETER_JAVA_OPTS="-Xms1g -Xmx2g"
 58 | ```
 59 | 
 60 | ### 💻 MCP Usage
 61 | 
 62 | 1. Connect to the server using an MCP-compatible client (e.g., Claude Desktop, Cursor, Windsurf)
 63 | 
 64 | 2. Send a prompt to the server:
 65 | 
 66 | ```
 67 | Run JMeter test /path/to/test.jmx
 68 | ```
 69 | 
 70 | 3. The MCP-compatible client will use the available tools:
 71 | 
 72 | #### JMeter Execution Tools
 73 | - 🖥️ `execute_jmeter_test`: Launches JMeter in GUI mode; by design, JMeter does not execute the test plan in this mode
 74 | - 🚀 `execute_jmeter_test_non_gui`: Executes a JMeter test in non-GUI mode (the default, for better performance)
 75 | 
 76 | #### Test Results Analysis Tools
 77 | - 📊 `analyze_jmeter_results`: Analyze JMeter test results and provide a summary of key metrics and insights
 78 | - 🔍 `identify_performance_bottlenecks`: Identify performance bottlenecks in JMeter test results
 79 | - 💡 `get_performance_insights`: Get insights and recommendations for improving performance
 80 | - 📈 `generate_visualization`: Generate visualizations of JMeter test results
 81 | 
 82 | ## 🏗️ MCP Configuration
 83 | 
 84 | Add the following configuration to your MCP client config:
 85 | 
 86 | ```json
 87 | {
 88 |     "mcpServers": {
 89 |       "jmeter": {
 90 |         "command": "/path/to/uv",
 91 |         "args": [
 92 |           "--directory",
 93 |           "/path/to/jmeter-mcp-server",
 94 |           "run",
 95 |           "jmeter_server.py"
 96 |         ]
 97 |       }
 98 |     }
 99 | }
100 | ```
101 | 
102 | ## ✨ Use Cases
103 | 
104 | ### Test Execution
105 | - Run JMeter tests in non-GUI mode for better performance
106 | - Launch JMeter in GUI mode for test development
107 | - Generate JMeter report dashboards
108 | 
109 | ### Test Results Analysis
110 | - Analyze JTL files to understand performance characteristics
111 | - Identify performance bottlenecks and their severity
112 | - Get actionable recommendations for performance improvements
113 | - Generate visualizations for better understanding of results
114 | - Create comprehensive HTML reports for sharing with stakeholders
115 | 
116 | ## 🛑 Error Handling
117 | 
118 | The server will:
119 | 
120 | - Validate that the test file exists
121 | - Check that the file has a .jmx extension
122 | - Validate that JTL files exist and have valid formats
123 | - Capture and return any execution or analysis errors
124 | 
125 | ## 📊 Test Results Analyzer
126 | 
127 | The Test Results Analyzer is a powerful feature that helps you understand your JMeter test results better. It consists of several components:
128 | 
129 | ### Parser Module
130 | - Supports both XML and CSV JTL formats
131 | - Efficiently processes large files with streaming parsers
132 | - Validates file formats and handles errors gracefully
133 | 
134 | ### Metrics Calculator
135 | - Calculates overall performance metrics (average, median, percentiles)
136 | - Provides endpoint-specific metrics for detailed analysis
137 | - Generates time series metrics to track performance over time
138 | - Compares metrics with benchmarks for context
139 | 
140 | ### Bottleneck Analyzer
141 | - Identifies slow endpoints based on response times
142 | - Detects error-prone endpoints with high error rates
143 | - Finds response time anomalies and outliers
144 | - Analyzes the impact of concurrency on performance
145 | 
146 | ### Insights Generator
147 | - Provides specific recommendations for addressing bottlenecks
148 | - Analyzes error patterns and suggests solutions
149 | - Generates insights on scaling behavior and capacity limits
150 | - Prioritizes recommendations based on potential impact
151 | 
152 | ### Visualization Engine
153 | - Creates time series graphs showing performance over time
154 | - Generates distribution graphs for response time analysis
155 | - Produces endpoint comparison charts for identifying issues
156 | - Creates comprehensive HTML reports with all analysis results
157 | 
158 | ## 📝 Example Usage
159 | 
160 | ```
161 | # Run a JMeter test and generate a results file
162 | Run JMeter test sample_test.jmx in non-GUI mode and save results to results.jtl
163 | 
164 | # Analyze the results
165 | Analyze the JMeter test results in results.jtl and provide detailed insights
166 | 
167 | # Identify bottlenecks
168 | What are the performance bottlenecks in the results.jtl file?
169 | 
170 | # Get recommendations
171 | What recommendations do you have for improving performance based on results.jtl?
172 | 
173 | # Generate visualizations
174 | Create a time series graph of response times from results.jtl
175 | ```
```
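
The JMeter execution tools listed in the README are exposed by `jmeter_server.py`, which is not included on this page of the dump. The sketch below is only an illustration of how such a tool could be registered with `FastMCP`; the constructor and `run` call mirror `main.py`, the error strings mirror what `tests/test_jmeter_server.py` asserts, and everything else (the function body, the assumption of a `jmeter` binary on `PATH`) is illustrative rather than the repository's actual implementation.

```python
# Hypothetical sketch: how a tool such as execute_jmeter_test_non_gui
# could be wired up. The real implementation lives in jmeter_server.py.
import subprocess
from pathlib import Path

from mcp.server.fastmcp import FastMCP

mcp = FastMCP("jmeter")


@mcp.tool()
async def execute_jmeter_test_non_gui(test_file: str) -> str:
    """Run a JMeter test in non-GUI mode and return its console output."""
    path = Path(test_file)
    if not path.exists():
        return f"Error: Test file not found: {test_file}"
    if path.suffix != ".jmx":
        return f"Error: Invalid file type. Expected .jmx file: {test_file}"
    # Assumes the jmeter binary is resolvable via JMETER_BIN / PATH
    # (see .env.example above).
    result = subprocess.run(
        ["jmeter", "-n", "-t", str(path)], capture_output=True, text=True
    )
    if result.returncode != 0:
        return f"Error executing JMeter test:\n{result.stderr}"
    return result.stdout


if __name__ == "__main__":
    mcp.run(transport="stdio")
```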

--------------------------------------------------------------------------------
/requirements_windsurf_reader.txt:
--------------------------------------------------------------------------------

```
1 | plyvel>=1.3.0
2 | 
```

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------

```
1 | mcp[cli]<1.6.0
2 | 
```

--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------

```python
1 | """
2 | Test package initializer for jmeter_server tests.
3 | """
```

--------------------------------------------------------------------------------
/analyzer/mcp/__init__.py:
--------------------------------------------------------------------------------

```python
1 | """
2 | MCP interface for the JMeter Test Results Analyzer.
3 | 
4 | This module provides MCP tools for analyzing JMeter test results.
5 | """
```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
 1 | [project]
 2 | name = "jmeter-mcp-server"
 3 | version = "0.1.0"
 4 | description = "JMeter MCP Server"
 5 | readme = "README.md"
 6 | requires-python = ">=3.13"
 7 | dependencies = [
 8 |     "httpx>=0.28.1",
 9 |     "mcp[cli]>=1.6.0",
10 | ]
11 | 
```

--------------------------------------------------------------------------------
/mcp_config.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |     "mcpServers": {
 3 |       "jmeter": {
 4 |         "command": "/path/to/uv",
 5 |         "args": [
 6 |           "--directory",
 7 |           "/path/to/jmeter-mcp-server",
 8 |           "run",
 9 |           "jmeter_server.py"
10 |         ]
11 |       }
12 |     }
13 | }
14 | 
```

--------------------------------------------------------------------------------
/analyzer/metrics/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Metrics module for JMeter test results.
 3 | 
 4 | This module provides functionality for calculating performance metrics
 5 | from JMeter test results.
 6 | """
 7 | 
 8 | from analyzer.metrics.calculator import MetricsCalculator
 9 | 
10 | __all__ = ['MetricsCalculator']
```

--------------------------------------------------------------------------------
/analyzer/__init__.py:
--------------------------------------------------------------------------------

```python
1 | """
2 | JMeter Test Results Analyzer module.
3 | 
4 | This module provides functionality for analyzing JMeter test results,
5 | calculating performance metrics, identifying bottlenecks, and generating
6 | insights and recommendations.
7 | """
8 | 
9 | __version__ = '0.1.0'
```

--------------------------------------------------------------------------------
/analyzer/bottleneck/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Bottleneck analyzer module for JMeter test results.
 3 | 
 4 | This module provides functionality for identifying performance bottlenecks
 5 | in JMeter test results.
 6 | """
 7 | 
 8 | from analyzer.bottleneck.analyzer import BottleneckAnalyzer
 9 | 
10 | __all__ = ['BottleneckAnalyzer']
```

--------------------------------------------------------------------------------
/analyzer/visualization/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Visualization module for JMeter test results.
 3 | 
 4 | This module provides functionality for creating visual representations
 5 | of JMeter test results analysis.
 6 | """
 7 | 
 8 | from analyzer.visualization.engine import VisualizationEngine
 9 | 
10 | __all__ = ['VisualizationEngine']
```

--------------------------------------------------------------------------------
/analyzer/insights/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Insights module for JMeter test results.
 3 | 
 4 | This module provides functionality for generating insights and recommendations
 5 | based on JMeter test results analysis.
 6 | """
 7 | 
 8 | from analyzer.insights.generator import InsightsGenerator
 9 | 
10 | __all__ = ['InsightsGenerator']
```

--------------------------------------------------------------------------------
/analyzer/parser/__init__.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Parser module for JMeter test results.
 3 | 
 4 | This module provides functionality for parsing JMeter test results
 5 | from JTL files in both XML and CSV formats.
 6 | """
 7 | 
 8 | from analyzer.parser.base import JTLParser
 9 | from analyzer.parser.xml_parser import XMLJTLParser
10 | from analyzer.parser.csv_parser import CSVJTLParser
11 | 
12 | __all__ = ['JTLParser', 'XMLJTLParser', 'CSVJTLParser']
```

--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------

```python
 1 | from dotenv import load_dotenv
 2 | from mcp.server.fastmcp import FastMCP
 3 | import os
 4 | 
 5 | # Load environment variables
 6 | load_dotenv()
 7 | 
 8 | # Initialize MCP server
 9 | mcp = FastMCP("jmeter")
10 | 
11 | def main():
12 |     print("Starting JMeter MCP server...")
13 |     print(os.getenv('JMETER_HOME'))
14 |     print(os.getenv('JMETER_BIN'))
15 |     print(os.getenv('JMETER_JAVA_OPTS'))
16 |     mcp.run(transport='stdio')
17 | 
18 | if __name__ == "__main__":
19 |     main()
20 | 
```

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
 2 | 
 3 | startCommand:
 4 |   type: stdio
 5 |   configSchema:
 6 |     # JSON Schema defining the configuration options for the MCP.
 7 |     type: object
 8 |     properties: {}
 9 |   commandFunction:
10 |     # A function that produces the CLI command to start the MCP on stdio.
11 |     |-
12 |     (config) => ({command: 'python', args: [
13 |         "jmeter_server.py"
14 |       ], env: {}})
15 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | # Use Python base image
 2 | FROM python:3.10-slim
 3 | 
 4 | # Install OpenJDK and build dependencies
 5 | RUN apt-get update && \
 6 |     apt-get install -y default-jdk wget && \
 7 |     apt-get clean && \
 8 |     rm -rf /var/lib/apt/lists/*
 9 | 
10 | # Install JMeter
11 | ENV JMETER_VERSION="5.6.3"
12 | ENV JMETER_HOME="/opt/apache-jmeter-${JMETER_VERSION}"
13 | ENV PATH="$JMETER_HOME/bin:$PATH"
14 | 
15 | RUN wget https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-${JMETER_VERSION}.tgz && \
16 |     tar -xzf apache-jmeter-${JMETER_VERSION}.tgz -C /opt && \
17 |     rm apache-jmeter-${JMETER_VERSION}.tgz
18 | 
19 | # Set working directory
20 | WORKDIR /app
21 | 
22 | # Copy application files
23 | COPY . .
24 | 
25 | # Install Python dependencies
26 | RUN pip install --upgrade pip && \
27 |     pip install "mcp[cli]<1.6.0" && \
28 |     pip install --no-cache-dir -r requirements.txt
29 | 
30 | # Expose port (adjust if your server uses a different port)
31 | EXPOSE 8000
32 | 
33 | # Run the server
34 | CMD ["python", "jmeter_server.py"] 
```

--------------------------------------------------------------------------------
/analyzer/models.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Data models for the JMeter Test Results Analyzer.
  3 | 
  4 | This module defines the core data structures used throughout the analyzer,
  5 | including TestResults, Sample, and various metrics classes.
  6 | """
  7 | 
  8 | from dataclasses import dataclass, field
  9 | from datetime import datetime
 10 | from typing import Dict, List, Optional
 11 | 
 12 | 
 13 | @dataclass
 14 | class Sample:
 15 |     """Represents a single sample/request in a JMeter test."""
 16 |     
 17 |     timestamp: datetime
 18 |     label: str
 19 |     response_time: int  # in milliseconds
 20 |     success: bool
 21 |     response_code: str
 22 |     error_message: Optional[str] = None
 23 |     thread_name: Optional[str] = None
 24 |     bytes_sent: Optional[int] = None
 25 |     bytes_received: Optional[int] = None
 26 |     latency: Optional[int] = None  # in milliseconds
 27 |     connect_time: Optional[int] = None  # in milliseconds
 28 | 
 29 | 
 30 | @dataclass
 31 | class TestResults:
 32 |     """Represents the results of a JMeter test."""
 33 |     
 34 |     samples: List[Sample] = field(default_factory=list)
 35 |     start_time: Optional[datetime] = None
 36 |     end_time: Optional[datetime] = None
 37 |     
 38 |     def add_sample(self, sample: Sample) -> None:
 39 |         """Add a sample to the test results."""
 40 |         self.samples.append(sample)
 41 |         
 42 |         # Update start and end times
 43 |         if self.start_time is None or sample.timestamp < self.start_time:
 44 |             self.start_time = sample.timestamp
 45 |         if self.end_time is None or sample.timestamp > self.end_time:
 46 |             self.end_time = sample.timestamp
 47 | 
 48 | 
 49 | @dataclass
 50 | class OverallMetrics:
 51 |     """Represents overall metrics for a test or endpoint."""
 52 |     
 53 |     total_samples: int = 0
 54 |     error_count: int = 0
 55 |     error_rate: float = 0.0
 56 |     average_response_time: float = 0.0
 57 |     median_response_time: float = 0.0
 58 |     percentile_90: float = 0.0
 59 |     percentile_95: float = 0.0
 60 |     percentile_99: float = 0.0
 61 |     min_response_time: float = 0.0
 62 |     max_response_time: float = 0.0
 63 |     throughput: float = 0.0  # requests per second
 64 |     test_duration: float = 0.0  # in seconds
 65 | 
 66 | 
 67 | @dataclass
 68 | class EndpointMetrics(OverallMetrics):
 69 |     """Represents metrics for a specific endpoint/sampler."""
 70 |     
 71 |     endpoint: str = ""
 72 | 
 73 | 
 74 | @dataclass
 75 | class TimeSeriesMetrics:
 76 |     """Represents metrics for a specific time interval."""
 77 |     
 78 |     timestamp: datetime
 79 |     active_threads: int = 0
 80 |     throughput: float = 0.0
 81 |     average_response_time: float = 0.0
 82 |     error_rate: float = 0.0
 83 | 
 84 | 
 85 | @dataclass
 86 | class Bottleneck:
 87 |     """Represents a performance bottleneck."""
 88 |     
 89 |     endpoint: str
 90 |     metric_type: str  # response_time, error_rate, etc.
 91 |     value: float
 92 |     threshold: float
 93 |     severity: str  # high, medium, low
 94 | 
 95 | 
 96 | @dataclass
 97 | class Anomaly:
 98 |     """Represents a performance anomaly."""
 99 |     
100 |     timestamp: datetime
101 |     endpoint: str
102 |     expected_value: float
103 |     actual_value: float
104 |     deviation_percentage: float
105 | 
106 | 
107 | @dataclass
108 | class Recommendation:
109 |     """Represents a performance improvement recommendation."""
110 |     
111 |     issue: str
112 |     recommendation: str
113 |     expected_impact: str
114 |     implementation_difficulty: str  # high, medium, low
115 | 
116 | 
117 | @dataclass
118 | class Insight:
119 |     """Represents a performance insight."""
120 |     
121 |     topic: str
122 |     description: str
123 |     supporting_data: Dict = field(default_factory=dict)
```
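
As a quick usage illustration (not part of the repository), the snippet below builds a `TestResults` object by hand and shows how `add_sample` keeps `start_time` and `end_time` in sync; the values are borrowed from the test fixtures later in this dump.

```python
from datetime import datetime

from analyzer.models import Sample, TestResults

results = TestResults()
results.add_sample(Sample(
    timestamp=datetime.fromtimestamp(1625097600),
    label="Home Page",
    response_time=1234,   # milliseconds
    success=True,
    response_code="200",
))
results.add_sample(Sample(
    timestamp=datetime.fromtimestamp(1625097602),
    label="API Call",
    response_time=3456,
    success=False,
    response_code="500",
    error_message="Internal Server Error",
))

# add_sample tracks the earliest and latest timestamps seen so far.
assert results.start_time == datetime.fromtimestamp(1625097600)
assert results.end_time == datetime.fromtimestamp(1625097602)
assert len(results.samples) == 2
```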

--------------------------------------------------------------------------------
/analyzer/parser/base.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Base parser interface for JMeter test results.
  3 | 
  4 | This module defines the base interface for JTL parsers.
  5 | """
  6 | 
  7 | import abc
  8 | from pathlib import Path
  9 | from typing import Union
 10 | 
 11 | from analyzer.models import TestResults
 12 | 
 13 | 
 14 | class JTLParser(abc.ABC):
 15 |     """Base class for JTL parsers."""
 16 |     
 17 |     @abc.abstractmethod
 18 |     def parse_file(self, file_path: Union[str, Path]) -> TestResults:
 19 |         """Parse a JTL file and return structured test results.
 20 |         
 21 |         Args:
 22 |             file_path: Path to the JTL file
 23 |             
 24 |         Returns:
 25 |             TestResults object containing parsed data
 26 |             
 27 |         Raises:
 28 |             FileNotFoundError: If the file does not exist
 29 |             ValueError: If the file format is invalid
 30 |         """
 31 |         pass
 32 |     
 33 |     @staticmethod
 34 |     def validate_file(file_path: Union[str, Path]) -> bool:
 35 |         """Validate that the file exists and has a valid extension.
 36 |         
 37 |         Args:
 38 |             file_path: Path to the JTL file
 39 |             
 40 |         Returns:
 41 |             True if the file is valid, False otherwise
 42 |         """
 43 |         path = Path(file_path)
 44 |         
 45 |         # Check if file exists
 46 |         if not path.exists():
 47 |             return False
 48 |         
 49 |         # Check if file has a valid extension
 50 |         valid_extensions = ['.jtl', '.xml', '.csv']
 51 |         if path.suffix.lower() not in valid_extensions:
 52 |             return False
 53 |         
 54 |         return True
 55 |     
 56 |     @staticmethod
 57 |     def detect_format(file_path: Union[str, Path]) -> str:
 58 |         """Detect whether the JTL file is in XML or CSV format.
 59 |         
 60 |         Args:
 61 |             file_path: Path to the JTL file
 62 |             
 63 |         Returns:
 64 |             'xml' or 'csv'
 65 |             
 66 |         Raises:
 67 |             FileNotFoundError: If the file does not exist
 68 |             ValueError: If the format cannot be determined
 69 |         """
 70 |         path = Path(file_path)
 71 |         
 72 |         # Check if file exists
 73 |         if not path.exists():
 74 |             raise FileNotFoundError(f"File not found: {file_path}")
 75 |         
 76 |         # Try to determine format based on content
 77 |         with open(path, 'r', encoding='utf-8') as f:
 78 |             first_line = f.readline().strip()
 79 |             
 80 |             # Check for XML declaration
 81 |             if first_line.startswith('<?xml'):
 82 |                 return 'xml'
 83 |             
 84 |             # Check for CSV header
 85 |             if ',' in first_line and ('timeStamp' in first_line or 'elapsed' in first_line):
 86 |                 return 'csv'
 87 |             
 88 |             # If we can't determine from the first line, check file extension
 89 |             if path.suffix.lower() == '.xml':
 90 |                 return 'xml'
 91 |             if path.suffix.lower() == '.csv':
 92 |                 return 'csv'
 93 |             if path.suffix.lower() == '.jtl':
 94 |                 # For .jtl files, we need to look at more content
 95 |                 f.seek(0)
 96 |                 content = f.read(1000)  # Read first 1000 chars
 97 |                 if '<?xml' in content:
 98 |                     return 'xml'
 99 |                 if ',' in content and ('timeStamp' in content or 'elapsed' in content):
100 |                     return 'csv'
101 |         
102 |         raise ValueError(f"Could not determine format of file: {file_path}")
```
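
Because `validate_file` and `detect_format` are static methods, they can be used without instantiating a concrete parser. A minimal sketch follows; the `results.jtl` path is a placeholder, not a file from the repository.

```python
from analyzer.parser.base import JTLParser

jtl_path = "results.jtl"  # placeholder: any .jtl, .xml, or .csv results file

if JTLParser.validate_file(jtl_path):
    # detect_format() looks at the first line (XML declaration vs. CSV header)
    # and, for .jtl files, falls back to peeking at the first 1000 characters.
    fmt = JTLParser.detect_format(jtl_path)  # 'xml' or 'csv'
    print(f"{jtl_path} looks like a {fmt} JTL file")
else:
    print(f"{jtl_path} is missing or has an unsupported extension")
```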

--------------------------------------------------------------------------------
/tests/test_xml_parser.py:
--------------------------------------------------------------------------------

```python
 1 | """
 2 | Tests for the XML JTL parser.
 3 | """
 4 | 
 5 | import os
 6 | import tempfile
 7 | import unittest
 8 | from datetime import datetime
 9 | from pathlib import Path
10 | 
11 | from analyzer.parser.xml_parser import XMLJTLParser
12 | 
13 | 
14 | class TestXMLJTLParser(unittest.TestCase):
15 |     """Tests for the XMLJTLParser class."""
16 |     
17 |     def setUp(self):
18 |         """Set up test fixtures."""
19 |         self.parser = XMLJTLParser()
20 |         
21 |         # Create a sample XML JTL file
22 |         self.xml_content = """<?xml version="1.0" encoding="UTF-8"?>
23 | <testResults version="1.2">
24 | <httpSample t="1234" lt="1000" ts="1625097600000" s="true" lb="Home Page" rc="200" rm="" tn="Thread Group 1-1" by="1234" sby="1234" ct="800"/>
25 | <httpSample t="2345" lt="2000" ts="1625097601000" s="true" lb="Login Page" rc="200" rm="" tn="Thread Group 1-1" by="2345" sby="2345" ct="900"/>
26 | <httpSample t="3456" lt="3000" ts="1625097602000" s="false" lb="API Call" rc="500" rm="Internal Server Error" tn="Thread Group 1-2" by="3456" sby="345" ct="1000"/>
27 | </testResults>
28 | """
29 |         self.xml_file = tempfile.NamedTemporaryFile(suffix='.xml', mode='w', delete=False)
30 |         self.xml_file.write(self.xml_content)
31 |         self.xml_file.close()
32 |     
33 |     def tearDown(self):
34 |         """Tear down test fixtures."""
35 |         os.unlink(self.xml_file.name)
36 |     
37 |     def test_parse_file(self):
38 |         """Test parsing an XML JTL file."""
39 |         test_results = self.parser.parse_file(self.xml_file.name)
40 |         
41 |         # Check that we have the correct number of samples
42 |         self.assertEqual(len(test_results.samples), 3)
43 |         
44 |         # Check the first sample
45 |         sample1 = test_results.samples[0]
46 |         self.assertEqual(sample1.label, "Home Page")
47 |         self.assertEqual(sample1.response_time, 1234)
48 |         self.assertTrue(sample1.success)
49 |         self.assertEqual(sample1.response_code, "200")
50 |         self.assertEqual(sample1.thread_name, "Thread Group 1-1")
51 |         self.assertEqual(sample1.bytes_received, 1234)
52 |         self.assertEqual(sample1.bytes_sent, 1234)
53 |         self.assertEqual(sample1.latency, 1000)
54 |         self.assertEqual(sample1.connect_time, 800)
55 |         
56 |         # Check the third sample (error)
57 |         sample3 = test_results.samples[2]
58 |         self.assertEqual(sample3.label, "API Call")
59 |         self.assertEqual(sample3.response_time, 3456)
60 |         self.assertFalse(sample3.success)
61 |         self.assertEqual(sample3.response_code, "500")
62 |         self.assertEqual(sample3.error_message, "Internal Server Error")
63 |         
64 |         # Check start and end times
65 |         expected_start = datetime.fromtimestamp(1625097600)
66 |         expected_end = datetime.fromtimestamp(1625097602)
67 |         self.assertEqual(test_results.start_time, expected_start)
68 |         self.assertEqual(test_results.end_time, expected_end)
69 |     
70 |     def test_file_not_found(self):
71 |         """Test parsing a non-existent file."""
72 |         with self.assertRaises(FileNotFoundError):
73 |             self.parser.parse_file('/path/to/nonexistent/file.xml')
74 |     
75 |     def test_invalid_format(self):
76 |         """Test parsing a file with invalid format."""
77 |         # Create a non-XML file
78 |         with tempfile.NamedTemporaryFile(suffix='.xml', mode='w', delete=False) as tmp:
79 |             tmp.write("This is not XML")
80 |         
81 |         try:
82 |             with self.assertRaises(ValueError):
83 |                 self.parser.parse_file(tmp.name)
84 |         finally:
85 |             os.unlink(tmp.name)
86 | 
87 | 
88 | if __name__ == '__main__':
89 |     unittest.main()
```

--------------------------------------------------------------------------------
/analyzer/parser/xml_parser.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | XML parser for JMeter test results.
  3 | 
  4 | This module provides functionality for parsing JMeter test results
  5 | from JTL files in XML format using SAX for efficient processing.
  6 | """
  7 | 
  8 | import xml.sax
  9 | from datetime import datetime
 10 | from pathlib import Path
 11 | from typing import Union
 12 | 
 13 | from analyzer.models import Sample, TestResults
 14 | from analyzer.parser.base import JTLParser
 15 | 
 16 | 
 17 | class JMeterXMLHandler(xml.sax.ContentHandler):
 18 |     """SAX handler for JMeter XML results."""
 19 |     
 20 |     def __init__(self, test_results: TestResults):
 21 |         """Initialize the handler.
 22 |         
 23 |         Args:
 24 |             test_results: TestResults object to populate
 25 |         """
 26 |         super().__init__()
 27 |         self.test_results = test_results
 28 |     
 29 |     def startElement(self, tag, attributes):
 30 |         """Process start element.
 31 |         
 32 |         Args:
 33 |             tag: Element tag name
 34 |             attributes: Element attributes
 35 |         """
 36 |         # Process httpSample or sample elements
 37 |         if tag in ["httpSample", "sample"]:
 38 |             try:
 39 |                 # Parse timestamp
 40 |                 ts = int(attributes.get("ts", "0")) / 1000  # Convert from ms to seconds
 41 |                 timestamp = datetime.fromtimestamp(ts)
 42 |                 
 43 |                 # Create sample
 44 |                 sample = Sample(
 45 |                     timestamp=timestamp,
 46 |                     label=attributes.get("lb", ""),
 47 |                     response_time=int(attributes.get("t", "0")),
 48 |                     success=attributes.get("s", "true").lower() == "true",
 49 |                     response_code=attributes.get("rc", ""),
 50 |                     error_message=attributes.get("rm", ""),
 51 |                     thread_name=attributes.get("tn", ""),
 52 |                     bytes_received=int(attributes.get("by", "0")),
 53 |                     bytes_sent=int(attributes.get("sby", "0")),
 54 |                     latency=int(attributes.get("lt", "0")),
 55 |                     connect_time=int(attributes.get("ct", "0"))
 56 |                 )
 57 |                 
 58 |                 # Add sample to test results
 59 |                 self.test_results.add_sample(sample)
 60 |                 
 61 |             except (ValueError, KeyError) as e:
 62 |                 # Log error but continue processing
 63 |                 print(f"Error parsing sample: {e}")
 64 | 
 65 | 
 66 | class XMLJTLParser(JTLParser):
 67 |     """Parser for JMeter JTL files in XML format."""
 68 |     
 69 |     def parse_file(self, file_path: Union[str, Path]) -> TestResults:
 70 |         """Parse a JTL file in XML format.
 71 |         
 72 |         Args:
 73 |             file_path: Path to the JTL file
 74 |             
 75 |         Returns:
 76 |             TestResults object containing parsed data
 77 |             
 78 |         Raises:
 79 |             FileNotFoundError: If the file does not exist
 80 |             ValueError: If the file format is invalid
 81 |         """
 82 |         path = Path(file_path)
 83 |         
 84 |         # Validate file
 85 |         if not path.exists():
 86 |             raise FileNotFoundError(f"File not found: {file_path}")
 87 |         
 88 |         # Detect format
 89 |         format_name = self.detect_format(path)
 90 |         if format_name != "xml":
 91 |             raise ValueError(f"Invalid file format. Expected XML, got {format_name}")
 92 |         
 93 |         # Create test results object
 94 |         test_results = TestResults()
 95 |         
 96 |         # Create SAX parser
 97 |         parser = xml.sax.make_parser()
 98 |         parser.setFeature(xml.sax.handler.feature_namespaces, 0)
 99 |         
100 |         # Create and set content handler
101 |         handler = JMeterXMLHandler(test_results)
102 |         parser.setContentHandler(handler)
103 |         
104 |         try:
105 |             # Parse the file
106 |             parser.parse(str(path))
107 |         except Exception as e:
108 |             raise ValueError(f"Error parsing XML file: {e}")
109 |         
110 |         return test_results
```
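
A minimal usage sketch for this SAX-based parser; the `results.xml` path is a placeholder rather than a file from the repository.

```python
from analyzer.parser.xml_parser import XMLJTLParser

parser = XMLJTLParser()
results = parser.parse_file("results.xml")  # placeholder path

failed = [s for s in results.samples if not s.success]
print(f"Parsed {len(results.samples)} samples between "
      f"{results.start_time} and {results.end_time}; {len(failed)} failed")
```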

--------------------------------------------------------------------------------
/tests/test_analyzer_parser.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the analyzer parser module.
  3 | """
  4 | 
  5 | import os
  6 | import tempfile
  7 | import unittest
  8 | from pathlib import Path
  9 | 
 10 | from analyzer.parser.base import JTLParser
 11 | 
 12 | 
 13 | class TestJTLParserBase(unittest.TestCase):
 14 |     """Tests for the base JTLParser class."""
 15 |     
 16 |     def test_validate_file_exists(self):
 17 |         """Test validating that a file exists."""
 18 |         # Create a temporary file
 19 |         with tempfile.NamedTemporaryFile(suffix='.jtl') as tmp:
 20 |             self.assertTrue(JTLParser.validate_file(tmp.name))
 21 |     
 22 |     def test_validate_file_not_exists(self):
 23 |         """Test validating a non-existent file."""
 24 |         self.assertFalse(JTLParser.validate_file('/path/to/nonexistent/file.jtl'))
 25 |     
 26 |     def test_validate_file_extension(self):
 27 |         """Test validating file extensions."""
 28 |         # Create temporary files with different extensions
 29 |         with tempfile.NamedTemporaryFile(suffix='.jtl') as jtl_file, \
 30 |              tempfile.NamedTemporaryFile(suffix='.xml') as xml_file, \
 31 |              tempfile.NamedTemporaryFile(suffix='.csv') as csv_file, \
 32 |              tempfile.NamedTemporaryFile(suffix='.txt') as txt_file:
 33 |             
 34 |             self.assertTrue(JTLParser.validate_file(jtl_file.name))
 35 |             self.assertTrue(JTLParser.validate_file(xml_file.name))
 36 |             self.assertTrue(JTLParser.validate_file(csv_file.name))
 37 |             self.assertFalse(JTLParser.validate_file(txt_file.name))
 38 |     
 39 |     def test_detect_format_xml(self):
 40 |         """Test detecting XML format."""
 41 |         # Create a temporary XML file
 42 |         with tempfile.NamedTemporaryFile(suffix='.xml', mode='w', delete=False) as tmp:
 43 |             tmp.write('<?xml version="1.0" encoding="UTF-8"?>\n<testResults>\n</testResults>')
 44 |         
 45 |         try:
 46 |             self.assertEqual(JTLParser.detect_format(tmp.name), 'xml')
 47 |         finally:
 48 |             os.unlink(tmp.name)
 49 |     
 50 |     def test_detect_format_csv(self):
 51 |         """Test detecting CSV format."""
 52 |         # Create a temporary CSV file
 53 |         with tempfile.NamedTemporaryFile(suffix='.csv', mode='w', delete=False) as tmp:
 54 |             tmp.write('timeStamp,elapsed,label,responseCode,success\n')
 55 |             tmp.write('1625097600000,100,Test,200,true\n')
 56 |         
 57 |         try:
 58 |             self.assertEqual(JTLParser.detect_format(tmp.name), 'csv')
 59 |         finally:
 60 |             os.unlink(tmp.name)
 61 |     
 62 |     def test_detect_format_jtl_xml(self):
 63 |         """Test detecting XML format in a .jtl file."""
 64 |         # Create a temporary JTL file with XML content
 65 |         with tempfile.NamedTemporaryFile(suffix='.jtl', mode='w', delete=False) as tmp:
 66 |             tmp.write('<?xml version="1.0" encoding="UTF-8"?>\n<testResults>\n</testResults>')
 67 |         
 68 |         try:
 69 |             self.assertEqual(JTLParser.detect_format(tmp.name), 'xml')
 70 |         finally:
 71 |             os.unlink(tmp.name)
 72 |     
 73 |     def test_detect_format_jtl_csv(self):
 74 |         """Test detecting CSV format in a .jtl file."""
 75 |         # Create a temporary JTL file with CSV content
 76 |         with tempfile.NamedTemporaryFile(suffix='.jtl', mode='w', delete=False) as tmp:
 77 |             tmp.write('timeStamp,elapsed,label,responseCode,success\n')
 78 |             tmp.write('1625097600000,100,Test,200,true\n')
 79 |         
 80 |         try:
 81 |             self.assertEqual(JTLParser.detect_format(tmp.name), 'csv')
 82 |         finally:
 83 |             os.unlink(tmp.name)
 84 |     
 85 |     def test_detect_format_file_not_found(self):
 86 |         """Test detecting format of a non-existent file."""
 87 |         with self.assertRaises(FileNotFoundError):
 88 |             JTLParser.detect_format('/path/to/nonexistent/file.jtl')
 89 |     
 90 |     def test_detect_format_unknown(self):
 91 |         """Test detecting format of a file with unknown format."""
 92 |         # Create a temporary file with unknown content
 93 |         with tempfile.NamedTemporaryFile(suffix='.txt', mode='w', delete=False) as tmp:
 94 |             tmp.write('This is not a JTL file\n')
 95 |         
 96 |         try:
 97 |             with self.assertRaises(ValueError):
 98 |                 JTLParser.detect_format(tmp.name)
 99 |         finally:
100 |             os.unlink(tmp.name)
101 | 
102 | 
103 | if __name__ == '__main__':
104 |     unittest.main()
```

--------------------------------------------------------------------------------
/tests/test_jmeter_server.py:
--------------------------------------------------------------------------------

```python
  1 | import sys
  2 | import types
  3 | import os
  4 | import tempfile
  5 | import unittest
  6 | from unittest import mock
  7 | 
  8 | # Stub external dependencies before importing jmeter_server
  9 | sys.modules['mcp'] = types.ModuleType('mcp')
 10 | sys.modules['mcp.server'] = types.ModuleType('mcp.server')
 11 | fastmcp_mod = types.ModuleType('mcp.server.fastmcp')
 12 | class FastMCP:
 13 |     def __init__(self, *args, **kwargs):
 14 |         pass
 15 |     def tool(self, *args, **kwargs):
 16 |         def decorator(func):
 17 |             return func
 18 |         return decorator
 19 |     def run(self, *args, **kwargs):
 20 |         pass
 21 | fastmcp_mod.FastMCP = FastMCP
 22 | sys.modules['mcp.server.fastmcp'] = fastmcp_mod
 23 | # Stub dotenv.load_dotenv
 24 | sys.modules['dotenv'] = types.ModuleType('dotenv')
 25 | sys.modules['dotenv'].load_dotenv = lambda: None
 26 | 
 27 | import jmeter_server
 28 | 
 29 | 
 30 | class TestRunJMeter(unittest.IsolatedAsyncioTestCase):
 31 |     async def test_file_not_found(self):
 32 |         result = await jmeter_server.run_jmeter("nonexistent.jmx")
 33 |         self.assertEqual(
 34 |             result,
 35 |             "Error: Test file not found: nonexistent.jmx"
 36 |         )
 37 | 
 38 |     async def test_invalid_file_type(self):
 39 |         with tempfile.NamedTemporaryFile(suffix=".txt") as tmp:
 40 |             result = await jmeter_server.run_jmeter(tmp.name)
 41 |             self.assertEqual(
 42 |                 result,
 43 |                 f"Error: Invalid file type. Expected .jmx file: {tmp.name}"
 44 |             )
 45 | 
 46 |     @mock.patch('jmeter_server.subprocess.run')
 47 |     async def test_non_gui_success(self, mock_run):
 48 |         # Prepare a dummy .jmx file
 49 |         with tempfile.NamedTemporaryFile(suffix=".jmx", delete=False) as tmp:
 50 |             test_file = tmp.name
 51 |         # Fake successful subprocess result
 52 |         class DummyResult:
 53 |             returncode = 0
 54 |             stdout = "Success output"
 55 |             stderr = ""
 56 | 
 57 |         mock_run.return_value = DummyResult()
 58 |         result = await jmeter_server.run_jmeter(test_file, non_gui=True)
 59 |         self.assertEqual(result, "Success output")
 60 |         os.unlink(test_file)
 61 | 
 62 |     @mock.patch('jmeter_server.subprocess.run')
 63 |     async def test_non_gui_failure(self, mock_run):
 64 |         # Prepare a dummy .jmx file
 65 |         with tempfile.NamedTemporaryFile(suffix=".jmx", delete=False) as tmp:
 66 |             test_file = tmp.name
 67 |         # Fake failing subprocess result
 68 |         class DummyResult:
 69 |             returncode = 1
 70 |             stdout = ""
 71 |             stderr = "Error occurred"
 72 | 
 73 |         mock_run.return_value = DummyResult()
 74 |         result = await jmeter_server.run_jmeter(test_file, non_gui=True)
 75 |         self.assertEqual(
 76 |             result,
 77 |             "Error executing JMeter test:\nError occurred"
 78 |         )
 79 |         os.unlink(test_file)
 80 | 
 81 |     @mock.patch('jmeter_server.subprocess.Popen')
 82 |     async def test_gui_mode(self, mock_popen):
 83 |         # Prepare a dummy .jmx file
 84 |         with tempfile.NamedTemporaryFile(suffix=".jmx", delete=False) as tmp:
 85 |             test_file = tmp.name
 86 |         result = await jmeter_server.run_jmeter(test_file, non_gui=False)
 87 |         self.assertEqual(result, "JMeter GUI launched successfully")
 88 |         mock_popen.assert_called()
 89 |         os.unlink(test_file)
 90 | 
 91 |     @mock.patch('jmeter_server.run_jmeter', new_callable=mock.AsyncMock)
 92 |     async def test_execute_jmeter_test_default(self, mock_run_jmeter):
 93 |         mock_run_jmeter.return_value = "wrapped output"
 94 |         result = await jmeter_server.execute_jmeter_test("file.jmx")
 95 |         mock_run_jmeter.assert_awaited_with("file.jmx", non_gui=True)
 96 |         self.assertEqual(result, "wrapped output")
 97 | 
 98 |     @mock.patch('jmeter_server.run_jmeter', new_callable=mock.AsyncMock)
 99 |     async def test_execute_jmeter_test_gui(self, mock_run_jmeter):
100 |         mock_run_jmeter.return_value = "gui output"
101 |         result = await jmeter_server.execute_jmeter_test("file.jmx", gui_mode=True)
102 |         mock_run_jmeter.assert_awaited_with("file.jmx", non_gui=False)
103 |         self.assertEqual(result, "gui output")
104 | 
105 |     @mock.patch('jmeter_server.run_jmeter', new_callable=mock.AsyncMock)
106 |     async def test_execute_jmeter_test_non_gui(self, mock_run_jmeter):
107 |         mock_run_jmeter.return_value = "non-gui output"
108 |         result = await jmeter_server.execute_jmeter_test_non_gui("file.jmx")
109 |         mock_run_jmeter.assert_awaited_with("file.jmx", non_gui=True)
110 |         self.assertEqual(result, "non-gui output")
111 | 
112 | 
113 | class TestUnexpectedError(unittest.IsolatedAsyncioTestCase):
114 |     @mock.patch('jmeter_server.Path.resolve', side_effect=Exception("resolve error"))
115 |     async def test_unexpected_error(self, mock_resolve):
116 |         result = await jmeter_server.run_jmeter("any.jmx")
117 |         self.assertTrue(result.startswith("Unexpected error: resolve error"))
```

--------------------------------------------------------------------------------
/analyzer/parser/csv_parser.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | CSV parser for JMeter test results.
  3 | 
  4 | This module provides functionality for parsing JMeter test results
  5 | from JTL files in CSV format using streaming for efficient processing.
  6 | """
  7 | 
  8 | import csv
  9 | from datetime import datetime
 10 | from pathlib import Path
 11 | from typing import Dict, List, Union
 12 | 
 13 | from analyzer.models import Sample, TestResults
 14 | from analyzer.parser.base import JTLParser
 15 | 
 16 | 
 17 | class CSVJTLParser(JTLParser):
 18 |     """Parser for JMeter JTL files in CSV format."""
 19 |     
 20 |     # Default column mappings for JMeter CSV output
 21 |     DEFAULT_COLUMN_MAPPINGS = {
 22 |         'timestamp': 'timeStamp',
 23 |         'label': 'label',
 24 |         'response_time': 'elapsed',
 25 |         'success': 'success',
 26 |         'response_code': 'responseCode',
 27 |         'error_message': 'responseMessage',
 28 |         'thread_name': 'threadName',
 29 |         'bytes_received': 'bytes',
 30 |         'bytes_sent': 'sentBytes',
 31 |         'latency': 'Latency',
 32 |         'connect_time': 'Connect'
 33 |     }
 34 |     
 35 |     def __init__(self, column_mappings: Dict[str, str] = None):
 36 |         """Initialize the parser.
 37 |         
 38 |         Args:
 39 |             column_mappings: Custom column mappings (default: None)
 40 |         """
 41 |         self.column_mappings = column_mappings or self.DEFAULT_COLUMN_MAPPINGS
 42 |     
 43 |     def parse_file(self, file_path: Union[str, Path]) -> TestResults:
 44 |         """Parse a JTL file in CSV format.
 45 |         
 46 |         Args:
 47 |             file_path: Path to the JTL file
 48 |             
 49 |         Returns:
 50 |             TestResults object containing parsed data
 51 |             
 52 |         Raises:
 53 |             FileNotFoundError: If the file does not exist
 54 |             ValueError: If the file format is invalid
 55 |         """
 56 |         path = Path(file_path)
 57 |         
 58 |         # Validate file
 59 |         if not path.exists():
 60 |             raise FileNotFoundError(f"File not found: {file_path}")
 61 |         
 62 |         # Detect format
 63 |         format_name = self.detect_format(path)
 64 |         if format_name != "csv":
 65 |             raise ValueError(f"Invalid file format. Expected CSV, got {format_name}")
 66 |         
 67 |         # Create test results object
 68 |         test_results = TestResults()
 69 |         
 70 |         try:
 71 |             # Open and parse the CSV file
 72 |             with open(path, 'r', newline='', encoding='utf-8') as csvfile:
 73 |                 reader = csv.DictReader(csvfile)
 74 |                 
 75 |                 # Validate that required columns are present
 76 |                 if not reader.fieldnames:
 77 |                     raise ValueError("CSV file has no header row")
 78 |                 
 79 |                 # Check if we can map all required columns
 80 |                 missing_columns = []
 81 |                 column_indices = {}
 82 |                 
 83 |                 for model_field, csv_field in self.column_mappings.items():
 84 |                     if csv_field not in reader.fieldnames:
 85 |                         missing_columns.append(csv_field)
 86 |                     else:
 87 |                         column_indices[model_field] = reader.fieldnames.index(csv_field)
 88 |                 
 89 |                 if missing_columns:
 90 |                     raise ValueError(f"CSV file is missing required columns: {', '.join(missing_columns)}")
 91 |                 
 92 |                 # Process each row
 93 |                 for row in reader:
 94 |                     try:
 95 |                         # Parse timestamp (convert from milliseconds to seconds)
 96 |                         ts = int(row[self.column_mappings['timestamp']]) / 1000
 97 |                         timestamp = datetime.fromtimestamp(ts)
 98 |                         
 99 |                         # Parse success (convert string to boolean)
100 |                         success_str = row[self.column_mappings['success']].lower()
101 |                         success = success_str == "true" or success_str == "1"
102 |                         
103 |                         # Create sample
104 |                         sample = Sample(
105 |                             timestamp=timestamp,
106 |                             label=row[self.column_mappings['label']],
107 |                             response_time=int(row[self.column_mappings['response_time']]),
108 |                             success=success,
109 |                             response_code=row[self.column_mappings['response_code']],
110 |                             error_message=row.get(self.column_mappings['error_message'], ""),
111 |                             thread_name=row.get(self.column_mappings['thread_name'], ""),
112 |                             bytes_received=int(row.get(self.column_mappings['bytes_received'], 0)),
113 |                             bytes_sent=int(row.get(self.column_mappings['bytes_sent'], 0)),
114 |                             latency=int(row.get(self.column_mappings['latency'], 0)),
115 |                             connect_time=int(row.get(self.column_mappings['connect_time'], 0))
116 |                         )
117 |                         
118 |                         # Add sample to test results
119 |                         test_results.add_sample(sample)
120 |                         
121 |                     except (ValueError, KeyError) as e:
122 |                         # Log error but continue processing
123 |                         print(f"Error parsing row: {e}")
124 |                         continue
125 |         
126 |         except Exception as e:
127 |             raise ValueError(f"Error parsing CSV file: {e}")
128 |         
129 |         return test_results
```
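
A short sketch of how the column mappings above might be used; the file path and the renamed column names are hypothetical.

```python
from analyzer.parser.csv_parser import CSVJTLParser

# Default JMeter CSV headers work out of the box.
results = CSVJTLParser().parse_file("results.csv")  # placeholder path
print(f"{len(results.samples)} samples parsed")

# Hypothetical CSV whose 'label' and 'responseCode' headers were renamed.
# detect_format() still needs a 'timeStamp' or 'elapsed' column in the header
# to recognise the file as CSV, so only the other names are remapped.
custom_mappings = dict(CSVJTLParser.DEFAULT_COLUMN_MAPPINGS,
                       label="transaction",
                       response_code="status")
custom_parser = CSVJTLParser(column_mappings=custom_mappings)
```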

--------------------------------------------------------------------------------
/tests/test_csv_parser.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the CSV JTL parser.
  3 | """
  4 | 
  5 | import os
  6 | import tempfile
  7 | import unittest
  8 | from datetime import datetime
  9 | from pathlib import Path
 10 | 
 11 | from analyzer.parser.csv_parser import CSVJTLParser
 12 | 
 13 | 
 14 | class TestCSVJTLParser(unittest.TestCase):
 15 |     """Tests for the CSVJTLParser class."""
 16 |     
 17 |     def setUp(self):
 18 |         """Set up test fixtures."""
 19 |         self.parser = CSVJTLParser()
 20 |         
 21 |         # Create a sample CSV JTL file
 22 |         self.csv_content = """timeStamp,elapsed,label,responseCode,success,threadName,bytes,sentBytes,Latency,Connect,responseMessage
 23 | 1625097600000,1234,Home Page,200,true,Thread Group 1-1,12345,1234,1000,800,
 24 | 1625097601000,2345,Login Page,200,true,Thread Group 1-1,23456,2345,2000,900,
 25 | 1625097602000,3456,API Call,500,false,Thread Group 1-2,3456,345,3000,1000,Internal Server Error
 26 | """
 27 |         self.csv_file = tempfile.NamedTemporaryFile(suffix='.csv', mode='w', delete=False)
 28 |         self.csv_file.write(self.csv_content)
 29 |         self.csv_file.close()
 30 |     
 31 |     def tearDown(self):
 32 |         """Tear down test fixtures."""
 33 |         os.unlink(self.csv_file.name)
 34 |     
 35 |     def test_parse_file(self):
 36 |         """Test parsing a CSV JTL file."""
 37 |         test_results = self.parser.parse_file(self.csv_file.name)
 38 |         
 39 |         # Check that we have the correct number of samples
 40 |         self.assertEqual(len(test_results.samples), 3)
 41 |         
 42 |         # Check the first sample
 43 |         sample1 = test_results.samples[0]
 44 |         self.assertEqual(sample1.label, "Home Page")
 45 |         self.assertEqual(sample1.response_time, 1234)
 46 |         self.assertTrue(sample1.success)
 47 |         self.assertEqual(sample1.response_code, "200")
 48 |         self.assertEqual(sample1.thread_name, "Thread Group 1-1")
 49 |         self.assertEqual(sample1.bytes_received, 12345)
 50 |         self.assertEqual(sample1.bytes_sent, 1234)
 51 |         self.assertEqual(sample1.latency, 1000)
 52 |         self.assertEqual(sample1.connect_time, 800)
 53 |         
 54 |         # Check the third sample (error)
 55 |         sample3 = test_results.samples[2]
 56 |         self.assertEqual(sample3.label, "API Call")
 57 |         self.assertEqual(sample3.response_time, 3456)
 58 |         self.assertFalse(sample3.success)
 59 |         self.assertEqual(sample3.response_code, "500")
 60 |         self.assertEqual(sample3.error_message, "Internal Server Error")
 61 |         
 62 |         # Check start and end times
 63 |         expected_start = datetime.fromtimestamp(1625097600)
 64 |         expected_end = datetime.fromtimestamp(1625097602)
 65 |         self.assertEqual(test_results.start_time, expected_start)
 66 |         self.assertEqual(test_results.end_time, expected_end)
 67 |     
 68 |     def test_file_not_found(self):
 69 |         """Test parsing a non-existent file."""
 70 |         with self.assertRaises(FileNotFoundError):
 71 |             self.parser.parse_file('/path/to/nonexistent/file.csv')
 72 |     
 73 |     def test_invalid_format(self):
 74 |         """Test parsing a file with invalid format."""
 75 |         # Create a non-CSV file
 76 |         with tempfile.NamedTemporaryFile(suffix='.csv', mode='w', delete=False) as tmp:
 77 |             tmp.write("This is not CSV")
 78 |         
 79 |         try:
 80 |             with self.assertRaises(ValueError):
 81 |                 self.parser.parse_file(tmp.name)
 82 |         finally:
 83 |             os.unlink(tmp.name)
 84 |     
 85 |     def test_missing_columns(self):
 86 |         """Test parsing a CSV file with missing required columns."""
 87 |         # Create a CSV file with missing columns
 88 |         with tempfile.NamedTemporaryFile(suffix='.csv', mode='w', delete=False) as tmp:
 89 |             tmp.write("timestamp,label,responseCode\n")
 90 |             tmp.write("1625097600000,Home Page,200\n")
 91 |         
 92 |         try:
 93 |             with self.assertRaises(ValueError):
 94 |                 self.parser.parse_file(tmp.name)
 95 |         finally:
 96 |             os.unlink(tmp.name)
 97 |     
 98 |     def test_custom_column_mappings(self):
 99 |         """Test parsing a CSV file with custom column mappings."""
100 |         # Create a CSV file with different column names but standard format
101 |         # to pass the format detection
102 |         custom_csv_content = """timeStamp,elapsed,label,responseCode,success,threadName,bytes,sentBytes,Latency,Connect,responseMessage
103 | 1625097600000,1234,Home Page,200,true,Thread Group 1-1,12345,1234,1000,800,
104 | """
105 |         with tempfile.NamedTemporaryFile(suffix='.csv', mode='w', delete=False) as tmp:
106 |             tmp.write(custom_csv_content)
107 |         
108 |         try:
109 |             # Create parser with custom column mappings
110 |             custom_mappings = {
111 |                 'timestamp': 'timeStamp',
112 |                 'label': 'label',
113 |                 'response_time': 'elapsed',
114 |                 'success': 'success',
115 |                 'response_code': 'responseCode',
116 |                 'error_message': 'responseMessage',
117 |                 'thread_name': 'threadName',
118 |                 'bytes_received': 'bytes',
119 |                 'bytes_sent': 'sentBytes',
120 |                 'latency': 'Latency',
121 |                 'connect_time': 'Connect'
122 |             }
123 |             custom_parser = CSVJTLParser(column_mappings=custom_mappings)
124 |             
125 |             # This should work with our custom mappings
126 |             test_results = custom_parser.parse_file(tmp.name)
127 |             self.assertEqual(len(test_results.samples), 1)
128 |             self.assertEqual(test_results.samples[0].label, "Home Page")
129 |         finally:
130 |             os.unlink(tmp.name)
131 | 
132 | 
133 | if __name__ == '__main__':
134 |     unittest.main()
```
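
Every test module in this repository follows the same `unittest` layout; a small sketch for running the whole suite via discovery (assuming it is executed from the repository root):

```python
# Hedged sketch: programmatic equivalent of `python -m unittest discover tests`.
import unittest

suite = unittest.defaultTestLoader.discover("tests", pattern="test_*.py")
unittest.TextTestRunner(verbosity=2).run(suite)
```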

--------------------------------------------------------------------------------
/tests/test_analyzer_models.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the analyzer models.
  3 | """
  4 | 
  5 | import unittest
  6 | from datetime import datetime
  7 | 
  8 | from analyzer.models import Sample, TestResults, OverallMetrics, EndpointMetrics
  9 | 
 10 | 
 11 | class TestSample(unittest.TestCase):
 12 |     """Tests for the Sample class."""
 13 |     
 14 |     def test_sample_creation(self):
 15 |         """Test creating a Sample instance."""
 16 |         timestamp = datetime.now()
 17 |         sample = Sample(
 18 |             timestamp=timestamp,
 19 |             label="Test Sample",
 20 |             response_time=100,
 21 |             success=True,
 22 |             response_code="200",
 23 |             error_message=None,
 24 |             thread_name="Thread Group 1-1",
 25 |             bytes_sent=150,
 26 |             bytes_received=1024,
 27 |             latency=50,
 28 |             connect_time=20
 29 |         )
 30 |         
 31 |         self.assertEqual(sample.timestamp, timestamp)
 32 |         self.assertEqual(sample.label, "Test Sample")
 33 |         self.assertEqual(sample.response_time, 100)
 34 |         self.assertTrue(sample.success)
 35 |         self.assertEqual(sample.response_code, "200")
 36 |         self.assertIsNone(sample.error_message)
 37 |         self.assertEqual(sample.thread_name, "Thread Group 1-1")
 38 |         self.assertEqual(sample.bytes_sent, 150)
 39 |         self.assertEqual(sample.bytes_received, 1024)
 40 |         self.assertEqual(sample.latency, 50)
 41 |         self.assertEqual(sample.connect_time, 20)
 42 | 
 43 | 
 44 | class TestTestResults(unittest.TestCase):
 45 |     """Tests for the TestResults class."""
 46 |     
 47 |     def test_add_sample(self):
 48 |         """Test adding samples to TestResults."""
 49 |         results = TestResults()
 50 |         self.assertEqual(len(results.samples), 0)
 51 |         
 52 |         # Add a sample
 53 |         timestamp1 = datetime(2023, 1, 1, 12, 0, 0)
 54 |         sample1 = Sample(
 55 |             timestamp=timestamp1,
 56 |             label="Sample 1",
 57 |             response_time=100,
 58 |             success=True,
 59 |             response_code="200"
 60 |         )
 61 |         results.add_sample(sample1)
 62 |         
 63 |         self.assertEqual(len(results.samples), 1)
 64 |         self.assertEqual(results.start_time, timestamp1)
 65 |         self.assertEqual(results.end_time, timestamp1)
 66 |         
 67 |         # Add another sample with earlier timestamp
 68 |         timestamp2 = datetime(2023, 1, 1, 11, 0, 0)
 69 |         sample2 = Sample(
 70 |             timestamp=timestamp2,
 71 |             label="Sample 2",
 72 |             response_time=200,
 73 |             success=True,
 74 |             response_code="200"
 75 |         )
 76 |         results.add_sample(sample2)
 77 |         
 78 |         self.assertEqual(len(results.samples), 2)
 79 |         self.assertEqual(results.start_time, timestamp2)  # Should update to earlier time
 80 |         self.assertEqual(results.end_time, timestamp1)
 81 |         
 82 |         # Add another sample with later timestamp
 83 |         timestamp3 = datetime(2023, 1, 1, 13, 0, 0)
 84 |         sample3 = Sample(
 85 |             timestamp=timestamp3,
 86 |             label="Sample 3",
 87 |             response_time=300,
 88 |             success=True,
 89 |             response_code="200"
 90 |         )
 91 |         results.add_sample(sample3)
 92 |         
 93 |         self.assertEqual(len(results.samples), 3)
 94 |         self.assertEqual(results.start_time, timestamp2)
 95 |         self.assertEqual(results.end_time, timestamp3)  # Should update to later time
 96 | 
 97 | 
 98 | class TestMetrics(unittest.TestCase):
 99 |     """Tests for the metrics classes."""
100 |     
101 |     def test_overall_metrics(self):
102 |         """Test creating OverallMetrics instance."""
103 |         metrics = OverallMetrics(
104 |             total_samples=100,
105 |             error_count=5,
106 |             error_rate=5.0,
107 |             average_response_time=250.5,
108 |             median_response_time=220.0,
109 |             percentile_90=400.0,
110 |             percentile_95=450.0,
111 |             percentile_99=500.0,
112 |             min_response_time=100.0,
113 |             max_response_time=600.0,
114 |             throughput=10.5,
115 |             test_duration=60.0
116 |         )
117 |         
118 |         self.assertEqual(metrics.total_samples, 100)
119 |         self.assertEqual(metrics.error_count, 5)
120 |         self.assertEqual(metrics.error_rate, 5.0)
121 |         self.assertEqual(metrics.average_response_time, 250.5)
122 |         self.assertEqual(metrics.median_response_time, 220.0)
123 |         self.assertEqual(metrics.percentile_90, 400.0)
124 |         self.assertEqual(metrics.percentile_95, 450.0)
125 |         self.assertEqual(metrics.percentile_99, 500.0)
126 |         self.assertEqual(metrics.min_response_time, 100.0)
127 |         self.assertEqual(metrics.max_response_time, 600.0)
128 |         self.assertEqual(metrics.throughput, 10.5)
129 |         self.assertEqual(metrics.test_duration, 60.0)
130 |     
131 |     def test_endpoint_metrics(self):
132 |         """Test creating EndpointMetrics instance."""
133 |         metrics = EndpointMetrics(
134 |             endpoint="Test Endpoint",
135 |             total_samples=50,
136 |             error_count=2,
137 |             error_rate=4.0,
138 |             average_response_time=200.5,
139 |             median_response_time=180.0,
140 |             percentile_90=350.0,
141 |             percentile_95=400.0,
142 |             percentile_99=450.0,
143 |             min_response_time=90.0,
144 |             max_response_time=500.0,
145 |             throughput=8.5,
146 |             test_duration=60.0
147 |         )
148 |         
149 |         self.assertEqual(metrics.endpoint, "Test Endpoint")
150 |         self.assertEqual(metrics.total_samples, 50)
151 |         self.assertEqual(metrics.error_count, 2)
152 |         self.assertEqual(metrics.error_rate, 4.0)
153 |         self.assertEqual(metrics.average_response_time, 200.5)
154 |         self.assertEqual(metrics.median_response_time, 180.0)
155 |         self.assertEqual(metrics.percentile_90, 350.0)
156 |         self.assertEqual(metrics.percentile_95, 400.0)
157 |         self.assertEqual(metrics.percentile_99, 450.0)
158 |         self.assertEqual(metrics.min_response_time, 90.0)
159 |         self.assertEqual(metrics.max_response_time, 500.0)
160 |         self.assertEqual(metrics.throughput, 8.5)
161 |         self.assertEqual(metrics.test_duration, 60.0)
162 | 
163 | 
164 | if __name__ == '__main__':
165 |     unittest.main()
```
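
The abbreviated constructor calls in `TestTestResults` suggest that `Sample` only requires the core fields, with the remaining attributes defaulting; a minimal sketch built on that assumption:

```python
# Sketch only: relies on the defaults implied by the abbreviated Sample(...) calls above.
from datetime import datetime

from analyzer.models import Sample, TestResults

results = TestResults()
results.add_sample(Sample(
    timestamp=datetime(2023, 1, 1, 12, 0, 0),
    label="Checkout",        # hypothetical endpoint label
    response_time=250,
    success=True,
    response_code="200",
))
print(results.start_time, results.end_time, len(results.samples))
```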

--------------------------------------------------------------------------------
/tests/test_insights_generator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the insights generator.
  3 | """
  4 | 
  5 | import unittest
  6 | 
  7 | from analyzer.insights.generator import InsightsGenerator
  8 | from analyzer.models import Bottleneck, Recommendation
  9 | 
 10 | 
 11 | class TestInsightsGenerator(unittest.TestCase):
 12 |     """Tests for the InsightsGenerator class."""
 13 |     
 14 |     def setUp(self):
 15 |         """Set up test fixtures."""
 16 |         self.generator = InsightsGenerator()
 17 |         
 18 |         # Create bottlenecks
 19 |         self.bottlenecks = [
 20 |             Bottleneck(
 21 |                 endpoint="Slow Endpoint",
 22 |                 metric_type="response_time",
 23 |                 value=500.0,
 24 |                 threshold=200.0,
 25 |                 severity="high"
 26 |             ),
 27 |             Bottleneck(
 28 |                 endpoint="Medium Endpoint",
 29 |                 metric_type="response_time",
 30 |                 value=300.0,
 31 |                 threshold=200.0,
 32 |                 severity="medium"
 33 |             ),
 34 |             Bottleneck(
 35 |                 endpoint="Error Endpoint",
 36 |                 metric_type="error_rate",
 37 |                 value=15.0,
 38 |                 threshold=5.0,
 39 |                 severity="high"
 40 |             )
 41 |         ]
 42 |         
 43 |         # Create error analysis
 44 |         self.error_analysis = {
 45 |             "error_types": {
 46 |                 "Connection timeout": 10,
 47 |                 "500 Internal Server Error": 5,
 48 |                 "404 Not Found": 3
 49 |             },
 50 |             "error_patterns": [
 51 |                 {
 52 |                     "type": "spike",
 53 |                     "timestamp": "2023-01-01T12:00:00",
 54 |                     "error_count": 8
 55 |                 }
 56 |             ]
 57 |         }
 58 |         
 59 |         # Create concurrency analysis
 60 |         self.concurrency_analysis = {
 61 |             "correlation": 0.85,
 62 |             "degradation_threshold": 50,
 63 |             "has_degradation": True
 64 |         }
 65 |     
 66 |     def test_generate_bottleneck_recommendations(self):
 67 |         """Test generating recommendations for bottlenecks."""
 68 |         recommendations = self.generator.generate_bottleneck_recommendations(self.bottlenecks)
 69 |         
 70 |         # We should have at least 2 recommendations (one for response time, one for error rate)
 71 |         self.assertGreaterEqual(len(recommendations), 2)
 72 |         
 73 |         # Check that we have recommendations for both types of bottlenecks
 74 |         recommendation_issues = [r.issue for r in recommendations]
 75 |         self.assertTrue(any("response time" in issue.lower() for issue in recommendation_issues))
 76 |         self.assertTrue(any("error rate" in issue.lower() for issue in recommendation_issues))
 77 |         
 78 |         # Check that recommendations have all required fields
 79 |         for recommendation in recommendations:
 80 |             self.assertIsNotNone(recommendation.issue)
 81 |             self.assertIsNotNone(recommendation.recommendation)
 82 |             self.assertIsNotNone(recommendation.expected_impact)
 83 |             self.assertIsNotNone(recommendation.implementation_difficulty)
 84 |     
 85 |     def test_generate_error_recommendations(self):
 86 |         """Test generating recommendations for error patterns."""
 87 |         recommendations = self.generator.generate_error_recommendations(self.error_analysis)
 88 |         
 89 |         # We should have at least 3 recommendations (one for each error type)
 90 |         self.assertGreaterEqual(len(recommendations), 3)
 91 |         
 92 |         # Check that we have recommendations for the error types
 93 |         recommendation_issues = [r.issue for r in recommendations]
 94 |         self.assertTrue(any("timeout" in issue.lower() for issue in recommendation_issues))
 95 |         self.assertTrue(any("server" in issue.lower() for issue in recommendation_issues))
 96 |         
 97 |         # Check that recommendations have all required fields
 98 |         for recommendation in recommendations:
 99 |             self.assertIsNotNone(recommendation.issue)
100 |             self.assertIsNotNone(recommendation.recommendation)
101 |             self.assertIsNotNone(recommendation.expected_impact)
102 |             self.assertIsNotNone(recommendation.implementation_difficulty)
103 |     
104 |     def test_generate_scaling_insights(self):
105 |         """Test generating insights on scaling behavior."""
106 |         insights = self.generator.generate_scaling_insights(self.concurrency_analysis)
107 |         
108 |         # We should have at least 2 insights
109 |         self.assertGreaterEqual(len(insights), 2)
110 |         
111 |         # Check that we have insights about correlation and degradation
112 |         insight_topics = [i.topic for i in insights]
113 |         self.assertTrue(any("correlation" in topic.lower() for topic in insight_topics))
114 |         self.assertTrue(any("degradation" in topic.lower() for topic in insight_topics))
115 |         
116 |         # Check that insights have all required fields
117 |         for insight in insights:
118 |             self.assertIsNotNone(insight.topic)
119 |             self.assertIsNotNone(insight.description)
120 |             self.assertIsNotNone(insight.supporting_data)
121 |     
122 |     def test_prioritize_recommendations(self):
123 |         """Test prioritizing recommendations."""
124 |         # Create some recommendations
125 |         recommendations = [
126 |             Recommendation(
127 |                 issue="Critical response time issues",
128 |                 recommendation="Optimize database queries",
129 |                 expected_impact="Significant reduction in response times",
130 |                 implementation_difficulty="medium"
131 |             ),
132 |             Recommendation(
133 |                 issue="Moderate error rates",
134 |                 recommendation="Add error handling",
135 |                 expected_impact="Reduction in error rates",
136 |                 implementation_difficulty="low"
137 |             ),
138 |             Recommendation(
139 |                 issue="Minor UI issues",
140 |                 recommendation="Fix UI bugs",
141 |                 expected_impact="Improved user experience",
142 |                 implementation_difficulty="high"
143 |             )
144 |         ]
145 |         
146 |         prioritized = self.generator.prioritize_recommendations(recommendations)
147 |         
148 |         # We should have 3 prioritized recommendations
149 |         self.assertEqual(len(prioritized), 3)
150 |         
151 |         # Check that they are sorted by priority score (descending)
152 |         self.assertGreaterEqual(prioritized[0]["priority_score"], prioritized[1]["priority_score"])
153 |         self.assertGreaterEqual(prioritized[1]["priority_score"], prioritized[2]["priority_score"])
154 |         
155 |         # Check that each prioritized recommendation has the required fields
156 |         for item in prioritized:
157 |             self.assertIn("recommendation", item)
158 |             self.assertIn("priority_score", item)
159 |             self.assertIn("priority_level", item)
160 |     
161 |     def test_empty_inputs(self):
162 |         """Test handling of empty inputs."""
163 |         self.assertEqual(len(self.generator.generate_bottleneck_recommendations([])), 0)
164 |         self.assertEqual(len(self.generator.generate_error_recommendations({})), 0)
165 |         self.assertGreaterEqual(len(self.generator.generate_scaling_insights({})), 1)  # Should still generate at least one insight
166 |         self.assertEqual(len(self.generator.prioritize_recommendations([])), 0)
167 | 
168 | 
169 | if __name__ == '__main__':
170 |     unittest.main()
```
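
Pieced together from the calls exercised above, a hedged sketch of how the generator's outputs are typically combined (the bottleneck data is illustrative):

```python
# Sketch: turn detected bottlenecks into recommendations, then rank them.
from analyzer.insights.generator import InsightsGenerator
from analyzer.models import Bottleneck

generator = InsightsGenerator()
bottlenecks = [
    Bottleneck(endpoint="Checkout", metric_type="response_time",
               value=450.0, threshold=200.0, severity="high"),   # illustrative values
]
recommendations = generator.generate_bottleneck_recommendations(bottlenecks)
for item in generator.prioritize_recommendations(recommendations):
    print(item["priority_level"], item["priority_score"])
```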

--------------------------------------------------------------------------------
/tests/test_bottleneck_analyzer.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the bottleneck analyzer.
  3 | """
  4 | 
  5 | import unittest
  6 | from datetime import datetime, timedelta
  7 | 
  8 | from analyzer.bottleneck.analyzer import BottleneckAnalyzer
  9 | from analyzer.models import EndpointMetrics, TimeSeriesMetrics
 10 | 
 11 | 
 12 | class TestBottleneckAnalyzer(unittest.TestCase):
 13 |     """Tests for the BottleneckAnalyzer class."""
 14 |     
 15 |     def setUp(self):
 16 |         """Set up test fixtures."""
 17 |         self.analyzer = BottleneckAnalyzer()
 18 |         
 19 |         # Create endpoint metrics
 20 |         self.endpoint_metrics = {
 21 |             "Fast Endpoint": EndpointMetrics(
 22 |                 endpoint="Fast Endpoint",
 23 |                 total_samples=100,
 24 |                 error_count=0,
 25 |                 error_rate=0.0,
 26 |                 average_response_time=100.0,
 27 |                 median_response_time=95.0,
 28 |                 percentile_90=150.0,
 29 |                 percentile_95=180.0,
 30 |                 percentile_99=200.0,
 31 |                 min_response_time=50.0,
 32 |                 max_response_time=250.0,
 33 |                 throughput=10.0,
 34 |                 test_duration=10.0
 35 |             ),
 36 |             "Medium Endpoint": EndpointMetrics(
 37 |                 endpoint="Medium Endpoint",
 38 |                 total_samples=100,
 39 |                 error_count=2,
 40 |                 error_rate=2.0,
 41 |                 average_response_time=200.0,
 42 |                 median_response_time=190.0,
 43 |                 percentile_90=300.0,
 44 |                 percentile_95=350.0,
 45 |                 percentile_99=400.0,
 46 |                 min_response_time=100.0,
 47 |                 max_response_time=450.0,
 48 |                 throughput=10.0,
 49 |                 test_duration=10.0
 50 |             ),
 51 |             "Slow Endpoint": EndpointMetrics(
 52 |                 endpoint="Slow Endpoint",
 53 |                 total_samples=100,
 54 |                 error_count=5,
 55 |                 error_rate=5.0,
 56 |                 average_response_time=500.0,
 57 |                 median_response_time=450.0,
 58 |                 percentile_90=800.0,
 59 |                 percentile_95=900.0,
 60 |                 percentile_99=1000.0,
 61 |                 min_response_time=200.0,
 62 |                 max_response_time=1200.0,
 63 |                 throughput=10.0,
 64 |                 test_duration=10.0
 65 |             ),
 66 |             "Error Endpoint": EndpointMetrics(
 67 |                 endpoint="Error Endpoint",
 68 |                 total_samples=100,
 69 |                 error_count=15,
 70 |                 error_rate=15.0,
 71 |                 average_response_time=300.0,
 72 |                 median_response_time=280.0,
 73 |                 percentile_90=450.0,
 74 |                 percentile_95=500.0,
 75 |                 percentile_99=600.0,
 76 |                 min_response_time=150.0,
 77 |                 max_response_time=700.0,
 78 |                 throughput=10.0,
 79 |                 test_duration=10.0
 80 |             )
 81 |         }
 82 |         
 83 |         # Create time series metrics
 84 |         base_time = datetime(2023, 1, 1, 12, 0, 0)
 85 |         self.time_series_metrics = [
 86 |             TimeSeriesMetrics(
 87 |                 timestamp=base_time + timedelta(seconds=i * 5),
 88 |                 active_threads=i + 1,
 89 |                 throughput=10.0,
 90 |                 average_response_time=100.0 + i * 20,
 91 |                 error_rate=0.0 if i < 8 else 5.0
 92 |             )
 93 |             for i in range(10)
 94 |         ]
 95 |         
 96 |         # Add an anomaly
 97 |         self.time_series_metrics[5].average_response_time = 500.0  # Spike in the middle
 98 |     
 99 |     def test_identify_slow_endpoints(self):
100 |         """Test identifying slow endpoints."""
101 |         # Use a higher threshold factor to get only the Slow Endpoint
102 |         bottlenecks = self.analyzer.identify_slow_endpoints(self.endpoint_metrics, threshold_factor=2.0)
103 |         
104 |         # We should have identified the slow endpoint
105 |         self.assertEqual(len(bottlenecks), 1)
106 |         self.assertEqual(bottlenecks[0].endpoint, "Slow Endpoint")
107 |         self.assertEqual(bottlenecks[0].metric_type, "response_time")
108 |         # With threshold_factor=2.0, the severity should be medium or high
109 |         self.assertIn(bottlenecks[0].severity, ["medium", "high"])
110 |         
111 |         # Test with a lower threshold factor to catch more endpoints
112 |         bottlenecks = self.analyzer.identify_slow_endpoints(self.endpoint_metrics, threshold_factor=0.8)
113 |         self.assertGreaterEqual(len(bottlenecks), 2)
114 |         self.assertEqual(bottlenecks[0].endpoint, "Slow Endpoint")  # Should still be first
115 |     
116 |     def test_identify_error_prone_endpoints(self):
117 |         """Test identifying error-prone endpoints."""
118 |         bottlenecks = self.analyzer.identify_error_prone_endpoints(self.endpoint_metrics, threshold_error_rate=3.0)
119 |         
120 |         # We should have identified both error-prone endpoints
121 |         self.assertEqual(len(bottlenecks), 2)
122 |         self.assertEqual(bottlenecks[0].endpoint, "Error Endpoint")  # Higher error rate should be first
123 |         self.assertEqual(bottlenecks[0].metric_type, "error_rate")
124 |         self.assertEqual(bottlenecks[0].severity, "high")
125 |         
126 |         self.assertEqual(bottlenecks[1].endpoint, "Slow Endpoint")
127 |         self.assertEqual(bottlenecks[1].metric_type, "error_rate")
128 |         self.assertEqual(bottlenecks[1].severity, "medium")
129 |         
130 |         # Test with a higher threshold to catch fewer endpoints
131 |         bottlenecks = self.analyzer.identify_error_prone_endpoints(self.endpoint_metrics, threshold_error_rate=10.0)
132 |         self.assertEqual(len(bottlenecks), 1)
133 |         self.assertEqual(bottlenecks[0].endpoint, "Error Endpoint")
134 |     
135 |     def test_detect_anomalies(self):
136 |         """Test detecting response time anomalies."""
137 |         anomalies = self.analyzer.detect_anomalies(self.time_series_metrics)
138 |         
139 |         # We should have detected the spike
140 |         self.assertGreaterEqual(len(anomalies), 1)
141 |         
142 |         # The spike should be the first anomaly
143 |         spike_anomaly = anomalies[0]
144 |         self.assertEqual(spike_anomaly.timestamp, datetime(2023, 1, 1, 12, 0, 25))  # 5th interval
145 |         self.assertGreater(abs(spike_anomaly.deviation_percentage), 50)  # Should be a significant deviation
146 |     
147 |     def test_analyze_concurrency_impact(self):
148 |         """Test analyzing concurrency impact."""
149 |         # Our time series has increasing thread counts and response times
150 |         analysis = self.analyzer.analyze_concurrency_impact(self.time_series_metrics)
151 |         
152 |         # There should be a positive correlation
153 |         self.assertGreater(analysis["correlation"], 0.5)
154 |         
155 |         # Create a new time series with no correlation
156 |         no_correlation_series = [
157 |             TimeSeriesMetrics(
158 |                 timestamp=datetime(2023, 1, 1, 12, 0, 0) + timedelta(seconds=i * 5),
159 |                 active_threads=i + 1,
160 |                 throughput=10.0,
161 |                 average_response_time=200.0,  # Constant response time
162 |                 error_rate=0.0
163 |             )
164 |             for i in range(10)
165 |         ]
166 |         
167 |         analysis = self.analyzer.analyze_concurrency_impact(no_correlation_series)
168 |         self.assertLess(analysis["correlation"], 0.5)
169 |         self.assertFalse(analysis["has_degradation"])
170 |     
171 |     def test_empty_inputs(self):
172 |         """Test handling of empty inputs."""
173 |         self.assertEqual(len(self.analyzer.identify_slow_endpoints({})), 0)
174 |         self.assertEqual(len(self.analyzer.identify_error_prone_endpoints({})), 0)
175 |         self.assertEqual(len(self.analyzer.detect_anomalies([])), 0)
176 |         
177 |         analysis = self.analyzer.analyze_concurrency_impact([])
178 |         self.assertEqual(analysis["correlation"], 0)
179 |         self.assertFalse(analysis["has_degradation"])
180 | 
181 | 
182 | if __name__ == '__main__':
183 |     unittest.main()
```
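
Mirroring the fixtures above, a hedged sketch of feeding a synthetic time series to the analyzer:

```python
# Sketch: synthetic TimeSeriesMetrics with response times that grow with thread count.
from datetime import datetime, timedelta

from analyzer.bottleneck.analyzer import BottleneckAnalyzer
from analyzer.models import TimeSeriesMetrics

base = datetime(2023, 1, 1, 12, 0, 0)
series = [
    TimeSeriesMetrics(
        timestamp=base + timedelta(seconds=5 * i),
        active_threads=i + 1,
        throughput=10.0,
        average_response_time=100.0 + 25 * i,
        error_rate=0.0,
    )
    for i in range(10)
]

analyzer = BottleneckAnalyzer()
print(analyzer.analyze_concurrency_impact(series))  # correlation / degradation flags
print(analyzer.detect_anomalies(series))            # likely empty for this smooth ramp
```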

--------------------------------------------------------------------------------
/windsurf_db_reader.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Windsurf LevelDB Reader
  4 | A tool to read and explore Windsurf's local storage database.
  5 | """
  6 | 
  7 | import os
  8 | import json
  9 | import sys
 10 | from pathlib import Path
 11 | 
 12 | try:
 13 |     import plyvel
 14 | except ImportError:
 15 |     print("plyvel not installed. Install with: pip install plyvel")
 16 |     sys.exit(1)
 17 | 
 18 | class WindsurfDBReader:
 19 |     def __init__(self, db_path=None):
 20 |         if db_path is None:
 21 |             # Default path for Windsurf Next on macOS
 22 |             home = Path.home()
 23 |             db_path = home / "Library/Application Support/Windsurf - Next/Local Storage/leveldb"
 24 |         
 25 |         self.db_path = Path(db_path)
 26 |         self.db = None
 27 |         
 28 |     def connect(self):
 29 |         """Connect to the LevelDB database"""
 30 |         try:
 31 |             self.db = plyvel.DB(str(self.db_path), create_if_missing=False)
 32 |             print(f"✅ Connected to database: {self.db_path}")
 33 |             return True
 34 |         except Exception as e:
 35 |             print(f"❌ Failed to connect to database: {e}")
 36 |             return False
 37 |     
 38 |     def close(self):
 39 |         """Close the database connection"""
 40 |         if self.db:
 41 |             self.db.close()
 42 |             print("🔒 Database connection closed")
 43 |     
 44 |     def list_all_keys(self, limit=50):
 45 |         """List all keys in the database"""
 46 |         if not self.db:
 47 |             print("❌ Database not connected")
 48 |             return
 49 |         
 50 |         print(f"\n📋 Listing up to {limit} keys:")
 51 |         count = 0
 52 |         for key, value in self.db:
 53 |             try:
 54 |                 key_str = key.decode('utf-8', errors='ignore')
 55 |                 value_preview = str(value[:100]) if len(value) > 100 else str(value)
 56 |                 print(f"{count + 1:3d}. Key: {key_str}")
 57 |                 print(f"     Value preview: {value_preview}")
 58 |                 print(f"     Value size: {len(value)} bytes")
 59 |                 print("-" * 50)
 60 |                 
 61 |                 count += 1
 62 |                 if count >= limit:
 63 |                     break
 64 |             except Exception as e:
 65 |                 print(f"Error reading key {count + 1}: {e}")
 66 |                 count += 1
 67 |     
 68 |     def search_keys(self, pattern):
 69 |         """Search for keys containing a specific pattern"""
 70 |         if not self.db:
 71 |             print("❌ Database not connected")
 72 |             return
 73 |         
 74 |         print(f"\n🔍 Searching for keys containing '{pattern}':")
 75 |         found = 0
 76 |         for key, value in self.db:
 77 |             try:
 78 |                 key_str = key.decode('utf-8', errors='ignore')
 79 |                 if pattern.lower() in key_str.lower():
 80 |                     print(f"Found: {key_str}")
 81 |                     print(f"Value size: {len(value)} bytes")
 82 |                     
 83 |                     # Try to decode value if it looks like JSON
 84 |                     try:
 85 |                         if value.startswith(b'{') or value.startswith(b'['):
 86 |                             json_data = json.loads(value.decode('utf-8'))
 87 |                             print(f"JSON preview: {json.dumps(json_data, indent=2)[:200]}...")
 88 |                     except (json.JSONDecodeError, UnicodeDecodeError):
 89 |                         pass  # not JSON / not valid UTF-8; skip the preview
 90 |                     
 91 |                     print("-" * 50)
 92 |                     found += 1
 93 |             except Exception as e:
 94 |                 print(f"Error searching key: {e}")
 95 |         
 96 |         if found == 0:
 97 |             print(f"No keys found containing '{pattern}'")
 98 |     
 99 |     def get_value(self, key):
100 |         """Get a specific value by key"""
101 |         if not self.db:
102 |             print("❌ Database not connected")
103 |             return None
104 |         
105 |         try:
106 |             key_bytes = key.encode('utf-8') if isinstance(key, str) else key
107 |             value = self.db.get(key_bytes)
108 |             
109 |             if value is None:
110 |                 print(f"❌ Key '{key}' not found")
111 |                 return None
112 |             
113 |             print(f"✅ Found value for key '{key}':")
114 |             print(f"Size: {len(value)} bytes")
115 |             
116 |             # Try to decode as JSON
117 |             try:
118 |                 if value.startswith(b'{') or value.startswith(b'['):
119 |                     json_data = json.loads(value.decode('utf-8'))
120 |                     print("JSON content:")
121 |                     print(json.dumps(json_data, indent=2))
122 |                     return json_data
123 |             except (json.JSONDecodeError, UnicodeDecodeError):
124 |                 pass  # fall through to plain-text / binary handling
125 |             
126 |             # Try to decode as text
127 |             try:
128 |                 text = value.decode('utf-8')
129 |                 print("Text content:")
130 |                 print(text)
131 |                 return text
132 |             except UnicodeDecodeError:
133 |                 print("Binary content (showing first 200 bytes):")
134 |                 print(value[:200])
135 |                 return value
136 |                 
137 |         except Exception as e:
138 |             print(f"❌ Error getting value: {e}")
139 |             return None
140 |     
141 |     def export_to_json(self, output_file="windsurf_db_export.json", max_entries=1000):
142 |         """Export database contents to JSON file"""
143 |         if not self.db:
144 |             print("❌ Database not connected")
145 |             return
146 |         
147 |         export_data = {}
148 |         count = 0
149 |         
150 |         print(f"📤 Exporting database to {output_file}...")
151 |         
152 |         for key, value in self.db:
153 |             if count >= max_entries:
154 |                 break
155 |                 
156 |             try:
157 |                 key_str = key.decode('utf-8', errors='ignore')
158 |                 
159 |                 # Try to decode value as JSON first
160 |                 try:
161 |                     if value.startswith(b'{') or value.startswith(b'['):
162 |                         value_data = json.loads(value.decode('utf-8'))
163 |                     else:
164 |                         value_data = value.decode('utf-8', errors='ignore')
165 |                 except (json.JSONDecodeError, UnicodeDecodeError):
166 |                     value_data = f"<binary data: {len(value)} bytes>"
167 |                 
168 |                 export_data[key_str] = value_data
169 |                 count += 1
170 |                 
171 |             except Exception as e:
172 |                 print(f"Error exporting entry {count}: {e}")
173 |         
174 |         try:
175 |             with open(output_file, 'w', encoding='utf-8') as f:
176 |                 json.dump(export_data, f, indent=2, ensure_ascii=False)
177 |             print(f"✅ Exported {count} entries to {output_file}")
178 |         except Exception as e:
179 |             print(f"❌ Error writing export file: {e}")
180 | 
181 | def main():
182 |     """Main function with interactive menu"""
183 |     reader = WindsurfDBReader()
184 |     
185 |     if not reader.connect():
186 |         return
187 |     
188 |     try:
189 |         while True:
190 |             print("\n" + "="*60)
191 |             print("🌊 Windsurf Database Reader")
192 |             print("="*60)
193 |             print("1. List all keys (first 50)")
194 |             print("2. Search keys by pattern")
195 |             print("3. Get value by key")
196 |             print("4. Export to JSON")
197 |             print("5. Search for 'memory' related keys")
198 |             print("6. Search for 'conversation' related keys")
199 |             print("0. Exit")
200 |             print("-"*60)
201 |             
202 |             choice = input("Enter your choice (0-6): ").strip()
203 |             
204 |             if choice == '0':
205 |                 break
206 |             elif choice == '1':
207 |                 reader.list_all_keys()
208 |             elif choice == '2':
209 |                 pattern = input("Enter search pattern: ").strip()
210 |                 if pattern:
211 |                     reader.search_keys(pattern)
212 |             elif choice == '3':
213 |                 key = input("Enter key: ").strip()
214 |                 if key:
215 |                     reader.get_value(key)
216 |             elif choice == '4':
217 |                 filename = input("Enter output filename (default: windsurf_db_export.json): ").strip()
218 |                 if not filename:
219 |                     filename = "windsurf_db_export.json"
220 |                 reader.export_to_json(filename)
221 |             elif choice == '5':
222 |                 reader.search_keys('memory')
223 |             elif choice == '6':
224 |                 reader.search_keys('conversation')
225 |             else:
226 |                 print("❌ Invalid choice. Please try again.")
227 |                 
228 |     except KeyboardInterrupt:
229 |         print("\n👋 Goodbye!")
230 |     finally:
231 |         reader.close()
232 | 
233 | if __name__ == "__main__":
234 |     main()
235 | 
```
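
Besides the interactive menu, the reader class can be scripted directly; a minimal non-interactive sketch (the export filename is just an example):

```python
# Sketch: scripted use of WindsurfDBReader instead of the interactive menu.
from windsurf_db_reader import WindsurfDBReader

reader = WindsurfDBReader()   # defaults to the macOS "Windsurf - Next" LevelDB path
if reader.connect():
    try:
        reader.search_keys("memory")
        reader.export_to_json("windsurf_export.json", max_entries=200)
    finally:
        reader.close()
```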

--------------------------------------------------------------------------------
/tests/test_metrics_calculator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the metrics calculator.
  3 | """
  4 | 
  5 | import unittest
  6 | from datetime import datetime, timedelta
  7 | 
  8 | from analyzer.metrics.calculator import MetricsCalculator
  9 | from analyzer.models import Sample, TestResults
 10 | 
 11 | 
 12 | class TestMetricsCalculator(unittest.TestCase):
 13 |     """Tests for the MetricsCalculator class."""
 14 |     
 15 |     def setUp(self):
 16 |         """Set up test fixtures."""
 17 |         self.calculator = MetricsCalculator()
 18 |         
 19 |         # Create test results with samples
 20 |         self.test_results = TestResults()
 21 |         
 22 |         # Add samples for endpoint 1
 23 |         base_time = datetime(2023, 1, 1, 12, 0, 0)
 24 |         for i in range(10):
 25 |             sample = Sample(
 26 |                 timestamp=base_time + timedelta(seconds=i),
 27 |                 label="Endpoint1",
 28 |                 response_time=100 + i * 10,  # 100, 110, 120, ..., 190
 29 |                 success=True,
 30 |                 response_code="200"
 31 |             )
 32 |             self.test_results.add_sample(sample)
 33 |         
 34 |         # Add samples for endpoint 2 (including some errors)
 35 |         for i in range(5):
 36 |             sample = Sample(
 37 |                 timestamp=base_time + timedelta(seconds=i + 10),
 38 |                 label="Endpoint2",
 39 |                 response_time=200 + i * 20,  # 200, 220, 240, 260, 280
 40 |                 success=i < 4,  # Last one is an error
 41 |                 response_code="200" if i < 4 else "500",
 42 |                 error_message="" if i < 4 else "Internal Server Error"
 43 |             )
 44 |             self.test_results.add_sample(sample)
 45 |     
 46 |     def test_calculate_overall_metrics(self):
 47 |         """Test calculating overall metrics."""
 48 |         metrics = self.calculator.calculate_overall_metrics(self.test_results)
 49 |         
 50 |         # Check basic metrics
 51 |         self.assertEqual(metrics.total_samples, 15)
 52 |         self.assertEqual(metrics.error_count, 1)
 53 |         self.assertAlmostEqual(metrics.error_rate, 100 * 1/15)
 54 |         
 55 |         # Check response time metrics
 56 |         expected_response_times = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 220, 240, 260, 280]
 57 |         self.assertAlmostEqual(metrics.average_response_time, sum(expected_response_times) / len(expected_response_times))
 58 |         self.assertAlmostEqual(metrics.median_response_time, 170)  # Median of the 15 values
 59 |         self.assertAlmostEqual(metrics.min_response_time, 100)
 60 |         self.assertAlmostEqual(metrics.max_response_time, 280)
 61 |         
 62 |         # Check percentiles
 63 |         self.assertAlmostEqual(metrics.percentile_90, 260)
 64 |         self.assertAlmostEqual(metrics.percentile_95, 270)
 65 |         self.assertAlmostEqual(metrics.percentile_99, 278)
 66 |         
 67 |         # Check throughput and duration
 68 |         self.assertEqual(metrics.test_duration, 14)  # 14 seconds from first to last sample
 69 |         self.assertAlmostEqual(metrics.throughput, 15 / 14)  # 15 samples over 14 seconds
 70 |     
 71 |     def test_calculate_endpoint_metrics(self):
 72 |         """Test calculating endpoint-specific metrics."""
 73 |         endpoint_metrics = self.calculator.calculate_endpoint_metrics(self.test_results)
 74 |         
 75 |         # Check that we have metrics for both endpoints
 76 |         self.assertEqual(len(endpoint_metrics), 2)
 77 |         self.assertIn("Endpoint1", endpoint_metrics)
 78 |         self.assertIn("Endpoint2", endpoint_metrics)
 79 |         
 80 |         # Check metrics for endpoint 1
 81 |         metrics1 = endpoint_metrics["Endpoint1"]
 82 |         self.assertEqual(metrics1.endpoint, "Endpoint1")
 83 |         self.assertEqual(metrics1.total_samples, 10)
 84 |         self.assertEqual(metrics1.error_count, 0)
 85 |         self.assertEqual(metrics1.error_rate, 0)
 86 |         self.assertAlmostEqual(metrics1.average_response_time, 145)  # Average of 100, 110, ..., 190
 87 |         
 88 |         # Check metrics for endpoint 2
 89 |         metrics2 = endpoint_metrics["Endpoint2"]
 90 |         self.assertEqual(metrics2.endpoint, "Endpoint2")
 91 |         self.assertEqual(metrics2.total_samples, 5)
 92 |         self.assertEqual(metrics2.error_count, 1)
 93 |         self.assertAlmostEqual(metrics2.error_rate, 20)  # 1 error out of 5 samples
 94 |         self.assertAlmostEqual(metrics2.average_response_time, 240)  # Average of 200, 220, 240, 260, 280
 95 |     
 96 |     def test_calculate_time_series_metrics(self):
 97 |         """Test calculating time series metrics."""
 98 |         # Use a 5-second interval
 99 |         time_series = self.calculator.calculate_time_series_metrics(self.test_results, interval_seconds=5)
100 |         
101 |         # We should have 3 intervals: 0-5s, 5-10s, 10-15s
102 |         self.assertEqual(len(time_series), 3)
103 |         
104 |         # Check first interval (0-5s)
105 |         self.assertEqual(time_series[0].timestamp, datetime(2023, 1, 1, 12, 0, 0))
106 |         self.assertEqual(time_series[0].active_threads, 0)  # No thread names in our test data
107 |         self.assertAlmostEqual(time_series[0].throughput, 5 / 5)  # 5 samples over 5 seconds
108 |         self.assertAlmostEqual(time_series[0].average_response_time, (100 + 110 + 120 + 130 + 140) / 5)
109 |         self.assertEqual(time_series[0].error_rate, 0)  # No errors in first interval
110 |         
111 |         # Check third interval (10-15s)
112 |         self.assertEqual(time_series[2].timestamp, datetime(2023, 1, 1, 12, 0, 10))
113 |         self.assertAlmostEqual(time_series[2].throughput, 5 / 5)  # 5 samples over 5 seconds
114 |         self.assertAlmostEqual(time_series[2].average_response_time, (200 + 220 + 240 + 260 + 280) / 5)
115 |         self.assertAlmostEqual(time_series[2].error_rate, 20)  # 1 error out of 5 samples
116 |     
117 |     def test_compare_with_benchmarks(self):
118 |         """Test comparing metrics with benchmarks."""
119 |         # Calculate metrics
120 |         metrics = self.calculator.calculate_overall_metrics(self.test_results)
121 |         
122 |         # Define benchmarks
123 |         benchmarks = {
124 |             "average_response_time": 150,
125 |             "error_rate": 0,
126 |             "throughput": 2
127 |         }
128 |         
129 |         # Compare with benchmarks
130 |         comparison = self.calculator.compare_with_benchmarks(metrics, benchmarks)
131 |         
132 |         # Check comparison results
133 |         self.assertIn("average_response_time", comparison)
134 |         self.assertIn("error_rate", comparison)
135 |         self.assertIn("throughput", comparison)
136 |         
137 |         # Check average_response_time comparison
138 |         avg_rt_comp = comparison["average_response_time"]
139 |         self.assertEqual(avg_rt_comp["benchmark"], 150)
140 |         self.assertAlmostEqual(avg_rt_comp["actual"], metrics.average_response_time)
141 |         self.assertAlmostEqual(avg_rt_comp["difference"], metrics.average_response_time - 150)
142 |         self.assertAlmostEqual(avg_rt_comp["percent_difference"], 
143 |                               (metrics.average_response_time - 150) / 150 * 100)
144 |         
145 |         # Check error_rate comparison
146 |         error_rate_comp = comparison["error_rate"]
147 |         self.assertEqual(error_rate_comp["benchmark"], 0)
148 |         self.assertAlmostEqual(error_rate_comp["actual"], metrics.error_rate)
149 |         self.assertAlmostEqual(error_rate_comp["difference"], metrics.error_rate)
150 |         self.assertEqual(error_rate_comp["percent_difference"], float('inf'))  # Division by zero
151 |         
152 |         # Check throughput comparison
153 |         throughput_comp = comparison["throughput"]
154 |         self.assertEqual(throughput_comp["benchmark"], 2)
155 |         self.assertAlmostEqual(throughput_comp["actual"], metrics.throughput)
156 |         self.assertAlmostEqual(throughput_comp["difference"], metrics.throughput - 2)
157 |         self.assertAlmostEqual(throughput_comp["percent_difference"], 
158 |                               (metrics.throughput - 2) / 2 * 100)
159 |     
160 |     def test_empty_results(self):
161 |         """Test calculating metrics for empty test results."""
162 |         empty_results = TestResults()
163 |         
164 |         with self.assertRaises(ValueError):
165 |             self.calculator.calculate_overall_metrics(empty_results)
166 |         
167 |         with self.assertRaises(ValueError):
168 |             self.calculator.calculate_endpoint_metrics(empty_results)
169 |         
170 |         with self.assertRaises(ValueError):
171 |             self.calculator.calculate_time_series_metrics(empty_results)
172 |     
173 |     def test_invalid_interval(self):
174 |         """Test calculating time series metrics with invalid interval."""
175 |         with self.assertRaises(ValueError):
176 |             self.calculator.calculate_time_series_metrics(self.test_results, interval_seconds=0)
177 |         
178 |         with self.assertRaises(ValueError):
179 |             self.calculator.calculate_time_series_metrics(self.test_results, interval_seconds=-1)
180 | 
181 | 
182 | if __name__ == '__main__':
183 |     unittest.main()
```
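
Taken together with the CSV parser shown earlier on this page, a hedged end-to-end sketch (`results.jtl` is a hypothetical file):

```python
# Sketch: parse a JTL file, compute metrics, and compare them against benchmarks.
from analyzer.metrics.calculator import MetricsCalculator
from analyzer.parser.csv_parser import CSVJTLParser

results = CSVJTLParser().parse_file("results.jtl")   # hypothetical path
calculator = MetricsCalculator()

overall = calculator.calculate_overall_metrics(results)
per_endpoint = calculator.calculate_endpoint_metrics(results)
comparison = calculator.compare_with_benchmarks(
    overall, {"average_response_time": 200, "error_rate": 1.0}
)

print(overall.throughput, overall.percentile_95)
for name, metrics in per_endpoint.items():
    print(name, metrics.average_response_time, f"{metrics.error_rate:.1f}%")
print(comparison["average_response_time"]["percent_difference"])
```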

--------------------------------------------------------------------------------
/jmeter_report.html:
--------------------------------------------------------------------------------

```html
  1 | 
  2 |         <!DOCTYPE html>
  3 |         <html>
  4 |         <head>
  5 |             <title>JMeter Test Results Analysis</title>
  6 |             <style>
  7 |                 body { font-family: Arial, sans-serif; margin: 20px; }
  8 |                 h1, h2, h3 { color: #333; }
  9 |                 table { border-collapse: collapse; width: 100%; margin-bottom: 20px; }
 10 |                 th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
 11 |                 th { background-color: #f2f2f2; }
 12 |                 tr:nth-child(even) { background-color: #f9f9f9; }
 13 |                 .chart { margin: 20px 0; max-width: 100%; }
 14 |                 .section { margin-bottom: 30px; }
 15 |                 .severity-high { color: #d9534f; }
 16 |                 .severity-medium { color: #f0ad4e; }
 17 |                 .severity-low { color: #5bc0de; }
 18 |             </style>
 19 |         </head>
 20 |         <body>
 21 |             <h1>JMeter Test Results Analysis</h1>
 22 |             
 23 |             <div class="section">
 24 |                 <h2>Summary</h2>
 25 |                 <table>
 26 |                     <tr><th>Metric</th><th>Value</th></tr>
 27 |                     <tr><td>Total Samples</td><td>96</td></tr>
 28 |                     <tr><td>Error Count</td><td>0</td></tr>
 29 |                     <tr><td>Error Rate</td><td>0.00%</td></tr>
 30 |                     <tr><td>Average Response Time</td><td>222.18 ms</td></tr>
 31 |                     <tr><td>Median Response Time</td><td>209.00 ms</td></tr>
 32 |                     <tr><td>90th Percentile</td><td>335.50 ms</td></tr>
 33 |                     <tr><td>95th Percentile</td><td>345.25 ms</td></tr>
 34 |                     <tr><td>99th Percentile</td><td>356.00 ms</td></tr>
 35 |                     <tr><td>Min Response Time</td><td>105.00 ms</td></tr>
 36 |                     <tr><td>Max Response Time</td><td>356.00 ms</td></tr>
 37 |                     <tr><td>Throughput</td><td>0.01 requests/second</td></tr>
 38 |                     <tr><td>Start Time</td><td>2025-04-09 20:38:16.160000</td></tr>
 39 |                     <tr><td>End Time</td><td>2025-04-09 23:17:02.381000</td></tr>
 40 |                     <tr><td>Duration</td><td>9526.22 seconds</td></tr>
 41 |                 </table>
 42 |             </div>
 43 |         
 44 |                 <div class="section">
 45 |                     <h2>Endpoint Analysis</h2>
 46 |                     <table>
 47 |                         <tr>
 48 |                             <th>Endpoint</th>
 49 |                             <th>Samples</th>
 50 |                             <th>Errors</th>
 51 |                             <th>Error Rate</th>
 52 |                             <th>Avg Response Time</th>
 53 |                             <th>95th Percentile</th>
 54 |                             <th>Throughput</th>
 55 |                         </tr>
 56 |                 
 57 |                         <tr>
 58 |                             <td>Login as u1</td>
 59 |                             <td>8</td>
 60 |                             <td>0</td>
 61 |                             <td>0.00%</td>
 62 |                             <td>174.62 ms</td>
 63 |                             <td>271.10 ms</td>
 64 |                             <td>0.00 req/s</td>
 65 |                         </tr>
 66 |                     
 67 |                         <tr>
 68 |                             <td>Action = none</td>
 69 |                             <td>16</td>
 70 |                             <td>0</td>
 71 |                             <td>0.00%</td>
 72 |                             <td>253.12 ms</td>
 73 |                             <td>350.25 ms</td>
 74 |                             <td>0.00 req/s</td>
 75 |                         </tr>
 76 |                     
 77 |                         <tr>
 78 |                             <td>Action a</td>
 79 |                             <td>8</td>
 80 |                             <td>0</td>
 81 |                             <td>0.00%</td>
 82 |                             <td>235.25 ms</td>
 83 |                             <td>327.15 ms</td>
 84 |                             <td>0.00 req/s</td>
 85 |                         </tr>
 86 |                     
 87 |                         <tr>
 88 |                             <td>Action b</td>
 89 |                             <td>8</td>
 90 |                             <td>0</td>
 91 |                             <td>0.00%</td>
 92 |                             <td>224.00 ms</td>
 93 |                             <td>317.00 ms</td>
 94 |                             <td>0.00 req/s</td>
 95 |                         </tr>
 96 |                     
 97 |                         <tr>
 98 |                             <td>Action c</td>
 99 |                             <td>8</td>
100 |                             <td>0</td>
101 |                             <td>0.00%</td>
102 |                             <td>231.00 ms</td>
103 |                             <td>349.35 ms</td>
104 |                             <td>0.00 req/s</td>
105 |                         </tr>
106 |                     
107 |                         <tr>
108 |                             <td>Action d</td>
109 |                             <td>8</td>
110 |                             <td>0</td>
111 |                             <td>0.00%</td>
112 |                             <td>215.50 ms</td>
113 |                             <td>270.65 ms</td>
114 |                             <td>0.00 req/s</td>
115 |                         </tr>
116 |                     
117 |                         <tr>
118 |                             <td>Logout</td>
119 |                             <td>16</td>
120 |                             <td>0</td>
121 |                             <td>0.00%</td>
122 |                             <td>214.56 ms</td>
123 |                             <td>356.00 ms</td>
124 |                             <td>0.00 req/s</td>
125 |                         </tr>
126 |                     
127 |                         <tr>
128 |                             <td>Action = &lt;EOF&gt;</td>
129 |                             <td>16</td>
130 |                             <td>0</td>
131 |                             <td>0.00%</td>
132 |                             <td>224.12 ms</td>
133 |                             <td>341.00 ms</td>
134 |                             <td>0.00 req/s</td>
135 |                         </tr>
136 |                     
137 |                         <tr>
138 |                             <td>Login as u2</td>
139 |                             <td>8</td>
140 |                             <td>0</td>
141 |                             <td>0.00%</td>
142 |                             <td>202.12 ms</td>
143 |                             <td>297.30 ms</td>
144 |                             <td>0.00 req/s</td>
145 |                         </tr>
146 |                     
147 |                     </table>
148 |                 </div>
149 |                 
150 |                 <div class="section">
151 |                     <h2>Bottleneck Analysis</h2>
152 |                 
153 |                     <h3>Slow Endpoints</h3>
154 |                     <table>
155 |                         <tr>
156 |                             <th>Endpoint</th>
157 |                             <th>Response Time</th>
158 |                             <th>Threshold</th>
159 |                             <th>Severity</th>
160 |                         </tr>
161 |                     
162 |                             <tr>
163 |                                 <td>Logout</td>
164 |                                 <td>356.00 ms</td>
165 |                                 <td>329.05 ms</td>
166 |                                 <td class="severity-low">LOW</td>
167 |                             </tr>
168 |                         
169 |                             <tr>
170 |                                 <td>Action = none</td>
171 |                                 <td>350.25 ms</td>
172 |                                 <td>329.05 ms</td>
173 |                                 <td class="severity-low">LOW</td>
174 |                             </tr>
175 |                         
176 |                             <tr>
177 |                                 <td>Action c</td>
178 |                                 <td>349.35 ms</td>
179 |                                 <td>329.05 ms</td>
180 |                                 <td class="severity-low">LOW</td>
181 |                             </tr>
182 |                         
183 |                             <tr>
184 |                                 <td>Action = &lt;EOF&gt;</td>
185 |                                 <td>341.00 ms</td>
186 |                                 <td>329.05 ms</td>
187 |                                 <td class="severity-low">LOW</td>
188 |                             </tr>
189 |                         
190 |                     </table>
191 |                     
192 |                 </div>
193 |                 
194 |                 <div class="section">
195 |                     <h2>Insights and Recommendations</h2>
196 |                 
197 |                     <h3>Scaling Insights</h3>
198 |                     <table>
199 |                         <tr>
200 |                             <th>Topic</th>
201 |                             <th>Description</th>
202 |                         </tr>
203 |                     
204 |                             <tr>
205 |                                 <td>No Correlation with Concurrency</td>
206 |                                 <td>There is little to no correlation between the number of concurrent users and response times, suggesting good scalability</td>
207 |                             </tr>
208 |                         
209 |                             <tr>
210 |                                 <td>No Performance Degradation Detected</td>
211 |                                 <td>No significant performance degradation was detected with increasing concurrent users within the tested range</td>
212 |                             </tr>
213 |                         
214 |                     </table>
215 |                     
216 |                 </div>
217 |                 
218 |         </body>
219 |         </html>
220 |         
```

--------------------------------------------------------------------------------
/windsurf_db_reader_alternative.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Windsurf LevelDB Reader - Alternative Implementation
  4 | A tool to read and explore Windsurf's local storage database using pure Python.
  5 | """
  6 | 
  7 | import os
  8 | import json
  9 | import sys
 10 | import struct
 11 | from pathlib import Path
 12 | 
 13 | class SimpleLevelDBReader:
 14 |     """
 15 |     A simple LevelDB reader that can extract basic data without full LevelDB library.
 16 |     This is a simplified approach that may not work for all LevelDB files but can
 17 |     extract readable data from many cases.
 18 |     """
 19 |     
 20 |     def __init__(self, db_path=None):
 21 |         if db_path is None:
 22 |             # Default path for Windsurf Next on macOS
 23 |             home = Path.home()
 24 |             db_path = home / "Library/Application Support/Windsurf - Next/Local Storage/leveldb"
 25 |         
 26 |         self.db_path = Path(db_path)
 27 |         
 28 |     def read_ldb_files(self):
 29 |         """Read .ldb files and try to extract readable data"""
 30 |         if not self.db_path.exists():
 31 |             print(f"❌ Database path does not exist: {self.db_path}")
 32 |             return []
 33 |         
 34 |         ldb_files = list(self.db_path.glob("*.ldb"))
 35 |         if not ldb_files:
 36 |             print("❌ No .ldb files found")
 37 |             return []
 38 |         
 39 |         print(f"📁 Found {len(ldb_files)} .ldb files")
 40 |         
 41 |         all_data = []
 42 |         for ldb_file in ldb_files:
 43 |             print(f"📖 Reading {ldb_file.name}...")
 44 |             data = self._extract_strings_from_ldb(ldb_file)
 45 |             all_data.extend(data)
 46 |         
 47 |         return all_data
 48 |     
 49 |     def _extract_strings_from_ldb(self, file_path):
 50 |         """Extract readable strings from an LDB file"""
 51 |         extracted_data = []
 52 |         
 53 |         try:
 54 |             with open(file_path, 'rb') as f:
 55 |                 content = f.read()
 56 |                 
 57 |             # Look for JSON-like structures and readable strings
 58 |             current_string = ""
 59 |             in_string = False
 60 |             
 61 |             for i, byte in enumerate(content):
 62 |                 char = chr(byte) if 32 <= byte <= 126 else None  # Printable ASCII
 63 |                 
 64 |                 if char:
 65 |                     current_string += char
 66 |                     in_string = True
 67 |                 else:
 68 |                     if in_string and len(current_string) > 10:  # Only keep longer strings
 69 |                         # Check if it looks like JSON or contains useful data
 70 |                         if (current_string.startswith('{') or 
 71 |                             current_string.startswith('[') or
 72 |                             'memory' in current_string.lower() or
 73 |                             'conversation' in current_string.lower() or
 74 |                             'windsurf' in current_string.lower()):
 75 |                             extracted_data.append({
 76 |                                 'file': file_path.name,
 77 |                                 'offset': i - len(current_string),
 78 |                                 'content': current_string,
 79 |                                 'type': self._guess_content_type(current_string)
 80 |                             })
 81 |                     current_string = ""
 82 |                     in_string = False
 83 |             
 84 |             # Don't forget the last string
 85 |             if in_string and len(current_string) > 10:
 86 |                 extracted_data.append({
 87 |                     'file': file_path.name,
 88 |                     'offset': len(content) - len(current_string),
 89 |                     'content': current_string,
 90 |                     'type': self._guess_content_type(current_string)
 91 |                 })
 92 |                 
 93 |         except Exception as e:
 94 |             print(f"❌ Error reading {file_path}: {e}")
 95 |         
 96 |         return extracted_data
 97 |     
 98 |     def _guess_content_type(self, content):
 99 |         """Guess the type of content"""
100 |         content_lower = content.lower()
101 |         
102 |         if content.startswith('{') and content.endswith('}'):
103 |             return 'json_object'
104 |         elif content.startswith('[') and content.endswith(']'):
105 |             return 'json_array'
106 |         elif 'memory' in content_lower:
107 |             return 'memory_related'
108 |         elif 'conversation' in content_lower:
109 |             return 'conversation_related'
110 |         elif any(keyword in content_lower for keyword in ['windsurf', 'cascade', 'user', 'assistant']):
111 |             return 'windsurf_related'
112 |         else:
113 |             return 'text'
114 |     
115 |     def search_data(self, data, pattern):
116 |         """Search extracted data for a pattern"""
117 |         results = []
118 |         pattern_lower = pattern.lower()
119 |         
120 |         for item in data:
121 |             if pattern_lower in item['content'].lower():
122 |                 results.append(item)
123 |         
124 |         return results
125 |     
126 |     def export_data(self, data, output_file="windsurf_extracted_data.json"):
127 |         """Export extracted data to JSON"""
128 |         try:
129 |             with open(output_file, 'w', encoding='utf-8') as f:
130 |                 json.dump(data, f, indent=2, ensure_ascii=False)
131 |             print(f"✅ Exported {len(data)} items to {output_file}")
132 |         except Exception as e:
133 |             print(f"❌ Error exporting data: {e}")
134 |     
135 |     def analyze_data(self, data):
136 |         """Analyze the extracted data"""
137 |         if not data:
138 |             print("❌ No data to analyze")
139 |             return
140 |         
141 |         print(f"\n📊 Analysis of {len(data)} extracted items:")
142 |         print("-" * 50)
143 |         
144 |         # Count by type
145 |         type_counts = {}
146 |         for item in data:
147 |             item_type = item['type']
148 |             type_counts[item_type] = type_counts.get(item_type, 0) + 1
149 |         
150 |         print("📈 Content types:")
151 |         for content_type, count in sorted(type_counts.items()):
152 |             print(f"  {content_type}: {count}")
153 |         
154 |         # Count by file
155 |         file_counts = {}
156 |         for item in data:
157 |             file_name = item['file']
158 |             file_counts[file_name] = file_counts.get(file_name, 0) + 1
159 |         
160 |         print(f"\n📁 Items per file:")
161 |         for file_name, count in sorted(file_counts.items()):
162 |             print(f"  {file_name}: {count}")
163 |         
164 |         # Show some examples
165 |         print(f"\n🔍 Sample content:")
166 |         for i, item in enumerate(data[:5]):  # Show first 5 items
167 |             content_preview = item['content'][:100] + "..." if len(item['content']) > 100 else item['content']
168 |             print(f"  {i+1}. [{item['type']}] {content_preview}")
169 | 
170 | def main():
171 |     """Main function with interactive menu"""
172 |     reader = SimpleLevelDBReader()
173 |     
174 |     print("🌊 Windsurf Database Reader (Alternative)")
175 |     print("=" * 60)
176 |     print("This tool extracts readable strings from LevelDB files.")
177 |     print("It may not capture all data but can find JSON and text content.")
178 |     print("=" * 60)
179 |     
180 |     try:
181 |         while True:
182 |             print("\nOptions:")
183 |             print("1. Extract all readable data")
184 |             print("2. Search for specific pattern")
185 |             print("3. Analyze extracted data")
186 |             print("4. Export data to JSON")
187 |             print("0. Exit")
188 |             print("-" * 40)
189 |             
190 |             choice = input("Enter your choice (0-4): ").strip()
191 |             
192 |             if choice == '0':
193 |                 break
194 |             elif choice == '1':
195 |                 print("🔄 Extracting data from LevelDB files...")
196 |                 data = reader.read_ldb_files()
197 |                 if data:
198 |                     reader.analyze_data(data)
199 |                     # Store data for other operations
200 |                     globals()['extracted_data'] = data
201 |                 else:
202 |                     print("❌ No readable data found")
203 |             elif choice == '2':
204 |                 if 'extracted_data' not in globals():
205 |                     print("❌ Please extract data first (option 1)")
206 |                     continue
207 |                 pattern = input("Enter search pattern: ").strip()
208 |                 if pattern:
209 |                     results = reader.search_data(globals()['extracted_data'], pattern)
210 |                     print(f"\n🔍 Found {len(results)} matches for '{pattern}':")
211 |                     for i, item in enumerate(results[:10]):  # Show first 10 matches
212 |                         content_preview = item['content'][:200] + "..." if len(item['content']) > 200 else item['content']
213 |                         print(f"\n{i+1}. File: {item['file']}, Type: {item['type']}")
214 |                         print(f"Content: {content_preview}")
215 |                         print("-" * 40)
216 |             elif choice == '3':
217 |                 if 'extracted_data' not in globals():
218 |                     print("❌ Please extract data first (option 1)")
219 |                     continue
220 |                 reader.analyze_data(globals()['extracted_data'])
221 |             elif choice == '4':
222 |                 if 'extracted_data' not in globals():
223 |                     print("❌ Please extract data first (option 1)")
224 |                     continue
225 |                 filename = input("Enter output filename (default: windsurf_extracted_data.json): ").strip()
226 |                 if not filename:
227 |                     filename = "windsurf_extracted_data.json"
228 |                 reader.export_data(globals()['extracted_data'], filename)
229 |             else:
230 |                 print("❌ Invalid choice. Please try again.")
231 |                 
232 |     except KeyboardInterrupt:
233 |         print("\n👋 Goodbye!")
234 | 
235 | if __name__ == "__main__":
236 |     main()
237 | 
```
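
The interactive menu in `main()` is optional; the `SimpleLevelDBReader` class can also be driven directly. A minimal sketch, assuming this file is importable as `windsurf_db_reader_alternative` from the repository root and that the default Windsurf LevelDB path exists on the machine (the output filename is just an example):

```python
# Non-interactive use of SimpleLevelDBReader (illustrative sketch).
from windsurf_db_reader_alternative import SimpleLevelDBReader

reader = SimpleLevelDBReader()            # or SimpleLevelDBReader("/custom/path/leveldb")
data = reader.read_ldb_files()            # list of {'file', 'offset', 'content', 'type'} dicts

if data:
    reader.analyze_data(data)                         # print type/file breakdown and samples
    matches = reader.search_data(data, "memory")      # case-insensitive substring search
    reader.export_data(matches, "memory_items.json")  # example output filename
```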

--------------------------------------------------------------------------------
/analyzer/metrics/calculator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Metrics calculator for JMeter test results.
  3 | 
  4 | This module provides functionality for calculating performance metrics
  5 | from JMeter test results, including overall metrics, endpoint-specific metrics,
  6 | and time series metrics.
  7 | """
  8 | 
  9 | import math
 10 | import statistics
 11 | from collections import defaultdict
 12 | from datetime import datetime, timedelta
 13 | from typing import Dict, List, Optional, Tuple
 14 | 
 15 | from analyzer.models import (EndpointMetrics, OverallMetrics, Sample,
 16 |                            TestResults, TimeSeriesMetrics)
 17 | 
 18 | 
 19 | class MetricsCalculator:
 20 |     """Calculator for performance metrics from test results."""
 21 |     
 22 |     def calculate_overall_metrics(self, test_results: TestResults) -> OverallMetrics:
 23 |         """Calculate overall metrics for the entire test.
 24 |         
 25 |         Args:
 26 |             test_results: TestResults object containing samples
 27 |             
 28 |         Returns:
 29 |             OverallMetrics object with calculated metrics
 30 |             
 31 |         Raises:
 32 |             ValueError: If test_results contains no samples
 33 |         """
 34 |         if not test_results.samples:
 35 |             raise ValueError("Cannot calculate metrics for empty test results")
 36 |         
 37 |         # Extract response times and success status
 38 |         response_times = [sample.response_time for sample in test_results.samples]
 39 |         success_count = sum(1 for sample in test_results.samples if sample.success)
 40 |         error_count = len(test_results.samples) - success_count
 41 |         
 42 |         # Calculate duration
 43 |         if test_results.start_time and test_results.end_time:
 44 |             duration = (test_results.end_time - test_results.start_time).total_seconds()
 45 |         else:
 46 |             duration = 0
 47 |         
 48 |         # Calculate throughput (requests per second)
 49 |         throughput = len(test_results.samples) / duration if duration > 0 else 0
 50 |         
 51 |         # Calculate percentiles
 52 |         response_times_sorted = sorted(response_times)
 53 |         
 54 |         # Create metrics object
 55 |         metrics = OverallMetrics(
 56 |             total_samples=len(test_results.samples),
 57 |             error_count=error_count,
 58 |             error_rate=(error_count / len(test_results.samples)) * 100 if test_results.samples else 0,
 59 |             average_response_time=statistics.mean(response_times) if response_times else 0,
 60 |             median_response_time=statistics.median(response_times) if response_times else 0,
 61 |             percentile_90=self._calculate_percentile(response_times_sorted, 90),
 62 |             percentile_95=self._calculate_percentile(response_times_sorted, 95),
 63 |             percentile_99=self._calculate_percentile(response_times_sorted, 99),
 64 |             min_response_time=min(response_times) if response_times else 0,
 65 |             max_response_time=max(response_times) if response_times else 0,
 66 |             throughput=throughput,
 67 |             test_duration=duration
 68 |         )
 69 |         
 70 |         return metrics
 71 |     
 72 |     def calculate_endpoint_metrics(self, test_results: TestResults) -> Dict[str, EndpointMetrics]:
 73 |         """Calculate metrics broken down by endpoint/sampler.
 74 |         
 75 |         Args:
 76 |             test_results: TestResults object containing samples
 77 |             
 78 |         Returns:
 79 |             Dictionary mapping endpoint names to EndpointMetrics objects
 80 |             
 81 |         Raises:
 82 |             ValueError: If test_results contains no samples
 83 |         """
 84 |         if not test_results.samples:
 85 |             raise ValueError("Cannot calculate metrics for empty test results")
 86 |         
 87 |         # Group samples by endpoint
 88 |         endpoints = defaultdict(list)
 89 |         for sample in test_results.samples:
 90 |             endpoints[sample.label].append(sample)
 91 |         
 92 |         # Calculate metrics for each endpoint
 93 |         endpoint_metrics = {}
 94 |         for endpoint, samples in endpoints.items():
 95 |             # Create a temporary TestResults object with only samples for this endpoint
 96 |             temp_results = TestResults()
 97 |             for sample in samples:
 98 |                 temp_results.add_sample(sample)
 99 |             
100 |             # Calculate overall metrics for this endpoint
101 |             overall_metrics = self.calculate_overall_metrics(temp_results)
102 |             
103 |             # Create endpoint metrics
104 |             metrics = EndpointMetrics(
105 |                 endpoint=endpoint,
106 |                 total_samples=overall_metrics.total_samples,
107 |                 error_count=overall_metrics.error_count,
108 |                 error_rate=overall_metrics.error_rate,
109 |                 average_response_time=overall_metrics.average_response_time,
110 |                 median_response_time=overall_metrics.median_response_time,
111 |                 percentile_90=overall_metrics.percentile_90,
112 |                 percentile_95=overall_metrics.percentile_95,
113 |                 percentile_99=overall_metrics.percentile_99,
114 |                 min_response_time=overall_metrics.min_response_time,
115 |                 max_response_time=overall_metrics.max_response_time,
116 |                 throughput=overall_metrics.throughput,
117 |                 test_duration=overall_metrics.test_duration
118 |             )
119 |             
120 |             endpoint_metrics[endpoint] = metrics
121 |         
122 |         return endpoint_metrics
123 |     
124 |     def calculate_time_series_metrics(self, test_results: TestResults, 
125 |                                      interval_seconds: int = 5) -> List[TimeSeriesMetrics]:
126 |         """Calculate metrics over time using the specified interval.
127 |         
128 |         Args:
129 |             test_results: TestResults object containing samples
130 |             interval_seconds: Time interval in seconds (default: 5)
131 |             
132 |         Returns:
133 |             List of TimeSeriesMetrics objects, one for each interval
134 |             
135 |         Raises:
136 |             ValueError: If test_results contains no samples or if interval_seconds <= 0
137 |         """
138 |         if not test_results.samples:
139 |             raise ValueError("Cannot calculate metrics for empty test results")
140 |         
141 |         if interval_seconds <= 0:
142 |             raise ValueError("Interval must be positive")
143 |         
144 |         if not test_results.start_time or not test_results.end_time:
145 |             raise ValueError("Test results must have start and end times")
146 |         
147 |         # Create time intervals
148 |         start_time = test_results.start_time
149 |         end_time = test_results.end_time
150 |         
151 |         # Ensure we have at least one interval
152 |         if (end_time - start_time).total_seconds() < interval_seconds:
153 |             end_time = start_time + timedelta(seconds=interval_seconds)
154 |         
155 |         intervals = []
156 |         current_time = start_time
157 |         while current_time < end_time:
158 |             next_time = current_time + timedelta(seconds=interval_seconds)
159 |             intervals.append((current_time, next_time))
160 |             current_time = next_time
161 |         
162 |         # Group samples by interval
163 |         interval_samples = [[] for _ in range(len(intervals))]
164 |         for sample in test_results.samples:
165 |             for i, (start, end) in enumerate(intervals):
166 |                 if start <= sample.timestamp < end:
167 |                     interval_samples[i].append(sample)
168 |                     break
169 |         
170 |         # Calculate metrics for each interval
171 |         time_series_metrics = []
172 |         for i, (start, end) in enumerate(intervals):
173 |             samples = interval_samples[i]
174 |             
175 |             # Skip intervals with no samples
176 |             if not samples:
177 |                 continue
178 |             
179 |             # Calculate metrics for this interval
180 |             response_times = [sample.response_time for sample in samples]
181 |             error_count = sum(1 for sample in samples if not sample.success)
182 |             
183 |             # Count active threads (approximation based on unique thread names)
184 |             thread_names = set(sample.thread_name for sample in samples if sample.thread_name)
185 |             active_threads = len(thread_names)
186 |             
187 |             # Calculate throughput for this interval
188 |             interval_duration = (end - start).total_seconds()
189 |             throughput = len(samples) / interval_duration if interval_duration > 0 else 0
190 |             
191 |             # Create metrics object
192 |             metrics = TimeSeriesMetrics(
193 |                 timestamp=start,
194 |                 active_threads=active_threads,
195 |                 throughput=throughput,
196 |                 average_response_time=statistics.mean(response_times) if response_times else 0,
197 |                 error_rate=(error_count / len(samples)) * 100 if samples else 0
198 |             )
199 |             
200 |             time_series_metrics.append(metrics)
201 |         
202 |         return time_series_metrics
203 |     
204 |     def compare_with_benchmarks(self, metrics: OverallMetrics, 
205 |                                benchmarks: Dict[str, float]) -> Dict[str, Dict[str, float]]:
206 |         """Compare metrics with benchmarks.
207 |         
208 |         Args:
209 |             metrics: OverallMetrics object
210 |             benchmarks: Dictionary mapping metric names to benchmark values
211 |             
212 |         Returns:
213 |             Dictionary with comparison results
214 |         """
215 |         comparison = {}
216 |         
217 |         for metric_name, benchmark_value in benchmarks.items():
218 |             if hasattr(metrics, metric_name):
219 |                 actual_value = getattr(metrics, metric_name)
220 |                 difference = actual_value - benchmark_value
221 |                 percent_difference = (difference / benchmark_value) * 100 if benchmark_value != 0 else float('inf')
222 |                 
223 |                 comparison[metric_name] = {
224 |                     'benchmark': benchmark_value,
225 |                     'actual': actual_value,
226 |                     'difference': difference,
227 |                     'percent_difference': percent_difference
228 |                 }
229 |         
230 |         return comparison
231 |     
232 |     def _calculate_percentile(self, sorted_values: List[float], percentile: float) -> float:
233 |         """Calculate a percentile from sorted values.
234 |         
235 |         Args:
236 |             sorted_values: List of values, sorted in ascending order
237 |             percentile: Percentile to calculate (0-100)
238 |             
239 |         Returns:
240 |             Percentile value
241 |         """
242 |         if not sorted_values:
243 |             return 0
244 |         
245 |         # Calculate percentile index
246 |         index = (percentile / 100) * (len(sorted_values) - 1)
247 |         
248 |         # If index is an integer, return the value at that index
249 |         if index.is_integer():
250 |             return sorted_values[int(index)]
251 |         
252 |         # Otherwise, interpolate between the two nearest values
253 |         lower_index = math.floor(index)
254 |         upper_index = math.ceil(index)
255 |         lower_value = sorted_values[lower_index]
256 |         upper_value = sorted_values[upper_index]
257 |         fraction = index - lower_index
258 |         
259 |         return lower_value + (upper_value - lower_value) * fraction
```
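
The `_calculate_percentile` helper above uses linear interpolation between the two nearest ranks rather than nearest-rank rounding. A standalone sketch of the same arithmetic on a toy list of response times (independent of the project's model classes) makes the behaviour concrete:

```python
import math

def percentile(sorted_values, p):
    """Linear-interpolation percentile, mirroring MetricsCalculator._calculate_percentile."""
    if not sorted_values:
        return 0
    index = (p / 100) * (len(sorted_values) - 1)
    if index.is_integer():
        return sorted_values[int(index)]
    lower, upper = math.floor(index), math.ceil(index)
    fraction = index - lower
    return sorted_values[lower] + (sorted_values[upper] - sorted_values[lower]) * fraction

rts = sorted([100, 120, 150, 180, 200, 240, 300, 400, 450, 600])
print(percentile(rts, 90))   # index 8.1 -> 450 + 0.1 * (600 - 450) = 465.0
print(percentile(rts, 50))   # index 4.5 -> midpoint of 200 and 240 = 220.0
```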

--------------------------------------------------------------------------------
/analyzer/bottleneck/analyzer.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Bottleneck analyzer for JMeter test results.
  3 | 
  4 | This module provides functionality for identifying performance bottlenecks
  5 | in JMeter test results, including slow endpoints, error-prone endpoints,
  6 | response time anomalies, and concurrency impact analysis.
  7 | """
  8 | 
  9 | import statistics
 10 | from typing import Dict, List, Optional, Tuple
 11 | 
 12 | from analyzer.models import (Anomaly, Bottleneck, EndpointMetrics,
 13 |                            OverallMetrics, Sample, TestResults,
 14 |                            TimeSeriesMetrics)
 15 | 
 16 | 
 17 | class BottleneckAnalyzer:
 18 |     """Analyzer for identifying performance bottlenecks."""
 19 |     
 20 |     def identify_slow_endpoints(self, endpoint_metrics: Dict[str, EndpointMetrics], 
 21 |                                threshold_percentile: float = 95,
 22 |                                threshold_factor: float = 1.5) -> List[Bottleneck]:
 23 |         """Identify endpoints with the highest response times.
 24 |         
 25 |         Args:
 26 |             endpoint_metrics: Dictionary mapping endpoint names to EndpointMetrics objects
 27 |             threshold_percentile: Percentile to use for response time threshold (default: 95)
 28 |             threshold_factor: Factor to multiply the average response time by (default: 1.5)
 29 |             
 30 |         Returns:
 31 |             List of Bottleneck objects for slow endpoints
 32 |         """
 33 |         if not endpoint_metrics:
 34 |             return []
 35 |         
 36 |         # Calculate average response time across all endpoints
 37 |         avg_response_times = [metrics.average_response_time for metrics in endpoint_metrics.values()]
 38 |         overall_avg_response_time = statistics.mean(avg_response_times) if avg_response_times else 0
 39 |         
 40 |         # Calculate threshold
 41 |         threshold = overall_avg_response_time * threshold_factor
 42 |         
 43 |         # Identify slow endpoints
 44 |         bottlenecks = []
 45 |         for endpoint, metrics in endpoint_metrics.items():
 46 |             # Get the response time at the specified percentile
 47 |             percentile_rt = getattr(metrics, f"percentile_{int(threshold_percentile)}", metrics.average_response_time)
 48 |             
 49 |             # Check if the endpoint is slow
 50 |             if percentile_rt > threshold:
 51 |                 # Determine severity based on how much it exceeds the threshold
 52 |                 if percentile_rt > threshold * 2:
 53 |                     severity = "high"
 54 |                 elif percentile_rt > threshold * 1.5:
 55 |                     severity = "medium"
 56 |                 else:
 57 |                     severity = "low"
 58 |                 
 59 |                 bottleneck = Bottleneck(
 60 |                     endpoint=endpoint,
 61 |                     metric_type="response_time",
 62 |                     value=percentile_rt,
 63 |                     threshold=threshold,
 64 |                     severity=severity
 65 |                 )
 66 |                 
 67 |                 bottlenecks.append(bottleneck)
 68 |         
 69 |         # Sort bottlenecks by severity and then by value (descending)
 70 |         severity_order = {"high": 0, "medium": 1, "low": 2}
 71 |         bottlenecks.sort(key=lambda b: (severity_order.get(b.severity, 3), -b.value))
 72 |         
 73 |         return bottlenecks
 74 |     
 75 |     def identify_error_prone_endpoints(self, endpoint_metrics: Dict[str, EndpointMetrics], 
 76 |                                       threshold_error_rate: float = 1.0) -> List[Bottleneck]:
 77 |         """Identify endpoints with the highest error rates.
 78 |         
 79 |         Args:
 80 |             endpoint_metrics: Dictionary mapping endpoint names to EndpointMetrics objects
 81 |             threshold_error_rate: Minimum error rate to consider as a bottleneck (default: 1.0%)
 82 |             
 83 |         Returns:
 84 |             List of Bottleneck objects for error-prone endpoints
 85 |         """
 86 |         if not endpoint_metrics:
 87 |             return []
 88 |         
 89 |         # Identify error-prone endpoints
 90 |         bottlenecks = []
 91 |         for endpoint, metrics in endpoint_metrics.items():
 92 |             # Skip endpoints with no errors
 93 |             if metrics.error_count == 0:
 94 |                 continue
 95 |             
 96 |             # Check if the endpoint has a high error rate
 97 |             if metrics.error_rate >= threshold_error_rate:
 98 |                 # Determine severity based on error rate
 99 |                 if metrics.error_rate >= 10.0:
100 |                     severity = "high"
101 |                 elif metrics.error_rate >= 5.0:
102 |                     severity = "medium"
103 |                 else:
104 |                     severity = "low"
105 |                 
106 |                 bottleneck = Bottleneck(
107 |                     endpoint=endpoint,
108 |                     metric_type="error_rate",
109 |                     value=metrics.error_rate,
110 |                     threshold=threshold_error_rate,
111 |                     severity=severity
112 |                 )
113 |                 
114 |                 bottlenecks.append(bottleneck)
115 |         
116 |         # Sort bottlenecks by severity and then by value (descending)
117 |         severity_order = {"high": 0, "medium": 1, "low": 2}
118 |         bottlenecks.sort(key=lambda b: (severity_order.get(b.severity, 3), -b.value))
119 |         
120 |         return bottlenecks
121 |     
122 |     def detect_anomalies(self, time_series_metrics: List[TimeSeriesMetrics], 
123 |                         z_score_threshold: float = 2.0) -> List[Anomaly]:
124 |         """Detect response time anomalies and outliers.
125 |         
126 |         Args:
127 |             time_series_metrics: List of TimeSeriesMetrics objects
128 |             z_score_threshold: Z-score threshold for anomaly detection (default: 2.0)
129 |             
130 |         Returns:
131 |             List of Anomaly objects
132 |         """
133 |         if not time_series_metrics:
134 |             return []
135 |         
136 |         # Extract response times
137 |         response_times = [metrics.average_response_time for metrics in time_series_metrics]
138 |         
139 |         # Calculate mean and standard deviation
140 |         mean_rt = statistics.mean(response_times)
141 |         stdev_rt = statistics.stdev(response_times) if len(response_times) > 1 else 0
142 |         
143 |         # Detect anomalies
144 |         anomalies = []
145 |         for metrics in time_series_metrics:
146 |             # Skip if standard deviation is zero (all values are the same)
147 |             if stdev_rt == 0:
148 |                 continue
149 |             
150 |             # Calculate z-score
151 |             z_score = (metrics.average_response_time - mean_rt) / stdev_rt
152 |             
153 |             # Check if the response time is an anomaly
154 |             if abs(z_score) >= z_score_threshold:
155 |                 # Calculate deviation percentage
156 |                 deviation_percentage = ((metrics.average_response_time - mean_rt) / mean_rt) * 100
157 |                 
158 |                 anomaly = Anomaly(
159 |                     timestamp=metrics.timestamp,
160 |                     endpoint="overall",  # Overall anomaly, not endpoint-specific
161 |                     expected_value=mean_rt,
162 |                     actual_value=metrics.average_response_time,
163 |                     deviation_percentage=deviation_percentage
164 |                 )
165 |                 
166 |                 anomalies.append(anomaly)
167 |         
168 |         # Sort anomalies by deviation percentage (descending)
169 |         anomalies.sort(key=lambda a: abs(a.deviation_percentage), reverse=True)
170 |         
171 |         return anomalies
172 |     
173 |     def analyze_concurrency_impact(self, time_series_metrics: List[TimeSeriesMetrics]) -> Dict:
174 |         """Analyze the impact of concurrency on performance.
175 |         
176 |         Args:
177 |             time_series_metrics: List of TimeSeriesMetrics objects
178 |             
179 |         Returns:
180 |             Dictionary containing concurrency analysis results
181 |         """
182 |         if not time_series_metrics:
183 |             return {"correlation": 0, "degradation_threshold": 0, "has_degradation": False}
184 |         
185 |         # Extract thread counts and response times
186 |         thread_counts = [metrics.active_threads for metrics in time_series_metrics]
187 |         response_times = [metrics.average_response_time for metrics in time_series_metrics]
188 |         
189 |         # Skip if there's no variation in thread counts
190 |         if len(set(thread_counts)) <= 1:
191 |             return {"correlation": 0, "degradation_threshold": 0, "has_degradation": False}
192 |         
193 |         # Calculate correlation between thread count and response time
194 |         try:
195 |             correlation = self._calculate_correlation(thread_counts, response_times)
196 |         except (ValueError, ZeroDivisionError):
197 |             correlation = 0
198 |         
199 |         # Identify potential degradation threshold
200 |         degradation_threshold = 0
201 |         has_degradation = False
202 |         
203 |         if correlation > 0.5:  # Strong positive correlation
204 |             # Group by thread count
205 |             thread_rt_map = {}
206 |             for metrics in time_series_metrics:
207 |                 if metrics.active_threads not in thread_rt_map:
208 |                     thread_rt_map[metrics.active_threads] = []
209 |                 thread_rt_map[metrics.active_threads].append(metrics.average_response_time)
210 |             
211 |             # Calculate average response time for each thread count
212 |             thread_avg_rt = {
213 |                 threads: statistics.mean(rts)
214 |                 for threads, rts in thread_rt_map.items()
215 |             }
216 |             
217 |             # Sort by thread count
218 |             sorted_threads = sorted(thread_avg_rt.keys())
219 |             
220 |             # Look for significant increases in response time
221 |             for i in range(1, len(sorted_threads)):
222 |                 prev_threads = sorted_threads[i-1]
223 |                 curr_threads = sorted_threads[i]
224 |                 prev_rt = thread_avg_rt[prev_threads]
225 |                 curr_rt = thread_avg_rt[curr_threads]
226 |                 
227 |                 # Check if response time increased by more than 50%
228 |                 if curr_rt > prev_rt * 1.5:
229 |                     degradation_threshold = curr_threads
230 |                     has_degradation = True
231 |                     break
232 |         
233 |         return {
234 |             "correlation": correlation,
235 |             "degradation_threshold": degradation_threshold,
236 |             "has_degradation": has_degradation
237 |         }
238 |     
239 |     def _calculate_correlation(self, x: List[float], y: List[float]) -> float:
240 |         """Calculate Pearson correlation coefficient between two lists.
241 |         
242 |         Args:
243 |             x: First list of values
244 |             y: Second list of values
245 |             
246 |         Returns:
247 |             Correlation coefficient (-1 to 1)
248 |         """
249 |         if len(x) != len(y) or len(x) < 2:
250 |             return 0
251 |         
252 |         # Calculate means
253 |         mean_x = statistics.mean(x)
254 |         mean_y = statistics.mean(y)
255 |         
256 |         # Calculate numerator and denominators
257 |         numerator = sum((xi - mean_x) * (yi - mean_y) for xi, yi in zip(x, y))
258 |         denom_x = sum((xi - mean_x) ** 2 for xi in x)
259 |         denom_y = sum((yi - mean_y) ** 2 for yi in y)
260 |         
261 |         # Calculate correlation
262 |         if denom_x == 0 or denom_y == 0:
263 |             return 0
264 |         
265 |         return numerator / ((denom_x ** 0.5) * (denom_y ** 0.5))
```
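
`detect_anomalies` flags any interval whose average response time deviates from the mean of all intervals by at least `z_score_threshold` standard deviations. A minimal standalone sketch of that z-score test on plain numbers (no project model classes involved):

```python
import statistics

def zscore_outliers(values, threshold=2.0):
    """Flag values whose |z-score| meets the threshold, as detect_anomalies does per interval."""
    mean = statistics.mean(values)
    stdev = statistics.stdev(values) if len(values) > 1 else 0
    if stdev == 0:
        return []   # all values identical: nothing to flag
    return [
        (i, v, (v - mean) / stdev)
        for i, v in enumerate(values)
        if abs((v - mean) / stdev) >= threshold
    ]

avg_rts = [210, 205, 198, 215, 220, 520, 212, 208]   # one interval spikes
for idx, value, z in zscore_outliers(avg_rts, threshold=2.0):
    print(f"interval {idx}: {value} ms (z={z:.2f})")   # only the 520 ms interval is reported
```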

--------------------------------------------------------------------------------
/tests/test_visualization_engine.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for the visualization engine.
  3 | """
  4 | 
  5 | import os
  6 | import tempfile
  7 | import unittest
  8 | from datetime import datetime, timedelta
  9 | 
 10 | from analyzer.models import EndpointMetrics, TimeSeriesMetrics
 11 | from analyzer.visualization.engine import VisualizationEngine
 12 | 
 13 | 
 14 | class TestVisualizationEngine(unittest.TestCase):
 15 |     """Tests for the VisualizationEngine class."""
 16 |     
 17 |     def setUp(self):
 18 |         """Set up test fixtures."""
 19 |         # Create a temporary directory for output files
 20 |         self.temp_dir = tempfile.mkdtemp()
 21 |         self.engine = VisualizationEngine(output_dir=self.temp_dir)
 22 |         
 23 |         # Create time series metrics
 24 |         base_time = datetime(2023, 1, 1, 12, 0, 0)
 25 |         self.time_series_metrics = [
 26 |             TimeSeriesMetrics(
 27 |                 timestamp=base_time + timedelta(seconds=i * 5),
 28 |                 active_threads=i + 1,
 29 |                 throughput=10.0 + i * 0.5,
 30 |                 average_response_time=100.0 + i * 20,
 31 |                 error_rate=0.0 if i < 8 else 5.0
 32 |             )
 33 |             for i in range(10)
 34 |         ]
 35 |         
 36 |         # Create endpoint metrics
 37 |         self.endpoint_metrics = {
 38 |             "Endpoint 1": EndpointMetrics(
 39 |                 endpoint="Endpoint 1",
 40 |                 total_samples=100,
 41 |                 error_count=0,
 42 |                 error_rate=0.0,
 43 |                 average_response_time=100.0,
 44 |                 median_response_time=95.0,
 45 |                 percentile_90=150.0,
 46 |                 percentile_95=180.0,
 47 |                 percentile_99=200.0,
 48 |                 min_response_time=50.0,
 49 |                 max_response_time=250.0,
 50 |                 throughput=10.0,
 51 |                 test_duration=10.0
 52 |             ),
 53 |             "Endpoint 2": EndpointMetrics(
 54 |                 endpoint="Endpoint 2",
 55 |                 total_samples=100,
 56 |                 error_count=5,
 57 |                 error_rate=5.0,
 58 |                 average_response_time=200.0,
 59 |                 median_response_time=190.0,
 60 |                 percentile_90=300.0,
 61 |                 percentile_95=350.0,
 62 |                 percentile_99=400.0,
 63 |                 min_response_time=100.0,
 64 |                 max_response_time=450.0,
 65 |                 throughput=8.0,
 66 |                 test_duration=10.0
 67 |             ),
 68 |             "Endpoint 3": EndpointMetrics(
 69 |                 endpoint="Endpoint 3",
 70 |                 total_samples=100,
 71 |                 error_count=10,
 72 |                 error_rate=10.0,
 73 |                 average_response_time=300.0,
 74 |                 median_response_time=280.0,
 75 |                 percentile_90=450.0,
 76 |                 percentile_95=500.0,
 77 |                 percentile_99=600.0,
 78 |                 min_response_time=150.0,
 79 |                 max_response_time=700.0,
 80 |                 throughput=5.0,
 81 |                 test_duration=10.0
 82 |             )
 83 |         }
 84 |         
 85 |         # Create response times
 86 |         self.response_times = [100, 120, 130, 140, 150, 160, 170, 180, 190, 200, 
 87 |                               220, 240, 260, 280, 300, 350, 400, 450, 500, 600]
 88 |         
 89 |         # Create analysis results
 90 |         self.analysis_results = {
 91 |             "summary": {
 92 |                 "total_samples": 300,
 93 |                 "error_count": 15,
 94 |                 "error_rate": 5.0,
 95 |                 "average_response_time": 200.0,
 96 |                 "median_response_time": 180.0,
 97 |                 "percentile_90": 350.0,
 98 |                 "percentile_95": 400.0,
 99 |                 "percentile_99": 500.0,
100 |                 "min_response_time": 100.0,
101 |                 "max_response_time": 600.0,
102 |                 "throughput": 7.5,
103 |                 "start_time": datetime(2023, 1, 1, 12, 0, 0),
104 |                 "end_time": datetime(2023, 1, 1, 12, 0, 40),
105 |                 "duration": 40.0
106 |             },
107 |             "detailed": {
108 |                 "endpoints": {
109 |                     "Endpoint 1": {
110 |                         "total_samples": 100,
111 |                         "error_count": 0,
112 |                         "error_rate": 0.0,
113 |                         "average_response_time": 100.0,
114 |                         "median_response_time": 95.0,
115 |                         "percentile_90": 150.0,
116 |                         "percentile_95": 180.0,
117 |                         "percentile_99": 200.0,
118 |                         "min_response_time": 50.0,
119 |                         "max_response_time": 250.0,
120 |                         "throughput": 10.0
121 |                     },
122 |                     "Endpoint 2": {
123 |                         "total_samples": 100,
124 |                         "error_count": 5,
125 |                         "error_rate": 5.0,
126 |                         "average_response_time": 200.0,
127 |                         "median_response_time": 190.0,
128 |                         "percentile_90": 300.0,
129 |                         "percentile_95": 350.0,
130 |                         "percentile_99": 400.0,
131 |                         "min_response_time": 100.0,
132 |                         "max_response_time": 450.0,
133 |                         "throughput": 8.0
134 |                     },
135 |                     "Endpoint 3": {
136 |                         "total_samples": 100,
137 |                         "error_count": 10,
138 |                         "error_rate": 10.0,
139 |                         "average_response_time": 300.0,
140 |                         "median_response_time": 280.0,
141 |                         "percentile_90": 450.0,
142 |                         "percentile_95": 500.0,
143 |                         "percentile_99": 600.0,
144 |                         "min_response_time": 150.0,
145 |                         "max_response_time": 700.0,
146 |                         "throughput": 5.0
147 |                     }
148 |                 },
149 |                 "bottlenecks": {
150 |                     "slow_endpoints": [
151 |                         {
152 |                             "endpoint": "Endpoint 3",
153 |                             "response_time": 300.0,
154 |                             "threshold": 200.0,
155 |                             "severity": "high"
156 |                         }
157 |                     ],
158 |                     "error_prone_endpoints": [
159 |                         {
160 |                             "endpoint": "Endpoint 3",
161 |                             "error_rate": 10.0,
162 |                             "threshold": 5.0,
163 |                             "severity": "medium"
164 |                         }
165 |                     ]
166 |                 },
167 |                 "insights": {
168 |                     "recommendations": [
169 |                         {
170 |                             "issue": "High response time in Endpoint 3",
171 |                             "recommendation": "Optimize database queries",
172 |                             "expected_impact": "Reduced response time",
173 |                             "implementation_difficulty": "medium",
174 |                             "priority_level": "high"
175 |                         }
176 |                     ],
177 |                     "scaling_insights": [
178 |                         {
179 |                             "topic": "Concurrency Impact",
180 |                             "description": "Performance degrades with increasing concurrency"
181 |                         }
182 |                     ]
183 |                 }
184 |             }
185 |         }
186 |     
187 |     def tearDown(self):
188 |         """Tear down test fixtures."""
189 |         # Clean up temporary directory
190 |         for file in os.listdir(self.temp_dir):
191 |             os.remove(os.path.join(self.temp_dir, file))
192 |         os.rmdir(self.temp_dir)
193 |     
194 |     def test_create_time_series_graph(self):
195 |         """Test creating a time series graph."""
196 |         # Test with default parameters
197 |         graph = self.engine.create_time_series_graph(self.time_series_metrics)
198 |         self.assertIsNotNone(graph)
199 |         self.assertEqual(graph["type"], "time_series")
200 |         
201 |         # Test with output file
202 |         output_file = "time_series.txt"
203 |         output_path = self.engine.create_time_series_graph(
204 |             self.time_series_metrics, output_file=output_file)
205 |         self.assertTrue(os.path.exists(output_path))
206 |         
207 |         # Test with different metric
208 |         graph = self.engine.create_time_series_graph(
209 |             self.time_series_metrics, metric_name="throughput")
210 |         self.assertIsNotNone(graph)
211 |         self.assertEqual(graph["y_label"], "Throughput (requests/second)")
212 |         
213 |         # Test with empty metrics
214 |         with self.assertRaises(ValueError):
215 |             self.engine.create_time_series_graph([])
216 |         
217 |         # Test with invalid metric name
218 |         with self.assertRaises(ValueError):
219 |             self.engine.create_time_series_graph(
220 |                 self.time_series_metrics, metric_name="invalid_metric")
221 |     
222 |     def test_create_distribution_graph(self):
223 |         """Test creating a distribution graph."""
224 |         # Test with default parameters
225 |         graph = self.engine.create_distribution_graph(self.response_times)
226 |         self.assertIsNotNone(graph)
227 |         self.assertEqual(graph["type"], "distribution")
228 |         
229 |         # Test with output file
230 |         output_file = "distribution.txt"
231 |         output_path = self.engine.create_distribution_graph(
232 |             self.response_times, output_file=output_file)
233 |         self.assertTrue(os.path.exists(output_path))
234 |         
235 |         # Test with custom percentiles
236 |         graph = self.engine.create_distribution_graph(
237 |             self.response_times, percentiles=[25, 50, 75])
238 |         self.assertIsNotNone(graph)
239 |         self.assertIn(25, graph["percentiles"])
240 |         self.assertIn(50, graph["percentiles"])
241 |         self.assertIn(75, graph["percentiles"])
242 |         
243 |         # Test with empty response times
244 |         with self.assertRaises(ValueError):
245 |             self.engine.create_distribution_graph([])
246 |     
247 |     def test_create_endpoint_comparison_chart(self):
248 |         """Test creating an endpoint comparison chart."""
249 |         # Test with default parameters
250 |         chart = self.engine.create_endpoint_comparison_chart(self.endpoint_metrics)
251 |         self.assertIsNotNone(chart)
252 |         self.assertEqual(chart["type"], "endpoint_comparison")
253 |         
254 |         # Test with output file
255 |         output_file = "comparison.txt"
256 |         output_path = self.engine.create_endpoint_comparison_chart(
257 |             self.endpoint_metrics, output_file=output_file)
258 |         self.assertTrue(os.path.exists(output_path))
259 |         
260 |         # Test with different metric
261 |         chart = self.engine.create_endpoint_comparison_chart(
262 |             self.endpoint_metrics, metric_name="error_rate")
263 |         self.assertIsNotNone(chart)
264 |         self.assertEqual(chart["x_label"], "Error Rate (%)")
265 |         
266 |         # Test with empty metrics
267 |         with self.assertRaises(ValueError):
268 |             self.engine.create_endpoint_comparison_chart({})
269 |         
270 |         # Test with invalid metric name
271 |         with self.assertRaises(ValueError):
272 |             self.engine.create_endpoint_comparison_chart(
273 |                 self.endpoint_metrics, metric_name="invalid_metric")
274 |     
275 |     def test_create_html_report(self):
276 |         """Test creating an HTML report."""
277 |         output_file = "report.html"
278 |         output_path = self.engine.create_html_report(
279 |             self.analysis_results, output_file=output_file)
280 |         
281 |         # Check that the file exists
282 |         self.assertTrue(os.path.exists(output_path))
283 |         
284 |         # Check that the file contains expected content
285 |         with open(output_path, 'r') as f:
286 |             content = f.read()
287 |             self.assertIn("JMeter Test Results Analysis", content)
288 |             self.assertIn("Endpoint Analysis", content)
289 |             self.assertIn("Bottleneck Analysis", content)
290 |             self.assertIn("Insights and Recommendations", content)
291 |     
292 |     def test_figure_to_base64(self):
293 |         """Test converting a figure to base64."""
294 |         graph = self.engine.create_time_series_graph(self.time_series_metrics)
295 |         base64_str = self.engine.figure_to_base64(graph)
296 |         
297 |         # Check that the result is a non-empty string
298 |         self.assertIsInstance(base64_str, str)
299 |         self.assertTrue(len(base64_str) > 0)
300 | 
301 | 
302 | if __name__ == '__main__':
303 |     unittest.main()
```
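
These tests rely only on the standard-library `unittest` module, so they can be run with the built-in runner. A small sketch, assuming it is executed from the repository root with the project's dependencies installed:

```python
# Discover and run the visualization-engine tests programmatically; the
# equivalent CLI form would be `python -m unittest tests.test_visualization_engine`.
import unittest

suite = unittest.defaultTestLoader.discover("tests", pattern="test_visualization_engine.py")
unittest.TextTestRunner(verbosity=2).run(suite)
```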

--------------------------------------------------------------------------------
/analyzer/analyzer.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Main analyzer module for JMeter test results.
  3 | 
  4 | This module provides the main entry point for analyzing JMeter test results.
  5 | It orchestrates the flow of data through the various components of the analyzer.
  6 | """
  7 | 
  8 | from pathlib import Path
  9 | from datetime import timedelta
 10 | from typing import Dict, List, Optional, Union
 11 | 
 12 | from analyzer.models import TestResults, OverallMetrics, Bottleneck
 13 | from analyzer.parser.base import JTLParser
 14 | from analyzer.parser.xml_parser import XMLJTLParser
 15 | from analyzer.parser.csv_parser import CSVJTLParser
 16 | from analyzer.metrics.calculator import MetricsCalculator
 17 | from analyzer.bottleneck.analyzer import BottleneckAnalyzer
 18 | from analyzer.insights.generator import InsightsGenerator
 19 | 
 20 | 
 21 | class TestResultsAnalyzer:
 22 |     """Main analyzer class for JMeter test results."""
 23 |     
 24 |     def __init__(self):
 25 |         """Initialize the analyzer."""
 26 |         self.parsers = {}
 27 |         self.metrics_calculator = MetricsCalculator()
 28 |         self.bottleneck_analyzer = BottleneckAnalyzer()
 29 |         self.insights_generator = InsightsGenerator()
 30 |         
 31 |         # Register default parsers
 32 |         self.register_parser('xml', XMLJTLParser())
 33 |         self.register_parser('csv', CSVJTLParser())
 34 |     
 35 |     def register_parser(self, format_name: str, parser: JTLParser) -> None:
 36 |         """Register a parser for a specific format.
 37 |         
 38 |         Args:
 39 |             format_name: Name of the format (e.g., 'xml', 'csv')
 40 |             parser: Parser instance
 41 |         """
 42 |         self.parsers[format_name] = parser
 43 |     
 44 |     def analyze_file(self, file_path: Union[str, Path], 
 45 |                     detailed: bool = False) -> Dict:
 46 |         """Analyze a JTL file and return the results.
 47 |         
 48 |         Args:
 49 |             file_path: Path to the JTL file
 50 |             detailed: Whether to include detailed analysis
 51 |             
 52 |         Returns:
 53 |             Dictionary containing analysis results
 54 |             
 55 |         Raises:
 56 |             FileNotFoundError: If the file does not exist
 57 |             ValueError: If the file format is invalid or unsupported
 58 |         """
 59 |         path = Path(file_path)
 60 |         
 61 |         # Validate file
 62 |         if not path.exists():
 63 |             raise FileNotFoundError(f"File not found: {file_path}")
 64 |         
 65 |         # Detect format
 66 |         format_name = JTLParser.detect_format(path)
 67 |         
 68 |         # Get appropriate parser
 69 |         if format_name not in self.parsers:
 70 |             raise ValueError(f"No parser available for format: {format_name}")
 71 |         
 72 |         parser = self.parsers[format_name]
 73 |         
 74 |         # Parse file
 75 |         test_results = parser.parse_file(path)
 76 |         
 77 |         # Perform analysis
 78 |         analysis_results = self._analyze_results(test_results, detailed)
 79 |         
 80 |         return analysis_results
 81 |     
 82 |     def _analyze_results(self, test_results: TestResults, 
 83 |                         detailed: bool = False) -> Dict:
 84 |         """Analyze test results and return the analysis.
 85 |         
 86 |         Args:
 87 |             test_results: TestResults object
 88 |             detailed: Whether to include detailed analysis
 89 |             
 90 |         Returns:
 91 |             Dictionary containing analysis results
 92 |         """
 93 |         # Calculate overall metrics
 94 |         overall_metrics = self.metrics_calculator.calculate_overall_metrics(test_results)
 95 |         
 96 |         # Create basic results structure
 97 |         results = {
 98 |             "summary": {
 99 |                 "total_samples": overall_metrics.total_samples,
100 |                 "error_count": overall_metrics.error_count,
101 |                 "error_rate": overall_metrics.error_rate,
102 |                 "average_response_time": overall_metrics.average_response_time,
103 |                 "median_response_time": overall_metrics.median_response_time,
104 |                 "percentile_90": overall_metrics.percentile_90,
105 |                 "percentile_95": overall_metrics.percentile_95,
106 |                 "percentile_99": overall_metrics.percentile_99,
107 |                 "min_response_time": overall_metrics.min_response_time,
108 |                 "max_response_time": overall_metrics.max_response_time,
109 |                 "throughput": overall_metrics.throughput,
110 |                 "start_time": test_results.start_time,
111 |                 "end_time": test_results.end_time,
112 |                 "duration": overall_metrics.test_duration
113 |             }
114 |         }
115 |         
116 |         # Add detailed analysis if requested
117 |         if detailed:
118 |             # Calculate endpoint metrics
119 |             endpoint_metrics = self.metrics_calculator.calculate_endpoint_metrics(test_results)
120 |             
121 |             # Calculate time series metrics (5-second intervals)
122 |             try:
123 |                 time_series_metrics = self.metrics_calculator.calculate_time_series_metrics(
124 |                     test_results, interval_seconds=5)
125 |             except ValueError:
126 |                 time_series_metrics = []
127 |             
128 |             # Identify bottlenecks
129 |             slow_endpoints = self.bottleneck_analyzer.identify_slow_endpoints(endpoint_metrics)
130 |             error_prone_endpoints = self.bottleneck_analyzer.identify_error_prone_endpoints(endpoint_metrics)
131 |             anomalies = self.bottleneck_analyzer.detect_anomalies(time_series_metrics)
132 |             concurrency_impact = self.bottleneck_analyzer.analyze_concurrency_impact(time_series_metrics)
133 |             
134 |             # Generate insights and recommendations
135 |             all_bottlenecks = slow_endpoints + error_prone_endpoints
136 |             bottleneck_recommendations = self.insights_generator.generate_bottleneck_recommendations(all_bottlenecks)
137 |             
138 |             # Create error analysis
139 |             error_analysis = self._create_error_analysis(test_results)
140 |             error_recommendations = self.insights_generator.generate_error_recommendations(error_analysis)
141 |             
142 |             # Generate scaling insights
143 |             scaling_insights = self.insights_generator.generate_scaling_insights(concurrency_impact)
144 |             
145 |             # Prioritize all recommendations
146 |             all_recommendations = bottleneck_recommendations + error_recommendations
147 |             prioritized_recommendations = self.insights_generator.prioritize_recommendations(all_recommendations)
148 |             
149 |             # Add to results
150 |             results["detailed"] = {
151 |                 "samples_count": len(test_results.samples),
152 |                 "endpoints": {
153 |                     endpoint: {
154 |                         "total_samples": metrics.total_samples,
155 |                         "error_count": metrics.error_count,
156 |                         "error_rate": metrics.error_rate,
157 |                         "average_response_time": metrics.average_response_time,
158 |                         "median_response_time": metrics.median_response_time,
159 |                         "percentile_90": metrics.percentile_90,
160 |                         "percentile_95": metrics.percentile_95,
161 |                         "percentile_99": metrics.percentile_99,
162 |                         "min_response_time": metrics.min_response_time,
163 |                         "max_response_time": metrics.max_response_time,
164 |                         "throughput": metrics.throughput
165 |                     }
166 |                     for endpoint, metrics in endpoint_metrics.items()
167 |                 },
168 |                 "time_series": [
169 |                     {
170 |                         "timestamp": metrics.timestamp.isoformat(),
171 |                         "active_threads": metrics.active_threads,
172 |                         "throughput": metrics.throughput,
173 |                         "average_response_time": metrics.average_response_time,
174 |                         "error_rate": metrics.error_rate
175 |                     }
176 |                     for metrics in time_series_metrics
177 |                 ],
178 |                 "bottlenecks": {
179 |                     "slow_endpoints": [
180 |                         {
181 |                             "endpoint": bottleneck.endpoint,
182 |                             "response_time": bottleneck.value,
183 |                             "threshold": bottleneck.threshold,
184 |                             "severity": bottleneck.severity
185 |                         }
186 |                         for bottleneck in slow_endpoints
187 |                     ],
188 |                     "error_prone_endpoints": [
189 |                         {
190 |                             "endpoint": bottleneck.endpoint,
191 |                             "error_rate": bottleneck.value,
192 |                             "threshold": bottleneck.threshold,
193 |                             "severity": bottleneck.severity
194 |                         }
195 |                         for bottleneck in error_prone_endpoints
196 |                     ],
197 |                     "anomalies": [
198 |                         {
199 |                             "timestamp": anomaly.timestamp.isoformat(),
200 |                             "expected_value": anomaly.expected_value,
201 |                             "actual_value": anomaly.actual_value,
202 |                             "deviation_percentage": anomaly.deviation_percentage
203 |                         }
204 |                         for anomaly in anomalies
205 |                     ],
206 |                     "concurrency_impact": concurrency_impact
207 |                 },
208 |                 "insights": {
209 |                     "recommendations": [
210 |                         {
211 |                             "issue": rec["recommendation"].issue,
212 |                             "recommendation": rec["recommendation"].recommendation,
213 |                             "expected_impact": rec["recommendation"].expected_impact,
214 |                             "implementation_difficulty": rec["recommendation"].implementation_difficulty,
215 |                             "priority_level": rec["priority_level"]
216 |                         }
217 |                         for rec in prioritized_recommendations
218 |                     ],
219 |                     "scaling_insights": [
220 |                         {
221 |                             "topic": insight.topic,
222 |                             "description": insight.description
223 |                         }
224 |                         for insight in scaling_insights
225 |                     ]
226 |                 }
227 |             }
228 |         
229 |         return results
230 |     
231 |     def _create_error_analysis(self, test_results: TestResults) -> Dict:
232 |         """Create error analysis from test results.
233 |         
234 |         Args:
235 |             test_results: TestResults object
236 |             
237 |         Returns:
238 |             Dictionary containing error analysis
239 |         """
240 |         # Extract error samples
241 |         error_samples = [sample for sample in test_results.samples if not sample.success]
242 |         
243 |         if not error_samples:
244 |             return {"error_types": {}, "error_patterns": []}
245 |         
246 |         # Count error types
247 |         error_types = {}
248 |         for sample in error_samples:
249 |             error_message = sample.error_message or f"HTTP {sample.response_code}"
250 |             if error_message in error_types:
251 |                 error_types[error_message] += 1
252 |             else:
253 |                 error_types[error_message] = 1
254 |         
255 |         # Detect error patterns
256 |         error_patterns = []
257 |         
258 |         # Check for error spikes
259 |         if test_results.start_time and test_results.end_time:
260 |             # Group errors by time intervals (5-second intervals)
261 |             interval_seconds = 5
262 |             duration = (test_results.end_time - test_results.start_time).total_seconds()
263 |             num_intervals = int(duration / interval_seconds) + 1
264 |             
265 |             # Count errors in each interval
266 |             interval_errors = [0] * num_intervals
267 |             for sample in error_samples:
268 |                 interval_index = int((sample.timestamp - test_results.start_time).total_seconds() / interval_seconds)
269 |                 if 0 <= interval_index < num_intervals:
270 |                     interval_errors[interval_index] += 1
271 |             
272 |             # Calculate average errors per interval
273 |             avg_errors = sum(interval_errors) / len(interval_errors) if interval_errors else 0
274 |             
275 |             # Detect spikes (intervals with errors > 2 * average)
276 |             for i, error_count in enumerate(interval_errors):
277 |                 if error_count > 2 * avg_errors and error_count > 1:
278 |                     spike_time = test_results.start_time + timedelta(seconds=i * interval_seconds)
279 |                     error_patterns.append({
280 |                         "type": "spike",
281 |                         "timestamp": spike_time.isoformat(),
282 |                         "error_count": error_count
283 |                     })
284 |         
285 |         return {
286 |             "error_types": error_types,
287 |             "error_patterns": error_patterns
288 |         }
```
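
The flow above can be exercised end to end with a short driver script. The following is a minimal, hypothetical sketch rather than part of the repository: the analyzer class name and the parser class names are assumptions (only `register_parser` and `analyze_file` are confirmed by this file), and the constructor may already register the parsers itself, in which case the explicit `register_parser` calls are redundant.

```python
# Hypothetical driver for the analyzer module; class names are assumptions.
from pathlib import Path

from analyzer.analyzer import TestResultsAnalyzer      # assumed class name
from analyzer.parser.csv_parser import CSVJTLParser    # assumed parser class name
from analyzer.parser.xml_parser import XMLJTLParser    # assumed parser class name

analyzer = TestResultsAnalyzer()

# Map the format names returned by JTLParser.detect_format() (e.g. 'csv', 'xml')
# to parser instances. The constructor may already do this for you.
analyzer.register_parser("csv", CSVJTLParser())
analyzer.register_parser("xml", XMLJTLParser())

# analyze_file() validates the path, detects the format, parses the samples,
# and returns the summary plus the "detailed" section when detailed=True.
results = analyzer.analyze_file(Path("results.jtl"), detailed=True)

print("Error rate:", results["summary"]["error_rate"])
for endpoint, metrics in results.get("detailed", {}).get("endpoints", {}).items():
    print(endpoint, metrics["percentile_95"])
```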

--------------------------------------------------------------------------------
/analyzer/insights/generator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Insights generator for JMeter test results.
  3 | 
  4 | This module provides functionality for generating insights and recommendations
  5 | based on JMeter test results analysis, including bottleneck recommendations,
  6 | error pattern analysis, scaling insights, and recommendation prioritization.
  7 | """
  8 | 
  9 | from typing import Dict, List, Optional, Tuple, Union
 10 | 
 11 | from analyzer.models import (Bottleneck, EndpointMetrics, Insight,
 12 |                            OverallMetrics, Recommendation, TestResults)
 13 | 
 14 | 
 15 | class InsightsGenerator:
 16 |     """Generator for insights and recommendations based on test results analysis."""
 17 |     
 18 |     def generate_bottleneck_recommendations(self, bottlenecks: List[Bottleneck]) -> List[Recommendation]:
 19 |         """Generate recommendations for addressing identified bottlenecks.
 20 |         
 21 |         Args:
 22 |             bottlenecks: List of Bottleneck objects
 23 |             
 24 |         Returns:
 25 |             List of Recommendation objects
 26 |         """
 27 |         recommendations = []
 28 |         
 29 |         # Process response time bottlenecks
 30 |         response_time_bottlenecks = [b for b in bottlenecks if b.metric_type == "response_time"]
 31 |         if response_time_bottlenecks:
 32 |             # Group by severity
 33 |             high_severity = [b for b in response_time_bottlenecks if b.severity == "high"]
 34 |             medium_severity = [b for b in response_time_bottlenecks if b.severity == "medium"]
 35 |             
 36 |             # Generate recommendations for high severity bottlenecks
 37 |             if high_severity:
 38 |                 endpoints = ", ".join(b.endpoint for b in high_severity[:3])
 39 |                 recommendation = Recommendation(
 40 |                     issue=f"Critical response time issues in endpoints: {endpoints}",
 41 |                     recommendation="Optimize database queries, add caching, or consider asynchronous processing for these endpoints",
 42 |                     expected_impact="Significant reduction in response times and improved user experience",
 43 |                     implementation_difficulty="medium"
 44 |                 )
 45 |                 recommendations.append(recommendation)
 46 |             
 47 |             # Generate recommendations for medium severity bottlenecks
 48 |             if medium_severity:
 49 |                 endpoints = ", ".join(b.endpoint for b in medium_severity[:3])
 50 |                 recommendation = Recommendation(
 51 |                     issue=f"Moderate response time issues in endpoints: {endpoints}",
 52 |                     recommendation="Profile the code to identify bottlenecks and optimize the most expensive operations",
 53 |                     expected_impact="Moderate improvement in response times",
 54 |                     implementation_difficulty="medium"
 55 |                 )
 56 |                 recommendations.append(recommendation)
 57 |         
 58 |         # Process error rate bottlenecks
 59 |         error_rate_bottlenecks = [b for b in bottlenecks if b.metric_type == "error_rate"]
 60 |         if error_rate_bottlenecks:
 61 |             # Group by severity
 62 |             high_severity = [b for b in error_rate_bottlenecks if b.severity == "high"]
 63 |             medium_severity = [b for b in error_rate_bottlenecks if b.severity == "medium"]
 64 |             
 65 |             # Generate recommendations for high severity bottlenecks
 66 |             if high_severity:
 67 |                 endpoints = ", ".join(b.endpoint for b in high_severity[:3])
 68 |                 recommendation = Recommendation(
 69 |                     issue=f"High error rates in endpoints: {endpoints}",
 70 |                     recommendation="Investigate error logs, add proper error handling, and fix the root causes of errors",
 71 |                     expected_impact="Significant reduction in error rates and improved reliability",
 72 |                     implementation_difficulty="high"
 73 |                 )
 74 |                 recommendations.append(recommendation)
 75 |             
 76 |             # Generate recommendations for medium severity bottlenecks
 77 |             if medium_severity:
 78 |                 endpoints = ", ".join(b.endpoint for b in medium_severity[:3])
 79 |                 recommendation = Recommendation(
 80 |                     issue=f"Moderate error rates in endpoints: {endpoints}",
 81 |                     recommendation="Review error handling and add appropriate validation and error recovery mechanisms",
 82 |                     expected_impact="Moderate reduction in error rates",
 83 |                     implementation_difficulty="medium"
 84 |                 )
 85 |                 recommendations.append(recommendation)
 86 |         
 87 |         return recommendations
 88 |     
 89 |     def generate_error_recommendations(self, error_analysis: Dict) -> List[Recommendation]:
 90 |         """Generate recommendations for addressing error patterns.
 91 |         
 92 |         Args:
 93 |             error_analysis: Dictionary containing error analysis results
 94 |             
 95 |         Returns:
 96 |             List of Recommendation objects
 97 |         """
 98 |         recommendations = []
 99 |         
100 |         # Process error types
101 |         error_types = error_analysis.get("error_types", {})
102 |         if error_types:
103 |             # Find the most common error types
104 |             sorted_errors = sorted(error_types.items(), key=lambda x: x[1], reverse=True)
105 |             top_errors = sorted_errors[:3]
106 |             
107 |             for error_type, count in top_errors:
108 |                 if "timeout" in error_type.lower():
109 |                     recommendation = Recommendation(
110 |                         issue=f"Timeout errors ({count} occurrences)",
111 |                         recommendation="Increase timeout thresholds, optimize slow operations, or implement circuit breakers",
112 |                         expected_impact="Reduction in timeout errors and improved reliability",
113 |                         implementation_difficulty="medium"
114 |                     )
115 |                     recommendations.append(recommendation)
116 |                 
117 |                 elif "connection" in error_type.lower():
118 |                     recommendation = Recommendation(
119 |                         issue=f"Connection errors ({count} occurrences)",
120 |                         recommendation="Implement connection pooling, retry mechanisms, or check network configuration",
121 |                         expected_impact="Improved connection stability and reduced errors",
122 |                         implementation_difficulty="medium"
123 |                     )
124 |                     recommendations.append(recommendation)
125 |                 
126 |                 elif "500" in error_type or "server" in error_type.lower():
127 |                     recommendation = Recommendation(
128 |                         issue=f"Server errors ({count} occurrences)",
129 |                         recommendation="Check server logs, fix application bugs, and add proper error handling",
130 |                         expected_impact="Reduction in server errors and improved reliability",
131 |                         implementation_difficulty="high"
132 |                     )
133 |                     recommendations.append(recommendation)
134 |                 
135 |                 elif "400" in error_type or "client" in error_type.lower():
136 |                     recommendation = Recommendation(
137 |                         issue=f"Client errors ({count} occurrences)",
138 |                         recommendation="Validate input data, fix client-side issues, and improve error messages",
139 |                         expected_impact="Reduction in client errors and improved user experience",
140 |                         implementation_difficulty="medium"
141 |                     )
142 |                     recommendations.append(recommendation)
143 |                 
144 |                 else:
145 |                     recommendation = Recommendation(
146 |                         issue=f"{error_type} errors ({count} occurrences)",
147 |                         recommendation="Investigate the root cause and implement appropriate error handling",
148 |                         expected_impact="Reduction in errors and improved reliability",
149 |                         implementation_difficulty="medium"
150 |                     )
151 |                     recommendations.append(recommendation)
152 |         
153 |         # Process error patterns
154 |         error_patterns = error_analysis.get("error_patterns", [])
155 |         if error_patterns:
156 |             for pattern in error_patterns:
157 |                 pattern_type = pattern.get("type", "")
158 |                 
159 |                 if pattern_type == "spike":
160 |                     recommendation = Recommendation(
161 |                         issue="Error spike detected during the test",
162 |                         recommendation="Investigate what happened during the spike period and address the underlying cause",
163 |                         expected_impact="Prevention of error spikes in production",
164 |                         implementation_difficulty="medium"
165 |                     )
166 |                     recommendations.append(recommendation)
167 |                 
168 |                 elif pattern_type == "increasing":
169 |                     recommendation = Recommendation(
170 |                         issue="Increasing error rate over time",
171 |                         recommendation="Check for resource leaks, memory issues, or degrading performance under load",
172 |                         expected_impact="Stable error rates during extended usage",
173 |                         implementation_difficulty="high"
174 |                     )
175 |                     recommendations.append(recommendation)
176 |         
177 |         return recommendations
178 |     
179 |     def generate_scaling_insights(self, concurrency_analysis: Dict) -> List[Insight]:
180 |         """Generate insights on scaling behavior and capacity limits.
181 |         
182 |         Args:
183 |             concurrency_analysis: Dictionary containing concurrency analysis results
184 |             
185 |         Returns:
186 |             List of Insight objects
187 |         """
188 |         insights = []
189 |         
190 |         correlation = concurrency_analysis.get("correlation", 0)
191 |         has_degradation = concurrency_analysis.get("has_degradation", False)
192 |         degradation_threshold = concurrency_analysis.get("degradation_threshold", 0)
193 |         
194 |         # Generate insights based on correlation
195 |         if correlation > 0.8:
196 |             insight = Insight(
197 |                 topic="Strong Correlation with Concurrency",
198 |                 description="There is a strong correlation between the number of concurrent users and response times, indicating potential scalability issues",
199 |                 supporting_data={"correlation": correlation}
200 |             )
201 |             insights.append(insight)
202 |         elif correlation > 0.5:
203 |             insight = Insight(
204 |                 topic="Moderate Correlation with Concurrency",
205 |                 description="There is a moderate correlation between the number of concurrent users and response times, suggesting some scalability concerns",
206 |                 supporting_data={"correlation": correlation}
207 |             )
208 |             insights.append(insight)
209 |         elif -0.2 < correlation < 0.2:
210 |             insight = Insight(
211 |                 topic="No Correlation with Concurrency",
212 |                 description="There is little to no correlation between the number of concurrent users and response times, suggesting good scalability",
213 |                 supporting_data={"correlation": correlation}
214 |             )
215 |             insights.append(insight)
216 |         
217 |         # Generate insights based on degradation threshold
218 |         if has_degradation:
219 |             insight = Insight(
220 |                 topic="Performance Degradation Threshold",
221 |                 description=f"Performance begins to degrade significantly at {degradation_threshold} concurrent users, indicating a potential capacity limit",
222 |                 supporting_data={"degradation_threshold": degradation_threshold}
223 |             )
224 |             insights.append(insight)
225 |             
226 |             # Add a scaling recommendation, expressed as an Insight, for addressing the degradation
227 |             if degradation_threshold > 0:
228 |                 insight = Insight(
229 |                     topic="Scaling Recommendation",
230 |                     description=f"Consider horizontal scaling or optimization before reaching {degradation_threshold} concurrent users to maintain performance",
231 |                     supporting_data={"degradation_threshold": degradation_threshold}
232 |                 )
233 |                 insights.append(insight)
234 |         else:
235 |             insight = Insight(
236 |                 topic="No Performance Degradation Detected",
237 |                 description="No significant performance degradation was detected with increasing concurrent users within the tested range",
238 |                 supporting_data={}
239 |             )
240 |             insights.append(insight)
241 |         
242 |         return insights
243 |     
244 |     def prioritize_recommendations(self, recommendations: List[Recommendation]) -> List[Dict]:
245 |         """Prioritize recommendations based on potential impact.
246 |         
247 |         Args:
248 |             recommendations: List of Recommendation objects
249 |             
250 |         Returns:
251 |             List of dictionaries containing prioritized recommendations
252 |         """
253 |         if not recommendations:
254 |             return []
255 |         
256 |         # Define scoring system
257 |         severity_scores = {
258 |             "high": 3,
259 |             "medium": 2,
260 |             "low": 1
261 |         }
262 |         
263 |         difficulty_scores = {
264 |             "low": 3,
265 |             "medium": 2,
266 |             "high": 1
267 |         }
268 |         
269 |         # Calculate priority score for each recommendation
270 |         prioritized = []
271 |         for recommendation in recommendations:
272 |             # Extract severity from the issue (if available)
273 |             severity = "medium"  # Default
274 |             if "critical" in recommendation.issue.lower():
275 |                 severity = "high"
276 |             elif "moderate" in recommendation.issue.lower():
277 |                 severity = "medium"
278 |             elif "minor" in recommendation.issue.lower():
279 |                 severity = "low"
280 |             
281 |             # Get difficulty
282 |             difficulty = recommendation.implementation_difficulty
283 |             
284 |             # Calculate priority score (higher is more important)
285 |             severity_score = severity_scores.get(severity, 2)
286 |             difficulty_score = difficulty_scores.get(difficulty, 2)
287 |             
288 |             # Priority formula: severity * 2 + ease of implementation
289 |             # This weights severity more heavily than implementation difficulty
290 |             priority_score = severity_score * 2 + difficulty_score
291 |             
292 |             prioritized.append({
293 |                 "recommendation": recommendation,
294 |                 "priority_score": priority_score,
295 |                 "priority_level": self._get_priority_level(priority_score)
296 |             })
297 |         
298 |         # Sort by priority score (descending)
299 |         prioritized.sort(key=lambda x: x["priority_score"], reverse=True)
300 |         
301 |         return prioritized
302 |     
303 |     def _get_priority_level(self, score: int) -> str:
304 |         """Convert a priority score to a priority level.
305 |         
306 |         Args:
307 |             score: Priority score
308 |             
309 |         Returns:
310 |             Priority level string
311 |         """
312 |         if score >= 7:
313 |             return "critical"
314 |         elif score >= 5:
315 |             return "high"
316 |         elif score >= 3:
317 |             return "medium"
318 |         else:
319 |             return "low"
```
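
As a quick illustration of how `InsightsGenerator` chains together, the sketch below feeds two hand-built bottlenecks through `generate_bottleneck_recommendations` and `prioritize_recommendations`. It is hypothetical: the keyword arguments used to construct `Bottleneck` are assumed from the attributes read in this module (`endpoint`, `metric_type`, `value`, `threshold`, `severity`); only `Recommendation`'s keyword constructor is confirmed above. With the scoring shown, a "Critical ..." issue of medium difficulty scores 3 * 2 + 2 = 8 and is reported as "critical", while a "Moderate ..." issue of medium difficulty scores 2 * 2 + 2 = 6 and is reported as "high".

```python
# Hypothetical usage sketch; Bottleneck's constructor signature is assumed.
from analyzer.insights.generator import InsightsGenerator
from analyzer.models import Bottleneck

generator = InsightsGenerator()

bottlenecks = [
    Bottleneck(endpoint="/api/checkout", metric_type="response_time",
               value=4200.0, threshold=1500.0, severity="high"),
    Bottleneck(endpoint="/api/search", metric_type="error_rate",
               value=7.5, threshold=5.0, severity="medium"),
]

recommendations = generator.generate_bottleneck_recommendations(bottlenecks)
prioritized = generator.prioritize_recommendations(recommendations)

for item in prioritized:
    rec = item["recommendation"]
    # e.g. "critical 8 Critical response time issues in endpoints: /api/checkout"
    print(item["priority_level"], item["priority_score"], rec.issue)
```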