# jmeter-mcp-server
This is page 2 of 2. Use http://codebase.md/qainsights/jmeter-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .gitignore
├── .python-version
├── analyzer
│   ├── __init__.py
│   ├── analyzer.py
│   ├── bottleneck
│   │   ├── __init__.py
│   │   └── analyzer.py
│   ├── insights
│   │   ├── __init__.py
│   │   └── generator.py
│   ├── mcp
│   │   └── __init__.py
│   ├── metrics
│   │   ├── __init__.py
│   │   └── calculator.py
│   ├── models.py
│   ├── parser
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── csv_parser.py
│   │   └── xml_parser.py
│   └── visualization
│       ├── __init__.py
│       └── engine.py
├── Dockerfile
├── images
│   ├── Anthropic-MCP.png
│   ├── Cursor.png
│   └── Windsurf.png
├── jmeter_report.html
├── jmeter_server.py
├── main.py
├── mcp_config.json
├── pyproject.toml
├── README.md
├── requirements_windsurf_reader.txt
├── requirements.txt
├── sample_test.jmx
├── smithery.yaml
├── tests
│   ├── __init__.py
│   ├── test_analyzer_models.py
│   ├── test_analyzer_parser.py
│   ├── test_bottleneck_analyzer.py
│   ├── test_csv_parser.py
│   ├── test_insights_generator.py
│   ├── test_jmeter_server.py
│   ├── test_metrics_calculator.py
│   ├── test_visualization_engine.py
│   └── test_xml_parser.py
├── windsurf_db_reader_alternative.py
└── windsurf_db_reader.py
```

# Files

--------------------------------------------------------------------------------
/analyzer/visualization/engine.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Visualization engine for JMeter test results.
  3 | 
  4 | This module provides functionality for creating visual representations
  5 | of JMeter test results analysis, including time series graphs, distribution
  6 | graphs, endpoint comparison charts, and visualization output formats.
  7 | """
  8 | 
  9 | import base64
 10 | import io
 11 | import os
 12 | from datetime import datetime
 13 | from pathlib import Path
 14 | from typing import Dict, List, Optional, Tuple, Union
 15 | 
 16 | # Note: In a real implementation, we would use matplotlib for visualization.
 17 | # However, for the purpose of this implementation, we'll create a simplified version
 18 | # that doesn't rely on external libraries.
 19 | 
 20 | class VisualizationEngine:
 21 |     """Engine for creating visual representations of test results analysis."""
 22 |     
 23 |     def __init__(self, output_dir: Optional[str] = None):
 24 |         """Initialize the visualization engine.
 25 |         
 26 |         Args:
 27 |             output_dir: Directory to save visualization files (default: None)
 28 |         """
 29 |         self.output_dir = output_dir
 30 |         if output_dir:
 31 |             os.makedirs(output_dir, exist_ok=True)
 32 |     
 33 |     def create_time_series_graph(self, time_series_metrics: List,
 34 |                                 metric_name: str = "average_response_time",
 35 |                                 title: Optional[str] = None,
 36 |                                 output_file: Optional[str] = None) -> Union[str, Dict]:
 37 |         """Create a time-series graph showing performance over the test duration.
 38 |         
 39 |         Args:
 40 |             time_series_metrics: List of TimeSeriesMetrics objects
 41 |             metric_name: Name of the metric to plot (default: "average_response_time")
 42 |             title: Graph title (default: None)
 43 |             output_file: Path to save the graph (default: None)
 44 |             
 45 |         Returns:
 46 |             Path to the saved graph file or a dictionary describing the graph
 47 |         """
 48 |         if not time_series_metrics:
 49 |             raise ValueError("No time series metrics provided")
 50 |         
 51 |         # Extract data
 52 |         timestamps = [metrics.timestamp for metrics in time_series_metrics]
 53 |         
 54 |         if metric_name == "average_response_time":
 55 |             values = [metrics.average_response_time for metrics in time_series_metrics]
 56 |             y_label = "Response Time (ms)"
 57 |             graph_title = title or "Response Time Over Time"
 58 |         elif metric_name == "throughput":
 59 |             values = [metrics.throughput for metrics in time_series_metrics]
 60 |             y_label = "Throughput (requests/second)"
 61 |             graph_title = title or "Throughput Over Time"
 62 |         elif metric_name == "error_rate":
 63 |             values = [metrics.error_rate for metrics in time_series_metrics]
 64 |             y_label = "Error Rate (%)"
 65 |             graph_title = title or "Error Rate Over Time"
 66 |         elif metric_name == "active_threads":
 67 |             values = [metrics.active_threads for metrics in time_series_metrics]
 68 |             y_label = "Active Threads"
 69 |             graph_title = title or "Active Threads Over Time"
 70 |         else:
 71 |             raise ValueError(f"Unknown metric name: {metric_name}")
 72 |         
 73 |         # Create a simple representation of the graph
 74 |         graph = {
 75 |             "type": "time_series",
 76 |             "title": graph_title,
 77 |             "x_label": "Time",
 78 |             "y_label": y_label,
 79 |             "timestamps": [ts.isoformat() for ts in timestamps],
 80 |             "values": values
 81 |         }
 82 |         
 83 |         # Save or return
 84 |         if output_file:
 85 |             output_path = self._get_output_path(output_file)
 86 |             with open(output_path, 'w') as f:
 87 |                 f.write(f"Time Series Graph: {graph_title}\n")
 88 |                 f.write(f"X-axis: Time\n")
 89 |                 f.write(f"Y-axis: {y_label}\n")
 90 |                 f.write("Data:\n")
 91 |                 for ts, val in zip(timestamps, values):
 92 |                     f.write(f"{ts.isoformat()}: {val}\n")
 93 |             return output_path
 94 |         else:
 95 |             return graph
 96 |     
 97 |     def create_distribution_graph(self, response_times: List[float],
 98 |                                 percentiles: List[int] = [50, 90, 95, 99],
 99 |                                 title: Optional[str] = None,
100 |                                 output_file: Optional[str] = None) -> Union[str, Dict]:
101 |         """Create a distribution graph showing response time distributions.
102 |         
103 |         Args:
104 |             response_times: List of response times
105 |             percentiles: List of percentiles to mark (default: [50, 90, 95, 99])
106 |             title: Graph title (default: None)
107 |             output_file: Path to save the graph (default: None)
108 |             
109 |         Returns:
110 |             Path to the saved graph file or a dictionary describing the graph
111 |         """
112 |         if not response_times:
113 |             raise ValueError("No response times provided")
114 |         
115 |         # Calculate percentile values
116 |         percentile_values = {}
117 |         for p in percentiles:
118 |             percentile_values[p] = self._calculate_percentile(response_times, p)
119 |         
120 |         # Create a simple representation of the graph
121 |         graph_title = title or "Response Time Distribution"
122 |         graph = {
123 |             "type": "distribution",
124 |             "title": graph_title,
125 |             "x_label": "Response Time (ms)",
126 |             "y_label": "Frequency",
127 |             "response_times": response_times,
128 |             "percentiles": percentile_values
129 |         }
130 |         
131 |         # Save or return
132 |         if output_file:
133 |             output_path = self._get_output_path(output_file)
134 |             with open(output_path, 'w') as f:
135 |                 f.write(f"Distribution Graph: {graph_title}\n")
136 |                 f.write(f"X-axis: Response Time (ms)\n")
137 |                 f.write(f"Y-axis: Frequency\n")
138 |                 f.write("Percentiles:\n")
139 |                 for p, val in percentile_values.items():
140 |                     f.write(f"{p}th Percentile: {val:.2f} ms\n")
141 |             return output_path
142 |         else:
143 |             return graph
144 |     
145 |     def create_endpoint_comparison_chart(self, endpoint_metrics: Dict,
146 |                                         metric_name: str = "average_response_time",
147 |                                         top_n: int = 10,
148 |                                         title: Optional[str] = None,
149 |                                         output_file: Optional[str] = None) -> Union[str, Dict]:
150 |         """Create a comparison chart for different endpoints.
151 |         
152 |         Args:
153 |             endpoint_metrics: Dictionary mapping endpoint names to EndpointMetrics objects
154 |             metric_name: Name of the metric to compare (default: "average_response_time")
155 |             top_n: Number of top endpoints to include (default: 10)
156 |             title: Chart title (default: None)
157 |             output_file: Path to save the chart (default: None)
158 |             
159 |         Returns:
160 |             Path to the saved chart file or a dictionary describing the chart
161 |         """
162 |         if not endpoint_metrics:
163 |             raise ValueError("No endpoint metrics provided")
164 |         
165 |         # Extract data
166 |         if metric_name == "average_response_time":
167 |             values = {endpoint: metrics.average_response_time for endpoint, metrics in endpoint_metrics.items()}
168 |             y_label = "Average Response Time (ms)"
169 |             chart_title = title or "Endpoint Response Time Comparison"
170 |         elif metric_name == "error_rate":
171 |             values = {endpoint: metrics.error_rate for endpoint, metrics in endpoint_metrics.items()}
172 |             y_label = "Error Rate (%)"
173 |             chart_title = title or "Endpoint Error Rate Comparison"
174 |         elif metric_name == "throughput":
175 |             values = {endpoint: metrics.throughput for endpoint, metrics in endpoint_metrics.items()}
176 |             y_label = "Throughput (requests/second)"
177 |             chart_title = title or "Endpoint Throughput Comparison"
178 |         else:
179 |             raise ValueError(f"Unknown metric name: {metric_name}")
180 |         
181 |         # Sort endpoints by value (descending) and take top N
182 |         sorted_endpoints = sorted(values.items(), key=lambda x: x[1], reverse=True)[:top_n]
183 |         endpoints = [item[0] for item in sorted_endpoints]
184 |         values_list = [item[1] for item in sorted_endpoints]
185 |         
186 |         # Create a simple representation of the chart
187 |         chart = {
188 |             "type": "endpoint_comparison",
189 |             "title": chart_title,
190 |             "x_label": y_label,
191 |             "y_label": "Endpoint",
192 |             "endpoints": endpoints,
193 |             "values": values_list
194 |         }
195 |         
196 |         # Save or return
197 |         if output_file:
198 |             output_path = self._get_output_path(output_file)
199 |             with open(output_path, 'w') as f:
200 |                 f.write(f"Endpoint Comparison Chart: {chart_title}\n")
201 |                 f.write(f"X-axis: {y_label}\n")
202 |                 f.write(f"Y-axis: Endpoint\n")
203 |                 f.write("Data:\n")
204 |                 for endpoint, value in zip(endpoints, values_list):
205 |                     f.write(f"{endpoint}: {value:.2f}\n")
206 |             return output_path
207 |         else:
208 |             return chart
209 |     
210 |     def create_html_report(self, analysis_results: Dict, output_file: str) -> str:
211 |         """Create an HTML report from analysis results.
212 |         
213 |         Args:
214 |             analysis_results: Dictionary containing analysis results
215 |             output_file: Path to save the HTML report
216 |             
217 |         Returns:
218 |             Path to the saved HTML report
219 |         """
220 |         # Extract data
221 |         summary = analysis_results.get("summary", {})
222 |         detailed = analysis_results.get("detailed", {})
223 |         
224 |         # Create HTML content
225 |         html_content = f"""
226 |         <!DOCTYPE html>
227 |         <html>
228 |         <head>
229 |             <title>JMeter Test Results Analysis</title>
230 |             <style>
231 |                 body {{ font-family: Arial, sans-serif; margin: 20px; }}
232 |                 h1, h2, h3 {{ color: #333; }}
233 |                 table {{ border-collapse: collapse; width: 100%; margin-bottom: 20px; }}
234 |                 th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
235 |                 th {{ background-color: #f2f2f2; }}
236 |                 tr:nth-child(even) {{ background-color: #f9f9f9; }}
237 |                 .chart {{ margin: 20px 0; max-width: 100%; }}
238 |                 .section {{ margin-bottom: 30px; }}
239 |                 .severity-high {{ color: #d9534f; }}
240 |                 .severity-medium {{ color: #f0ad4e; }}
241 |                 .severity-low {{ color: #5bc0de; }}
242 |             </style>
243 |         </head>
244 |         <body>
245 |             <h1>JMeter Test Results Analysis</h1>
246 |             
247 |             <div class="section">
248 |                 <h2>Summary</h2>
249 |                 <table>
250 |                     <tr><th>Metric</th><th>Value</th></tr>
251 |                     <tr><td>Total Samples</td><td>{summary.get('total_samples', 'N/A')}</td></tr>
252 |                     <tr><td>Error Count</td><td>{summary.get('error_count', 'N/A')}</td></tr>
253 |                     <tr><td>Error Rate</td><td>{summary.get('error_rate', 'N/A'):.2f}%</td></tr>
254 |                     <tr><td>Average Response Time</td><td>{summary.get('average_response_time', 'N/A'):.2f} ms</td></tr>
255 |                     <tr><td>Median Response Time</td><td>{summary.get('median_response_time', 'N/A'):.2f} ms</td></tr>
256 |                     <tr><td>90th Percentile</td><td>{summary.get('percentile_90', 'N/A'):.2f} ms</td></tr>
257 |                     <tr><td>95th Percentile</td><td>{summary.get('percentile_95', 'N/A'):.2f} ms</td></tr>
258 |                     <tr><td>99th Percentile</td><td>{summary.get('percentile_99', 'N/A'):.2f} ms</td></tr>
259 |                     <tr><td>Min Response Time</td><td>{summary.get('min_response_time', 'N/A'):.2f} ms</td></tr>
260 |                     <tr><td>Max Response Time</td><td>{summary.get('max_response_time', 'N/A'):.2f} ms</td></tr>
261 |                     <tr><td>Throughput</td><td>{summary.get('throughput', 'N/A'):.2f} requests/second</td></tr>
262 |                     <tr><td>Start Time</td><td>{summary.get('start_time', 'N/A')}</td></tr>
263 |                     <tr><td>End Time</td><td>{summary.get('end_time', 'N/A')}</td></tr>
264 |                     <tr><td>Duration</td><td>{summary.get('duration', 'N/A'):.2f} seconds</td></tr>
265 |                 </table>
266 |             </div>
267 |         """
268 |         
269 |         # Add detailed information if available
270 |         if detailed:
271 |             # Add endpoint information
272 |             endpoints = detailed.get("endpoints", {})
273 |             if endpoints:
274 |                 html_content += """
275 |                 <div class="section">
276 |                     <h2>Endpoint Analysis</h2>
277 |                     <table>
278 |                         <tr>
279 |                             <th>Endpoint</th>
280 |                             <th>Samples</th>
281 |                             <th>Errors</th>
282 |                             <th>Error Rate</th>
283 |                             <th>Avg Response Time</th>
284 |                             <th>95th Percentile</th>
285 |                             <th>Throughput</th>
286 |                         </tr>
287 |                 """
288 |                 
289 |                 for endpoint, metrics in endpoints.items():
290 |                     html_content += f"""
291 |                         <tr>
292 |                             <td>{endpoint}</td>
293 |                             <td>{metrics['total_samples']}</td>
294 |                             <td>{metrics['error_count']}</td>
295 |                             <td>{metrics['error_rate']:.2f}%</td>
296 |                             <td>{metrics['average_response_time']:.2f} ms</td>
297 |                             <td>{metrics['percentile_95']:.2f} ms</td>
298 |                             <td>{metrics['throughput']:.2f} req/s</td>
299 |                         </tr>
300 |                     """
301 |                 
302 |                 html_content += """
303 |                     </table>
304 |                 </div>
305 |                 """
306 |             
307 |             # Add bottleneck information
308 |             bottlenecks = detailed.get("bottlenecks", {})
309 |             if bottlenecks:
310 |                 html_content += """
311 |                 <div class="section">
312 |                     <h2>Bottleneck Analysis</h2>
313 |                 """
314 |                 
315 |                 # Slow endpoints
316 |                 slow_endpoints = bottlenecks.get("slow_endpoints", [])
317 |                 if slow_endpoints:
318 |                     html_content += """
319 |                     <h3>Slow Endpoints</h3>
320 |                     <table>
321 |                         <tr>
322 |                             <th>Endpoint</th>
323 |                             <th>Response Time</th>
324 |                             <th>Threshold</th>
325 |                             <th>Severity</th>
326 |                         </tr>
327 |                     """
328 |                     
329 |                     for endpoint in slow_endpoints:
330 |                         severity_class = f"severity-{endpoint.get('severity', 'medium')}"
331 |                         html_content += f"""
332 |                             <tr>
333 |                                 <td>{endpoint.get('endpoint')}</td>
334 |                                 <td>{endpoint.get('response_time', 'N/A'):.2f} ms</td>
335 |                                 <td>{endpoint.get('threshold', 'N/A'):.2f} ms</td>
336 |                                 <td class="{severity_class}">{endpoint.get('severity', 'N/A').upper()}</td>
337 |                             </tr>
338 |                         """
339 |                     
340 |                     html_content += """
341 |                     </table>
342 |                     """
343 |                 
344 |                 # Error-prone endpoints
345 |                 error_endpoints = bottlenecks.get("error_prone_endpoints", [])
346 |                 if error_endpoints:
347 |                     html_content += """
348 |                     <h3>Error-Prone Endpoints</h3>
349 |                     <table>
350 |                         <tr>
351 |                             <th>Endpoint</th>
352 |                             <th>Error Rate</th>
353 |                             <th>Threshold</th>
354 |                             <th>Severity</th>
355 |                         </tr>
356 |                     """
357 |                     
358 |                     for endpoint in error_endpoints:
359 |                         severity_class = f"severity-{endpoint.get('severity', 'medium')}"
360 |                         html_content += f"""
361 |                             <tr>
362 |                                 <td>{endpoint.get('endpoint')}</td>
363 |                                 <td>{endpoint.get('error_rate', 'N/A'):.2f}%</td>
364 |                                 <td>{endpoint.get('threshold', 'N/A'):.2f}%</td>
365 |                                 <td class="{severity_class}">{endpoint.get('severity', 'N/A').upper()}</td>
366 |                             </tr>
367 |                         """
368 |                     
369 |                     html_content += """
370 |                     </table>
371 |                     """
372 |                 
373 |                 html_content += """
374 |                 </div>
375 |                 """
376 |             
377 |             # Add insights and recommendations
378 |             insights = detailed.get("insights", {})
379 |             if insights:
380 |                 html_content += """
381 |                 <div class="section">
382 |                     <h2>Insights and Recommendations</h2>
383 |                 """
384 |                 
385 |                 # Recommendations
386 |                 recommendations = insights.get("recommendations", [])
387 |                 if recommendations:
388 |                     html_content += """
389 |                     <h3>Recommendations</h3>
390 |                     <table>
391 |                         <tr>
392 |                             <th>Priority</th>
393 |                             <th>Issue</th>
394 |                             <th>Recommendation</th>
395 |                             <th>Expected Impact</th>
396 |                         </tr>
397 |                     """
398 |                     
399 |                     for rec in recommendations:
400 |                         priority_level = rec.get('priority_level', 'medium')
401 |                         severity_class = f"severity-{priority_level}"
402 |                         html_content += f"""
403 |                             <tr>
404 |                                 <td class="{severity_class}">{priority_level.upper()}</td>
405 |                                 <td>{rec.get('issue')}</td>
406 |                                 <td>{rec.get('recommendation')}</td>
407 |                                 <td>{rec.get('expected_impact')}</td>
408 |                             </tr>
409 |                         """
410 |                     
411 |                     html_content += """
412 |                     </table>
413 |                     """
414 |                 
415 |                 # Scaling insights
416 |                 scaling_insights = insights.get("scaling_insights", [])
417 |                 if scaling_insights:
418 |                     html_content += """
419 |                     <h3>Scaling Insights</h3>
420 |                     <table>
421 |                         <tr>
422 |                             <th>Topic</th>
423 |                             <th>Description</th>
424 |                         </tr>
425 |                     """
426 |                     
427 |                     for insight in scaling_insights:
428 |                         html_content += f"""
429 |                             <tr>
430 |                                 <td>{insight.get('topic')}</td>
431 |                                 <td>{insight.get('description')}</td>
432 |                             </tr>
433 |                         """
434 |                     
435 |                     html_content += """
436 |                     </table>
437 |                     """
438 |                 
439 |                 html_content += """
440 |                 </div>
441 |                 """
442 |         
443 |         # Close HTML
444 |         html_content += """
445 |         </body>
446 |         </html>
447 |         """
448 |         
449 |         # Save HTML report
450 |         output_path = self._get_output_path(output_file)
451 |         with open(output_path, 'w') as f:
452 |             f.write(html_content)
453 |         
454 |         return output_path
455 |     
456 |     def figure_to_base64(self, fig) -> str:
457 |         """Convert a figure to a base64-encoded string.
458 |         
459 |         Args:
460 |             fig: Figure object
461 |             
462 |         Returns:
463 |             Base64-encoded string
464 |         """
465 |         # In a real implementation, this would convert a matplotlib figure to base64
466 |         # For this simplified version, we'll just return a placeholder
467 |         return "base64_encoded_image_placeholder"
468 |     
469 |     def _get_output_path(self, output_file: str) -> str:
470 |         """Get the full path for an output file.
471 |         
472 |         Args:
473 |             output_file: Output file name or path
474 |             
475 |         Returns:
476 |             Full path to the output file
477 |         """
478 |         if self.output_dir:
479 |             return os.path.join(self.output_dir, output_file)
480 |         else:
481 |             return output_file
482 |     
483 |     def _calculate_percentile(self, values: List[float], percentile: float) -> float:
484 |         """Calculate a percentile from values.
485 |         
486 |         Args:
487 |             values: List of values
488 |             percentile: Percentile to calculate (0-100)
489 |             
490 |         Returns:
491 |             Percentile value
492 |         """
493 |         if not values:
494 |             return 0
495 |         
496 |         # Sort values
497 |         sorted_values = sorted(values)
498 |         
499 |         # Calculate index
500 |         index = (percentile / 100) * (len(sorted_values) - 1)
501 |         
502 |         # If index is an integer, return the value at that index
503 |         if index.is_integer():
504 |             return sorted_values[int(index)]
505 |         
506 |         # Otherwise, interpolate between the two nearest values
507 |         lower_index = int(index)
508 |         upper_index = lower_index + 1
509 |         lower_value = sorted_values[lower_index]
510 |         upper_value = sorted_values[upper_index]
511 |         fraction = index - lower_index
512 |         
513 |         return lower_value + (upper_value - lower_value) * fraction
```
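
As a quick orientation for this module, here is a minimal usage sketch. It is illustrative only: the real `TimeSeriesMetrics` class lives in `analyzer.models` (page 1 of this export), but since `create_time_series_graph` only reads the `timestamp` attribute and the selected metric attribute, a `SimpleNamespace` stand-in is enough. The output directory, file name, and sample values below are made up, and the repository root is assumed to be on `PYTHONPATH`.

```python
# Illustrative sketch: SimpleNamespace stands in for analyzer.models.TimeSeriesMetrics,
# since the engine only reads .timestamp and the chosen metric attribute.
from datetime import datetime, timedelta
from types import SimpleNamespace

from analyzer.visualization.engine import VisualizationEngine

start = datetime(2024, 1, 1, 12, 0, 0)
samples = [
    SimpleNamespace(timestamp=start + timedelta(seconds=5 * i),
                    average_response_time=200.0 + 10 * i)
    for i in range(5)
]

engine = VisualizationEngine(output_dir="viz_output")  # hypothetical output directory

# With output_file set, the engine writes a plain-text description and returns its path;
# without it, it returns the graph as a plain dict.
path = engine.create_time_series_graph(
    samples,
    metric_name="average_response_time",
    output_file="response_time.txt",
)
print(path)  # viz_output/response_time.txt
```

The private `_calculate_percentile` helper interpolates linearly between the two nearest ranks: for sorted values `[100, 200, 300, 400]`, the 90th-percentile index is 0.9 × 3 = 2.7, so it returns 300 + 0.7 × (400 − 300) = 370.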

--------------------------------------------------------------------------------
/jmeter_server.py:
--------------------------------------------------------------------------------

```python
  1 | from typing import Any
  2 | import subprocess
  3 | from pathlib import Path
  4 | from mcp.server.fastmcp import FastMCP
  5 | import os
  6 | import datetime
  7 | import uuid
  8 | import logging
  9 | 
 10 | from dotenv import load_dotenv
 11 | 
 12 | # Configure logging
 13 | logging.basicConfig(
 14 |     level=logging.INFO,
 15 |     format='%(asctime)s - %(levelname)s - %(message)s'
 16 | )
 17 | logger = logging.getLogger(__name__)
 18 | 
 19 | # Load environment variables
 20 | load_dotenv()
 21 | 
 22 | # Initialize MCP server
 23 | mcp = FastMCP("jmeter")
 24 | 
 25 | async def run_jmeter(test_file: str, non_gui: bool = True, properties: dict = None, generate_report: bool = False, report_output_dir: str = None, log_file: str = None) -> str:
 26 |     """Run a JMeter test.
 27 | 
 28 |     Args:
 29 |         test_file: Path to the JMeter test file (.jmx)
 30 |         non_gui: Run in non-GUI mode (default: True)
 31 |         properties: Dictionary of JMeter properties to pass with -J (default: None)
 32 |         generate_report: Whether to generate report dashboard after load test (default: False)
 33 |         report_output_dir: Output folder for report dashboard (default: None)
 34 |         log_file: Name of JTL file to log sample results to (default: None)
 35 | 
 36 |     Returns:
 37 |         str: JMeter execution output
 38 |     """
 39 |     try:
 40 |         # Convert to absolute path
 41 |         test_file_path = Path(test_file).resolve()
 42 |         
 43 |         # Validate file exists and is a .jmx file
 44 |         if not test_file_path.exists():
 45 |             return f"Error: Test file not found: {test_file}"
 46 |         if not test_file_path.suffix == '.jmx':
 47 |             return f"Error: Invalid file type. Expected .jmx file: {test_file}"
 48 | 
 49 |         # Get JMeter binary path from environment
 50 |         jmeter_bin = os.getenv('JMETER_BIN', 'jmeter')
 51 |         java_opts = os.getenv('JMETER_JAVA_OPTS', '')
 52 | 
 53 |         # Log the JMeter binary path and Java options
 54 |         logger.info(f"JMeter binary path: {jmeter_bin}")
 55 |         logger.debug(f"Java options: {java_opts}")
 56 | 
 57 |         # Build command
 58 |         cmd = [str(Path(jmeter_bin).resolve())]
 59 |         
 60 |         if non_gui:
 61 |             cmd.extend(['-n'])
 62 |         cmd.extend(['-t', str(test_file_path)])
 63 |         
 64 |         # Add JMeter properties if provided
 65 |         if properties:
 66 |             for prop_name, prop_value in properties.items():
 67 |                 cmd.extend([f'-J{prop_name}={prop_value}'])
 68 |                 logger.debug(f"Adding property: -J{prop_name}={prop_value}")
 69 |         
 70 |         # Add report generation options if requested
 71 |         if generate_report and non_gui:
 72 |             if log_file is None:
 73 |                 # Generate unique log file name if not specified
 74 |                 unique_id = generate_unique_id()
 75 |                 log_file = f"{test_file_path.stem}_{unique_id}_results.jtl"
 76 |                 logger.debug(f"Using generated unique log file: {log_file}")
 77 |             
 78 |             cmd.extend(['-l', log_file])
 79 |             cmd.extend(['-e'])
 80 |             
 81 |             # Always ensure report_output_dir is unique
 82 |             unique_id = unique_id if 'unique_id' in locals() else generate_unique_id()
 83 |             
 84 |             if report_output_dir:
 85 |                 # Append unique identifier to user-provided report directory
 86 |                 original_dir = report_output_dir
 87 |                 report_output_dir = f"{original_dir}_{unique_id}"
 88 |                 logger.debug(f"Making user-provided report directory unique: {original_dir} -> {report_output_dir}")
 89 |             else:
 90 |                 # Generate unique report output directory if not specified
 91 |                 report_output_dir = f"{test_file_path.stem}_{unique_id}_report"
 92 |                 logger.debug(f"Using generated unique report output directory: {report_output_dir}")
 93 |                 
 94 |             cmd.extend(['-o', report_output_dir])
 95 | 
 96 |         # Log the full command for debugging
 97 |         logger.debug(f"Executing command: {' '.join(cmd)}")
 98 |         
 99 |         if non_gui:
100 |             # For non-GUI mode, capture output
101 |             result = subprocess.run(cmd, capture_output=True, text=True)
102 |             
103 |             # Log output for debugging
104 |             logger.debug("Command output:")
105 |             logger.debug(f"Return code: {result.returncode}")
106 |             logger.debug(f"Stdout: {result.stdout}")
107 |             logger.debug(f"Stderr: {result.stderr}")
108 | 
109 |             if result.returncode != 0:
110 |                 return f"Error executing JMeter test:\n{result.stderr}"
111 |             
112 |             return result.stdout
113 |         else:
114 |             # For GUI mode, start process without capturing output
115 |             subprocess.Popen(cmd)
116 |             return "JMeter GUI launched successfully"
117 | 
118 |     except Exception as e:
119 |         return f"Unexpected error: {str(e)}"
120 | 
121 | @mcp.tool()
122 | async def execute_jmeter_test(test_file: str, gui_mode: bool = False, properties: dict = None) -> str:
123 |     """Execute a JMeter test.
124 | 
125 |     Args:
126 |         test_file: Path to the JMeter test file (.jmx)
127 |         gui_mode: Whether to run in GUI mode (default: False)
128 |         properties: Dictionary of JMeter properties to pass with -J (default: None)
129 |     """
130 |     return await run_jmeter(test_file, non_gui=not gui_mode, properties=properties)  # Run in non-GUI mode by default
131 | 
132 | @mcp.tool()
133 | async def execute_jmeter_test_non_gui(test_file: str, properties: dict = None, generate_report: bool = False, report_output_dir: str = None, log_file: str = None) -> str:
134 |     """Execute a JMeter test in non-GUI mode - supports JMeter properties.
135 | 
136 |     Args:
137 |         test_file: Path to the JMeter test file (.jmx)
138 |         properties: Dictionary of JMeter properties to pass with -J (default: None)
139 |         generate_report: Whether to generate report dashboard after load test (default: False)
140 |         report_output_dir: Output folder for report dashboard (default: None)
141 |         log_file: Name of JTL file to log sample results to (default: None)
142 |     """
143 |     return await run_jmeter(test_file, non_gui=True, properties=properties, generate_report=generate_report, report_output_dir=report_output_dir, log_file=log_file)
144 | 
145 | # Import the analyzer module
146 | from analyzer.models import TestResults, TimeSeriesMetrics, EndpointMetrics
147 | from analyzer.analyzer import TestResultsAnalyzer
148 | from analyzer.visualization.engine import VisualizationEngine
149 | 
150 | @mcp.tool()
151 | async def analyze_jmeter_results(jtl_file: str, detailed: bool = False) -> str:
152 |     """Analyze JMeter test results and provide a summary of key metrics and insights.
153 |     
154 |     Args:
155 |         jtl_file: Path to the JTL file containing test results
156 |         detailed: Whether to include detailed analysis (default: False)
157 |         
158 |     Returns:
159 |         str: Analysis results in a formatted string
160 |     """
161 |     try:
162 |         analyzer = TestResultsAnalyzer()
163 |         
164 |         # Validate file exists
165 |         file_path = Path(jtl_file)
166 |         if not file_path.exists():
167 |             return f"Error: JTL file not found: {jtl_file}"
168 |         
169 |         try:
170 |             # Analyze the file
171 |             analysis_results = analyzer.analyze_file(file_path, detailed=detailed)
172 |             
173 |             # Format the results as a string
174 |             result_str = f"Analysis of {jtl_file}:\n\n"
175 |             
176 |             # Add summary information
177 |             summary = analysis_results.get("summary", {})
178 |             result_str += "Summary:\n"
179 |             result_str += f"- Total samples: {summary.get('total_samples', 'N/A')}\n"
180 |             result_str += f"- Error count: {summary.get('error_count', 'N/A')} ({summary.get('error_rate', 'N/A'):.2f}%)\n"
181 |             result_str += f"- Response times (ms):\n"
182 |             result_str += f"  - Average: {summary.get('average_response_time', 'N/A'):.2f}\n"
183 |             result_str += f"  - Median: {summary.get('median_response_time', 'N/A'):.2f}\n"
184 |             result_str += f"  - 90th percentile: {summary.get('percentile_90', 'N/A'):.2f}\n"
185 |             result_str += f"  - 95th percentile: {summary.get('percentile_95', 'N/A'):.2f}\n"
186 |             result_str += f"  - 99th percentile: {summary.get('percentile_99', 'N/A'):.2f}\n"
187 |             result_str += f"  - Min: {summary.get('min_response_time', 'N/A'):.2f}\n"
188 |             result_str += f"  - Max: {summary.get('max_response_time', 'N/A'):.2f}\n"
189 |             result_str += f"- Throughput: {summary.get('throughput', 'N/A'):.2f} requests/second\n"
190 |             result_str += f"- Start time: {summary.get('start_time', 'N/A')}\n"
191 |             result_str += f"- End time: {summary.get('end_time', 'N/A')}\n"
192 |             result_str += f"- Duration: {summary.get('duration', 'N/A'):.2f} seconds\n\n"
193 |             
194 |             # Add detailed information if requested
195 |             if detailed and "detailed" in analysis_results:
196 |                 detailed_info = analysis_results["detailed"]
197 |                 
198 |                 # Add endpoint information
199 |                 endpoints = detailed_info.get("endpoints", {})
200 |                 if endpoints:
201 |                     result_str += "Endpoint Analysis:\n"
202 |                     for endpoint, metrics in endpoints.items():
203 |                         result_str += f"- {endpoint}:\n"
204 |                         result_str += f"  - Samples: {metrics.get('total_samples', 'N/A')}\n"
205 |                         result_str += f"  - Errors: {metrics.get('error_count', 'N/A')} ({metrics.get('error_rate', 'N/A'):.2f}%)\n"
206 |                         result_str += f"  - Average response time: {metrics.get('average_response_time', 'N/A'):.2f} ms\n"
207 |                         result_str += f"  - 95th percentile: {metrics.get('percentile_95', 'N/A'):.2f} ms\n"
208 |                         result_str += f"  - Throughput: {metrics.get('throughput', 'N/A'):.2f} requests/second\n"
209 |                     result_str += "\n"
210 |                 
211 |                 # Add bottleneck information
212 |                 bottlenecks = detailed_info.get("bottlenecks", {})
213 |                 if bottlenecks:
214 |                     result_str += "Bottleneck Analysis:\n"
215 |                     
216 |                     # Slow endpoints
217 |                     slow_endpoints = bottlenecks.get("slow_endpoints", [])
218 |                     if slow_endpoints:
219 |                         result_str += "- Slow Endpoints:\n"
220 |                         for endpoint in slow_endpoints:
221 |                             result_str += f"  - {endpoint.get('endpoint')}: {endpoint.get('response_time'):.2f} ms "
222 |                             result_str += f"(Severity: {endpoint.get('severity')})\n"
223 |                         result_str += "\n"
224 |                     
225 |                     # Error-prone endpoints
226 |                     error_endpoints = bottlenecks.get("error_prone_endpoints", [])
227 |                     if error_endpoints:
228 |                         result_str += "- Error-Prone Endpoints:\n"
229 |                         for endpoint in error_endpoints:
230 |                             result_str += f"  - {endpoint.get('endpoint')}: {endpoint.get('error_rate'):.2f}% "
231 |                             result_str += f"(Severity: {endpoint.get('severity')})\n"
232 |                         result_str += "\n"
233 |                     
234 |                     # Anomalies
235 |                     anomalies = bottlenecks.get("anomalies", [])
236 |                     if anomalies:
237 |                         result_str += "- Response Time Anomalies:\n"
238 |                         for anomaly in anomalies[:3]:  # Show only top 3 anomalies
239 |                             result_str += f"  - At {anomaly.get('timestamp')}: "
240 |                             result_str += f"Expected {anomaly.get('expected_value'):.2f} ms, "
241 |                             result_str += f"Got {anomaly.get('actual_value'):.2f} ms "
242 |                             result_str += f"({anomaly.get('deviation_percentage'):.2f}% deviation)\n"
243 |                         result_str += "\n"
244 |                     
245 |                     # Concurrency impact
246 |                     concurrency = bottlenecks.get("concurrency_impact", {})
247 |                     if concurrency:
248 |                         result_str += "- Concurrency Impact:\n"
249 |                         correlation = concurrency.get("correlation", 0)
250 |                         result_str += f"  - Correlation between threads and response time: {correlation:.2f}\n"
251 |                         
252 |                         if concurrency.get("has_degradation", False):
253 |                             result_str += f"  - Performance degradation detected at {concurrency.get('degradation_threshold')} threads\n"
254 |                         else:
255 |                             result_str += "  - No significant performance degradation detected with increasing threads\n"
256 |                         result_str += "\n"
257 |                 
258 |                 # Add insights and recommendations
259 |                 insights = detailed_info.get("insights", {})
260 |                 if insights:
261 |                     result_str += "Insights and Recommendations:\n"
262 |                     
263 |                     # Recommendations
264 |                     recommendations = insights.get("recommendations", [])
265 |                     if recommendations:
266 |                         result_str += "- Top Recommendations:\n"
267 |                         for rec in recommendations[:3]:  # Show only top 3 recommendations
268 |                             result_str += f"  - [{rec.get('priority_level', 'medium').upper()}] {rec.get('issue')}\n"
269 |                             result_str += f"    Recommendation: {rec.get('recommendation')}\n"
270 |                             result_str += f"    Expected Impact: {rec.get('expected_impact')}\n"
271 |                         result_str += "\n"
272 |                     
273 |                     # Scaling insights
274 |                     scaling_insights = insights.get("scaling_insights", [])
275 |                     if scaling_insights:
276 |                         result_str += "- Scaling Insights:\n"
277 |                         for insight in scaling_insights[:2]:  # Show only top 2 insights
278 |                             result_str += f"  - {insight.get('topic')}: {insight.get('description')}\n"
279 |                         result_str += "\n"
280 |                 
281 |                 # Add time series information (just a summary)
282 |                 time_series = detailed_info.get("time_series", [])
283 |                 if time_series:
284 |                     result_str += "Time Series Analysis:\n"
285 |                     result_str += f"- Intervals: {len(time_series)}\n"
286 |                     result_str += f"- Interval duration: 5 seconds\n"
287 |                     
288 |                     # Calculate average throughput and response time over intervals
289 |                     avg_throughput = sum(ts.get('throughput', 0) for ts in time_series) / len(time_series)
290 |                     avg_response_time = sum(ts.get('average_response_time', 0) for ts in time_series) / len(time_series)
291 |                     
292 |                     result_str += f"- Average throughput over intervals: {avg_throughput:.2f} requests/second\n"
293 |                     result_str += f"- Average response time over intervals: {avg_response_time:.2f} ms\n\n"
294 |             
295 |             return result_str
296 |             
297 |         except ValueError as e:
298 |             return f"Error analyzing JTL file: {str(e)}"
299 |         
300 |     except Exception as e:
301 |         return f"Error analyzing JMeter results: {str(e)}"
302 | 
303 | @mcp.tool()
304 | async def identify_performance_bottlenecks(jtl_file: str) -> str:
305 |     """Identify performance bottlenecks in JMeter test results.
306 |     
307 |     Args:
308 |         jtl_file: Path to the JTL file containing test results
309 |         
310 |     Returns:
311 |         str: Bottleneck analysis results in a formatted string
312 |     """
313 |     try:
314 |         analyzer = TestResultsAnalyzer()
315 |         
316 |         # Validate file exists
317 |         file_path = Path(jtl_file)
318 |         if not file_path.exists():
319 |             return f"Error: JTL file not found: {jtl_file}"
320 |         
321 |         try:
322 |             # Analyze the file with detailed analysis
323 |             analysis_results = analyzer.analyze_file(file_path, detailed=True)
324 |             
325 |             # Format the results as a string
326 |             result_str = f"Performance Bottleneck Analysis of {jtl_file}:\n\n"
327 |             
328 |             # Add bottleneck information
329 |             detailed_info = analysis_results.get("detailed", {})
330 |             bottlenecks = detailed_info.get("bottlenecks", {})
331 |             
332 |             if not bottlenecks:
333 |                 return f"No bottlenecks identified in {jtl_file}."
334 |             
335 |             # Slow endpoints
336 |             slow_endpoints = bottlenecks.get("slow_endpoints", [])
337 |             if slow_endpoints:
338 |                 result_str += "Slow Endpoints:\n"
339 |                 for endpoint in slow_endpoints:
340 |                     result_str += f"- {endpoint.get('endpoint')}: {endpoint.get('response_time'):.2f} ms "
341 |                     result_str += f"(Severity: {endpoint.get('severity')})\n"
342 |                 result_str += "\n"
343 |             else:
344 |                 result_str += "No slow endpoints identified.\n\n"
345 |             
346 |             # Error-prone endpoints
347 |             error_endpoints = bottlenecks.get("error_prone_endpoints", [])
348 |             if error_endpoints:
349 |                 result_str += "Error-Prone Endpoints:\n"
350 |                 for endpoint in error_endpoints:
351 |                     result_str += f"- {endpoint.get('endpoint')}: {endpoint.get('error_rate'):.2f}% "
352 |                     result_str += f"(Severity: {endpoint.get('severity')})\n"
353 |                 result_str += "\n"
354 |             else:
355 |                 result_str += "No error-prone endpoints identified.\n\n"
356 |             
357 |             # Anomalies
358 |             anomalies = bottlenecks.get("anomalies", [])
359 |             if anomalies:
360 |                 result_str += "Response Time Anomalies:\n"
361 |                 for anomaly in anomalies:
362 |                     result_str += f"- At {anomaly.get('timestamp')}: "
363 |                     result_str += f"Expected {anomaly.get('expected_value'):.2f} ms, "
364 |                     result_str += f"Got {anomaly.get('actual_value'):.2f} ms "
365 |                     result_str += f"({anomaly.get('deviation_percentage'):.2f}% deviation)\n"
366 |                 result_str += "\n"
367 |             else:
368 |                 result_str += "No response time anomalies detected.\n\n"
369 |             
370 |             # Concurrency impact
371 |             concurrency = bottlenecks.get("concurrency_impact", {})
372 |             if concurrency:
373 |                 result_str += "Concurrency Impact:\n"
374 |                 correlation = concurrency.get("correlation", 0)
375 |                 result_str += f"- Correlation between threads and response time: {correlation:.2f}\n"
376 |                 
377 |                 if concurrency.get("has_degradation", False):
378 |                     result_str += f"- Performance degradation detected at {concurrency.get('degradation_threshold')} threads\n"
379 |                 else:
380 |                     result_str += "- No significant performance degradation detected with increasing threads\n"
381 |                 result_str += "\n"
382 |             
383 |             # Add recommendations
384 |             insights = detailed_info.get("insights", {})
385 |             recommendations = insights.get("recommendations", [])
386 |             
387 |             if recommendations:
388 |                 result_str += "Recommendations:\n"
389 |                 for rec in recommendations[:5]:  # Show top 5 recommendations
390 |                     result_str += f"- [{rec.get('priority_level', 'medium').upper()}] {rec.get('recommendation')}\n"
391 |             else:
392 |                 result_str += "No specific recommendations available.\n"
393 |             
394 |             return result_str
395 |             
396 |         except ValueError as e:
397 |             return f"Error analyzing JTL file: {str(e)}"
398 |         
399 |     except Exception as e:
400 |         return f"Error identifying performance bottlenecks: {str(e)}"
401 | 
402 | @mcp.tool()
403 | async def get_performance_insights(jtl_file: str) -> str:
404 |     """Get insights and recommendations for improving performance based on JMeter test results.
405 |     
406 |     Args:
407 |         jtl_file: Path to the JTL file containing test results
408 |         
409 |     Returns:
410 |         str: Performance insights and recommendations in a formatted string
411 |     """
412 |     try:
413 |         analyzer = TestResultsAnalyzer()
414 |         
415 |         # Validate file exists
416 |         file_path = Path(jtl_file)
417 |         if not file_path.exists():
418 |             return f"Error: JTL file not found: {jtl_file}"
419 |         
420 |         try:
421 |             # Analyze the file with detailed analysis
422 |             analysis_results = analyzer.analyze_file(file_path, detailed=True)
423 |             
424 |             # Format the results as a string
425 |             result_str = f"Performance Insights for {jtl_file}:\n\n"
426 |             
427 |             # Add insights information
428 |             detailed_info = analysis_results.get("detailed", {})
429 |             insights = detailed_info.get("insights", {})
430 |             
431 |             if not insights:
432 |                 return f"No insights available for {jtl_file}."
433 |             
434 |             # Recommendations
435 |             recommendations = insights.get("recommendations", [])
436 |             if recommendations:
437 |                 result_str += "Recommendations:\n"
438 |                 for i, rec in enumerate(recommendations[:5], 1):  # Show top 5 recommendations
439 |                     result_str += f"{i}. [{rec.get('priority_level', 'medium').upper()}] {rec.get('issue')}\n"
440 |                     result_str += f"   - Recommendation: {rec.get('recommendation')}\n"
441 |                     result_str += f"   - Expected Impact: {rec.get('expected_impact')}\n"
442 |                     result_str += f"   - Implementation Difficulty: {rec.get('implementation_difficulty')}\n\n"
443 |             else:
444 |                 result_str += "No specific recommendations available.\n\n"
445 |             
446 |             # Scaling insights
447 |             scaling_insights = insights.get("scaling_insights", [])
448 |             if scaling_insights:
449 |                 result_str += "Scaling Insights:\n"
450 |                 for i, insight in enumerate(scaling_insights, 1):
451 |                     result_str += f"{i}. {insight.get('topic')}\n"
452 |                     result_str += f"   {insight.get('description')}\n\n"
453 |             else:
454 |                 result_str += "No scaling insights available.\n\n"
455 |             
456 |             # Add summary metrics for context
457 |             summary = analysis_results.get("summary", {})
458 |             result_str += "Test Summary:\n"
459 |             result_str += f"- Total samples: {summary.get('total_samples', 'N/A')}\n"
460 |             result_str += f"- Error rate: {summary.get('error_rate', 'N/A'):.2f}%\n"
461 |             result_str += f"- Average response time: {summary.get('average_response_time', 'N/A'):.2f} ms\n"
462 |             result_str += f"- 95th percentile: {summary.get('percentile_95', 'N/A'):.2f} ms\n"
463 |             result_str += f"- Throughput: {summary.get('throughput', 'N/A'):.2f} requests/second\n"
464 |             
465 |             return result_str
466 |             
467 |         except ValueError as e:
468 |             return f"Error analyzing JTL file: {str(e)}"
469 |         
470 |     except Exception as e:
471 |         return f"Error getting performance insights: {str(e)}"
472 | 
473 | @mcp.tool()
474 | async def generate_visualization(jtl_file: str, visualization_type: str, output_file: str) -> str:
475 |     """Generate visualizations of JMeter test results.
476 |     
477 |     Args:
478 |         jtl_file: Path to the JTL file containing test results
479 |         visualization_type: Type of visualization to generate (time_series, distribution, comparison, html_report)
480 |         output_file: Path to save the visualization
481 |         
482 |     Returns:
483 |         str: Path to the generated visualization file
484 |     """
485 |     try:
486 |         analyzer = TestResultsAnalyzer()
487 |         
488 |         # Validate file exists
489 |         file_path = Path(jtl_file)
490 |         if not file_path.exists():
491 |             return f"Error: JTL file not found: {jtl_file}"
492 |         
493 |         try:
494 |             # Analyze the file with detailed analysis
495 |             analysis_results = analyzer.analyze_file(file_path, detailed=True)
496 |             
497 |             # Create visualization engine
498 |             output_dir = os.path.dirname(output_file) if output_file else None
499 |             engine = VisualizationEngine(output_dir=output_dir)
500 |             output_file = os.path.basename(output_file)  # the engine joins output_dir with the file name itself
501 |             # Generate visualization based on type
502 |             if visualization_type == "time_series":
503 |                 # Extract time series metrics
504 |                 time_series = analysis_results.get("detailed", {}).get("time_series", [])
505 |                 if not time_series:
506 |                     return "No time series data available for visualization."
507 |                 
508 |                 # Convert to TimeSeriesMetrics objects
509 |                 metrics = []
510 |                 for ts_data in time_series:
511 |                     metrics.append(TimeSeriesMetrics(
512 |                         timestamp=datetime.datetime.fromisoformat(ts_data["timestamp"]),
513 |                         active_threads=ts_data["active_threads"],
514 |                         throughput=ts_data["throughput"],
515 |                         average_response_time=ts_data["average_response_time"],
516 |                         error_rate=ts_data["error_rate"]
517 |                     ))
518 |                 
519 |                 # Create visualization
520 |                 output_path = engine.create_time_series_graph(
521 |                     metrics, metric_name="average_response_time", output_file=output_file)
522 |                 return f"Time series graph generated: {output_path}"
523 |                 
524 |             elif visualization_type == "distribution":
525 |                 # Approximate response times from each endpoint's average (raw samples are not kept in the summary)
526 |                 samples = []
527 |                 for endpoint, metrics in analysis_results.get("detailed", {}).get("endpoints", {}).items():
528 |                     samples.extend([metrics["average_response_time"]] * metrics["total_samples"])
529 |                 
530 |                 if not samples:
531 |                     return "No response time data available for visualization."
532 |                 
533 |                 # Create visualization
534 |                 output_path = engine.create_distribution_graph(samples, output_file=output_file)
535 |                 return f"Distribution graph generated: {output_path}"
536 |                 
537 |             elif visualization_type == "comparison":
538 |                 # Extract endpoint metrics
539 |                 endpoints = analysis_results.get("detailed", {}).get("endpoints", {})
540 |                 if not endpoints:
541 |                     return "No endpoint data available for visualization."
542 |                 
543 |                 # Convert to EndpointMetrics objects
544 |                 endpoint_metrics = {}
545 |                 for endpoint, metrics_data in endpoints.items():
546 |                     endpoint_metrics[endpoint] = EndpointMetrics(
547 |                         endpoint=endpoint,
548 |                         total_samples=metrics_data["total_samples"],
549 |                         error_count=metrics_data["error_count"],
550 |                         error_rate=metrics_data["error_rate"],
551 |                         average_response_time=metrics_data["average_response_time"],
552 |                         median_response_time=metrics_data["median_response_time"],
553 |                         percentile_90=metrics_data["percentile_90"],
554 |                         percentile_95=metrics_data["percentile_95"],
555 |                         percentile_99=metrics_data["percentile_99"],
556 |                         min_response_time=metrics_data["min_response_time"],
557 |                         max_response_time=metrics_data["max_response_time"],
558 |                         throughput=metrics_data["throughput"],
559 |                         test_duration=analysis_results["summary"]["duration"]
560 |                     )
561 |                 
562 |                 # Create visualization
563 |                 output_path = engine.create_endpoint_comparison_chart(
564 |                     endpoint_metrics, metric_name="average_response_time", output_file=output_file)
565 |                 return f"Endpoint comparison chart generated: {output_path}"
566 |                 
567 |             elif visualization_type == "html_report":
568 |                 # Create HTML report
569 |                 output_path = engine.create_html_report(analysis_results, output_file)
570 |                 return f"HTML report generated: {output_path}"
571 |                 
572 |             else:
573 |                 return f"Unknown visualization type: {visualization_type}. " \
574 |                        f"Supported types: time_series, distribution, comparison, html_report"
575 |             
576 |         except ValueError as e:
577 |             return f"Error generating visualization: {str(e)}"
578 |         
579 |     except Exception as e:
580 |         return f"Error generating visualization: {str(e)}"
581 | 
582 | def generate_unique_id():
583 |     """
584 |     Generate a unique identifier using timestamp and UUID.
585 |     
586 |     Returns:
587 |         str: A unique identifier string
588 |     """
589 |     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
590 |     random_id = str(uuid.uuid4())[:8]  # Use first 8 chars of UUID for brevity
591 |     return f"{timestamp}_{random_id}"
592 | 
593 | 
594 | if __name__ == "__main__":
595 |     mcp.run(transport='stdio')
```
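
Outside of an MCP client, `run_jmeter` can also be driven directly with `asyncio`. The sketch below is a hypothetical invocation, not part of the repository: it assumes `JMETER_BIN` in `.env` points at a working JMeter binary, uses the bundled `sample_test.jmx`, and the property names `threads` and `rampup` are placeholders for whatever `-J` properties your test plan actually reads.

```python
# Minimal sketch: run the bundled sample plan headless and generate the HTML dashboard.
# Assumes JMETER_BIN points at a JMeter binary; property names below are placeholders.
import asyncio

from jmeter_server import run_jmeter

output = asyncio.run(run_jmeter(
    "sample_test.jmx",
    non_gui=True,
    properties={"threads": "10", "rampup": "30"},  # forwarded as -Jthreads=10 -Jrampup=30
    generate_report=True,                          # adds -l <results.jtl> -e -o <report dir>
    report_output_dir="sample_report",             # a timestamp/uuid suffix is appended automatically
))
print(output)
```

Inside an MCP client, the `execute_jmeter_test_non_gui` tool forwards exactly these arguments to `run_jmeter`.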