This is page 2 of 4. Use http://codebase.md/sedwardstx/demomcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .gitignore
├── .mcp.json
├── check_server.py
├── CLAUDE.md
├── config
│ └── default.yml
├── docs
│ ├── api_reference.md
│ ├── demo-recording
│ │ └── MCPDemo.gif
│ ├── example-context-docs
│ │ ├── mcp-ai-agent-architecture.md
│ │ ├── mcp-ai-agent-dev-task.md
│ │ └── mcp-ai-agent-prd.md
│ └── getting_started.md
├── LICENSE
├── main_tcp.py
├── main.py
├── mcp_tcp_client.py
├── pyproject.toml
├── QUICK_START.md
├── README.md
├── scripts
│ └── test_server.py
├── setup.py
├── src
│ └── mcp_log_analyzer
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ └── server.py
│ ├── config
│ │ ├── __init__.py
│ │ └── settings.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── models.py
│ │ └── state_manager.py
│ ├── mcp_server
│ │ ├── __init__.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── schemas.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── linux_testing_prompt.py
│ │ │ ├── log_management_prompt.py
│ │ │ ├── mcp_assets_overview_prompt.py
│ │ │ ├── network_testing_prompt.py
│ │ │ ├── process_monitoring_prompt.py
│ │ │ └── windows_testing_prompt.py
│ │ ├── resources
│ │ │ ├── __init__.py
│ │ │ ├── linux_resources.py
│ │ │ ├── logs_resources.py
│ │ │ ├── network_resources.py
│ │ │ ├── process_resources.py
│ │ │ └── windows_resources.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── health_check_tools.py
│ │ ├── linux_test_tools.py
│ │ ├── log_management_tools.py
│ │ ├── network_test_tools.py
│ │ ├── process_test_tools.py
│ │ └── windows_test_tools.py
│ ├── parsers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── csv_parser.py
│ │ ├── etl_cached_parser.py
│ │ ├── etl_large_file_parser.py
│ │ ├── etl_parser.py
│ │ ├── etl_windows_parser.py
│ │ └── evt_parser.py
│ └── tcp_proxy.py
├── TCP_PROXY_README.md
├── tcp_proxy.py
├── tcp_server.py
├── test_server.py
├── test_tcp_proxy.py
├── test_windows_setup.py
└── tests
├── test_base_parser.py
├── test_mcp_server.py
├── test_tool_utils.py
└── test_utils.py
```
# Files
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/csv_parser.py:
--------------------------------------------------------------------------------
```python
1 | """CSV log parser implementation."""
2 |
3 | import csv
4 | from datetime import datetime
5 | from pathlib import Path
6 | from typing import Any, Dict, Iterator, List, Optional, Union
7 |
8 | from ..core.models import LogRecord, LogSource, LogType
9 | from .base import BaseParser
10 |
11 |
12 | class CsvLogParser(BaseParser):
13 |     """Parser for CSV log files."""
14 | 
15 |     def __init__(self, config: Optional[Dict[str, Any]] = None):
16 |         """Initialize CSV parser.
17 | 
18 |         Args:
19 |             config: Parser configuration with optional CSV-specific settings.
20 |         """
21 |         super().__init__(config)
22 |         self.delimiter = self.config.get("delimiter", ",")  # field separator passed to csv.reader
23 |         self.header_row = self.config.get("header_row", 0)  # NOTE(review): stored but never read by this class — confirm intent
24 |         self.has_header = self.config.get("has_header", True)  # if True, first row supplies field names
25 |         self.field_names = self.config.get("field_names", [])  # explicit names used when has_header is False
26 | 
27 | def parse_file(
28 | self, source: LogSource, file_path: Union[str, Path]
29 | ) -> Iterator[LogRecord]:
30 | """Parse CSV log records from a file.
31 |
32 | Args:
33 | source: The log source information.
34 | file_path: Path to the CSV file.
35 |
36 | Yields:
37 | LogRecord objects parsed from the CSV file.
38 | """
39 | path = Path(file_path)
40 | if not path.exists():
41 | raise FileNotFoundError(f"Log file not found: {file_path}")
42 |
43 | with open(path, "r", encoding="utf-8") as file:
44 | content = file.read()
45 | yield from self.parse_content(source, content)
46 |
47 | def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
48 | """Parse CSV log records from content string.
49 |
50 | Args:
51 | source: The log source information.
52 | content: CSV content string.
53 |
54 | Yields:
55 | LogRecord objects parsed from the CSV content.
56 | """
57 | lines = content.strip().split("\n")
58 | reader = csv.reader(lines, delimiter=self.delimiter)
59 |
60 | # Handle header row
61 | if self.has_header:
62 | try:
63 | header = next(reader)
64 | field_names = header
65 | except StopIteration:
66 | return
67 | else:
68 | field_names = self.field_names or [
69 | f"field_{i}" for i in range(len(lines[0].split(self.delimiter)))
70 | ]
71 |
72 | # Parse data rows
73 | for row_num, row in enumerate(reader, start=1):
74 | if len(row) == 0:
75 | continue
76 |
77 | # Create record data dictionary
78 | record_data = {}
79 | for i, value in enumerate(row):
80 | field_name = field_names[i] if i < len(field_names) else f"field_{i}"
81 | record_data[field_name] = (
82 | value.strip() if isinstance(value, str) else value
83 | )
84 |
85 | # Try to parse timestamp
86 | timestamp = self._parse_timestamp(record_data)
87 |
88 | # Create log record
89 | yield LogRecord(
90 | source_id=source.id,
91 | timestamp=timestamp,
92 | data=record_data,
93 | raw_data=self.delimiter.join(row),
94 | )
95 |
96 | def _parse_timestamp(self, record_data: Dict[str, Any]) -> Optional[datetime]:
97 | """Parse timestamp from record data.
98 |
99 | Args:
100 | record_data: Record data dictionary.
101 |
102 | Returns:
103 | Parsed datetime object or None.
104 | """
105 | # Try common timestamp field names
106 | timestamp_fields = [
107 | "timestamp",
108 | "time",
109 | "date",
110 | "datetime",
111 | "@timestamp",
112 | "created_at",
113 | "field_0",
114 | ]
115 |
116 | for field in timestamp_fields:
117 | if field in record_data:
118 | timestamp_str = str(record_data[field])
119 |
120 | # Try common timestamp formats
121 | formats = [
122 | "%Y-%m-%d %H:%M:%S.%f",
123 | "%Y-%m-%d %H:%M:%S",
124 | "%Y/%m/%d %H:%M:%S",
125 | "%m/%d/%Y %H:%M:%S",
126 | "%d/%m/%Y %H:%M:%S",
127 | "%Y-%m-%d",
128 | "%m/%d/%Y",
129 | "%d/%m/%Y",
130 | ]
131 |
132 | for fmt in formats:
133 | try:
134 | return datetime.strptime(timestamp_str, fmt)
135 | except ValueError:
136 | continue
137 |
138 | return None
139 |
140 | def analyze(
141 | self, records: List[LogRecord], analysis_type: str = "summary"
142 | ) -> Dict[str, Any]:
143 | """Analyze CSV log records.
144 |
145 | Args:
146 | records: List of log records to analyze.
147 | analysis_type: Type of analysis to perform.
148 |
149 | Returns:
150 | Analysis results dictionary.
151 | """
152 | if not records:
153 | return {
154 | "analysis_type": analysis_type,
155 | "summary": {"total_records": 0, "message": "No records to analyze"},
156 | }
157 |
158 | # Basic statistics
159 | total_records = len(records)
160 | records_with_timestamps = sum(1 for r in records if r.timestamp is not None)
161 |
162 | # Time range analysis
163 | timestamps = [r.timestamp for r in records if r.timestamp is not None]
164 | time_range = {}
165 | if timestamps:
166 | time_range = {
167 | "earliest": min(timestamps).isoformat(),
168 | "latest": max(timestamps).isoformat(),
169 | "span_hours": (max(timestamps) - min(timestamps)).total_seconds()
170 | / 3600,
171 | }
172 |
173 | # Field analysis
174 | all_fields = set()
175 | field_counts = {}
176 | for record in records:
177 | for field in record.data.keys():
178 | all_fields.add(field)
179 | field_counts[field] = field_counts.get(field, 0) + 1
180 |
181 | # Value analysis for key fields
182 | value_analysis = {}
183 | for field in list(all_fields)[:10]: # Analyze top 10 fields
184 | values = [
185 | str(record.data.get(field, ""))
186 | for record in records
187 | if field in record.data
188 | ]
189 | unique_values = set(values)
190 | value_analysis[field] = {
191 | "total_values": len(values),
192 | "unique_values": len(unique_values),
193 | "top_values": list(
194 | sorted(unique_values, key=lambda x: values.count(x), reverse=True)[
195 | :5
196 | ]
197 | ),
198 | }
199 |
200 | summary = {
201 | "total_records": total_records,
202 | "records_with_timestamps": records_with_timestamps,
203 | "time_range": time_range,
204 | "total_fields": len(all_fields),
205 | "field_names": list(all_fields),
206 | "field_coverage": field_counts,
207 | "value_analysis": value_analysis,
208 | }
209 |
210 | result = {"analysis_type": analysis_type, "summary": summary}
211 |
212 | if analysis_type == "pattern":
213 | result["patterns"] = self._analyze_patterns(records)
214 | elif analysis_type == "anomaly":
215 | result["anomalies"] = self._analyze_anomalies(records)
216 |
217 | return result
218 |
219 | def _analyze_patterns(self, records: List[LogRecord]) -> List[Dict[str, Any]]:
220 | """Analyze patterns in the log records."""
221 | patterns = []
222 |
223 | # Pattern analysis for fabric traces
224 | component_counts = {}
225 | level_counts = {}
226 |
227 | for record in records:
228 | # Analyze component patterns (assuming fabric traces format)
229 | if "field_4" in record.data: # Component field
230 | component = record.data["field_4"]
231 | component_counts[component] = component_counts.get(component, 0) + 1
232 |
233 | if "field_1" in record.data: # Level field
234 | level = record.data["field_1"]
235 | level_counts[level] = level_counts.get(level, 0) + 1
236 |
237 | if component_counts:
238 | patterns.append(
239 | {
240 | "type": "component_frequency",
241 | "description": "Most active components",
242 | "data": dict(
243 | sorted(
244 | component_counts.items(), key=lambda x: x[1], reverse=True
245 | )[:10]
246 | ),
247 | }
248 | )
249 |
250 | if level_counts:
251 | patterns.append(
252 | {
253 | "type": "log_level_distribution",
254 | "description": "Log level distribution",
255 | "data": level_counts,
256 | }
257 | )
258 |
259 | return patterns
260 |
261 | def _analyze_anomalies(self, records: List[LogRecord]) -> List[Dict[str, Any]]:
262 | """Analyze anomalies in the log records."""
263 | anomalies = []
264 |
265 | # Simple anomaly detection based on unusual patterns
266 | if len(records) > 100:
267 | # Check for unusual time gaps
268 | timestamps = [r.timestamp for r in records if r.timestamp is not None]
269 | if len(timestamps) > 1:
270 | time_diffs = [
271 | (timestamps[i + 1] - timestamps[i]).total_seconds()
272 | for i in range(len(timestamps) - 1)
273 | ]
274 | avg_diff = sum(time_diffs) / len(time_diffs)
275 | large_gaps = [diff for diff in time_diffs if diff > avg_diff * 10]
276 |
277 | if large_gaps:
278 | anomalies.append(
279 | {
280 | "type": "time_gap_anomaly",
281 | "description": f"Found {len(large_gaps)} unusually large time gaps",
282 | "details": f"Average gap: {avg_diff:.2f}s, Max gap: {max(large_gaps):.2f}s",
283 | }
284 | )
285 |
286 | return anomalies
287 |
```
--------------------------------------------------------------------------------
/docs/example-context-docs/mcp-ai-agent-dev-task.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP AI Agent Development Task
2 |
3 | ## Project Overview
4 | Build a command-line AI agent in Python that connects to and utilizes an MCP (Model Context Protocol) server running on the local network. The agent should provide an interactive CLI interface for users to communicate with the AI while leveraging MCP server capabilities.
5 |
6 | ## Project Structure
7 | ```
8 | mcp-ai-agent/
9 | ├── src/
10 | │ ├── __init__.py
11 | │ ├── mcp_client.py # MCP server connection handling
12 | │ ├── ai_agent.py # AI agent logic and processing
13 | │ ├── cli.py # Command-line interface
14 | │ └── config.py # Configuration management
15 | ├── tests/
16 | │ ├── __init__.py
17 | │ ├── test_mcp_client.py
18 | │ ├── test_ai_agent.py
19 | │ └── test_cli.py
20 | ├── .env.example
21 | ├── requirements.txt
22 | ├── setup.py
23 | ├── README.md
24 | └── main.py # Entry point
25 | ```
26 |
27 | ## Development Prompts for Claude Code
28 |
29 | ### Phase 1: Project Setup and Core Structure
30 |
31 | **Prompt 1.1 - Initialize Project**
32 | ```
33 | Create a new Python project directory called 'mcp-ai-agent' with the following:
34 | 1. Create the directory structure as shown above
35 | 2. Initialize a virtual environment
36 | 3. Create a requirements.txt with these dependencies:
37 | - python-dotenv>=1.0.0
38 | - httpx>=0.24.0
39 | - websockets>=11.0
40 | - pydantic>=2.0.0
41 | - click>=8.1.0
42 | - rich>=13.0.0
43 | - pytest>=7.0.0
44 | - pytest-asyncio>=0.21.0
45 | - pytest-mock>=3.10.0
46 | 4. Create a .env.example file with placeholders for MCP server configuration
47 | 5. Create a setup.py for package installation
48 | 6. Initialize git repository with appropriate .gitignore
49 | ```
50 |
51 | **Prompt 1.2 - Configuration Module**
52 | ```
53 | Create src/config.py that:
54 | 1. Uses pydantic BaseSettings for configuration management
55 | 2. Loads environment variables from .env file
56 | 3. Includes these configuration fields:
57 | - MCP_SERVER_URL (with validation for URL format)
58 | - MCP_SERVER_PORT (integer between 1-65535)
59 | - MCP_API_KEY (optional, for authenticated servers)
60 | - AI_MODEL (default to a standard model)
61 | - LOG_LEVEL (default to INFO)
62 | - CONNECTION_TIMEOUT (default to 30 seconds)
63 | - RETRY_ATTEMPTS (default to 3)
64 | 4. Implements validation and provides clear error messages
65 | 5. Include docstrings explaining each configuration option
66 | ```
67 |
68 | ### Phase 2: MCP Client Implementation
69 |
70 | **Prompt 2.1 - MCP Client Base**
71 | ```
72 | Create src/mcp_client.py with:
73 | 1. An async MCPClient class that handles connection to the MCP server
74 | 2. Methods for:
75 | - connect(): Establish connection (support both HTTP and WebSocket)
76 | - disconnect(): Clean shutdown
77 | - send_request(): Send requests to MCP server
78 | - receive_response(): Handle responses
79 | - ping(): Health check functionality
80 | 3. Implement connection retry logic with exponential backoff
81 | 4. Add proper error handling for network issues
82 | 5. Include logging for debugging
83 | 6. Support both synchronous and asynchronous operation modes
84 | ```
85 |
86 | **Prompt 2.2 - MCP Protocol Handling**
87 | ```
88 | Extend src/mcp_client.py to:
89 | 1. Implement the MCP protocol specification:
90 | - Message formatting (JSON-RPC 2.0 if applicable)
91 | - Request/response correlation
92 | - Error response handling
93 | 2. Add methods for common MCP operations:
94 | - list_tools(): Get available tools from server
95 | - execute_tool(): Execute a specific tool
96 | - get_context(): Retrieve context information
97 | 3. Implement message queuing for concurrent requests
98 | 4. Add request/response validation using pydantic models
99 | ```
100 |
101 | ### Phase 3: AI Agent Core
102 |
103 | **Prompt 3.1 - AI Agent Implementation**
104 | ```
105 | Create src/ai_agent.py with:
106 | 1. An AIAgent class that:
107 | - Initializes with an MCP client instance
108 | - Maintains conversation context
109 | - Processes user inputs and generates responses
110 | 2. Implement these methods:
111 | - process_message(): Main message handling
112 | - use_mcp_tool(): Decide when to use MCP tools
113 | - format_response(): Format AI responses for CLI
114 | 3. Add conversation memory management
115 | 4. Implement tool selection logic based on user queries
116 | 5. Include error recovery for failed MCP operations
117 | ```
118 |
119 | **Prompt 3.2 - AI Integration**
120 | ```
121 | Enhance src/ai_agent.py to:
122 | 1. Integrate with an AI model (use OpenAI API or similar as placeholder)
123 | 2. Implement prompt engineering for:
124 | - System prompts that explain available MCP tools
125 | - User message formatting
126 | - Tool usage instructions
127 | 3. Add response streaming support
128 | 4. Implement token counting and management
129 | 5. Add safety checks and content filtering
130 | ```
131 |
132 | ### Phase 4: CLI Interface
133 |
134 | **Prompt 4.1 - CLI Implementation**
135 | ```
136 | Create src/cli.py using Click framework:
137 | 1. Main command group with these subcommands:
138 | - start: Start interactive chat session
139 | - list-tools: Show available MCP tools
140 | - config: Display current configuration
141 | - test-connection: Test MCP server connectivity
142 | 2. For the interactive chat:
143 | - Use Rich for colorful output
144 | - Show typing indicators during processing
145 | - Support multi-line input (Ctrl+Enter)
146 | - Add command history
147 | - Include /help, /clear, /exit commands
148 | 3. Add progress bars for long operations
149 | 4. Implement graceful shutdown on Ctrl+C
150 | ```
151 |
152 | **Prompt 4.2 - Enhanced CLI Features**
153 | ```
154 | Extend src/cli.py with:
155 | 1. Session management:
156 | - Save/load conversation history
157 | - Export conversations to markdown
158 | 2. Advanced commands:
159 | - /tools: List and describe available tools
160 | - /use <tool>: Explicitly use a specific tool
161 | - /context: Show current context
162 | - /stats: Display session statistics
163 | 3. Add syntax highlighting for code blocks
164 | 4. Implement auto-completion for commands
165 | 5. Add configuration override options via CLI flags
166 | ```
167 |
168 | ### Phase 5: Testing Suite
169 |
170 | **Prompt 5.1 - Unit Tests**
171 | ```
172 | Create comprehensive unit tests:
173 | 1. tests/test_mcp_client.py:
174 | - Test connection establishment
175 | - Mock server responses
176 | - Test retry logic
177 | - Verify error handling
178 | 2. tests/test_ai_agent.py:
179 | - Test message processing
180 | - Mock MCP tool usage
181 | - Verify context management
182 | 3. tests/test_cli.py:
183 | - Test command parsing
184 | - Verify output formatting
185 | - Test interactive mode
186 | 4. Use pytest fixtures for common test setup
187 | 5. Aim for >80% code coverage
188 | ```
189 |
190 | **Prompt 5.2 - Integration Tests**
191 | ```
192 | Create tests/test_integration.py with:
193 | 1. End-to-end tests using a mock MCP server
194 | 2. Test full conversation flows
195 | 3. Verify tool execution chains
196 | 4. Test error recovery scenarios
197 | 5. Performance tests for concurrent operations
198 | 6. Create docker-compose.yml for test environment
199 | ```
200 |
201 | ### Phase 6: Main Entry Point and Documentation
202 |
203 | **Prompt 6.1 - Main Application**
204 | ```
205 | Create main.py that:
206 | 1. Serves as the application entry point
207 | 2. Handles initialization of all components
208 | 3. Implements proper async context management
209 | 4. Adds global exception handling
210 | 5. Includes signal handlers for graceful shutdown
211 | 6. Provides --debug flag for verbose logging
212 | ```
213 |
214 | **Prompt 6.2 - Documentation**
215 | ```
216 | Create comprehensive documentation:
217 | 1. README.md with:
218 | - Project description and features
219 | - Installation instructions
220 | - Quick start guide
221 | - Configuration options
222 | - Usage examples
223 | - Troubleshooting section
224 | 2. Add inline code documentation following Google style
225 | 3. Create docs/ directory with:
226 | - Architecture overview
227 | - MCP protocol details
228 | - API reference
229 | - Contributing guidelines
230 | ```
231 |
232 | ### Phase 7: Advanced Features
233 |
234 | **Prompt 7.1 - Plugin System**
235 | ```
236 | Implement a plugin system:
237 | 1. Create src/plugins/ directory structure
238 | 2. Define plugin interface for extending functionality
239 | 3. Add plugin loader in main application
240 | 4. Create example plugins:
241 | - Custom output formatters
242 | - Additional CLI commands
243 | - Response processors
244 | 5. Document plugin development
245 | ```
246 |
247 | **Prompt 7.2 - Performance and Monitoring**
248 | ```
249 | Add performance monitoring:
250 | 1. Implement metrics collection:
251 | - Response times
252 | - Token usage
253 | - Error rates
254 | - MCP server latency
255 | 2. Add optional Prometheus metrics endpoint
256 | 3. Create performance profiling mode
257 | 4. Add request/response logging with rotation
258 | 5. Implement caching for frequently used MCP tools
259 | ```
260 |
261 | ## Testing Instructions
262 |
263 | ### Manual Testing Checklist
264 | 1. **Connection Testing**
265 | - Test with valid MCP server
266 | - Test with invalid server URL
267 | - Test connection timeout
268 | - Test authentication (if applicable)
269 |
270 | 2. **Functionality Testing**
271 | - Send various user queries
272 | - Test all CLI commands
273 | - Verify tool execution
274 | - Test error scenarios
275 |
276 | 3. **Performance Testing**
277 | - Test with concurrent requests
278 | - Measure response times
279 | - Check memory usage
280 | - Test with long conversations
281 |
282 | ### Automated Testing
283 | ```bash
284 | # Run all tests
285 | pytest
286 |
287 | # Run with coverage
288 | pytest --cov=src --cov-report=html
289 |
290 | # Run specific test file
291 | pytest tests/test_mcp_client.py
292 |
293 | # Run integration tests
294 | pytest tests/test_integration.py -v
295 | ```
296 |
297 | ## Deployment Considerations
298 |
299 | 1. **Security**
300 | - Never commit .env files
301 | - Use secure communication with MCP server
302 | - Implement rate limiting
303 | - Add input sanitization
304 |
305 | 2. **Scalability**
306 | - Design for horizontal scaling
307 | - Implement connection pooling
308 | - Add request queuing
309 | - Consider caching strategies
310 |
311 | 3. **Monitoring**
312 | - Set up logging aggregation
313 | - Implement health checks
314 | - Add alerting for failures
315 | - Monitor resource usage
316 |
317 | ## Example Usage
318 |
319 | ```bash
320 | # Install the package
321 | pip install -e .
322 |
323 | # Configure environment
324 | cp .env.example .env
325 | # Edit .env with your MCP server details
326 |
327 | # Test connection
328 | mcp-agent test-connection
329 |
330 | # Start interactive session
331 | mcp-agent start
332 |
333 | # List available tools
334 | mcp-agent list-tools
335 |
336 | # Start with debug logging
337 | mcp-agent --debug start
338 | ```
339 |
340 | ## Additional Notes
341 |
342 | - Ensure all async operations use proper context managers
343 | - Implement graceful degradation when MCP server is unavailable
344 | - Add comprehensive error messages for better user experience
345 | - Consider implementing a web UI as a future enhancement
346 | - Keep dependencies minimal and well-maintained
347 | - Follow semantic versioning for releases
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/log_management_tools.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Core log management MCP tools.
3 | """
4 |
5 | import asyncio
6 | from datetime import datetime
7 | from typing import Any, Dict, Optional
8 |
9 | from mcp.server import FastMCP
10 | from pydantic import BaseModel, Field
11 |
12 |
13 | # Tool Models
14 | class RegisterLogSourceRequest(BaseModel):
15 |     """Request model for registering a log source."""
16 | 
17 |     name: str = Field(..., description="Unique name for the log source")  # key into the server's log_sources registry
18 |     source_type: str = Field(
19 |         ..., description="Type of log source (evt, etl, json, xml, csv, text)"
20 |     )  # may also be a parser alias; resolved against parser_aliases at registration
21 |     path: str = Field(..., description="Path to the log file or directory")
22 |     config: Dict[str, Any] = Field(
23 |         default_factory=dict, description="Additional configuration for the parser"
24 |     )  # stored as LogSource.metadata
25 | 
26 |
27 | class QueryLogsRequest(BaseModel):
28 | """Request model for querying logs."""
29 |
30 | source_name: str = Field(..., description="Name of the log source to query")
31 | filters: Dict[str, Any] = Field(
32 | default_factory=dict, description="Filters to apply"
33 | )
34 | start_time: datetime = Field(None, description="Start time for the query")
35 | end_time: datetime = Field(None, description="End time for the query")
36 | limit: int = Field(100, description="Maximum number of logs to return")
37 | offset: int = Field(0, description="Number of logs to skip")
38 |
39 |
40 | class AnalyzeLogsRequest(BaseModel):
41 | """Request model for analyzing logs."""
42 |
43 | source_name: str = Field(..., description="Name of the log source to analyze")
44 | analysis_type: str = Field(
45 | "summary", description="Type of analysis (summary, pattern, anomaly)"
46 | )
47 | filters: Dict[str, Any] = Field(
48 | default_factory=dict, description="Filters to apply before analysis"
49 | )
50 | start_time: datetime = Field(None, description="Start time for the analysis")
51 | end_time: datetime = Field(None, description="End time for the analysis")
52 |
53 |
54 | def register_log_management_tools(mcp: FastMCP):
55 |     """Register all log management tools with the MCP server."""
56 | 
57 |     @mcp.tool()
58 |     async def register_log_source(request: RegisterLogSourceRequest) -> Dict[str, Any]:
59 |         """
60 |         Register a new log source for analysis.
61 | 
62 |         This tool allows you to register various types of log sources including:
63 |         - Windows Event Logs (evt)
64 |         - Windows Event Trace Logs (etl)
65 |         - JSON logs
66 |         - XML logs
67 |         - CSV logs
68 |         - Unstructured text logs
69 |         """
70 |         from mcp_log_analyzer.core.models import LogSource  # imported at call time, not module load
71 | 
72 |         from ..server import log_sources, parsers, parser_aliases  # shared server-side registries
73 | 
74 |         if request.name in log_sources:
75 |             return {"error": f"Log source '{request.name}' already exists"}
76 | 
77 |         # Check if source_type is an alias and map it to the actual type
78 |         actual_source_type = parser_aliases.get(request.source_type, request.source_type)
79 | 
80 |         if actual_source_type not in parsers:
81 |             supported_types = list(parsers.keys())
82 |             return {"error": f"Unsupported source type: {request.source_type}. Supported types are: {', '.join(supported_types)}"}
83 | 
84 |         log_source = LogSource(
85 |             name=request.name,
86 |             type=actual_source_type,
87 |             path=request.path,
88 |             metadata=request.config,
89 |         )
90 | 
91 |         log_sources[request.name] = log_source
92 | 
93 |         # Persist state
94 |         from ..server import state_manager, get_log_sources
95 |         state_manager.save_log_sources(get_log_sources())
96 | 
97 |         return {
98 |             "message": f"Log source '{request.name}' registered successfully",
99 |             "source": log_source.model_dump(),
100 |         }
101 | 
102 |     @mcp.tool()
103 |     async def list_log_sources() -> Dict[str, Any]:
104 |         """
105 |         List all registered log sources.
106 | 
107 |         Returns information about all currently registered log sources
108 |         including their names, types, and paths.
109 |         """
110 |         from ..server import log_sources
111 | 
112 |         return {
113 |             "sources": [source.model_dump() for source in log_sources.values()],
114 |             "count": len(log_sources),
115 |         }
116 | 
117 |     @mcp.tool()
118 |     async def get_log_source(name: str) -> Dict[str, Any]:
119 |         """
120 |         Get details about a specific log source.
121 | 
122 |         Args:
123 |             name: The name of the log source to retrieve
124 |         """
125 |         from ..server import log_sources
126 | 
127 |         if name not in log_sources:
128 |             return {"error": f"Log source '{name}' not found"}
129 | 
130 |         return {"source": log_sources[name].model_dump()}
131 | 
132 |     @mcp.tool()
133 |     async def delete_log_source(name: str) -> Dict[str, Any]:
134 |         """
135 |         Delete a registered log source.
136 | 
137 |         Args:
138 |             name: The name of the log source to delete
139 |         """
140 |         from ..server import log_sources
141 | 
142 |         if name not in log_sources:
143 |             return {"error": f"Log source '{name}' not found"}
144 | 
145 |         # Get source details before deletion
146 |         source = log_sources[name]
147 | 
148 |         # Clean up ETL cache if this is an ETL source
149 |         if source.type == "etl":
150 |             try:
151 |                 from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser
152 |                 EtlCachedParser.cleanup_cache_for_source(source.path)
153 |             except Exception as e:
154 |                 # Log error but don't fail the deletion
155 |                 pass  # NOTE(review): cleanup errors (and 'e') are silently dropped — consider logging
156 | 
157 |         del log_sources[name]
158 | 
159 |         # Persist state
160 |         from ..server import state_manager, get_log_sources
161 |         state_manager.save_log_sources(get_log_sources())
162 | 
163 |         return {"message": f"Log source '{name}' deleted successfully"}
164 | 
165 |     @mcp.tool()
166 |     async def query_logs(request: QueryLogsRequest) -> Dict[str, Any]:
167 |         """
168 |         Query logs from a registered source.
169 | 
170 |         This tool allows you to:
171 |         - Filter logs by various criteria
172 |         - Specify time ranges
173 |         - Paginate through results
174 |         """
175 |         from ..server import log_sources, parsers
176 |         import logging
177 | 
178 |         logger = logging.getLogger(__name__)
179 | 
180 |         if request.source_name not in log_sources:
181 |             return {"error": f"Log source '{request.source_name}' not found"}
182 | 
183 |         source = log_sources[request.source_name]
184 |         parser = parsers[source.type]  # type was validated when the source was registered
185 | 
186 |         try:
187 |             # Use longer timeout for ETL files (10 minutes)
188 |             timeout = 600.0 if source.type == "etl" else 30.0
189 | 
190 |             # Log start of operation for ETL files
191 |             if source.type == "etl":
192 |                 import os
193 |                 file_size_mb = os.path.getsize(source.path) / (1024 * 1024)
194 |                 logger.info(f"Starting ETL query for {source.name} ({file_size_mb:.1f} MB file)")
195 | 
196 |             # Add timeout for log parsing
197 |             logs = await asyncio.wait_for(
198 |                 asyncio.to_thread(
199 |                     parser.parse,
200 |                     source.path,
201 |                     filters=request.filters,
202 |                     start_time=request.start_time,
203 |                     end_time=request.end_time,
204 |                     limit=request.limit,
205 |                     offset=request.offset,
206 |                 ),
207 |                 timeout=timeout
208 |             )
209 | 
210 |             logger.info(f"Successfully queried {len(logs)} logs from {request.source_name}")
211 | 
212 |             return {
213 |                 "logs": [log.model_dump() for log in logs],
214 |                 "count": len(logs),
215 |                 "source": request.source_name,
216 |             }
217 |         except asyncio.TimeoutError:
218 |             timeout = 600.0 if source.type == "etl" else 30.0  # recomputed here for the error message
219 |             logger.error(f"Query timed out after {timeout} seconds for {request.source_name}")
220 |             return {"error": f"Query timed out after {timeout} seconds. The log file may be too large or complex to parse."}
221 |         except Exception as e:
222 |             logger.error(f"Query failed for {request.source_name}: {str(e)}")
223 |             return {"error": f"Failed to query logs: {str(e)}"}
224 | 
225 |     @mcp.tool()
226 |     async def analyze_logs(request: AnalyzeLogsRequest) -> Dict[str, Any]:
227 |         """
228 |         Analyze logs from a registered source.
229 | 
230 |         Available analysis types:
231 |         - summary: General statistics and overview
232 |         - pattern: Pattern detection and frequency analysis
233 |         - anomaly: Anomaly detection
234 |         """
235 |         from ..server import log_sources, parsers
236 |         import logging
237 | 
238 |         logger = logging.getLogger(__name__)
239 | 
240 |         if request.source_name not in log_sources:
241 |             return {"error": f"Log source '{request.source_name}' not found"}
242 | 
243 |         source = log_sources[request.source_name]
244 |         parser = parsers[source.type]  # type was validated when the source was registered
245 | 
246 |         try:
247 |             # First, get the logs with timeout
248 |             timeout = 600.0 if source.type == "etl" else 30.0
249 | 
250 |             # Log start of operation for ETL files
251 |             if source.type == "etl":
252 |                 import os
253 |                 file_size_mb = os.path.getsize(source.path) / (1024 * 1024)
254 |                 logger.info(f"Starting ETL analysis for {source.name} ({file_size_mb:.1f} MB file)")
255 | 
256 |             logs = await asyncio.wait_for(
257 |                 asyncio.to_thread(
258 |                     parser.parse,
259 |                     source.path,
260 |                     filters=request.filters,
261 |                     start_time=request.start_time,
262 |                     end_time=request.end_time,
263 |                 ),
264 |                 timeout=timeout
265 |             )
266 | 
267 |             # Then analyze them
268 |             result = await asyncio.to_thread(
269 |                 parser.analyze, logs, analysis_type=request.analysis_type
270 |             )
271 | 
272 |             return {
273 |                 "result": result.model_dump(),
274 |                 "source": request.source_name,
275 |                 "analysis_type": request.analysis_type,
276 |             }
277 |         except asyncio.TimeoutError:
278 |             timeout = 600.0 if source.type == "etl" else 30.0  # recomputed here for the error message
279 |             logger.error(f"Analysis timed out after {timeout} seconds for {request.source_name}")
280 |             return {"error": f"Analysis timed out after {timeout} seconds. The log file may be too large or complex to parse."}
281 |         except Exception as e:
282 |             logger.error(f"Analysis failed for {request.source_name}: {str(e)}")
283 |             return {"error": f"Failed to analyze logs: {str(e)}"}
284 | 
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/api/server.py:
--------------------------------------------------------------------------------
```python
1 | """MCP server API implementation."""
2 |
3 | import argparse
4 | import logging
5 | import os
6 | from typing import Dict, List, Optional, Union
7 | from uuid import UUID
8 |
9 | import uvicorn
10 | from fastapi import Depends, FastAPI, HTTPException, Path, Query, Request
11 | from fastapi.middleware.cors import CORSMiddleware
12 | from fastapi.responses import JSONResponse
13 |
14 | from ..core.config import Config, load_config
15 | from ..core.models import (
16 | LogAnalysisRequest,
17 | LogAnalysisResponse,
18 | LogQueryRequest,
19 | LogQueryResponse,
20 | LogRecord,
21 | LogSource,
22 | LogSourceRequest,
23 | LogSourceResponse,
24 | LogType,
25 | MCPContext,
26 | MCPError,
27 | )
28 | from ..parsers import get_parser_for_type
29 |
# Configure logging at import time.
# NOTE(review): a later logging.basicConfig() call elsewhere in this file is a
# silent no-op unless it passes force=True, because this call already installs
# a root handler — confirm the intended precedence.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger("mcp_server")

# Create the FastAPI application that hosts all /api endpoints below.
app = FastAPI(
    title="MCP Log Analyzer",
    description="Model Context Protocol server for analyzing various types of logs",
    version="0.1.0",
)

# Add CORS middleware.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec and is overly permissive for
# production — confirm whether origins should be restricted.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Store for registered log sources, keyed by source UUID (process-local;
# contents are lost on restart).
log_sources: Dict[UUID, LogSource] = {}

# In-memory context store (in a real application, this would be a database)
contexts: Dict[UUID, MCPContext] = {}
59 |
def get_config() -> Config:
    """Load the application configuration.

    The configuration file path is taken from the ``MCP_CONFIG``
    environment variable when set; otherwise ``load_config`` falls back
    to its built-in default location.

    Returns:
        The application configuration.
    """
    return load_config(os.environ.get("MCP_CONFIG"))
68 |
69 |
@app.exception_handler(MCPError)
async def mcp_error_handler(request: Request, exc: MCPError) -> JSONResponse:
    """Translate an MCPError raised by any handler into a JSON error payload.

    Args:
        request: The incoming request (unused, required by FastAPI's signature).
        exc: The MCPError carrying the status code and message.

    Returns:
        JSON response with error details.
    """
    payload = {"status": "error", "error": exc.message}
    return JSONResponse(status_code=exc.status_code, content=payload)
85 |
86 |
@app.get("/api/health")
async def health() -> Dict[str, str]:
    """Liveness probe: always reports the service as up.

    Returns:
        Health status.
    """
    status: Dict[str, str] = {"status": "ok"}
    return status
95 |
96 |
@app.post("/api/sources", response_model=LogSourceResponse)
async def register_source(
    request: LogSourceRequest, config: Config = Depends(get_config)
) -> LogSourceResponse:
    """Register a log source.

    Args:
        request: The log source registration request.
        config: The application configuration.

    Returns:
        The registered log source.

    Raises:
        MCPError: If the log source fails validation (HTTP 400).
    """
    logger.info(f"Registering log source: {request.name} ({request.type})")

    # Create log source
    source = LogSource(
        name=request.name,
        type=request.type,
        path=request.path,
        metadata=request.metadata,
    )

    # Validate the source with the parser registered for this log type.
    try:
        parser = get_parser_for_type(source.type, config.parsers)
        if not parser.validate_file(source.path):
            raise MCPError(f"Invalid log source: {source.path}", status_code=400)
    except MCPError:
        # Bug fix: the deliberate MCPError raised above was previously caught
        # by the generic handler below, logged as an unexpected failure, and
        # re-wrapped — so the client saw "Error validating log source:
        # Invalid log source: ..." instead of the intended message.
        raise
    except Exception as e:
        logger.exception(f"Error validating log source: {e}")
        raise MCPError(f"Error validating log source: {str(e)}", status_code=400) from e

    # Store the log source, keyed by its generated UUID.
    log_sources[source.id] = source

    # Create response
    return LogSourceResponse(request_id=request.request_id, source=source)
137 |
138 |
@app.get("/api/sources", response_model=List[LogSource])
async def list_sources() -> List[LogSource]:
    """Return every registered log source, in registration order.

    Returns:
        List of registered log sources.
    """
    return [source for source in log_sources.values()]
147 |
148 |
@app.get("/api/sources/{source_id}", response_model=LogSource)
async def get_source(source_id: UUID = Path(..., description="Source ID")) -> LogSource:
    """Look up a registered log source by its ID.

    Args:
        source_id: The source ID.

    Returns:
        The log source.

    Raises:
        MCPError: If no source with the given ID is registered (404).
    """
    source = log_sources.get(source_id)
    if source is None:
        raise MCPError(f"Log source not found: {source_id}", status_code=404)
    return source
165 |
166 |
@app.delete("/api/sources/{source_id}")
async def delete_source(
    source_id: UUID = Path(..., description="Source ID")
) -> Dict[str, str]:
    """Remove a registered log source.

    Args:
        source_id: The source ID.

    Returns:
        Success message.

    Raises:
        MCPError: If no source with the given ID is registered (404).
    """
    removed = log_sources.pop(source_id, None)
    if removed is None:
        raise MCPError(f"Log source not found: {source_id}", status_code=404)
    return {"status": "success", "message": f"Log source {source_id} deleted"}
186 |
187 |
@app.post("/api/query", response_model=LogQueryResponse)
async def query_logs(
    request: LogQueryRequest, config: Config = Depends(get_config)
) -> LogQueryResponse:
    """Query logs across registered sources.

    Loads every record from each selected source into memory, applies
    time-range and exact-match field filters, then returns one page of
    the combined result.

    Args:
        request: The log query request.
        config: The application configuration.

    Returns:
        The query response (paginated records plus a total count).
    """
    query = request.query
    logger.info(f"Querying logs: {query}")

    # Accumulates the filtered records from all sources, in source order.
    records: List[LogRecord] = []
    total_records = 0

    # Record a context for this request (in-memory; see `contexts` above).
    context = MCPContext(request_id=request.request_id, client_id=request.client_id)
    contexts[request.request_id] = context

    # If source_ids is specified, restrict to those IDs; unknown IDs are
    # silently dropped. Otherwise query every registered source.
    source_filter = {}
    if query.source_ids:
        source_filter = {
            sid: log_sources.get(sid) for sid in query.source_ids if sid in log_sources
        }
    else:
        source_filter = log_sources

    # Narrow further by log type if the query specifies any.
    if query.types:
        source_filter = {
            sid: source
            for sid, source in source_filter.items()
            if source.type in query.types
        }

    # Get records from each source
    for source_id, source in source_filter.items():
        try:
            parser = get_parser_for_type(source.type, config.parsers)
            # Materializes the whole file; no streaming or limit push-down.
            source_records = list(parser.parse_file(source, source.path))
            # NOTE(review): `total` counts records *before* the time/field
            # filters below, so it can exceed the number of matching
            # records — confirm this is the intended semantics.
            total_records += len(source_records)

            # Time-range filter: records without a timestamp are dropped
            # whenever either bound is given.
            if query.start_time:
                source_records = [
                    r
                    for r in source_records
                    if r.timestamp and r.timestamp >= query.start_time
                ]
            if query.end_time:
                source_records = [
                    r
                    for r in source_records
                    if r.timestamp and r.timestamp <= query.end_time
                ]

            # Custom filters: exact equality on fields of the record's data.
            for field, value in query.filters.items():
                source_records = [
                    r
                    for r in source_records
                    if field in r.data and r.data[field] == value
                ]

            records.extend(source_records)
        except Exception as e:
            logger.exception(f"Error parsing log source {source_id}: {e}")
            # Continue with other sources on error

    # Paginate over the filtered, concatenated records.
    start = query.offset
    end = query.offset + query.limit
    paginated_records = records[start:end] if start < len(records) else []

    return LogQueryResponse(
        request_id=request.request_id,
        records=paginated_records,
        total=total_records,
        limit=query.limit,
        offset=query.offset,
    )
275 |
276 |
@app.post("/api/analyze", response_model=LogAnalysisResponse)
async def analyze_logs(
    request: LogAnalysisRequest, config: Config = Depends(get_config)
) -> LogAnalysisResponse:
    """Analyze logs.

    Placeholder implementation: instead of running real analysis modules,
    it echoes back a summary describing the query that was submitted.

    Args:
        request: The log analysis request.
        config: The application configuration.

    Returns:
        The analysis response.
    """
    logger.info(f"Analyzing logs: {request.analysis_type}")

    query = request.query

    # Derive the summary figures up front, then assemble the payload.
    if query.source_ids:
        source_count = len(query.source_ids)
    else:
        source_count = len(log_sources)

    type_count = len(query.types) if query.types else len(LogType)
    start_repr = str(query.start_time) if query.start_time else "Not specified"
    end_repr = str(query.end_time) if query.end_time else "Not specified"

    analysis_results = {
        "analysis_type": request.analysis_type,
        "parameters": request.parameters,
        "summary": "Analysis completed successfully",
        "details": {
            "source_count": source_count,
            "type_count": type_count,
            "start_time": start_repr,
            "end_time": end_repr,
        },
    }

    return LogAnalysisResponse(
        request_id=request.request_id,
        results=analysis_results,
        query=request.query,
    )
326 |
327 |
def main() -> None:
    """Run the MCP server.

    Parses CLI arguments, loads configuration (CLI flag takes precedence
    over the MCP_CONFIG environment variable), applies the configured
    logging settings, and starts uvicorn with the FastAPI app.
    """
    parser = argparse.ArgumentParser(description="MCP Log Analyzer Server")
    parser.add_argument(
        "--config",
        help="Path to configuration file",
        default=os.environ.get("MCP_CONFIG"),
    )
    parser.add_argument("--host", help="Host to bind to", default=None)
    parser.add_argument("--port", help="Port to bind to", type=int, default=None)
    parser.add_argument("--reload", help="Enable auto-reload", action="store_true")
    args = parser.parse_args()

    # Load configuration; export the path so reload/worker subprocesses and
    # get_config() resolve the same file.
    if args.config:
        os.environ["MCP_CONFIG"] = args.config
    config = load_config(args.config)

    # Command line arguments override configured host/port.
    host = args.host or config.server.host
    port = args.port or config.server.port

    # Configure logging. Bug fix: basicConfig() already ran at import time
    # (module top level), so without force=True this second call was a
    # silent no-op and the configured level/format never took effect.
    log_level = getattr(logging, config.logging.level.upper(), logging.INFO)
    logging.basicConfig(level=log_level, format=config.logging.format, force=True)
    if config.logging.file:
        handler = logging.FileHandler(config.logging.file)
        handler.setFormatter(logging.Formatter(config.logging.format))
        logger.addHandler(handler)

    # Start server
    logger.info(f"Starting MCP server at {host}:{port}")
    uvicorn.run(
        "mcp_log_analyzer.api.server:app",
        host=host,
        port=port,
        reload=args.reload,
        log_level="info" if not config.server.debug else "debug",
    )
371 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/windows_resources.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Windows system monitoring MCP resources.
3 | """
4 |
5 | import platform
6 | from datetime import datetime
7 |
8 | from mcp.server import FastMCP
9 |
10 |
def register_windows_resources(mcp: FastMCP):
    """Register all Windows-related resources with the MCP server.

    Registers three resources over the System and Application event logs:
    last-N entries, a trailing time window, and an explicit time range.
    All handlers return plain-text reports and are Windows-only.
    """

    from ..server import parse_time_param

    def _format_event(event, log_type: str) -> list:
        """Return the display lines for one event-log record.

        Emits time, source, the unsigned 32-bit event ID, a coarse type
        label, and (best-effort) the first 200 characters of the message.
        """
        lines = [
            f"\nTime: {event.TimeGenerated}",
            f"Source: {event.SourceName}",
            f"Event ID: {event.EventID & 0xFFFFFFFF}",  # mask to unsigned 32-bit
            f"Type: {['Info', 'Warning', 'Error'][event.EventType - 1] if event.EventType <= 3 else 'Unknown'}",
        ]
        try:
            import win32evtlogutil

            message = win32evtlogutil.SafeFormatMessage(event, log_type)
            if message:
                lines.append(f"Message: {message[:200]}...")
        except Exception:  # was a bare except; message formatting is best-effort
            lines.append("Message: (Unable to format message)")
        return lines

    @mcp.resource("system://windows-event-logs")
    async def get_windows_event_logs() -> str:
        """
        Get Windows System and Application event logs with default parameters.

        Use parameterized versions for more control:
        - system://windows-event-logs/last/20 - Last 20 entries
        - system://windows-event-logs/time/30m - Last 30 minutes
        - system://windows-event-logs/range/2025-01-07 13:00/2025-01-07 14:00 - Time range
        """
        # Default to last 10 entries
        return await get_windows_event_logs_with_count("10")

    @mcp.resource("system://windows-event-logs/last/{count}")
    async def get_windows_event_logs_with_count(count: str) -> str:
        """
        Get recent Windows System and Application event logs by count.

        Args:
            count: Number of entries to retrieve (e.g., "20")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            import win32evtlog
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            max_count = int(count)
            result = [f"=== Windows Event Logs (Last {max_count} entries) ===\n"]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    try:
                        flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                        # Bug fix: the loop counter used to shadow the `count`
                        # parameter; a distinct name keeps the parameter intact.
                        fetched = 0
                        while fetched < max_count:
                            events = win32evtlog.ReadEventLog(hand, flags, 0)
                            if not events:
                                break  # No more events to read

                            for event in events:
                                if fetched >= max_count:
                                    break
                                result.extend(_format_event(event, log_type))
                                fetched += 1
                    finally:
                        # Bug fix: the handle previously leaked when
                        # ReadEventLog raised; always close it.
                        win32evtlog.CloseEventLog(hand)

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError:
            return f"Invalid count parameter: {count}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"

    @mcp.resource("system://windows-event-logs/time/{duration}")
    async def get_windows_event_logs_by_time(duration: str) -> str:
        """
        Get Windows event logs from the last N minutes/hours/days.

        Args:
            duration: Time duration (e.g., "30m", "2h", "1d")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            start_time = parse_time_param(duration)
            if not start_time:
                return "Invalid duration format. Use format like '30m', '2h', or '1d'."

            import win32evtlog
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            result = [
                f"=== Windows Event Logs (Since {start_time.strftime('%Y-%m-%d %H:%M:%S')}) ===\n"
            ]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    try:
                        flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                        event_count = 0
                        done_reading = False

                        while not done_reading:
                            events = win32evtlog.ReadEventLog(hand, flags, 0)
                            if not events:
                                break  # No more events to read

                            for event in events:
                                # Events arrive newest-first; stop once we pass
                                # the start of the window.
                                if event.TimeGenerated < start_time:
                                    done_reading = True
                                    break

                                result.extend(_format_event(event, log_type))
                                event_count += 1
                    finally:
                        # Bug fix: close the handle even on read errors.
                        win32evtlog.CloseEventLog(hand)

                    result.append(f"\n{log_type}: {event_count} events found")

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"

    @mcp.resource("system://windows-event-logs/range/{start}/{end}")
    async def get_windows_event_logs_by_range(start: str, end: str) -> str:
        """
        Get Windows event logs within a specific time range.

        Args:
            start: Start time (e.g., "2025-01-07 13:00")
            end: End time (e.g., "2025-01-07 14:00")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            start_time = parse_time_param(start)
            end_time = parse_time_param(end)

            if not start_time or not end_time:
                return "Invalid time format. Use format like '2025-01-07 13:00'."

            import win32evtlog
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            result = [
                f"=== Windows Event Logs ({start_time.strftime('%Y-%m-%d %H:%M')} to {end_time.strftime('%Y-%m-%d %H:%M')}) ===\n"
            ]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    try:
                        flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                        event_count = 0
                        done_reading = False

                        while not done_reading:
                            events = win32evtlog.ReadEventLog(hand, flags, 0)
                            if not events:
                                break  # No more events to read

                            for event in events:
                                # Newest-first: anything older than the range
                                # start means we are done with this log.
                                if event.TimeGenerated < start_time:
                                    done_reading = True
                                    break

                                # Skip events newer than the range end.
                                if event.TimeGenerated > end_time:
                                    continue

                                result.extend(_format_event(event, log_type))
                                event_count += 1
                    finally:
                        # Bug fix: close the handle even on read errors.
                        win32evtlog.CloseEventLog(hand)

                    result.append(f"\n{log_type}: {event_count} events found")

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"
256 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/etl_windows_parser.py:
--------------------------------------------------------------------------------
```python
1 | """Windows ETL parser using native Windows tools as fallback."""
2 |
3 | import json
4 | import os
5 | import platform
6 | import subprocess
7 | import tempfile
8 | from datetime import datetime
9 | from pathlib import Path
10 | from typing import Any, Dict, Iterator, List, Optional, Union
11 | from uuid import uuid4
12 |
13 | from ..core.models import LogRecord, LogSource, LogType
14 | from .base import BaseParser
15 |
16 |
class EtlWindowsParser(BaseParser):
    """ETL parser using Windows native tools (tracerpt.exe).

    Converts a binary .etl trace file to CSV via tracerpt.exe and yields
    one LogRecord per CSV row. Only usable on Windows hosts where
    tracerpt.exe can be located.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize Windows ETL parser.

        Args:
            config: Parser configuration.
        """
        super().__init__(config)
        self.use_tracerpt = self.config.get("use_tracerpt", True)
        # Resolved path to tracerpt.exe, or None when not found.
        self.tracerpt_path = self._find_tracerpt()

    def _find_tracerpt(self) -> Optional[str]:
        """Locate tracerpt.exe on the system.

        Returns:
            Absolute path to tracerpt.exe, or None on non-Windows hosts or
            when the tool cannot be found.
        """
        if platform.system() != "Windows":
            return None

        # Common locations for tracerpt.exe
        possible_paths = [
            r"C:\Windows\System32\tracerpt.exe",
            r"C:\Windows\SysWOW64\tracerpt.exe",
        ]

        for path in possible_paths:
            if os.path.exists(path):
                return path

        # Fall back to searching PATH via the `where` command.
        try:
            result = subprocess.run(
                ["where", "tracerpt.exe"],
                capture_output=True,
                text=True,
                check=False
            )
            if result.returncode == 0 and result.stdout.strip():
                # `where` may print several matches; use the first.
                return result.stdout.strip().split('\n')[0]
        except Exception:  # was a bare except; PATH lookup is best-effort
            pass

        return None

    def is_available(self) -> bool:
        """Check if Windows ETL parsing is available."""
        return self.tracerpt_path is not None

    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse ETL log records using Windows tracerpt.

        Runs tracerpt to convert the ETL file to a temporary CSV, then
        yields one LogRecord per CSV row.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.

        Yields:
            LogRecord objects parsed from the ETL file.

        Raises:
            RuntimeError: If tracerpt is unavailable, fails, or times out.
            FileNotFoundError: If the ETL file does not exist.
            ValueError: If the file does not have a .etl extension.
        """
        if not self.is_available():
            raise RuntimeError(
                "Windows ETL parsing is not available. tracerpt.exe not found."
            )

        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"ETL file not found: {file_path}")

        if not str(path).lower().endswith('.etl'):
            raise ValueError(f"File does not appear to be an ETL file: {file_path}")

        # The converted CSV lives in a temp dir that is removed on exit.
        with tempfile.TemporaryDirectory() as temp_dir:
            output_file = os.path.join(temp_dir, "output.csv")

            try:
                # Run tracerpt to convert ETL to CSV
                cmd = [
                    self.tracerpt_path,
                    str(path),
                    "-o", output_file,
                    "-of", "CSV",
                    "-y"  # Overwrite without prompting
                ]

                result = subprocess.run(
                    cmd,
                    capture_output=True,
                    text=True,
                    timeout=300  # 5 minute timeout
                )

                if result.returncode != 0:
                    raise RuntimeError(
                        f"tracerpt failed with code {result.returncode}: {result.stderr}"
                    )

                # Parse the CSV output, tolerating odd bytes in messages.
                if os.path.exists(output_file):
                    with open(output_file, 'r', encoding='utf-8', errors='ignore') as f:
                        import csv
                        reader = csv.DictReader(f)

                        for row in reader:
                            log_record = self._convert_csv_row(source, row)
                            if log_record:
                                yield log_record

            except subprocess.TimeoutExpired:
                raise RuntimeError("tracerpt timed out after 5 minutes")
            except Exception as e:
                raise RuntimeError(f"Failed to parse ETL file: {e}")

    def _convert_csv_row(self, source: LogSource, row: Dict[str, str]) -> Optional[LogRecord]:
        """Convert a CSV row from tracerpt to a LogRecord.

        Args:
            source: The log source information.
            row: CSV row dictionary.

        Returns:
            LogRecord or None if conversion fails.
        """
        try:
            # Common tracerpt CSV columns
            record_data = {}

            # Map well-known tracerpt columns onto stable record field names.
            field_mappings = {
                "Event Name": "event_name",
                "Type": "event_type",
                "Event ID": "event_id",
                "Version": "version",
                "Channel": "channel",
                "Level": "level",
                "Task": "task",
                "Opcode": "opcode",
                "Keyword": "keywords",
                "PID": "process_id",
                "TID": "thread_id",
                "Processor Number": "processor",
                "Instance ID": "instance_id",
                "Parent Instance ID": "parent_instance_id",
                "Activity ID": "activity_id",
                "Related Activity ID": "related_activity_id",
                "Provider Name": "provider_name",
                "Provider ID": "provider_id",
                "Message": "message",
                "Process Name": "process_name",
            }

            for csv_field, record_field in field_mappings.items():
                if csv_field in row and row[csv_field]:
                    record_data[record_field] = row[csv_field]

            # Try to parse the timestamp; leave None when the format differs.
            timestamp = None
            if "Clock-Time" in row:
                try:
                    timestamp = datetime.strptime(
                        row["Clock-Time"],
                        "%Y-%m-%d %H:%M:%S.%f"
                    )
                except ValueError:  # was a bare except; only parse errors expected
                    pass

            # Preserve any unmapped columns under normalized snake_case keys.
            for key, value in row.items():
                if key not in field_mappings and value:
                    clean_key = key.lower().replace(' ', '_').replace('-', '_')
                    record_data[clean_key] = value

            return LogRecord(
                source_id=source.id,
                timestamp=timestamp,
                data=record_data,
                raw_content=None  # CSV rows are already processed
            )

        except Exception as e:
            if self.config.get("verbose", False):
                print(f"Failed to convert CSV row: {e}")
            return None

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse ETL log records from content string.

        Note: ETL files are binary and cannot be parsed from string content.

        Args:
            source: The log source information.
            content: String content (not supported for ETL).

        Raises:
            NotImplementedError: ETL files must be parsed from file.
        """
        raise NotImplementedError(
            "ETL files are binary and must be parsed from file, not string content"
        )

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Validate if the file can be parsed by this parser.

        Args:
            file_path: Path to validate.

        Returns:
            True if file appears to be an ETL file and tracerpt is available.
        """
        path = Path(file_path)

        # Check file extension
        if not str(path).lower().endswith('.etl'):
            return False

        # Check if file exists and is a regular file
        if not path.exists() or not path.is_file():
            return False

        # Check if we have tracerpt available
        if not self.is_available():
            return False

        return True

    def parse(
        self, path: str, filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime] = None, end_time: Optional[datetime] = None,
        limit: int = 1000, offset: int = 0
    ) -> List[LogRecord]:
        """Parse ETL file with filtering and pagination.

        Args:
            path: Path to the ETL file.
            filters: Optional filters to apply.
            start_time: Optional start time filter.
            end_time: Optional end time filter.
            limit: Maximum number of records to return.
            offset: Number of records to skip (after filtering).

        Returns:
            List of LogRecord objects.
        """
        # Create a temporary log source for parsing
        temp_source = LogSource(
            name="temp_etl",
            type=LogType.ETL,
            path=path,
            metadata={}
        )

        records = []
        skipped = 0

        for record in self.parse_file(temp_source, path):
            # Time filters only apply to records that carry a timestamp.
            if start_time and record.timestamp and record.timestamp < start_time:
                continue
            if end_time and record.timestamp and record.timestamp > end_time:
                continue

            # Apply custom filters
            if filters:
                if not self._match_filters(record, filters):
                    continue

            # Pagination: skip `offset` matching records before collecting.
            if skipped < offset:
                skipped += 1
                continue

            records.append(record)

            # Stop early once the page is full.
            if len(records) >= limit:
                break

        return records

    def _match_filters(self, record: LogRecord, filters: Dict[str, Any]) -> bool:
        """Check if a record matches the provided filters.

        A list filter value means "any of these"; a scalar means exact match.

        Args:
            record: The log record to check.
            filters: Dictionary of filters to apply.

        Returns:
            True if record matches all filters.
        """
        for key, value in filters.items():
            record_value = record.data.get(key)

            if isinstance(value, list):
                if record_value not in value:
                    return False
            else:
                if record_value != value:
                    return False

        return True
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/etl_parser.py:
--------------------------------------------------------------------------------
```python
1 | """Windows ETL (Event Trace Log) file parser implementation."""
2 |
3 | import os
4 | import platform
5 | from datetime import datetime
6 | from pathlib import Path
7 | from typing import Any, Dict, Iterator, List, Optional, Union
8 | from uuid import uuid4
9 |
10 | from ..core.models import LogRecord, LogSource, LogType
11 | from .base import BaseParser
12 |
13 |
14 | class EtlParser(BaseParser):
15 | """Parser for Windows ETL (Event Trace Log) files."""
16 |
17 | def __init__(self, config: Optional[Dict[str, Any]] = None):
18 | """Initialize ETL parser.
19 |
20 | Args:
21 | config: Parser configuration.
22 | """
23 | super().__init__(config)
24 | self.etl_parser = None
25 | self.windows_parser = None
26 | self._init_parser()
27 |
28 | def _init_parser(self):
29 | """Initialize the ETL parser library."""
30 | try:
31 | # Try to import etl-parser library
32 | import etl_parser
33 | self.etl_parser = etl_parser
34 | except ImportError:
35 | self.etl_parser = None
36 | # Try to use Windows native parser as fallback
37 | try:
38 | from .etl_windows_parser import EtlWindowsParser
39 | self.windows_parser = EtlWindowsParser()
40 | if not self.windows_parser.is_available():
41 | self.windows_parser = None
42 | except:
43 | self.windows_parser = None
44 |
45 | def is_available(self) -> bool:
46 | """Check if ETL parsing is available."""
47 | return self.etl_parser is not None or self.windows_parser is not None
48 |
    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse ETL log records from a file.

        Backend cascade: (1) cached parser, (2) streaming parser for files
        over 50 MB when the cached parser failed, (3) the etl-parser
        library, (4) the Windows tracerpt-based parser.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.

        Yields:
            LogRecord objects parsed from the ETL file.

        Raises:
            RuntimeError: If no backend is available or all backends fail.
            FileNotFoundError: If the ETL file does not exist.
            ValueError: If the file does not have a .etl extension.
        """
        if not self.is_available():
            raise RuntimeError(
                "ETL parsing is not available. Please install etl-parser: pip install etl-parser "
                "or ensure tracerpt.exe is available on Windows."
            )

        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"ETL file not found: {file_path}")

        if not str(path).lower().endswith('.etl'):
            raise ValueError(f"File does not appear to be an ETL file: {file_path}")

        # Always try cached parser first for better performance.
        # NOTE(review): if a backend raises after yielding some records, the
        # next backend restarts from the beginning, so callers may receive
        # duplicate records — confirm whether this matters for consumers.
        try:
            from .etl_cached_parser import EtlCachedParser
            cached_parser = EtlCachedParser(self.config)
            if cached_parser.is_available():
                yield from cached_parser.parse_file(source, file_path)
                return
        except Exception as e:
            # Cached parser failed: for large files, try the streaming
            # parser before dropping through to the generic backends.
            file_size_mb = path.stat().st_size / (1024 * 1024)
            if file_size_mb > 50:  # Use streaming parser for files > 50MB
                try:
                    from .etl_large_file_parser import EtlLargeFileParser
                    large_parser = EtlLargeFileParser(self.config)
                    if large_parser.is_available():
                        yield from large_parser.parse_file(source, file_path)
                        return
                except Exception as e:
                    # Fall back to regular parsing below.
                    pass

        # Try the etl-parser library if it imported successfully.
        if self.etl_parser is not None:
            try:
                # Create an ETL parser instance
                from etl_parser import ETL, ETLParser, build_from_stream

                # Parse the ETL file
                with open(path, 'rb') as etl_file:
                    parser = ETLParser(etl_file)

                    # Process all records in the ETL file
                    for record in parser:
                        # Convert ETL record to LogRecord; None results
                        # (failed conversions) are silently skipped.
                        log_record = self._convert_etl_record(source, record)
                        if log_record:
                            yield log_record
                    return  # Success, exit
            except Exception as e:
                # If etl-parser fails and there is no Windows fallback,
                # surface the failure; otherwise fall through to it.
                if self.windows_parser is None:
                    raise RuntimeError(f"Failed to parse ETL file: {e}")

        # Fall back to Windows native parser
        if self.windows_parser is not None:
            try:
                yield from self.windows_parser.parse_file(source, file_path)
            except Exception as e:
                raise RuntimeError(f"Failed to parse ETL file with Windows parser: {e}")
        else:
            raise RuntimeError("No ETL parser available")
125 |
126 | def _convert_etl_record(self, source: LogSource, etl_record: Any) -> Optional[LogRecord]:
127 | """Convert an ETL record to a LogRecord.
128 |
129 | Args:
130 | source: The log source information.
131 | etl_record: The ETL record from etl-parser.
132 |
133 | Returns:
134 | LogRecord or None if conversion fails.
135 | """
136 | try:
137 | # Extract common fields from ETL record
138 | record_data = {
139 | "provider_name": getattr(etl_record, "provider_name", "Unknown"),
140 | "event_id": getattr(etl_record, "event_id", 0),
141 | "level": getattr(etl_record, "level", 0),
142 | "task": getattr(etl_record, "task", 0),
143 | "opcode": getattr(etl_record, "opcode", 0),
144 | "keywords": getattr(etl_record, "keywords", 0),
145 | "process_id": getattr(etl_record, "process_id", 0),
146 | "thread_id": getattr(etl_record, "thread_id", 0),
147 | }
148 |
149 | # Try to get timestamp
150 | timestamp = None
151 | if hasattr(etl_record, "system_time"):
152 | timestamp = etl_record.system_time
153 | elif hasattr(etl_record, "timestamp"):
154 | timestamp = etl_record.timestamp
155 |
156 | # Try to get event data
157 | if hasattr(etl_record, "user_data"):
158 | record_data["user_data"] = etl_record.user_data
159 | elif hasattr(etl_record, "event_data"):
160 | record_data["event_data"] = etl_record.event_data
161 |
162 | # Add any extended data
163 | if hasattr(etl_record, "extended_data"):
164 | record_data["extended_data"] = etl_record.extended_data
165 |
166 | # Create LogRecord
167 | return LogRecord(
168 | source_id=source.id,
169 | timestamp=timestamp,
170 | data=record_data,
171 | raw_content=str(etl_record) if self.config.get("include_raw", False) else None
172 | )
173 |
174 | except Exception as e:
175 | # Log error but continue processing
176 | if self.config.get("verbose", False):
177 | print(f"Failed to convert ETL record: {e}")
178 | return None
179 |
180 | def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
181 | """Parse ETL log records from content string.
182 |
183 | Note: ETL files are binary and cannot be parsed from string content.
184 |
185 | Args:
186 | source: The log source information.
187 | content: String content (not supported for ETL).
188 |
189 | Raises:
190 | NotImplementedError: ETL files must be parsed from file.
191 | """
192 | raise NotImplementedError(
193 | "ETL files are binary and must be parsed from file, not string content"
194 | )
195 |
196 | def validate_file(self, file_path: Union[str, Path]) -> bool:
197 | """Validate if the file can be parsed by this parser.
198 |
199 | Args:
200 | file_path: Path to validate.
201 |
202 | Returns:
203 | True if file appears to be an ETL file.
204 | """
205 | path = Path(file_path)
206 |
207 | # Check file extension
208 | if not str(path).lower().endswith('.etl'):
209 | return False
210 |
211 | # Check if file exists and is readable
212 | if not path.exists() or not path.is_file():
213 | return False
214 |
215 | # Check if we have any parser available
216 | if not self.is_available():
217 | return False
218 |
219 | # Could add binary file signature check here
220 | # ETL files typically start with specific magic bytes
221 |
222 | return True
223 |
224 | def parse(
225 | self, path: str, filters: Optional[Dict[str, Any]] = None,
226 | start_time: Optional[datetime] = None, end_time: Optional[datetime] = None,
227 | limit: int = 1000, offset: int = 0
228 | ) -> List[LogRecord]:
229 | """Parse ETL file with filtering and pagination.
230 |
231 | Args:
232 | path: Path to the ETL file.
233 | filters: Optional filters to apply.
234 | start_time: Optional start time filter.
235 | end_time: Optional end time filter.
236 | limit: Maximum number of records to return.
237 | offset: Number of records to skip.
238 |
239 | Returns:
240 | List of LogRecord objects.
241 | """
242 | # Create a temporary log source for parsing
243 | temp_source = LogSource(
244 | name="temp_etl",
245 | type=LogType.ETL,
246 | path=path,
247 | metadata={}
248 | )
249 |
250 | records = []
251 | skipped = 0
252 |
253 | for record in self.parse_file(temp_source, path):
254 | # Apply time filters
255 | if start_time and record.timestamp and record.timestamp < start_time:
256 | continue
257 | if end_time and record.timestamp and record.timestamp > end_time:
258 | continue
259 |
260 | # Apply custom filters
261 | if filters:
262 | if not self._match_filters(record, filters):
263 | continue
264 |
265 | # Handle pagination
266 | if skipped < offset:
267 | skipped += 1
268 | continue
269 |
270 | records.append(record)
271 |
272 | if len(records) >= limit:
273 | break
274 |
275 | return records
276 |
277 | def _match_filters(self, record: LogRecord, filters: Dict[str, Any]) -> bool:
278 | """Check if a record matches the provided filters.
279 |
280 | Args:
281 | record: The log record to check.
282 | filters: Dictionary of filters to apply.
283 |
284 | Returns:
285 | True if record matches all filters.
286 | """
287 | for key, value in filters.items():
288 | record_value = record.data.get(key)
289 |
290 | # Handle different filter types
291 | if isinstance(value, list):
292 | # Match any value in list
293 | if record_value not in value:
294 | return False
295 | elif isinstance(value, dict):
296 | # Handle complex filters (e.g., {"$gte": 4} for level >= 4)
297 | if not self._match_complex_filter(record_value, value):
298 | return False
299 | else:
300 | # Exact match
301 | if record_value != value:
302 | return False
303 |
304 | return True
305 |
306 | def _match_complex_filter(self, value: Any, filter_spec: Dict[str, Any]) -> bool:
307 | """Match a value against a complex filter specification.
308 |
309 | Args:
310 | value: The value to check.
311 | filter_spec: Dictionary with filter operators.
312 |
313 | Returns:
314 | True if value matches the filter.
315 | """
316 | for op, filter_value in filter_spec.items():
317 | if op == "$gte" and not (value >= filter_value):
318 | return False
319 | elif op == "$gt" and not (value > filter_value):
320 | return False
321 | elif op == "$lte" and not (value <= filter_value):
322 | return False
323 | elif op == "$lt" and not (value < filter_value):
324 | return False
325 | elif op == "$ne" and not (value != filter_value):
326 | return False
327 | elif op == "$in" and value not in filter_value:
328 | return False
329 | elif op == "$nin" and value in filter_value:
330 | return False
331 |
332 | return True
```
--------------------------------------------------------------------------------
/docs/example-context-docs/mcp-ai-agent-prd.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP AI Agent Product Requirements Document (PRD)
2 |
3 | ## Document Information
4 | - **Product Name**: MCP AI Agent
5 | - **Version**: 1.0
6 | - **Date**: January 2025
7 | - **Status**: Draft
8 | - **Stakeholders**: Development Team, DevOps, Product Management
9 |
10 | ## Table of Contents
11 | 1. [Executive Summary](#executive-summary)
12 | 2. [Product Overview](#product-overview)
13 | 3. [Business Objectives](#business-objectives)
14 | 4. [User Personas](#user-personas)
15 | 5. [Functional Requirements](#functional-requirements)
16 | 6. [Non-Functional Requirements](#non-functional-requirements)
17 | 7. [User Stories](#user-stories)
18 | 8. [Technical Requirements](#technical-requirements)
19 | 9. [Success Metrics](#success-metrics)
20 | 10. [Risks and Mitigations](#risks-and-mitigations)
21 | 11. [Timeline and Milestones](#timeline-and-milestones)
22 | 12. [Appendix](#appendix)
23 |
24 | ## Executive Summary
25 |
26 | The MCP AI Agent is a command-line tool that enables users to interact with MCP (Model Context Protocol) servers through an intelligent conversational interface. By combining AI capabilities with MCP server functionality, the agent provides an intuitive way to access and utilize MCP tools without requiring deep technical knowledge of the underlying protocol.
27 |
28 | ### Key Value Propositions
29 | - **Simplified Access**: Natural language interface to complex MCP operations
30 | - **Intelligent Automation**: AI-driven tool selection and execution
31 | - **Developer Productivity**: Reduce time spent on manual MCP interactions
32 | - **Extensible Platform**: Plugin architecture for custom functionality
33 |
34 | ## Product Overview
35 |
36 | ### Problem Statement
37 | Currently, interacting with MCP servers requires:
38 | - Deep understanding of the MCP protocol
39 | - Manual crafting of JSON-RPC messages
40 | - Complex error handling and retry logic
41 | - Separate tools for different MCP operations
42 |
43 | This creates barriers for adoption and reduces productivity for developers and teams using MCP-based systems.
44 |
45 | ### Solution
46 | The MCP AI Agent solves these problems by providing:
47 | - Natural language interface for MCP interactions
48 | - Automatic protocol handling and error recovery
49 | - Intelligent tool selection based on user intent
50 | - Unified interface for all MCP operations
51 |
52 | ### Target Market
53 | - **Primary**: Software developers working with MCP servers
54 | - **Secondary**: DevOps engineers managing MCP infrastructure
55 | - **Tertiary**: Technical teams adopting MCP-based architectures
56 |
57 | ## Business Objectives
58 |
59 | ### Primary Objectives
60 | 1. **Increase MCP Adoption**: Lower barrier to entry for MCP usage
61 | 2. **Improve Developer Productivity**: Reduce time spent on MCP operations by 60%
62 | 3. **Enable Innovation**: Provide platform for building MCP-based solutions
63 |
64 | ### Success Criteria
65 | - 1,000+ active users within 6 months
66 | - 80% user satisfaction rating
67 | - 50% reduction in MCP-related support tickets
68 | - 10+ community-contributed plugins
69 |
70 | ## User Personas
71 |
72 | ### 1. Alex - Senior Developer
73 | - **Background**: 8 years experience, works with microservices
74 | - **Goals**: Quickly interact with MCP servers during development
75 | - **Pain Points**: Context switching between code and MCP tools
76 | - **Needs**: Fast, reliable CLI tool with good documentation
77 |
78 | ### 2. Jordan - DevOps Engineer
79 | - **Background**: 5 years experience, manages production infrastructure
80 | - **Goals**: Monitor and troubleshoot MCP server issues
81 | - **Pain Points**: Lack of unified tooling for MCP operations
82 | - **Needs**: Scriptable interface, logging, and monitoring capabilities
83 |
84 | ### 3. Sam - Junior Developer
85 | - **Background**: 1 year experience, learning MCP architecture
86 | - **Goals**: Understand and use MCP servers effectively
87 | - **Pain Points**: Steep learning curve for MCP protocol
88 | - **Needs**: Intuitive interface with helpful error messages
89 |
90 | ## Functional Requirements
91 |
92 | ### Core Features
93 |
94 | #### FR1: Connection Management
95 | - **FR1.1**: Connect to MCP servers via WebSocket or HTTP
96 | - **FR1.2**: Support authenticated and unauthenticated connections
97 | - **FR1.3**: Automatic reconnection with exponential backoff
98 | - **FR1.4**: Connection health monitoring and status display
99 |
100 | #### FR2: Conversational Interface
101 | - **FR2.1**: Natural language input processing
102 | - **FR2.2**: Context-aware responses
103 | - **FR2.3**: Multi-turn conversation support
104 | - **FR2.4**: Command history and session management
105 |
106 | #### FR3: MCP Tool Integration
107 | - **FR3.1**: Automatic tool discovery from MCP server
108 | - **FR3.2**: Intelligent tool selection based on user queries
109 | - **FR3.3**: Tool parameter validation and formatting
110 | - **FR3.4**: Tool execution with progress tracking
111 |
112 | #### FR4: CLI Commands
113 | - **FR4.1**: Interactive chat mode (`mcp-agent start`)
114 | - **FR4.2**: Tool listing (`mcp-agent list-tools`)
115 | - **FR4.3**: Configuration display (`mcp-agent config`)
116 | - **FR4.4**: Connection testing (`mcp-agent test-connection`)
117 |
118 | #### FR5: Session Management
119 | - **FR5.1**: Save and restore conversation history
120 | - **FR5.2**: Export conversations to various formats
121 | - **FR5.3**: Search through past conversations
122 | - **FR5.4**: Session statistics and analytics
123 |
124 | ### Advanced Features
125 |
126 | #### FR6: Plugin System
127 | - **FR6.1**: Dynamic plugin loading
128 | - **FR6.2**: Plugin marketplace/registry
129 | - **FR6.3**: Plugin development SDK
130 | - **FR6.4**: Plugin sandboxing for security
131 |
132 | #### FR7: Monitoring and Logging
133 | - **FR7.1**: Structured logging with configurable levels
134 | - **FR7.2**: Performance metrics collection
135 | - **FR7.3**: Error tracking and reporting
136 | - **FR7.4**: Audit trail for tool executions
137 |
138 | ## Non-Functional Requirements
139 |
140 | ### Performance Requirements
141 | - **NFR1**: Response time < 2 seconds for 95% of requests
142 | - **NFR2**: Support 100 concurrent connections per instance
143 | - **NFR3**: Memory usage < 500MB under normal operation
144 | - **NFR4**: Startup time < 3 seconds
145 |
146 | ### Reliability Requirements
147 | - **NFR5**: 99.9% uptime for core functionality
148 | - **NFR6**: Graceful degradation when MCP server unavailable
149 | - **NFR7**: Zero data loss for conversation history
150 | - **NFR8**: Automatic recovery from transient failures
151 |
152 | ### Security Requirements
153 | - **NFR9**: Secure storage of API keys and credentials
154 | - **NFR10**: TLS/SSL for all network communications
155 | - **NFR11**: Input sanitization to prevent injection attacks
156 | - **NFR12**: Role-based access control for multi-user scenarios
157 |
158 | ### Usability Requirements
159 | - **NFR13**: Intuitive CLI with helpful error messages
160 | - **NFR14**: Comprehensive documentation and examples
161 | - **NFR15**: Context-sensitive help system
162 | - **NFR16**: Support for common terminal emulators
163 |
164 | ### Compatibility Requirements
165 | - **NFR17**: Python 3.8+ compatibility
166 | - **NFR18**: Cross-platform support (Windows, macOS, Linux)
167 | - **NFR19**: MCP protocol version compatibility
168 | - **NFR20**: AI model provider agnostic
169 |
170 | ## User Stories
171 |
172 | ### Epic 1: Basic MCP Interaction
173 |
174 | **US1.1**: As a developer, I want to connect to my MCP server using simple commands so that I can start working quickly.
175 | ```
176 | GIVEN I have the MCP server URL and credentials
177 | WHEN I run `mcp-agent start`
178 | THEN I should be connected and see a confirmation message
179 | ```
180 |
181 | **US1.2**: As a developer, I want to ask questions in natural language so that I don't need to learn MCP protocol details.
182 | ```
183 | GIVEN I am connected to an MCP server
184 | WHEN I type "search for user documentation"
185 | THEN the agent should use the appropriate search tool and display results
186 | ```
187 |
188 | ### Epic 2: Tool Management
189 |
190 | **US2.1**: As a developer, I want to see all available tools so that I know what capabilities are available.
191 | ```
192 | GIVEN I am connected to an MCP server
193 | WHEN I run `/tools` or `mcp-agent list-tools`
194 | THEN I should see a formatted list of all available tools with descriptions
195 | ```
196 |
197 | **US2.2**: As a power user, I want to explicitly use specific tools so that I have fine-grained control.
198 | ```
199 | GIVEN I know a specific tool exists
200 | WHEN I type `/use search query="specific term"`
201 | THEN the agent should execute that exact tool with the provided parameters
202 | ```
203 |
204 | ### Epic 3: Session Management
205 |
206 | **US3.1**: As a user, I want my conversation history saved so that I can reference previous interactions.
207 | ```
208 | GIVEN I have been using the agent
209 | WHEN I restart the application
210 | THEN I should be able to access my previous conversations
211 | ```
212 |
213 | **US3.2**: As a team lead, I want to export conversations so that I can share knowledge with my team.
214 | ```
215 | GIVEN I have a conversation with useful information
216 | WHEN I run `/export markdown`
217 | THEN I should get a markdown file with the formatted conversation
218 | ```
219 |
220 | ## Technical Requirements
221 |
222 | ### System Architecture
223 | - **Microservices-compatible**: Designed for distributed systems
224 | - **Event-driven**: Async operations throughout
225 | - **Pluggable**: Extension points for customization
226 | - **Observable**: Built-in monitoring and tracing
227 |
228 | ### Development Requirements
229 | - **Language**: Python 3.8+
230 | - **Framework**: Click for CLI, Rich for terminal UI
231 | - **Testing**: Pytest with 80% coverage minimum
232 | - **Documentation**: Sphinx-generated API docs
233 |
234 | ### Integration Requirements
235 | - **MCP Protocol**: Full compliance with MCP specification
236 | - **AI Models**: Support for OpenAI, Anthropic, and local models
237 | - **Authentication**: OAuth2, API keys, and custom auth
238 | - **Monitoring**: Prometheus metrics, OpenTelemetry traces
239 |
240 | ### Deployment Requirements
241 | - **Containerization**: Docker images with multi-stage builds
242 | - **Package Management**: PyPI distribution
243 | - **Configuration**: Environment-based configuration
244 | - **Platforms**: Windows, macOS, Linux support
245 |
246 | ## Success Metrics
247 |
248 | ### User Adoption Metrics
249 | - **MAU** (Monthly Active Users): Target 1,000 within 6 months
250 | - **User Retention**: 60% 30-day retention rate
251 | - **Session Duration**: Average 15 minutes per session
252 | - **Feature Adoption**: 80% of users using AI features
253 |
254 | ### Performance Metrics
255 | - **Response Time**: P95 < 2 seconds
256 | - **Error Rate**: < 1% of requests fail
257 | - **Availability**: 99.9% uptime
258 | - **Throughput**: 100 requests/second per instance
259 |
260 | ### Business Impact Metrics
261 | - **Productivity Gain**: 60% reduction in MCP task time
262 | - **Support Tickets**: 50% reduction in MCP-related issues
263 | - **Community Growth**: 50+ GitHub stars per month
264 | - **Plugin Ecosystem**: 10+ community plugins
265 |
266 | ## Risks and Mitigations
267 |
268 | ### Technical Risks
269 |
270 | | Risk | Impact | Probability | Mitigation |
271 | |------|--------|-------------|------------|
272 | | MCP Protocol Changes | High | Medium | Version detection and compatibility layer |
273 | | AI Model Unavailability | High | Low | Fallback to basic mode, local model support |
274 | | Performance Degradation | Medium | Medium | Caching, connection pooling, monitoring |
275 | | Security Vulnerabilities | High | Low | Security audits, dependency scanning |
276 |
277 | ### Business Risks
278 |
279 | | Risk | Impact | Probability | Mitigation |
280 | |------|--------|-------------|------------|
281 | | Low User Adoption | High | Medium | User feedback loops, documentation |
282 | | Competition | Medium | High | Unique features, plugin ecosystem |
283 | | Maintenance Burden | Medium | Medium | Automated testing, CI/CD |
284 |
285 | ## Timeline and Milestones
286 |
287 | ### Phase 1: MVP (Weeks 1-4)
288 | - Basic MCP connection
289 | - Simple CLI interface
290 | - Core chat functionality
291 | - Basic error handling
292 |
293 | ### Phase 2: Core Features (Weeks 5-8)
294 | - AI integration
295 | - Tool discovery and execution
296 | - Session management
297 | - Comprehensive testing
298 |
299 | ### Phase 3: Advanced Features (Weeks 9-12)
300 | - Plugin system
301 | - Performance optimization
302 | - Monitoring integration
303 | - Documentation
304 |
305 | ### Phase 4: Beta Release (Weeks 13-16)
306 | - Beta testing program
307 | - Bug fixes and improvements
308 | - Performance tuning
309 | - Launch preparation
310 |
311 | ### Phase 5: GA Release (Week 17+)
312 | - Public release
313 | - Marketing campaign
314 | - Community building
315 | - Ongoing maintenance
316 |
317 | ## Appendix
318 |
319 | ### A. Glossary
320 | - **MCP**: Model Context Protocol - Protocol for AI model communication
321 | - **CLI**: Command Line Interface
322 | - **WebSocket**: Protocol for bidirectional communication
323 | - **JSON-RPC**: JSON Remote Procedure Call protocol
324 |
325 | ### B. References
326 | - [MCP Protocol Specification](https://github.com/modelcontextprotocol/specification)
327 | - [Click Documentation](https://click.palletsprojects.com/)
328 | - [Rich Documentation](https://rich.readthedocs.io/)
329 | - [Pydantic Documentation](https://docs.pydantic.dev/)
330 |
331 | ### C. Mockups and Wireframes
332 |
333 | #### CLI Interface Example
334 | ```
335 | $ mcp-agent start
336 | 🤖 MCP AI Agent v1.0
337 | 📡 Connecting to mcp://localhost:3000...
338 | ✅ Connected successfully!
339 |
340 | You: What tools are available?
```
--------------------------------------------------------------------------------
/QUICK_START.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP Log Analyzer - Quick Start Guide
2 |
3 | **Version**: 1.0
4 | **Date**: July 16, 2025
5 |
6 | ## Table of Contents
7 |
8 | 1. [Overview](#overview)
9 | 2. [Installation](#installation)
10 | 3. [Local Usage (Single Machine)](#local-usage-single-machine)
11 | 4. [Network Usage (Multi-Machine)](#network-usage-multi-machine)
12 | 5. [AI Agent Client Integration](#ai-agent-client-integration)
13 | 6. [Security Considerations](#security-considerations)
14 | 7. [Production Deployment](#production-deployment)
15 | 8. [Troubleshooting](#troubleshooting)
16 |
17 | ## Overview
18 |
19 | The MCP Log Analyzer Server provides comprehensive log analysis and system monitoring capabilities through the Model Context Protocol (MCP). It offers:
20 |
21 | - **18 Tools** across 5 categories (log management, Windows, Linux, process monitoring, network diagnostics)
22 | - **15+ Resources** for real-time system information
23 | - **12 Prompts** for comprehensive user guidance
24 | - **Cross-platform support** (Windows and Linux)
25 | - **Multiple transport options** (stdio, TCP, HTTP, SSE)
26 |
27 | ## Installation
28 |
29 | ### Prerequisites
30 |
31 | - **Python 3.12+**
32 | - **Platform-specific dependencies**:
33 | - Windows: `pywin32>=300` for Event Log access
34 | - Linux: Standard system tools (journalctl, netstat, etc.)
35 |
36 | ### Install Package
37 |
38 | ```bash
39 | # Navigate to project directory
40 | cd /path/to/MCPsvr
41 |
42 | # Install in development mode
43 | pip install -e .
44 |
45 | # Install with development dependencies
46 | pip install -e ".[dev]"
47 |
48 | # On Windows, ensure pywin32 is properly installed
49 | pip install "pywin32>=300"
50 | python -c "import win32api" # Test Windows API access
51 | ```
52 |
53 | ### Verify Installation
54 |
55 | ```bash
56 | # Test server import
57 | PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('Server import successful')"
58 |
59 | # Test server functionality
60 | python check_server.py
61 | ```
62 |
63 | ## Local Usage (Single Machine)
64 |
65 | ### Standard MCP Mode (Recommended)
66 |
67 | Use this mode for connecting with Claude Code or other MCP clients:
68 |
69 | ```bash
70 | # Start MCP server (stdio mode)
71 | python main.py
72 |
73 | # Add to Claude Code
74 | claude mcp add mcp-log-analyzer python main.py
75 |
76 | # List MCP servers
77 | claude mcp list
78 |
79 | # Remove MCP server
80 | claude mcp remove mcp-log-analyzer
81 | ```
82 |
83 | **Important**: MCP servers don't show output when started - they communicate via stdin/stdout with MCP clients.
84 |
85 | ### TCP Mode (For Testing/Development)
86 |
87 | ```bash
88 | # Start server in TCP mode for local testing
89 | python main_tcp.py --tcp --host 127.0.0.1 --port 8080
90 |
91 | # Start with verbose logging
92 | python main_tcp.py --tcp --host 127.0.0.1 --port 8080 --verbose
93 |
94 | # Start in stdio mode (default)
95 | python main_tcp.py
96 |
97 | # Add to Claude
98 | claude mcp add remote-log-analyzer python3 /home/steve/git/MCPsvr/src/mcp_log_analyzer/tcp_proxy.py 192.168.2.202 8088
99 | ```
100 |
101 | ## Network Usage (Multi-Machine)
102 |
103 | ### Single Command for Network Access
104 |
105 | ```bash
106 | # Start MCP server accessible across network
107 | python main_tcp.py --tcp --host 0.0.0.0 --port 8080
108 | ```
109 |
110 | ### Multi-Machine Deployment
111 |
112 | #### Windows Machines (Example: 5 servers)
113 |
114 | ```cmd
115 | # Machine 1 (Windows Server 1)
116 | cd C:\path\to\MCPsvr
117 | python main_tcp.py --tcp --host 0.0.0.0 --port 8080
118 |
119 | # Machine 2 (Windows Server 2)
120 | cd C:\path\to\MCPsvr
121 | python main_tcp.py --tcp --host 0.0.0.0 --port 8081
122 |
123 | # Machine 3 (Windows Server 3)
124 | cd C:\path\to\MCPsvr
125 | python main_tcp.py --tcp --host 0.0.0.0 --port 8082
126 |
127 | # Machine 4 (Windows Server 4)
128 | cd C:\path\to\MCPsvr
129 | python main_tcp.py --tcp --host 0.0.0.0 --port 8083
130 |
131 | # Machine 5 (Windows Server 5)
132 | cd C:\path\to\MCPsvr
133 | python main_tcp.py --tcp --host 0.0.0.0 --port 8084
134 | ```
135 |
136 | #### Linux Machines (Example: 5 servers)
137 |
138 | ```bash
139 | # Machine 1 (Linux Server 1)
140 | cd /path/to/MCPsvr
141 | python main_tcp.py --tcp --host 0.0.0.0 --port 8080
142 |
143 | # Machine 2 (Linux Server 2)
144 | cd /path/to/MCPsvr
145 | python main_tcp.py --tcp --host 0.0.0.0 --port 8081
146 |
147 | # Machine 3 (Linux Server 3)
148 | cd /path/to/MCPsvr
149 | python main_tcp.py --tcp --host 0.0.0.0 --port 8082
150 |
151 | # Machine 4 (Linux Server 4)
152 | cd /path/to/MCPsvr
153 | python main_tcp.py --tcp --host 0.0.0.0 --port 8083
154 |
155 | # Machine 5 (Linux Server 5)
156 | cd /path/to/MCPsvr
157 | python main_tcp.py --tcp --host 0.0.0.0 --port 8084
158 | ```
159 |
160 | ### Command Line Options
161 |
162 | ```bash
163 | python main_tcp.py [OPTIONS]
164 |
165 | OPTIONS:
166 | --tcp Enable TCP server mode (default: stdio)
167 | --host HOST Host to bind to (default: 0.0.0.0)
168 | --port PORT Port to bind to (default: 8080)
169 | --verbose Enable verbose logging
170 | --help Show help message
171 | ```
172 |
173 | ## AI Agent Client Integration
174 |
175 | ### Connection Configuration
176 |
177 | Your AI Agent client can connect to multiple servers simultaneously:
178 |
179 | ```python
180 | # Example server configuration for AI Agent
181 | servers = [
182 | # Windows machines
183 | {"host": "192.168.1.100", "port": 8080, "name": "Windows-Server-1"},
184 | {"host": "192.168.1.101", "port": 8081, "name": "Windows-Server-2"},
185 | {"host": "192.168.1.102", "port": 8082, "name": "Windows-Server-3"},
186 | {"host": "192.168.1.103", "port": 8083, "name": "Windows-Server-4"},
187 | {"host": "192.168.1.104", "port": 8084, "name": "Windows-Server-5"},
188 |
189 | # Linux machines
190 | {"host": "192.168.1.200", "port": 8080, "name": "Linux-Server-1"},
191 | {"host": "192.168.1.201", "port": 8081, "name": "Linux-Server-2"},
192 | {"host": "192.168.1.202", "port": 8082, "name": "Linux-Server-3"},
193 | {"host": "192.168.1.203", "port": 8083, "name": "Linux-Server-4"},
194 | {"host": "192.168.1.204", "port": 8084, "name": "Linux-Server-5"},
195 | ]
196 | ```
197 |
198 | ### Available Capabilities
199 |
200 | Each server provides:
201 |
202 | #### Tools (18 total)
203 | - **Log Management**: register_log_source, list_log_sources, query_logs, analyze_logs
204 | - **Windows System**: test_windows_event_log_access, get_windows_event_log_info, query_windows_events_by_criteria, get_windows_system_health
205 | - **Linux System**: test_linux_log_access, query_systemd_journal, analyze_linux_services, get_linux_system_overview
206 | - **Process Monitoring**: analyze_system_performance, find_resource_intensive_processes, monitor_process_health, get_system_health_summary
207 | - **Network Diagnostics**: test_network_connectivity, test_port_connectivity, analyze_network_connections, diagnose_network_issues
208 |
209 | #### Resources (15+)
210 | - **Log Resources**: `logs/sources`, `logs/types`, `logs/analysis-types`
211 | - **Windows Resources**: `windows/system-events/{param}`, `windows/application-events/{param}`
212 | - **Linux Resources**: `linux/systemd-logs/{param}`, `linux/system-logs/{param}`
213 | - **Process Resources**: `processes/list`, `processes/summary`
214 | - **Network Resources**: `network/listening-ports`, `network/established-connections`, `network/all-connections`
215 |
216 | #### Prompts (12 total)
217 | - Comprehensive guides for log management, Windows diagnostics, Linux diagnostics, process monitoring, and network troubleshooting
218 |
219 | ## Security Considerations
220 |
221 | ### Network Security
222 |
223 | ```bash
224 | # Bind to specific interface instead of 0.0.0.0 for security
225 | python main_tcp.py --tcp --host 192.168.1.100 --port 8080
226 |
227 | # Use non-default port
228 | python main_tcp.py --tcp --host 0.0.0.0 --port 9999
229 | ```
230 |
231 | ### Firewall Configuration
232 |
233 | #### Windows
234 | ```cmd
235 | # Allow through Windows Firewall
236 | netsh advfirewall firewall add rule name="MCP Log Analyzer" dir=in action=allow protocol=TCP localport=8080
237 | ```
238 |
239 | #### Linux
240 | ```bash
241 | # UFW (Ubuntu/Debian)
242 | sudo ufw allow 8080/tcp
243 |
244 | # iptables (CentOS/RHEL)
245 | sudo iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
246 | ```
247 |
248 | ### Access Control
249 |
250 | - **Read-only operations**: Server performs only read operations on system data
251 | - **No system modification**: No capability to modify system configuration
252 | - **Input validation**: Comprehensive Pydantic model validation
253 | - **Error sanitization**: Safe error messages without sensitive data exposure
254 |
255 | ## Production Deployment
256 |
257 | ### Windows Service
258 |
259 | ```cmd
260 | # Install as Windows service (requires additional setup)
261 | sc create MCPLogAnalyzer binPath= "python C:\path\to\MCPsvr\main_tcp.py --tcp --host 0.0.0.0 --port 8080"
262 | sc start MCPLogAnalyzer
263 | ```
264 |
265 | ### Linux Systemd Service
266 |
267 | ```bash
268 | # Create service file: /etc/systemd/system/mcp-log-analyzer.service
269 | [Unit]
270 | Description=MCP Log Analyzer Server
271 | After=network.target
272 |
273 | [Service]
274 | Type=simple
275 | User=your-user
276 | WorkingDirectory=/path/to/MCPsvr
277 | ExecStart=/usr/bin/python3 main_tcp.py --tcp --host 0.0.0.0 --port 8080
278 | Restart=always
279 | RestartSec=10
280 |
281 | [Install]
282 | WantedBy=multi-user.target
283 |
284 | # Enable and start service
285 | sudo systemctl enable mcp-log-analyzer
286 | sudo systemctl start mcp-log-analyzer
287 | sudo systemctl status mcp-log-analyzer
288 | ```
289 |
290 | ### Docker Deployment
291 |
292 | ```bash
293 | # Build Docker image
294 | docker build -t mcp-log-analyzer .
295 |
296 | # Run container
297 | docker run -d -p 8080:8080 --name mcp-log-analyzer mcp-log-analyzer
298 |
299 | # Run with custom configuration
300 | docker run -d -p 9999:8080 --name mcp-log-analyzer mcp-log-analyzer
301 | ```
302 |
303 | ### Kubernetes Deployment
304 |
305 | ```yaml
306 | apiVersion: apps/v1
307 | kind: Deployment
308 | metadata:
309 | name: mcp-log-analyzer
310 | spec:
311 | replicas: 3
312 | selector:
313 | matchLabels:
314 | app: mcp-log-analyzer
315 | template:
316 | metadata:
317 | labels:
318 | app: mcp-log-analyzer
319 | spec:
320 | containers:
321 | - name: mcp-log-analyzer
322 | image: mcp-log-analyzer:latest
323 | ports:
324 | - containerPort: 8080
325 | command: ["python", "main_tcp.py", "--tcp", "--host", "0.0.0.0", "--port", "8080"]
326 | resources:
327 | limits:
328 | memory: "1Gi"
329 | cpu: "500m"
330 | requests:
331 | memory: "512Mi"
332 | cpu: "250m"
333 | ---
334 | apiVersion: v1
335 | kind: Service
336 | metadata:
337 | name: mcp-log-analyzer-service
338 | spec:
339 | selector:
340 | app: mcp-log-analyzer
341 | ports:
342 | - port: 8080
343 | targetPort: 8080
344 | type: LoadBalancer
345 | ```
346 |
347 | ## Troubleshooting
348 |
349 | ### Common Issues
350 |
351 | #### Server Not Starting
352 | ```bash
353 | # Check Python version
354 | python --version # Should be 3.12+
355 |
356 | # Test dependencies
357 | pip install -e .
358 |
359 | # Check server import
360 | PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('OK')"
361 | ```
362 |
363 | #### Network Connection Issues
364 | ```bash
365 | # Check if server is running
366 | netstat -tuln | grep 8080
367 |
368 | # Test connection
369 | telnet your-server-ip 8080
370 |
371 | # Check firewall
372 | # Windows: Check Windows Firewall settings
373 | # Linux: Check iptables/ufw rules
374 | ```
375 |
376 | #### Windows Event Log Access
377 | ```bash
378 | # Test Windows API access
379 | python -c "import win32api; print('Windows API available')"
380 |
381 | # Install pywin32 if missing
382 | pip install "pywin32>=300"
383 | ```
384 |
385 | #### Linux System Access
386 | ```bash
387 | # Test systemd access
388 | journalctl --version
389 |
390 | # Test network tools
391 | which netstat ss ping
392 | ```
393 |
394 | ### Debug Mode
395 |
396 | ```bash
397 | # Start with verbose logging
398 | python main_tcp.py --tcp --host 0.0.0.0 --port 8080 --verbose
399 |
400 | # Check server functionality
401 | python check_server.py
402 |
403 | # Test specific tools
404 | python -c "
405 | from mcp_log_analyzer.mcp_server.server import mcp
406 | # Test server capabilities
407 | "
408 | ```
409 |
410 | ### Performance Monitoring
411 |
412 | ```bash
413 | # Monitor server resource usage
414 | htop # Linux
415 | taskmgr # Windows
416 |
417 | # Check network connections
418 | netstat -an | grep 8080
419 |
420 | # Monitor logs
421 | tail -f /var/log/syslog # Linux
422 | # Check Event Viewer on Windows
423 | ```
424 |
425 | ## Development and Testing
426 |
427 | ### Code Quality
428 |
429 | ```bash
430 | # Format code
431 | black .
432 | isort .
433 |
434 | # Type checking
435 | mypy src
436 |
437 | # Linting
438 | flake8
439 |
440 | # Run all quality checks
441 | black . && isort . && mypy src && flake8
442 | ```
443 |
444 | ### Testing
445 |
446 | ```bash
447 | # Run all tests with proper PYTHONPATH
448 | PYTHONPATH=src python3 -m pytest tests/ -v
449 |
450 | # Run tests with coverage
451 | PYTHONPATH=src python3 -m pytest --cov=mcp_log_analyzer tests/
452 |
453 | # Run specific test file
454 | PYTHONPATH=src python3 -m pytest tests/test_base_parser.py -v
455 | ```
456 |
457 | ### Build and Install
458 |
459 | ```bash
460 | # Install the package in development mode
461 | pip install -e .
462 |
463 | # Install with development dependencies
464 | pip install -e ".[dev]"
465 |
466 | # Build distribution
467 | python -m build
468 | ```
469 |
470 | ## Support and Documentation
471 |
472 | - **Architecture Documentation**: `/docs/planning/Agent/MCP_Server_Architecture.md`
473 | - **Development Guide**: `/CLAUDE.md`
474 | - **API Documentation**: Auto-generated from code
475 | - **Issues**: Report issues via your project's issue tracker
476 |
477 | ## Quick Reference
478 |
479 | | Command | Purpose |
480 | |---------|---------|
481 | | `python main.py` | Start stdio MCP server (standard mode) |
482 | | `python main_tcp.py --tcp --host 0.0.0.0 --port 8080` | Start network-accessible TCP server |
483 | | `python check_server.py` | Test server functionality |
484 | | `claude mcp add mcp-log-analyzer python main.py` | Add to Claude Code |
485 | | `PYTHONPATH=src python3 -m pytest tests/ -v` | Run tests |
486 | | `black . && isort . && mypy src && flake8` | Code quality checks |
487 |
488 | This guide provides everything you need to get started with the MCP Log Analyzer Server in both local and network configurations.
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/server.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | MCP Log Analyzer Server using FastMCP framework.
3 | """
4 |
5 | import re
6 | from datetime import datetime, timedelta
7 | from pathlib import Path
8 | from typing import Any, Dict, Iterator, List, Optional
9 | from uuid import UUID
10 |
11 | from mcp.server import FastMCP
12 |
13 | from mcp_log_analyzer.core.config import Settings
14 | from mcp_log_analyzer.core.models import AnalysisResult, LogRecord, LogSource
15 | from mcp_log_analyzer.parsers.base import BaseParser
16 |
# Initialize settings
settings = Settings()  # Global configuration object, loaded once at import time

# Initialize MCP server with custom error handler
import functools
import json

# Track request/response for debugging
# NOTE(review): nothing in this module appends to this list — it appears
# unused; confirm against other modules before removing.
request_response_log = []
26 |
# Decorator used to surface full request details when an MCP handler fails.
def log_mcp_errors(func):
    """Decorator to log MCP errors with full request details."""

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception as e:
            # Emit each diagnostic line separately so partial context survives
            # even if a later formatting step fails.
            for detail in (
                f"Error in {func.__name__}: {e}",
                f"Error type: {type(e).__name__}",
                f"Function: {func.__name__}",
                f"Args: {args}",
                f"Kwargs: {kwargs}",
            ):
                logger.error(detail)
            raise

    return wrapper
42 |
# Decorator that traces every tool invocation (arguments, success, failure).
def debug_tool(tool_func):
    """Wrapper to debug tool function calls."""

    @functools.wraps(tool_func)
    async def wrapper(*args, **kwargs):
        name = tool_func.__name__
        logger.info(f"Tool '{name}' called")
        logger.info(f"  Args: {args}")
        logger.info(f"  Kwargs: {kwargs}")

        try:
            outcome = await tool_func(*args, **kwargs)
            logger.info(f"Tool '{name}' returned successfully")
            return outcome
        except Exception as e:
            logger.error(f"Tool '{name}' failed: {e}")
            logger.error(f"  Exception type: {type(e).__name__}")
            logger.error(f"  Exception args: {e.args}")
            raise

    return wrapper
63 |
# For now, we'll use the standard FastMCP and add logging via decorators
# The "Invalid request parameters" error is likely coming from the MCP protocol layer
# when tool arguments don't match expected signatures

# Initialize MCP server
mcp = FastMCP(
    name="mcp-log-analyzer",
    version="0.1.0",
    # Runtime dependencies advertised to MCP clients.
    dependencies=["pandas>=1.3.0", "psutil>=5.9.0"],
)

# Storage with persistence
from mcp_log_analyzer.core.state_manager import get_state_manager

# state_manager persists registered log sources across server restarts.
state_manager = get_state_manager()
_log_sources: Optional[Dict[str, LogSource]] = None  # Lazy loaded on first access
parsers: Dict[str, BaseParser] = {}  # source_type -> parser instance

# Log loaded sources
import logging
logger = logging.getLogger(__name__)
85 |
# Lazy loading wrapper for log sources
def get_log_sources() -> Dict[str, LogSource]:
    """Return the registered log sources, loading them from disk on first use."""
    global _log_sources
    # Fast path: already loaded and cached at module level.
    if _log_sources is not None:
        return _log_sources
    logger.info("Lazy loading persisted log sources...")
    _log_sources = state_manager.load_log_sources()
    logger.info(f"Loaded {len(_log_sources)} persisted log sources")
    return _log_sources
95 |
# Create a proxy object that acts like a dict but lazy loads
class LazyLogSources:
    """Dict-like proxy over the persisted log sources, loaded lazily.

    Every operation delegates to get_log_sources(), which loads the persisted
    sources on first access and caches them in the module-level _log_sources.
    Mutations act directly on that cached dict; the previous implementation
    also reassigned the global to the same object after each mutation, which
    was a no-op and has been removed.
    """

    def __getitem__(self, key):
        return get_log_sources()[key]

    def __setitem__(self, key, value):
        # get_log_sources() returns the cached module-level dict, so an
        # in-place mutation is sufficient.
        get_log_sources()[key] = value

    def __delitem__(self, key):
        del get_log_sources()[key]

    def __contains__(self, key):
        return key in get_log_sources()

    def __len__(self):
        return len(get_log_sources())

    def __iter__(self):
        return iter(get_log_sources())

    def values(self):
        return get_log_sources().values()

    def keys(self):
        return get_log_sources().keys()

    def items(self):
        return get_log_sources().items()

    def get(self, key, default=None):
        return get_log_sources().get(key, default)

    def clear(self):
        # Clears the in-memory dict only; persistence of the cleared state is
        # handled elsewhere (state_manager), if at all — confirm with callers.
        get_log_sources().clear()
143 |
144 |
# Add a mock parser for testing on non-Windows systems
class MockParser(BaseParser):
    """Stand-in parser that yields one fixed record per call.

    Used wherever a real platform-specific parser is unavailable.
    """

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        yield LogRecord(source_id=source.id, data={"test": "data"})

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        yield LogRecord(source_id=source.id, data={"test": "data"})

    def parse(self, path: str, **kwargs) -> List[LogRecord]:
        # No real source here, so tag the record with the nil UUID.
        placeholder = LogRecord(
            source_id=UUID("00000000-0000-0000-0000-000000000000"),
            data={"test": "data"},
        )
        return [placeholder]

    def analyze(
        self, logs: List[LogRecord], analysis_type: str = "summary"
    ) -> AnalysisResult:
        # Minimal summary: just a count of the supplied records.
        return AnalysisResult(analysis_type=analysis_type, summary={"total": len(logs)})
167 |
168 |
# Only initialize Windows Event Log parser on Windows
import platform

if platform.system() == "Windows":
    try:
        from mcp_log_analyzer.parsers.evt_parser import EventLogParser

        parsers["evt"] = EventLogParser()
    except ImportError:
        # pywin32 not available, use mock parser
        parsers["evt"] = MockParser()
else:
    # Use mock parser on non-Windows systems
    parsers["evt"] = MockParser()

# Add real CSV parser and mock parsers for others
try:
    from mcp_log_analyzer.parsers.csv_parser import CsvLogParser

    parsers["csv"] = CsvLogParser()
except ImportError:
    parsers["csv"] = MockParser()

# Add ETL parser for Windows Event Trace Logs
try:
    from mcp_log_analyzer.parsers.etl_parser import EtlParser

    etl_parser = EtlParser()
    if etl_parser.is_available():
        parsers["etl"] = etl_parser
    else:
        # Parser module imports fine but its runtime prerequisites are missing.
        parsers["etl"] = MockParser()
except ImportError:
    parsers["etl"] = MockParser()

# Add mock parsers for other types not yet implemented
parsers["json"] = MockParser()
parsers["xml"] = MockParser()
parsers["text"] = MockParser()

# Add alias for backward compatibility
# Allow "event" to map to "evt" for users who were using the old name
parser_aliases = {
    "event": "evt"  # Map old name to new name
}
214 |
215 |
# Utility Functions
def parse_time_param(time_str: str) -> Optional[datetime]:
    """Parse a time parameter in several user-friendly formats.

    Accepts:
      * A falsy value or the literal string "none" (any case) -> None
      * A relative offset such as "30m", "1h", "2d" (now also "45s", "1w"),
        interpreted as that far in the past from datetime.now()
      * An absolute datetime in one of several common formats,
        e.g. "2024-01-02 03:04:05" or "02/01/2024"

    Leading/trailing whitespace is ignored (backward-compatible widening).

    Raises:
        ValueError: if the string matches none of the supported formats.
    """
    if not time_str:
        return None
    time_str = time_str.strip()
    # Case-insensitive "none" sentinel (original only matched lowercase).
    if time_str.lower() == "none":
        return None

    # Relative offsets; s/w generalize the original m/h/d units.
    unit_kwargs = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days", "w": "weeks"}
    match = re.match(r"^(\d+)([smhdw])$", time_str)
    if match:
        value, unit = match.groups()
        return datetime.now() - timedelta(**{unit_kwargs[unit]: int(value)})

    # Absolute datetimes, most specific format first.
    datetime_formats = [
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%d %H:%M",
        "%Y-%m-%d",
        "%d/%m/%Y %H:%M:%S",
        "%d/%m/%Y %H:%M",
        "%d/%m/%Y",
    ]
    for fmt in datetime_formats:
        try:
            return datetime.strptime(time_str, fmt)
        except ValueError:
            continue

    raise ValueError(f"Cannot parse time: {time_str}")
252 |
253 |
# Prompts
@mcp.prompt()
async def log_analysis_quickstart() -> str:
    """Return a step-by-step getting-started guide for log analysis.

    Registered as an MCP prompt; the returned markdown walks a user through
    registering a source, querying, analyzing, and the diagnostic tools.
    """
    return """Welcome to MCP Log Analyzer! Here's how to get started:

1. **Register a Log Source**
   First, register the log file or directory you want to analyze:
   - Use the `register_log_source` tool
   - Specify a unique name, source type, and path
   - Example: Register Windows System logs as "system-logs"

2. **Query Logs**
   Retrieve logs from your registered source:
   - Use the `query_logs` tool
   - Apply filters, time ranges, and pagination
   - Start with a small limit to preview the data

3. **Analyze Logs**
   Perform deeper analysis on your logs:
   - Use the `analyze_logs` tool
   - Choose from summary, pattern, or anomaly analysis
   - Review the results to gain insights

4. **Test System Resources**
   Use diagnostic tools to test system health:
   - `test_system_resources_access` - Check system monitoring capabilities
   - `test_windows_event_log_access` - Test Windows Event Log access
   - `test_linux_log_access` - Test Linux log file access
   - `test_network_tools_availability` - Check network diagnostic tools

5. **Explore Resources**
   Check available resources for more information:
   - logs://sources - View registered sources
   - logs://types - See supported log formats
   - logs://analysis-types - Learn about analysis options

Need help with a specific log type? Just ask!"""
297 |
298 |
@mcp.prompt()
async def troubleshooting_guide() -> str:
    """Return a troubleshooting guide for common log-analysis problems.

    Registered as an MCP prompt; covers access errors, parser lookup,
    empty results, performance, network, and system-health checks.
    """
    return """Log Analysis Troubleshooting Guide:

**Common Issues and Solutions:**

1. **"Access Denied" Errors**
   - Ensure you have read permissions for the log files
   - For Windows Event Logs, run with appropriate privileges
   - Check file paths are correct and accessible
   - Use `test_windows_event_log_access` or `test_linux_log_access` tools

2. **"Parser Not Found" Errors**
   - Verify the source_type matches supported types
   - Use logs://types resource to see available parsers
   - Ensure the log format matches the selected parser

3. **Empty Results**
   - Check your filters aren't too restrictive
   - Verify the time range includes log entries
   - Ensure the log file isn't empty or corrupted

4. **Performance Issues**
   - Use pagination (limit/offset) for large log files
   - Apply filters to reduce data volume
   - Consider analyzing smaller time ranges
   - Use `analyze_system_performance` tool to check system resources

5. **Network Issues**
   - Use `test_network_connectivity` to check internet access
   - Use `test_port_connectivity` to check specific ports
   - Use `diagnose_network_issues` for comprehensive network diagnosis

6. **System Health**
   - Use `get_system_health_summary` for overall system status
   - Use `monitor_process_health` to check specific processes
   - Use `find_resource_intensive_processes` to identify performance issues

Still having issues? Provide the error message and log source details for specific help."""
344 |
345 |
@mcp.prompt()
async def windows_event_log_guide() -> str:
    """Return a guide for analyzing Windows Event Logs.

    Registered as an MCP prompt; documents common log types, registration
    (using the log name as the path for the "evt" parser), diagnostic tools,
    filters, and a suggested workflow.
    """
    return """Windows Event Log Analysis Guide:

**Getting Started with Windows Event Logs:**

1. **Common Log Types**
   - System: Hardware, drivers, system components
   - Application: Software events and errors
   - Security: Audit and security events
   - Setup: Installation and update logs

2. **Testing Access**
   Use `test_windows_event_log_access` tool to check if you can access Event Logs
   Use `get_windows_system_health` tool for a quick health overview

3. **Registering Event Logs**
   ```
   register_log_source(
       name="system-events",
       source_type="evt",
       path="System" # Use log name, not file path
   )
   ```

4. **Diagnostic Tools**
   - `get_windows_event_log_info` - Get detailed info about specific logs
   - `query_windows_events_by_criteria` - Filter events by ID, level, or time
   - `get_windows_system_health` - Overall system health from Event Logs

5. **Useful Filters**
   - Event ID: Filter specific event types
   - Level: Error, Warning, Information
   - Source: Filter by event source
   - Time range: Focus on specific periods

6. **Common Event IDs**
   - 6005/6006: System startup/shutdown
   - 7001/7002: User logon/logoff
   - 41: Unexpected shutdown
   - 1074: System restart/shutdown reason

7. **Analysis Tips**
   - Start with summary analysis for overview
   - Use pattern analysis to find recurring issues
   - Apply anomaly detection for unusual events
   - Correlate events across different logs

**Example Workflow:**
1. Test access with `test_windows_event_log_access`
2. Get system health with `get_windows_system_health`
3. Register System and Application logs
4. Query recent errors and warnings with `query_windows_events_by_criteria`
5. Analyze patterns in error events

Need help with specific event IDs or analysis scenarios? Just ask!"""
408 |
409 |
from .prompts import register_all_prompts
from .resources import register_all_resources

# Register all tools, resources, and prompts
from .tools import register_all_tools

# Attach every tool, resource, and prompt module to the FastMCP instance.
register_all_tools(mcp)
register_all_resources(mcp)
register_all_prompts(mcp)
419 |
```
--------------------------------------------------------------------------------
/mcp_tcp_client.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | TCP client for connecting to remote MCP servers.
4 | Bridges stdio to TCP connection with automatic reconnection and heartbeat support.
5 | """
6 |
7 | import asyncio
8 | import sys
9 | import json
10 | import argparse
11 | import logging
12 | import time
13 | from typing import Optional, Tuple, Dict, Any
14 | from collections import deque
15 |
# Quiet by default; the --debug flag raises the root logger to DEBUG in main().
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
18 |
19 |
class TCPClient:
    """TCP client with automatic reconnection and heartbeat support.

    Bridges the parent process's stdio to a remote MCP server over TCP.
    Framing is newline-delimited JSON in both directions; control messages
    (handshake / heartbeat) are interleaved with MCP JSON-RPC traffic and
    filtered out before forwarding to stdout.
    """

    def __init__(self, host: str, port: int, reconnect_delay: float = 2.0,
                 max_reconnect_attempts: int = 50, heartbeat_interval: float = 30.0):
        self.host = host
        self.port = port
        self.reconnect_delay = reconnect_delay
        self.max_reconnect_attempts = max_reconnect_attempts
        self.heartbeat_interval = heartbeat_interval
        # A heartbeat is considered missed after two full intervals.
        self.heartbeat_timeout = heartbeat_interval * 2
        self.connection_state = {
            'connected': False,
            'last_heartbeat_sent': 0,
            'last_heartbeat_received': 0,
            'server_supports_heartbeat': False,
            # MCP handshake tracking: requests arriving before the
            # 'notifications/initialized' notification are buffered.
            'mcp_initialized': False,
            'initialization_in_progress': False,
            'pending_initialize_id': None
        }
        self.reader: Optional[asyncio.StreamReader] = None
        self.writer: Optional[asyncio.StreamWriter] = None
        self.reconnect_attempts = 0
        self.should_reconnect = True
        # Requests received before MCP initialization; replayed afterwards.
        self.buffered_requests = deque()

    async def connect(self) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
        """Connect to TCP server with retry logic.

        Raises the last connection error (or a generic Exception) once
        max_reconnect_attempts is exhausted.
        """
        while self.reconnect_attempts < self.max_reconnect_attempts:
            try:
                logger.info(f"Connecting to {self.host}:{self.port} (attempt {self.reconnect_attempts + 1}/{self.max_reconnect_attempts})")
                reader, writer = await asyncio.open_connection(self.host, self.port)
                logger.info(f"Connected to {self.host}:{self.port}")

                self.reader = reader
                self.writer = writer
                self.connection_state['connected'] = True
                # A fresh connection means the MCP handshake must be redone.
                self.connection_state['mcp_initialized'] = False
                self.connection_state['initialization_in_progress'] = False
                self.connection_state['pending_initialize_id'] = None
                self.buffered_requests.clear()
                # Reset so a later drop gets the full retry budget again.
                self.reconnect_attempts = 0

                # Send handshake to indicate we support heartbeat
                await self.send_handshake()

                return reader, writer

            except Exception as e:
                self.reconnect_attempts += 1
                logger.error(f"Connection failed: {e}")

                if self.reconnect_attempts < self.max_reconnect_attempts:
                    logger.info(f"Retrying in {self.reconnect_delay} seconds...")
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    logger.error("Max reconnection attempts reached")
                    raise

        raise Exception("Failed to connect after maximum attempts")

    async def send_handshake(self):
        """Send handshake message to indicate heartbeat support."""
        if self.writer:
            handshake = {
                'type': 'handshake',
                'client_version': '1.0',
                'supports_heartbeat': True
            }
            # Newline-delimited JSON framing, same as all other messages.
            message_str = json.dumps(handshake) + '\n'
            self.writer.write(message_str.encode('utf-8'))
            await self.writer.drain()

    async def send_heartbeat(self):
        """Send heartbeat message."""
        if self.writer and not self.writer.is_closing():
            heartbeat = {
                'type': 'heartbeat',
                'timestamp': time.time()
            }
            message_str = json.dumps(heartbeat) + '\n'
            self.writer.write(message_str.encode('utf-8'))
            await self.writer.drain()
            self.connection_state['last_heartbeat_sent'] = time.time()

    async def handle_heartbeat_response(self, message: dict):
        """Handle heartbeat response from server."""
        self.connection_state['last_heartbeat_received'] = time.time()
        # First response proves the server speaks the heartbeat protocol.
        self.connection_state['server_supports_heartbeat'] = True

        if not message.get('mcp_alive', True):
            logger.warning("Server reports MCP process is not alive")

    async def disconnect(self):
        """Disconnect from server."""
        self.connection_state['connected'] = False
        if self.writer:
            self.writer.close()
            await self.writer.wait_closed()

    async def run(self):
        """Main client loop with automatic reconnection.

        Runs the two forwarding pumps and the heartbeat loop concurrently;
        when any of them finishes (EOF, error), the others are cancelled,
        the socket is closed, and the loop reconnects if allowed.
        """
        while self.should_reconnect:
            try:
                # Connect to server
                reader, writer = await self.connect()

                # Create tasks for communication and heartbeat
                stdin_to_tcp = asyncio.create_task(self.forward_stdin_to_tcp())
                tcp_to_stdout = asyncio.create_task(self.forward_tcp_to_stdout())
                heartbeat_task = asyncio.create_task(self.heartbeat_loop())

                # Wait for any task to complete
                done, pending = await asyncio.wait(
                    [stdin_to_tcp, tcp_to_stdout, heartbeat_task],
                    return_when=asyncio.FIRST_COMPLETED
                )

                # Cancel remaining tasks
                for task in pending:
                    task.cancel()

                # Wait for cancellation
                await asyncio.gather(*pending, return_exceptions=True)

                # Disconnect
                await self.disconnect()

                # Check if we should reconnect
                if self.should_reconnect and self.reconnect_attempts < self.max_reconnect_attempts:
                    logger.info(f"Connection lost, reconnecting in {self.reconnect_delay} seconds...")
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    break

            except KeyboardInterrupt:
                logger.info("Interrupted by user")
                self.should_reconnect = False
                break
            except Exception as e:
                logger.error(f"Unexpected error: {e}")
                if self.should_reconnect:
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    break

    async def forward_stdin_to_tcp(self):
        """Forward stdin to TCP connection with MCP initialization tracking.

        Tracks the MCP initialize handshake; requests issued before the
        'notifications/initialized' notification are buffered and replayed
        once initialization completes.
        """
        loop = asyncio.get_event_loop()
        stdin = sys.stdin

        while self.connection_state['connected']:
            try:
                # Read line from stdin (blocking in executor)
                line = await loop.run_in_executor(None, stdin.readline)
                if not line:
                    # EOF on stdin: the parent process is gone, stop for good.
                    logger.info("Stdin closed")
                    self.should_reconnect = False
                    break

                # Parse and check if it's an MCP message
                try:
                    message = json.loads(line.strip())

                    # Handle initialize request
                    if message.get('method') == 'initialize':
                        logger.debug("Detected initialize request")
                        self.connection_state['initialization_in_progress'] = True
                        self.connection_state['pending_initialize_id'] = message.get('id')

                    # Handle initialized notification
                    elif message.get('method') == 'notifications/initialized':
                        logger.debug("Detected initialized notification")
                        self.connection_state['mcp_initialized'] = True
                        self.connection_state['initialization_in_progress'] = False

                        # Process any buffered requests after initialization
                        while self.buffered_requests:
                            buffered_line = self.buffered_requests.popleft()
                            if self.writer and not self.writer.is_closing():
                                self.writer.write(buffered_line.encode('utf-8'))
                                await self.writer.drain()
                                # Small pacing delay between replayed requests.
                                await asyncio.sleep(0.01)

                    # Buffer non-init requests if not initialized
                    elif (not self.connection_state['mcp_initialized'] and
                          'method' in message and
                          message['method'] not in ['initialize', 'notifications/initialized']):
                        logger.warning(f"Buffering request '{message.get('method')}' until initialization complete")
                        self.buffered_requests.append(line)

                        # Send temporary error response
                        # NOTE(review): the request is both answered with this
                        # -32002 error AND replayed after initialization, so the
                        # caller may see two responses for the same id — confirm
                        # this is the intended protocol behavior.
                        if 'id' in message:
                            error_response = {
                                'jsonrpc': '2.0',
                                'id': message['id'],
                                'error': {
                                    'code': -32002,
                                    'message': 'Server initialization pending',
                                    'data': 'Waiting for MCP initialization to complete'
                                }
                            }
                            sys.stdout.write(json.dumps(error_response) + '\n')
                            sys.stdout.flush()
                        continue

                except json.JSONDecodeError:
                    # If not JSON, still forward it
                    pass

                # Send to TCP
                if self.writer and not self.writer.is_closing():
                    self.writer.write(line.encode('utf-8'))
                    await self.writer.drain()
                else:
                    logger.warning("Connection closed, unable to send")
                    break

            except Exception as e:
                logger.error(f"Error forwarding stdin: {e}")
                break

    async def forward_tcp_to_stdout(self):
        """Forward TCP to stdout, handling heartbeats and tracking initialization.

        Heartbeat responses are consumed here and never forwarded to stdout;
        everything else is passed through verbatim.
        """
        while self.connection_state['connected']:
            try:
                # Read from TCP
                if not self.reader:
                    break

                line = await self.reader.readline()
                if not line:
                    logger.info("TCP connection closed")
                    break

                # Try to parse as JSON to check for heartbeat or initialization response
                try:
                    message_str = line.decode('utf-8').strip()
                    message = json.loads(message_str)

                    # Handle heartbeat responses
                    if message.get('type') == 'heartbeat_response':
                        await self.handle_heartbeat_response(message)
                        # Don't forward heartbeats to stdout
                        continue

                    # Check if this is a response to initialize request
                    if (self.connection_state['initialization_in_progress'] and
                        'id' in message and
                        message['id'] == self.connection_state['pending_initialize_id']):
                        logger.debug("Received initialize response")
                        # Initialization response received, waiting for initialized notification

                except json.JSONDecodeError:
                    # Not JSON, just forward as-is
                    pass

                # Write to stdout
                sys.stdout.write(line.decode('utf-8'))
                sys.stdout.flush()

            except Exception as e:
                logger.error(f"Error forwarding TCP: {e}")
                break

    async def heartbeat_loop(self):
        """Send periodic heartbeats to server.

        NOTE(review): the timeout check runs only after sleeping a full
        interval, so a dead connection is detected up to one interval late;
        this loop only warns — actual failure detection is left to TCP.
        """
        await asyncio.sleep(5)  # Initial delay

        while self.connection_state['connected']:
            try:
                # Send heartbeat
                await self.send_heartbeat()

                # Wait for next interval
                await asyncio.sleep(self.heartbeat_interval)

                # Check if server supports heartbeat and we haven't received one recently
                if self.connection_state['server_supports_heartbeat']:
                    time_since_last = time.time() - self.connection_state['last_heartbeat_received']
                    if time_since_last > self.heartbeat_timeout:
                        logger.warning(f"Server heartbeat timeout ({time_since_last:.1f}s since last response)")
                        # Connection might be dead, let TCP detect it

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in heartbeat loop: {e}")
                await asyncio.sleep(self.heartbeat_interval)
309 |
310 |
async def bridge_stdio_to_tcp(host: str, port: int, **kwargs):
    """Bridge stdio to TCP connection with automatic reconnection.

    Thin wrapper: builds a TCPClient and runs it until it stops.
    """
    await TCPClient(host, port, **kwargs).run()
315 |
316 |
317 |
318 |
def main():
    """Parse command-line options and run the stdio<->TCP bridge."""
    parser = argparse.ArgumentParser(description='TCP client for MCP servers with reconnection support')
    parser.add_argument('host', help='Remote host to connect to')
    parser.add_argument('port', type=int, help='Remote port to connect to')

    # Optional flags, declared as (flag, add_argument-kwargs) pairs.
    optional_specs = [
        ('--reconnect-delay', dict(type=float, default=2.0,
                                   help='Delay between reconnection attempts in seconds (default: 2)')),
        ('--max-reconnect-attempts', dict(type=int, default=50,
                                          help='Maximum number of reconnection attempts (default: 50)')),
        ('--heartbeat-interval', dict(type=float, default=30.0,
                                      help='Heartbeat interval in seconds (default: 30)')),
        ('--no-reconnect', dict(action='store_true',
                                help='Disable automatic reconnection')),
        ('--debug', dict(action='store_true', help='Enable debug logging')),
    ]
    for flag, options in optional_specs:
        parser.add_argument(flag, **options)

    args = parser.parse_args()

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # --no-reconnect is implemented as a single connection attempt.
    attempts = 1 if args.no_reconnect else args.max_reconnect_attempts
    client_kwargs = {
        'reconnect_delay': args.reconnect_delay,
        'max_reconnect_attempts': attempts,
        'heartbeat_interval': args.heartbeat_interval,
    }

    asyncio.run(bridge_stdio_to_tcp(args.host, args.port, **client_kwargs))


if __name__ == "__main__":
    main()
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/network_resources.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Network monitoring MCP resources (netstat functionality).
3 | """
4 |
5 | import platform
6 | import subprocess
7 |
8 | from mcp.server import FastMCP
9 |
10 |
def register_network_resources(mcp: "FastMCP"):
    """Register all network-related resources with the MCP server.

    Each resource shells out to the platform's networking tools (netstat on
    Windows; ss with a netstat fallback on Linux) and returns the captured
    output as plain text.  The parameter annotation is a forward-reference
    string so the function object can be created without evaluating the
    FastMCP name at definition time.
    """

    @mcp.resource("system://netstat")
    async def get_netstat() -> str:
        """
        Get network connections and statistics with default options.

        Use parameterized versions for specific troubleshooting:
        - system://netstat/listening - Show only listening ports
        - system://netstat/established - Show only established connections
        - system://netstat/all - Show all connections with process info
        - system://netstat/stats - Show network statistics
        - system://netstat/routing - Show routing table
        - system://netstat/port/80 - Show connections on specific port
        """
        # Default to listening ports for quick overview
        return await get_netstat_listening()

    @mcp.resource("system://netstat/listening")
    async def get_netstat_listening() -> str:
        """
        Show all listening ports and services.

        Useful for checking which services are running and what ports are open.
        """
        result = []
        result.append("=== Listening Ports ===\n")

        try:
            if platform.system() == "Windows":
                # Windows netstat command
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "TCP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append("Protocol  Local Address          State")
                    result.append("-" * 50)
                    for line in lines:
                        if "LISTENING" in line:
                            result.append(line.strip())

                # Also show UDP listening
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "UDP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    result.append("\nUDP Listening:")
                    result.append("Protocol  Local Address")
                    result.append("-" * 30)
                    lines = cmd_output.stdout.split("\n")
                    for line in lines:
                        if "UDP" in line and "*:*" in line:
                            result.append(line.strip())

            else:
                # Linux - try ss first (modern), then netstat (legacy)
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tlnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("TCP Listening Ports (ss):")
                        result.append(cmd_output.stdout)

                    # UDP listening
                    cmd_output = subprocess.run(
                        ["ss", "-ulnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nUDP Listening Ports (ss):")
                        result.append(cmd_output.stdout)

                except FileNotFoundError:
                    # Fall back to netstat if ss not available
                    cmd_output = subprocess.run(
                        ["netstat", "-tlnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("TCP Listening Ports (netstat):")
                        result.append(cmd_output.stdout)

                    cmd_output = subprocess.run(
                        ["netstat", "-ulnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nUDP Listening Ports (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting listening ports: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/established")
    async def get_netstat_established() -> str:
        """
        Show all established network connections.

        Useful for seeing active connections and identifying communication patterns.
        """
        result = []
        result.append("=== Established Connections ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "TCP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append(
                        "Protocol  Local Address          Foreign Address        State"
                    )
                    result.append("-" * 70)
                    for line in lines:
                        if "ESTABLISHED" in line:
                            result.append(line.strip())
            else:
                # Linux
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tnp", "state", "established"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append("Established TCP Connections (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        lines = cmd_output.stdout.split("\n")
                        result.append("Established TCP Connections (netstat):")
                        result.append(
                            "Proto Local Address          Foreign Address        State       PID/Program"
                        )
                        result.append("-" * 90)
                        for line in lines:
                            if "ESTABLISHED" in line:
                                result.append(line.strip())

        except Exception as e:
            result.append(f"Error getting established connections: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/all")
    async def get_netstat_all() -> str:
        """
        Show all network connections with process information.

        Comprehensive view including listening, established, and other states.
        """
        result = []
        result.append("=== All Network Connections ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-ano"], capture_output=True, text=True, timeout=15
                )
                if cmd_output.returncode == 0:
                    result.append("All TCP/UDP Connections:")
                    result.append(cmd_output.stdout)
            else:
                try:
                    # Use ss for comprehensive output
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp"], capture_output=True, text=True, timeout=15
                    )
                    if cmd_output.returncode == 0:
                        result.append("All Network Connections (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tulanp"],
                        capture_output=True,
                        text=True,
                        timeout=15,
                    )
                    if cmd_output.returncode == 0:
                        result.append("All Network Connections (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting all connections: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/stats")
    async def get_netstat_stats() -> str:
        """
        Show network interface statistics and protocol statistics.

        Useful for identifying network performance issues and packet loss.
        """
        result = []
        result.append("=== Network Statistics ===\n")

        try:
            if platform.system() == "Windows":
                # Network statistics
                cmd_output = subprocess.run(
                    ["netstat", "-s"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("Protocol Statistics:")
                    result.append(cmd_output.stdout)

                # Interface statistics
                cmd_output = subprocess.run(
                    ["netstat", "-e"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("\nInterface Statistics:")
                    result.append(cmd_output.stdout)
            else:
                # Linux interface statistics
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-i"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Interface Statistics (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-i"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Interface Statistics (netstat):")
                        result.append(cmd_output.stdout)

                # Protocol statistics if available
                try:
                    cmd_output = subprocess.run(
                        ["netstat", "-s"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nProtocol Statistics:")
                        result.append(cmd_output.stdout)
                except Exception:
                    # Best-effort extra section: netstat may be absent on
                    # minimal systems; never let this kill the resource.
                    pass

        except Exception as e:
            result.append(f"Error getting network statistics: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/routing")
    async def get_netstat_routing() -> str:
        """
        Show routing table information.

        Useful for diagnosing routing issues and network connectivity problems.
        """
        result = []
        result.append("=== Routing Table ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-r"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("IPv4 Routing Table:")
                    result.append(cmd_output.stdout)
            else:
                # Linux routing table
                try:
                    cmd_output = subprocess.run(
                        ["ip", "route", "show"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append("Routing Table (ip route):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-r"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Routing Table (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting routing table: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/port/{port}")
    async def get_netstat_port(port: str) -> str:
        """
        Show connections on a specific port.

        Args:
            port: Port number to check (e.g., "80", "443", "22")

        Useful for checking if a service is running on a specific port.
        """
        # Validate early so we never interpolate arbitrary text into commands.
        try:
            port_num = int(port)
        except ValueError:
            return f"Invalid port number: {port}"

        result = []
        result.append(f"=== Connections on Port {port} ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-ano"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append(
                        "Protocol  Local Address          Foreign Address        State       PID"
                    )
                    result.append("-" * 80)
                    for line in lines:
                        if f":{port}" in line:
                            result.append(line.strip())
            else:
                # Linux
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp", f"sport = :{port}"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append(f"Connections on port {port} (ss):")
                        result.append(cmd_output.stdout)

                    # Also check for connections TO this port
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp", f"dport = :{port}"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append(f"\nConnections TO port {port} (ss):")
                        result.append(cmd_output.stdout)

                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tulanp"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        lines = cmd_output.stdout.split("\n")
                        result.append(f"Connections involving port {port} (netstat):")
                        result.append(
                            "Proto Local Address          Foreign Address        State       PID/Program"
                        )
                        result.append("-" * 90)
                        for line in lines:
                            if f":{port}" in line:
                                result.append(line.strip())

        except Exception as e:
            result.append(f"Error checking port {port}: {str(e)}")

        return "\n".join(result)
395 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/evt_parser.py:
--------------------------------------------------------------------------------
```python
1 | """Parser for Windows Event Logs."""
2 |
3 | import datetime
4 | import platform
5 | from pathlib import Path
6 | from typing import Any, Dict, Iterator, List, Optional, Union
7 | from uuid import UUID
8 |
9 | from ..core.models import AnalysisResult, LogRecord, LogSource
10 | from .base import BaseParser
11 |
12 | # Only import Windows-specific modules on Windows
13 | if platform.system() == "Windows":
14 | import win32evtlog
15 | import win32evtlogutil
16 | import win32api
17 | import win32con
18 | from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ
19 | import pywintypes
20 | import xml.etree.ElementTree as ET
21 | else:
22 | # Mock objects for non-Windows platforms
23 | win32evtlog = None
24 | win32evtlogutil = None
25 | win32api = None
26 | win32con = None
27 | pywintypes = None
28 | ET = None
29 | EVENTLOG_BACKWARDS_READ = None
30 | EVENTLOG_SEQUENTIAL_READ = None
31 |
32 |
class EventLogParser(BaseParser):
    """Parser for Windows Event Logs.

    Standard logs (System, Application) are read through the legacy
    ``win32evtlog`` API; custom Application and Services logs (names
    containing ``/`` or ``\\``) go through the newer EvtQuery API.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the Windows Event Log parser.

        Args:
            config: Optional configuration dictionary

        Raises:
            RuntimeError: If constructed on a non-Windows platform.
        """
        super().__init__(config)
        if platform.system() != "Windows":
            raise RuntimeError("Windows Event Log parser is only available on Windows")

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        """Parse a Windows Event Log file.

        Args:
            source: The log source
            file_path: Path to the event log file

        Raises:
            NotImplementedError: Always; event logs are accessed by name,
                not file path, and .evt/.evtx files would need special handling.
        """
        raise NotImplementedError(
            "Direct file parsing not implemented for Windows Event Logs"
        )

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse Windows Event Log content.

        Args:
            source: The log source
            content: The log content (not used for Windows Event Logs)

        Raises:
            NotImplementedError: Always; event logs are binary and
                accessed via API, not text content.
        """
        raise NotImplementedError(
            "Content parsing not applicable for Windows Event Logs"
        )

    def parse(
        self,
        path: str,
        filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        limit: int = 100,
        offset: int = 0,
    ) -> List[LogRecord]:
        """Parse Windows Event Logs.

        Args:
            path: Event log name (e.g., "System", "Application")
            filters: Optional filters
            start_time: Start time filter
            end_time: End time filter
            limit: Maximum number of records
            offset: Number of records to skip

        Returns:
            List of parsed log records (empty on non-Windows platforms).
        """
        if platform.system() != "Windows":
            return []

        records: List[LogRecord] = []

        # Handle both standard logs (System, Application) and custom logs.
        # Custom logs like "Microsoft-Service Fabric/Admin" need the newer API.
        if "/" in path or "\\" in path:
            records = self._parse_custom_event_log(
                path, filters, start_time, end_time, limit, offset
            )
        else:
            # Standard log - use legacy API
            try:
                hand = win32evtlog.OpenEventLog(None, path)
                try:
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    # Continue reading until we have enough records
                    events_read = 0
                    while len(records) < limit:
                        events = win32evtlog.ReadEventLog(hand, flags, 0)
                        if not events:
                            break  # No more events to read

                        for event in events:
                            if events_read >= offset:
                                record = self._parse_event(event, path)
                                if self._matches_filters(
                                    record, filters, start_time, end_time
                                ):
                                    records.append(record)
                                    if len(records) >= limit:
                                        break
                            events_read += 1
                finally:
                    # Always release the log handle, even if reading failed.
                    win32evtlog.CloseEventLog(hand)

            except Exception as e:
                # Best-effort: report and return whatever was collected.
                print(f"Error parsing Windows Event Log '{path}': {str(e)}")

        return records

    def analyze(
        self, logs: List[LogRecord], analysis_type: str = "summary"
    ) -> AnalysisResult:
        """Analyze Windows Event Logs.

        Args:
            logs: List of log records to analyze
            analysis_type: One of "summary", "pattern", "anomaly"

        Returns:
            Analysis result

        Raises:
            ValueError: If analysis_type is not recognized.
        """
        if analysis_type == "summary":
            return self._summary_analysis(logs)
        elif analysis_type == "pattern":
            return self._pattern_analysis(logs)
        elif analysis_type == "anomaly":
            return self._anomaly_analysis(logs)
        else:
            raise ValueError(f"Unknown analysis type: {analysis_type}")

    def _parse_custom_event_log(
        self,
        path: str,
        filters: Optional[Dict[str, Any]],
        start_time: Optional[datetime.datetime],
        end_time: Optional[datetime.datetime],
        limit: int,
        offset: int,
    ) -> List[LogRecord]:
        """Parse custom Application and Services logs using the newer Windows Event Log API."""
        records: List[LogRecord] = []

        try:
            # Build query for the custom log
            query_flags = (
                win32evtlog.EvtQueryChannelPath | win32evtlog.EvtQueryReverseDirection
            )

            # Build XPath query if we have filters
            xpath_query = "*"
            if filters or start_time or end_time:
                conditions = []

                if start_time:
                    # @SystemTime is an ISO-8601 UTC timestamp in the event XML,
                    # so compare against an ISO string, not epoch milliseconds.
                    # Naive datetimes are treated as local time by astimezone().
                    start_iso = start_time.astimezone(datetime.timezone.utc).strftime(
                        "%Y-%m-%dT%H:%M:%S.000Z"
                    )
                    conditions.append(f"TimeCreated[@SystemTime >= '{start_iso}']")

                if end_time:
                    end_iso = end_time.astimezone(datetime.timezone.utc).strftime(
                        "%Y-%m-%dT%H:%M:%S.000Z"
                    )
                    conditions.append(f"TimeCreated[@SystemTime <= '{end_iso}']")

                if filters and "EventID" in filters:
                    conditions.append(f"EventID={filters['EventID']}")

                if conditions:
                    xpath_query = f"*[System[{' and '.join(conditions)}]]"

            # Query the event log
            query_handle = win32evtlog.EvtQuery(path, query_flags, xpath_query)
            try:
                events_read = 0
                while len(records) < limit:
                    # Get batch of events
                    events = win32evtlog.EvtNext(query_handle, 10)  # Read 10 at a time
                    if not events:
                        break

                    for event in events:
                        try:
                            if events_read >= offset:
                                # Render event as XML to extract data
                                xml_content = win32evtlog.EvtRender(
                                    event, win32evtlog.EvtRenderEventXml
                                )
                                record = self._parse_event_xml(xml_content, path)

                                if self._matches_filters(
                                    record, filters, start_time, end_time
                                ):
                                    records.append(record)
                            events_read += 1
                        finally:
                            # Close each event handle even when stopping early.
                            win32evtlog.EvtClose(event)
                        if len(records) >= limit:
                            break
            finally:
                win32evtlog.EvtClose(query_handle)

        except Exception as e:
            # Fall back to whatever was collected if the API is unavailable.
            print(f"Error parsing custom event log '{path}': {str(e)}")

        return records

    def _parse_event_xml(self, xml_content: str, log_name: str) -> LogRecord:
        """Parse event data from XML format into a LogRecord."""
        try:
            root = ET.fromstring(xml_content)

            # Extract system data
            system = root.find(".//System")
            event_id = (
                int(system.find("EventID").text)
                if system.find("EventID") is not None
                else 0
            )

            # Handle unsigned conversion
            event_id = event_id & 0xFFFFFFFF

            provider = system.find("Provider")
            provider_name = (
                provider.get("Name", "Unknown") if provider is not None else "Unknown"
            )

            computer = system.find("Computer")
            computer_name = computer.text if computer is not None else "Unknown"

            time_created = system.find("TimeCreated")
            if time_created is not None:
                system_time = time_created.get("SystemTime", "")
                # Parse ISO format timestamp; fall back to "now" on bad input.
                try:
                    timestamp = datetime.datetime.fromisoformat(
                        system_time.replace("Z", "+00:00")
                    )
                except ValueError:
                    timestamp = datetime.datetime.now()
            else:
                timestamp = datetime.datetime.now()

            level = system.find("Level")
            event_type = (
                int(level.text) if level is not None else 4
            )  # Default to Information

            # Map levels to event types (1=Error, 2=Warning, 4=Information)
            level_map = {
                1: 2,
                2: 3,
                3: 4,
                4: 4,
                5: 4,
            }  # Critical=1, Error=2, Warning=3, Info=4, Verbose=5
            event_type = level_map.get(event_type, 4)

            # Extract event data
            event_data = {}
            data_elem = root.find(".//EventData")
            if data_elem is not None:
                for data in data_elem:
                    name = data.get("Name", "")
                    if name:
                        event_data[name] = data.text or ""

            # Try to get rendered message
            message = ""
            rendering = root.find(".//RenderingInfo/Message")
            if rendering is not None:
                message = rendering.text or ""
            else:
                # Build message from event data
                if event_data:
                    message = "; ".join(f"{k}: {v}" for k, v in event_data.items())

            return LogRecord(
                source_id=UUID("00000000-0000-0000-0000-000000000000"),
                timestamp=timestamp,
                data={
                    "EventID": event_id,
                    "EventType": event_type,
                    "EventCategory": 0,  # Not available in new API
                    "SourceName": provider_name,
                    "ComputerName": computer_name,
                    "Message": message,
                    "EventData": event_data,
                    "LogName": log_name,
                },
            )

        except Exception as e:
            print(f"Error parsing event XML: {str(e)}")
            # Return a basic record on error
            return LogRecord(
                source_id=UUID("00000000-0000-0000-0000-000000000000"),
                timestamp=datetime.datetime.now(),
                data={
                    "EventID": 0,
                    "EventType": 4,
                    "EventCategory": 0,
                    "SourceName": "Unknown",
                    "ComputerName": "Unknown",
                    "Message": f"Error parsing event: {str(e)}",
                    "LogName": log_name,
                },
            )

    def _parse_event(self, event, log_name: Optional[str] = None) -> LogRecord:
        """Parse a single Windows event from the legacy API."""
        try:
            message = win32evtlogutil.SafeFormatMessage(event, log_name)
        except Exception:
            # Message DLL may be missing; keep the record anyway.
            message = "(Unable to format message)"

        return LogRecord(
            source_id=UUID("00000000-0000-0000-0000-000000000000"),  # Placeholder
            timestamp=event.TimeGenerated,
            data={
                "EventID": event.EventID & 0xFFFFFFFF,  # Convert to unsigned
                "EventType": event.EventType,
                "EventCategory": event.EventCategory,
                "SourceName": event.SourceName,
                "ComputerName": event.ComputerName,
                "Message": message,
            },
        )

    def _matches_filters(
        self,
        record: LogRecord,
        filters: Optional[Dict[str, Any]],
        start_time: Optional[datetime.datetime],
        end_time: Optional[datetime.datetime],
    ) -> bool:
        """Check if a record matches the given time window and field filters."""
        if start_time and record.timestamp and record.timestamp < start_time:
            return False
        if end_time and record.timestamp and record.timestamp > end_time:
            return False

        if filters:
            # Only keys present in the record's data are compared.
            for key, value in filters.items():
                if key in record.data and record.data[key] != value:
                    return False

        return True

    def _summary_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform summary analysis: counts by event type and source."""
        event_types: Dict[Any, int] = {}
        sources: Dict[Any, int] = {}

        for log in logs:
            event_type = log.data.get("EventType", "Unknown")
            event_types[event_type] = event_types.get(event_type, 0) + 1

            source = log.data.get("SourceName", "Unknown")
            sources[source] = sources.get(source, 0) + 1

        return AnalysisResult(
            analysis_type="summary",
            summary={
                "total_events": len(logs),
                "event_types": event_types,
                "sources": sources,
            },
        )

    def _pattern_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform pattern analysis: repeated EventIDs and their frequency."""
        patterns = []

        # Group by EventID
        event_groups: Dict[Any, List[LogRecord]] = {}
        for log in logs:
            event_id = log.data.get("EventID", "Unknown")
            if event_id not in event_groups:
                event_groups[event_id] = []
            event_groups[event_id].append(log)

        for event_id, events in event_groups.items():
            if len(events) > 1:
                patterns.append(
                    {
                        "pattern": f"EventID {event_id}",
                        "count": len(events),
                        "frequency": len(events) / len(logs),
                    }
                )

        return AnalysisResult(
            analysis_type="pattern",
            summary={"total_patterns": len(patterns)},
            patterns=patterns,
        )

    def _anomaly_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform anomaly analysis: flag error-level events."""
        anomalies = []

        # Look for error events (EventType 2 in the raw legacy encoding is
        # not used here; this checks the stored value 1 == Error).
        for log in logs:
            if log.data.get("EventType") == 1:  # Error
                anomalies.append(
                    {
                        "type": "error_event",
                        "event_id": log.data.get("EventID"),
                        "source": log.data.get("SourceName"),
                        "message": log.data.get("Message", "")[:100],
                    }
                )

        return AnalysisResult(
            analysis_type="anomaly",
            summary={"total_anomalies": len(anomalies)},
            anomalies=anomalies,
        )
454 |
455 |
# Backward-compatible alias: older code imports ``EvtParser`` by this name.
EvtParser = EventLogParser
458 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/linux_test_tools.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Linux system monitoring and log testing MCP tools.
3 | """
4 |
import platform
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Optional

from mcp.server import FastMCP
from pydantic import BaseModel, Field
12 |
13 |
class LinuxLogTestRequest(BaseModel):
    """Request model for testing Linux log access."""

    # Candidate log files to probe; defaults cover both Debian/Ubuntu
    # (/var/log/syslog) and RHEL/CentOS (/var/log/messages) layouts.
    log_paths: List[str] = Field(
        default_factory=lambda: ["/var/log/syslog", "/var/log/messages"],
        description="List of log file paths to test",
    )
    # When True, also verify that the systemd journal (journalctl) is readable.
    check_journalctl: bool = Field(
        True, description="Whether to test systemd journal access"
    )
24 |
25 |
class LinuxLogQueryRequest(BaseModel):
    """Request model for querying Linux logs via the systemd journal."""

    # These fields default to None (no filter), so they must be annotated
    # Optional[str]: a bare ``str`` with a None default is rejected by
    # pydantic v2, which no longer infers implicit Optional.
    service_name: Optional[str] = Field(
        None, description="Specific service to query logs for"
    )
    priority: Optional[str] = Field(
        None,
        description="Log priority (emerg, alert, crit, err, warning, notice, info, debug)",
    )
    # Relative window passed to journalctl --since after parsing.
    time_duration: str = Field(
        "1h", description="Time duration (e.g., '30m', '2h', '1d')"
    )
    max_lines: int = Field(100, description="Maximum number of log lines to return")
38 |
39 |
class LinuxServiceAnalysisRequest(BaseModel):
    """Request model for analyzing Linux services."""

    # None means "analyze all services"; must be Optional[str] because a
    # bare ``str`` with a None default is invalid under pydantic v2.
    service_pattern: Optional[str] = Field(
        None, description="Service name pattern to analyze (e.g., 'nginx', 'apache')"
    )
    include_failed: bool = Field(
        True, description="Include failed services in analysis"
    )
49 |
50 |
51 | def register_linux_test_tools(mcp: FastMCP):
52 | """Register all Linux testing tools with the MCP server."""
53 |
54 | @mcp.tool()
55 | async def test_linux_log_access() -> Dict[str, Any]:
56 | """
57 | Test Linux log file and systemd journal access.
58 |
59 | This tool checks if the system can access various Linux log sources
60 | and provides diagnostic information about available logs.
61 | """
62 | if platform.system() != "Linux":
63 | return {
64 | "status": "unavailable",
65 | "message": "Linux log tools are only available on Linux systems",
66 | "platform": platform.system(),
67 | }
68 |
69 | test_results = {
70 | "log_files": {},
71 | "systemd_journal": {"available": False, "accessible": False},
72 | "commands": {},
73 | }
74 |
75 | # Test common log file access
76 | common_logs = {
77 | "/var/log/syslog": "System log (Debian/Ubuntu)",
78 | "/var/log/messages": "System log (RHEL/CentOS)",
79 | "/var/log/auth.log": "Authentication log",
80 | "/var/log/kern.log": "Kernel log",
81 | "/var/log/dmesg": "Boot messages",
82 | }
83 |
84 | for log_path, description in common_logs.items():
85 | path_obj = Path(log_path)
86 | if path_obj.exists():
87 | try:
88 | # Test read access
89 | with open(log_path, "r") as f:
90 | f.read(100) # Read first 100 chars
91 | test_results["log_files"][log_path] = {
92 | "exists": True,
93 | "readable": True,
94 | "description": description,
95 | "size_mb": round(path_obj.stat().st_size / (1024 * 1024), 2),
96 | }
97 | except PermissionError:
98 | test_results["log_files"][log_path] = {
99 | "exists": True,
100 | "readable": False,
101 | "description": description,
102 | "error": "Permission denied",
103 | }
104 | except Exception as e:
105 | test_results["log_files"][log_path] = {
106 | "exists": True,
107 | "readable": False,
108 | "description": description,
109 | "error": str(e),
110 | }
111 | else:
112 | test_results["log_files"][log_path] = {
113 | "exists": False,
114 | "readable": False,
115 | "description": description,
116 | }
117 |
118 | # Test systemd journal access
119 | try:
120 | result = subprocess.run(
121 | ["journalctl", "--version"],
122 | capture_output=True,
123 | text=True,
124 | timeout=5,
125 | )
126 | if result.returncode == 0:
127 | test_results["systemd_journal"]["available"] = True
128 |
129 | # Test actual journal access
130 | try:
131 | result = subprocess.run(
132 | ["journalctl", "-n", "1", "--no-pager"],
133 | capture_output=True,
134 | text=True,
135 | timeout=5,
136 | )
137 | test_results["systemd_journal"]["accessible"] = (
138 | result.returncode == 0
139 | )
140 | if result.returncode != 0:
141 | test_results["systemd_journal"]["error"] = result.stderr
142 | except Exception as e:
143 | test_results["systemd_journal"]["error"] = str(e)
144 | except FileNotFoundError:
145 | test_results["systemd_journal"]["available"] = False
146 | except Exception as e:
147 | test_results["systemd_journal"]["error"] = str(e)
148 |
149 | # Test common system commands
150 | commands_to_test = ["ss", "netstat", "ps", "top", "systemctl"]
151 | for cmd in commands_to_test:
152 | try:
153 | result = subprocess.run(
154 | [cmd, "--version"],
155 | capture_output=True,
156 | text=True,
157 | timeout=5,
158 | )
159 | test_results["commands"][cmd] = {"available": True}
160 | except FileNotFoundError:
161 | test_results["commands"][cmd] = {"available": False}
162 | except Exception:
163 | # Some commands might not support --version but still exist
164 | test_results["commands"][cmd] = {
165 | "available": True,
166 | "version_check_failed": True,
167 | }
168 |
169 | return {
170 | "status": "completed",
171 | "platform": platform.system(),
172 | "distribution": platform.platform(),
173 | "test_results": test_results,
174 | }
175 |
176 | @mcp.tool()
177 | async def query_systemd_journal(request: LinuxLogQueryRequest) -> Dict[str, Any]:
178 | """
179 | Query systemd journal with specific criteria.
180 |
181 | This tool allows filtering systemd journal entries by service,
182 | priority, and time range for targeted analysis.
183 | """
184 | if platform.system() != "Linux":
185 | return {"error": "This tool is only available on Linux systems"}
186 |
187 | try:
188 | from ..server import parse_time_param
189 |
190 | # Build journalctl command
191 | cmd = ["journalctl", "--no-pager", "-o", "short"]
192 |
193 | # Add time filter
194 | if request.time_duration:
195 | try:
196 | start_time = parse_time_param(request.time_duration)
197 | since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
198 | cmd.append(since_arg)
199 | except Exception as e:
200 | return {"error": f"Invalid time duration: {str(e)}"}
201 |
202 | # Add service filter
203 | if request.service_name:
204 | cmd.extend(["-u", request.service_name])
205 |
206 | # Add priority filter
207 | if request.priority:
208 | cmd.extend(["-p", request.priority])
209 |
210 | # Add line limit
211 | cmd.extend(["-n", str(request.max_lines)])
212 |
213 | # Execute command
214 | result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
215 |
216 | if result.returncode == 0:
217 | lines = result.stdout.strip().split("\n")
218 | return {
219 | "query_criteria": {
220 | "service_name": request.service_name,
221 | "priority": request.priority,
222 | "time_duration": request.time_duration,
223 | "max_lines": request.max_lines,
224 | },
225 | "log_entries": lines,
226 | "total_entries": len(lines),
227 | "command_used": " ".join(cmd),
228 | }
229 | else:
230 | return {
231 | "error": f"journalctl command failed: {result.stderr}",
232 | "command_used": " ".join(cmd),
233 | }
234 |
235 | except Exception as e:
236 | return {"error": f"Error querying systemd journal: {str(e)}"}
237 |
238 | @mcp.tool()
239 | async def analyze_linux_services(
240 | request: LinuxServiceAnalysisRequest,
241 | ) -> Dict[str, Any]:
242 | """
243 | Analyze Linux services status and recent activity.
244 |
245 | This tool provides an overview of systemd services, their status,
246 | and recent log activity for system health assessment.
247 | """
248 | if platform.system() != "Linux":
249 | return {"error": "This tool is only available on Linux systems"}
250 |
251 | try:
252 | # Get service status
253 | cmd = ["systemctl", "list-units", "--type=service", "--no-pager"]
254 | if request.include_failed:
255 | cmd.append("--failed")
256 |
257 | result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
258 |
259 | services_info = {"active": [], "failed": [], "summary": {}}
260 |
261 | if result.returncode == 0:
262 | lines = result.stdout.strip().split("\n")
263 |
264 | for line in lines[1:]: # Skip header
265 | if not line.strip() or "LOAD" in line:
266 | continue
267 |
268 | parts = line.split()
269 | if len(parts) >= 4:
270 | service_name = parts[0]
271 | load_state = parts[1]
272 | active_state = parts[2]
273 | sub_state = parts[3]
274 |
275 | # Filter by pattern if specified
276 | if (
277 | request.service_pattern
278 | and request.service_pattern.lower()
279 | not in service_name.lower()
280 | ):
281 | continue
282 |
283 | service_info = {
284 | "name": service_name,
285 | "load": load_state,
286 | "active": active_state,
287 | "sub": sub_state,
288 | }
289 |
290 | if active_state == "active":
291 | services_info["active"].append(service_info)
292 | elif active_state == "failed":
293 | services_info["failed"].append(service_info)
294 |
295 | # Get recent journal entries for failed services
296 | if services_info["failed"]:
297 | for service in services_info["failed"][
298 | :5
299 | ]: # Limit to first 5 failed services
300 | try:
301 | journal_cmd = [
302 | "journalctl",
303 | "-u",
304 | service["name"],
305 | "-n",
306 | "10",
307 | "--no-pager",
308 | "--since",
309 | "1 hour ago",
310 | ]
311 | journal_result = subprocess.run(
312 | journal_cmd, capture_output=True, text=True, timeout=5
313 | )
314 | if journal_result.returncode == 0:
315 | service["recent_logs"] = (
316 | journal_result.stdout.strip().split("\n")[-5:]
317 | )
318 | except Exception:
319 | service["recent_logs"] = ["Unable to fetch recent logs"]
320 |
321 | services_info["summary"] = {
322 | "total_active": len(services_info["active"]),
323 | "total_failed": len(services_info["failed"]),
324 | "pattern_filter": request.service_pattern,
325 | }
326 |
327 | # Overall system health assessment
328 | if len(services_info["failed"]) == 0:
329 | health_status = "healthy"
330 | elif len(services_info["failed"]) < 3:
331 | health_status = "fair"
332 | else:
333 | health_status = "concerning"
334 |
335 | return {
336 | "health_status": health_status,
337 | "services": services_info,
338 | "analysis_criteria": {
339 | "service_pattern": request.service_pattern,
340 | "include_failed": request.include_failed,
341 | },
342 | "timestamp": subprocess.run(
343 | ["date", "+%Y-%m-%d %H:%M:%S"], capture_output=True, text=True
344 | ).stdout.strip(),
345 | }
346 |
347 | except Exception as e:
348 | return {"error": f"Error analyzing Linux services: {str(e)}"}
349 |
350 | @mcp.tool()
351 | async def get_linux_system_overview() -> Dict[str, Any]:
352 | """
353 | Get comprehensive Linux system overview.
354 |
355 | This tool provides system information, resource usage,
356 | and health indicators for Linux systems.
357 | """
358 | if platform.system() != "Linux":
359 | return {"error": "This tool is only available on Linux systems"}
360 |
361 | try:
362 | system_info = {}
363 |
364 | # Basic system information
365 | system_info["system"] = {
366 | "hostname": subprocess.run(
367 | ["hostname"], capture_output=True, text=True
368 | ).stdout.strip(),
369 | "uptime": subprocess.run(
370 | ["uptime"], capture_output=True, text=True
371 | ).stdout.strip(),
372 | "kernel": subprocess.run(
373 | ["uname", "-r"], capture_output=True, text=True
374 | ).stdout.strip(),
375 | "distribution": platform.platform(),
376 | }
377 |
378 | # Memory information
379 | try:
380 | with open("/proc/meminfo", "r") as f:
381 | meminfo = f.read()
382 | mem_lines = meminfo.split("\n")
383 | mem_total = next(
384 | (line for line in mem_lines if line.startswith("MemTotal:")), ""
385 | )
386 | mem_available = next(
387 | (
388 | line
389 | for line in mem_lines
390 | if line.startswith("MemAvailable:")
391 | ),
392 | "",
393 | )
394 |
395 | system_info["memory"] = {
396 | "total": mem_total.split()[1] + " kB" if mem_total else "Unknown",
397 | "available": (
398 | mem_available.split()[1] + " kB" if mem_available else "Unknown"
399 | ),
400 | }
401 | except Exception:
402 | system_info["memory"] = {"error": "Unable to read memory information"}
403 |
404 | # CPU information
405 | try:
406 | with open("/proc/loadavg", "r") as f:
407 | loadavg = f.read().strip()
408 | system_info["cpu"] = {"load_average": loadavg}
409 | except Exception:
410 | system_info["cpu"] = {"error": "Unable to read CPU information"}
411 |
412 | # Disk usage for root filesystem
413 | try:
414 | df_result = subprocess.run(
415 | ["df", "-h", "/"], capture_output=True, text=True
416 | )
417 | if df_result.returncode == 0:
418 | df_lines = df_result.stdout.strip().split("\n")
419 | if len(df_lines) > 1:
420 | root_disk = df_lines[1].split()
421 | system_info["disk"] = {
422 | "filesystem": root_disk[0],
423 | "size": root_disk[1],
424 | "used": root_disk[2],
425 | "available": root_disk[3],
426 | "use_percent": root_disk[4],
427 | }
428 | except Exception:
429 | system_info["disk"] = {"error": "Unable to read disk information"}
430 |
431 | # Recent critical logs
432 | try:
433 | journal_result = subprocess.run(
434 | [
435 | "journalctl",
436 | "-p",
437 | "err",
438 | "-n",
439 | "5",
440 | "--no-pager",
441 | "--since",
442 | "1 hour ago",
443 | ],
444 | capture_output=True,
445 | text=True,
446 | timeout=5,
447 | )
448 | if journal_result.returncode == 0:
449 | recent_errors = journal_result.stdout.strip().split("\n")
450 | system_info["recent_errors"] = (
451 | recent_errors if recent_errors != [""] else []
452 | )
453 | else:
454 | system_info["recent_errors"] = ["Unable to fetch recent error logs"]
455 | except Exception:
456 | system_info["recent_errors"] = ["Error accessing systemd journal"]
457 |
458 | return {
459 | "status": "success",
460 | "system_overview": system_info,
461 | "timestamp": subprocess.run(
462 | ["date", "+%Y-%m-%d %H:%M:%S"], capture_output=True, text=True
463 | ).stdout.strip(),
464 | }
465 |
466 | except Exception as e:
467 | return {"error": f"Error getting Linux system overview: {str(e)}"}
468 |
```