This is page 1 of 4. Use http://codebase.md/sedwardstx/demomcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .gitignore
├── .mcp.json
├── check_server.py
├── CLAUDE.md
├── config
│ └── default.yml
├── docs
│ ├── api_reference.md
│ ├── demo-recording
│ │ └── MCPDemo.gif
│ ├── example-context-docs
│ │ ├── mcp-ai-agent-architecture.md
│ │ ├── mcp-ai-agent-dev-task.md
│ │ └── mcp-ai-agent-prd.md
│ └── getting_started.md
├── LICENSE
├── main_tcp.py
├── main.py
├── mcp_tcp_client.py
├── pyproject.toml
├── QUICK_START.md
├── README.md
├── scripts
│ └── test_server.py
├── setup.py
├── src
│ └── mcp_log_analyzer
│ ├── __init__.py
│ ├── api
│ │ ├── __init__.py
│ │ └── server.py
│ ├── config
│ │ ├── __init__.py
│ │ └── settings.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── config.py
│ │ ├── models.py
│ │ └── state_manager.py
│ ├── mcp_server
│ │ ├── __init__.py
│ │ ├── models
│ │ │ ├── __init__.py
│ │ │ └── schemas.py
│ │ ├── prompts
│ │ │ ├── __init__.py
│ │ │ ├── linux_testing_prompt.py
│ │ │ ├── log_management_prompt.py
│ │ │ ├── mcp_assets_overview_prompt.py
│ │ │ ├── network_testing_prompt.py
│ │ │ ├── process_monitoring_prompt.py
│ │ │ └── windows_testing_prompt.py
│ │ ├── resources
│ │ │ ├── __init__.py
│ │ │ ├── linux_resources.py
│ │ │ ├── logs_resources.py
│ │ │ ├── network_resources.py
│ │ │ ├── process_resources.py
│ │ │ └── windows_resources.py
│ │ ├── server.py
│ │ └── tools
│ │ ├── __init__.py
│ │ ├── health_check_tools.py
│ │ ├── linux_test_tools.py
│ │ ├── log_management_tools.py
│ │ ├── network_test_tools.py
│ │ ├── process_test_tools.py
│ │ └── windows_test_tools.py
│ ├── parsers
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── csv_parser.py
│ │ ├── etl_cached_parser.py
│ │ ├── etl_large_file_parser.py
│ │ ├── etl_parser.py
│ │ ├── etl_windows_parser.py
│ │ └── evt_parser.py
│ └── tcp_proxy.py
├── TCP_PROXY_README.md
├── tcp_proxy.py
├── tcp_server.py
├── test_server.py
├── test_tcp_proxy.py
├── test_windows_setup.py
└── tests
├── test_base_parser.py
├── test_mcp_server.py
├── test_tool_utils.py
└── test_utils.py
```
# Files
--------------------------------------------------------------------------------
/.mcp.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "mcps": {
3 | "mcp-log-analyzer": {
4 | "command": "python",
5 | "args": ["main.py"],
6 | "env": {}
7 | }
8 | }
9 | }
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | .idea/
161 |
162 | # VS Code
163 | .vscode/
164 | *.code-workspace
165 |
166 | # Local development
167 | local/
168 | tmp/
169 | temp/
170 |
171 | # OS specific
172 | .DS_Store
173 | .DS_Store?
174 | ._*
175 | .Spotlight-V100
176 | .Trashes
177 | ehthumbs.db
178 | Thumbs.db
179 |
180 | # Project specific
181 | data/
182 | logs/
183 | *.db
184 | *.sqlite
185 | *.sqlite3
186 | config/local.yaml
187 | config/local.yml
188 | config/secrets.yaml
189 | config/secrets.yml
190 |
191 | # MCP specific
192 | .mcp/
193 | mcp_logs/
194 | mcp_data/
195 |
196 | # Test artifacts
197 | .pytest_cache/
198 | test-results/
199 | test-reports/
200 | *.coverage
201 | htmlcov/
202 |
203 | # Documentation build
204 | docs/_build/
205 | docs/.doctrees/
206 |
207 | # Backup files
208 | *.bak
209 | *.backup
210 | *~
211 | *.swp
212 | *.swo
213 |
214 | # Package files
215 | *.tar.gz
216 | *.zip
217 | *.7z
218 | *.rar
219 |
220 | # SSL certificates (for development)
221 | *.pem
222 | *.key
223 | *.crt
224 | *.csr
225 |
226 | # Environment-specific files
227 | .env.local
228 | .env.development
229 | .env.test
230 | .env.production
231 |
232 | # Log files
233 | *.log
234 | logs/
235 | *.log.*
236 |
237 | # Database files
238 | *.db
239 | *.sqlite
240 | *.sqlite3
241 | data/
242 |
243 | # Cache directories
244 | .cache/
245 | __pycache__/
246 | *.pyc
247 |
248 | # Temporary files
249 | *.tmp
250 | *.temp
251 | ~$*
252 |
253 | # Editor backup files
254 | *~
255 | \#*\#
256 | .\#*
257 |
258 | # macOS
259 | .DS_Store
260 | .AppleDouble
261 | .LSOverride
262 |
263 | # Windows
264 | Thumbs.db
265 | ehthumbs.db
266 | Desktop.ini
267 |
268 | # Linux
269 | .directory
270 | .Trash-*
271 |
272 | # JetBrains IDEs
273 | .idea/
274 | *.iml
275 | *.iws
276 | *.ipr
277 |
278 | # Visual Studio Code
279 | .vscode/
280 | !.vscode/settings.json
281 | !.vscode/tasks.json
282 | !.vscode/launch.json
283 | !.vscode/extensions.json
284 |
285 | # Sublime Text
286 | *.sublime-workspace
287 | *.sublime-project
288 |
289 | # Vim
290 | *.swp
291 | *.swo
292 | *~
293 |
294 | # Emacs
295 | *~
296 | \#*\#
297 | .\#*
298 | .projectile
299 |
300 | # Project specific ignores
301 | /data/
302 | /logs/
303 | /models/
304 | /output/
305 | /results/
306 | /temp/
307 | /tmp/
308 | /cache/
309 | *.pid
310 | *.seed
311 | *.pid.lock
312 |
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP Log Analyzer
2 |
3 | A Model Context Protocol (MCP) server for analyzing different types of logs on Windows and Linux systems, built with the FastMCP framework.
4 |
5 | ## Features
6 |
7 | - **Multiple Log Format Support**
8 | - Windows Event Logs (EVT/EVTX)
9 | - Windows Event Trace Logs (ETL)
10 | - Structured Logs (JSON, XML)
11 | - CSV Logs
12 | - Unstructured Text Logs
13 |
14 | - **MCP Tools**
15 | - `register_log_source`: Register new log sources
16 | - `list_log_sources`: View all registered sources
17 | - `get_log_source`: Get details about a specific source
18 | - `delete_log_source`: Remove a log source
19 | - `query_logs`: Query logs with filters and pagination
20 | - `analyze_logs`: Perform analysis (summary, pattern, anomaly)
21 |
22 | - **MCP Resources**
23 | - `logs://sources`: View registered log sources
24 | - `logs://types`: Learn about supported log types
25 | - `logs://analysis-types`: Understand analysis options
26 | - `system://windows-event-logs`: Recent Windows System and Application event logs
27 | - `system://linux-logs`: Linux systemd journal and application logs
28 | - `system://process-list`: Current processes with PID, CPU, and memory usage
29 | - `system://netstat`: Network connections and statistics for troubleshooting
30 |
31 | - **MCP Prompts**
32 | - Log analysis quickstart guide
33 | - Troubleshooting guide
34 | - Windows Event Log specific guide
35 |
36 | ## Installation
37 |
38 | ```bash
39 | # Clone the repository
40 | git clone https://github.com/your-username/mcp-log-analyzer.git
41 | cd mcp-log-analyzer
42 |
43 | # Install the package
44 | pip install -e .
45 |
46 | # For ETL file support (optional)
47 | pip install -e ".[etl]"
48 |
49 | # For development dependencies
50 | pip install -e ".[dev]"
51 | ```
52 |
53 | ### Windows Setup
54 |
55 | On Windows, the package includes Windows Event Log support via `pywin32`. If you encounter import errors:
56 |
57 | ```powershell
58 | # Ensure Windows dependencies are installed
59 | pip install pywin32>=300
60 |
61 | # Test the setup
62 | python test_windows_setup.py
63 |
64 | # If successful, start the server
65 | python main.py
66 | ```
67 |
68 | **Note**: On first install of `pywin32`, you may need to run the post-install script:
69 | ```powershell
70 | python Scripts/pywin32_postinstall.py -install
71 | ```
72 |
73 | ## Usage
74 |
75 | ### Understanding MCP Servers
76 |
77 | MCP (Model Context Protocol) servers don't have traditional web endpoints. They communicate via stdin/stdout with MCP clients (like Claude Code). When you run `python main.py`, the server starts silently and waits for MCP protocol messages.
78 |
79 | ### Testing the Server
80 |
81 | ```bash
82 | # Test that the server is working
83 | python check_server.py
84 |
85 | # See usage instructions
86 | python check_server.py --usage
87 | ```
88 |
89 | ### Starting the MCP Server
90 |
91 | ```bash
92 | # Run directly
93 | python main.py
94 |
95 | # Or use Claude Code's MCP integration
96 | claude mcp add mcp-log-analyzer python main.py
97 | ```
98 |
99 | ### Using with Claude Code
100 |
101 | 1. **Add the server to Claude Code:**
102 | ```bash
103 | claude mcp add mcp-log-analyzer python /path/to/main.py
104 | ```
105 |
106 | 2. **Use the tools in Claude Code:**
107 | - Register a log source: Use the `register_log_source` tool
108 | - Query logs: Use the `query_logs` tool
109 | - Analyze logs: Use the `analyze_logs` tool
110 |
111 | 3. **Access resources:**
112 | - Reference resources using `@mcp-log-analyzer:logs://sources`
113 | - Get help with prompts like `/mcp__mcp-log-analyzer__log_analysis_quickstart`
114 |
115 | ## System Monitoring Resources
116 |
117 | These resources provide real-time system information without needing to register log sources:
118 |
119 | 1. **Check System Processes:**
120 | - Access via `@mcp-log-analyzer:system://process-list`
121 | - Shows top processes by CPU usage with memory information
122 |
123 | 2. **Windows Event Logs** (Windows only):
124 | - Default: `@mcp-log-analyzer:system://windows-event-logs` (last 10 entries)
125 | - By count: `@mcp-log-analyzer:system://windows-event-logs/last/50` (last 50 entries)
126 | - By time: `@mcp-log-analyzer:system://windows-event-logs/time/30m` (last 30 minutes)
127 | - By range: `@mcp-log-analyzer:system://windows-event-logs/range/2025-01-07 13:00/2025-01-07 14:00`
128 | - Shows System and Application event log entries
129 |
130 | 3. **Linux System Logs** (Linux only):
131 | - Default: `@mcp-log-analyzer:system://linux-logs` (last 50 lines)
132 | - By count: `@mcp-log-analyzer:system://linux-logs/last/100` (last 100 lines)
133 | - By time: `@mcp-log-analyzer:system://linux-logs/time/1h` (last hour)
134 | - By range: `@mcp-log-analyzer:system://linux-logs/range/2025-01-07 13:00/2025-01-07 14:00`
135 | - Shows systemd journal, syslog, and common application logs
136 |
137 | 4. **Network Monitoring** (Cross-platform):
138 | - Default: `@mcp-log-analyzer:system://netstat` (listening ports)
139 | - Listening ports: `@mcp-log-analyzer:system://netstat/listening`
140 | - Established connections: `@mcp-log-analyzer:system://netstat/established`
141 | - All connections: `@mcp-log-analyzer:system://netstat/all`
142 | - Network statistics: `@mcp-log-analyzer:system://netstat/stats`
143 | - Routing table: `@mcp-log-analyzer:system://netstat/routing`
144 | - Port-specific: `@mcp-log-analyzer:system://netstat/port/80`
145 | - Uses netstat on Windows, ss (preferred) or netstat on Linux
146 |
147 | ### Time Format Examples:
148 | - **Relative time**: `30m` (30 minutes), `2h` (2 hours), `1d` (1 day)
149 | - **Absolute time**: `2025-01-07 13:00`, `2025-01-07 13:30:15`, `07/01/2025 13:00`
150 |
151 | ## Example Workflow
152 |
153 | 1. **Register a Windows System Log:**
154 | ```
155 | Use register_log_source tool with:
156 | - name: "system-logs"
157 | - source_type: "evt"
158 | - path: "System"
159 | ```
160 |
161 | 2. **Query Recent Errors:**
162 | ```
163 | Use query_logs tool with:
164 | - source_name: "system-logs"
165 | - filters: {"level": "Error"}
166 | - limit: 10
167 | ```
168 |
169 | 3. **Analyze Patterns:**
170 | ```
171 | Use analyze_logs tool with:
172 | - source_name: "system-logs"
173 | - analysis_type: "pattern"
174 | ```
175 |
176 | 4. **Register an ETL File:**
177 | ```
178 | Use register_log_source tool with:
179 | - name: "network-trace"
180 | - source_type: "etl"
181 | - path: "C:\\Traces\\network.etl"
182 | ```
183 |
184 | ## Development
185 |
186 | ```bash
187 | # Run tests
188 | pytest
189 |
190 | # Code formatting
191 | black .
192 | isort .
193 |
194 | # Type checking
195 | mypy src
196 |
197 | # Run all quality checks
198 | black . && isort . && mypy src && flake8
199 | ```
200 |
201 | ## Project Structure
202 |
203 | - `src/mcp_log_analyzer/`: Main package
204 | - `mcp_server/`: MCP server implementation using FastMCP
205 | - `core/`: Core functionality and models
206 | - `parsers/`: Log parsers for different formats
207 | - `main.py`: Server entry point
208 | - `.mcp.json`: MCP configuration
209 | - `tests/`: Test files
210 |
211 | ## Requirements
212 |
213 | - Python 3.12+
214 | - Windows OS (for Event Log support)
215 | - See `pyproject.toml` for full dependencies
216 |
217 | ## License
218 |
219 | MIT
```
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
```markdown
1 | # CLAUDE.md
2 |
3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
4 |
5 | ## Development Commands
6 |
7 | ### Build and Install
8 | ```bash
9 | # Install the package in development mode
10 | pip install -e .
11 |
12 | # Install with development dependencies
13 | pip install -e ".[dev]"
14 |
15 | # On Windows, ensure pywin32 is properly installed for Event Log access
16 | # If you encounter issues, try:
17 | pip install pywin32>=300
18 | python -c "import win32api" # Test Windows API access
19 | ```
20 |
21 | ### Code Quality
22 | ```bash
23 | # Format code
24 | black .
25 | isort .
26 |
27 | # Type checking
28 | mypy src
29 |
30 | # Linting
31 | flake8
32 |
33 | # Run all quality checks
34 | black . && isort . && mypy src && flake8
35 | ```
36 |
37 | ### Testing
38 | ```bash
39 | # Run all tests with proper PYTHONPATH
40 | PYTHONPATH=src python3 -m pytest tests/ -v
41 |
42 | # Run tests with coverage
43 | PYTHONPATH=src python3 -m pytest --cov=mcp_log_analyzer tests/
44 |
45 | # Run specific test file
46 | PYTHONPATH=src python3 -m pytest tests/test_base_parser.py -v
47 |
48 | # Test server import
49 | PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('Server import successful')"
50 | ```
51 |
52 | ### Running the MCP Server
53 |
54 | **Important**: MCP servers don't show output when started - they communicate via stdin/stdout with MCP clients.
55 |
56 | ```bash
57 | # Start the MCP server (runs silently)
58 | python main.py
59 |
60 | # Test the server is working
61 | python check_server.py
62 |
63 | # Add to Claude Code
64 | claude mcp add mcp-log-analyzer python main.py
65 |
66 | # List MCP servers
67 | claude mcp list
68 |
69 | # Remove MCP server
70 | claude mcp remove mcp-log-analyzer
71 | ```
72 |
73 | **No Output is Normal**: When you run `python main.py`, you won't see any console output. The server is running and waiting for MCP protocol messages from clients like Claude Code.
74 |
75 | ## Architecture Overview
76 |
77 | ### MCP Server Structure
78 | This project follows the FastMCP framework pattern, refactored from the quick-data-mcp architecture:
79 |
80 | - **Entry Point** (`main.py`): Simple script that imports and runs the MCP server
81 | - **MCP Server** (`src/mcp_log_analyzer/mcp_server/server.py`): FastMCP server coordinator
82 | - **Tools** (`src/mcp_log_analyzer/mcp_server/tools/`): Organized MCP tools by category
83 | - **Resources** (`src/mcp_log_analyzer/mcp_server/resources/`): System monitoring resources
84 | - **Prompts** (`src/mcp_log_analyzer/mcp_server/prompts/`): Comprehensive user guides
85 | - **Core Logic** (`src/mcp_log_analyzer/core/`): Models and configuration
86 | - **Parsers** (`src/mcp_log_analyzer/parsers/`): Log format-specific parsers
87 |
88 | ### Organized Tool Structure
89 |
90 | **1. Core Log Management Tools** (`tools/log_management_tools.py`):
91 | - `register_log_source`: Register new log sources (Windows Event Logs, JSON, XML, CSV, text)
92 | - `list_log_sources`: List all registered log sources
93 | - `get_log_source`: Get details about specific log source
94 | - `delete_log_source`: Remove log source
95 | - `query_logs`: Query logs with filters and time ranges
96 | - `analyze_logs`: Perform pattern detection and anomaly analysis
97 |
98 | **2. Windows System Tools** (`tools/windows_test_tools.py`):
99 | - `test_windows_event_log_access`: Test Windows Event Log access and permissions
100 | - `get_windows_event_log_info`: Get detailed Windows Event Log information
101 | - `query_windows_events_by_criteria`: Query Windows Events with specific filters
102 | - `get_windows_system_health`: Windows system health overview from Event Logs
103 |
104 | **3. Linux System Tools** (`tools/linux_test_tools.py`):
105 | - `test_linux_log_access`: Test Linux log file and systemd journal access
106 | - `query_systemd_journal`: Query systemd journal with specific criteria
107 | - `analyze_linux_services`: Analyze Linux services status and recent activity
108 | - `get_linux_system_overview`: Comprehensive Linux system overview
109 |
110 | **4. Process Monitoring Tools** (`tools/process_test_tools.py`):
111 | - `test_system_resources_access`: Test system resource monitoring capabilities
112 | - `analyze_system_performance`: Analyze current system performance and resource usage
113 | - `find_resource_intensive_processes`: Find processes consuming significant resources
114 | - `monitor_process_health`: Monitor health and status of specific processes
115 | - `get_system_health_summary`: Overall system health summary
116 |
117 | **5. Network Diagnostic Tools** (`tools/network_test_tools.py`):
118 | - `test_network_tools_availability`: Test availability of network diagnostic tools
119 | - `test_port_connectivity`: Test connectivity to specific ports
120 | - `test_network_connectivity`: Test network connectivity to various hosts
121 | - `analyze_network_connections`: Analyze current network connections and listening ports
122 | - `diagnose_network_issues`: Comprehensive network diagnostics
123 |
124 | ### Organized Resource Structure
125 |
126 | **1. Log Management Resources** (`resources/logs_resources.py`):
127 | - `logs/sources`: List of registered log sources
128 | - `logs/source/{name}`: Details about specific log source
129 |
130 | **2. Windows Resources** (`resources/windows_resources.py`):
131 | - `windows/system-events/{param}`: Windows System Event logs with configurable parameters
132 | - `windows/application-events/{param}`: Windows Application Event logs with configurable parameters
133 |
134 | **3. Linux Resources** (`resources/linux_resources.py`):
135 | - `linux/systemd-logs/{param}`: Linux systemd journal logs with configurable parameters
136 | - `linux/system-logs/{param}`: Linux system logs with configurable parameters
137 |
138 | **4. Process Resources** (`resources/process_resources.py`):
139 | - `processes/list`: Current running processes with PID, CPU, and memory usage
140 | - `processes/summary`: Process summary statistics
141 |
142 | **5. Network Resources** (`resources/network_resources.py`):
143 | - `network/listening-ports`: Currently listening network ports
144 | - `network/established-connections`: Active network connections
145 | - `network/all-connections`: All network connections and statistics
146 | - `network/statistics`: Network interface statistics
147 | - `network/routing-table`: Network routing table
148 | - `network/port/{port}`: Specific port information
149 |
150 | ### Resource Parameters
151 | System monitoring resources support flexible parameters:
152 | - `/last/{n}` - Get last N entries (e.g., `/last/50`)
153 | - `/time/{duration}` - Get entries from time duration (e.g., `/time/30m`, `/time/2h`, `/time/1d`)
154 | - `/range/{start}/{end}` - Get entries from time range (e.g., `/range/2025-01-07 13:00/2025-01-07 14:00`)
155 |
156 | **Time Format Support**:
157 | - Relative: `30m`, `2h`, `1d`
158 | - Absolute: `2025-01-07 13:00`, `07/01/2025 13:30`
159 |
160 | ### Comprehensive Prompt System
161 |
162 | **1. MCP Assets Overview** (`prompts/mcp_assets_overview_prompt.py`):
163 | - Complete reference to all available tools, 10+ resources, and usage examples
164 | - Platform support information and getting started guide
165 |
166 | **2. Log Management Prompts** (`prompts/log_management_prompt.py`):
167 | - `log_management_guide`: Comprehensive log management and analysis guide
168 | - `log_troubleshooting_guide`: Troubleshooting common log analysis issues
169 |
170 | **3. Windows Testing Prompts** (`prompts/windows_testing_prompt.py`):
171 | - `windows_diagnostics_guide`: Windows system diagnostics and Event Log analysis
172 | - `windows_event_reference`: Quick reference for Windows Event IDs and meanings
173 |
174 | **4. Linux Testing Prompts** (`prompts/linux_testing_prompt.py`):
175 | - `linux_diagnostics_guide`: Linux system diagnostics and systemd troubleshooting
176 | - `linux_systemd_reference`: systemd services and log patterns reference
177 |
178 | **5. Process Monitoring Prompts** (`prompts/process_monitoring_prompt.py`):
179 | - `process_monitoring_guide`: System resource monitoring and performance analysis
180 | - `process_troubleshooting_guide`: Troubleshooting process and performance issues
181 |
182 | **6. Network Testing Prompts** (`prompts/network_testing_prompt.py`):
183 | - `network_diagnostics_guide`: Network diagnostics and troubleshooting
184 | - `network_troubleshooting_scenarios`: Specific network troubleshooting scenarios
185 |
186 | ### Cross-Platform Support
187 |
188 | **Windows Features**:
189 | - Full Windows Event Log support with pywin32
190 | - System and Application Event Log analysis
191 | - **Custom Application and Services logs support** (e.g., "Microsoft-Service Fabric/Admin")
192 | - Event ID reference and health assessment
193 | - Configurable time-based filtering
194 | - Automatic API selection (legacy for standard logs, EvtQuery for custom logs)
195 |
196 | **Linux Features**:
197 | - systemd journal and traditional log file support
198 | - Service status analysis and troubleshooting
199 | - Cross-distribution compatibility
200 | - Network diagnostic tool integration
201 |
202 | **Cross-Platform Features**:
203 | - Process monitoring with psutil
204 | - Network diagnostics with platform-specific tools
205 | - Core log management for various formats
206 | - Comprehensive error handling and permission checking
207 |
208 | ### Adding New Features
209 |
210 | **Adding New Tools**:
211 | 1. Choose appropriate category folder under `tools/`
212 | 2. Create Pydantic models for requests in the tool file
213 | 3. Add async function decorated with `@mcp.tool()`
214 | 4. Update the category's `register_*_tools()` function
215 | 5. Add comprehensive docstring and error handling
216 |
217 | **Adding New Resources**:
218 | 1. Choose appropriate category folder under `resources/`
219 | 2. Add async function decorated with `@mcp.resource()`
220 | 3. Implement parameterization if needed
221 | 4. Update the category's `register_*_resources()` function
222 | 5. Add to the main assets overview prompt
223 |
224 | **Adding New Prompts**:
225 | 1. Choose appropriate category folder under `prompts/`
226 | 2. Add async function decorated with `@mcp.prompt()`
227 | 3. Follow the established format with emojis and markdown
228 | 4. Update the category's `register_*_prompts()` function
229 | 5. Include practical examples and troubleshooting guidance
230 |
231 | ### Design Patterns
232 |
233 | - **Modular Organization**: Tools, resources, and prompts organized by functional category
234 | - **Async-First**: All MCP functions are async for better performance
235 | - **Type Safety**: Pydantic models for all requests/responses with comprehensive validation
236 | - **Error Handling**: Consistent error format with helpful messages
237 | - **Cross-Platform**: Platform detection and appropriate tool/command selection
238 | - **Parameterized Resources**: Flexible resource access with time-based and count-based parameters
239 | - **Comprehensive Documentation**: Rich prompts with step-by-step guidance and troubleshooting
240 |
241 | ### Dependencies
242 |
243 | **Core Requirements**:
244 | - `mcp[cli]>=1.9.2`: Model Context Protocol framework
245 | - `pydantic>=1.8.0`: Data validation and serialization
246 | - `python-dotenv>=0.19.0`: Environment variable management
247 | - `pandas>=1.3.0`: Data analysis capabilities
248 | - `psutil>=5.9.0`: System and process monitoring
249 |
250 | **Platform-Specific**:
251 | - `pywin32>=300`: Windows Event Log access (Windows only)
252 |
253 | **Development**:
254 | - `pytest`: Testing framework
255 | - `black`: Code formatting
256 | - `isort`: Import sorting
257 | - `mypy`: Type checking
258 | - `flake8`: Linting
259 |
260 | ### Testing Notes
261 |
262 | - Tests require `PYTHONPATH=src` to properly import modules
263 | - Some tests are platform-specific (Windows Event Logs, Linux systemd)
264 | - Network tests may require internet connectivity
265 | - Process monitoring tests interact with real system resources
266 |
267 | ### MCP Integration Notes
268 |
269 | - Server communicates via stdio with Claude Code
270 | - Tools appear as callable functions in Claude conversations
271 | - Resources can be referenced with URIs like `logs/sources` or `processes/list`
272 | - Prompts provide comprehensive guidance for effective system administration
273 | - Cross-platform compatibility ensures consistent experience across environments
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/models/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Models package."""
2 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/config/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Configuration package."""
2 |
```
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
```python
1 | from setuptools import setup
2 |
3 | setup()  # no arguments: package metadata lives in pyproject.toml (declarative setuptools build)
4 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/api/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """API implementation for the MCP Log Analyzer."""
2 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Core functionality for the MCP Log Analyzer."""
2 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP Log Analyzer package."""
2 |
3 | __version__ = "0.1.0"
4 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """MCP server module for log analysis."""
2 |
3 | from .server import mcp
4 |
5 | __all__ = ["mcp"]
6 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/config.py:
--------------------------------------------------------------------------------
```python
1 | """Configuration for MCP Log Analyzer."""
2 |
3 | from pydantic import BaseModel
4 |
5 |
6 | class Settings(BaseModel):
7 | """Simple settings for MCP Log Analyzer."""
8 |
9 | server_name: str = "mcp-log-analyzer"  # matches the server key registered in .mcp.json
10 | version: str = "0.1.0"  # keep in sync with src/mcp_log_analyzer/__init__.__version__
11 |
12 | # Parser settings
13 | max_file_size_mb: int = 100  # mirrors parsers.structured.max_file_size_mb in config/default.yml
14 | max_events: int = 10000  # mirrors parsers.evt.max_events in config/default.yml
15 | batch_size: int = 1000  # mirrors parsers.evt.batch_size in config/default.yml
16 |
17 | # Cache settings
18 | cache_dir: str = "cache"  # mirrors storage.cache_dir in config/default.yml
19 | max_cache_size_mb: int = 1024  # mirrors storage.max_cache_size_mb in config/default.yml
20 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/config/settings.py:
--------------------------------------------------------------------------------
```python
1 | """Server configuration settings."""
2 |
3 | import os
4 | from typing import Optional
5 |
6 |
7 | class Settings:
8 | """Application settings."""
9 |
10 | def __init__(self):
11 | self.server_name = "SF Support Diagnostics MCP Server"  # NOTE(review): differs from core/config.py ("mcp-log-analyzer") — confirm which name is canonical
12 | self.version = "0.1.0"  # keep in sync with src/mcp_log_analyzer/__init__.__version__
13 | self.log_level = os.getenv("LOG_LEVEL", "INFO")  # env override; defaults to INFO
14 | self.api_key: Optional[str] = os.getenv("API_KEY")  # None when API_KEY env var is unset
15 | self.database_url: Optional[str] = os.getenv("DATABASE_URL")  # None when DATABASE_URL env var is unset
16 |
17 | @property
18 | def server_info(self) -> dict:
19 | """Get server information."""
20 | return {
21 | "name": self.server_name,
22 | "version": self.version,
23 | "log_level": self.log_level,
24 | }
25 |
26 |
27 | settings = Settings()  # module-level shared instance for importers
28 |
```
--------------------------------------------------------------------------------
/config/default.yml:
--------------------------------------------------------------------------------
```yaml
1 | # MCP Log Analyzer Default Configuration
2 |
3 | server:
4 | host: "127.0.0.1"
5 | port: 5000
6 | debug: false
7 | workers: 4
8 |
9 | logging:
10 | level: "INFO"
11 | format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
12 | file: "logs/mcp_server.log"
13 |
14 | storage:
15 | cache_dir: "cache"
16 | max_cache_size_mb: 1024
17 |
18 | parsers:
19 | evt:
20 | batch_size: 1000
21 | max_events: 10000
22 | structured:
23 | allowed_formats: ["json", "xml"]
24 | max_file_size_mb: 100
25 | csv:
26 | default_delimiter: ","
27 | infer_types: true
28 | sample_size: 1000
29 | unstructured:
30 | max_line_length: 10000
31 | max_file_size_mb: 50
32 |
33 | models:
34 | embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
35 | default_max_tokens: 2048
36 | max_context_window: 16384
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/prompts/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Prompts package for the MCP Log Analyzer server.
3 | """
4 |
5 | from mcp.server import FastMCP
6 |
7 | from .linux_testing_prompt import register_linux_testing_prompts
8 | from .log_management_prompt import register_log_management_prompts
9 | from .mcp_assets_overview_prompt import register_mcp_assets_prompts
10 | from .network_testing_prompt import register_network_testing_prompts
11 | from .process_monitoring_prompt import register_process_monitoring_prompts
12 | from .windows_testing_prompt import register_windows_testing_prompts
13 |
14 |
def register_all_prompts(mcp: FastMCP):
    """Register every prompt group with the MCP server, in a fixed order."""
    registrars = (
        register_mcp_assets_prompts,
        register_log_management_prompts,
        register_windows_testing_prompts,
        register_linux_testing_prompts,
        register_process_monitoring_prompts,
        register_network_testing_prompts,
    )
    for register in registrars:
        register(mcp)
23 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | MCP Resources module.
3 |
4 | This module contains all the MCP resources organized by category.
5 | """
6 |
7 | from .linux_resources import register_linux_resources
8 | from .logs_resources import register_logs_resources
9 | from .network_resources import register_network_resources
10 | from .process_resources import register_process_resources
11 | from .windows_resources import register_windows_resources
12 |
13 |
def register_all_resources(mcp):
    """Register every resource group with the MCP server, in a fixed order."""
    for register in (
        register_logs_resources,
        register_windows_resources,
        register_linux_resources,
        register_process_resources,
        register_network_resources,
    ):
        register(mcp)
21 |
22 |
23 | __all__ = [
24 | "register_all_resources",
25 | "register_logs_resources",
26 | "register_windows_resources",
27 | "register_linux_resources",
28 | "register_process_resources",
29 | "register_network_resources",
30 | ]
31 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | MCP Tools module.
3 |
4 | This module contains all the MCP tools organized by category.
5 | """
6 |
7 | from .linux_test_tools import register_linux_test_tools
8 | from .log_management_tools import register_log_management_tools
9 | from .network_test_tools import register_network_test_tools
10 | from .process_test_tools import register_process_test_tools
11 | from .windows_test_tools import register_windows_test_tools
12 | from .health_check_tools import register_health_check_tools
13 |
14 |
def register_all_tools(mcp):
    """Register every tool group with the MCP server, in a fixed order."""
    for register in (
        register_log_management_tools,
        register_windows_test_tools,
        register_linux_test_tools,
        register_process_test_tools,
        register_network_test_tools,
        register_health_check_tools,
    ):
        register(mcp)
23 |
24 |
25 | __all__ = [
26 | "register_all_tools",
27 | "register_log_management_tools",
28 | "register_windows_test_tools",
29 | "register_linux_test_tools",
30 | "register_process_test_tools",
31 | "register_network_test_tools",
32 | "register_health_check_tools",
33 | ]
34 |
```
--------------------------------------------------------------------------------
/test_server.py:
--------------------------------------------------------------------------------
```python
# Debug script: imports the MCP server object and prints what it exposes,
# to verify that tools/handlers were wired up correctly.
import sys

# Make the src/ layout importable without installing the package.
sys.path.insert(0, "src")

try:
    from mcp_log_analyzer.mcp_server.server import mcp

    print("=== MCP Server Debug Info ===")
    print(f"Server name: {mcp.name}")
    print("Server imported successfully")

    # Check what attributes the mcp object has
    print(f"MCP object type: {type(mcp)}")
    print(f'MCP attributes: {[attr for attr in dir(mcp) if not attr.startswith("_")]}')

    # Check for tools specifically
    # NOTE(review): _tools/_handlers are private FastMCP internals and may not
    # exist on all versions — hence the hasattr guards.
    if hasattr(mcp, "_tools"):
        tools = mcp._tools
        print(f"Tools found via _tools: {len(tools)}")
        if tools:
            print("Tool names:", list(tools.keys())[:5])

    if hasattr(mcp, "_handlers"):
        handlers = mcp._handlers
        print(f"Handlers found: {len(handlers)}")
        tool_handlers = {k: v for k, v in handlers.items() if "tool" in str(v)}
        print(f"Tool handlers: {len(tool_handlers)}")
        if tool_handlers:
            print("Tool handler names:", list(tool_handlers.keys())[:5])

    print("\nTesting tool import...")
    # Import is the test itself; the name is intentionally unused.
    from mcp_log_analyzer.mcp_server.tools.network_test_tools import (
        register_network_test_tools,
    )

    print("✅ Network tools module imported successfully")

except Exception as e:
    # Broad catch is deliberate here: this is a diagnostic script and we want
    # the full traceback printed rather than a crash.
    print(f"❌ Error: {e}")
    import traceback

    traceback.print_exc()
43 |
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [build-system]
2 | requires = ["setuptools>=64", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "mcp-log-analyzer"
7 | version = "0.1.0"
8 | description = "Model Context Protocol server for analyzing various types of logs"
9 | readme = "README.md"
10 | authors = [
11 | {name = "MCP Log Analyzer Team"}
12 | ]
13 | requires-python = ">=3.12"
14 | classifiers = [
15 | "Development Status :: 3 - Alpha",
16 | "Intended Audience :: Developers",
17 | "Programming Language :: Python :: 3",
 18 |     "Programming Language :: Python :: 3 :: Only",
 19 |     "Programming Language :: Python :: 3.12",
 20 |     "Programming Language :: Python :: 3.13",
21 | ]
22 | dependencies = [
23 | "mcp[cli]>=1.9.2",
24 | "pydantic>=1.8.0",
25 | "python-dotenv>=0.19.0",
26 | "pandas>=1.3.0",
27 | "pywin32>=300; sys_platform == 'win32'", # For Windows Event Log access
28 | "psutil>=5.9.0", # For process monitoring
29 | ]
30 |
31 | [project.optional-dependencies]
32 | etl = [
33 | "etl-parser>=1.0.1", # For parsing Windows ETL files
34 | ]
35 | dev = [
36 | "pytest>=7.0.0",
37 | "pytest-asyncio>=1.0.0",
38 | "black>=22.0.0",
39 | "isort>=5.10.0",
40 | "mypy>=0.910",
41 | "flake8>=4.0.0",
42 | ]
43 |
44 | [tool.setuptools]
45 | package-dir = {"" = "src"}
46 |
47 | [tool.setuptools.packages.find]
48 | where = ["src"]
49 |
50 | [tool.black]
51 | line-length = 88
 52 | target-version = ["py312"]
53 |
54 | [tool.isort]
55 | profile = "black"
56 | line_length = 88
57 |
58 | [tool.mypy]
59 | python_version = "3.12"
60 | warn_return_any = true
61 | warn_unused_configs = true
62 | disallow_untyped_defs = true
63 | disallow_incomplete_defs = true
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/logs_resources.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Log-related MCP resources.
3 | """
4 |
5 | from mcp.server import FastMCP
6 |
7 |
8 | # This will be imported by the main server
def register_logs_resources(mcp: FastMCP):
    """Register all log-related resources with the MCP server."""

    @mcp.resource("logs://sources")
    async def get_log_sources_resource() -> str:
        """
        Get information about all registered log sources.

        This resource provides a comprehensive view of all log sources
        currently registered in the system.
        """
        # Imported at call time; presumably avoids a circular import with the
        # server module, which imports this module at startup — confirm.
        from ..server import log_sources

        sources_info = []
        for name, source in log_sources.items():
            sources_info.append(f"- {name}: {source.type} at {source.path}")

        if not sources_info:
            return "No log sources registered."

        return "Registered Log Sources:\n" + "\n".join(sources_info)

    @mcp.resource("logs://types")
    async def get_supported_log_types() -> str:
        """
        Get information about supported log types.

        This resource lists all the log types that can be analyzed
        by the MCP Log Analyzer.
        """
        # Static documentation string returned verbatim to the client.
        return """Supported Log Types:

1. Windows Event Logs (evt/evtx)
   - System, Application, Security logs
   - Custom Windows event logs

2. Structured Logs
   - JSON format
   - XML format

3. CSV Logs
   - Comma-separated values with headers

4. Unstructured Text Logs
   - Plain text logs with customizable parsing

Each log type has specific parsers optimized for that format."""

    @mcp.resource("logs://analysis-types")
    async def get_analysis_types() -> str:
        """
        Get information about available analysis types.

        This resource describes the different types of analysis
        that can be performed on logs.
        """
        # Static documentation string returned verbatim to the client.
        return """Available Analysis Types:

1. Summary Analysis
   - Log count and time range
   - Event type distribution
   - Severity levels
   - Source statistics

2. Pattern Analysis
   - Common patterns detection
   - Frequency analysis
   - Recurring events
   - Pattern timeline

3. Anomaly Detection
   - Unusual event patterns
   - Spike detection
   - Rare events
   - Deviation from baseline"""
84 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/__init__.py:
--------------------------------------------------------------------------------
```python
1 | """Log parsers for different log formats."""
2 |
3 | from typing import Any, Dict, Optional, Type
4 |
5 | from ..core.models import LogType
6 |
7 | # Import parser base class for type checking
8 | from .base import BaseParser
9 |
10 | # Dictionary to store parser classes by log type
11 | _parsers: Dict[LogType, Type[BaseParser]] = {}
12 |
13 |
def register_parser(log_type: LogType, parser_class: Type[BaseParser]) -> None:
    """Add *parser_class* to the registry under *log_type*.

    Args:
        log_type: The log type the parser handles.
        parser_class: The parser class to associate with that type.
    """
    _parsers[log_type] = parser_class
22 |
23 |
def get_parser_for_type(
    log_type: LogType, config: Optional[Dict[str, Any]] = None
) -> BaseParser:
    """Get a parser for a log type.

    Args:
        log_type: The log type.
        config: Parser configuration. NOTE(review): despite the Dict
            annotation, sections are read via attribute access
            (``config.evt``, ``config.csv``, ...), i.e. a settings-style
            object is expected — preserved for compatibility.

    Returns:
        An instance of the parser for the log type.

    Raises:
        ValueError: If no parser is registered for the log type.
    """
    if log_type not in _parsers:
        raise ValueError(f"No parser registered for log type: {log_type}")

    # Map each log-type *value* to its configuration section name.
    # BUGFIX: the previous code compared against LogType.STRUCTURED and
    # LogType.UNSTRUCTURED, which do not exist on core.models.LogType
    # (it defines JSON/XML/TEXT), raising AttributeError whenever a config
    # was supplied for a non-EVENT type. Mapping by value supports both
    # naming schemes.
    config_sections = {
        "evt": "evt",
        "event": "evt",
        "json": "structured",
        "xml": "structured",
        "structured": "structured",
        "csv": "csv",
        "text": "unstructured",
        "unstructured": "unstructured",
    }

    # Get parser-specific configuration
    parser_config = None
    if config is not None:
        section = config_sections.get(getattr(log_type, "value", str(log_type)))
        if section is not None and hasattr(config, section):
            parser_config = getattr(config, section)

    # Create parser instance
    return _parsers[log_type](parser_config)
56 |
57 |
58 | # Import parser implementations and register them
59 | try:
60 | from .evt_parser import EventLogParser
61 |
62 | register_parser(LogType.EVENT, EventLogParser)
63 | except ImportError:
64 | # Windows Event Log parser might not be available on non-Windows platforms
65 | pass
66 |
67 | # Import and register CSV parser
68 | try:
69 | from .csv_parser import CsvLogParser
70 |
71 | register_parser(LogType.CSV, CsvLogParser)
72 | except ImportError:
73 | pass
74 |
75 | # TODO: Implement and register parsers for other log types:
76 | # - StructuredLogParser (JSON, XML)
77 | # - UnstructuredLogParser
78 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base parser interface for all log types."""
2 |
3 | import abc
4 | from pathlib import Path
5 | from typing import Any, Dict, Iterator, List, Optional, Union
6 |
7 | from ..core.models import LogRecord, LogSource
8 |
9 |
class BaseParser(abc.ABC):
    """Abstract base class that every log parser implements."""

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Store the parser configuration.

        Args:
            config: Parser configuration; an empty dict is used when omitted.
        """
        self.config = config or {}

    @abc.abstractmethod
    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Yield log records parsed from the file at *file_path*.

        Args:
            source: The log source information.
            file_path: Path to the log file.

        Yields:
            Log records from the file.
        """

    @abc.abstractmethod
    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Yield log records parsed from the raw string *content*.

        Args:
            source: The log source information.
            content: String content to parse.

        Yields:
            Log records from the content.
        """

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Return True when *file_path* names an existing regular file.

        Args:
            file_path: Path to the log file.

        Returns:
            True if the file can be parsed, False otherwise.
        """
        candidate = Path(file_path)
        if not candidate.exists():
            return False
        return candidate.is_file()

    def extract_timestamp(self, record_data: Dict[str, Any]) -> Optional[str]:
        """Return the first conventional timestamp value found in *record_data*.

        Args:
            record_data: Record data.

        Returns:
            The matching value as a string, or None when no known
            timestamp field is present.
        """
        # Checked in priority order; first hit wins.
        for candidate in (
            "timestamp",
            "time",
            "date",
            "datetime",
            "@timestamp",
            "created_at",
        ):
            if candidate in record_data:
                return str(record_data[candidate])
        return None
83 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/process_resources.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Process monitoring MCP resources.
3 | """
4 |
5 | from datetime import datetime
6 |
7 | import psutil
8 | from mcp.server import FastMCP
9 |
10 |
def register_process_resources(mcp: FastMCP):
    """Register all process-related resources with the MCP server."""

    @mcp.resource("system://process-list")
    async def get_process_list() -> str:
        """
        Get current process list with PID, CPU, and memory usage.

        This resource provides a snapshot of running processes
        with their resource utilization for troubleshooting.
        """
        result = []
        result.append("=== Process List ===")
        result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        result.append(f"CPU Count: {psutil.cpu_count()}")
        result.append(
            f"Total Memory: {psutil.virtual_memory().total / (1024**3):.2f} GB"
        )
        result.append(
            f"Available Memory: {psutil.virtual_memory().available / (1024**3):.2f} GB"
        )
        # NOTE: interval=1 makes this call block for one second to sample CPU.
        result.append(f"CPU Usage: {psutil.cpu_percent(interval=1)}%\n")

        result.append(
            f"{'PID':<8} {'Name':<25} {'CPU%':<8} {'Memory%':<10} {'Status':<12}"
        )
        result.append("-" * 75)

        # Get processes sorted by CPU usage
        processes = []
        for proc in psutil.process_iter(
            ["pid", "name", "cpu_percent", "memory_percent", "status"]
        ):
            try:
                proc_info = proc.info
                # The first cpu_percent reading can be None; take a short
                # blocking sample in that case.
                if proc_info["cpu_percent"] is None:
                    proc_info["cpu_percent"] = proc.cpu_percent(interval=0.1)
                processes.append(proc_info)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                # Process exited or is inaccessible; skip it.
                continue

        # Sort by CPU usage (descending)
        processes.sort(key=lambda x: x.get("cpu_percent", 0), reverse=True)

        # Show top 20 processes
        for proc in processes[:20]:
            result.append(
                f"{proc['pid']:<8} "
                f"{proc['name'][:24]:<25} "
                f"{proc.get('cpu_percent', 0):<8.1f} "
                f"{proc.get('memory_percent', 0):<10.2f} "
                f"{proc.get('status', 'unknown'):<12}"
            )

        result.append(f"\nTotal processes: {len(processes)}")

        return "\n".join(result)
68 |
```
--------------------------------------------------------------------------------
/TCP_PROXY_README.md:
--------------------------------------------------------------------------------
```markdown
1 | # TCP Proxy for MCP Servers
2 |
3 | This TCP proxy allows MCP servers that use stdio communication to be accessed over TCP connections.
4 |
5 | ## Architecture
6 |
7 | The proxy works by:
8 | 1. Accepting TCP connections from clients
9 | 2. Spawning a new MCP server process for each client
10 | 3. Bridging communication between the TCP socket and the MCP process stdio
11 |
12 | ## Usage
13 |
14 | ### Running the TCP Proxy
15 |
16 | ```bash
17 | # Basic usage
18 | python tcp_proxy.py python main.py
19 |
20 | # With custom host and port
21 | python tcp_proxy.py --host 0.0.0.0 --port 9000 python main.py
22 |
23 | # With debug logging
24 | python tcp_proxy.py --debug python main.py
25 |
26 | # For any MCP server command
27 | python tcp_proxy.py node my-mcp-server.js
28 | python tcp_proxy.py ./my-mcp-binary
29 | ```
30 |
31 | ### Testing the Proxy
32 |
33 | ```bash
34 | # Run the test script
35 | python test_tcp_proxy.py
36 | ```
37 |
38 | The test script will:
39 | 1. Connect to the TCP proxy
40 | 2. Send initialize request
41 | 3. List available tools
42 | 4. List available resources
43 | 5. Call a sample tool
44 | 6. Send shutdown request
45 |
46 | ### Example Client Code
47 |
48 | ```python
49 | import asyncio
50 | import json
51 |
52 | async def connect_to_mcp():
53 | reader, writer = await asyncio.open_connection('localhost', 8080)
54 |
55 | # Send initialize
56 | request = {
57 | "jsonrpc": "2.0",
58 | "id": 1,
59 | "method": "initialize",
60 | "params": {
61 | "protocolVersion": "2024-11-05",
62 | "capabilities": {},
63 | "clientInfo": {
64 | "name": "my-client",
65 | "version": "1.0.0"
66 | }
67 | }
68 | }
69 |
70 | writer.write((json.dumps(request) + '\n').encode('utf-8'))
71 | await writer.drain()
72 |
73 | # Read response
74 | response = await reader.readline()
75 | print(json.loads(response))
76 |
77 | # Close connection
78 | writer.close()
79 | await writer.wait_closed()
80 |
81 | asyncio.run(connect_to_mcp())
82 | ```
83 |
84 | ## Features
85 |
86 | - **Process Isolation**: Each client gets its own MCP server process
87 | - **Bidirectional Communication**: Full duplex between TCP and stdio
88 | - **Error Handling**: Graceful handling of disconnections and errors
89 | - **Debug Logging**: Optional debug mode to trace all messages
90 | - **Stderr Monitoring**: Captures and logs MCP server stderr output
91 |
92 | ## Protocol
93 |
94 | The proxy uses newline-delimited JSON-RPC 2.0 messages:
95 | - Each message must be a complete JSON object
96 | - Messages are separated by newline characters (`\n`)
97 | - The proxy does not modify messages, it only forwards them
98 |
99 | ## Limitations
100 |
101 | - The current `main_tcp.py` implementation has issues with stdio redirection
102 | - Use `tcp_proxy.py` instead for reliable TCP access to MCP servers
103 | - Each connection spawns a new process (consider connection pooling for production)
104 |
105 | ## Troubleshooting
106 |
107 | If you get connection refused errors:
108 | 1. Make sure the proxy is running
109 | 2. Check the port is not already in use
110 | 3. Verify firewall settings
111 |
112 | If you get timeout errors:
113 | 1. The MCP server may be taking too long to start
114 | 2. Check for errors in the MCP server stderr (use --debug)
115 | 3. Verify the MCP command is correct
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/models/schemas.py:
--------------------------------------------------------------------------------
```python
1 | """Core models for the SF MCP Log Analyzer."""
2 |
3 | from datetime import datetime
4 | from enum import Enum
5 | from typing import Any, Dict, List, Optional, Union
6 | from uuid import UUID, uuid4
7 |
8 | from pydantic import BaseModel, Field
9 |
10 |
class LogType(str, Enum):
    """Supported log types."""

    # NOTE(review): this duplicates core/models.py but with different member
    # names/values (EVENT="event" here vs "evt" there) — consider unifying.
    EVENT = "event"
    STRUCTURED = "structured"
    CSV = "csv"
    UNSTRUCTURED = "unstructured"


class LogSource(BaseModel):
    """Log source information."""

    # created_at/updated_at default to naive local time (datetime.now).
    id: UUID = Field(default_factory=uuid4)
    name: str
    type: LogType
    path: str
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now)
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogRecord(BaseModel):
    """Base log record."""

    # data holds the parsed fields; raw_data the original unparsed text.
    id: UUID = Field(default_factory=uuid4)
    source_id: UUID
    timestamp: Optional[datetime] = None
    data: Dict[str, Any]
    raw_data: Optional[str] = None


class LogQuery(BaseModel):
    """Query parameters for log retrieval."""

    # None filters mean "match everything" for that dimension.
    source_ids: Optional[List[UUID]] = None
    types: Optional[List[LogType]] = None
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    filters: Dict[str, Any] = Field(default_factory=dict)
    limit: int = 100
    offset: int = 0


class MCPRequest(BaseModel):
    """Base MCP request model."""

    request_id: UUID = Field(default_factory=uuid4)
    timestamp: datetime = Field(default_factory=datetime.now)
    client_id: Optional[str] = None


class LogSourceRequest(MCPRequest):
    """Request to register a log source."""

    name: str
    type: LogType
    path: str
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogQueryRequest(MCPRequest):
    """Request to query logs."""

    query: LogQuery


class LogAnalysisRequest(MCPRequest):
    """Request to analyze logs."""

    query: LogQuery
    analysis_type: str
    parameters: Dict[str, Any] = Field(default_factory=dict)


class MCPResponse(BaseModel):
    """Base MCP response model."""

    # status defaults to "success"; error carries a message on failure.
    request_id: UUID
    timestamp: datetime = Field(default_factory=datetime.now)
    status: str = "success"
    error: Optional[str] = None


class LogSourceResponse(MCPResponse):
    """Response for log source registration."""

    source: LogSource


class LogQueryResponse(MCPResponse):
    """Response for log query."""

    records: List[LogRecord]
    total: int
    limit: int
    offset: int


class LogAnalysisResponse(MCPResponse):
    """Response for log analysis."""

    results: Dict[str, Any]
    query: LogQuery


class MCPContext(BaseModel):
    """Context for processing MCP requests."""

    request_id: UUID
    start_time: datetime = Field(default_factory=datetime.now)
    client_id: Optional[str] = None
    log_sources: Dict[UUID, LogSource] = Field(default_factory=dict)


class MCPError(Exception):
    """Base error for MCP operations."""

    def __init__(self, message: str, status_code: int = 400):
        # status_code uses HTTP-style semantics (default 400 = client error).
        self.message = message
        self.status_code = status_code
        super().__init__(self.message)
132 |
```
--------------------------------------------------------------------------------
/tests/test_base_parser.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the base parser."""
2 |
3 | import unittest
4 | from pathlib import Path
5 | from typing import Dict, Iterator
6 | from unittest.mock import MagicMock, patch
7 | from uuid import UUID, uuid4
8 |
9 | from mcp_log_analyzer.core.models import LogRecord, LogSource, LogType
10 | from mcp_log_analyzer.parsers.base import BaseParser
11 |
12 |
class MockParser(BaseParser):
    """Mock parser implementation for testing."""

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        """Parse a file."""
        # Emits a single fixed record so tests can focus on BaseParser helpers.
        yield LogRecord(
            source_id=source.id,
            data={"test": "value"},
        )

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse content."""
        yield LogRecord(
            source_id=source.id,
            data={"test": "value"},
        )


class TestBaseParser(unittest.TestCase):
    """Tests for the base parser."""

    def setUp(self) -> None:
        """Set up test fixtures."""
        self.parser = MockParser()
        self.source = LogSource(
            id=uuid4(),
            name="Test Source",
            type=LogType.EVENT,
            path="test/path",
        )

    def test_validate_file(self) -> None:
        """Test validate_file method."""
        # Path methods are patched so no real filesystem access occurs.
        with patch("pathlib.Path.exists", return_value=True), patch(
            "pathlib.Path.is_file", return_value=True
        ):
            self.assertTrue(self.parser.validate_file("test/path"))

        with patch("pathlib.Path.exists", return_value=False):
            self.assertFalse(self.parser.validate_file("test/path"))

        with patch("pathlib.Path.exists", return_value=True), patch(
            "pathlib.Path.is_file", return_value=False
        ):
            self.assertFalse(self.parser.validate_file("test/path"))

    def test_extract_timestamp(self) -> None:
        """Test extract_timestamp method."""
        # Covers each recognised field name, then the no-match case.
        # Test with timestamp field
        record_data: Dict[str, str] = {"timestamp": "2023-05-02T12:00:00Z"}
        self.assertEqual(
            self.parser.extract_timestamp(record_data), "2023-05-02T12:00:00Z"
        )

        # Test with time field
        record_data = {"time": "2023-05-02T12:00:00Z"}
        self.assertEqual(
            self.parser.extract_timestamp(record_data), "2023-05-02T12:00:00Z"
        )

        # Test with date field
        record_data = {"date": "2023-05-02"}
        self.assertEqual(self.parser.extract_timestamp(record_data), "2023-05-02")

        # Test with datetime field
        record_data = {"datetime": "2023-05-02T12:00:00Z"}
        self.assertEqual(
            self.parser.extract_timestamp(record_data), "2023-05-02T12:00:00Z"
        )

        # Test with @timestamp field
        record_data = {"@timestamp": "2023-05-02T12:00:00Z"}
        self.assertEqual(
            self.parser.extract_timestamp(record_data), "2023-05-02T12:00:00Z"
        )

        # Test with created_at field
        record_data = {"created_at": "2023-05-02T12:00:00Z"}
        self.assertEqual(
            self.parser.extract_timestamp(record_data), "2023-05-02T12:00:00Z"
        )

        # Test with no timestamp field
        record_data = {"other": "value"}
        self.assertIsNone(self.parser.extract_timestamp(record_data))


if __name__ == "__main__":
    unittest.main()
102 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/models.py:
--------------------------------------------------------------------------------
```python
1 | """Core models for the MCP Log Analyzer."""
2 |
3 | from datetime import datetime
4 | from enum import Enum
5 | from typing import Any, Dict, List, Optional, Union
6 | from uuid import UUID, uuid4
7 |
8 | from pydantic import BaseModel, Field
9 |
10 |
class LogType(str, Enum):
    """Supported log types.

    Values are the short format identifiers used when registering log
    sources and selecting parsers.
    """

    EVENT = "evt"  # Windows Event Logs (evt/evtx)
    ETL = "etl"  # Windows Event Trace Logs
    JSON = "json"
    XML = "xml"
    CSV = "csv"
    TEXT = "text"
    # BUGFIX: the parser registry (parsers/__init__.py) refers to
    # LogType.STRUCTURED and LogType.UNSTRUCTURED, which previously did not
    # exist here and raised AttributeError at parser-selection time.
    STRUCTURED = "structured"  # structured logs (JSON/XML family)
    UNSTRUCTURED = "unstructured"  # free-form text logs
20 |
21 |
22 | class LogSource(BaseModel):
23 | """Log source information."""
24 |
25 | id: UUID = Field(default_factory=uuid4)
26 | name: str
27 | type: LogType
28 | path: str
29 | created_at: datetime = Field(default_factory=datetime.now)
30 | updated_at: datetime = Field(default_factory=datetime.now)
31 | metadata: Dict[str, Any] = Field(default_factory=dict)
32 |
33 |
34 | class LogRecord(BaseModel):
35 | """Base log record."""
36 |
37 | id: UUID = Field(default_factory=uuid4)
38 | source_id: UUID
39 | timestamp: Optional[datetime] = None
40 | data: Dict[str, Any]
41 | raw_data: Optional[str] = None
42 |
43 |
44 | class LogQuery(BaseModel):
45 | """Query parameters for log retrieval."""
46 |
47 | source_ids: Optional[List[UUID]] = None
48 | types: Optional[List[LogType]] = None
49 | start_time: Optional[datetime] = None
50 | end_time: Optional[datetime] = None
51 | filters: Dict[str, Any] = Field(default_factory=dict)
52 | limit: int = 100
53 | offset: int = 0
54 |
55 |
56 | class MCPRequest(BaseModel):
57 | """Base MCP request model."""
58 |
59 | request_id: UUID = Field(default_factory=uuid4)
60 | timestamp: datetime = Field(default_factory=datetime.now)
61 | client_id: Optional[str] = None
62 |
63 |
64 | class LogSourceRequest(MCPRequest):
65 | """Request to register a log source."""
66 |
67 | name: str
68 | type: LogType
69 | path: str
70 | metadata: Dict[str, Any] = Field(default_factory=dict)
71 |
72 |
73 | class LogQueryRequest(MCPRequest):
74 | """Request to query logs."""
75 |
76 | query: LogQuery
77 |
78 |
79 | class LogAnalysisRequest(MCPRequest):
80 | """Request to analyze logs."""
81 |
82 | query: LogQuery
83 | analysis_type: str
84 | parameters: Dict[str, Any] = Field(default_factory=dict)
85 |
86 |
87 | class MCPResponse(BaseModel):
88 | """Base MCP response model."""
89 |
90 | request_id: UUID
91 | timestamp: datetime = Field(default_factory=datetime.now)
92 | status: str = "success"
93 | error: Optional[str] = None
94 |
95 |
96 | class LogSourceResponse(MCPResponse):
97 | """Response for log source registration."""
98 |
99 | source: LogSource
100 |
101 |
102 | class LogQueryResponse(MCPResponse):
103 | """Response for log query."""
104 |
105 | records: List[LogRecord]
106 | total: int
107 | limit: int
108 | offset: int
109 |
110 |
111 | class LogAnalysisResponse(MCPResponse):
112 | """Response for log analysis."""
113 |
114 | results: Dict[str, Any]
115 | query: LogQuery
116 |
117 |
class AnalysisResult(BaseModel):
    """Outcome of a single log-analysis run."""

    analysis_type: str
    summary: Dict[str, Any]
    # Present only when the analysis yields recurring patterns.
    patterns: Optional[List[Dict[str, Any]]] = None
    # Present only when the analysis yields anomalies/outliers.
    anomalies: Optional[List[Dict[str, Any]]] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
126 |
127 |
class MCPContext(BaseModel):
    """Per-request processing context for the MCP server."""

    request_id: UUID
    # Captured when the context is created.
    start_time: datetime = Field(default_factory=datetime.now)
    client_id: Optional[str] = None
    # Sources visible to this request, keyed by source id.
    log_sources: Dict[UUID, LogSource] = Field(default_factory=dict)
135 |
136 |
class MCPError(Exception):
    """Base exception for MCP operations.

    Carries a human-readable message plus an HTTP-style status code
    (defaults to 400 Bad Request). The message is also passed to the
    Exception base, so str(err) returns it.
    """

    def __init__(self, message: str, status_code: int = 400) -> None:
        self.message = message
        self.status_code = status_code
        super().__init__(message)
144 |
```
--------------------------------------------------------------------------------
/tests/test_tool_utils.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Test utilities to access tool functions for testing.
3 | """
4 |
5 | import asyncio
6 | from typing import Any, Dict
7 |
8 | from mcp_log_analyzer.core.models import LogSource
9 |
10 | from .server import log_sources, parsers
11 | from .tools.log_management_tools import (
12 | AnalyzeLogsRequest,
13 | QueryLogsRequest,
14 | RegisterLogSourceRequest,
15 | )
16 |
17 |
18 | # Create direct tool function wrappers for testing
async def register_log_source(request: RegisterLogSourceRequest) -> Dict[str, Any]:
    """Direct wrapper for register_log_source tool.

    Rejects duplicate names and unsupported source types; otherwise
    stores a new LogSource in the module-level registry.
    """
    name = request.name
    if name in log_sources:
        return {"error": f"Log source '{name}' already exists"}
    if request.source_type not in parsers:
        return {"error": f"Unsupported source type: {request.source_type}"}

    source = LogSource(
        name=name,
        type=request.source_type,
        path=request.path,
        metadata=request.config or {},
    )
    log_sources[name] = source

    return {
        "message": f"Log source '{name}' registered successfully",
        "source": source.model_dump(),
    }
40 |
41 |
async def list_log_sources() -> Dict[str, Any]:
    """Direct wrapper for list_log_sources tool: dump every registered source."""
    dumped = [entry.model_dump() for entry in log_sources.values()]
    return {"sources": dumped, "count": len(dumped)}
48 |
49 |
async def get_log_source(name: str) -> Dict[str, Any]:
    """Direct wrapper for get_log_source tool: fetch one source by name."""
    try:
        source = log_sources[name]
    except KeyError:
        return {"error": f"Log source '{name}' not found"}
    return {"source": source.model_dump()}
56 |
57 |
async def delete_log_source(name: str) -> Dict[str, Any]:
    """Direct wrapper for delete_log_source tool: remove one source by name."""
    # Registry values are always LogSource objects, never None, so a None
    # result from pop() means the key was absent.
    if log_sources.pop(name, None) is None:
        return {"error": f"Log source '{name}' not found"}
    return {"message": f"Log source '{name}' deleted successfully"}
65 |
66 |
async def query_logs(request: QueryLogsRequest) -> Dict[str, Any]:
    """Direct wrapper for query_logs tool.

    Parses the source's log data in a worker thread (parser.parse is
    blocking) and returns the matching records; parser failures are
    reported as an error dict rather than raised.
    """
    source_name = request.source_name
    if source_name not in log_sources:
        return {"error": f"Log source '{source_name}' not found"}

    source = log_sources[source_name]
    parser = parsers[source.type]

    try:
        records = await asyncio.to_thread(
            parser.parse,
            source.path,
            filters=request.filters,
            start_time=request.start_time,
            end_time=request.end_time,
            limit=request.limit,
            offset=request.offset,
        )
        return {
            "logs": [record.model_dump() for record in records],
            "count": len(records),
            "source": source_name,
        }
    except Exception as e:
        return {"error": f"Failed to query logs: {str(e)}"}
93 |
94 |
async def analyze_logs(request: AnalyzeLogsRequest) -> Dict[str, Any]:
    """Direct wrapper for analyze_logs tool.

    Fetches matching records, then runs the requested analysis over them;
    both blocking parser calls are offloaded to worker threads. Failures
    are reported as an error dict rather than raised.
    """
    source_name = request.source_name
    if source_name not in log_sources:
        return {"error": f"Log source '{source_name}' not found"}

    source = log_sources[source_name]
    parser = parsers[source.type]

    try:
        records = await asyncio.to_thread(
            parser.parse,
            source.path,
            filters=request.filters,
            start_time=request.start_time,
            end_time=request.end_time,
        )
        analysis = await asyncio.to_thread(
            parser.analyze, records, analysis_type=request.analysis_type
        )
        return {
            "result": analysis.model_dump(),
            "source": source_name,
            "analysis_type": request.analysis_type,
        }
    except Exception as e:
        return {"error": f"Failed to analyze logs: {str(e)}"}
125 |
```
--------------------------------------------------------------------------------
/docs/getting_started.md:
--------------------------------------------------------------------------------
```markdown
1 | # Getting Started with MCP Log Analyzer
2 |
3 | This guide will help you get started with the MCP Log Analyzer server.
4 |
5 | ## Prerequisites
6 |
7 | - Python 3.8 or higher
8 | - Windows OS (for Windows Event Log functionality)
9 | - pywin32 package
10 |
11 | ## Installation
12 |
13 | 1. Clone the repository:
14 |
15 | ```bash
16 | git clone https://github.com/your-username/mcp-log-analyzer.git
17 | cd mcp-log-analyzer
18 | ```
19 |
20 | 2. Install the package and dependencies:
21 |
22 | ```bash
23 | pip install -e . # Install the package in development mode
24 | pip install -e ".[dev]" # Install development dependencies (optional)
25 | ```
26 |
27 | ## Configuration
28 |
29 | The MCP Log Analyzer can be configured using a YAML file or environment variables.
30 |
31 | ### Configuration File
32 |
33 | The default configuration file is `config/default.yml`. You can create a custom configuration file and specify its path when running the server.
34 |
35 | ### Environment Variables
36 |
37 | Configuration can also be provided using environment variables:
38 |
39 | - `MCP_CONFIG`: Path to the configuration file
40 | - `MCP_SERVER_HOST`: Server host
41 | - `MCP_SERVER_PORT`: Server port
42 | - `MCP_DEBUG`: Enable debug mode (`true` or `false`)
43 |
44 | ## Running the Server
45 |
46 | To run the server with the default configuration:
47 |
48 | ```bash
49 | python -m mcp_log_analyzer.api.server
50 | ```
51 |
52 | To run the server with a custom configuration file:
53 |
54 | ```bash
55 | python -m mcp_log_analyzer.api.server --config path/to/config.yml
56 | ```
57 |
58 | To specify host and port directly:
59 |
60 | ```bash
61 | python -m mcp_log_analyzer.api.server --host 0.0.0.0 --port 8000
62 | ```
63 |
64 | To enable auto-reload during development:
65 |
66 | ```bash
67 | python -m mcp_log_analyzer.api.server --reload
68 | ```
69 |
70 | ## Testing the Server
71 |
72 | You can use the provided test script to test the server:
73 |
74 | ```bash
75 | python scripts/test_server.py --url http://localhost:5000
76 | ```
77 |
78 | The test script will register a Windows Event Log source, query logs, and analyze logs.
79 |
80 | ## Accessing the API
81 |
82 | Once the server is running, you can access the API at `http://localhost:5000/api`.
83 |
84 | The API documentation is available at [API Reference](api_reference.md).
85 |
86 | ## Using with Windows Event Logs
87 |
88 | The MCP Log Analyzer can analyze Windows Event Logs using the Windows Event Log API. To register a Windows Event Log source:
89 |
90 | ```
91 | POST /api/sources
92 | ```
93 |
94 | ```json
95 | {
96 | "request_id": "e77e5d1e-8a7e-4e2f-9ea2-3b9ac5f5c161",
97 | "timestamp": "2023-05-02T12:00:00Z",
98 | "client_id": "test-client",
99 | "name": "System Event Log",
100 | "type": "event",
101 | "path": "System",
102 | "metadata": {
103 | "description": "Windows System Event Log"
104 | }
105 | }
106 | ```
107 |
108 | The `path` can be one of the standard Windows Event Log names:
109 | - `Application`
110 | - `System`
111 | - `Security`
112 | - Other event log names
113 |
114 | ## Example Workflow
115 |
116 | 1. Start the server:
117 |
118 | ```bash
119 | python -m mcp_log_analyzer.api.server
120 | ```
121 |
122 | 2. Register a log source:
123 |
124 | ```bash
125 | curl -X POST http://localhost:5000/api/sources \
126 | -H "Content-Type: application/json" \
127 | -d '{
128 | "request_id": "e77e5d1e-8a7e-4e2f-9ea2-3b9ac5f5c161",
129 | "timestamp": "2023-05-02T12:00:00Z",
130 | "client_id": "test-client",
131 | "name": "System Event Log",
132 | "type": "event",
133 | "path": "System",
134 | "metadata": {
135 | "description": "Windows System Event Log"
136 | }
137 | }'
138 | ```
139 |
140 | 3. Get the source ID from the response.
141 |
142 | 4. Query logs:
143 |
144 | ```bash
145 | curl -X POST http://localhost:5000/api/query \
146 | -H "Content-Type: application/json" \
147 | -d '{
148 |     "request_id": "f88e6d2e-9b8f-5f3a-0fb3-4c0bd6a6d272",
149 | "timestamp": "2023-05-02T12:01:00Z",
150 | "client_id": "test-client",
151 | "query": {
152 | "source_ids": ["source-id-from-previous-response"],
153 | "limit": 10,
154 | "offset": 0
155 | }
156 | }'
157 | ```
158 |
159 | 5. Analyze logs:
160 |
161 | ```bash
162 | curl -X POST http://localhost:5000/api/analyze \
163 | -H "Content-Type: application/json" \
164 | -d '{
165 |     "request_id": "a99f7e3f-0c9a-6a4b-1ac4-5d1ce7b7e383",
166 | "timestamp": "2023-05-02T12:02:00Z",
167 | "client_id": "test-client",
168 | "query": {
169 | "source_ids": ["source-id-from-previous-response"],
170 | "limit": 100,
171 | "offset": 0
172 | },
173 | "analysis_type": "summary",
174 | "parameters": {
175 | "include_statistics": true
176 | }
177 | }'
178 | ```
```
--------------------------------------------------------------------------------
/test_windows_setup.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Test script to verify Windows setup is working correctly.
4 | Run this script after installing dependencies with: pip install -e .
5 | """
6 |
7 | import platform
8 | import sys
9 |
10 |
def test_basic_imports():
    """Verify the core package modules import; return True on success."""
    print("🔍 Testing basic imports...")

    try:
        from mcp_log_analyzer.core.models import LogSource, LogType  # noqa: F401
    except Exception as e:
        print(f"   ❌ Core models import failed: {e}")
        return False
    print("   ✅ Core models import successful")

    try:
        from mcp_log_analyzer.parsers.csv_parser import CsvLogParser  # noqa: F401
    except Exception as e:
        print(f"   ❌ CSV parser import failed: {e}")
        return False
    print("   ✅ CSV parser import successful")

    return True
30 |
31 |
def test_windows_specific():
    """Check pywin32 event-log modules; trivially passes off-Windows."""
    print("\n🪟 Testing Windows-specific functionality...")

    if platform.system() != "Windows":
        print("   ⚠️  Skipping Windows tests (not on Windows)")
        return True

    try:
        import win32evtlog  # noqa: F401
        import win32evtlogutil  # noqa: F401
        import win32con  # noqa: F401
        print("   ✅ Windows Event Log modules available")

        from mcp_log_analyzer.parsers.evt_parser import EvtParser  # noqa: F401
        print("   ✅ Event Log parser import successful")
        return True
    except ImportError as e:
        print(f"   ❌ Windows Event Log modules not available: {e}")
        print("   💡 Install with: pip install pywin32>=300")
        return False
53 |
54 |
def test_server_startup():
    """Verify the MCP server module imports and report available parsers."""
    print("\n🚀 Testing MCP server startup...")

    try:
        from mcp_log_analyzer.mcp_server.server import mcp  # noqa: F401
        print("   ✅ MCP server import successful")

        # Listing the parser registry confirms it was populated at import time.
        from mcp_log_analyzer.mcp_server.server import parsers
        print(f"   📋 Available parsers: {list(parsers.keys())}")

        return True
    except Exception as e:
        print(f"   ❌ MCP server startup failed: {e}")
        return False
71 |
72 |
def test_csv_functionality():
    """End-to-end smoke test: parse a tiny headerless CSV and analyze it."""
    print("\n📊 Testing CSV functionality...")

    try:
        from mcp_log_analyzer.core.models import LogSource, LogType
        from mcp_log_analyzer.parsers.csv_parser import CsvLogParser

        # Build an in-memory fixture; no file on disk is needed because
        # parse_content takes the raw text directly.
        source = LogSource(name="test", type=LogType.CSV, path="test.csv")
        parser = CsvLogParser({
            'has_header': False,
            'field_names': ['timestamp', 'level', 'message']
        })

        test_content = """2025-01-01 10:00:00,INFO,Test message
2025-01-01 10:01:00,ERROR,Test error"""

        records = list(parser.parse_content(source, test_content))
        print(f"   ✅ Parsed {len(records)} test records")

        analysis = parser.analyze(records)
        print(f"   ✅ Analysis completed: {analysis['summary']['total_records']} records")

        return True
    except Exception as e:
        print(f"   ❌ CSV functionality test failed: {e}")
        return False
103 |
104 |
def main():
    """Run every setup check in order and report a pass/fail summary."""
    print("🧪 MCP Log Analyzer Windows Setup Test")
    print("=" * 50)
    print(f"Platform: {platform.system()} {platform.release()}")
    print(f"Python: {sys.version}")
    print()

    suite = [
        test_basic_imports,
        test_windows_specific,
        test_server_startup,
        test_csv_functionality,
    ]
    # Each check runs exactly once, in order, counting successes.
    passed = sum(1 for check in suite if check())

    print("\n" + "=" * 50)
    print(f"📊 Test Results: {passed}/{len(suite)} tests passed")

    if passed == len(suite):
        print("🎉 All tests passed! The setup is working correctly.")
        print("\n💡 You can now run: python main.py")
    else:
        print("⚠️  Some tests failed. Please check the error messages above.")
        if platform.system() == "Windows":
            print("\n💡 Try installing Windows dependencies:")
            print("   pip install pywin32>=300")

    return passed == len(suite)
138 |
139 |
if __name__ == "__main__":
    # Exit status mirrors the overall pass/fail result.
    sys.exit(0 if main() else 1)
```
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Main entry point for the MCP Log Analyzer server with graceful shutdown support.
4 | """
5 |
6 | import asyncio
7 | import atexit
8 | import signal
9 | import sys
10 | import logging
11 | from pathlib import Path
12 |
13 | # Add src to Python path
14 | sys.path.insert(0, str(Path(__file__).parent / "src"))
15 |
16 | from mcp_log_analyzer.mcp_server.server import mcp, log_sources, get_log_sources
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 | # Track if cleanup has been performed
21 | _cleanup_done = False
22 |
23 |
def cleanup_resources():
    """Clean up all resources on shutdown.

    Idempotent: only the first invocation does any work (it may be called
    from a signal handler, atexit, and main()'s finally block). Removes
    ETL cache files, but deliberately keeps registered log sources so
    they persist across server restarts.
    """
    global _cleanup_done

    # Guard against duplicate invocation from the multiple shutdown paths.
    if _cleanup_done:
        return
    _cleanup_done = True
    logger.info("Cleaning up resources on shutdown...")

    # Per-source ETL cache cleanup for every registered ETL source.
    etl_sources = [src for src in get_log_sources().values() if src.type == "etl"]
    if etl_sources:
        try:
            from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser
            logger.info(f"Cleaning up ETL caches for {len(etl_sources)} source(s)")
            for src in etl_sources:
                logger.info(f"Cleaning up ETL cache for: {src.name} ({src.path})")
                EtlCachedParser.cleanup_cache_for_source(src.path)
        except Exception as e:
            logger.error(f"Error cleaning up ETL caches: {e}")

    # Sweep any remaining cache files, including orphaned ones.
    try:
        from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser
        logger.info("Cleaning up any remaining ETL cache files...")
        EtlCachedParser.cleanup_all_cache()
    except Exception as e:
        logger.error(f"Error cleaning up all ETL caches: {e}")

    # Log sources intentionally persist across restarts.
    logger.info(f"Keeping {len(get_log_sources())} log sources for next startup")
58 |
59 |
def signal_handler(signum, frame):
    """Handle shutdown signals: clean up, then exit with status 0."""
    # Lazy %-style args keep formatting off the hot path.
    logger.info("Received signal %s, initiating graceful shutdown...", signum)
    cleanup_resources()
    sys.exit(0)
65 |
66 |
async def async_signal_handler() -> None:
    """Asyncio-friendly shutdown hook; delegates to cleanup_resources()."""
    cleanup_resources()
70 |
71 |
def setup_signal_handlers():
    """Set up signal handlers for graceful shutdown.

    Registers cleanup for SIGINT/SIGTERM (plus SIGBREAK on Windows) via
    the classic signal module and, when an asyncio event loop is already
    running, via loop.add_signal_handler as well.
    """
    # Handle CTRL+C (SIGINT) and termination signals.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # For Windows, also handle CTRL+BREAK.
    if sys.platform == "win32":
        signal.signal(signal.SIGBREAK, signal_handler)

    # Set up asyncio signal handlers if running in an event loop.
    try:
        loop = asyncio.get_running_loop()
        for sig in (signal.SIGINT, signal.SIGTERM):
            loop.add_signal_handler(
                sig, lambda: asyncio.create_task(async_signal_handler())
            )
    except RuntimeError:
        # No event loop running yet.
        pass
    except NotImplementedError:
        # Windows event loops don't support add_signal_handler; the
        # signal.signal handlers registered above still cover shutdown.
        pass
90 |
91 |
def main():
    """Run the MCP server with cleanup support.

    Configures logging, registers cleanup via atexit and signal
    handlers, installs a last-chance excepthook, then blocks in
    mcp.run() until the server exits.
    """
    # DEBUG level so protocol-level problems are visible during startup.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Register cleanup function with atexit so normal exits are covered.
    atexit.register(cleanup_resources)

    # Set up signal handlers for SIGINT/SIGTERM (and SIGBREAK on Windows).
    setup_signal_handlers()

    try:
        logger.info("Starting MCP Log Analyzer server...")

        # Last-chance hook: log unhandled exceptions instead of losing them.
        # (sys is already imported at module level; no local import needed.)
        def handle_exception(exc_type, exc_value, exc_traceback):
            if issubclass(exc_type, KeyboardInterrupt):
                sys.__excepthook__(exc_type, exc_value, exc_traceback)
                return
            logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

        sys.excepthook = handle_exception

        mcp.run()
        logger.info("MCP server run() method returned - server shutting down")
    except KeyboardInterrupt:
        logger.info("Server interrupted by user")
        cleanup_resources()
    except Exception as e:
        logger.error(f"Server error: {e}")
        logger.error(f"Error type: {type(e).__name__}")
        logger.error(f"Error args: {e.args}")
        import traceback
        logger.error(f"Traceback: {traceback.format_exc()}")
        cleanup_resources()
        raise
    finally:
        # Final cleanup if not already done.
        logger.info("In finally block - checking if cleanup needed")
        try:
            # Lazy loading may not have happened yet; ignore probe errors,
            # but don't swallow SystemExit/KeyboardInterrupt (narrowed from
            # the original bare except).
            sources = get_log_sources()
            if sources:
                logger.info(f"Found {len(sources)} log sources, cleaning up")
                cleanup_resources()
        except Exception:
            # If there's any error checking, skip cleanup.
            pass
        logger.info("MCP server process ending")
145 |
146 |
if __name__ == "__main__":  # script entry point
    main()
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/tcp_proxy.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | TCP Proxy for MCP Server
4 | ========================
5 |
6 | This script creates a local stdio-to-TCP proxy that allows Claude Code to connect
7 | to remote MCP servers running on different machines.
8 |
9 | Usage:
10 | python tcp_proxy.py <remote_host> <remote_port>
11 |
12 | Example:
13 | python tcp_proxy.py 192.168.1.100 8088
14 |
15 | Add to Claude Code:
16 | claude mcp add remote-mcp-server python /path/to/tcp_proxy.py 192.168.1.100 8088
17 | """
18 |
19 | import socket
20 | import sys
21 | import threading
22 | import logging
23 | import argparse
24 | from typing import Optional
25 |
26 | # Configure logging
27 | logging.basicConfig(
28 | level=logging.INFO,
29 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
30 | stream=sys.stderr # Log to stderr to avoid interfering with stdio protocol
31 | )
32 | logger = logging.getLogger(__name__)
33 |
34 |
class MCPTCPProxy:
    """Proxy that bridges stdio MCP communication to a remote TCP server.

    Two daemon threads forward data in each direction; the proxy exits
    when either direction closes. Fix over the original: when stdin
    closes, the socket is shut down so the socket->stdout thread is not
    left blocked in recv() forever (which made run()'s join() hang).
    """

    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port
        self.socket: Optional[socket.socket] = None
        self.running = False

    def connect(self) -> bool:
        """Connect to the remote MCP server.

        Returns:
            True on success, False if the connection could not be made.
        """
        try:
            logger.info(f"Connecting to MCP server at {self.host}:{self.port}")
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.connect((self.host, self.port))
            logger.info("Successfully connected to remote MCP server")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to {self.host}:{self.port}: {e}")
            return False

    def stdin_to_socket(self):
        """Forward stdin to the TCP socket."""
        try:
            while self.running:
                # Read from stdin (binary mode for proper handling)
                data = sys.stdin.buffer.read1(4096)
                if not data:
                    logger.info("stdin closed, stopping stdin->socket forwarding")
                    break

                # Send to socket
                self.socket.sendall(data)
                logger.debug(f"Forwarded {len(data)} bytes from stdin to socket")

        except Exception as e:
            logger.error(f"Error in stdin->socket forwarding: {e}")
        finally:
            self.running = False
            # Unblock the socket->stdout thread, which may be parked in
            # recv(); otherwise run()'s join() could hang indefinitely.
            self._shutdown_socket()

    def socket_to_stdout(self):
        """Forward TCP socket data to stdout."""
        try:
            while self.running:
                # Receive from socket
                data = self.socket.recv(4096)
                if not data:
                    logger.info("Socket closed, stopping socket->stdout forwarding")
                    break

                # Write to stdout
                sys.stdout.buffer.write(data)
                sys.stdout.buffer.flush()
                logger.debug(f"Forwarded {len(data)} bytes from socket to stdout")

        except Exception as e:
            logger.error(f"Error in socket->stdout forwarding: {e}")
        finally:
            self.running = False

    def _shutdown_socket(self):
        """Half-close the socket so blocking recv()/sendall() calls return."""
        if self.socket:
            try:
                self.socket.shutdown(socket.SHUT_RDWR)
            except OSError:
                # Already disconnected or closed; nothing left to unblock.
                pass

    def run(self):
        """Run the proxy until either direction of the bridge closes."""
        if not self.connect():
            sys.exit(1)

        self.running = True

        # Create forwarding threads (daemon so they never block interpreter exit).
        stdin_thread = threading.Thread(target=self.stdin_to_socket, daemon=True)
        socket_thread = threading.Thread(target=self.socket_to_stdout, daemon=True)

        # Start threads
        stdin_thread.start()
        socket_thread.start()

        try:
            # Wait for threads to complete
            stdin_thread.join()
            socket_thread.join()
        except KeyboardInterrupt:
            logger.info("Proxy interrupted by user")
        finally:
            self.cleanup()

    def cleanup(self):
        """Clean up resources."""
        self.running = False
        if self.socket:
            try:
                self.socket.close()
                logger.info("Socket closed")
            except Exception as e:
                logger.error(f"Error closing socket: {e}")
128 |
129 |
def main():
    """Parse command-line arguments and run the stdio<->TCP proxy."""
    arg_parser = argparse.ArgumentParser(
        description="TCP Proxy for connecting Claude Code to remote MCP servers",
        epilog="Example: python tcp_proxy.py 192.168.1.100 8088"
    )
    arg_parser.add_argument(
        "host",
        help="Remote MCP server host (IP address or hostname)"
    )
    arg_parser.add_argument(
        "port",
        type=int,
        help="Remote MCP server port"
    )
    arg_parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable debug logging"
    )
    args = arg_parser.parse_args()

    # Raise root logger verbosity on request.
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    proxy = MCPTCPProxy(args.host, args.port)
    try:
        proxy.run()
    except Exception as e:
        logger.error(f"Proxy failed: {e}")
        sys.exit(1)
165 |
166 |
if __name__ == "__main__":  # script entry point
    main()
```
--------------------------------------------------------------------------------
/docs/api_reference.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP Log Analyzer API Reference
2 |
3 | ## Overview
4 |
5 | The MCP (Model Context Protocol) Log Analyzer provides a REST API for analyzing different types of logs on Windows systems. This document describes the available endpoints and their usage.
6 |
7 | ## Base URL
8 |
9 | All API endpoints are relative to the base URL:
10 |
11 | ```
12 | http://localhost:5000/api
13 | ```
14 |
15 | ## Authentication
16 |
17 | Authentication is not implemented in the current version. In a production environment, you would want to add proper authentication mechanisms.
18 |
19 | ## Common Response Format
20 |
21 | All API responses follow a common format:
22 |
23 | ```json
24 | {
25 | "request_id": "uuid-string",
26 | "timestamp": "iso-datetime-string",
27 | "status": "success|error",
28 | "error": "error-message-if-status-is-error",
29 | ... endpoint-specific fields ...
30 | }
31 | ```
32 |
33 | ## Endpoints
34 |
35 | ### Health Check
36 |
37 | ```
38 | GET /health
39 | ```
40 |
41 | Check if the server is running.
42 |
43 | **Response:**
44 |
45 | ```json
46 | {
47 | "status": "ok"
48 | }
49 | ```
50 |
51 | ### Register Log Source
52 |
53 | ```
54 | POST /sources
55 | ```
56 |
57 | Register a new log source for analysis.
58 |
59 | **Request:**
60 |
61 | ```json
62 | {
63 | "request_id": "uuid-string",
64 | "timestamp": "iso-datetime-string",
65 | "client_id": "optional-client-id",
66 | "name": "log-source-name",
67 | "type": "event|structured|csv|unstructured",
68 | "path": "path-to-log-source",
69 | "metadata": {
70 | "optional-key1": "optional-value1",
71 | "optional-key2": "optional-value2"
72 | }
73 | }
74 | ```
75 |
76 | **Response:**
77 |
78 | ```json
79 | {
80 | "request_id": "uuid-string",
81 | "timestamp": "iso-datetime-string",
82 | "status": "success",
83 | "source": {
84 | "id": "uuid-string",
85 | "name": "log-source-name",
86 | "type": "event|structured|csv|unstructured",
87 | "path": "path-to-log-source",
88 | "created_at": "iso-datetime-string",
89 | "updated_at": "iso-datetime-string",
90 | "metadata": {
91 | "key1": "value1",
92 | "key2": "value2"
93 | }
94 | }
95 | }
96 | ```
97 |
98 | ### List Log Sources
99 |
100 | ```
101 | GET /sources
102 | ```
103 |
104 | List all registered log sources.
105 |
106 | **Response:**
107 |
108 | ```json
109 | [
110 | {
111 | "id": "uuid-string",
112 | "name": "log-source-name",
113 | "type": "event|structured|csv|unstructured",
114 | "path": "path-to-log-source",
115 | "created_at": "iso-datetime-string",
116 | "updated_at": "iso-datetime-string",
117 | "metadata": {
118 | "key1": "value1",
119 | "key2": "value2"
120 | }
121 | },
122 | ...
123 | ]
124 | ```
125 |
126 | ### Get Log Source
127 |
128 | ```
129 | GET /sources/{source_id}
130 | ```
131 |
132 | Get details of a specific log source.
133 |
134 | **Response:**
135 |
136 | ```json
137 | {
138 | "id": "uuid-string",
139 | "name": "log-source-name",
140 | "type": "event|structured|csv|unstructured",
141 | "path": "path-to-log-source",
142 | "created_at": "iso-datetime-string",
143 | "updated_at": "iso-datetime-string",
144 | "metadata": {
145 | "key1": "value1",
146 | "key2": "value2"
147 | }
148 | }
149 | ```
150 |
151 | ### Delete Log Source
152 |
153 | ```
154 | DELETE /sources/{source_id}
155 | ```
156 |
157 | Delete a registered log source.
158 |
159 | **Response:**
160 |
161 | ```json
162 | {
163 | "status": "success",
164 | "message": "Log source {source_id} deleted"
165 | }
166 | ```
167 |
168 | ### Query Logs
169 |
170 | ```
171 | POST /query
172 | ```
173 |
174 | Query logs from registered sources.
175 |
176 | **Request:**
177 |
178 | ```json
179 | {
180 | "request_id": "uuid-string",
181 | "timestamp": "iso-datetime-string",
182 | "client_id": "optional-client-id",
183 | "query": {
184 | "source_ids": ["optional-uuid-string1", "optional-uuid-string2"],
185 | "types": ["optional-log-type1", "optional-log-type2"],
186 | "start_time": "optional-iso-datetime-string",
187 | "end_time": "optional-iso-datetime-string",
188 | "filters": {
189 | "optional-field1": "optional-value1",
190 | "optional-field2": "optional-value2"
191 | },
192 | "limit": 100,
193 | "offset": 0
194 | }
195 | }
196 | ```
197 |
198 | **Response:**
199 |
200 | ```json
201 | {
202 | "request_id": "uuid-string",
203 | "timestamp": "iso-datetime-string",
204 | "status": "success",
205 | "records": [
206 | {
207 | "id": "uuid-string",
208 | "source_id": "uuid-string",
209 | "timestamp": "iso-datetime-string",
210 | "data": {
211 | "field1": "value1",
212 | "field2": "value2",
213 | ...
214 | },
215 | "raw_data": "optional-raw-data-string"
216 | },
217 | ...
218 | ],
219 | "total": 1234,
220 | "limit": 100,
221 | "offset": 0
222 | }
223 | ```
224 |
225 | ### Analyze Logs
226 |
227 | ```
228 | POST /analyze
229 | ```
230 |
231 | Analyze logs from registered sources.
232 |
233 | **Request:**
234 |
235 | ```json
236 | {
237 | "request_id": "uuid-string",
238 | "timestamp": "iso-datetime-string",
239 | "client_id": "optional-client-id",
240 | "query": {
241 | "source_ids": ["optional-uuid-string1", "optional-uuid-string2"],
242 | "types": ["optional-log-type1", "optional-log-type2"],
243 | "start_time": "optional-iso-datetime-string",
244 | "end_time": "optional-iso-datetime-string",
245 | "filters": {
246 | "optional-field1": "optional-value1",
247 | "optional-field2": "optional-value2"
248 | },
249 | "limit": 100,
250 | "offset": 0
251 | },
252 | "analysis_type": "analysis-type",
253 | "parameters": {
254 | "optional-param1": "optional-value1",
255 | "optional-param2": "optional-value2"
256 | }
257 | }
258 | ```
259 |
260 | **Response:**
261 |
262 | ```json
263 | {
264 | "request_id": "uuid-string",
265 | "timestamp": "iso-datetime-string",
266 | "status": "success",
267 | "results": {
268 | "analysis_type": "analysis-type",
269 | "parameters": {
270 | "param1": "value1",
271 | "param2": "value2"
272 | },
273 | "summary": "summary-string",
274 | "details": {
275 | ... analysis-specific-details ...
276 | }
277 | },
278 | "query": {
279 | ... query-object-from-request ...
280 | }
281 | }
282 | ```
283 |
284 | ## Error Handling
285 |
286 | If an error occurs, the response will have a status of "error" and an error message:
287 |
288 | ```json
289 | {
290 | "status": "error",
291 | "error": "Error message"
292 | }
293 | ```
294 |
295 | HTTP status codes are also used to indicate errors:
296 | - 400: Bad Request - The request was invalid
297 | - 404: Not Found - The requested resource was not found
298 | - 500: Internal Server Error - An unexpected error occurred on the server
299 |
300 | ## Log Types
301 |
302 | The following log types are supported:
303 |
304 | - `event`: Windows Event Logs (EVT/EVTX)
305 | - `structured`: Structured Logs (JSON, XML)
306 | - `csv`: CSV Logs
307 | - `unstructured`: Unstructured Text Logs
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/state_manager.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | State persistence manager for MCP Log Analyzer.
3 |
4 | Manages persistent storage of log sources and other state that needs
5 | to survive server restarts.
6 | """
7 |
8 | import json
9 | import os
10 | from pathlib import Path
11 | from typing import Dict, Any, Optional
12 | import logging
13 | from datetime import datetime
14 |
15 | from .models import LogSource, LogType
16 |
17 | logger = logging.getLogger(__name__)
18 |
19 |
class StateManager:
    """Manages persistent state for the MCP server.

    Log sources and general server state are serialized to JSON files in a
    platform-appropriate per-user directory so they survive server restarts.
    All save/load operations are best-effort: failures are logged, never
    raised to the caller.
    """

    def __init__(self, state_dir: Optional[Path] = None):
        """Initialize state manager.

        Args:
            state_dir: Directory to store state files. Defaults to the
                user's app-data directory (%APPDATA%\\MCPLogAnalyzer on
                Windows, ~/.config/mcp-log-analyzer elsewhere).
        """
        if state_dir is None:
            # Use platform-appropriate directory
            if os.name == 'nt':  # Windows
                app_data = os.environ.get('APPDATA', os.path.expanduser('~'))
                state_dir = Path(app_data) / 'MCPLogAnalyzer'
            else:  # Unix-like
                state_dir = Path.home() / '.config' / 'mcp-log-analyzer'

        self.state_dir = Path(state_dir)
        self.state_dir.mkdir(parents=True, exist_ok=True)

        self.state_file = self.state_dir / 'server_state.json'
        self.sources_file = self.state_dir / 'log_sources.json'

        logger.info(f"State manager initialized with directory: {self.state_dir}")

    def save_log_sources(self, log_sources: Dict[str, LogSource]) -> None:
        """Save log sources to persistent storage.

        Args:
            log_sources: Dictionary of log sources to save.
        """
        try:
            sources_data = {
                name: {
                    # id is stringified (presumably a UUID — see load path).
                    'id': str(source.id),
                    'name': source.name,
                    'type': source.type,
                    'path': source.path,
                    'metadata': source.metadata,
                    # Datetimes are stored as ISO-8601 strings for JSON.
                    'created_at': source.created_at.isoformat() if source.created_at else None,
                    'updated_at': source.updated_at.isoformat() if source.updated_at else None,
                }
                for name, source in log_sources.items()
            }

            # Explicit UTF-8 so non-ASCII paths/metadata round-trip
            # regardless of the platform's default text encoding.
            with open(self.sources_file, 'w', encoding='utf-8') as f:
                json.dump(sources_data, f, indent=2)

            logger.info(f"Saved {len(log_sources)} log sources to {self.sources_file}")

        except Exception as e:
            logger.error(f"Failed to save log sources: {e}")

    def load_log_sources(self) -> Dict[str, LogSource]:
        """Load log sources from persistent storage.

        Returns:
            Dictionary of loaded log sources. Empty if no state file exists
            or loading fails (the error is logged).
        """
        log_sources = {}

        if not self.sources_file.exists():
            logger.info("No saved log sources found")
            return log_sources

        try:
            with open(self.sources_file, 'r', encoding='utf-8') as f:
                sources_data = json.load(f)

            for name, data in sources_data.items():
                # Convert ISO-8601 datetime strings back to datetime objects
                if data.get('created_at'):
                    data['created_at'] = datetime.fromisoformat(data['created_at'])
                if data.get('updated_at'):
                    data['updated_at'] = datetime.fromisoformat(data['updated_at'])

                # Rehydrate the LogSource model from the saved fields.
                source = LogSource(
                    id=data['id'],
                    name=data['name'],
                    type=data['type'],
                    path=data['path'],
                    metadata=data.get('metadata', {}),
                    created_at=data.get('created_at'),
                    updated_at=data.get('updated_at')
                )

                log_sources[name] = source

            logger.info(f"Loaded {len(log_sources)} log sources from {self.sources_file}")

        except Exception as e:
            logger.error(f"Failed to load log sources: {e}")

        return log_sources

    def save_server_state(self, state: Dict[str, Any]) -> None:
        """Save general server state.

        Args:
            state: State dictionary to save. Must be JSON-serializable.
        """
        try:
            with open(self.state_file, 'w', encoding='utf-8') as f:
                json.dump(state, f, indent=2)

            logger.info(f"Saved server state to {self.state_file}")

        except Exception as e:
            logger.error(f"Failed to save server state: {e}")

    def load_server_state(self) -> Dict[str, Any]:
        """Load general server state.

        Returns:
            Loaded state dictionary, or empty dict if none exists or
            loading fails (the error is logged).
        """
        if not self.state_file.exists():
            logger.info("No saved server state found")
            return {}

        try:
            with open(self.state_file, 'r', encoding='utf-8') as f:
                state = json.load(f)

            logger.info(f"Loaded server state from {self.state_file}")
            return state

        except Exception as e:
            logger.error(f"Failed to load server state: {e}")
            return {}

    def clear_state(self) -> None:
        """Clear all saved state by deleting both state files if present."""
        try:
            if self.sources_file.exists():
                self.sources_file.unlink()
                logger.info("Cleared log sources state")

            if self.state_file.exists():
                self.state_file.unlink()
                logger.info("Cleared server state")

        except Exception as e:
            logger.error(f"Failed to clear state: {e}")
165 |
166 |
# Global state manager instance (lazily created process-wide singleton)
_state_manager: Optional[StateManager] = None


def get_state_manager() -> StateManager:
    """Return the shared StateManager, creating it on first access."""
    global _state_manager
    manager = _state_manager
    if manager is None:
        manager = StateManager()
        _state_manager = manager
    return manager
```
--------------------------------------------------------------------------------
/test_tcp_proxy.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Test script to verify TCP proxy functionality.
4 | """
5 |
6 | import asyncio
7 | import json
8 | import sys
9 | import time
10 |
async def test_tcp_connection(host='localhost', port=8080):
    """Smoke-test a TCP MCP server: handshake, listings, a tool call, shutdown.

    Args:
        host: Hostname the proxy/server listens on.
        port: TCP port to connect to.

    Exits the process with status 1 on timeout, refused connection, or any
    unexpected error.
    """

    async def rpc(reader, writer, request, sent_note, timeout=5.0):
        """Send one newline-delimited JSON-RPC request, read one response.

        Prints *sent_note* once the request is flushed. Returns the parsed
        response dict, or None if the server closed the connection without
        replying. Raises asyncio.TimeoutError after *timeout* seconds.
        """
        writer.write((json.dumps(request) + '\n').encode('utf-8'))
        await writer.drain()
        print(sent_note)
        line = await asyncio.wait_for(reader.readline(), timeout=timeout)
        return json.loads(line.decode('utf-8')) if line else None

    print(f"Testing TCP MCP proxy at {host}:{port}...")

    try:
        reader, writer = await asyncio.open_connection(host, port)
        print("✓ Connected to TCP server")

        # Test 1: Initialize request (MCP handshake)
        print("\n1. Testing initialize request...")
        response = await rpc(reader, writer, {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {},
                "clientInfo": {
                    "name": "test-client",
                    "version": "1.0.0"
                }
            }
        }, " Sent initialize request")
        if response:
            print(" ✓ Received initialize response")
            print(f" Server: {response.get('result', {}).get('serverInfo', {})}")
        else:
            print(" ✗ No response received")
            return

        # Test 2: List tools
        print("\n2. Testing tools/list request...")
        response = await rpc(reader, writer, {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {}
        }, " Sent tools/list request")
        if response:
            tools = response.get('result', {}).get('tools', [])
            print(f" ✓ Received {len(tools)} tools")
            for tool in tools[:5]:  # Show first 5 tools
                print(f" - {tool.get('name')}: {tool.get('description', '')[:50]}...")

        # Test 3: List resources
        print("\n3. Testing resources/list request...")
        response = await rpc(reader, writer, {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "resources/list",
            "params": {}
        }, " Sent resources/list request")
        if response:
            resources = response.get('result', {}).get('resources', [])
            print(f" ✓ Received {len(resources)} resources")
            for resource in resources[:5]:  # Show first 5 resources
                print(f" - {resource.get('uri')}: {resource.get('name', '')}")

        # Test 4: Call a tool
        print("\n4. Testing tool call (list_log_sources)...")
        response = await rpc(reader, writer, {
            "jsonrpc": "2.0",
            "id": 4,
            "method": "tools/call",
            "params": {
                "name": "list_log_sources",
                "arguments": {}
            }
        }, " Sent tool call request")
        if response:
            if 'result' in response:
                print(" ✓ Tool call successful")
                result = response.get('result', {})
                if 'content' in result:
                    print(f" Result: {str(result['content'])[:100]}...")
            elif 'error' in response:
                print(f" ✗ Tool call error: {response['error']}")

        # Test 5: Shutdown
        print("\n5. Testing shutdown...")
        response = await rpc(reader, writer, {
            "jsonrpc": "2.0",
            "id": 5,
            "method": "shutdown",
            "params": {}
        }, " Sent shutdown request")
        if response:
            print(" ✓ Received shutdown response")

        # Close connection
        writer.close()
        await writer.wait_closed()
        print("\n✓ All tests completed successfully!")

    except asyncio.TimeoutError:
        print("✗ Timeout waiting for response")
        sys.exit(1)
    except ConnectionRefusedError:
        print("✗ Could not connect to TCP server. Make sure the proxy is running:")
        print(" python tcp_proxy.py python main.py")
        sys.exit(1)
    except Exception as e:
        print(f"✗ Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
159 |
if __name__ == "__main__":
    # CLI entry point: parse connection options, then run the async test.
    import argparse

    cli = argparse.ArgumentParser(description='Test TCP proxy connection')
    cli.add_argument('--host', default='localhost', help='Host to connect to')
    cli.add_argument('--port', type=int, default=8080, help='Port to connect to')
    ns = cli.parse_args()
    asyncio.run(test_tcp_connection(ns.host, ns.port))
```
--------------------------------------------------------------------------------
/scripts/test_server.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python
2 | """Test script for the MCP Log Analyzer server."""
3 |
4 | import argparse
5 | import json
6 | import logging
7 | import sys
8 | import uuid
9 | from datetime import datetime, timedelta
10 | from typing import Any, Dict
11 |
12 | import requests
13 | from pydantic import BaseModel, Field
14 |
15 | # Configure logging
16 | logging.basicConfig(
17 | level=logging.INFO,
18 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
19 | )
20 | logger = logging.getLogger("test_server")
21 |
22 |
class TestResult(BaseModel):
    """Outcome of a single test run (pydantic model).

    Built by run_test(): `success` is True when the test callable returned
    without raising, and `response` then holds its return value.
    """

    # Name of the test that was executed.
    name: str
    # True when the test callable completed without raising.
    success: bool
    # "Test successful" on success, or "Test failed: <error>" on failure.
    message: str
    # Server response captured on success; empty dict otherwise.
    response: Dict[str, Any] = Field(default_factory=dict)
30 |
31 |
def run_test(name: str, func: callable, *args, **kwargs) -> TestResult:
    """Execute one test callable and wrap its outcome in a TestResult.

    Args:
        name: Test name.
        func: Test function.
        *args: Arguments for the test function.
        **kwargs: Keyword arguments for the test function.

    Returns:
        A TestResult; failures are captured, never re-raised.
    """
    logger.info(f"Running test: {name}")
    try:
        result = func(*args, **kwargs)
    except Exception as exc:
        logger.exception(f"Test failed: {exc}")
        return TestResult(name=name, success=False, message=f"Test failed: {str(exc)}")
    return TestResult(
        name=name, success=True, message="Test successful", response=result
    )
53 |
54 |
def test_health(base_url: str) -> Dict[str, Any]:
    """Hit the server's health endpoint and return the decoded JSON body.

    Args:
        base_url: Base URL of the server.

    Returns:
        Decoded JSON response from the server.
    """
    resp = requests.get(f"{base_url}/api/health")
    resp.raise_for_status()
    return resp.json()
67 |
68 |
def test_register_source(
    base_url: str, source_name: str, source_type: str, source_path: str
) -> Dict[str, Any]:
    """Register a log source via the REST API and return the JSON reply.

    Args:
        base_url: Base URL of the server.
        source_name: Name of the source.
        source_type: Type of the source.
        source_path: Path to the source.

    Returns:
        Decoded JSON response from the server.
    """
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "name": source_name,
        "type": source_type,
        "path": source_path,
        "metadata": {"test": True, "description": "Test source"},
    }
    resp = requests.post(f"{base_url}/api/sources", json=payload)
    resp.raise_for_status()
    return resp.json()
95 |
96 |
def test_list_sources(base_url: str) -> Dict[str, Any]:
    """Fetch the list of registered log sources from the server.

    Args:
        base_url: Base URL of the server.

    Returns:
        Decoded JSON response from the server.
    """
    resp = requests.get(f"{base_url}/api/sources")
    resp.raise_for_status()
    return resp.json()
109 |
110 |
def test_query_logs(base_url: str, source_id: str = None) -> Dict[str, Any]:
    """Query the last day of logs via the REST API.

    Args:
        base_url: Base URL of the server.
        source_id: Optional source ID to restrict the query to.

    Returns:
        Decoded JSON response from the server.
    """
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "query": {
            # None means "all sources" server-side.
            "source_ids": [source_id] if source_id else None,
            "start_time": (datetime.now() - timedelta(days=1)).isoformat(),
            "end_time": datetime.now().isoformat(),
            "limit": 10,
            "offset": 0,
        },
    }
    resp = requests.post(f"{base_url}/api/query", json=payload)
    resp.raise_for_status()
    return resp.json()
136 |
137 |
def test_analyze_logs(base_url: str, source_id: str = None) -> Dict[str, Any]:
    """Request a "summary" analysis of the last day of logs.

    Args:
        base_url: Base URL of the server.
        source_id: Optional source ID to restrict the analysis to.

    Returns:
        Decoded JSON response from the server.
    """
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "query": {
            # None means "all sources" server-side.
            "source_ids": [source_id] if source_id else None,
            "start_time": (datetime.now() - timedelta(days=1)).isoformat(),
            "end_time": datetime.now().isoformat(),
            "limit": 10,
            "offset": 0,
        },
        "analysis_type": "summary",
        "parameters": {"include_statistics": True},
    }
    resp = requests.post(f"{base_url}/api/analyze", json=payload)
    resp.raise_for_status()
    return resp.json()
165 |
166 |
def main() -> None:
    """Run the test script: execute every API test and report the results."""
    parser = argparse.ArgumentParser(description="Test MCP Log Analyzer Server")
    parser.add_argument("--url", help="Server URL", default="http://localhost:5000")
    parser.add_argument(
        "--source-name", help="Log source name", default="System Event Log"
    )
    parser.add_argument("--source-type", help="Log source type", default="event")
    parser.add_argument("--source-path", help="Log source path", default="System")
    parser.add_argument("--output", help="Output file for test results", default=None)
    args = parser.parse_args()

    logger.info(f"Testing server at {args.url}")

    # Health first, then registration (whose result feeds the later tests).
    results = [run_test("health", test_health, args.url)]

    register_result = run_test(
        "register_source",
        test_register_source,
        args.url,
        args.source_name,
        args.source_type,
        args.source_path,
    )
    results.append(register_result)

    # Pull the new source's ID out of the registration response, if present.
    source_id = None
    if register_result.success and "source" in register_result.response:
        source_id = register_result.response["source"]["id"]

    results.append(run_test("list_sources", test_list_sources, args.url))
    results.append(run_test("query_logs", test_query_logs, args.url, source_id))
    results.append(run_test("analyze_logs", test_analyze_logs, args.url, source_id))

    # Summarize outcomes.
    logger.info("Test results:")
    success_count = 0
    for item in results:
        status = "✅ Success" if item.success else "❌ Failure"
        logger.info(f"{status}: {item.name} - {item.message}")
        if item.success:
            success_count += 1

    logger.info(f"{success_count}/{len(results)} tests succeeded")

    # Optionally persist full results as JSON.
    if args.output:
        with open(args.output, "w") as f:
            json.dump([item.dict() for item in results], f, indent=2)

    # Non-zero exit code when any test failed.
    if success_count < len(results):
        sys.exit(1)
231 |
232 |
# Script entry point: run the full API test suite against the server.
if __name__ == "__main__":
    main()
235 |
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/health_check_tools.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Health check and diagnostic tools for MCP server.
3 | """
4 |
5 | import asyncio
6 | import time
7 | from datetime import datetime
8 | from typing import Dict, Any
9 |
10 | from mcp.server import FastMCP
11 |
12 |
def register_health_check_tools(mcp: FastMCP):
    """Register health check and diagnostic tools with the MCP server.

    Adds six tools to *mcp*: debug_params, server_diagnostics, health_check,
    ping, echo, and long_running_test. They are defined as closures so
    health_check can read the start time captured below.
    """

    # Server start time for uptime calculation (captured by health_check).
    server_start_time = time.time()

    @mcp.tool()
    async def debug_params(**kwargs) -> Dict[str, Any]:
        """
        Debug tool to see exactly what parameters are being passed.

        This tool accepts any parameters and returns them for inspection.
        """
        import logging
        logger = logging.getLogger(__name__)

        logger.info("=== DEBUG_PARAMS TOOL CALLED ===")
        logger.info(f"Received kwargs: {kwargs}")
        logger.info(f"Kwargs type: {type(kwargs)}")
        logger.info(f"Kwargs keys: {list(kwargs.keys()) if kwargs else 'None'}")

        return {
            "received_kwargs": kwargs,
            "kwargs_type": str(type(kwargs)),
            "kwargs_keys": list(kwargs.keys()) if kwargs else [],
            "timestamp": datetime.now().isoformat()
        }

    @mcp.tool()
    async def server_diagnostics() -> Dict[str, Any]:
        """
        Get detailed server diagnostics including internal state.

        This tool provides deep insights into the server's current state
        and can help diagnose issues like parameter errors.
        """
        from ..server import mcp as server_mcp

        diagnostics = {
            "timestamp": datetime.now().isoformat(),
            "server_type": type(server_mcp).__name__,
            "server_info": {
                # getattr with defaults: probe defensively, attribute
                # availability is not guaranteed here.
                "name": getattr(server_mcp, 'name', 'unknown'),
                "version": getattr(server_mcp, 'version', 'unknown')
            },
            "request_stats": {},
            "registered_tools": [],
            "internal_state": {}
        }

        # Get request statistics if available
        if hasattr(server_mcp, '_request_count'):
            diagnostics["request_stats"] = {
                "total_requests": server_mcp._request_count,
                "total_errors": getattr(server_mcp, '_error_count', 0),
                "consecutive_param_errors": getattr(server_mcp, '_consecutive_param_errors', 0)
            }

        # List registered tools (attribute name may differ by version)
        if hasattr(server_mcp, '_tools'):
            diagnostics["registered_tools"] = list(server_mcp._tools.keys())
        elif hasattr(server_mcp, 'tools'):
            diagnostics["registered_tools"] = list(server_mcp.tools.keys())

        # Check for common issues
        diagnostics["health_checks"] = {
            "has_tools": len(diagnostics["registered_tools"]) > 0,
            "server_responsive": True  # We're responding, so this is true
        }

        return diagnostics

    @mcp.tool()
    async def health_check() -> Dict[str, Any]:
        """
        Perform a health check on the MCP server.

        Returns server status, uptime, and basic diagnostic information.
        This can be used by clients to verify the server is responsive.
        """
        from ..server import log_sources, parsers

        current_time = time.time()
        uptime_seconds = current_time - server_start_time

        # Check ETL parser status
        etl_parser_info = {}
        if "etl" in parsers:
            parser = parsers["etl"]
            etl_parser_info = {
                "available": parser.is_available() if hasattr(parser, "is_available") else False,
                "type": type(parser).__name__
            }

        # Check for cached parser. Cache details are optional extras, so
        # failures here are deliberately swallowed — but only Exception,
        # never SystemExit/KeyboardInterrupt (the old bare `except:` did).
        try:
            from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser
            EtlCachedParser._init_cache_dir()
            etl_parser_info["cache_dir"] = EtlCachedParser._cache_dir
            etl_parser_info["cached_files"] = len(EtlCachedParser._cache_registry)
        except Exception:
            pass

        return {
            "status": "healthy",
            "timestamp": datetime.now().isoformat(),
            "uptime_seconds": uptime_seconds,
            "uptime_human": f"{uptime_seconds/3600:.1f} hours",
            "registered_sources": len(log_sources),
            "available_parsers": list(parsers.keys()),
            "etl_parser": etl_parser_info,
            "server_info": {
                "name": "mcp-log-analyzer",
                "version": "0.1.0"
            }
        }

    @mcp.tool()
    async def ping() -> Dict[str, Any]:
        """
        Simple ping endpoint for connection testing.

        Returns immediately with a timestamp. Useful for testing
        if the MCP connection is alive and responsive.
        """
        return {
            "pong": True,
            "timestamp": datetime.now().isoformat(),
            "server_time_ms": int(time.time() * 1000)
        }

    @mcp.tool()
    async def echo(message: str) -> Dict[str, Any]:
        """
        Echo back a message for connection testing.

        Args:
            message: The message to echo back

        Returns the message with a timestamp. Useful for testing
        round-trip communication with the server.
        """
        import logging
        logger = logging.getLogger(__name__)
        logger.info(f"Echo tool called with message: {message}")

        return {
            "echo": message,
            "timestamp": datetime.now().isoformat(),
            "received_at": time.time()
        }

    @mcp.tool()
    async def long_running_test(duration_seconds: int = 60) -> Dict[str, Any]:
        """
        Test long-running operations with periodic updates.

        Args:
            duration_seconds: How long to run the test (clamped to 0..300)

        This simulates a long-running operation and can be used to test
        timeout handling and connection stability.
        """
        import logging
        logger = logging.getLogger(__name__)

        # Clamp to [0, 300]. A plain min() let negative values through,
        # and since `negative % 10` is positive in Python the remainder
        # branch would then sleep unexpectedly.
        duration_seconds = max(0, min(duration_seconds, 300))

        start_time = time.time()
        logger.info(f"Starting long-running test for {duration_seconds} seconds")

        # Log progress every 10 seconds
        for i in range(0, duration_seconds, 10):
            await asyncio.sleep(10)
            elapsed = time.time() - start_time
            logger.info(f"Long-running test progress: {elapsed:.0f}/{duration_seconds} seconds")

        # Handle remaining time
        remaining = duration_seconds % 10
        if remaining > 0:
            await asyncio.sleep(remaining)

        total_elapsed = time.time() - start_time
        logger.info(f"Long-running test completed after {total_elapsed:.1f} seconds")

        return {
            "status": "completed",
            "requested_duration": duration_seconds,
            "actual_duration": total_elapsed,
            "timestamp": datetime.now().isoformat()
        }
```
--------------------------------------------------------------------------------
/tcp_server.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Standalone TCP MCP server for remote connections.
4 | """
5 |
6 | import asyncio
7 | import json
8 | import logging
9 | import sys
10 | from pathlib import Path
11 |
12 | # Add src to Python path
13 | sys.path.insert(0, str(Path(__file__).parent / "src"))
14 |
15 | from mcp_log_analyzer.mcp_server.server import mcp
16 |
17 |
class TCPMCPServer:
    """TCP-based MCP server for remote connections.

    Speaks newline-delimited JSON-RPC 2.0 over a plain TCP socket.
    process_mcp_request is a simplified stand-in dispatcher (see its note);
    it does not yet route through the real FastMCP server.
    """

    def __init__(self, host="0.0.0.0", port=8080):
        """Create the server.

        Args:
            host: Interface to bind to.
            port: TCP port to listen on.
        """
        self.host = host
        self.port = port
        self.logger = logging.getLogger(__name__)

    async def handle_client(self, reader, writer):
        """Handle a single client connection until EOF or a fatal error.

        Reads one JSON-RPC message per line, dispatches it, and writes the
        response back followed by a newline. Notifications get no reply.
        """
        client_addr = writer.get_extra_info('peername')
        self.logger.info(f"Client connected from {client_addr}")

        try:
            while True:
                # Read one newline-terminated JSON-RPC message.
                line = await reader.readline()
                if not line:
                    break  # client closed the connection

                request = None  # so the generic handler below can't see a stale value
                try:
                    message = line.decode('utf-8').strip()
                    if not message:
                        continue  # ignore blank lines

                    self.logger.debug(f"Received: {message}")

                    # Parse JSON-RPC message
                    request = json.loads(message)

                    # Process the request through the MCP server
                    response = await self.process_mcp_request(request)

                    # Send response (None means notification: no reply)
                    if response:
                        response_line = json.dumps(response) + '\n'
                        writer.write(response_line.encode('utf-8'))
                        await writer.drain()
                        self.logger.debug(f"Sent: {response_line.strip()}")

                except json.JSONDecodeError as e:
                    self.logger.error(f"JSON decode error: {e}")
                    error_response = {
                        "jsonrpc": "2.0",
                        # JSON-RPC 2.0: id MUST be null when the request id
                        # could not be determined (parse error).
                        "id": None,
                        "error": {
                            "code": -32700,
                            "message": "Parse error"
                        }
                    }
                    error_line = json.dumps(error_response) + '\n'
                    writer.write(error_line.encode('utf-8'))
                    await writer.drain()

                except Exception as e:
                    self.logger.error(f"Error processing request: {e}")
                    error_response = {
                        "jsonrpc": "2.0",
                        # Echo the request id (when we have one) so the
                        # client can correlate the error with its call.
                        "id": request.get('id') if isinstance(request, dict) else None,
                        "error": {
                            "code": -32603,
                            "message": f"Internal error: {str(e)}"
                        }
                    }
                    error_line = json.dumps(error_response) + '\n'
                    writer.write(error_line.encode('utf-8'))
                    await writer.drain()

        except Exception as e:
            self.logger.error(f"Connection error: {e}")
        finally:
            self.logger.info(f"Client {client_addr} disconnected")
            writer.close()
            await writer.wait_closed()

    async def process_mcp_request(self, request):
        """Process an MCP request and return the response dict (or None).

        Returns None for notifications, which require no response.
        """
        # This is a simplified implementation
        # In practice, you'd need to integrate with the actual MCP server logic

        method = request.get('method')
        params = request.get('params', {})
        request_id = request.get('id')

        if method == 'initialize':
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "result": {
                    "protocolVersion": "2024-11-05",
                    "capabilities": {
                        "tools": {"listChanged": False},
                        "resources": {"subscribe": False, "listChanged": False},
                        "prompts": {"listChanged": False}
                    },
                    "serverInfo": {
                        "name": "mcp-log-analyzer",
                        "version": "1.0.0"
                    }
                }
            }

        elif method == 'notifications/initialized':
            # No response needed for notifications
            return None

        elif method == 'tools/list':
            # Return available tools
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "result": {
                    "tools": [
                        {
                            "name": "register_log_source",
                            "description": "Register a new log source for analysis",
                            "inputSchema": {
                                "type": "object",
                                "properties": {
                                    "name": {"type": "string"},
                                    "source_type": {"type": "string"},
                                    "path": {"type": "string"}
                                },
                                "required": ["name", "source_type", "path"]
                            }
                        }
                        # Add more tools as needed
                    ]
                }
            }

        else:
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "error": {
                    "code": -32601,
                    "message": f"Method not found: {method}"
                }
            }

    async def start(self):
        """Bind the listening socket and serve clients forever."""
        self.logger.info(f"Starting MCP TCP server on {self.host}:{self.port}")

        server = await asyncio.start_server(
            self.handle_client,
            self.host,
            self.port
        )

        addr = server.sockets[0].getsockname()
        self.logger.info(f"MCP server listening on {addr[0]}:{addr[1]}")

        async with server:
            await server.serve_forever()
172 |
173 |
async def main():
    """Parse CLI arguments, configure logging, and run the TCP server."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='MCP Log Analyzer TCP Server')
    arg_parser.add_argument('--host', default='0.0.0.0',
                            help='Host to bind to (default: 0.0.0.0)')
    arg_parser.add_argument('--port', type=int, default=8080,
                            help='Port to bind to (default: 8080)')
    arg_parser.add_argument('--debug', action='store_true',
                            help='Enable debug logging')

    opts = arg_parser.parse_args()

    # --debug switches the root logger to DEBUG verbosity.
    logging.basicConfig(
        level=logging.DEBUG if opts.debug else logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Run until interrupted from the keyboard.
    try:
        await TCPMCPServer(opts.host, opts.port).start()
    except KeyboardInterrupt:
        print("\nServer stopped.")
202 |
203 |
# Script entry point: start the asyncio event loop and run the TCP server.
if __name__ == "__main__":
    asyncio.run(main())
```
--------------------------------------------------------------------------------
/check_server.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Script to verify the MCP server is working correctly.
4 | This simulates what Claude Code does when it connects to the server.
5 | """
6 |
7 | import json
8 | import subprocess
9 | import sys
10 | import time
11 | from typing import Dict, Any
12 |
13 |
def send_mcp_request(process: subprocess.Popen, request: Dict[str, Any]) -> Dict[str, Any]:
    """Write one JSON-RPC request to the server's stdin and read one reply.

    Args:
        process: The server subprocess (stdin/stdout pipes, bytes mode).
        request: JSON-RPC request dict; must contain a "method" key.

    Returns:
        The parsed response dict, or {} if no response line was read.
    """
    payload = json.dumps(request) + '\n'

    print(f"📤 Sending: {request['method']}")
    process.stdin.write(payload.encode())
    process.stdin.flush()

    # One response per request, newline-delimited.
    raw = process.stdout.readline().decode().strip()
    if not raw:
        return {}
    reply = json.loads(raw)
    print(f"📥 Response: {reply.get('result', {}).get('meta', {}).get('name', 'Success')}")
    return reply
30 |
31 |
def send_mcp_notification(process: subprocess.Popen, notification: Dict[str, Any]) -> None:
    """Write an MCP notification to the server's stdin (no response expected).

    Args:
        process: The server subprocess (stdin pipe, bytes mode).
        notification: JSON-RPC notification dict; must contain a "method" key.
    """
    payload = json.dumps(notification) + '\n'

    print(f"📤 Sending notification: {notification['method']}")
    process.stdin.write(payload.encode())
    process.stdin.flush()
39 |
40 |
def test_mcp_server():
    """Test the MCP server functionality.

    Launches ``main.py`` as a child process speaking newline-delimited
    JSON-RPC over stdin/stdout, performs the MCP handshake
    (initialize -> initialized notification), then lists tools and
    resources, printing a human-readable report of each step.
    """
    print("🚀 Testing MCP Log Analyzer Server")
    print("=" * 50)

    # Start the server process
    try:
        process = subprocess.Popen(
            [sys.executable, 'main.py'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=False  # We'll handle encoding ourselves
        )

        print("✅ Server process started (PID: {})".format(process.pid))

        # Give the server a moment to start
        time.sleep(0.5)

        # Test 1: Initialize connection
        init_request = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {
                    "tools": {},
                    "resources": {},
                    "prompts": {}
                },
                "clientInfo": {
                    "name": "test-client",
                    "version": "1.0.0"
                }
            }
        }

        init_response = send_mcp_request(process, init_request)

        if 'result' in init_response:
            server_info = init_response['result']
            print(f"📋 Server Name: {server_info.get('serverInfo', {}).get('name', 'Unknown')}")
            print(f"📋 Server Version: {server_info.get('serverInfo', {}).get('version', 'Unknown')}")

            # Show capabilities
            capabilities = server_info.get('capabilities', {})
            if 'tools' in capabilities:
                print(f"🔧 Tools: Available")
            if 'resources' in capabilities:
                print(f"📂 Resources: Available")
            if 'prompts' in capabilities:
                print(f"💬 Prompts: Available")

        # CRITICAL: Send initialized notification to complete the handshake
        initialized_notification = {
            "jsonrpc": "2.0",
            "method": "notifications/initialized",
            "params": {}
        }

        send_mcp_notification(process, initialized_notification)
        print("✅ Initialization handshake completed")

        # Small delay to ensure the notification is processed
        time.sleep(0.1)

        # Test 2: List available tools
        tools_request = {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {}
        }

        tools_response = send_mcp_request(process, tools_request)

        if 'result' in tools_response:
            tools = tools_response['result'].get('tools', [])
            if isinstance(tools, list):
                print(f"\n🔧 Available Tools ({len(tools)}):")
                for tool in tools[:10]:  # Show first 10
                    print(f"   • {tool.get('name', 'Unknown')}: {tool.get('description', 'No description')[:60]}...")
                if len(tools) > 10:
                    print(f"   ... and {len(tools) - 10} more tools")
            else:
                print(f"\n🔧 Tools response: {tools}")

        # Test 3: List available resources
        resources_request = {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "resources/list",
            "params": {}
        }

        resources_response = send_mcp_request(process, resources_request)

        if 'result' in resources_response:
            resources = resources_response['result'].get('resources', [])
            if isinstance(resources, list):
                print(f"\n📂 Available Resources ({len(resources)}):")
                for resource in resources[:10]:  # Show first 10
                    print(f"   • {resource.get('uri', 'Unknown')}: {resource.get('description', 'No description')[:60]}...")
                if len(resources) > 10:
                    print(f"   ... and {len(resources) - 10} more resources")
            else:
                print(f"\n📂 Resources response: {resources}")

        print(f"\n✅ MCP Server is working correctly!")
        print(f"\n💡 To use with Claude Code:")
        print(f"   claude mcp add mcp-log-analyzer python main.py")
        print(f"   claude mcp list")

        # Clean shutdown
        process.terminate()
        process.wait(timeout=5)

    except subprocess.TimeoutExpired:
        # wait(timeout=5) expired: the child ignored terminate(); force-kill.
        print("⚠️ Server didn't respond in time")
        process.kill()
    except Exception as e:
        print(f"❌ Error testing server: {e}")
        # `process` is unbound if Popen itself raised, hence the locals() check.
        if 'process' in locals():
            process.terminate()
167 |
168 |
def show_usage_instructions():
    """Show how to use the MCP server."""
    # Build the full help text once and emit it with a single print;
    # the rendered output is identical to printing line by line.
    lines = [
        "\n" + "=" * 50,
        "📖 HOW TO USE THE MCP SERVER",
        "=" * 50,
        "\n1. 🚀 START THE SERVER:",
        "   python main.py",
        "   (Server runs silently, waiting for MCP connections)",
        "\n2. 🔗 CONNECT WITH CLAUDE CODE:",
        "   claude mcp add mcp-log-analyzer python main.py",
        "   claude mcp list",
        "\n3. 📊 USE IN CLAUDE CONVERSATIONS:",
        "   - Register log sources",
        "   - Analyze CSV/Event logs",
        "   - Monitor system resources",
        "   - Get network diagnostics",
        "\n4. 🧪 TEST THE SERVER:",
        "   python check_server.py",
        "\n📚 Available Tools:",
        "   • register_log_source - Add new log sources",
        "   • list_log_sources - View registered sources",
        "   • query_logs - Search and filter logs",
        "   • analyze_logs - Perform log analysis",
        "   • test_network_tools_availability - Check network tools",
        "   • diagnose_network_issues - Network diagnostics",
        "   • And many more...",
    ]
    print("\n".join(lines))
200 |
201 |
202 | if __name__ == "__main__":
203 | if len(sys.argv) > 1 and sys.argv[1] == "--usage":
204 | show_usage_instructions()
205 | else:
206 | test_mcp_server()
207 | show_usage_instructions()
```
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Test utilities to access resource functions for testing.
3 | """
4 |
5 | import asyncio
6 |
7 | from mcp.server import FastMCP
8 |
9 |
10 | # Create standalone resource functions for testing
def create_test_functions():
    """Create standalone versions of resource functions for testing.

    Registers simplified copies of the server's resource handlers on a
    throwaway FastMCP instance and returns them keyed by name so tests can
    await them directly without going through the MCP protocol.
    """

    # Import resource registration functions
    # NOTE(review): these five imports are never called in this function —
    # presumably kept so an ImportError surfaces here; confirm or remove.
    from .resources import (
        register_linux_resources,
        register_logs_resources,
        register_network_resources,
        register_process_resources,
        register_windows_resources,
    )

    # Create temporary MCP instance to extract functions
    temp_mcp = FastMCP("test", "1.0.0")

    # Store original resources
    # NOTE(review): temp_resources is never read or written after this.
    temp_resources = {}

    # Manually register each type and capture functions
    # Process resources
    @temp_mcp.resource("system://process-list")
    async def get_process_list() -> str:
        # Full implementation: real psutil-backed process snapshot.
        from datetime import datetime

        import psutil

        result = []
        result.append("=== Process List ===")
        result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        result.append(f"CPU Count: {psutil.cpu_count()}")
        result.append(
            f"Total Memory: {psutil.virtual_memory().total / (1024**3):.2f} GB"
        )
        result.append(
            f"Available Memory: {psutil.virtual_memory().available / (1024**3):.2f} GB"
        )
        result.append(f"CPU Usage: {psutil.cpu_percent(interval=1)}%\n")

        result.append(
            f"{'PID':<8} {'Name':<25} {'CPU%':<8} {'Memory%':<10} {'Status':<12}"
        )
        result.append("-" * 75)

        # Get processes sorted by CPU usage
        processes = []
        for proc in psutil.process_iter(
            ["pid", "name", "cpu_percent", "memory_percent", "status"]
        ):
            try:
                proc_info = proc.info
                # First cpu_percent call per process can be None; re-sample.
                if proc_info["cpu_percent"] is None:
                    proc_info["cpu_percent"] = proc.cpu_percent(interval=0.1)
                processes.append(proc_info)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

        # Sort by CPU usage (descending)
        processes.sort(key=lambda x: x.get("cpu_percent", 0), reverse=True)

        # Show top 20 processes
        for proc in processes[:20]:
            result.append(
                f"{proc['pid']:<8} "
                f"{proc['name'][:24]:<25} "
                f"{proc.get('cpu_percent', 0):<8.1f} "
                f"{proc.get('memory_percent', 0):<10.2f} "
                f"{proc.get('status', 'unknown'):<12}"
            )

        result.append(f"\nTotal processes: {len(processes)}")
        return "\n".join(result)

    # Windows resources — stubbed: these return only the header line the
    # real handlers would produce, enough for assertion-based tests.
    @temp_mcp.resource("system://windows-event-logs")
    async def get_windows_event_logs() -> str:
        return await get_windows_event_logs_with_count("10")

    @temp_mcp.resource("system://windows-event-logs/last/{count}")
    async def get_windows_event_logs_with_count(count: str) -> str:
        import platform

        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."
        return f"=== Windows Event Logs (Last {count} entries) ==="

    @temp_mcp.resource("system://windows-event-logs/time/{duration}")
    async def get_windows_event_logs_by_time(duration: str) -> str:
        import platform

        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."
        return f"=== Windows Event Logs (Since {duration} ago) ==="

    # Linux resources — stubbed headers, same pattern as the Windows ones.
    @temp_mcp.resource("system://linux-logs")
    async def get_linux_system_logs() -> str:
        return await get_linux_logs_with_count("50")

    @temp_mcp.resource("system://linux-logs/last/{count}")
    async def get_linux_logs_with_count(count: str) -> str:
        import platform

        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."
        return f"=== Linux System Logs (Last {count} lines) ==="

    @temp_mcp.resource("system://linux-logs/time/{duration}")
    async def get_linux_logs_by_time(duration: str) -> str:
        import platform

        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."
        return f"=== Linux System Logs (Since {duration} ago) ==="

    # Network resources — stubbed headers.
    @temp_mcp.resource("system://netstat")
    async def get_netstat() -> str:
        return await get_netstat_listening()

    @temp_mcp.resource("system://netstat/listening")
    async def get_netstat_listening() -> str:
        return "=== Listening Ports ==="

    @temp_mcp.resource("system://netstat/established")
    async def get_netstat_established() -> str:
        return "=== Established Connections ==="

    @temp_mcp.resource("system://netstat/all")
    async def get_netstat_all() -> str:
        return "=== All Network Connections ==="

    @temp_mcp.resource("system://netstat/stats")
    async def get_netstat_stats() -> str:
        return "=== Network Statistics ==="

    @temp_mcp.resource("system://netstat/routing")
    async def get_netstat_routing() -> str:
        return "=== Routing Table ==="

    @temp_mcp.resource("system://netstat/port/{port}")
    async def get_netstat_port(port: str) -> str:
        # int() is used purely for validation; port_num is intentionally unused.
        try:
            port_num = int(port)
        except ValueError:
            return f"Invalid port number: {port}"
        return f"=== Connections on Port {port} ==="

    # Expose the captured coroutines by name for direct use in tests.
    return {
        "get_process_list": get_process_list,
        "get_windows_event_logs": get_windows_event_logs,
        "get_windows_event_logs_with_count": get_windows_event_logs_with_count,
        "get_windows_event_logs_by_time": get_windows_event_logs_by_time,
        "get_linux_system_logs": get_linux_system_logs,
        "get_linux_logs_with_count": get_linux_logs_with_count,
        "get_linux_logs_by_time": get_linux_logs_by_time,
        "get_netstat": get_netstat,
        "get_netstat_listening": get_netstat_listening,
        "get_netstat_established": get_netstat_established,
        "get_netstat_all": get_netstat_all,
        "get_netstat_stats": get_netstat_stats,
        "get_netstat_routing": get_netstat_routing,
        "get_netstat_port": get_netstat_port,
    }
174 |
175 |
# Create the test functions
# NOTE: runs at import time, so importing this module requires FastMCP to be
# importable in the test environment.
_test_functions = create_test_functions()

# Export functions for testing
# (re-bound at module level so tests can import each coroutine by name)
get_process_list = _test_functions["get_process_list"]
get_windows_event_logs = _test_functions["get_windows_event_logs"]
get_windows_event_logs_with_count = _test_functions["get_windows_event_logs_with_count"]
get_windows_event_logs_by_time = _test_functions["get_windows_event_logs_by_time"]
get_linux_system_logs = _test_functions["get_linux_system_logs"]
get_linux_logs_with_count = _test_functions["get_linux_logs_with_count"]
get_linux_logs_by_time = _test_functions["get_linux_logs_by_time"]
get_netstat = _test_functions["get_netstat"]
get_netstat_listening = _test_functions["get_netstat_listening"]
get_netstat_established = _test_functions["get_netstat_established"]
get_netstat_all = _test_functions["get_netstat_all"]
get_netstat_stats = _test_functions["get_netstat_stats"]
get_netstat_routing = _test_functions["get_netstat_routing"]
get_netstat_port = _test_functions["get_netstat_port"]
194 |
```
--------------------------------------------------------------------------------
/main_tcp.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | TCP-enabled entry point for the MCP Log Analyzer server.
4 | """
5 |
6 | import asyncio
7 | import argparse
8 | import sys
9 | import json
10 | import logging
11 | from pathlib import Path
12 | from typing import Optional
13 |
14 | # Add src to Python path
15 | sys.path.insert(0, str(Path(__file__).parent / "src"))
16 |
17 | from mcp_log_analyzer.mcp_server.server import mcp
18 |
19 | # Set up logging
20 | logging.basicConfig(level=logging.INFO)
21 | logger = logging.getLogger(__name__)
22 |
23 |
class TCPTransport:
    """Transport layer for TCP-based MCP communication.

    Frames messages as newline-delimited JSON over an asyncio stream pair.
    """

    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        self.reader = reader
        self.writer = writer
        self._buffer = b""      # bytes received but not yet framed
        self._closed = False    # set on EOF or explicit close()

    async def read_message(self) -> Optional[dict]:
        """Read a complete JSON-RPC message, or None on EOF/error."""
        try:
            while True:
                sep = self._buffer.find(b'\n')
                if sep != -1:
                    raw = self._buffer[:sep]
                    self._buffer = self._buffer[sep + 1:]
                    if raw:
                        return json.loads(raw.decode('utf-8'))
                # No complete (non-empty) frame yet: pull more bytes.
                chunk = await self.reader.read(4096)
                if not chunk:
                    # EOF: peer closed the connection.
                    self._closed = True
                    return None
                self._buffer += chunk
        except Exception as e:
            logger.error(f"Error reading message: {e}")
            return None

    async def write_message(self, message: dict) -> None:
        """Write a JSON-RPC message, newline-terminated."""
        try:
            encoded = (json.dumps(message) + '\n').encode('utf-8')
            self.writer.write(encoded)
            await self.writer.drain()
        except Exception as e:
            logger.error(f"Error writing message: {e}")
            raise

    def is_closed(self) -> bool:
        """Check if transport is closed."""
        return self._closed

    async def close(self) -> None:
        """Close the transport and wait for the writer to finish."""
        self._closed = True
        self.writer.close()
        await self.writer.wait_closed()
81 |
82 |
async def handle_client(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    """Handle a single client connection.

    Implements a minimal, hand-rolled subset of the MCP JSON-RPC protocol:
    initialize, tools/list, and shutdown; everything else gets -32601.
    """
    addr = writer.get_extra_info('peername')
    logger.info(f"New client connected from {addr}")

    transport = TCPTransport(reader, writer)

    try:
        # Create a bridge between TCP transport and MCP server
        # We'll need to handle the JSON-RPC protocol directly

        while not transport.is_closed():
            # Read incoming message
            request = await transport.read_message()
            if request is None:
                # EOF or unparseable input: drop the connection.
                break

            logger.debug(f"Received request: {request}")

            # Process the request through MCP
            # For now, we'll handle basic protocol messages
            if request.get('method') == 'initialize':
                # Handle initialization
                response = {
                    "jsonrpc": "2.0",
                    "id": request.get('id'),
                    "result": {
                        "protocolVersion": "2024-11-05",
                        "capabilities": {
                            "tools": {"listChanged": True},
                            "resources": {"subscribe": True, "listChanged": True},
                            "prompts": {"listChanged": True}
                        },
                        "serverInfo": {
                            "name": "mcp-log-analyzer",
                            "version": "0.1.0"
                        }
                    }
                }
                await transport.write_message(response)

            elif request.get('method') == 'tools/list':
                # List available tools
                # This is a simplified response - in production you'd query the MCP server
                # NOTE(review): the tool list is hard-coded here rather than
                # sourced from the FastMCP registry in `mcp`.
                response = {
                    "jsonrpc": "2.0",
                    "id": request.get('id'),
                    "result": {
                        "tools": [
                            {
                                "name": "register_log_source",
                                "description": "Register a new log source for analysis"
                            },
                            {
                                "name": "query_logs",
                                "description": "Query logs with filters"
                            }
                        ]
                    }
                }
                await transport.write_message(response)

            elif request.get('method') == 'shutdown':
                # Handle shutdown: acknowledge, then exit the read loop.
                response = {
                    "jsonrpc": "2.0",
                    "id": request.get('id'),
                    "result": {}
                }
                await transport.write_message(response)
                break

            else:
                # Unknown method -> JSON-RPC "method not found" error.
                response = {
                    "jsonrpc": "2.0",
                    "id": request.get('id'),
                    "error": {
                        "code": -32601,
                        "message": "Method not found"
                    }
                }
                await transport.write_message(response)

    except Exception as e:
        logger.error(f"Error handling client {addr}: {e}", exc_info=True)
    finally:
        logger.info(f"Client {addr} disconnected")
        await transport.close()
172 |
173 |
async def run_mcp_stdio_server():
    """Run the MCP server in stdio mode with async support.

    FastMCP's ``run()`` is blocking, so it is pushed onto the default
    thread-pool executor to keep the event loop responsive.
    """
    # This is a placeholder - we need to integrate with FastMCP's async capabilities
    # get_running_loop() is the correct call from inside a coroutine;
    # get_event_loop() here is deprecated (since 3.10) and can mis-bind loops.
    await asyncio.get_running_loop().run_in_executor(None, mcp.run)
179 |
180 |
181 | async def main_tcp(host="0.0.0.0", port=8080):
182 | """Run the MCP server on a TCP port."""
183 | logger.info(f"Starting MCP Log Analyzer TCP server on {host}:{port}")
184 |
185 | server = await asyncio.start_server(
186 | handle_client,
187 | host,
188 | port
189 | )
190 |
191 | addr = server.sockets[0].getsockname()
192 | logger.info(f"MCP server listening on {addr[0]}:{addr[1]}")
193 |
194 | async with server:
195 | await server.serve_forever()
196 |
197 |
def main():
    """Parse arguments and run the appropriate server (TCP or stdio)."""
    arg_parser = argparse.ArgumentParser(description='MCP Log Analyzer Server')
    arg_parser.add_argument('--tcp', action='store_true',
                            help='Run server on TCP port instead of stdio')
    arg_parser.add_argument('--host', default='0.0.0.0',
                            help='Host to bind to (default: 0.0.0.0)')
    arg_parser.add_argument('--port', type=int, default=8080,
                            help='Port to bind to (default: 8080)')
    arg_parser.add_argument('--debug', action='store_true',
                            help='Enable debug logging')

    opts = arg_parser.parse_args()

    if opts.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if not opts.tcp:
        # stdio mode (original behavior)
        mcp.run()
        return

    try:
        asyncio.run(main_tcp(opts.host, opts.port))
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
223 |
224 |
225 | if __name__ == "__main__":
226 | main()
```
--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/linux_resources.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Linux system monitoring MCP resources.
3 | """
4 |
5 | import platform
6 | import subprocess
7 | from pathlib import Path
8 |
9 | from mcp.server import FastMCP
10 |
11 |
def register_linux_resources(mcp: FastMCP):
    """Register all Linux-related resources with the MCP server.

    Adds four `system://linux-logs...` resource URIs: a default view plus
    count-, duration-, and range-parameterized variants. Every handler
    returns a plain-text report and degrades gracefully (returns an error
    string rather than raising) on non-Linux hosts or permission failures.
    """

    # Imported lazily to avoid a circular import with the server module.
    from ..server import parse_time_param

    @mcp.resource("system://linux-logs")
    async def get_linux_system_logs() -> str:
        """
        Get Linux system logs with default parameters.

        Use parameterized versions for more control:
        - system://linux-logs/last/50 - Last 50 lines
        - system://linux-logs/time/30m - Last 30 minutes
        - system://linux-logs/range/2025-01-07 13:00/2025-01-07 14:00 - Time range
        """
        # Default to last 50 lines
        return await get_linux_logs_with_count("50")

    @mcp.resource("system://linux-logs/last/{count}")
    async def get_linux_logs_with_count(count: str) -> str:
        """
        Get recent Linux system logs by line count.

        Args:
            count: Number of lines to retrieve (e.g., "100")
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            line_count = int(count)
            result = []
            result.append(f"=== Linux System Logs (Last {line_count} lines) ===\n")

            # Try to get systemd journal logs
            try:
                result.append(f"\n--- Systemd Journal ---")
                journal_output = subprocess.run(
                    ["journalctl", "-n", str(line_count), "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            # Try to read syslog — first existing path wins (break below).
            syslog_paths = [
                "/var/log/syslog",  # Debian/Ubuntu
                "/var/log/messages",  # RHEL/CentOS
            ]

            for syslog_path in syslog_paths:
                if Path(syslog_path).exists():
                    try:
                        result.append(f"\n--- {syslog_path} ---")
                        # NOTE: readlines() keeps trailing newlines, and the
                        # final "\n".join doubles them — cosmetic only.
                        with open(syslog_path, "r") as f:
                            lines = f.readlines()
                            result.extend(lines[-line_count:])
                        break
                    except PermissionError:
                        result.append(f"Permission denied reading {syslog_path}")
                    except Exception as e:
                        result.append(f"Error reading {syslog_path}: {str(e)}")

            # Common application logs
            app_logs = {
                "Apache": ["/var/log/apache2/error.log", "/var/log/httpd/error_log"],
                "Nginx": ["/var/log/nginx/error.log"],
                "MySQL": ["/var/log/mysql/error.log", "/var/log/mysqld.log"],
                "PostgreSQL": ["/var/log/postgresql/postgresql.log"],
            }

            result.append("\n--- Application Logs ---")
            for app_name, log_paths in app_logs.items():
                for log_path in log_paths:
                    if Path(log_path).exists():
                        try:
                            result.append(f"\n{app_name} ({log_path}):")
                            with open(log_path, "r") as f:
                                lines = f.readlines()
                                result.extend(
                                    lines[-(line_count // 5) :]
                                )  # Show fewer lines for app logs
                            break
                        # NOTE(review): bare except silently skips unreadable
                        # app logs (deliberate best-effort); prefer
                        # `except Exception:` to avoid catching SystemExit.
                        except:
                            pass

            return "\n".join(result)

        except ValueError:
            # int(count) failed
            return f"Invalid count parameter: {count}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"

    @mcp.resource("system://linux-logs/time/{duration}")
    async def get_linux_logs_by_time(duration: str) -> str:
        """
        Get Linux logs from the last N minutes/hours/days.

        Args:
            duration: Time duration (e.g., "30m", "2h", "1d")
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            start_time = parse_time_param(duration)
            if not start_time:
                return "Invalid duration format. Use format like '30m', '2h', or '1d'."

            result = []
            result.append(
                f"=== Linux System Logs (Since {start_time.strftime('%Y-%m-%d %H:%M:%S')}) ===\n"
            )

            # Try to get systemd journal logs with time filter
            try:
                result.append(f"\n--- Systemd Journal ---")
                since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
                journal_output = subprocess.run(
                    ["journalctl", since_arg, "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            # For syslog, we need to parse timestamps
            syslog_paths = [
                "/var/log/syslog",  # Debian/Ubuntu
                "/var/log/messages",  # RHEL/CentOS
            ]

            for syslog_path in syslog_paths:
                if Path(syslog_path).exists():
                    try:
                        result.append(f"\n--- {syslog_path} ---")
                        matching_lines = []
                        with open(syslog_path, "r") as f:
                            for line in f:
                                # Simple check if line contains a recent timestamp
                                # This is a simplified approach
                                # NOTE(review): matches only the start DAY
                                # ("%b %d"), not the actual time window.
                                if start_time.strftime("%b %d") in line:
                                    matching_lines.append(line)

                        if matching_lines:
                            result.extend(matching_lines)
                        else:
                            result.append(f"No entries found since {start_time}")
                        break
                    except PermissionError:
                        result.append(f"Permission denied reading {syslog_path}")
                    except Exception as e:
                        result.append(f"Error reading {syslog_path}: {str(e)}")

            return "\n".join(result)

        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"

    @mcp.resource("system://linux-logs/range/{start}/{end}")
    async def get_linux_logs_by_range(start: str, end: str) -> str:
        """
        Get Linux logs within a specific time range.

        Args:
            start: Start time (e.g., "2025-01-07 13:00")
            end: End time (e.g., "2025-01-07 14:00")
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            start_time = parse_time_param(start)
            end_time = parse_time_param(end)

            if not start_time or not end_time:
                return "Invalid time format. Use format like '2025-01-07 13:00'."

            result = []
            result.append(
                f"=== Linux System Logs ({start_time.strftime('%Y-%m-%d %H:%M')} to {end_time.strftime('%Y-%m-%d %H:%M')}) ===\n"
            )

            # Try to get systemd journal logs with time range
            # (range queries use the journal only; syslog is not scanned here)
            try:
                result.append(f"\n--- Systemd Journal ---")
                since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
                until_arg = f"--until={end_time.strftime('%Y-%m-%d %H:%M:%S')}"
                journal_output = subprocess.run(
                    ["journalctl", since_arg, until_arg, "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            return "\n".join(result)

        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"
238 |
```
--------------------------------------------------------------------------------
/tests/test_mcp_server.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the MCP server."""
2 |
3 | import platform
4 |
5 | import pytest
6 | from mcp.server import FastMCP
7 |
8 | from mcp_log_analyzer.mcp_server.server import (
9 | log_sources,
10 | mcp,
11 | )
12 | from mcp_log_analyzer.mcp_server.test_tool_utils import (
13 | AnalyzeLogsRequest,
14 | QueryLogsRequest,
15 | RegisterLogSourceRequest,
16 | analyze_logs,
17 | delete_log_source,
18 | get_log_source,
19 | list_log_sources,
20 | query_logs,
21 | register_log_source,
22 | )
23 |
24 |
@pytest.mark.asyncio
async def test_server_initialization():
    """The server object must be a FastMCP exposing the decorator API."""
    assert isinstance(mcp, FastMCP)
    # FastMCP stores its name internally; verify the decorator surface instead.
    for decorator_name in ("tool", "resource", "prompt"):
        assert hasattr(mcp, decorator_name)
33 |
34 |
@pytest.mark.asyncio
async def test_register_log_source():
    """Registering a source stores it and confirms via a message."""
    log_sources.clear()  # start from a clean registry

    result = await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    assert "message" in result
    assert "test-source" in result["message"]
    assert "test-source" in log_sources
    assert log_sources["test-source"].type == "json"
51 |
52 |
@pytest.mark.asyncio
async def test_list_log_sources():
    """Listing reports exactly the one registered source."""
    log_sources.clear()
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    listing = await list_log_sources()

    assert "sources" in listing
    assert "count" in listing
    assert listing["count"] == 1
    assert len(listing["sources"]) == 1
    assert listing["sources"][0]["name"] == "test-source"
71 |
72 |
@pytest.mark.asyncio
async def test_get_log_source():
    """Fetching by name returns the source; unknown names yield an error."""
    log_sources.clear()
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    found = await get_log_source("test-source")
    assert "source" in found
    assert found["source"]["name"] == "test-source"

    # Unknown name -> error payload rather than an exception.
    missing = await get_log_source("non-existent")
    assert "error" in missing
92 |
93 |
@pytest.mark.asyncio
async def test_delete_log_source():
    """Deleting removes the source; deleting again reports an error."""
    log_sources.clear()
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    outcome = await delete_log_source("test-source")
    assert "message" in outcome
    assert "test-source" not in log_sources

    # Second delete of the same name must surface an error payload.
    outcome = await delete_log_source("non-existent")
    assert "error" in outcome
113 |
114 |
@pytest.mark.asyncio
async def test_query_logs():
    """Querying a registered source echoes the source name and log payload."""
    log_sources.clear()
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    response = await query_logs(QueryLogsRequest(source_name="test-source", limit=10))

    for key in ("logs", "count", "source"):
        assert key in response
    assert response["source"] == "test-source"
134 |
135 |
@pytest.mark.asyncio
async def test_analyze_logs():
    """Summary analysis returns result metadata for the requested source."""
    log_sources.clear()
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    response = await analyze_logs(
        AnalyzeLogsRequest(source_name="test-source", analysis_type="summary")
    )

    for key in ("result", "source", "analysis_type"):
        assert key in response
    assert response["analysis_type"] == "summary"
157 |
158 |
@pytest.mark.asyncio
async def test_system_resources():
    """Test system monitoring resources.

    The process-list resource is asserted on every platform; the Windows
    and Linux log resources assert real content on their own platform and
    the "only available on ..." fallback everywhere else.
    """
    # Import the resource functions
    # NOTE(review): parse_time_param is imported but unused in this test.
    from mcp_log_analyzer.mcp_server.server import parse_time_param
    from mcp_log_analyzer.mcp_server.test_utils import (
        get_linux_logs_by_time,
        get_linux_logs_with_count,
        get_linux_system_logs,
        get_process_list,
        get_windows_event_logs,
        get_windows_event_logs_by_time,
        get_windows_event_logs_with_count,
    )

    # Test process list resource (should work on all platforms)
    process_list = await get_process_list()
    assert isinstance(process_list, str)
    assert "Process List" in process_list
    assert "PID" in process_list
    assert "CPU%" in process_list
    assert "Memory%" in process_list

    # Test Windows event logs (platform-specific)
    windows_logs = await get_windows_event_logs()
    assert isinstance(windows_logs, str)
    if platform.system() == "Windows":
        assert "Windows Event Logs" in windows_logs
    else:
        assert "only available on Windows" in windows_logs

    # Test parameterized Windows event logs
    windows_logs_count = await get_windows_event_logs_with_count("5")
    assert isinstance(windows_logs_count, str)
    if platform.system() == "Windows":
        assert "Last 5 entries" in windows_logs_count
    else:
        assert "only available on Windows" in windows_logs_count

    windows_logs_time = await get_windows_event_logs_by_time("30m")
    assert isinstance(windows_logs_time, str)
    if platform.system() == "Windows":
        assert "Windows Event Logs" in windows_logs_time
    else:
        assert "only available on Windows" in windows_logs_time

    # Test Linux system logs (platform-specific)
    linux_logs = await get_linux_system_logs()
    assert isinstance(linux_logs, str)
    if platform.system() == "Linux":
        assert "Linux System Logs" in linux_logs
    else:
        assert "only available on Linux" in linux_logs

    # Test parameterized Linux logs
    linux_logs_count = await get_linux_logs_with_count("20")
    assert isinstance(linux_logs_count, str)
    if platform.system() == "Linux":
        assert "Last 20 lines" in linux_logs_count
    else:
        assert "only available on Linux" in linux_logs_count

    linux_logs_time = await get_linux_logs_by_time("1h")
    assert isinstance(linux_logs_time, str)
    if platform.system() == "Linux":
        assert "Linux System Logs" in linux_logs_time
    else:
        assert "only available on Linux" in linux_logs_time
227 |
228 |
@pytest.mark.asyncio
async def test_time_parsing():
    """Test time parameter parsing function.

    Covers relative formats ("30m", "2h", "1d"), an absolute timestamp,
    the rejection of malformed input, and the explicit "none" sentinel.
    """
    from datetime import datetime

    from mcp_log_analyzer.mcp_server.server import parse_time_param

    # Relative time parsing: each suffix form yields a datetime in the past.
    for relative in ("30m", "2h", "1d"):
        result = parse_time_param(relative)
        assert result is not None
        assert isinstance(result, datetime)
        assert result < datetime.now()

    # Absolute time parsing.
    result = parse_time_param("2025-01-07 13:00")
    assert result is not None
    assert isinstance(result, datetime)
    assert result.year == 2025
    assert result.month == 1
    assert result.day == 7
    assert result.hour == 13

    # Invalid formats must raise ValueError (idiomatic pytest.raises instead
    # of the try/assert-False pattern; also dropped the unused timedelta import).
    with pytest.raises(ValueError):
        parse_time_param("invalid")

    # The "none" sentinel means "no time bound".
    assert parse_time_param("none") is None
269 |
270 |
@pytest.mark.asyncio
async def test_netstat_resources():
    """Test netstat network monitoring resources."""
    # Import the netstat resource functions
    from mcp_log_analyzer.mcp_server.test_utils import (
        get_netstat,
        get_netstat_all,
        get_netstat_established,
        get_netstat_listening,
        get_netstat_port,
        get_netstat_routing,
        get_netstat_stats,
    )

    # Each zero-argument resource must return text containing its heading.
    expectations = [
        (get_netstat, "Listening Ports"),  # default view
        (get_netstat_listening, "Listening Ports"),
        (get_netstat_established, "Established Connections"),
        (get_netstat_all, "All Network Connections"),
        (get_netstat_stats, "Network Statistics"),
        (get_netstat_routing, "Routing Table"),
    ]
    for resource, heading in expectations:
        output = await resource()
        assert isinstance(output, str)
        assert heading in output

    # Port-specific resource with a common port.
    port_output = await get_netstat_port("80")
    assert isinstance(port_output, str)
    assert "Connections on Port 80" in port_output

    # A non-numeric port must be rejected with a friendly message.
    invalid_port_output = await get_netstat_port("invalid")
    assert isinstance(invalid_port_output, str)
    assert "Invalid port number" in invalid_port_output
```