#
tokens: 48760/50000 49/69 files (page 1/3)
lines: off (toggle) GitHub
raw markdown copy
This is page 1 of 3. Use http://codebase.md/sedwardstx/demomcp?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .gitignore
├── .mcp.json
├── check_server.py
├── CLAUDE.md
├── config
│   └── default.yml
├── docs
│   ├── api_reference.md
│   ├── demo-recording
│   │   └── MCPDemo.gif
│   ├── example-context-docs
│   │   ├── mcp-ai-agent-architecture.md
│   │   ├── mcp-ai-agent-dev-task.md
│   │   └── mcp-ai-agent-prd.md
│   └── getting_started.md
├── LICENSE
├── main_tcp.py
├── main.py
├── mcp_tcp_client.py
├── pyproject.toml
├── QUICK_START.md
├── README.md
├── scripts
│   └── test_server.py
├── setup.py
├── src
│   └── mcp_log_analyzer
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   └── server.py
│       ├── config
│       │   ├── __init__.py
│       │   └── settings.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── config.py
│       │   ├── models.py
│       │   └── state_manager.py
│       ├── mcp_server
│       │   ├── __init__.py
│       │   ├── models
│       │   │   ├── __init__.py
│       │   │   └── schemas.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── linux_testing_prompt.py
│       │   │   ├── log_management_prompt.py
│       │   │   ├── mcp_assets_overview_prompt.py
│       │   │   ├── network_testing_prompt.py
│       │   │   ├── process_monitoring_prompt.py
│       │   │   └── windows_testing_prompt.py
│       │   ├── resources
│       │   │   ├── __init__.py
│       │   │   ├── linux_resources.py
│       │   │   ├── logs_resources.py
│       │   │   ├── network_resources.py
│       │   │   ├── process_resources.py
│       │   │   └── windows_resources.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── health_check_tools.py
│       │       ├── linux_test_tools.py
│       │       ├── log_management_tools.py
│       │       ├── network_test_tools.py
│       │       ├── process_test_tools.py
│       │       └── windows_test_tools.py
│       ├── parsers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── csv_parser.py
│       │   ├── etl_cached_parser.py
│       │   ├── etl_large_file_parser.py
│       │   ├── etl_parser.py
│       │   ├── etl_windows_parser.py
│       │   └── evt_parser.py
│       └── tcp_proxy.py
├── TCP_PROXY_README.md
├── tcp_proxy.py
├── tcp_server.py
├── test_server.py
├── test_tcp_proxy.py
├── test_windows_setup.py
└── tests
    ├── test_base_parser.py
    ├── test_mcp_server.py
    ├── test_tool_utils.py
    └── test_utils.py
```

# Files

--------------------------------------------------------------------------------
/.mcp.json:
--------------------------------------------------------------------------------

```json
{
  "mcpServers": {
    "mcp-log-analyzer": {
      "command": "python",
      "args": ["main.py"],
      "env": {}
    }
  }
}
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file.  For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

# VS Code
.vscode/
*.code-workspace

# Local development
local/
tmp/
temp/

# OS specific
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Project specific
data/
logs/
*.db
*.sqlite
*.sqlite3
config/local.yaml
config/local.yml
config/secrets.yaml
config/secrets.yml

# MCP specific
.mcp/
mcp_logs/
mcp_data/

# Test artifacts
.pytest_cache/
test-results/
test-reports/
*.coverage
htmlcov/

# Documentation build
docs/_build/
docs/.doctrees/

# Backup files
*.bak
*.backup
*~
*.swp
*.swo

# Package files
*.tar.gz
*.zip
*.7z
*.rar

# SSL certificates (for development)
*.pem
*.key
*.crt
*.csr

# Environment-specific files
.env.local
.env.development
.env.test
.env.production

# Log files
*.log
logs/
*.log.*

# Database files
*.db
*.sqlite
*.sqlite3
data/

# Cache directories
.cache/
__pycache__/
*.pyc

# Temporary files
*.tmp
*.temp
~$*

# Editor backup files
*~
\#*\#
.\#*

# macOS
.DS_Store
.AppleDouble
.LSOverride

# Windows
Thumbs.db
ehthumbs.db
Desktop.ini

# Linux
.directory
.Trash-*

# JetBrains IDEs
.idea/
*.iml
*.iws
*.ipr

# Visual Studio Code
.vscode/
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json

# Sublime Text
*.sublime-workspace
*.sublime-project

# Vim
*.swp
*.swo
*~

# Emacs
*~
\#*\#
.\#*
.projectile

# Project specific ignores
/data/
/logs/
/models/
/output/
/results/
/temp/
/tmp/
/cache/
*.pid
*.seed
*.pid.lock

```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
# MCP Log Analyzer

A Model Context Protocol (MCP) server for analyzing different types of logs on Windows and Linux systems, built with the FastMCP framework.

## Features

- **Multiple Log Format Support**
  - Windows Event Logs (EVT/EVTX)
  - Windows Event Trace Logs (ETL)
  - Structured Logs (JSON, XML)
  - CSV Logs
  - Unstructured Text Logs

- **MCP Tools**
  - `register_log_source`: Register new log sources
  - `list_log_sources`: View all registered sources
  - `get_log_source`: Get details about a specific source
  - `delete_log_source`: Remove a log source
  - `query_logs`: Query logs with filters and pagination
  - `analyze_logs`: Perform analysis (summary, pattern, anomaly)

- **MCP Resources**
  - `logs://sources`: View registered log sources
  - `logs://types`: Learn about supported log types
  - `logs://analysis-types`: Understand analysis options
  - `system://windows-event-logs`: Recent Windows System and Application event logs
  - `system://linux-logs`: Linux systemd journal and application logs
  - `system://process-list`: Current processes with PID, CPU, and memory usage
  - `system://netstat`: Network connections and statistics for troubleshooting

- **MCP Prompts**
  - Log analysis quickstart guide
  - Troubleshooting guide
  - Windows Event Log specific guide

## Installation

```bash
# Clone the repository
git clone https://github.com/your-username/mcp-log-analyzer.git
cd mcp-log-analyzer

# Install the package
pip install -e .

# For ETL file support (optional)
pip install -e ".[etl]"

# For development dependencies
pip install -e ".[dev]"
```

### Windows Setup

On Windows, the package includes Windows Event Log support via `pywin32`. If you encounter import errors:

```powershell
# Ensure Windows dependencies are installed
pip install pywin32>=300

# Test the setup
python test_windows_setup.py

# If successful, start the server
python main.py
```

**Note**: On first install of `pywin32`, you may need to run the post-install script:
```powershell
python Scripts/pywin32_postinstall.py -install
```

## Usage

### Understanding MCP Servers

MCP (Model Context Protocol) servers don't have traditional web endpoints. They communicate via stdin/stdout with MCP clients (like Claude Code). When you run `python main.py`, the server starts silently and waits for MCP protocol messages.

### Testing the Server

```bash
# Test that the server is working
python check_server.py

# See usage instructions
python check_server.py --usage
```

### Starting the MCP Server

```bash
# Run directly
python main.py

# Or use Claude Code's MCP integration
claude mcp add mcp-log-analyzer python main.py
```

### Using with Claude Code

1. **Add the server to Claude Code:**
   ```bash
   claude mcp add mcp-log-analyzer python /path/to/main.py
   ```

2. **Use the tools in Claude Code:**
   - Register a log source: Use the `register_log_source` tool
   - Query logs: Use the `query_logs` tool
   - Analyze logs: Use the `analyze_logs` tool

3. **Access resources:**
   - Reference resources using `@mcp-log-analyzer:logs://sources`
   - Get help with prompts like `/mcp__mcp-log-analyzer__log_analysis_quickstart`

## System Monitoring Resources

These resources provide real-time system information without needing to register log sources:

1. **Check System Processes:**
   - Access via `@mcp-log-analyzer:system://process-list`
   - Shows top processes by CPU usage with memory information

2. **Windows Event Logs** (Windows only):
   - Default: `@mcp-log-analyzer:system://windows-event-logs` (last 10 entries)
   - By count: `@mcp-log-analyzer:system://windows-event-logs/last/50` (last 50 entries)
   - By time: `@mcp-log-analyzer:system://windows-event-logs/time/30m` (last 30 minutes)
   - By range: `@mcp-log-analyzer:system://windows-event-logs/range/2025-01-07 13:00/2025-01-07 14:00`
   - Shows System and Application event log entries

3. **Linux System Logs** (Linux only):
   - Default: `@mcp-log-analyzer:system://linux-logs` (last 50 lines)
   - By count: `@mcp-log-analyzer:system://linux-logs/last/100` (last 100 lines)
   - By time: `@mcp-log-analyzer:system://linux-logs/time/1h` (last hour)
   - By range: `@mcp-log-analyzer:system://linux-logs/range/2025-01-07 13:00/2025-01-07 14:00`
   - Shows systemd journal, syslog, and common application logs

4. **Network Monitoring** (Cross-platform):
   - Default: `@mcp-log-analyzer:system://netstat` (listening ports)
   - Listening ports: `@mcp-log-analyzer:system://netstat/listening`
   - Established connections: `@mcp-log-analyzer:system://netstat/established`
   - All connections: `@mcp-log-analyzer:system://netstat/all`
   - Network statistics: `@mcp-log-analyzer:system://netstat/stats`
   - Routing table: `@mcp-log-analyzer:system://netstat/routing`
   - Port-specific: `@mcp-log-analyzer:system://netstat/port/80`
   - Uses netstat on Windows, ss (preferred) or netstat on Linux

### Time Format Examples:
- **Relative time**: `30m` (30 minutes), `2h` (2 hours), `1d` (1 day)
- **Absolute time**: `2025-01-07 13:00`, `2025-01-07 13:30:15`, `07/01/2025 13:00`

## Example Workflow

1. **Register a Windows System Log:**
   ```
   Use register_log_source tool with:
   - name: "system-logs"
   - source_type: "evt"
   - path: "System"
   ```

2. **Query Recent Errors:**
   ```
   Use query_logs tool with:
   - source_name: "system-logs"
   - filters: {"level": "Error"}
   - limit: 10
   ```

3. **Analyze Patterns:**
   ```
   Use analyze_logs tool with:
   - source_name: "system-logs"
   - analysis_type: "pattern"
   ```

4. **Register an ETL File:**
   ```
   Use register_log_source tool with:
   - name: "network-trace"
   - source_type: "etl"
   - path: "C:\\Traces\\network.etl"
   ```

## Development

```bash
# Run tests
pytest

# Code formatting
black .
isort .

# Type checking
mypy src

# Run all quality checks
black . && isort . && mypy src && flake8
```

## Project Structure

- `src/mcp_log_analyzer/`: Main package
  - `mcp_server/`: MCP server implementation using FastMCP
  - `core/`: Core functionality and models
  - `parsers/`: Log parsers for different formats
- `main.py`: Server entry point
- `.mcp.json`: MCP configuration
- `tests/`: Test files

## Requirements

- Python 3.12+
- Windows OS (for Event Log support)
- See `pyproject.toml` for full dependencies

## License

MIT
```

--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------

```markdown
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Development Commands

### Build and Install
```bash
# Install the package in development mode
pip install -e .

# Install with development dependencies
pip install -e ".[dev]"

# On Windows, ensure pywin32 is properly installed for Event Log access
# If you encounter issues, try:
pip install pywin32>=300
python -c "import win32api"  # Test Windows API access
```

### Code Quality
```bash
# Format code
black .
isort .

# Type checking
mypy src

# Linting
flake8

# Run all quality checks
black . && isort . && mypy src && flake8
```

### Testing
```bash
# Run all tests with proper PYTHONPATH
PYTHONPATH=src python3 -m pytest tests/ -v

# Run tests with coverage
PYTHONPATH=src python3 -m pytest --cov=mcp_log_analyzer tests/

# Run specific test file
PYTHONPATH=src python3 -m pytest tests/test_base_parser.py -v

# Test server import
PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('Server import successful')"
```

### Running the MCP Server

**Important**: MCP servers don't show output when started - they communicate via stdin/stdout with MCP clients.

```bash
# Start the MCP server (runs silently)
python main.py

# Test the server is working
python check_server.py

# Add to Claude Code
claude mcp add mcp-log-analyzer python main.py

# List MCP servers
claude mcp list

# Remove MCP server
claude mcp remove mcp-log-analyzer
```

**No Output is Normal**: When you run `python main.py`, you won't see any console output. The server is running and waiting for MCP protocol messages from clients like Claude Code.

## Architecture Overview

### MCP Server Structure
This project follows the FastMCP framework pattern, refactored from the quick-data-mcp architecture:

- **Entry Point** (`main.py`): Simple script that imports and runs the MCP server
- **MCP Server** (`src/mcp_log_analyzer/mcp_server/server.py`): FastMCP server coordinator
- **Tools** (`src/mcp_log_analyzer/mcp_server/tools/`): Organized MCP tools by category
- **Resources** (`src/mcp_log_analyzer/mcp_server/resources/`): System monitoring resources
- **Prompts** (`src/mcp_log_analyzer/mcp_server/prompts/`): Comprehensive user guides
- **Core Logic** (`src/mcp_log_analyzer/core/`): Models and configuration
- **Parsers** (`src/mcp_log_analyzer/parsers/`): Log format-specific parsers

### Organized Tool Structure

**1. Core Log Management Tools** (`tools/log_management_tools.py`):
- `register_log_source`: Register new log sources (Windows Event Logs, JSON, XML, CSV, text)
- `list_log_sources`: List all registered log sources
- `get_log_source`: Get details about specific log source
- `delete_log_source`: Remove log source
- `query_logs`: Query logs with filters and time ranges
- `analyze_logs`: Perform pattern detection and anomaly analysis

**2. Windows System Tools** (`tools/windows_test_tools.py`):
- `test_windows_event_log_access`: Test Windows Event Log access and permissions
- `get_windows_event_log_info`: Get detailed Windows Event Log information
- `query_windows_events_by_criteria`: Query Windows Events with specific filters
- `get_windows_system_health`: Windows system health overview from Event Logs

**3. Linux System Tools** (`tools/linux_test_tools.py`):
- `test_linux_log_access`: Test Linux log file and systemd journal access
- `query_systemd_journal`: Query systemd journal with specific criteria
- `analyze_linux_services`: Analyze Linux services status and recent activity
- `get_linux_system_overview`: Comprehensive Linux system overview

**4. Process Monitoring Tools** (`tools/process_test_tools.py`):
- `test_system_resources_access`: Test system resource monitoring capabilities
- `analyze_system_performance`: Analyze current system performance and resource usage
- `find_resource_intensive_processes`: Find processes consuming significant resources
- `monitor_process_health`: Monitor health and status of specific processes
- `get_system_health_summary`: Overall system health summary

**5. Network Diagnostic Tools** (`tools/network_test_tools.py`):
- `test_network_tools_availability`: Test availability of network diagnostic tools
- `test_port_connectivity`: Test connectivity to specific ports
- `test_network_connectivity`: Test network connectivity to various hosts
- `analyze_network_connections`: Analyze current network connections and listening ports
- `diagnose_network_issues`: Comprehensive network diagnostics

### Organized Resource Structure

**1. Log Management Resources** (`resources/logs_resources.py`):
- `logs/sources`: List of registered log sources
- `logs/source/{name}`: Details about specific log source

**2. Windows Resources** (`resources/windows_resources.py`):
- `windows/system-events/{param}`: Windows System Event logs with configurable parameters
- `windows/application-events/{param}`: Windows Application Event logs with configurable parameters

**3. Linux Resources** (`resources/linux_resources.py`):
- `linux/systemd-logs/{param}`: Linux systemd journal logs with configurable parameters
- `linux/system-logs/{param}`: Linux system logs with configurable parameters

**4. Process Resources** (`resources/process_resources.py`):
- `processes/list`: Current running processes with PID, CPU, and memory usage
- `processes/summary`: Process summary statistics

**5. Network Resources** (`resources/network_resources.py`):
- `network/listening-ports`: Currently listening network ports
- `network/established-connections`: Active network connections
- `network/all-connections`: All network connections and statistics
- `network/statistics`: Network interface statistics
- `network/routing-table`: Network routing table
- `network/port/{port}`: Specific port information

### Resource Parameters
System monitoring resources support flexible parameters:
- `/last/{n}` - Get last N entries (e.g., `/last/50`)
- `/time/{duration}` - Get entries from time duration (e.g., `/time/30m`, `/time/2h`, `/time/1d`)
- `/range/{start}/{end}` - Get entries from time range (e.g., `/range/2025-01-07 13:00/2025-01-07 14:00`)

**Time Format Support**:
- Relative: `30m`, `2h`, `1d`
- Absolute: `2025-01-07 13:00`, `07/01/2025 13:30`

### Comprehensive Prompt System

**1. MCP Assets Overview** (`prompts/mcp_assets_overview_prompt.py`):
- Complete reference to all 18 tools, 10+ resources, and usage examples
- Platform support information and getting started guide

**2. Log Management Prompts** (`prompts/log_management_prompt.py`):
- `log_management_guide`: Comprehensive log management and analysis guide
- `log_troubleshooting_guide`: Troubleshooting common log analysis issues

**3. Windows Testing Prompts** (`prompts/windows_testing_prompt.py`):
- `windows_diagnostics_guide`: Windows system diagnostics and Event Log analysis
- `windows_event_reference`: Quick reference for Windows Event IDs and meanings

**4. Linux Testing Prompts** (`prompts/linux_testing_prompt.py`):
- `linux_diagnostics_guide`: Linux system diagnostics and systemd troubleshooting
- `linux_systemd_reference`: systemd services and log patterns reference

**5. Process Monitoring Prompts** (`prompts/process_monitoring_prompt.py`):
- `process_monitoring_guide`: System resource monitoring and performance analysis
- `process_troubleshooting_guide`: Troubleshooting process and performance issues

**6. Network Testing Prompts** (`prompts/network_testing_prompt.py`):
- `network_diagnostics_guide`: Network diagnostics and troubleshooting
- `network_troubleshooting_scenarios`: Specific network troubleshooting scenarios

### Cross-Platform Support

**Windows Features**:
- Full Windows Event Log support with pywin32
- System and Application Event Log analysis
- **Custom Application and Services logs support** (e.g., "Microsoft-Service Fabric/Admin")
- Event ID reference and health assessment  
- Configurable time-based filtering
- Automatic API selection (legacy for standard logs, EvtQuery for custom logs)

**Linux Features**:
- systemd journal and traditional log file support
- Service status analysis and troubleshooting
- Cross-distribution compatibility
- Network diagnostic tool integration

**Cross-Platform Features**:
- Process monitoring with psutil
- Network diagnostics with platform-specific tools
- Core log management for various formats
- Comprehensive error handling and permission checking

### Adding New Features

**Adding New Tools**:
1. Choose appropriate category folder under `tools/`
2. Create Pydantic models for requests in the tool file
3. Add async function decorated with `@mcp.tool()`
4. Update the category's `register_*_tools()` function
5. Add comprehensive docstring and error handling

**Adding New Resources**:
1. Choose appropriate category folder under `resources/`
2. Add async function decorated with `@mcp.resource()`
3. Implement parameterization if needed
4. Update the category's `register_*_resources()` function
5. Add to the main assets overview prompt

**Adding New Prompts**:
1. Choose appropriate category folder under `prompts/`
2. Add async function decorated with `@mcp.prompt()`
3. Follow the established format with emojis and markdown
4. Update the category's `register_*_prompts()` function
5. Include practical examples and troubleshooting guidance

### Design Patterns

- **Modular Organization**: Tools, resources, and prompts organized by functional category
- **Async-First**: All MCP functions are async for better performance
- **Type Safety**: Pydantic models for all requests/responses with comprehensive validation
- **Error Handling**: Consistent error format with helpful messages
- **Cross-Platform**: Platform detection and appropriate tool/command selection
- **Parameterized Resources**: Flexible resource access with time-based and count-based parameters
- **Comprehensive Documentation**: Rich prompts with step-by-step guidance and troubleshooting

### Dependencies

**Core Requirements**:
- `mcp[cli]>=1.9.2`: Model Context Protocol framework
- `pydantic>=1.8.0`: Data validation and serialization
- `python-dotenv>=0.19.0`: Environment variable management
- `pandas>=1.3.0`: Data analysis capabilities
- `psutil>=5.9.0`: System and process monitoring

**Platform-Specific**:
- `pywin32>=300`: Windows Event Log access (Windows only)

**Development**:
- `pytest`: Testing framework
- `black`: Code formatting
- `isort`: Import sorting
- `mypy`: Type checking
- `flake8`: Linting

### Testing Notes

- Tests require `PYTHONPATH=src` to properly import modules
- Some tests are platform-specific (Windows Event Logs, Linux systemd)
- Network tests may require internet connectivity
- Process monitoring tests interact with real system resources

### MCP Integration Notes

- Server communicates via stdio with Claude Code
- Tools appear as callable functions in Claude conversations
- Resources can be referenced with URIs like `logs/sources` or `processes/list`
- Prompts provide comprehensive guidance for effective system administration
- Cross-platform compatibility ensures consistent experience across environments
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/models/__init__.py:
--------------------------------------------------------------------------------

```python
"""Models package."""

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/config/__init__.py:
--------------------------------------------------------------------------------

```python
"""Configuration package."""

```

--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------

```python
from setuptools import setup

# Minimal legacy shim: all project metadata and build options are declared
# in pyproject.toml (see repo root); this file only enables tooling that
# still expects a setup.py (e.g. `pip install -e .` on older pip versions).
setup()

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/api/__init__.py:
--------------------------------------------------------------------------------

```python
"""API implementation for the MCP Log Analyzer."""

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/__init__.py:
--------------------------------------------------------------------------------

```python
"""Core functionality for the MCP Log Analyzer."""

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/__init__.py:
--------------------------------------------------------------------------------

```python
"""MCP Log Analyzer package."""

__version__ = "0.1.0"

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/__init__.py:
--------------------------------------------------------------------------------

```python
"""MCP server module for log analysis."""

from .server import mcp

__all__ = ["mcp"]

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/config.py:
--------------------------------------------------------------------------------

```python
"""Configuration for MCP Log Analyzer."""

from pydantic import BaseModel


class Settings(BaseModel):
    """Simple settings for MCP Log Analyzer."""

    # Server identity (also mirrored in config/default.yml and __init__.py).
    server_name: str = "mcp-log-analyzer"
    version: str = "0.1.0"

    # Parser settings
    max_file_size_mb: int = 100  # maximum input file size, in MB
    max_events: int = 10000  # upper bound on events handled per parse
    batch_size: int = 1000  # events processed per batch

    # Cache settings
    cache_dir: str = "cache"  # directory used for cached parser output
    max_cache_size_mb: int = 1024  # cache size ceiling, in MB

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/config/settings.py:
--------------------------------------------------------------------------------

```python
"""Server configuration settings."""

import os
from typing import Optional


class Settings:
    """Application settings.

    Values are resolved once, at construction time: fixed identity fields
    plus optional overrides read from the process environment
    (``LOG_LEVEL``, ``API_KEY``, ``DATABASE_URL``).
    """

    def __init__(self) -> None:
        # Fixed server identity.
        self.server_name = "SF Support Diagnostics MCP Server"
        self.version = "0.1.0"
        # Environment-driven values; LOG_LEVEL falls back to "INFO".
        env = os.environ
        self.log_level = env.get("LOG_LEVEL", "INFO")
        self.api_key: Optional[str] = env.get("API_KEY")
        self.database_url: Optional[str] = env.get("DATABASE_URL")

    @property
    def server_info(self) -> dict:
        """Return the server's identifying details as a plain dict."""
        info = {"name": self.server_name}
        info["version"] = self.version
        info["log_level"] = self.log_level
        return info


# Module-level singleton used throughout the application.
settings = Settings()

```

--------------------------------------------------------------------------------
/config/default.yml:
--------------------------------------------------------------------------------

```yaml
# MCP Log Analyzer Default Configuration

server:
  host: "127.0.0.1"
  port: 5000
  debug: false
  workers: 4

logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  file: "logs/mcp_server.log"

storage:
  cache_dir: "cache"
  max_cache_size_mb: 1024

parsers:
  evt:
    batch_size: 1000
    max_events: 10000
  structured:
    allowed_formats: ["json", "xml"]
    max_file_size_mb: 100
  csv:
    default_delimiter: ","
    infer_types: true
    sample_size: 1000
  unstructured:
    max_line_length: 10000
    max_file_size_mb: 50

models:
  embedding_model: "sentence-transformers/all-MiniLM-L6-v2"
  default_max_tokens: 2048
  max_context_window: 16384 
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/prompts/__init__.py:
--------------------------------------------------------------------------------

```python
"""
Prompts package for the MCP Log Analyzer server.
"""

from mcp.server import FastMCP

from .linux_testing_prompt import register_linux_testing_prompts
from .log_management_prompt import register_log_management_prompts
from .mcp_assets_overview_prompt import register_mcp_assets_prompts
from .network_testing_prompt import register_network_testing_prompts
from .process_monitoring_prompt import register_process_monitoring_prompts
from .windows_testing_prompt import register_windows_testing_prompts


def register_all_prompts(mcp: FastMCP):
    """Register every prompt category with the MCP server."""
    # Overview first, then each topic-specific prompt group, in the same
    # order the original registration sequence used.
    registrars = (
        register_mcp_assets_prompts,
        register_log_management_prompts,
        register_windows_testing_prompts,
        register_linux_testing_prompts,
        register_process_monitoring_prompts,
        register_network_testing_prompts,
    )
    for register in registrars:
        register(mcp)

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/__init__.py:
--------------------------------------------------------------------------------

```python
"""
MCP Resources module.

This module contains all the MCP resources organized by category.
"""

from .linux_resources import register_linux_resources
from .logs_resources import register_logs_resources
from .network_resources import register_network_resources
from .process_resources import register_process_resources
from .windows_resources import register_windows_resources


def register_all_resources(mcp):
    """Attach every resource category to the given MCP server instance."""
    # Same registration order as before: logs first, then per-platform
    # and cross-platform resource groups.
    for attach in (
        register_logs_resources,
        register_windows_resources,
        register_linux_resources,
        register_process_resources,
        register_network_resources,
    ):
        attach(mcp)


__all__ = [
    "register_all_resources",
    "register_logs_resources",
    "register_windows_resources",
    "register_linux_resources",
    "register_process_resources",
    "register_network_resources",
]

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/__init__.py:
--------------------------------------------------------------------------------

```python
"""
MCP Tools module.

This module contains all the MCP tools organized by category.
"""

from .linux_test_tools import register_linux_test_tools
from .log_management_tools import register_log_management_tools
from .network_test_tools import register_network_test_tools
from .process_test_tools import register_process_test_tools
from .windows_test_tools import register_windows_test_tools
from .health_check_tools import register_health_check_tools


def register_all_tools(mcp):
    """Attach every tool category to the given MCP server instance."""
    # Same order as the original explicit call sequence.
    for register in (
        register_log_management_tools,
        register_windows_test_tools,
        register_linux_test_tools,
        register_process_test_tools,
        register_network_test_tools,
        register_health_check_tools,
    ):
        register(mcp)


# Explicit public API of the tools package.
__all__ = [
    "register_all_tools",
    "register_log_management_tools",
    "register_windows_test_tools",
    "register_linux_test_tools",
    "register_process_test_tools",
    "register_network_test_tools",
    "register_health_check_tools",
]

```

--------------------------------------------------------------------------------
/test_server.py:
--------------------------------------------------------------------------------

```python
# Ad-hoc diagnostic script: import the MCP server and print which tools and
# handlers it exposes.  Run from the repository root: python test_server.py
import sys

# Make the src/ layout importable without installing the package.
sys.path.insert(0, "src")

try:
    from mcp_log_analyzer.mcp_server.server import mcp

    print("=== MCP Server Debug Info ===")
    print(f"Server name: {mcp.name}")
    print("Server imported successfully")

    # Check what attributes the mcp object has
    print(f"MCP object type: {type(mcp)}")
    print(f'MCP attributes: {[attr for attr in dir(mcp) if not attr.startswith("_")]}')

    # Check for tools specifically
    # NOTE(review): _tools/_handlers are private FastMCP attributes — presumably
    # internal registries of registered tools/handlers; confirm against the
    # installed mcp package version before relying on them.
    if hasattr(mcp, "_tools"):
        tools = mcp._tools
        print(f"Tools found via _tools: {len(tools)}")
        if tools:
            print("Tool names:", list(tools.keys())[:5])

    if hasattr(mcp, "_handlers"):
        handlers = mcp._handlers
        print(f"Handlers found: {len(handlers)}")
        tool_handlers = {k: v for k, v in handlers.items() if "tool" in str(v)}
        print(f"Tool handlers: {len(tool_handlers)}")
        if tool_handlers:
            print("Tool handler names:", list(tool_handlers.keys())[:5])

    print("\nTesting tool import...")
    from mcp_log_analyzer.mcp_server.tools.network_test_tools import (
        register_network_test_tools,
    )

    print("✅ Network tools module imported successfully")

except Exception as e:
    # Broad catch is deliberate: this is a debug script that should always
    # print a traceback rather than die silently.
    print(f"❌ Error: {e}")
    import traceback

    traceback.print_exc()

```

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
[build-system]
requires = ["setuptools>=64", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "mcp-log-analyzer"
version = "0.1.0"
description = "Model Context Protocol server for analyzing various types of logs"
readme = "README.md"
authors = [
    {name = "MCP Log Analyzer Team"}
]
requires-python = ">=3.12"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.12",
]
dependencies = [
    "mcp[cli]>=1.9.2",
    "pydantic>=1.8.0",
    "python-dotenv>=0.19.0",
    "pandas>=1.3.0",
    "pywin32>=300; sys_platform == 'win32'",  # For Windows Event Log access
    "psutil>=5.9.0",  # For process monitoring
]

[project.optional-dependencies]
etl = [
    "etl-parser>=1.0.1",  # For parsing Windows ETL files
]
dev = [
    "pytest>=7.0.0",
    "pytest-asyncio>=1.0.0",
    "black>=22.0.0",
    "isort>=5.10.0",
    "mypy>=0.910",
    "flake8>=4.0.0",
]

[tool.setuptools]
package-dir = {"" = "src"}

[tool.setuptools.packages.find]
where = ["src"]

[tool.black]
line-length = 88
target-version = ["py312"]

[tool.isort]
profile = "black"
line_length = 88

[tool.mypy]
python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true 
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/logs_resources.py:
--------------------------------------------------------------------------------

```python
"""
Log-related MCP resources.
"""

from mcp.server import FastMCP


# This will be imported by the main server
def register_logs_resources(mcp: FastMCP) -> None:
    """Register all log-related resources with the MCP server."""

    @mcp.resource("logs://sources")
    async def get_log_sources_resource() -> str:
        """
        Get information about all registered log sources.

        This resource provides a comprehensive view of all log sources
        currently registered in the system.
        """
        # Imported lazily here to avoid a circular import with the server module.
        from ..server import log_sources

        # One bullet per registered source: "- name: type at path".
        sources_info = []
        for name, source in log_sources.items():
            sources_info.append(f"- {name}: {source.type} at {source.path}")

        if not sources_info:
            return "No log sources registered."

        return "Registered Log Sources:\n" + "\n".join(sources_info)

    @mcp.resource("logs://types")
    async def get_supported_log_types() -> str:
        """
        Get information about supported log types.

        This resource lists all the log types that can be analyzed
        by the MCP Log Analyzer.
        """
        # Static descriptive text; keep in sync with the registered parsers.
        return """Supported Log Types:

1. Windows Event Logs (evt/evtx)
   - System, Application, Security logs
   - Custom Windows event logs
   
2. Structured Logs
   - JSON format
   - XML format
   
3. CSV Logs
   - Comma-separated values with headers
   
4. Unstructured Text Logs
   - Plain text logs with customizable parsing

Each log type has specific parsers optimized for that format."""

    @mcp.resource("logs://analysis-types")
    async def get_analysis_types() -> str:
        """
        Get information about available analysis types.

        This resource describes the different types of analysis
        that can be performed on logs.
        """
        # Static descriptive text; keep in sync with the analysis tools.
        return """Available Analysis Types:

1. Summary Analysis
   - Log count and time range
   - Event type distribution
   - Severity levels
   - Source statistics

2. Pattern Analysis
   - Common patterns detection
   - Frequency analysis
   - Recurring events
   - Pattern timeline

3. Anomaly Detection
   - Unusual event patterns
   - Spike detection
   - Rare events
   - Deviation from baseline"""

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/__init__.py:
--------------------------------------------------------------------------------

```python
"""Log parsers for different log formats."""

from typing import Any, Dict, Optional, Type

from ..core.models import LogType

# Import parser base class for type checking
from .base import BaseParser

# Dictionary to store parser classes by log type
_parsers: Dict[LogType, Type[BaseParser]] = {}


def register_parser(log_type: LogType, parser_class: Type[BaseParser]) -> None:
    """Record *parser_class* as the handler for *log_type*.

    Args:
        log_type: The log type the parser handles.
        parser_class: The parser class to associate with it.
    """
    _parsers.update({log_type: parser_class})


def get_parser_for_type(
    log_type: LogType, config: Optional[Dict[str, Any]] = None
) -> BaseParser:
    """Get a parser instance for a log type.

    Args:
        log_type: The log type.
        config: Overall parser configuration.  May be a mapping (as the
            annotation says) or an object exposing per-type sections as
            attributes (``evt``, ``structured``, ``csv``, ``unstructured``).

    Returns:
        An instance of the parser registered for the log type, constructed
        with its type-specific configuration section (or None).

    Raises:
        ValueError: If no parser is registered for the log type.
    """
    if log_type not in _parsers:
        raise ValueError(f"No parser registered for log type: {log_type}")

    # Map enum member *names* to config section keys.  Matching by name
    # (instead of comparing against e.g. LogType.STRUCTURED directly) avoids
    # an AttributeError when the LogType enum in use does not define that
    # member — core.models.LogType has no STRUCTURED/UNSTRUCTURED members.
    section_keys = {
        "EVENT": "evt",
        "STRUCTURED": "structured",
        "CSV": "csv",
        "UNSTRUCTURED": "unstructured",
    }

    parser_config = None
    if config is not None:
        key = section_keys.get(log_type.name)
        if key is not None:
            # Support both mapping-style configs (the annotated Dict type)
            # and attribute-style config objects.  The previous hasattr()
            # checks never matched plain dicts, so dict-based sections were
            # silently ignored.
            if isinstance(config, dict):
                parser_config = config.get(key)
            else:
                parser_config = getattr(config, key, None)

    # Create parser instance
    return _parsers[log_type](parser_config)


# Import parser implementations and register them
try:
    from .evt_parser import EventLogParser

    register_parser(LogType.EVENT, EventLogParser)
except ImportError:
    # Windows Event Log parser might not be available on non-Windows platforms
    pass

# Import and register CSV parser
try:
    from .csv_parser import CsvLogParser

    register_parser(LogType.CSV, CsvLogParser)
except ImportError:
    # Best-effort as well: a failed import simply leaves CSV unregistered,
    # and get_parser_for_type() will raise ValueError for that type.
    pass

# TODO: Implement and register parsers for other log types:
# - StructuredLogParser (JSON, XML)
# - UnstructuredLogParser

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/base.py:
--------------------------------------------------------------------------------

```python
"""Base parser interface for all log types."""

import abc
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union

from ..core.models import LogRecord, LogSource


class BaseParser(abc.ABC):
    """Abstract interface shared by every concrete log parser."""

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the parser.

        Args:
            config: Parser configuration; an empty dict is used when omitted.
        """
        self.config = config or {}

    @abc.abstractmethod
    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Yield log records parsed from a file.

        Args:
            source: The log source information.
            file_path: Path to the log file.

        Yields:
            Log records from the file.
        """

    @abc.abstractmethod
    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Yield log records parsed from a string.

        Args:
            source: The log source information.
            content: String content to parse.

        Yields:
            Log records from the content.
        """

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Report whether *file_path* names an existing regular file.

        Args:
            file_path: Path to the log file.

        Returns:
            True if the file can be parsed, False otherwise.
        """
        candidate = Path(file_path)
        if not candidate.exists():
            return False
        return candidate.is_file()

    def extract_timestamp(self, record_data: Dict[str, Any]) -> Optional[str]:
        """Pull a timestamp-like value out of *record_data*, if present.

        Checks a fixed list of conventional field names in priority order
        and stringifies the first one found.

        Args:
            record_data: Record data.

        Returns:
            Timestamp as string if found, None otherwise.
        """
        candidates = (
            "timestamp",
            "time",
            "date",
            "datetime",
            "@timestamp",
            "created_at",
        )
        found = next((name for name in candidates if name in record_data), None)
        return None if found is None else str(record_data[found])
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/process_resources.py:
--------------------------------------------------------------------------------

```python
"""
Process monitoring MCP resources.
"""

from datetime import datetime

import psutil
from mcp.server import FastMCP


def register_process_resources(mcp: FastMCP):
    """Register all process-related resources with the MCP server."""

    @mcp.resource("system://process-list")
    async def get_process_list() -> str:
        """
        Get current process list with PID, CPU, and memory usage.

        This resource provides a snapshot of running processes
        with their resource utilization for troubleshooting.
        """
        # System-wide header.  Note: psutil.cpu_percent(interval=1) blocks
        # for one second to compute a meaningful sample.
        header = [
            "=== Process List ===",
            f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"CPU Count: {psutil.cpu_count()}",
            f"Total Memory: {psutil.virtual_memory().total / (1024**3):.2f} GB",
            f"Available Memory: {psutil.virtual_memory().available / (1024**3):.2f} GB",
            f"CPU Usage: {psutil.cpu_percent(interval=1)}%\n",
            f"{'PID':<8} {'Name':<25} {'CPU%':<8} {'Memory%':<10} {'Status':<12}",
            "-" * 75,
        ]

        # Collect per-process stats, skipping processes that vanish or deny access.
        snapshot = []
        for proc in psutil.process_iter(
            ["pid", "name", "cpu_percent", "memory_percent", "status"]
        ):
            try:
                info = proc.info
                if info["cpu_percent"] is None:
                    # Prime the per-process CPU counter when no sample exists yet.
                    info["cpu_percent"] = proc.cpu_percent(interval=0.1)
                snapshot.append(info)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

        # Heaviest CPU consumers first; show only the top 20.
        ranked = sorted(snapshot, key=lambda entry: entry.get("cpu_percent", 0), reverse=True)
        body = [
            f"{entry['pid']:<8} "
            f"{entry['name'][:24]:<25} "
            f"{entry.get('cpu_percent', 0):<8.1f} "
            f"{entry.get('memory_percent', 0):<10.2f} "
            f"{entry.get('status', 'unknown'):<12}"
            for entry in ranked[:20]
        ]

        footer = [f"\nTotal processes: {len(ranked)}"]

        return "\n".join(header + body + footer)

```

--------------------------------------------------------------------------------
/TCP_PROXY_README.md:
--------------------------------------------------------------------------------

```markdown
# TCP Proxy for MCP Servers

This TCP proxy allows MCP servers that use stdio communication to be accessed over TCP connections.

## Architecture

The proxy works by:
1. Accepting TCP connections from clients
2. Spawning a new MCP server process for each client
3. Bridging communication between the TCP socket and the MCP process stdio

## Usage

### Running the TCP Proxy

```bash
# Basic usage
python tcp_proxy.py python main.py

# With custom host and port
python tcp_proxy.py --host 0.0.0.0 --port 9000 python main.py

# With debug logging
python tcp_proxy.py --debug python main.py

# For any MCP server command
python tcp_proxy.py node my-mcp-server.js
python tcp_proxy.py ./my-mcp-binary
```

### Testing the Proxy

```bash
# Run the test script
python test_tcp_proxy.py
```

The test script will:
1. Connect to the TCP proxy
2. Send initialize request
3. List available tools
4. List available resources  
5. Call a sample tool
6. Send shutdown request

### Example Client Code

```python
import asyncio
import json

async def connect_to_mcp():
    reader, writer = await asyncio.open_connection('localhost', 8080)
    
    # Send initialize
    request = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "initialize",
        "params": {
            "protocolVersion": "2024-11-05",
            "capabilities": {},
            "clientInfo": {
                "name": "my-client",
                "version": "1.0.0"
            }
        }
    }
    
    writer.write((json.dumps(request) + '\n').encode('utf-8'))
    await writer.drain()
    
    # Read response
    response = await reader.readline()
    print(json.loads(response))
    
    # Close connection
    writer.close()
    await writer.wait_closed()

asyncio.run(connect_to_mcp())
```

## Features

- **Process Isolation**: Each client gets its own MCP server process
- **Bidirectional Communication**: Full duplex between TCP and stdio
- **Error Handling**: Graceful handling of disconnections and errors
- **Debug Logging**: Optional debug mode to trace all messages
- **Stderr Monitoring**: Captures and logs MCP server stderr output

## Protocol

The proxy uses newline-delimited JSON-RPC 2.0 messages:
- Each message must be a complete JSON object
- Messages are separated by newline characters (`\n`)
- The proxy does not modify messages, it only forwards them

## Limitations

- The current `main_tcp.py` implementation has issues with stdio redirection
- Use `tcp_proxy.py` instead for reliable TCP access to MCP servers
- Each connection spawns a new process (consider connection pooling for production)

## Troubleshooting

If you get connection refused errors:
1. Make sure the proxy is running
2. Check the port is not already in use
3. Verify firewall settings

If you get timeout errors:
1. The MCP server may be taking too long to start
2. Check for errors in the MCP server stderr (use --debug)
3. Verify the MCP command is correct
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/models/schemas.py:
--------------------------------------------------------------------------------

```python
"""Core models for the SF MCP Log Analyzer."""

from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class LogType(str, Enum):
    """Supported log types."""

    # str-valued so members compare/serialize as plain strings.
    # NOTE(review): core/models.py defines a different LogType
    # ("evt", "etl", "json", ...) — confirm which enum is canonical.
    EVENT = "event"
    STRUCTURED = "structured"
    CSV = "csv"
    UNSTRUCTURED = "unstructured"


class LogSource(BaseModel):
    """Log source information."""

    id: UUID = Field(default_factory=uuid4)  # stable identity for the source
    name: str
    type: LogType
    path: str
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now)  # not auto-refreshed; callers must set it
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogRecord(BaseModel):
    """Base log record."""

    id: UUID = Field(default_factory=uuid4)
    source_id: UUID  # references LogSource.id
    timestamp: Optional[datetime] = None  # None when the parser found no timestamp
    data: Dict[str, Any]  # parsed fields
    raw_data: Optional[str] = None  # original unparsed text, when retained


class LogQuery(BaseModel):
    """Query parameters for log retrieval."""

    source_ids: Optional[List[UUID]] = None  # None means all sources
    types: Optional[List[LogType]] = None  # None means all types
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    filters: Dict[str, Any] = Field(default_factory=dict)
    limit: int = 100  # page size
    offset: int = 0  # page start


class MCPRequest(BaseModel):
    """Base MCP request model."""

    # Every request gets a fresh id/timestamp unless explicitly supplied.
    request_id: UUID = Field(default_factory=uuid4)
    timestamp: datetime = Field(default_factory=datetime.now)
    client_id: Optional[str] = None


class LogSourceRequest(MCPRequest):
    """Request to register a log source."""

    name: str
    type: LogType
    path: str
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogQueryRequest(MCPRequest):
    """Request to query logs."""

    query: LogQuery


class LogAnalysisRequest(MCPRequest):
    """Request to analyze logs."""

    query: LogQuery
    analysis_type: str  # free-form; presumably one of the types in logs://analysis-types
    parameters: Dict[str, Any] = Field(default_factory=dict)


class MCPResponse(BaseModel):
    """Base MCP response model."""

    request_id: UUID  # echoes the originating request's id
    timestamp: datetime = Field(default_factory=datetime.now)
    status: str = "success"
    error: Optional[str] = None  # populated when status is not success


class LogSourceResponse(MCPResponse):
    """Response for log source registration."""

    source: LogSource


class LogQueryResponse(MCPResponse):
    """Response for log query."""

    records: List[LogRecord]
    total: int  # total matching records, not just this page
    limit: int
    offset: int


class LogAnalysisResponse(MCPResponse):
    """Response for log analysis."""

    results: Dict[str, Any]
    query: LogQuery  # the query that was analyzed, echoed back


class MCPContext(BaseModel):
    """Context for processing MCP requests."""

    request_id: UUID
    start_time: datetime = Field(default_factory=datetime.now)  # when processing began
    client_id: Optional[str] = None
    log_sources: Dict[UUID, LogSource] = Field(default_factory=dict)  # sources visible to this request


class MCPError(Exception):
    """Base exception type for MCP operations."""

    def __init__(self, message: str, status_code: int = 400):
        """Capture the error text plus an HTTP-style status code."""
        self.message = message
        self.status_code = status_code
        super().__init__(message)

```

--------------------------------------------------------------------------------
/tests/test_base_parser.py:
--------------------------------------------------------------------------------

```python
"""Tests for the base parser."""

import unittest
from pathlib import Path
from typing import Dict, Iterator
from unittest.mock import MagicMock, patch
from uuid import UUID, uuid4

from mcp_log_analyzer.core.models import LogRecord, LogSource, LogType
from mcp_log_analyzer.parsers.base import BaseParser


class MockParser(BaseParser):
    """Minimal concrete BaseParser used by the tests below."""

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        """Yield one canned record regardless of the file."""
        yield LogRecord(source_id=source.id, data={"test": "value"})

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Yield one canned record regardless of the content."""
        yield LogRecord(source_id=source.id, data={"test": "value"})


class TestBaseParser(unittest.TestCase):
    """Tests for the base parser."""

    def setUp(self) -> None:
        """Create a parser and a dummy source for each test."""
        self.parser = MockParser()
        self.source = LogSource(
            id=uuid4(),
            name="Test Source",
            type=LogType.EVENT,
            path="test/path",
        )

    def test_validate_file(self) -> None:
        """validate_file is True only for an existing regular file."""
        # (exists, is_file, expected)
        cases = [
            (True, True, True),
            (False, True, False),
            (True, False, False),
        ]
        for exists, is_file, expected in cases:
            with patch("pathlib.Path.exists", return_value=exists), patch(
                "pathlib.Path.is_file", return_value=is_file
            ):
                self.assertEqual(self.parser.validate_file("test/path"), expected)

    def test_extract_timestamp(self) -> None:
        """extract_timestamp recognizes every conventional field name."""
        stamp = "2023-05-02T12:00:00Z"
        for field in ("timestamp", "time", "datetime", "@timestamp", "created_at"):
            self.assertEqual(self.parser.extract_timestamp({field: stamp}), stamp)

        # A bare date field is returned as-is.
        self.assertEqual(
            self.parser.extract_timestamp({"date": "2023-05-02"}), "2023-05-02"
        )

        # No recognized field -> None.
        self.assertIsNone(self.parser.extract_timestamp({"other": "value"}))


# Allow running this module directly: python tests/test_base_parser.py
if __name__ == "__main__":
    unittest.main()

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/models.py:
--------------------------------------------------------------------------------

```python
"""Core models for the MCP Log Analyzer."""

from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class LogType(str, Enum):
    """Supported log types."""

    # str-valued so members compare/serialize as plain strings.
    # NOTE(review): mcp_server/models/schemas.py defines a different LogType
    # ("event"/"structured"/...), and parsers/__init__.py references
    # LogType.STRUCTURED / LogType.UNSTRUCTURED, which do not exist here —
    # confirm which enum is canonical.
    EVENT = "evt"
    ETL = "etl"
    JSON = "json"
    XML = "xml"
    CSV = "csv"
    TEXT = "text"


class LogSource(BaseModel):
    """Log source information."""

    id: UUID = Field(default_factory=uuid4)  # stable identity for the source
    name: str
    type: LogType
    path: str
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now)  # not auto-refreshed; callers must set it
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogRecord(BaseModel):
    """Base log record."""

    id: UUID = Field(default_factory=uuid4)
    source_id: UUID  # references LogSource.id
    timestamp: Optional[datetime] = None  # None when the parser found no timestamp
    data: Dict[str, Any]  # parsed fields
    raw_data: Optional[str] = None  # original unparsed text, when retained


class LogQuery(BaseModel):
    """Query parameters for log retrieval."""

    source_ids: Optional[List[UUID]] = None  # None means all sources
    types: Optional[List[LogType]] = None  # None means all types
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    filters: Dict[str, Any] = Field(default_factory=dict)
    limit: int = 100  # page size
    offset: int = 0  # page start


class MCPRequest(BaseModel):
    """Base MCP request model."""

    # Every request gets a fresh id/timestamp unless explicitly supplied.
    request_id: UUID = Field(default_factory=uuid4)
    timestamp: datetime = Field(default_factory=datetime.now)
    client_id: Optional[str] = None


class LogSourceRequest(MCPRequest):
    """Request to register a log source."""

    name: str
    type: LogType
    path: str
    metadata: Dict[str, Any] = Field(default_factory=dict)


class LogQueryRequest(MCPRequest):
    """Request to query logs."""

    query: LogQuery


class LogAnalysisRequest(MCPRequest):
    """Request to analyze logs."""

    query: LogQuery
    analysis_type: str  # free-form analysis selector
    parameters: Dict[str, Any] = Field(default_factory=dict)


class MCPResponse(BaseModel):
    """Base MCP response model."""

    request_id: UUID  # echoes the originating request's id
    timestamp: datetime = Field(default_factory=datetime.now)
    status: str = "success"
    error: Optional[str] = None  # populated when status is not success


class LogSourceResponse(MCPResponse):
    """Response for log source registration."""

    source: LogSource


class LogQueryResponse(MCPResponse):
    """Response for log query."""

    records: List[LogRecord]
    total: int  # total matching records, not just this page
    limit: int
    offset: int


class LogAnalysisResponse(MCPResponse):
    """Response for log analysis."""

    results: Dict[str, Any]
    query: LogQuery  # the query that was analyzed, echoed back


class AnalysisResult(BaseModel):
    """Result of log analysis."""

    analysis_type: str
    summary: Dict[str, Any]  # high-level statistics, always present
    patterns: Optional[List[Dict[str, Any]]] = None  # presumably filled by pattern analysis — confirm
    anomalies: Optional[List[Dict[str, Any]]] = None  # presumably filled by anomaly detection — confirm
    metadata: Dict[str, Any] = Field(default_factory=dict)


class MCPContext(BaseModel):
    """Context for processing MCP requests."""

    request_id: UUID
    start_time: datetime = Field(default_factory=datetime.now)  # when processing began
    client_id: Optional[str] = None
    log_sources: Dict[UUID, LogSource] = Field(default_factory=dict)  # sources visible to this request


class MCPError(Exception):
    """Root exception for MCP operation failures."""

    def __init__(self, message: str, status_code: int = 400):
        """Store the message and HTTP-style status code, then init Exception."""
        super().__init__(message)
        self.message = message
        self.status_code = status_code

```

--------------------------------------------------------------------------------
/tests/test_tool_utils.py:
--------------------------------------------------------------------------------

```python
"""
Test utilities to access tool functions for testing.
"""

import asyncio
from typing import Any, Dict

from mcp_log_analyzer.core.models import LogSource

from .server import log_sources, parsers
from .tools.log_management_tools import (
    AnalyzeLogsRequest,
    QueryLogsRequest,
    RegisterLogSourceRequest,
)


# Create direct tool function wrappers for testing
async def register_log_source(request: RegisterLogSourceRequest) -> Dict[str, Any]:
    """Direct wrapper for register_log_source tool."""
    # Reject duplicates and unknown source types up front.
    if request.name in log_sources:
        return {"error": f"Log source '{request.name}' already exists"}
    if request.source_type not in parsers:
        return {"error": f"Unsupported source type: {request.source_type}"}

    entry = LogSource(
        name=request.name,
        type=request.source_type,
        path=request.path,
        metadata=request.config or {},
    )
    log_sources[request.name] = entry

    return {
        "message": f"Log source '{request.name}' registered successfully",
        "source": entry.model_dump(),
    }


async def list_log_sources() -> Dict[str, Any]:
    """Direct wrapper for list_log_sources tool."""
    dumped = [entry.model_dump() for entry in log_sources.values()]
    return {"sources": dumped, "count": len(dumped)}


async def get_log_source(name: str) -> Dict[str, Any]:
    """Direct wrapper for get_log_source tool."""
    try:
        entry = log_sources[name]
    except KeyError:
        return {"error": f"Log source '{name}' not found"}
    return {"source": entry.model_dump()}


async def delete_log_source(name: str) -> Dict[str, Any]:
    """Direct wrapper for delete_log_source tool."""
    if name not in log_sources:
        return {"error": f"Log source '{name}' not found"}
    log_sources.pop(name)
    return {"message": f"Log source '{name}' deleted successfully"}


async def query_logs(request: QueryLogsRequest) -> Dict[str, Any]:
    """Direct wrapper for query_logs tool."""
    if request.source_name not in log_sources:
        return {"error": f"Log source '{request.source_name}' not found"}

    source = log_sources[request.source_name]
    parser = parsers[source.type]

    try:
        # Run the blocking parse off the event loop.
        records = await asyncio.to_thread(
            parser.parse,
            source.path,
            filters=request.filters,
            start_time=request.start_time,
            end_time=request.end_time,
            limit=request.limit,
            offset=request.offset,
        )
        payload = [record.model_dump() for record in records]
    except Exception as e:
        return {"error": f"Failed to query logs: {str(e)}"}

    return {
        "logs": payload,
        "count": len(payload),
        "source": request.source_name,
    }


async def analyze_logs(request: AnalyzeLogsRequest) -> Dict[str, Any]:
    """Direct wrapper for analyze_logs tool."""
    source = log_sources.get(request.source_name)
    if source is None:
        return {"error": f"Log source '{request.source_name}' not found"}

    parser = parsers[source.type]

    try:
        # Step 1: collect matching records (blocking parse on a worker thread).
        records = await asyncio.to_thread(
            parser.parse,
            source.path,
            filters=request.filters,
            start_time=request.start_time,
            end_time=request.end_time,
        )
        # Step 2: run the requested analysis over those records.
        result = await asyncio.to_thread(
            parser.analyze, records, analysis_type=request.analysis_type
        )
    except Exception as e:
        return {"error": f"Failed to analyze logs: {str(e)}"}

    return {
        "result": result.model_dump(),
        "source": request.source_name,
        "analysis_type": request.analysis_type,
    }

```

--------------------------------------------------------------------------------
/docs/getting_started.md:
--------------------------------------------------------------------------------

```markdown
# Getting Started with MCP Log Analyzer

This guide will help you get started with the MCP Log Analyzer server.

## Prerequisites

- Python 3.8 or higher
- Windows OS (for Windows Event Log functionality)
- pywin32 package

## Installation

1. Clone the repository:

```bash
git clone https://github.com/your-username/mcp-log-analyzer.git
cd mcp-log-analyzer
```

2. Install the package and dependencies:

```bash
pip install -e .  # Install the package in development mode
pip install -e ".[dev]"  # Install development dependencies (optional)
```

## Configuration

The MCP Log Analyzer can be configured using a YAML file or environment variables.

### Configuration File

The default configuration file is `config/default.yml`. You can create a custom configuration file and specify its path when running the server.

### Environment Variables

Configuration can also be provided using environment variables:

- `MCP_CONFIG`: Path to the configuration file
- `MCP_SERVER_HOST`: Server host
- `MCP_SERVER_PORT`: Server port
- `MCP_DEBUG`: Enable debug mode (`true` or `false`)

## Running the Server

To run the server with the default configuration:

```bash
python -m mcp_log_analyzer.api.server
```

To run the server with a custom configuration file:

```bash
python -m mcp_log_analyzer.api.server --config path/to/config.yml
```

To specify host and port directly:

```bash
python -m mcp_log_analyzer.api.server --host 0.0.0.0 --port 8000
```

To enable auto-reload during development:

```bash
python -m mcp_log_analyzer.api.server --reload
```

## Testing the Server

You can use the provided test script to test the server:

```bash
python scripts/test_server.py --url http://localhost:5000
```

The test script will register a Windows Event Log source, query logs, and analyze logs.

## Accessing the API

Once the server is running, you can access the API at `http://localhost:5000/api`.

The API documentation is available at [API Reference](api_reference.md).

## Using with Windows Event Logs

The MCP Log Analyzer can analyze Windows Event Logs using the Windows Event Log API. To register a Windows Event Log source:

```
POST /api/sources
```

```json
{
  "request_id": "e77e5d1e-8a7e-4e2f-9ea2-3b9ac5f5c161",
  "timestamp": "2023-05-02T12:00:00Z",
  "client_id": "test-client",
  "name": "System Event Log",
  "type": "event",
  "path": "System",
  "metadata": {
    "description": "Windows System Event Log"
  }
}
```

The `path` can be one of the standard Windows Event Log names:
- `Application`
- `System`
- `Security`
- Other event log names

## Example Workflow

1. Start the server:

```bash
python -m mcp_log_analyzer.api.server
```

2. Register a log source:

```bash
curl -X POST http://localhost:5000/api/sources \
  -H "Content-Type: application/json" \
  -d '{
    "request_id": "e77e5d1e-8a7e-4e2f-9ea2-3b9ac5f5c161",
    "timestamp": "2023-05-02T12:00:00Z",
    "client_id": "test-client",
    "name": "System Event Log",
    "type": "event",
    "path": "System",
    "metadata": {
      "description": "Windows System Event Log"
    }
  }'
```

3. Get the source ID from the response.

4. Query logs:

```bash
curl -X POST http://localhost:5000/api/query \
  -H "Content-Type: application/json" \
  -d '{
    "request_id": "f88e6d2e-9b8f-5f3a-0fb3-4c0bd6a6d272",
    "timestamp": "2023-05-02T12:01:00Z",
    "client_id": "test-client",
    "query": {
      "source_ids": ["source-id-from-previous-response"],
      "limit": 10,
      "offset": 0
    }
  }'
```

5. Analyze logs:

```bash
curl -X POST http://localhost:5000/api/analyze \
  -H "Content-Type: application/json" \
  -d '{
    "request_id": "a99f7e3f-0c9a-6a4b-1ac4-5d1ce7b7e383",
    "timestamp": "2023-05-02T12:02:00Z",
    "client_id": "test-client",
    "query": {
      "source_ids": ["source-id-from-previous-response"],
      "limit": 100,
      "offset": 0
    },
    "analysis_type": "summary",
    "parameters": {
      "include_statistics": true
    }
  }'
``` 
```

--------------------------------------------------------------------------------
/test_windows_setup.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
Test script to verify Windows setup is working correctly.
Run this script after installing dependencies with: pip install -e .
"""

import platform
import sys


def test_basic_imports():
    """Check that the core package modules can be imported.

    Returns True when every import succeeds, False on the first failure.
    """
    print("🔍 Testing basic imports...")

    try:
        from mcp_log_analyzer.core.models import LogSource, LogType  # noqa: F401
    except Exception as e:
        print(f"  ❌ Core models import failed: {e}")
        return False
    print("  ✅ Core models import successful")

    try:
        from mcp_log_analyzer.parsers.csv_parser import CsvLogParser  # noqa: F401
    except Exception as e:
        print(f"  ❌ CSV parser import failed: {e}")
        return False
    print("  ✅ CSV parser import successful")

    return True


def test_windows_specific():
    """Verify Windows Event Log support; skipped (returns True) off Windows."""
    print("\n🪟 Testing Windows-specific functionality...")

    # Guard clause: nothing to verify on non-Windows hosts.
    if platform.system() != "Windows":
        print("  ⚠️  Skipping Windows tests (not on Windows)")
        return True

    try:
        # Import order matters for the error message: pywin32 modules first.
        import win32evtlog  # noqa: F401
        import win32evtlogutil  # noqa: F401
        import win32con  # noqa: F401
        print("  ✅ Windows Event Log modules available")

        from mcp_log_analyzer.parsers.evt_parser import EvtParser  # noqa: F401
        print("  ✅ Event Log parser import successful")
        return True
    except ImportError as e:
        print(f"  ❌ Windows Event Log modules not available: {e}")
        print("  💡 Install with: pip install pywin32>=300")
        return False


def test_server_startup():
    """Check that the MCP server module imports and report its parsers."""
    print("\n🚀 Testing MCP server startup...")

    try:
        from mcp_log_analyzer.mcp_server.server import mcp  # noqa: F401
    except Exception as e:
        print(f"  ❌ MCP server startup failed: {e}")
        return False
    print("  ✅ MCP server import successful")

    try:
        # Report which parser backends the server registered.
        from mcp_log_analyzer.mcp_server.server import parsers
        print(f"  📋 Available parsers: {list(parsers.keys())}")
    except Exception as e:
        print(f"  ❌ MCP server startup failed: {e}")
        return False

    return True


def test_csv_functionality():
    """Exercise the CSV parser end to end on a tiny in-memory sample."""
    print("\n📊 Testing CSV functionality...")

    # Two rows, no header: timestamp, level, message.
    sample = """2025-01-01 10:00:00,INFO,Test message
2025-01-01 10:01:00,ERROR,Test error"""

    try:
        from mcp_log_analyzer.core.models import LogSource, LogType
        from mcp_log_analyzer.parsers.csv_parser import CsvLogParser

        source = LogSource(name="test", type=LogType.CSV, path="test.csv")
        parser = CsvLogParser(
            {"has_header": False, "field_names": ["timestamp", "level", "message"]}
        )

        records = list(parser.parse_content(source, sample))
        print(f"  ✅ Parsed {len(records)} test records")

        analysis = parser.analyze(records)
        print(f"  ✅ Analysis completed: {analysis['summary']['total_records']} records")

        return True
    except Exception as e:
        print(f"  ❌ CSV functionality test failed: {e}")
        return False


def main():
    """Run every setup check and summarize the results.

    Returns True only when all checks pass.
    """
    print("🧪 MCP Log Analyzer Windows Setup Test")
    print("=" * 50)
    print(f"Platform: {platform.system()} {platform.release()}")
    print(f"Python: {sys.version}")
    print()

    checks = (
        test_basic_imports,
        test_windows_specific,
        test_server_startup,
        test_csv_functionality,
    )

    # Each check runs in order; the generator keeps side effects sequential.
    passed = sum(1 for check in checks if check())

    print("\n" + "=" * 50)
    print(f"📊 Test Results: {passed}/{len(checks)} tests passed")

    all_ok = passed == len(checks)
    if all_ok:
        print("🎉 All tests passed! The setup is working correctly.")
        print("\n💡 You can now run: python main.py")
    else:
        print("⚠️  Some tests failed. Please check the error messages above.")
        if platform.system() == "Windows":
            print("\n💡 Try installing Windows dependencies:")
            print("   pip install pywin32>=300")

    return all_ok


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
```

--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
Main entry point for the MCP Log Analyzer server with graceful shutdown support.
"""

import asyncio
import atexit
import signal
import sys
import logging
from pathlib import Path

# Add src to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))

from mcp_log_analyzer.mcp_server.server import mcp, log_sources, get_log_sources

logger = logging.getLogger(__name__)

# Track if cleanup has been performed
_cleanup_done = False


def cleanup_resources():
    """Clean up all resources on shutdown.

    Idempotent: both the atexit hook and the signal handlers may call this,
    so a module-level flag guards against running twice.
    """
    global _cleanup_done

    if _cleanup_done:
        return
    _cleanup_done = True

    logger.info("Cleaning up resources on shutdown...")

    # Drop per-source ETL caches for every registered ETL source.
    etl_sources = [src for src in get_log_sources().values() if src.type == "etl"]
    if etl_sources:
        try:
            from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser

            logger.info(f"Cleaning up ETL caches for {len(etl_sources)} source(s)")
            for src in etl_sources:
                logger.info(f"Cleaning up ETL cache for: {src.name} ({src.path})")
                EtlCachedParser.cleanup_cache_for_source(src.path)
        except Exception as e:
            logger.error(f"Error cleaning up ETL caches: {e}")

    # Sweep any remaining/orphaned ETL cache files as well.
    try:
        from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser

        logger.info("Cleaning up any remaining ETL cache files...")
        EtlCachedParser.cleanup_all_cache()
    except Exception as e:
        logger.error(f"Error cleaning up all ETL caches: {e}")

    # Log sources intentionally persist across restarts, so they are kept.
    logger.info(f"Keeping {len(get_log_sources())} log sources for next startup")


def signal_handler(signum, frame):
    """Handle a shutdown signal: clean up resources, then exit.

    Installed for SIGINT/SIGTERM (and SIGBREAK on Windows) by
    setup_signal_handlers(); `frame` is required by the signal API but unused.
    """
    # Lazy %-style args keep formatting out of the hot path; output is identical.
    logger.info("Received signal %s, initiating graceful shutdown...", signum)
    cleanup_resources()
    sys.exit(0)


async def async_signal_handler():
    """Async signal handler for asyncio.

    Scheduled via loop.add_signal_handler in setup_signal_handlers();
    simply delegates to the (idempotent) synchronous cleanup.
    """
    cleanup_resources()


def setup_signal_handlers():
    """Set up signal handlers for graceful shutdown.

    Installs synchronous handlers for SIGINT/SIGTERM (plus SIGBREAK on
    Windows), and additionally registers asyncio-level handlers when an
    event loop is already running.
    """
    # Handle CTRL+C (SIGINT) and termination signals
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # For Windows, also handle CTRL+BREAK
    if sys.platform == "win32":
        signal.signal(signal.SIGBREAK, signal_handler)

    # Set up asyncio signal handlers if running in event loop
    try:
        loop = asyncio.get_running_loop()
        for sig in (signal.SIGINT, signal.SIGTERM):
            loop.add_signal_handler(sig, lambda: asyncio.create_task(async_signal_handler()))
    except RuntimeError:
        # No event loop running yet
        pass
    except NotImplementedError:
        # add_signal_handler is not supported on Windows event loops;
        # the synchronous handlers above still cover shutdown there.
        pass


def main():
    """Run the MCP server with cleanup support.

    Configures logging, registers cleanup hooks (atexit + signals), installs
    an excepthook for uncaught errors, then runs the server. cleanup_resources()
    is idempotent, so the multiple invocation paths below are safe.
    """
    # DEBUG level is intentional: it surfaces protocol-level detail during startup.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Register cleanup function with atexit
    atexit.register(cleanup_resources)

    # Set up signal handlers
    setup_signal_handlers()

    try:
        logger.info("Starting MCP Log Analyzer server...")

        # Catch any unhandled exceptions so they land in the log rather than
        # only on stderr; KeyboardInterrupt keeps its default behavior.
        # (sys is already imported at module level.)
        def handle_exception(exc_type, exc_value, exc_traceback):
            if issubclass(exc_type, KeyboardInterrupt):
                sys.__excepthook__(exc_type, exc_value, exc_traceback)
                return
            logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

        sys.excepthook = handle_exception

        mcp.run()
        logger.info("MCP server run() method returned - server shutting down")
    except KeyboardInterrupt:
        logger.info("Server interrupted by user")
        cleanup_resources()
    except Exception as e:
        logger.error(f"Server error: {e}")
        logger.error(f"Error type: {type(e).__name__}")
        logger.error(f"Error args: {e.args}")
        import traceback
        logger.error(f"Traceback: {traceback.format_exc()}")
        cleanup_resources()
        raise
    finally:
        # Final cleanup if not already done
        logger.info("In finally block - checking if cleanup needed")
        try:
            # Check if any log sources are loaded (lazy loading may not have happened)
            sources = get_log_sources()
            if sources:
                logger.info(f"Found {len(sources)} log sources, cleaning up")
                cleanup_resources()
        except Exception:
            # Best-effort only: if state can't be inspected during teardown,
            # skip the extra cleanup instead of masking the original exit path.
            logger.debug("Skipping final cleanup check", exc_info=True)
        logger.info("MCP server process ending")


if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/tcp_proxy.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
TCP Proxy for MCP Server
========================

This script creates a local stdio-to-TCP proxy that allows Claude Code to connect
to remote MCP servers running on different machines.

Usage:
    python tcp_proxy.py <remote_host> <remote_port>
    
Example:
    python tcp_proxy.py 192.168.1.100 8088
    
Add to Claude Code:
    claude mcp add remote-mcp-server python /path/to/tcp_proxy.py 192.168.1.100 8088
"""

import socket
import sys
import threading
import logging
import argparse
from typing import Optional

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    stream=sys.stderr  # Log to stderr to avoid interfering with stdio protocol
)
logger = logging.getLogger(__name__)


class MCPTCPProxy:
    """Proxy that bridges stdio MCP communication to a remote TCP server.

    Two daemon threads pump bytes in opposite directions (stdin -> socket and
    socket -> stdout). When either side closes, ``self.running`` is set False
    so the other loop exits on its next iteration. All logging goes to stderr
    (configured at module level) so stdout stays clean for the MCP protocol.
    """

    def __init__(self, host: str, port: int):
        # Remote server address to connect to.
        self.host = host
        self.port = port
        # Connected TCP socket; stays None until connect() succeeds.
        self.socket: Optional[socket.socket] = None
        # Shared run flag read by both forwarding threads.
        self.running = False

    def connect(self) -> bool:
        """Connect to the remote MCP server.

        Returns:
            True on success, False on any connection error (error is logged).
        """
        try:
            logger.info(f"Connecting to MCP server at {self.host}:{self.port}")
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.connect((self.host, self.port))
            logger.info("Successfully connected to remote MCP server")
            return True
        except Exception as e:
            logger.error(f"Failed to connect to {self.host}:{self.port}: {e}")
            return False

    def stdin_to_socket(self):
        """Forward stdin to the TCP socket (runs in its own thread)."""
        try:
            while self.running:
                # Read from stdin (binary mode for proper handling);
                # read1 returns as soon as any data is available.
                data = sys.stdin.buffer.read1(4096)
                if not data:
                    # EOF on stdin: the local client (e.g. Claude Code) went away.
                    logger.info("stdin closed, stopping stdin->socket forwarding")
                    break

                # Send to socket
                self.socket.sendall(data)
                logger.debug(f"Forwarded {len(data)} bytes from stdin to socket")

        except Exception as e:
            logger.error(f"Error in stdin->socket forwarding: {e}")
        finally:
            # Signal the other forwarding thread to stop as well.
            self.running = False

    def socket_to_stdout(self):
        """Forward TCP socket data to stdout (runs in its own thread)."""
        try:
            while self.running:
                # Receive from socket; empty result means the peer closed.
                data = self.socket.recv(4096)
                if not data:
                    logger.info("Socket closed, stopping socket->stdout forwarding")
                    break

                # Write to stdout and flush immediately so the MCP client
                # sees each message without buffering delay.
                sys.stdout.buffer.write(data)
                sys.stdout.buffer.flush()
                logger.debug(f"Forwarded {len(data)} bytes from socket to stdout")

        except Exception as e:
            logger.error(f"Error in socket->stdout forwarding: {e}")
        finally:
            # Signal the other forwarding thread to stop as well.
            self.running = False

    def run(self):
        """Run the proxy: connect, start both pumps, wait for shutdown."""
        if not self.connect():
            sys.exit(1)

        self.running = True

        # Daemon threads: they won't keep the process alive on interpreter exit.
        stdin_thread = threading.Thread(target=self.stdin_to_socket, daemon=True)
        socket_thread = threading.Thread(target=self.socket_to_stdout, daemon=True)

        # Start threads
        stdin_thread.start()
        socket_thread.start()

        try:
            # Wait for threads to complete.
            # NOTE(review): if the socket side closes first, stdin_thread may
            # remain blocked in read1 until stdin closes, so this join can
            # wait indefinitely; daemon threads mean process exit is still
            # possible — confirm this is the intended shutdown behavior.
            stdin_thread.join()
            socket_thread.join()
        except KeyboardInterrupt:
            logger.info("Proxy interrupted by user")
        finally:
            self.cleanup()

    def cleanup(self):
        """Clean up resources: stop the pumps and close the socket."""
        self.running = False
        if self.socket:
            try:
                self.socket.close()
                logger.info("Socket closed")
            except Exception as e:
                logger.error(f"Error closing socket: {e}")


def main():
    """Parse command-line arguments and run the stdio-to-TCP proxy."""
    parser = argparse.ArgumentParser(
        description="TCP Proxy for connecting Claude Code to remote MCP servers",
        epilog="Example: python tcp_proxy.py 192.168.1.100 8088",
    )
    parser.add_argument("host", help="Remote MCP server host (IP address or hostname)")
    parser.add_argument("port", type=int, help="Remote MCP server port")
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")

    args = parser.parse_args()

    # Raise root-logger verbosity so per-chunk forwarding logs become visible.
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    proxy = MCPTCPProxy(args.host, args.port)
    try:
        proxy.run()
    except Exception as e:
        logger.error(f"Proxy failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
```

--------------------------------------------------------------------------------
/docs/api_reference.md:
--------------------------------------------------------------------------------

```markdown
# MCP Log Analyzer API Reference

## Overview

The MCP (Model Context Protocol) Log Analyzer provides a REST API for analyzing different types of logs on Windows systems. This document describes the available endpoints and their usage.

## Base URL

All API endpoints are relative to the base URL:

```
http://localhost:5000/api
```

## Authentication

Authentication is not implemented in the current version. In a production environment, you would want to add proper authentication mechanisms.

## Common Response Format

All API responses follow a common format:

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "status": "success|error",
  "error": "error-message-if-status-is-error",
  ... endpoint-specific fields ...
}
```

## Endpoints

### Health Check

```
GET /health
```

Check if the server is running.

**Response:**

```json
{
  "status": "ok"
}
```

### Register Log Source

```
POST /sources
```

Register a new log source for analysis.

**Request:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "client_id": "optional-client-id",
  "name": "log-source-name",
  "type": "event|structured|csv|unstructured",
  "path": "path-to-log-source",
  "metadata": {
    "optional-key1": "optional-value1",
    "optional-key2": "optional-value2"
  }
}
```

**Response:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "status": "success",
  "source": {
    "id": "uuid-string",
    "name": "log-source-name",
    "type": "event|structured|csv|unstructured",
    "path": "path-to-log-source",
    "created_at": "iso-datetime-string",
    "updated_at": "iso-datetime-string",
    "metadata": {
      "key1": "value1",
      "key2": "value2"
    }
  }
}
```

### List Log Sources

```
GET /sources
```

List all registered log sources.

**Response:**

```json
[
  {
    "id": "uuid-string",
    "name": "log-source-name",
    "type": "event|structured|csv|unstructured",
    "path": "path-to-log-source",
    "created_at": "iso-datetime-string",
    "updated_at": "iso-datetime-string",
    "metadata": {
      "key1": "value1",
      "key2": "value2"
    }
  },
  ...
]
```

### Get Log Source

```
GET /sources/{source_id}
```

Get details of a specific log source.

**Response:**

```json
{
  "id": "uuid-string",
  "name": "log-source-name",
  "type": "event|structured|csv|unstructured",
  "path": "path-to-log-source",
  "created_at": "iso-datetime-string",
  "updated_at": "iso-datetime-string",
  "metadata": {
    "key1": "value1",
    "key2": "value2"
  }
}
```

### Delete Log Source

```
DELETE /sources/{source_id}
```

Delete a registered log source.

**Response:**

```json
{
  "status": "success",
  "message": "Log source {source_id} deleted"
}
```

### Query Logs

```
POST /query
```

Query logs from registered sources.

**Request:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "client_id": "optional-client-id",
  "query": {
    "source_ids": ["optional-uuid-string1", "optional-uuid-string2"],
    "types": ["optional-log-type1", "optional-log-type2"],
    "start_time": "optional-iso-datetime-string",
    "end_time": "optional-iso-datetime-string",
    "filters": {
      "optional-field1": "optional-value1",
      "optional-field2": "optional-value2"
    },
    "limit": 100,
    "offset": 0
  }
}
```

**Response:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "status": "success",
  "records": [
    {
      "id": "uuid-string",
      "source_id": "uuid-string",
      "timestamp": "iso-datetime-string",
      "data": {
        "field1": "value1",
        "field2": "value2",
        ...
      },
      "raw_data": "optional-raw-data-string"
    },
    ...
  ],
  "total": 1234,
  "limit": 100,
  "offset": 0
}
```

### Analyze Logs

```
POST /analyze
```

Analyze logs from registered sources.

**Request:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "client_id": "optional-client-id",
  "query": {
    "source_ids": ["optional-uuid-string1", "optional-uuid-string2"],
    "types": ["optional-log-type1", "optional-log-type2"],
    "start_time": "optional-iso-datetime-string",
    "end_time": "optional-iso-datetime-string",
    "filters": {
      "optional-field1": "optional-value1",
      "optional-field2": "optional-value2"
    },
    "limit": 100,
    "offset": 0
  },
  "analysis_type": "analysis-type",
  "parameters": {
    "optional-param1": "optional-value1",
    "optional-param2": "optional-value2"
  }
}
```

**Response:**

```json
{
  "request_id": "uuid-string",
  "timestamp": "iso-datetime-string",
  "status": "success",
  "results": {
    "analysis_type": "analysis-type",
    "parameters": {
      "param1": "value1",
      "param2": "value2"
    },
    "summary": "summary-string",
    "details": {
      ... analysis-specific-details ...
    }
  },
  "query": {
    ... query-object-from-request ...
  }
}
```

## Error Handling

If an error occurs, the response will have a status of "error" and an error message:

```json
{
  "status": "error",
  "error": "Error message"
}
```

HTTP status codes are also used to indicate errors:
- 400: Bad Request - The request was invalid
- 404: Not Found - The requested resource was not found
- 500: Internal Server Error - An unexpected error occurred on the server

## Log Types

The following log types are supported:

- `event`: Windows Event Logs (EVT/EVTX)
- `structured`: Structured Logs (JSON, XML)
- `csv`: CSV Logs
- `unstructured`: Unstructured Text Logs 
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/core/state_manager.py:
--------------------------------------------------------------------------------

```python
"""
State persistence manager for MCP Log Analyzer.

Manages persistent storage of log sources and other state that needs
to survive server restarts.
"""

import json
import os
from pathlib import Path
from typing import Dict, Any, Optional
import logging
from datetime import datetime

from .models import LogSource, LogType

logger = logging.getLogger(__name__)


class StateManager:
    """Manages persistent state for the MCP server.

    Persists log-source definitions and arbitrary server state as JSON files
    under a per-user, platform-appropriate directory so they survive server
    restarts. Persistence is best-effort: every failure is logged and
    swallowed rather than allowed to crash the server.
    """
    
    def __init__(self, state_dir: Optional[Path] = None):
        """Initialize state manager.
        
        Args:
            state_dir: Directory to store state files. Defaults to user's app data
                (%APPDATA%\\MCPLogAnalyzer on Windows, ~/.config/mcp-log-analyzer
                elsewhere). Created if it does not exist.
        """
        if state_dir is None:
            # Use platform-appropriate directory
            if os.name == 'nt':  # Windows
                app_data = os.environ.get('APPDATA', os.path.expanduser('~'))
                state_dir = Path(app_data) / 'MCPLogAnalyzer'
            else:  # Unix-like
                state_dir = Path.home() / '.config' / 'mcp-log-analyzer'
        
        self.state_dir = Path(state_dir)
        self.state_dir.mkdir(parents=True, exist_ok=True)
        
        # Two separate files: general server state and the log-source registry.
        self.state_file = self.state_dir / 'server_state.json'
        self.sources_file = self.state_dir / 'log_sources.json'
        
        logger.info(f"State manager initialized with directory: {self.state_dir}")
    
    def save_log_sources(self, log_sources: Dict[str, LogSource]) -> None:
        """Save log sources to persistent storage.
        
        Args:
            log_sources: Dictionary of log sources to save, keyed by name.
        """
        try:
            sources_data = {
                name: {
                    'id': str(source.id),
                    'name': source.name,
                    # NOTE(review): assumes source.type serializes to JSON
                    # (plain string or str-based enum) — confirm against LogType.
                    'type': source.type,
                    'path': source.path,
                    'metadata': source.metadata,
                    'created_at': source.created_at.isoformat() if source.created_at else None,
                    'updated_at': source.updated_at.isoformat() if source.updated_at else None,
                }
                for name, source in log_sources.items()
            }
            
            # Explicit encoding avoids depending on the platform locale default.
            with open(self.sources_file, 'w', encoding='utf-8') as f:
                json.dump(sources_data, f, indent=2)
            
            logger.info(f"Saved {len(log_sources)} log sources to {self.sources_file}")
            
        except Exception as e:
            logger.error(f"Failed to save log sources: {e}")
    
    def load_log_sources(self) -> Dict[str, LogSource]:
        """Load log sources from persistent storage.
        
        Returns:
            Dictionary of loaded log sources keyed by name; empty if no saved
            file exists or loading fails (failure is logged).
        """
        log_sources: Dict[str, LogSource] = {}
        
        if not self.sources_file.exists():
            logger.info("No saved log sources found")
            return log_sources
        
        try:
            with open(self.sources_file, 'r', encoding='utf-8') as f:
                sources_data = json.load(f)
            
            for name, data in sources_data.items():
                # Convert datetime strings back to datetime objects
                if data.get('created_at'):
                    data['created_at'] = datetime.fromisoformat(data['created_at'])
                if data.get('updated_at'):
                    data['updated_at'] = datetime.fromisoformat(data['updated_at'])
                
                # Create LogSource instance
                source = LogSource(
                    id=data['id'],
                    name=data['name'],
                    type=data['type'],
                    path=data['path'],
                    metadata=data.get('metadata', {}),
                    created_at=data.get('created_at'),
                    updated_at=data.get('updated_at')
                )
                
                log_sources[name] = source
            
            logger.info(f"Loaded {len(log_sources)} log sources from {self.sources_file}")
            
        except Exception as e:
            logger.error(f"Failed to load log sources: {e}")
        
        return log_sources
    
    def save_server_state(self, state: Dict[str, Any]) -> None:
        """Save general server state.
        
        Args:
            state: State dictionary to save (must be JSON-serializable).
        """
        try:
            with open(self.state_file, 'w', encoding='utf-8') as f:
                json.dump(state, f, indent=2)
            
            logger.info(f"Saved server state to {self.state_file}")
            
        except Exception as e:
            logger.error(f"Failed to save server state: {e}")
    
    def load_server_state(self) -> Dict[str, Any]:
        """Load general server state.
        
        Returns:
            Loaded state dictionary, or empty dict if none exists or loading fails.
        """
        if not self.state_file.exists():
            logger.info("No saved server state found")
            return {}
        
        try:
            with open(self.state_file, 'r', encoding='utf-8') as f:
                state = json.load(f)
            
            logger.info(f"Loaded server state from {self.state_file}")
            return state
            
        except Exception as e:
            logger.error(f"Failed to load server state: {e}")
            return {}
    
    def clear_state(self) -> None:
        """Clear all saved state by deleting both state files if present."""
        try:
            if self.sources_file.exists():
                self.sources_file.unlink()
                logger.info("Cleared log sources state")
            
            if self.state_file.exists():
                self.state_file.unlink()
                logger.info("Cleared server state")
                
        except Exception as e:
            logger.error(f"Failed to clear state: {e}")


# Process-wide singleton; created lazily by get_state_manager().
_state_manager: Optional[StateManager] = None


def get_state_manager() -> StateManager:
    """Return the shared StateManager, constructing it on first access."""
    global _state_manager
    if _state_manager is None:
        _state_manager = StateManager()
    return _state_manager
```

--------------------------------------------------------------------------------
/test_tcp_proxy.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
Test script to verify TCP proxy functionality.
"""

import asyncio
import json
import sys
import time

async def test_tcp_connection(host='localhost', port=8080):
    """Exercise a TCP MCP proxy end-to-end over newline-delimited JSON-RPC.

    Walks the MCP lifecycle against ``host:port``: initialize, tools/list,
    resources/list, one sample tool call, and shutdown.  Progress is printed
    to stdout; the process exits with status 1 on timeout, refused
    connection, or any unexpected error.

    Args:
        host: Host name of the TCP proxy to test.
        port: TCP port the proxy listens on.
    """

    async def send_request(reader, writer, request, sent_note):
        """Send one JSON-RPC request line and wait up to 5s for the reply.

        Factors out the write/drain/print/readline sequence previously
        duplicated for each of the five tests.  Returns the parsed response
        dict, or None when no response line arrived before the timeout.
        """
        writer.write((json.dumps(request) + '\n').encode('utf-8'))
        await writer.drain()
        print(f"   {sent_note}")
        response_line = await asyncio.wait_for(reader.readline(), timeout=5.0)
        if response_line:
            return json.loads(response_line.decode('utf-8'))
        return None

    print(f"Testing TCP MCP proxy at {host}:{port}...")

    try:
        reader, writer = await asyncio.open_connection(host, port)
        print("✓ Connected to TCP server")

        # Test 1: Initialize request (must come first per the MCP handshake).
        print("\n1. Testing initialize request...")
        init_request = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {},
                "clientInfo": {
                    "name": "test-client",
                    "version": "1.0.0"
                }
            }
        }
        response = await send_request(reader, writer, init_request,
                                      "Sent initialize request")
        if response is not None:
            print("   ✓ Received initialize response")
            print(f"   Server: {response.get('result', {}).get('serverInfo', {})}")
        else:
            print("   ✗ No response received")
            return

        # Test 2: List tools
        print("\n2. Testing tools/list request...")
        tools_request = {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {}
        }
        response = await send_request(reader, writer, tools_request,
                                      "Sent tools/list request")
        if response is not None:
            tools = response.get('result', {}).get('tools', [])
            print(f"   ✓ Received {len(tools)} tools")
            for tool in tools[:5]:  # Show first 5 tools
                print(f"     - {tool.get('name')}: {tool.get('description', '')[:50]}...")

        # Test 3: List resources
        print("\n3. Testing resources/list request...")
        resources_request = {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "resources/list",
            "params": {}
        }
        response = await send_request(reader, writer, resources_request,
                                      "Sent resources/list request")
        if response is not None:
            resources = response.get('result', {}).get('resources', [])
            print(f"   ✓ Received {len(resources)} resources")
            for resource in resources[:5]:  # Show first 5 resources
                print(f"     - {resource.get('uri')}: {resource.get('name', '')}")

        # Test 4: Call a tool
        print("\n4. Testing tool call (list_log_sources)...")
        tool_request = {
            "jsonrpc": "2.0",
            "id": 4,
            "method": "tools/call",
            "params": {
                "name": "list_log_sources",
                "arguments": {}
            }
        }
        response = await send_request(reader, writer, tool_request,
                                      "Sent tool call request")
        if response is not None:
            if 'result' in response:
                print("   ✓ Tool call successful")
                result = response.get('result', {})
                if 'content' in result:
                    print(f"   Result: {str(result['content'])[:100]}...")
            elif 'error' in response:
                print(f"   ✗ Tool call error: {response['error']}")

        # Test 5: Shutdown
        print("\n5. Testing shutdown...")
        shutdown_request = {
            "jsonrpc": "2.0",
            "id": 5,
            "method": "shutdown",
            "params": {}
        }
        response = await send_request(reader, writer, shutdown_request,
                                      "Sent shutdown request")
        if response is not None:
            print("   ✓ Received shutdown response")

        # Close connection
        writer.close()
        await writer.wait_closed()
        print("\n✓ All tests completed successfully!")

    except asyncio.TimeoutError:
        print("✗ Timeout waiting for response")
        sys.exit(1)
    except ConnectionRefusedError:
        print("✗ Could not connect to TCP server. Make sure the proxy is running:")
        print("  python tcp_proxy.py python main.py")
        sys.exit(1)
    except Exception as e:
        print(f"✗ Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

if __name__ == "__main__":
    # Command-line entry point: connect to the given proxy and run the suite.
    import argparse

    cli = argparse.ArgumentParser(description='Test TCP proxy connection')
    cli.add_argument('--host', default='localhost', help='Host to connect to')
    cli.add_argument('--port', type=int, default=8080, help='Port to connect to')
    options = cli.parse_args()

    asyncio.run(test_tcp_connection(options.host, options.port))
```

--------------------------------------------------------------------------------
/scripts/test_server.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python
"""Test script for the MCP Log Analyzer server."""

import argparse
import json
import logging
import sys
import uuid
from datetime import datetime, timedelta
from typing import Any, Dict

import requests
from pydantic import BaseModel, Field

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger("test_server")


class TestResult(BaseModel):
    """Outcome of a single server test, suitable for JSON serialization."""

    # Name of the test that was executed.
    name: str
    # True when the test function returned without raising.
    success: bool
    # Human-readable outcome summary ("Test successful" or the failure text).
    message: str
    # Server response captured on success; empty dict on failure.
    response: Dict[str, Any] = Field(default_factory=dict)


def run_test(name: str, func: callable, *args, **kwargs) -> TestResult:
    """Execute one test callable and wrap its outcome in a TestResult.

    Args:
        name: Test name.
        func: Test function.
        *args: Positional arguments forwarded to the test function.
        **kwargs: Keyword arguments forwarded to the test function.

    Returns:
        A TestResult recording success (with the response) or failure.
    """
    logger.info(f"Running test: {name}")
    try:
        payload = func(*args, **kwargs)
    except Exception as exc:
        logger.exception(f"Test failed: {exc}")
        return TestResult(name=name, success=False, message=f"Test failed: {str(exc)}")
    return TestResult(
        name=name, success=True, message="Test successful", response=payload
    )


def test_health(base_url: str) -> Dict[str, Any]:
    """Hit the health endpoint and return its JSON body.

    Args:
        base_url: Base URL of the server.

    Returns:
        Parsed JSON response from the server.
    """
    resp = requests.get(f"{base_url}/api/health")
    resp.raise_for_status()
    return resp.json()


def test_register_source(
    base_url: str, source_name: str, source_type: str, source_path: str
) -> Dict[str, Any]:
    """Register a log source through the API and return the JSON response.

    Args:
        base_url: Base URL of the server.
        source_name: Name of the source.
        source_type: Type of the source.
        source_path: Path to the source.

    Returns:
        Parsed JSON response from the server.
    """
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "name": source_name,
        "type": source_type,
        "path": source_path,
        "metadata": {"test": True, "description": "Test source"},
    }
    resp = requests.post(f"{base_url}/api/sources", json=payload)
    resp.raise_for_status()
    return resp.json()


def test_list_sources(base_url: str) -> Dict[str, Any]:
    """List the registered log sources and return the JSON response.

    Args:
        base_url: Base URL of the server.

    Returns:
        Parsed JSON response from the server.
    """
    resp = requests.get(f"{base_url}/api/sources")
    resp.raise_for_status()
    return resp.json()


def test_query_logs(base_url: str, source_id: str = None) -> Dict[str, Any]:
    """Query the last 24 hours of logs and return the JSON response.

    Args:
        base_url: Base URL of the server.
        source_id: Optional source ID to restrict the query to.

    Returns:
        Parsed JSON response from the server.
    """
    # Restrict to one source when given; None asks the server for all sources.
    query = {
        "source_ids": [source_id] if source_id else None,
        "start_time": (datetime.now() - timedelta(days=1)).isoformat(),
        "end_time": datetime.now().isoformat(),
        "limit": 10,
        "offset": 0,
    }
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "query": query,
    }
    resp = requests.post(f"{base_url}/api/query", json=payload)
    resp.raise_for_status()
    return resp.json()


def test_analyze_logs(base_url: str, source_id: str = None) -> Dict[str, Any]:
    """Run a summary analysis over the last 24 hours of logs.

    Args:
        base_url: Base URL of the server.
        source_id: Optional source ID to restrict the analysis to.

    Returns:
        Parsed JSON response from the server.
    """
    query = {
        "source_ids": [source_id] if source_id else None,
        "start_time": (datetime.now() - timedelta(days=1)).isoformat(),
        "end_time": datetime.now().isoformat(),
        "limit": 10,
        "offset": 0,
    }
    payload = {
        "request_id": str(uuid.uuid4()),
        "timestamp": datetime.now().isoformat(),
        "client_id": "test-client",
        "query": query,
        "analysis_type": "summary",
        "parameters": {"include_statistics": True},
    }
    resp = requests.post(f"{base_url}/api/analyze", json=payload)
    resp.raise_for_status()
    return resp.json()


def main() -> None:
    """Run the full HTTP test suite against a running server."""
    parser = argparse.ArgumentParser(description="Test MCP Log Analyzer Server")
    parser.add_argument("--url", help="Server URL", default="http://localhost:5000")
    parser.add_argument(
        "--source-name", help="Log source name", default="System Event Log"
    )
    parser.add_argument("--source-type", help="Log source type", default="event")
    parser.add_argument("--source-path", help="Log source path", default="System")
    parser.add_argument("--output", help="Output file for test results", default=None)
    args = parser.parse_args()

    logger.info(f"Testing server at {args.url}")

    # Health first, then registration (whose source id feeds the later tests).
    results = [run_test("health", test_health, args.url)]

    registration = run_test(
        "register_source",
        test_register_source,
        args.url,
        args.source_name,
        args.source_type,
        args.source_path,
    )
    results.append(registration)

    # Pull the source ID out of a successful registration, if any.
    source_id = None
    if registration.success and "source" in registration.response:
        source_id = registration.response["source"]["id"]

    results.append(run_test("list_sources", test_list_sources, args.url))
    results.append(run_test("query_logs", test_query_logs, args.url, source_id))
    results.append(run_test("analyze_logs", test_analyze_logs, args.url, source_id))

    # Summarize outcomes.
    logger.info("Test results:")
    passed = 0
    for outcome in results:
        marker = "✅ Success" if outcome.success else "❌ Failure"
        logger.info(f"{marker}: {outcome.name} - {outcome.message}")
        if outcome.success:
            passed += 1

    logger.info(f"{passed}/{len(results)} tests succeeded")

    # Optionally persist the results as JSON.
    if args.output:
        with open(args.output, "w") as f:
            json.dump([outcome.dict() for outcome in results], f, indent=2)

    # Non-zero exit status when anything failed.
    if passed < len(results):
        sys.exit(1)


# Script entry point: run the HTTP test suite when executed directly.
if __name__ == "__main__":
    main()

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/health_check_tools.py:
--------------------------------------------------------------------------------

```python
"""
Health check and diagnostic tools for MCP server.
"""

import asyncio
import time
from datetime import datetime
from typing import Dict, Any

from mcp.server import FastMCP


def register_health_check_tools(mcp: FastMCP):
    """Register health-check and diagnostic tools with the MCP server.

    The nested async functions become MCP tools via the @mcp.tool()
    decorator; their docstrings are exposed to clients as tool
    descriptions, so they are kept intact.

    Args:
        mcp: FastMCP server instance the tools are attached to.
    """

    # Server start time for uptime calculation (captured once at registration).
    server_start_time = time.time()

    @mcp.tool()
    async def debug_params(**kwargs) -> Dict[str, Any]:
        """
        Debug tool to see exactly what parameters are being passed.

        This tool accepts any parameters and returns them for inspection.
        """
        import logging
        logger = logging.getLogger(__name__)

        # Log at INFO so parameter-shape problems show up in the server logs.
        logger.info("=== DEBUG_PARAMS TOOL CALLED ===")
        logger.info(f"Received kwargs: {kwargs}")
        logger.info(f"Kwargs type: {type(kwargs)}")
        logger.info(f"Kwargs keys: {list(kwargs.keys()) if kwargs else 'None'}")

        return {
            "received_kwargs": kwargs,
            "kwargs_type": str(type(kwargs)),
            "kwargs_keys": list(kwargs.keys()) if kwargs else [],
            "timestamp": datetime.now().isoformat()
        }

    @mcp.tool()
    async def server_diagnostics() -> Dict[str, Any]:
        """
        Get detailed server diagnostics including internal state.

        This tool provides deep insights into the server's current state
        and can help diagnose issues like parameter errors.
        """
        from ..server import mcp as server_mcp

        diagnostics = {
            "timestamp": datetime.now().isoformat(),
            "server_type": type(server_mcp).__name__,
            "server_info": {
                "name": getattr(server_mcp, 'name', 'unknown'),
                "version": getattr(server_mcp, 'version', 'unknown')
            },
            "request_stats": {},
            "registered_tools": [],
            "internal_state": {}
        }

        # Request statistics come from private counters that may not exist
        # on every FastMCP version, so probe defensively.
        if hasattr(server_mcp, '_request_count'):
            diagnostics["request_stats"] = {
                "total_requests": server_mcp._request_count,
                "total_errors": getattr(server_mcp, '_error_count', 0),
                "consecutive_param_errors": getattr(server_mcp, '_consecutive_param_errors', 0)
            }

        # The tool-registry attribute name differs between FastMCP versions.
        if hasattr(server_mcp, '_tools'):
            diagnostics["registered_tools"] = list(server_mcp._tools.keys())
        elif hasattr(server_mcp, 'tools'):
            diagnostics["registered_tools"] = list(server_mcp.tools.keys())

        # Check for common issues
        diagnostics["health_checks"] = {
            "has_tools": len(diagnostics["registered_tools"]) > 0,
            "server_responsive": True  # We're responding, so this is true
        }

        return diagnostics

    @mcp.tool()
    async def health_check() -> Dict[str, Any]:
        """
        Perform a health check on the MCP server.

        Returns server status, uptime, and basic diagnostic information.
        This can be used by clients to verify the server is responsive.
        """
        from ..server import log_sources, parsers

        current_time = time.time()
        uptime_seconds = current_time - server_start_time

        # Check ETL parser status
        etl_parser_info = {}
        if "etl" in parsers:
            parser = parsers["etl"]
            etl_parser_info = {
                "available": parser.is_available() if hasattr(parser, "is_available") else False,
                "type": type(parser).__name__
            }

            # Best-effort probe of the on-disk ETL cache; the cached parser is
            # optional, so failures are ignored.  (Fixed: this was a bare
            # `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
            try:
                from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser
                EtlCachedParser._init_cache_dir()
                etl_parser_info["cache_dir"] = EtlCachedParser._cache_dir
                etl_parser_info["cached_files"] = len(EtlCachedParser._cache_registry)
            except Exception:
                pass

        return {
            "status": "healthy",
            "timestamp": datetime.now().isoformat(),
            "uptime_seconds": uptime_seconds,
            "uptime_human": f"{uptime_seconds/3600:.1f} hours",
            "registered_sources": len(log_sources),
            "available_parsers": list(parsers.keys()),
            "etl_parser": etl_parser_info,
            "server_info": {
                "name": "mcp-log-analyzer",
                "version": "0.1.0"
            }
        }

    @mcp.tool()
    async def ping() -> Dict[str, Any]:
        """
        Simple ping endpoint for connection testing.

        Returns immediately with a timestamp. Useful for testing
        if the MCP connection is alive and responsive.
        """
        return {
            "pong": True,
            "timestamp": datetime.now().isoformat(),
            "server_time_ms": int(time.time() * 1000)
        }

    @mcp.tool()
    async def echo(message: str) -> Dict[str, Any]:
        """
        Echo back a message for connection testing.

        Args:
            message: The message to echo back

        Returns the message with a timestamp. Useful for testing
        round-trip communication with the server.
        """
        import logging
        logger = logging.getLogger(__name__)
        logger.info(f"Echo tool called with message: {message}")

        return {
            "echo": message,
            "timestamp": datetime.now().isoformat(),
            "received_at": time.time()
        }

    @mcp.tool()
    async def long_running_test(duration_seconds: int = 60) -> Dict[str, Any]:
        """
        Test long-running operations with periodic updates.

        Args:
            duration_seconds: How long to run the test (max 300 seconds)

        This simulates a long-running operation and can be used to test
        timeout handling and connection stability.
        """
        import logging
        logger = logging.getLogger(__name__)

        # Cap duration at 5 minutes
        duration_seconds = min(duration_seconds, 300)

        start_time = time.time()
        logger.info(f"Starting long-running test for {duration_seconds} seconds")

        # Sleep in 10-second slices so progress can be logged periodically.
        # (Fixed: the old range(0, duration, 10) loop always slept a full
        # 10s slice per step, over-sleeping for any duration that is not a
        # multiple of 10 — e.g. duration_seconds=5 slept 15 seconds.)
        full_slices, remaining = divmod(duration_seconds, 10)
        for _ in range(full_slices):
            await asyncio.sleep(10)
            elapsed = time.time() - start_time
            logger.info(f"Long-running test progress: {elapsed:.0f}/{duration_seconds} seconds")

        # Handle remaining time
        if remaining > 0:
            await asyncio.sleep(remaining)

        total_elapsed = time.time() - start_time
        logger.info(f"Long-running test completed after {total_elapsed:.1f} seconds")

        return {
            "status": "completed",
            "requested_duration": duration_seconds,
            "actual_duration": total_elapsed,
            "timestamp": datetime.now().isoformat()
        }
```

--------------------------------------------------------------------------------
/tcp_server.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
Standalone TCP MCP server for remote connections.
"""

import asyncio
import json
import logging
import sys
from pathlib import Path

# Add src to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))

from mcp_log_analyzer.mcp_server.server import mcp


class TCPMCPServer:
    """TCP-based MCP server for remote connections.

    Speaks newline-delimited JSON-RPC 2.0 over a plain TCP socket and
    answers a minimal, hand-rolled subset of the MCP protocol.
    """

    def __init__(self, host="0.0.0.0", port=8080):
        """Remember the bind address; no socket is opened until start()."""
        self.host = host
        self.port = port
        self.logger = logging.getLogger(__name__)

    async def handle_client(self, reader, writer):
        """Handle a single client connection until EOF or a fatal error."""
        client_addr = writer.get_extra_info('peername')
        self.logger.info(f"Client connected from {client_addr}")

        try:
            while True:
                # Read one newline-delimited JSON-RPC message.
                line = await reader.readline()
                if not line:
                    break  # client closed the connection

                # Reset per message so the generic error handler below can
                # safely report an id even if decoding fails early.
                request = None
                try:
                    message = line.decode('utf-8').strip()
                    if not message:
                        continue

                    self.logger.debug(f"Received: {message}")

                    # Parse JSON-RPC message
                    request = json.loads(message)

                    # Process the request through the MCP server
                    response = await self.process_mcp_request(request)

                    # Notifications return None and must get no reply.
                    if response:
                        response_line = json.dumps(response) + '\n'
                        writer.write(response_line.encode('utf-8'))
                        await writer.drain()
                        self.logger.debug(f"Sent: {response_line.strip()}")

                except json.JSONDecodeError as e:
                    self.logger.error(f"JSON decode error: {e}")
                    # Per JSON-RPC 2.0 a parse error carries "id": null,
                    # because the request id could not be read.
                    error_response = {
                        "jsonrpc": "2.0",
                        "id": None,
                        "error": {
                            "code": -32700,
                            "message": "Parse error"
                        }
                    }
                    error_line = json.dumps(error_response) + '\n'
                    writer.write(error_line.encode('utf-8'))
                    await writer.drain()

                except Exception as e:
                    self.logger.error(f"Error processing request: {e}")
                    # Echo the request id (when one was parsed) so the client
                    # can correlate the error with its pending call.
                    error_response = {
                        "jsonrpc": "2.0",
                        "id": request.get('id') if isinstance(request, dict) else None,
                        "error": {
                            "code": -32603,
                            "message": f"Internal error: {str(e)}"
                        }
                    }
                    error_line = json.dumps(error_response) + '\n'
                    writer.write(error_line.encode('utf-8'))
                    await writer.drain()

        except Exception as e:
            self.logger.error(f"Connection error: {e}")
        finally:
            self.logger.info(f"Client {client_addr} disconnected")
            writer.close()
            await writer.wait_closed()

    async def process_mcp_request(self, request):
        """Dispatch one parsed JSON-RPC request and build the response.

        Returns a JSON-RPC response dict, or None for notifications.
        NOTE: this is a simplified, hand-rolled dispatcher — it does not
        delegate to the imported FastMCP server instance.

        Args:
            request: Parsed JSON-RPC request dict.
        """
        method = request.get('method')
        params = request.get('params', {})  # reserved for future dispatch use
        request_id = request.get('id')

        if method == 'initialize':
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "result": {
                    "protocolVersion": "2024-11-05",
                    "capabilities": {
                        "tools": {"listChanged": False},
                        "resources": {"subscribe": False, "listChanged": False},
                        "prompts": {"listChanged": False}
                    },
                    "serverInfo": {
                        "name": "mcp-log-analyzer",
                        "version": "1.0.0"
                    }
                }
            }

        elif method == 'notifications/initialized':
            # No response needed for notifications
            return None

        elif method == 'tools/list':
            # Return available tools
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "result": {
                    "tools": [
                        {
                            "name": "register_log_source",
                            "description": "Register a new log source for analysis",
                            "inputSchema": {
                                "type": "object",
                                "properties": {
                                    "name": {"type": "string"},
                                    "source_type": {"type": "string"},
                                    "path": {"type": "string"}
                                },
                                "required": ["name", "source_type", "path"]
                            }
                        }
                        # Add more tools as needed
                    ]
                }
            }

        else:
            return {
                "jsonrpc": "2.0",
                "id": request_id,
                "error": {
                    "code": -32601,
                    "message": f"Method not found: {method}"
                }
            }

    async def start(self):
        """Bind the listening socket and serve clients until cancelled."""
        self.logger.info(f"Starting MCP TCP server on {self.host}:{self.port}")

        server = await asyncio.start_server(
            self.handle_client,
            self.host,
            self.port
        )

        addr = server.sockets[0].getsockname()
        self.logger.info(f"MCP server listening on {addr[0]}:{addr[1]}")

        async with server:
            await server.serve_forever()


async def main():
    """Parse CLI options, configure logging, and run the TCP server forever."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='MCP Log Analyzer TCP Server')
    arg_parser.add_argument('--host', default='0.0.0.0',
                            help='Host to bind to (default: 0.0.0.0)')
    arg_parser.add_argument('--port', type=int, default=8080,
                            help='Port to bind to (default: 8080)')
    arg_parser.add_argument('--debug', action='store_true',
                            help='Enable debug logging')
    options = arg_parser.parse_args()

    # Configure logging before any server objects are created.
    logging.basicConfig(
        level=logging.DEBUG if options.debug else logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    try:
        await TCPMCPServer(options.host, options.port).start()
    except KeyboardInterrupt:
        print("\nServer stopped.")


# Script entry point: drive the async server with a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/check_server.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
Script to verify the MCP server is working correctly.
This simulates what Claude Code does when it connects to the server.
"""

import json
import subprocess
import sys
import time
from typing import Dict, Any


def send_mcp_request(process: subprocess.Popen, request: Dict[str, Any]) -> Dict[str, Any]:
    """Write one JSON-RPC request to the server's stdin and read its reply.

    Args:
        process: Running MCP server subprocess with binary stdio pipes.
        request: JSON-RPC request payload.

    Returns:
        The parsed JSON-RPC response, or an empty dict when no reply arrived.
    """
    print(f"📤 Sending: {request['method']}")
    process.stdin.write((json.dumps(request) + '\n').encode())
    process.stdin.flush()

    # One newline-delimited response line per request.
    raw = process.stdout.readline().decode().strip()
    if not raw:
        return {}

    reply = json.loads(raw)
    print(f"📥 Response: {reply.get('result', {}).get('meta', {}).get('name', 'Success')}")
    return reply


def send_mcp_notification(process: subprocess.Popen, notification: Dict[str, Any]) -> None:
    """Write a JSON-RPC notification to the server's stdin (no reply expected).

    Args:
        process: Running MCP server subprocess with binary stdio pipes.
        notification: JSON-RPC notification payload.
    """
    print(f"📤 Sending notification: {notification['method']}")
    process.stdin.write((json.dumps(notification) + '\n').encode())
    process.stdin.flush()


def test_mcp_server():
    """Test the MCP server functionality.

    Spawns ``python main.py`` as a child process and drives it over stdio
    the way an MCP client would: initialize handshake, then tools/list and
    resources/list, then a clean terminate.  Results are printed to stdout.
    """
    print("🚀 Testing MCP Log Analyzer Server")
    print("=" * 50)
    
    # Start the server process
    try:
        process = subprocess.Popen(
            [sys.executable, 'main.py'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=False  # We'll handle encoding ourselves
        )
        # NOTE(review): stderr is piped but never drained here; a very
        # chatty server could fill the pipe buffer and stall — confirm.
        
        print("✅ Server process started (PID: {})".format(process.pid))
        
        # Give the server a moment to start
        time.sleep(0.5)
        
        # Test 1: Initialize connection — must be the first request of an
        # MCP session, advertising the protocol version and client identity.
        init_request = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "initialize",
            "params": {
                "protocolVersion": "2024-11-05",
                "capabilities": {
                    "tools": {},
                    "resources": {},
                    "prompts": {}
                },
                "clientInfo": {
                    "name": "test-client",
                    "version": "1.0.0"
                }
            }
        }
        
        init_response = send_mcp_request(process, init_request)
        
        if 'result' in init_response:
            server_info = init_response['result']
            print(f"📋 Server Name: {server_info.get('serverInfo', {}).get('name', 'Unknown')}")
            print(f"📋 Server Version: {server_info.get('serverInfo', {}).get('version', 'Unknown')}")
            
            # Show capabilities
            capabilities = server_info.get('capabilities', {})
            if 'tools' in capabilities:
                print(f"🔧 Tools: Available")
            if 'resources' in capabilities:
                print(f"📂 Resources: Available")
            if 'prompts' in capabilities:
                print(f"💬 Prompts: Available")
        
        # CRITICAL: Send initialized notification to complete the handshake
        initialized_notification = {
            "jsonrpc": "2.0",
            "method": "notifications/initialized",
            "params": {}
        }
        
        send_mcp_notification(process, initialized_notification)
        print("✅ Initialization handshake completed")
        
        # Small delay to ensure the notification is processed
        time.sleep(0.1)
        
        # Test 2: List available tools
        tools_request = {
            "jsonrpc": "2.0",
            "id": 2,
            "method": "tools/list",
            "params": {}
        }
        
        tools_response = send_mcp_request(process, tools_request)
        
        if 'result' in tools_response:
            tools = tools_response['result'].get('tools', [])
            if isinstance(tools, list):
                print(f"\n🔧 Available Tools ({len(tools)}):")
                for tool in tools[:10]:  # Show first 10
                    print(f"  • {tool.get('name', 'Unknown')}: {tool.get('description', 'No description')[:60]}...")
                if len(tools) > 10:
                    print(f"  ... and {len(tools) - 10} more tools")
            else:
                print(f"\n🔧 Tools response: {tools}")
        
        # Test 3: List available resources
        resources_request = {
            "jsonrpc": "2.0",
            "id": 3,
            "method": "resources/list",
            "params": {}
        }
        
        resources_response = send_mcp_request(process, resources_request)
        
        if 'result' in resources_response:
            resources = resources_response['result'].get('resources', [])
            if isinstance(resources, list):
                print(f"\n📂 Available Resources ({len(resources)}):")
                for resource in resources[:10]:  # Show first 10
                    print(f"  • {resource.get('uri', 'Unknown')}: {resource.get('description', 'No description')[:60]}...")
                if len(resources) > 10:
                    print(f"  ... and {len(resources) - 10} more resources")
            else:
                print(f"\n📂 Resources response: {resources}")
        
        print(f"\n✅ MCP Server is working correctly!")
        print(f"\n💡 To use with Claude Code:")
        print(f"   claude mcp add mcp-log-analyzer python main.py")
        print(f"   claude mcp list")
        
        # Clean shutdown
        process.terminate()
        process.wait(timeout=5)
        
    except subprocess.TimeoutExpired:
        # process.wait(timeout=5) above is the only call that raises this.
        print("⚠️  Server didn't respond in time")
        process.kill()
    except Exception as e:
        print(f"❌ Error testing server: {e}")
        # Popen may have failed before 'process' was bound.
        if 'process' in locals():
            process.terminate()


def show_usage_instructions():
    """Print a step-by-step usage guide for the MCP server to stdout."""
    banner = "=" * 50
    # Each entry corresponds to one printed line; leading "\n" entries start
    # a new visual section, exactly as the guide is meant to read.
    guide = [
        "\n" + banner,
        "📖 HOW TO USE THE MCP SERVER",
        banner,
        "\n1. 🚀 START THE SERVER:",
        "   python main.py",
        "   (Server runs silently, waiting for MCP connections)",
        "\n2. 🔗 CONNECT WITH CLAUDE CODE:",
        "   claude mcp add mcp-log-analyzer python main.py",
        "   claude mcp list",
        "\n3. 📊 USE IN CLAUDE CONVERSATIONS:",
        "   - Register log sources",
        "   - Analyze CSV/Event logs",
        "   - Monitor system resources",
        "   - Get network diagnostics",
        "\n4. 🧪 TEST THE SERVER:",
        "   python check_server.py",
        "\n📚 Available Tools:",
        "   • register_log_source - Add new log sources",
        "   • list_log_sources - View registered sources",
        "   • query_logs - Search and filter logs",
        "   • analyze_logs - Perform log analysis",
        "   • test_network_tools_availability - Check network tools",
        "   • diagnose_network_issues - Network diagnostics",
        "   • And many more...",
    ]
    print("\n".join(guide))


if __name__ == "__main__":
    # "--usage" prints only the guide; any other invocation runs the
    # smoke test first and then shows the guide.
    usage_only = len(sys.argv) > 1 and sys.argv[1] == "--usage"
    if not usage_only:
        test_mcp_server()
    show_usage_instructions()
```

--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------

```python
"""
Test utilities to access resource functions for testing.
"""

import asyncio

from mcp.server import FastMCP


# Create standalone resource functions for testing
def create_test_functions():
    """Create standalone versions of resource functions for testing.

    Registers simplified stand-ins for the process/Windows/Linux/netstat
    resources on a throwaway FastMCP instance and returns the underlying
    coroutine functions keyed by name, so tests can await them directly
    without going through the MCP protocol.

    Returns:
        A dict mapping function name to the async resource function.
    """
    # Throwaway server instance: only needed so the @resource decorators can
    # be applied; it is discarded once the functions have been captured.
    # (The previously unused ``temp_resources`` dict and the unused import of
    # the real register_* functions have been removed.)
    temp_mcp = FastMCP("test", "1.0.0")

    # --- Process resources (real implementation, backed by psutil) ---
    @temp_mcp.resource("system://process-list")
    async def get_process_list() -> str:
        """Return a formatted snapshot of system load and the top processes."""
        from datetime import datetime

        import psutil

        result = []
        result.append("=== Process List ===")
        result.append(f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        result.append(f"CPU Count: {psutil.cpu_count()}")
        result.append(
            f"Total Memory: {psutil.virtual_memory().total / (1024**3):.2f} GB"
        )
        result.append(
            f"Available Memory: {psutil.virtual_memory().available / (1024**3):.2f} GB"
        )
        result.append(f"CPU Usage: {psutil.cpu_percent(interval=1)}%\n")

        result.append(
            f"{'PID':<8} {'Name':<25} {'CPU%':<8} {'Memory%':<10} {'Status':<12}"
        )
        result.append("-" * 75)

        # Collect per-process stats, skipping processes that vanish or deny
        # access mid-iteration.
        processes = []
        for proc in psutil.process_iter(
            ["pid", "name", "cpu_percent", "memory_percent", "status"]
        ):
            try:
                proc_info = proc.info
                if proc_info["cpu_percent"] is None:
                    # The first cpu_percent sample can be None; take a short
                    # explicit sample instead.
                    proc_info["cpu_percent"] = proc.cpu_percent(interval=0.1)
                processes.append(proc_info)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

        # Sort by CPU usage (descending) and show the top 20.
        processes.sort(key=lambda x: x.get("cpu_percent", 0), reverse=True)

        for proc in processes[:20]:
            result.append(
                f"{proc['pid']:<8} "
                f"{proc['name'][:24]:<25} "
                f"{proc.get('cpu_percent', 0):<8.1f} "
                f"{proc.get('memory_percent', 0):<10.2f} "
                f"{proc.get('status', 'unknown'):<12}"
            )

        result.append(f"\nTotal processes: {len(processes)}")
        return "\n".join(result)

    # --- Windows resources (header-only stubs for tests) ---
    @temp_mcp.resource("system://windows-event-logs")
    async def get_windows_event_logs() -> str:
        """Default Windows event log view: last 10 entries."""
        return await get_windows_event_logs_with_count("10")

    @temp_mcp.resource("system://windows-event-logs/last/{count}")
    async def get_windows_event_logs_with_count(count: str) -> str:
        """Header stub for the last ``count`` Windows event log entries."""
        import platform

        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."
        return f"=== Windows Event Logs (Last {count} entries) ==="

    @temp_mcp.resource("system://windows-event-logs/time/{duration}")
    async def get_windows_event_logs_by_time(duration: str) -> str:
        """Header stub for Windows event log entries within ``duration``."""
        import platform

        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."
        return f"=== Windows Event Logs (Since {duration} ago) ==="

    # --- Linux resources (header-only stubs for tests) ---
    @temp_mcp.resource("system://linux-logs")
    async def get_linux_system_logs() -> str:
        """Default Linux log view: last 50 lines."""
        return await get_linux_logs_with_count("50")

    @temp_mcp.resource("system://linux-logs/last/{count}")
    async def get_linux_logs_with_count(count: str) -> str:
        """Header stub for the last ``count`` lines of Linux system logs."""
        import platform

        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."
        return f"=== Linux System Logs (Last {count} lines) ==="

    @temp_mcp.resource("system://linux-logs/time/{duration}")
    async def get_linux_logs_by_time(duration: str) -> str:
        """Header stub for Linux system logs within ``duration``."""
        import platform

        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."
        return f"=== Linux System Logs (Since {duration} ago) ==="

    # --- Network resources (header-only stubs for tests) ---
    @temp_mcp.resource("system://netstat")
    async def get_netstat() -> str:
        """Default netstat view: listening ports."""
        return await get_netstat_listening()

    @temp_mcp.resource("system://netstat/listening")
    async def get_netstat_listening() -> str:
        """Header stub for listening ports."""
        return "=== Listening Ports ==="

    @temp_mcp.resource("system://netstat/established")
    async def get_netstat_established() -> str:
        """Header stub for established connections."""
        return "=== Established Connections ==="

    @temp_mcp.resource("system://netstat/all")
    async def get_netstat_all() -> str:
        """Header stub for all network connections."""
        return "=== All Network Connections ==="

    @temp_mcp.resource("system://netstat/stats")
    async def get_netstat_stats() -> str:
        """Header stub for network statistics."""
        return "=== Network Statistics ==="

    @temp_mcp.resource("system://netstat/routing")
    async def get_netstat_routing() -> str:
        """Header stub for the routing table."""
        return "=== Routing Table ==="

    @temp_mcp.resource("system://netstat/port/{port}")
    async def get_netstat_port(port: str) -> str:
        """Header stub for connections on a specific port (validates ``port``)."""
        try:
            int(port)  # validate only; the original string is echoed as-is
        except ValueError:
            return f"Invalid port number: {port}"
        return f"=== Connections on Port {port} ==="

    # Expose the captured coroutine functions by name.
    return {
        "get_process_list": get_process_list,
        "get_windows_event_logs": get_windows_event_logs,
        "get_windows_event_logs_with_count": get_windows_event_logs_with_count,
        "get_windows_event_logs_by_time": get_windows_event_logs_by_time,
        "get_linux_system_logs": get_linux_system_logs,
        "get_linux_logs_with_count": get_linux_logs_with_count,
        "get_linux_logs_by_time": get_linux_logs_by_time,
        "get_netstat": get_netstat,
        "get_netstat_listening": get_netstat_listening,
        "get_netstat_established": get_netstat_established,
        "get_netstat_all": get_netstat_all,
        "get_netstat_stats": get_netstat_stats,
        "get_netstat_routing": get_netstat_routing,
        "get_netstat_port": get_netstat_port,
    }


# Materialize the standalone resource functions once at import time.
_test_functions = create_test_functions()

# Re-export each captured coroutine function as a module-level name
# (get_process_list, get_windows_event_logs, ..., get_netstat_port) so tests
# can import them directly; equivalent to fourteen explicit assignments.
globals().update(_test_functions)

```

--------------------------------------------------------------------------------
/main_tcp.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
TCP-enabled entry point for the MCP Log Analyzer server.
"""

import asyncio
import argparse
import sys
import json
import logging
from pathlib import Path
from typing import Optional

# Add src to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))

from mcp_log_analyzer.mcp_server.server import mcp

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class TCPTransport:
    """Newline-delimited JSON-RPC transport over an asyncio TCP stream pair."""

    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        self.reader = reader
        self.writer = writer
        self._buffer = b""      # bytes received but not yet framed
        self._closed = False    # set when the peer closes or we close

    async def read_message(self) -> Optional[dict]:
        """Read a complete JSON-RPC message."""
        try:
            while True:
                # Frame on '\n'; partition returns an empty separator when
                # no full line is buffered yet.
                line, sep, remainder = self._buffer.partition(b"\n")
                if sep:
                    self._buffer = remainder
                    if line:
                        return json.loads(line.decode("utf-8"))
                    # Blank line: keep scanning the buffer.
                    continue

                chunk = await self.reader.read(4096)
                if not chunk:
                    # EOF from the peer.
                    self._closed = True
                    return None
                self._buffer += chunk

        except Exception as e:
            logger.error(f"Error reading message: {e}")
            return None

    async def write_message(self, message: dict) -> None:
        """Write a JSON-RPC message."""
        try:
            payload = (json.dumps(message) + "\n").encode("utf-8")
            self.writer.write(payload)
            await self.writer.drain()
        except Exception as e:
            logger.error(f"Error writing message: {e}")
            raise

    def is_closed(self) -> bool:
        """Check if transport is closed."""
        return self._closed

    async def close(self) -> None:
        """Close the transport."""
        self._closed = True
        self.writer.close()
        await self.writer.wait_closed()


async def handle_client(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    """Serve JSON-RPC requests from a single TCP client until it disconnects.

    Only a minimal subset of the MCP protocol is handled here (initialize,
    tools/list, shutdown); every other method gets a -32601 error response.
    """
    addr = writer.get_extra_info('peername')
    logger.info(f"New client connected from {addr}")

    transport = TCPTransport(reader, writer)

    try:
        while not transport.is_closed():
            request = await transport.read_message()
            if request is None:
                break

            logger.debug(f"Received request: {request}")

            method = request.get('method')

            # Build the method-specific part of the response envelope.
            if method == 'initialize':
                payload = {
                    "result": {
                        "protocolVersion": "2024-11-05",
                        "capabilities": {
                            "tools": {"listChanged": True},
                            "resources": {"subscribe": True, "listChanged": True},
                            "prompts": {"listChanged": True},
                        },
                        "serverInfo": {
                            "name": "mcp-log-analyzer",
                            "version": "0.1.0",
                        },
                    }
                }
            elif method == 'tools/list':
                # Simplified, hard-coded list; a full implementation would
                # query the MCP server itself.
                payload = {
                    "result": {
                        "tools": [
                            {
                                "name": "register_log_source",
                                "description": "Register a new log source for analysis",
                            },
                            {
                                "name": "query_logs",
                                "description": "Query logs with filters",
                            },
                        ]
                    }
                }
            elif method == 'shutdown':
                payload = {"result": {}}
            else:
                payload = {
                    "error": {
                        "code": -32601,
                        "message": "Method not found",
                    }
                }

            await transport.write_message(
                {"jsonrpc": "2.0", "id": request.get('id'), **payload}
            )

            if method == 'shutdown':
                break

    except Exception as e:
        logger.error(f"Error handling client {addr}: {e}", exc_info=True)
    finally:
        logger.info(f"Client {addr} disconnected")
        await transport.close()


async def run_mcp_stdio_server():
    """Run the MCP server in stdio mode from within an event loop.

    FastMCP's ``run()`` is synchronous, so it is dispatched to the default
    thread-pool executor of the currently running loop.
    """
    # asyncio.get_event_loop() is deprecated inside coroutines; fetch the
    # running loop explicitly instead.
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, mcp.run)


async def main_tcp(host="0.0.0.0", port=8080):
    """Run the MCP server on a TCP port.

    Accepts client connections forever, handing each one to handle_client.
    """
    logger.info(f"Starting MCP Log Analyzer TCP server on {host}:{port}")

    server = await asyncio.start_server(handle_client, host, port)

    # Report the actual bound address (useful when port=0 picks a free port).
    bound = server.sockets[0].getsockname()
    logger.info(f"MCP server listening on {bound[0]}:{bound[1]}")

    async with server:
        await server.serve_forever()


def main():
    """Parse arguments and run the appropriate server."""
    parser = argparse.ArgumentParser(description='MCP Log Analyzer Server')
    parser.add_argument(
        '--tcp', action='store_true',
        help='Run server on TCP port instead of stdio',
    )
    parser.add_argument(
        '--host', default='0.0.0.0',
        help='Host to bind to (default: 0.0.0.0)',
    )
    parser.add_argument(
        '--port', type=int, default=8080,
        help='Port to bind to (default: 8080)',
    )
    parser.add_argument(
        '--debug', action='store_true',
        help='Enable debug logging',
    )
    args = parser.parse_args()

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if not args.tcp:
        # Default: stdio mode (original behavior).
        mcp.run()
        return

    try:
        asyncio.run(main_tcp(args.host, args.port))
    except KeyboardInterrupt:
        logger.info("Server stopped by user")


if __name__ == "__main__":
    # CLI entry point: dispatches to stdio or TCP mode based on arguments.
    main()
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/linux_resources.py:
--------------------------------------------------------------------------------

```python
"""
Linux system monitoring MCP resources.
"""

import platform
import subprocess
from pathlib import Path

from mcp.server import FastMCP


def register_linux_resources(mcp: FastMCP):
    """Register all Linux-related resources with the MCP server."""

    from ..server import parse_time_param

    @mcp.resource("system://linux-logs")
    async def get_linux_system_logs() -> str:
        """
        Get Linux system logs with default parameters.

        Use parameterized versions for more control:
        - system://linux-logs/last/50 - Last 50 lines
        - system://linux-logs/time/30m - Last 30 minutes
        - system://linux-logs/range/2025-01-07 13:00/2025-01-07 14:00 - Time range
        """
        # Default to last 50 lines
        return await get_linux_logs_with_count("50")

    @mcp.resource("system://linux-logs/last/{count}")
    async def get_linux_logs_with_count(count: str) -> str:
        """
        Get recent Linux system logs by line count.

        Args:
            count: Number of lines to retrieve (e.g., "100")

        Returns:
            Combined journal, syslog and application-log excerpts, or an
            error message for invalid input / non-Linux hosts.
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            line_count = int(count)
            if line_count <= 0:
                # A non-positive count would otherwise slice the whole file
                # (lines[-0:] == lines), so reject it explicitly.
                return f"Invalid count parameter: {count}"

            result = []
            result.append(f"=== Linux System Logs (Last {line_count} lines) ===\n")

            # Systemd journal first (present on most modern distros).
            try:
                result.append("\n--- Systemd Journal ---")
                journal_output = subprocess.run(
                    ["journalctl", "-n", str(line_count), "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            # Classic syslog files; the first one that exists wins.
            syslog_paths = [
                "/var/log/syslog",  # Debian/Ubuntu
                "/var/log/messages",  # RHEL/CentOS
            ]

            for syslog_path in syslog_paths:
                if Path(syslog_path).exists():
                    try:
                        result.append(f"\n--- {syslog_path} ---")
                        with open(syslog_path, "r") as f:
                            lines = f.readlines()
                        # Strip trailing newlines so the final "\n".join does
                        # not double-space the output.
                        result.extend(
                            line.rstrip("\n") for line in lines[-line_count:]
                        )
                        break
                    except PermissionError:
                        result.append(f"Permission denied reading {syslog_path}")
                    except Exception as e:
                        result.append(f"Error reading {syslog_path}: {str(e)}")

            # Common application logs (best-effort).
            app_logs = {
                "Apache": ["/var/log/apache2/error.log", "/var/log/httpd/error_log"],
                "Nginx": ["/var/log/nginx/error.log"],
                "MySQL": ["/var/log/mysql/error.log", "/var/log/mysqld.log"],
                "PostgreSQL": ["/var/log/postgresql/postgresql.log"],
            }

            # Show fewer lines for app logs, but always at least one:
            # line_count // 5 is 0 for counts below 5, and lines[-0:] would
            # dump the entire file.
            app_tail = max(1, line_count // 5)

            result.append("\n--- Application Logs ---")
            for app_name, log_paths in app_logs.items():
                for log_path in log_paths:
                    if not Path(log_path).exists():
                        continue
                    try:
                        with open(log_path, "r") as f:
                            lines = f.readlines()
                    except Exception:
                        # Best-effort: an unreadable log is skipped and the
                        # next candidate path for this app is tried.
                        continue
                    # Emit the header only once the file was actually read,
                    # so a failed read no longer leaves a dangling header.
                    result.append(f"\n{app_name} ({log_path}):")
                    result.extend(line.rstrip("\n") for line in lines[-app_tail:])
                    break

            return "\n".join(result)

        except ValueError:
            return f"Invalid count parameter: {count}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"

    @mcp.resource("system://linux-logs/time/{duration}")
    async def get_linux_logs_by_time(duration: str) -> str:
        """
        Get Linux logs from the last N minutes/hours/days.

        Args:
            duration: Time duration (e.g., "30m", "2h", "1d")
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            start_time = parse_time_param(duration)
            if not start_time:
                return "Invalid duration format. Use format like '30m', '2h', or '1d'."

            result = []
            result.append(
                f"=== Linux System Logs (Since {start_time.strftime('%Y-%m-%d %H:%M:%S')}) ===\n"
            )

            # Systemd journal supports time filtering natively via --since.
            try:
                result.append("\n--- Systemd Journal ---")
                since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
                journal_output = subprocess.run(
                    ["journalctl", since_arg, "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            # For syslog we match lines by their date prefix. Traditional
            # syslog pads single-digit days with a space ("Jan  7"), while
            # strftime %d zero-pads ("Jan 07") and would never match them,
            # so accept both spellings.
            date_tokens = {
                start_time.strftime("%b %d"),
                f"{start_time.strftime('%b')} {start_time.day:2}",
            }

            syslog_paths = [
                "/var/log/syslog",  # Debian/Ubuntu
                "/var/log/messages",  # RHEL/CentOS
            ]

            for syslog_path in syslog_paths:
                if Path(syslog_path).exists():
                    try:
                        result.append(f"\n--- {syslog_path} ---")
                        matching_lines = []
                        with open(syslog_path, "r") as f:
                            for line in f:
                                # Simplified filter: keeps lines stamped with
                                # the start DATE only, not a true
                                # ">= start_time" comparison.
                                if any(tok in line for tok in date_tokens):
                                    matching_lines.append(line.rstrip("\n"))

                        if matching_lines:
                            result.extend(matching_lines)
                        else:
                            result.append(f"No entries found since {start_time}")
                        break
                    except PermissionError:
                        result.append(f"Permission denied reading {syslog_path}")
                    except Exception as e:
                        result.append(f"Error reading {syslog_path}: {str(e)}")

            return "\n".join(result)

        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"

    @mcp.resource("system://linux-logs/range/{start}/{end}")
    async def get_linux_logs_by_range(start: str, end: str) -> str:
        """
        Get Linux logs within a specific time range.

        Args:
            start: Start time (e.g., "2025-01-07 13:00")
            end: End time (e.g., "2025-01-07 14:00")
        """
        if platform.system() != "Linux":
            return "This resource is only available on Linux systems."

        try:
            start_time = parse_time_param(start)
            end_time = parse_time_param(end)

            if not start_time or not end_time:
                return "Invalid time format. Use format like '2025-01-07 13:00'."
            if start_time > end_time:
                # Catch inverted ranges early instead of returning an empty
                # journal result.
                return "Invalid time range: start must be before end."

            result = []
            result.append(
                f"=== Linux System Logs ({start_time.strftime('%Y-%m-%d %H:%M')} to {end_time.strftime('%Y-%m-%d %H:%M')}) ===\n"
            )

            # journalctl handles the range natively via --since/--until.
            try:
                result.append("\n--- Systemd Journal ---")
                since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
                until_arg = f"--until={end_time.strftime('%Y-%m-%d %H:%M:%S')}"
                journal_output = subprocess.run(
                    ["journalctl", since_arg, until_arg, "--no-pager"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_output.returncode == 0:
                    result.append(journal_output.stdout)
                else:
                    result.append(
                        "Unable to read systemd journal (may require permissions)"
                    )
            except Exception as e:
                result.append(f"Systemd journal not available: {str(e)}")

            return "\n".join(result)

        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Linux logs: {str(e)}"

```

--------------------------------------------------------------------------------
/tests/test_mcp_server.py:
--------------------------------------------------------------------------------

```python
"""Tests for the MCP server."""

import platform

import pytest
from mcp.server import FastMCP

from mcp_log_analyzer.mcp_server.server import (
    log_sources,
    mcp,
)
from mcp_log_analyzer.mcp_server.test_tool_utils import (
    AnalyzeLogsRequest,
    QueryLogsRequest,
    RegisterLogSourceRequest,
    analyze_logs,
    delete_log_source,
    get_log_source,
    list_log_sources,
    query_logs,
    register_log_source,
)


@pytest.mark.asyncio
async def test_server_initialization():
    """Test that the MCP server is properly initialized."""
    assert isinstance(mcp, FastMCP)
    # FastMCP exposes the registration decorators rather than a public
    # name attribute, so we check for those instead.
    for attr in ("tool", "resource", "prompt"):
        assert hasattr(mcp, attr)


@pytest.mark.asyncio
async def test_register_log_source():
    """Test registering a log source."""
    log_sources.clear()  # start from a clean registry

    result = await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    # The confirmation message should name the new source, and the registry
    # should now contain it with the requested type.
    assert "message" in result
    assert "test-source" in result["message"]
    assert "test-source" in log_sources
    assert log_sources["test-source"].type == "json"


@pytest.mark.asyncio
async def test_list_log_sources():
    """Test listing log sources."""
    log_sources.clear()  # start from a clean registry
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    listing = await list_log_sources()

    assert "sources" in listing and "count" in listing
    assert listing["count"] == 1
    sources = listing["sources"]
    assert len(sources) == 1
    assert sources[0]["name"] == "test-source"


@pytest.mark.asyncio
async def test_get_log_source():
    """Test getting a specific log source."""
    log_sources.clear()  # start from a clean registry
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    # Known source is returned by name.
    found = await get_log_source("test-source")
    assert "source" in found
    assert found["source"]["name"] == "test-source"

    # Unknown source yields an error payload rather than raising.
    missing = await get_log_source("non-existent")
    assert "error" in missing


@pytest.mark.asyncio
async def test_delete_log_source():
    """Test deleting a log source."""
    log_sources.clear()  # start from a clean registry
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    deleted = await delete_log_source("test-source")
    assert "message" in deleted
    assert "test-source" not in log_sources

    # Deleting something that is not registered reports an error.
    missing = await delete_log_source("non-existent")
    assert "error" in missing


@pytest.mark.asyncio
async def test_query_logs():
    """Test querying logs."""
    log_sources.clear()  # start from a clean registry
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    result = await query_logs(QueryLogsRequest(source_name="test-source", limit=10))

    for key in ("logs", "count", "source"):
        assert key in result
    assert result["source"] == "test-source"


@pytest.mark.asyncio
async def test_analyze_logs():
    """Test analyzing logs."""
    log_sources.clear()  # start from a clean registry
    await register_log_source(
        RegisterLogSourceRequest(
            name="test-source", source_type="json", path="/tmp/test.json"
        )
    )

    result = await analyze_logs(
        AnalyzeLogsRequest(source_name="test-source", analysis_type="summary")
    )

    for key in ("result", "source", "analysis_type"):
        assert key in result
    assert result["analysis_type"] == "summary"


@pytest.mark.asyncio
async def test_system_resources():
    """Test system monitoring resources.

    The Windows/Linux resources return a real report on their own platform
    and a "only available on ..." message everywhere else; both outcomes are
    asserted. (The previously unused parse_time_param import was removed.)
    """
    from mcp_log_analyzer.mcp_server.test_utils import (
        get_linux_logs_by_time,
        get_linux_logs_with_count,
        get_linux_system_logs,
        get_process_list,
        get_windows_event_logs,
        get_windows_event_logs_by_time,
        get_windows_event_logs_with_count,
    )

    # Process list works on every platform.
    process_list = await get_process_list()
    assert isinstance(process_list, str)
    for marker in ("Process List", "PID", "CPU%", "Memory%"):
        assert marker in process_list

    # Windows-only resources: (output, expected marker when on Windows).
    windows_cases = [
        (await get_windows_event_logs(), "Windows Event Logs"),
        (await get_windows_event_logs_with_count("5"), "Last 5 entries"),
        (await get_windows_event_logs_by_time("30m"), "Windows Event Logs"),
    ]
    for output, marker in windows_cases:
        assert isinstance(output, str)
        if platform.system() == "Windows":
            assert marker in output
        else:
            assert "only available on Windows" in output

    # Linux-only resources behave symmetrically.
    linux_cases = [
        (await get_linux_system_logs(), "Linux System Logs"),
        (await get_linux_logs_with_count("20"), "Last 20 lines"),
        (await get_linux_logs_by_time("1h"), "Linux System Logs"),
    ]
    for output, marker in linux_cases:
        assert isinstance(output, str)
        if platform.system() == "Linux":
            assert marker in output
        else:
            assert "only available on Linux" in output


@pytest.mark.asyncio
async def test_time_parsing():
    """Test time parameter parsing function.

    Covers relative offsets (m/h/d), absolute timestamps, the "none"
    sentinel, and rejection of invalid input.
    """
    from datetime import datetime

    from mcp_log_analyzer.mcp_server.server import parse_time_param

    # Relative time parsing: result must be a datetime in the past.
    result = parse_time_param("30m")
    assert result is not None
    assert isinstance(result, datetime)
    assert result < datetime.now()

    result = parse_time_param("2h")
    assert result is not None
    assert isinstance(result, datetime)

    result = parse_time_param("1d")
    assert result is not None
    assert isinstance(result, datetime)

    # Absolute time parsing: each component round-trips.
    result = parse_time_param("2025-01-07 13:00")
    assert result is not None
    assert isinstance(result, datetime)
    assert result.year == 2025
    assert result.month == 1
    assert result.day == 7
    assert result.hour == 13

    # Invalid formats must raise ValueError.
    with pytest.raises(ValueError):
        parse_time_param("invalid")

    # "none" is the explicit sentinel for "no time bound".
    result = parse_time_param("none")
    assert result is None


@pytest.mark.asyncio
async def test_netstat_resources():
    """Test netstat network monitoring resources."""
    # Import the netstat resource functions under test.
    from mcp_log_analyzer.mcp_server.test_utils import (
        get_netstat,
        get_netstat_all,
        get_netstat_established,
        get_netstat_listening,
        get_netstat_port,
        get_netstat_routing,
        get_netstat_stats,
    )

    # Each parameterless resource returns a string containing its heading,
    # checked in the same order the resources are exercised.
    heading_checks = [
        (get_netstat, "Listening Ports"),
        (get_netstat_listening, "Listening Ports"),
        (get_netstat_established, "Established Connections"),
        (get_netstat_all, "All Network Connections"),
        (get_netstat_stats, "Network Statistics"),
        (get_netstat_routing, "Routing Table"),
    ]
    for resource, heading in heading_checks:
        output = await resource()
        assert isinstance(output, str)
        assert heading in output

    # Port-specific resource with a common port.
    by_port = await get_netstat_port("80")
    assert isinstance(by_port, str)
    assert "Connections on Port 80" in by_port

    # Non-numeric ports are rejected with a readable error message.
    bad_port = await get_netstat_port("invalid")
    assert isinstance(bad_port, str)
    assert "Invalid port number" in bad_port

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/csv_parser.py:
--------------------------------------------------------------------------------

```python
"""CSV log parser implementation."""

import csv
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union

from ..core.models import LogRecord, LogSource, LogType
from .base import BaseParser


class CsvLogParser(BaseParser):
    """Parser for CSV log files.

    Converts delimiter-separated text into LogRecord objects, deriving
    field names from a header row (or configured ``field_names``), and
    offers summary/pattern/anomaly analysis over parsed records.
    """

    # Candidate timestamp field names, checked in order.
    _TIMESTAMP_FIELDS = (
        "timestamp",
        "time",
        "date",
        "datetime",
        "@timestamp",
        "created_at",
        "field_0",
    )

    # Timestamp formats tried in order for each candidate field.
    _TIMESTAMP_FORMATS = (
        "%Y-%m-%d %H:%M:%S.%f",
        "%Y-%m-%d %H:%M:%S",
        "%Y/%m/%d %H:%M:%S",
        "%m/%d/%Y %H:%M:%S",
        "%d/%m/%Y %H:%M:%S",
        "%Y-%m-%d",
        "%m/%d/%Y",
        "%d/%m/%Y",
    )

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize CSV parser.

        Args:
            config: Parser configuration. Recognized keys: ``delimiter``
                (default ","), ``has_header`` (default True),
                ``field_names`` (used when ``has_header`` is False), and
                ``header_row`` (accepted but currently unused).
        """
        super().__init__(config)
        self.delimiter: str = self.config.get("delimiter", ",")
        # NOTE(review): header_row is stored but never consulted; the header
        # is always taken to be the first row when has_header is True.
        self.header_row: int = self.config.get("header_row", 0)
        self.has_header: bool = self.config.get("has_header", True)
        self.field_names: List[str] = self.config.get("field_names", [])

    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse CSV log records from a file.

        Args:
            source: The log source information.
            file_path: Path to the CSV file.

        Yields:
            LogRecord objects parsed from the CSV file.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"Log file not found: {file_path}")

        with open(path, "r", encoding="utf-8") as file:
            yield from self.parse_content(source, file.read())

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse CSV log records from content string.

        Args:
            source: The log source information.
            content: CSV content string.

        Yields:
            LogRecord objects parsed from the CSV content.
        """
        lines = content.strip().split("\n")
        reader = csv.reader(lines, delimiter=self.delimiter)

        # Determine field names from the header row or configuration.
        if self.has_header:
            try:
                field_names = next(reader)
            except StopIteration:
                return
        else:
            field_names = self.field_names or [
                f"field_{i}" for i in range(len(lines[0].split(self.delimiter)))
            ]

        for row in reader:
            if not row:
                continue

            # Map each cell to its field name; rows longer than the header
            # get synthetic "field_N" names for the extra cells.
            record_data = {}
            for i, value in enumerate(row):
                field_name = field_names[i] if i < len(field_names) else f"field_{i}"
                record_data[field_name] = (
                    value.strip() if isinstance(value, str) else value
                )

            yield LogRecord(
                source_id=source.id,
                timestamp=self._parse_timestamp(record_data),
                data=record_data,
                raw_data=self.delimiter.join(row),
            )

    def _parse_timestamp(self, record_data: Dict[str, Any]) -> Optional[datetime]:
        """Parse a timestamp from record data.

        Args:
            record_data: Record data dictionary.

        Returns:
            The first value among _TIMESTAMP_FIELDS that parses with one of
            _TIMESTAMP_FORMATS, or None if nothing parses.
        """
        for field in self._TIMESTAMP_FIELDS:
            if field not in record_data:
                continue
            timestamp_str = str(record_data[field])
            for fmt in self._TIMESTAMP_FORMATS:
                try:
                    return datetime.strptime(timestamp_str, fmt)
                except ValueError:
                    continue

        return None

    def analyze(
        self, records: List[LogRecord], analysis_type: str = "summary"
    ) -> Dict[str, Any]:
        """Analyze CSV log records.

        Args:
            records: List of log records to analyze.
            analysis_type: Type of analysis to perform ("summary",
                "pattern", or "anomaly").

        Returns:
            Analysis results dictionary.
        """
        if not records:
            return {
                "analysis_type": analysis_type,
                "summary": {"total_records": 0, "message": "No records to analyze"},
            }

        total_records = len(records)
        records_with_timestamps = sum(1 for r in records if r.timestamp is not None)

        # Time range analysis over records that carry a timestamp.
        timestamps = [r.timestamp for r in records if r.timestamp is not None]
        time_range = {}
        if timestamps:
            earliest = min(timestamps)
            latest = max(timestamps)
            time_range = {
                "earliest": earliest.isoformat(),
                "latest": latest.isoformat(),
                "span_hours": (latest - earliest).total_seconds() / 3600,
            }

        # Field coverage: how many records contain each field.
        all_fields = set()
        field_counts: Dict[str, int] = {}
        for record in records:
            for field in record.data.keys():
                all_fields.add(field)
                field_counts[field] = field_counts.get(field, 0) + 1

        # Value analysis for up to 10 fields (set iteration order is
        # arbitrary, so the "top 10 fields" selection is arbitrary too).
        value_analysis = {}
        for field in list(all_fields)[:10]:
            values = [
                str(record.data.get(field, ""))
                for record in records
                if field in record.data
            ]
            # Count values once (O(n)) rather than calling list.count inside
            # the sort key (O(n^2)).
            counts: Dict[str, int] = {}
            for value in values:
                counts[value] = counts.get(value, 0) + 1
            value_analysis[field] = {
                "total_values": len(values),
                "unique_values": len(counts),
                "top_values": sorted(counts, key=counts.get, reverse=True)[:5],
            }

        summary = {
            "total_records": total_records,
            "records_with_timestamps": records_with_timestamps,
            "time_range": time_range,
            "total_fields": len(all_fields),
            "field_names": list(all_fields),
            "field_coverage": field_counts,
            "value_analysis": value_analysis,
        }

        result = {"analysis_type": analysis_type, "summary": summary}

        if analysis_type == "pattern":
            result["patterns"] = self._analyze_patterns(records)
        elif analysis_type == "anomaly":
            result["anomalies"] = self._analyze_anomalies(records)

        return result

    def _analyze_patterns(self, records: List[LogRecord]) -> List[Dict[str, Any]]:
        """Analyze patterns in the log records.

        Assumes fabric-trace positional columns: presumably field_4 holds
        the component and field_1 the log level — TODO confirm against the
        actual trace format.
        """
        patterns = []

        component_counts: Dict[Any, int] = {}
        level_counts: Dict[Any, int] = {}

        for record in records:
            if "field_4" in record.data:  # Component field
                component = record.data["field_4"]
                component_counts[component] = component_counts.get(component, 0) + 1

            if "field_1" in record.data:  # Level field
                level = record.data["field_1"]
                level_counts[level] = level_counts.get(level, 0) + 1

        if component_counts:
            patterns.append(
                {
                    "type": "component_frequency",
                    "description": "Most active components",
                    "data": dict(
                        sorted(
                            component_counts.items(), key=lambda x: x[1], reverse=True
                        )[:10]
                    ),
                }
            )

        if level_counts:
            patterns.append(
                {
                    "type": "log_level_distribution",
                    "description": "Log level distribution",
                    "data": level_counts,
                }
            )

        return patterns

    def _analyze_anomalies(self, records: List[LogRecord]) -> List[Dict[str, Any]]:
        """Analyze anomalies in the log records.

        Flags gaps larger than 10x the mean between consecutive timestamps.
        NOTE(review): assumes records are chronologically ordered —
        out-of-order input produces negative diffs; confirm callers sort
        before analysis. Only runs on datasets of more than 100 records.
        """
        anomalies = []

        if len(records) > 100:
            timestamps = [r.timestamp for r in records if r.timestamp is not None]
            if len(timestamps) > 1:
                time_diffs = [
                    (timestamps[i + 1] - timestamps[i]).total_seconds()
                    for i in range(len(timestamps) - 1)
                ]
                avg_diff = sum(time_diffs) / len(time_diffs)
                large_gaps = [diff for diff in time_diffs if diff > avg_diff * 10]

                if large_gaps:
                    anomalies.append(
                        {
                            "type": "time_gap_anomaly",
                            "description": f"Found {len(large_gaps)} unusually large time gaps",
                            "details": f"Average gap: {avg_diff:.2f}s, Max gap: {max(large_gaps):.2f}s",
                        }
                    )

        return anomalies

```

--------------------------------------------------------------------------------
/docs/example-context-docs/mcp-ai-agent-dev-task.md:
--------------------------------------------------------------------------------

```markdown
# MCP AI Agent Development Task

## Project Overview
Build a command-line AI agent in Python that connects to and utilizes an MCP (Model Context Protocol) server running on the local network. The agent should provide an interactive CLI interface for users to communicate with the AI while leveraging MCP server capabilities.

## Project Structure
```
mcp-ai-agent/
├── src/
│   ├── __init__.py
│   ├── mcp_client.py      # MCP server connection handling
│   ├── ai_agent.py        # AI agent logic and processing
│   ├── cli.py             # Command-line interface
│   └── config.py          # Configuration management
├── tests/
│   ├── __init__.py
│   ├── test_mcp_client.py
│   ├── test_ai_agent.py
│   └── test_cli.py
├── .env.example
├── requirements.txt
├── setup.py
├── README.md
└── main.py                # Entry point
```

## Development Prompts for Claude Code

### Phase 1: Project Setup and Core Structure

**Prompt 1.1 - Initialize Project**
```
Create a new Python project directory called 'mcp-ai-agent' with the following:
1. Create the directory structure as shown above
2. Initialize a virtual environment
3. Create a requirements.txt with these dependencies:
   - python-dotenv>=1.0.0
   - httpx>=0.24.0
   - websockets>=11.0
   - pydantic>=2.0.0
   - click>=8.1.0
   - rich>=13.0.0
   - pytest>=7.0.0
   - pytest-asyncio>=0.21.0
   - pytest-mock>=3.10.0
4. Create a .env.example file with placeholders for MCP server configuration
5. Create a setup.py for package installation
6. Initialize git repository with appropriate .gitignore
```

**Prompt 1.2 - Configuration Module**
```
Create src/config.py that:
1. Uses pydantic BaseSettings for configuration management
2. Loads environment variables from .env file
3. Includes these configuration fields:
   - MCP_SERVER_URL (with validation for URL format)
   - MCP_SERVER_PORT (integer between 1-65535)
   - MCP_API_KEY (optional, for authenticated servers)
   - AI_MODEL (default to a standard model)
   - LOG_LEVEL (default to INFO)
   - CONNECTION_TIMEOUT (default to 30 seconds)
   - RETRY_ATTEMPTS (default to 3)
4. Implements validation and provides clear error messages
5. Include docstrings explaining each configuration option
```

### Phase 2: MCP Client Implementation

**Prompt 2.1 - MCP Client Base**
```
Create src/mcp_client.py with:
1. An async MCPClient class that handles connection to the MCP server
2. Methods for:
   - connect(): Establish connection (support both HTTP and WebSocket)
   - disconnect(): Clean shutdown
   - send_request(): Send requests to MCP server
   - receive_response(): Handle responses
   - ping(): Health check functionality
3. Implement connection retry logic with exponential backoff
4. Add proper error handling for network issues
5. Include logging for debugging
6. Support both synchronous and asynchronous operation modes
```

**Prompt 2.2 - MCP Protocol Handling**
```
Extend src/mcp_client.py to:
1. Implement the MCP protocol specification:
   - Message formatting (JSON-RPC 2.0 if applicable)
   - Request/response correlation
   - Error response handling
2. Add methods for common MCP operations:
   - list_tools(): Get available tools from server
   - execute_tool(): Execute a specific tool
   - get_context(): Retrieve context information
3. Implement message queuing for concurrent requests
4. Add request/response validation using pydantic models
```

### Phase 3: AI Agent Core

**Prompt 3.1 - AI Agent Implementation**
```
Create src/ai_agent.py with:
1. An AIAgent class that:
   - Initializes with an MCP client instance
   - Maintains conversation context
   - Processes user inputs and generates responses
2. Implement these methods:
   - process_message(): Main message handling
   - use_mcp_tool(): Decide when to use MCP tools
   - format_response(): Format AI responses for CLI
3. Add conversation memory management
4. Implement tool selection logic based on user queries
5. Include error recovery for failed MCP operations
```

**Prompt 3.2 - AI Integration**
```
Enhance src/ai_agent.py to:
1. Integrate with an AI model (use OpenAI API or similar as placeholder)
2. Implement prompt engineering for:
   - System prompts that explain available MCP tools
   - User message formatting
   - Tool usage instructions
3. Add response streaming support
4. Implement token counting and management
5. Add safety checks and content filtering
```

### Phase 4: CLI Interface

**Prompt 4.1 - CLI Implementation**
```
Create src/cli.py using Click framework:
1. Main command group with these subcommands:
   - start: Start interactive chat session
   - list-tools: Show available MCP tools
   - config: Display current configuration
   - test-connection: Test MCP server connectivity
2. For the interactive chat:
   - Use Rich for colorful output
   - Show typing indicators during processing
   - Support multi-line input (Ctrl+Enter)
   - Add command history
   - Include /help, /clear, /exit commands
3. Add progress bars for long operations
4. Implement graceful shutdown on Ctrl+C
```

**Prompt 4.2 - Enhanced CLI Features**
```
Extend src/cli.py with:
1. Session management:
   - Save/load conversation history
   - Export conversations to markdown
2. Advanced commands:
   - /tools: List and describe available tools
   - /use <tool>: Explicitly use a specific tool
   - /context: Show current context
   - /stats: Display session statistics
3. Add syntax highlighting for code blocks
4. Implement auto-completion for commands
5. Add configuration override options via CLI flags
```

### Phase 5: Testing Suite

**Prompt 5.1 - Unit Tests**
```
Create comprehensive unit tests:
1. tests/test_mcp_client.py:
   - Test connection establishment
   - Mock server responses
   - Test retry logic
   - Verify error handling
2. tests/test_ai_agent.py:
   - Test message processing
   - Mock MCP tool usage
   - Verify context management
3. tests/test_cli.py:
   - Test command parsing
   - Verify output formatting
   - Test interactive mode
4. Use pytest fixtures for common test setup
5. Aim for >80% code coverage
```

**Prompt 5.2 - Integration Tests**
```
Create tests/test_integration.py with:
1. End-to-end tests using a mock MCP server
2. Test full conversation flows
3. Verify tool execution chains
4. Test error recovery scenarios
5. Performance tests for concurrent operations
6. Create docker-compose.yml for test environment
```

### Phase 6: Main Entry Point and Documentation

**Prompt 6.1 - Main Application**
```
Create main.py that:
1. Serves as the application entry point
2. Handles initialization of all components
3. Implements proper async context management
4. Adds global exception handling
5. Includes signal handlers for graceful shutdown
6. Provides --debug flag for verbose logging
```

**Prompt 6.2 - Documentation**
```
Create comprehensive documentation:
1. README.md with:
   - Project description and features
   - Installation instructions
   - Quick start guide
   - Configuration options
   - Usage examples
   - Troubleshooting section
2. Add inline code documentation following Google style
3. Create docs/ directory with:
   - Architecture overview
   - MCP protocol details
   - API reference
   - Contributing guidelines
```

### Phase 7: Advanced Features

**Prompt 7.1 - Plugin System**
```
Implement a plugin system:
1. Create src/plugins/ directory structure
2. Define plugin interface for extending functionality
3. Add plugin loader in main application
4. Create example plugins:
   - Custom output formatters
   - Additional CLI commands
   - Response processors
5. Document plugin development
```

**Prompt 7.2 - Performance and Monitoring**
```
Add performance monitoring:
1. Implement metrics collection:
   - Response times
   - Token usage
   - Error rates
   - MCP server latency
2. Add optional Prometheus metrics endpoint
3. Create performance profiling mode
4. Add request/response logging with rotation
5. Implement caching for frequently used MCP tools
```

## Testing Instructions

### Manual Testing Checklist
1. **Connection Testing**
   - Test with valid MCP server
   - Test with invalid server URL
   - Test connection timeout
   - Test authentication (if applicable)

2. **Functionality Testing**
   - Send various user queries
   - Test all CLI commands
   - Verify tool execution
   - Test error scenarios

3. **Performance Testing**
   - Test with concurrent requests
   - Measure response times
   - Check memory usage
   - Test with long conversations

### Automated Testing
```bash
# Run all tests
pytest

# Run with coverage
pytest --cov=src --cov-report=html

# Run specific test file
pytest tests/test_mcp_client.py

# Run integration tests
pytest tests/test_integration.py -v
```

## Deployment Considerations

1. **Security**
   - Never commit .env files
   - Use secure communication with MCP server
   - Implement rate limiting
   - Add input sanitization

2. **Scalability**
   - Design for horizontal scaling
   - Implement connection pooling
   - Add request queuing
   - Consider caching strategies

3. **Monitoring**
   - Set up logging aggregation
   - Implement health checks
   - Add alerting for failures
   - Monitor resource usage

## Example Usage

```bash
# Install the package
pip install -e .

# Configure environment
cp .env.example .env
# Edit .env with your MCP server details

# Test connection
mcp-agent test-connection

# Start interactive session
mcp-agent start

# List available tools
mcp-agent list-tools

# Start with debug logging
mcp-agent --debug start
```

## Additional Notes

- Ensure all async operations use proper context managers
- Implement graceful degradation when MCP server is unavailable
- Add comprehensive error messages for better user experience
- Consider implementing a web UI as a future enhancement
- Keep dependencies minimal and well-maintained
- Follow semantic versioning for releases
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/log_management_tools.py:
--------------------------------------------------------------------------------

```python
"""
Core log management MCP tools.
"""

import asyncio
from datetime import datetime
from typing import Any, Dict, Optional

from mcp.server import FastMCP
from pydantic import BaseModel, Field


# Tool Models
class RegisterLogSourceRequest(BaseModel):
    """Request model for registering a log source.

    ``source_type`` may be a canonical parser type or an alias; the server
    resolves aliases to the actual parser type at registration time.
    """

    name: str = Field(..., description="Unique name for the log source")
    source_type: str = Field(
        ..., description="Type of log source (evt, etl, json, xml, csv, text)"
    )
    path: str = Field(..., description="Path to the log file or directory")
    config: Dict[str, Any] = Field(
        default_factory=dict, description="Additional configuration for the parser"
    )


class QueryLogsRequest(BaseModel):
    """Request model for querying logs.

    ``start_time``/``end_time`` form an optional time window; ``None``
    leaves that side of the window unbounded.
    """

    name_fix_marker: None = None  # placeholder removed below


class AnalyzeLogsRequest(BaseModel):
    """Request model for analyzing logs.

    ``start_time``/``end_time`` form an optional time window; ``None``
    leaves that side of the window unbounded.
    """

    source_name: str = Field(..., description="Name of the log source to analyze")
    analysis_type: str = Field(
        "summary", description="Type of analysis (summary, pattern, anomaly)"
    )
    filters: Dict[str, Any] = Field(
        default_factory=dict, description="Filters to apply before analysis"
    )
    # Annotated Optional: these fields default to None, so a bare
    # ``datetime`` annotation was incorrect.
    start_time: Optional[datetime] = Field(None, description="Start time for the analysis")
    end_time: Optional[datetime] = Field(None, description="End time for the analysis")


def register_log_management_tools(mcp: FastMCP):
    """Register all log management tools with the MCP server.

    Args:
        mcp: The FastMCP server instance the tools are attached to.
    """
    import logging

    # Shared logger for all tools registered below.
    logger = logging.getLogger(__name__)

    def _parse_timeout(source) -> float:
        """Return the parse timeout in seconds: ETL files can be huge, so
        they get 10 minutes; everything else gets 30 seconds."""
        return 600.0 if source.type == "etl" else 30.0

    def _log_etl_start(source, operation: str) -> None:
        """Log the start of a potentially slow ETL parse with its file size."""
        import os

        file_size_mb = os.path.getsize(source.path) / (1024 * 1024)
        logger.info(f"Starting ETL {operation} for {source.name} ({file_size_mb:.1f} MB file)")

    @mcp.tool()
    async def register_log_source(request: RegisterLogSourceRequest) -> Dict[str, Any]:
        """
        Register a new log source for analysis.

        This tool allows you to register various types of log sources including:
        - Windows Event Logs (evt)
        - Windows Event Trace Logs (etl)
        - JSON logs
        - XML logs
        - CSV logs
        - Unstructured text logs
        """
        # Imported lazily to avoid a circular import with ..server.
        from mcp_log_analyzer.core.models import LogSource

        from ..server import log_sources, parser_aliases, parsers

        if request.name in log_sources:
            return {"error": f"Log source '{request.name}' already exists"}

        # Resolve user-facing aliases to the canonical parser type.
        actual_source_type = parser_aliases.get(request.source_type, request.source_type)

        if actual_source_type not in parsers:
            supported_types = list(parsers.keys())
            return {"error": f"Unsupported source type: {request.source_type}. Supported types are: {', '.join(supported_types)}"}

        log_source = LogSource(
            name=request.name,
            type=actual_source_type,
            path=request.path,
            metadata=request.config,
        )

        log_sources[request.name] = log_source

        # Persist the updated registry so sources survive restarts.
        from ..server import get_log_sources, state_manager

        state_manager.save_log_sources(get_log_sources())

        return {
            "message": f"Log source '{request.name}' registered successfully",
            "source": log_source.model_dump(),
        }

    @mcp.tool()
    async def list_log_sources() -> Dict[str, Any]:
        """
        List all registered log sources.

        Returns information about all currently registered log sources
        including their names, types, and paths.
        """
        from ..server import log_sources

        return {
            "sources": [source.model_dump() for source in log_sources.values()],
            "count": len(log_sources),
        }

    @mcp.tool()
    async def get_log_source(name: str) -> Dict[str, Any]:
        """
        Get details about a specific log source.

        Args:
            name: The name of the log source to retrieve
        """
        from ..server import log_sources

        if name not in log_sources:
            return {"error": f"Log source '{name}' not found"}

        return {"source": log_sources[name].model_dump()}

    @mcp.tool()
    async def delete_log_source(name: str) -> Dict[str, Any]:
        """
        Delete a registered log source.

        Args:
            name: The name of the log source to delete
        """
        from ..server import log_sources

        if name not in log_sources:
            return {"error": f"Log source '{name}' not found"}

        # Get source details before deletion.
        source = log_sources[name]

        # Clean up ETL cache if this is an ETL source; cleanup is
        # best-effort and must never block the deletion itself.
        if source.type == "etl":
            try:
                from mcp_log_analyzer.parsers.etl_cached_parser import EtlCachedParser

                EtlCachedParser.cleanup_cache_for_source(source.path)
            except Exception:
                # Log the failure (previously swallowed silently) but
                # continue with the deletion.
                logger.warning(
                    "Failed to clean up ETL cache for %s", source.path, exc_info=True
                )

        del log_sources[name]

        # Persist the updated registry so the deletion survives restarts.
        from ..server import get_log_sources, state_manager

        state_manager.save_log_sources(get_log_sources())

        return {"message": f"Log source '{name}' deleted successfully"}

    @mcp.tool()
    async def query_logs(request: QueryLogsRequest) -> Dict[str, Any]:
        """
        Query logs from a registered source.

        This tool allows you to:
        - Filter logs by various criteria
        - Specify time ranges
        - Paginate through results
        """
        from ..server import log_sources, parsers

        if request.source_name not in log_sources:
            return {"error": f"Log source '{request.source_name}' not found"}

        source = log_sources[request.source_name]
        parser = parsers[source.type]
        # Computed once and reused in the timeout error path below.
        timeout = _parse_timeout(source)

        try:
            if source.type == "etl":
                _log_etl_start(source, "query")

            # Parse in a worker thread so the event loop stays responsive,
            # bounded by the per-type timeout.
            logs = await asyncio.wait_for(
                asyncio.to_thread(
                    parser.parse,
                    source.path,
                    filters=request.filters,
                    start_time=request.start_time,
                    end_time=request.end_time,
                    limit=request.limit,
                    offset=request.offset,
                ),
                timeout=timeout,
            )

            logger.info(f"Successfully queried {len(logs)} logs from {request.source_name}")

            return {
                "logs": [log.model_dump() for log in logs],
                "count": len(logs),
                "source": request.source_name,
            }
        except asyncio.TimeoutError:
            logger.error(f"Query timed out after {timeout} seconds for {request.source_name}")
            return {"error": f"Query timed out after {timeout} seconds. The log file may be too large or complex to parse."}
        except Exception as e:
            logger.error(f"Query failed for {request.source_name}: {str(e)}")
            return {"error": f"Failed to query logs: {str(e)}"}

    @mcp.tool()
    async def analyze_logs(request: AnalyzeLogsRequest) -> Dict[str, Any]:
        """
        Analyze logs from a registered source.

        Available analysis types:
        - summary: General statistics and overview
        - pattern: Pattern detection and frequency analysis
        - anomaly: Anomaly detection
        """
        from ..server import log_sources, parsers

        if request.source_name not in log_sources:
            return {"error": f"Log source '{request.source_name}' not found"}

        source = log_sources[request.source_name]
        parser = parsers[source.type]
        # Computed once and reused in the timeout error path below.
        timeout = _parse_timeout(source)

        try:
            if source.type == "etl":
                _log_etl_start(source, "analysis")

            # First, parse the logs off-loop with a timeout.
            logs = await asyncio.wait_for(
                asyncio.to_thread(
                    parser.parse,
                    source.path,
                    filters=request.filters,
                    start_time=request.start_time,
                    end_time=request.end_time,
                ),
                timeout=timeout,
            )

            # Then analyze them (no timeout: analysis is in-memory).
            result = await asyncio.to_thread(
                parser.analyze, logs, analysis_type=request.analysis_type
            )

            return {
                "result": result.model_dump(),
                "source": request.source_name,
                "analysis_type": request.analysis_type,
            }
        except asyncio.TimeoutError:
            logger.error(f"Analysis timed out after {timeout} seconds for {request.source_name}")
            return {"error": f"Analysis timed out after {timeout} seconds. The log file may be too large or complex to parse."}
        except Exception as e:
            logger.error(f"Analysis failed for {request.source_name}: {str(e)}")
            return {"error": f"Failed to analyze logs: {str(e)}"}

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/api/server.py:
--------------------------------------------------------------------------------

```python
"""MCP server API implementation."""

import argparse
import logging
import os
from typing import Dict, List, Optional, Union
from uuid import UUID

import uvicorn
from fastapi import Depends, FastAPI, HTTPException, Path, Query, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

from ..core.config import Config, load_config
from ..core.models import (
    LogAnalysisRequest,
    LogAnalysisResponse,
    LogQueryRequest,
    LogQueryResponse,
    LogRecord,
    LogSource,
    LogSourceRequest,
    LogSourceResponse,
    LogType,
    MCPContext,
    MCPError,
)
from ..parsers import get_parser_for_type

# Configure logging
# NOTE(review): runs at import time and configures the root logger with a
# fixed INFO level; main() later re-applies the configured level/format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger("mcp_server")

# Create FastAPI app
app = FastAPI(
    title="MCP Log Analyzer",
    description="Model Context Protocol server for analyzing various types of logs",
    version="0.1.0",
)

# Add CORS middleware
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive -- confirm this is intended before exposing beyond localhost.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Store for registered log sources (in-memory; contents are lost on restart)
log_sources: Dict[UUID, LogSource] = {}

# In-memory context store (in a real application, this would be a database)
contexts: Dict[UUID, MCPContext] = {}


def get_config() -> Config:
    """Load and return the application configuration.

    The MCP_CONFIG environment variable, when set, selects the
    configuration file; otherwise the loader falls back to its default.

    Returns:
        The application configuration.
    """
    return load_config(os.environ.get("MCP_CONFIG"))


@app.exception_handler(MCPError)
async def mcp_error_handler(request: Request, exc: MCPError) -> JSONResponse:
    """Translate an MCPError into a JSON error response.

    Args:
        request: The incoming request (unused; required by FastAPI's
            exception-handler signature).
        exc: The raised MCPError.

    Returns:
        A JSONResponse carrying the error's status code and message.
    """
    payload = {"status": "error", "error": exc.message}
    return JSONResponse(status_code=exc.status_code, content=payload)


@app.get("/api/health")
async def health() -> Dict[str, str]:
    """Report service liveness.

    Returns:
        A mapping with a single "status" key set to "ok".
    """
    status: Dict[str, str] = {"status": "ok"}
    return status


@app.post("/api/sources", response_model=LogSourceResponse)
async def register_source(
    request: LogSourceRequest, config: Config = Depends(get_config)
) -> LogSourceResponse:
    """Register a log source.

    Args:
        request: The log source registration request.
        config: The application configuration.

    Returns:
        The registered log source.

    Raises:
        MCPError: With a 400 status when the source fails validation or
            validation itself raises.
    """
    logger.info(f"Registering log source: {request.name} ({request.type})")

    # Create log source
    source = LogSource(
        name=request.name,
        type=request.type,
        path=request.path,
        metadata=request.metadata,
    )

    # Validate the source with the appropriate parser.  Only unexpected
    # errors are wrapped; a plain validation failure raises its own MCPError
    # outside the try block so it is not caught and double-wrapped (the
    # previous code re-raised it as "Error validating log source: Invalid
    # log source: ..." and logged a spurious exception).
    try:
        parser = get_parser_for_type(source.type, config.parsers)
        is_valid = parser.validate_file(source.path)
    except Exception as e:
        logger.exception(f"Error validating log source: {e}")
        raise MCPError(f"Error validating log source: {str(e)}", status_code=400)
    if not is_valid:
        raise MCPError(f"Invalid log source: {source.path}", status_code=400)

    # Store log source
    log_sources[source.id] = source

    # Create response
    return LogSourceResponse(request_id=request.request_id, source=source)


@app.get("/api/sources", response_model=List[LogSource])
async def list_sources() -> List[LogSource]:
    """Return every registered log source.

    Returns:
        All log sources currently held in the in-memory registry.
    """
    registered = log_sources.values()
    return list(registered)


@app.get("/api/sources/{source_id}", response_model=LogSource)
async def get_source(source_id: UUID = Path(..., description="Source ID")) -> LogSource:
    """Look up a single log source by its ID.

    Args:
        source_id: The source ID.

    Returns:
        The matching log source.

    Raises:
        MCPError: With a 404 status when no such source exists.
    """
    source = log_sources.get(source_id)
    if source is None:
        raise MCPError(f"Log source not found: {source_id}", status_code=404)
    return source


@app.delete("/api/sources/{source_id}")
async def delete_source(
    source_id: UUID = Path(..., description="Source ID")
) -> Dict[str, str]:
    """Remove a registered log source.

    Args:
        source_id: The source ID.

    Returns:
        A success payload describing the deletion.

    Raises:
        MCPError: With a 404 status when no such source exists.
    """
    try:
        del log_sources[source_id]
    except KeyError:
        raise MCPError(f"Log source not found: {source_id}", status_code=404)
    return {"status": "success", "message": f"Log source {source_id} deleted"}


@app.post("/api/query", response_model=LogQueryResponse)
async def query_logs(
    request: LogQueryRequest, config: Config = Depends(get_config)
) -> LogQueryResponse:
    """Query logs across the registered sources.

    Args:
        request: The log query request.
        config: The application configuration.

    Returns:
        The query response with a paginated list of matching records.

    NOTE(review): ``total`` reports the number of records parsed from the
    selected sources *before* the time/field filters are applied -- this
    preserves the original behavior; confirm against API consumers before
    changing it to a post-filter count.
    """
    query = request.query
    logger.info(f"Querying logs: {query}")

    records: List[LogRecord] = []
    total_records = 0

    # Track the request in the in-memory context store.
    context = MCPContext(request_id=request.request_id, client_id=request.client_id)
    contexts[request.request_id] = context

    # Select sources: explicit IDs when given (silently skipping unknown
    # ones), otherwise every registered source.
    if query.source_ids:
        source_filter = {
            sid: log_sources[sid] for sid in query.source_ids if sid in log_sources
        }
    else:
        source_filter = dict(log_sources)

    # Narrow by log type if requested.
    if query.types:
        source_filter = {
            sid: source
            for sid, source in source_filter.items()
            if source.type in query.types
        }

    # Get records from each source
    for source_id, source in source_filter.items():
        try:
            parser = get_parser_for_type(source.type, config.parsers)
            source_records = list(parser.parse_file(source, source.path))
            total_records += len(source_records)

            # Time-window filters; records without a timestamp are dropped
            # whenever a bound is specified.
            if query.start_time:
                source_records = [
                    r
                    for r in source_records
                    if r.timestamp and r.timestamp >= query.start_time
                ]
            if query.end_time:
                source_records = [
                    r
                    for r in source_records
                    if r.timestamp and r.timestamp <= query.end_time
                ]

            # Exact-match field filters against the record payload.
            for field, value in query.filters.items():
                source_records = [
                    r
                    for r in source_records
                    if field in r.data and r.data[field] == value
                ]

            records.extend(source_records)
        except Exception as e:
            logger.exception(f"Error parsing log source {source_id}: {e}")
            # Continue with other sources on error

    # Pagination: list slicing already yields [] when the offset is past the
    # end, so no explicit bounds check is needed.
    paginated_records = records[query.offset : query.offset + query.limit]

    return LogQueryResponse(
        request_id=request.request_id,
        records=paginated_records,
        total=total_records,
        limit=query.limit,
        offset=query.offset,
    )


@app.post("/api/analyze", response_model=LogAnalysisResponse)
async def analyze_logs(
    request: LogAnalysisRequest, config: Config = Depends(get_config)
) -> LogAnalysisResponse:
    """Analyze logs.

    Placeholder implementation: rather than invoking real analysis
    modules, it summarizes the query that was submitted.

    Args:
        request: The log analysis request.
        config: The application configuration.

    Returns:
        The analysis response.
    """
    logger.info(f"Analyzing logs: {request.analysis_type}")

    query = request.query

    # Count either the explicitly requested sources/types or everything known.
    if query.source_ids:
        source_count = len(query.source_ids)
    else:
        source_count = len(log_sources)

    if query.types:
        type_count = len(query.types)
    else:
        type_count = len(LogType)

    start_repr = str(query.start_time) if query.start_time else "Not specified"
    end_repr = str(query.end_time) if query.end_time else "Not specified"

    analysis_results = {
        "analysis_type": request.analysis_type,
        "parameters": request.parameters,
        "summary": "Analysis completed successfully",
        "details": {
            "source_count": source_count,
            "type_count": type_count,
            "start_time": start_repr,
            "end_time": end_repr,
        },
    }

    return LogAnalysisResponse(
        request_id=request.request_id,
        results=analysis_results,
        query=request.query,
    )


def main() -> None:
    """Run the MCP server from the command line.

    Parses CLI arguments, loads configuration, reconfigures logging, and
    starts uvicorn with this module's FastAPI app.
    """
    parser = argparse.ArgumentParser(description="MCP Log Analyzer Server")
    parser.add_argument(
        "--config",
        help="Path to configuration file",
        default=os.environ.get("MCP_CONFIG"),
    )
    parser.add_argument("--host", help="Host to bind to", default=None)
    parser.add_argument("--port", help="Port to bind to", type=int, default=None)
    parser.add_argument("--reload", help="Enable auto-reload", action="store_true")
    args = parser.parse_args()

    # Load configuration; export the path so worker/reload processes that
    # re-import this module resolve the same file via get_config().
    if args.config:
        os.environ["MCP_CONFIG"] = args.config
    config = load_config(args.config)

    # Command-line arguments win over configured values.
    host = args.host or config.server.host
    port = args.port or config.server.port

    # Reconfigure logging with the configured level and format.  force=True
    # is required: basicConfig() already ran at module import time, so
    # without it this call would be a silent no-op.
    log_level = getattr(logging, config.logging.level.upper(), logging.INFO)
    logging.basicConfig(level=log_level, format=config.logging.format, force=True)
    if config.logging.file:
        # NOTE(review): the file handler is attached to this module's logger
        # only, not the root logger -- presumably intentional; confirm.
        handler = logging.FileHandler(config.logging.file)
        handler.setFormatter(logging.Formatter(config.logging.format))
        logger.addHandler(handler)

    # Start server
    logger.info(f"Starting MCP server at {host}:{port}")
    uvicorn.run(
        "mcp_log_analyzer.api.server:app",
        host=host,
        port=port,
        reload=args.reload,
        log_level="info" if not config.server.debug else "debug",
    )


if __name__ == "__main__":
    main()

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/windows_resources.py:
--------------------------------------------------------------------------------

```python
"""
Windows system monitoring MCP resources.
"""

import platform
from datetime import datetime

from mcp.server import FastMCP


def register_windows_resources(mcp: FastMCP):
    """Register all Windows-related resources with the MCP server.

    Exposes the System and Application event logs through four URI shapes:
    a default (last 10 entries), a count-based variant, a duration-based
    variant, and an explicit start/end range variant.
    """

    from ..server import parse_time_param

    # EventType values 1..3 map to Info/Warning/Error; anything above 3 is
    # rendered as "Unknown".  NOTE(review): an EventType of 0 would index -1
    # and render as "Error" -- quirk preserved from the original code.
    _TYPE_NAMES = ["Info", "Warning", "Error"]

    def _format_event(event, log_type: str) -> list:
        """Render one event-log record as the display lines shared by all
        resource variants below."""
        import win32evtlogutil

        lines = [
            f"\nTime: {event.TimeGenerated}",
            f"Source: {event.SourceName}",
            f"Event ID: {event.EventID & 0xFFFFFFFF}",  # Convert to unsigned
            f"Type: {_TYPE_NAMES[event.EventType - 1] if event.EventType <= 3 else 'Unknown'}",
        ]
        try:
            message = win32evtlogutil.SafeFormatMessage(event, log_type)
            if message:
                lines.append(f"Message: {message[:200]}...")
        except Exception:
            # Formatting depends on the provider's registered message
            # resources and can fail for foreign or removed providers.
            lines.append("Message: (Unable to format message)")
        return lines

    @mcp.resource("system://windows-event-logs")
    async def get_windows_event_logs() -> str:
        """
        Get Windows System and Application event logs with default parameters.

        Use parameterized versions for more control:
        - system://windows-event-logs/last/20 - Last 20 entries
        - system://windows-event-logs/time/30m - Last 30 minutes
        - system://windows-event-logs/range/2025-01-07 13:00/2025-01-07 14:00 - Time range
        """
        # Default to last 10 entries
        return await get_windows_event_logs_with_count("10")

    @mcp.resource("system://windows-event-logs/last/{count}")
    async def get_windows_event_logs_with_count(count: str) -> str:
        """
        Get recent Windows System and Application event logs by count.

        Args:
            count: Number of entries to retrieve (e.g., "20")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            max_count = int(count)
            result = [f"=== Windows Event Logs (Last {max_count} entries) ===\n"]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    # Renamed from "count" to avoid shadowing the parameter,
                    # which the ValueError handler below still references.
                    emitted = 0
                    while emitted < max_count:
                        events = win32evtlog.ReadEventLog(hand, flags, 0)
                        if not events:
                            break  # No more events to read

                        for event in events:
                            if emitted >= max_count:
                                break
                            result.extend(_format_event(event, log_type))
                            emitted += 1

                    win32evtlog.CloseEventLog(hand)

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError:
            return f"Invalid count parameter: {count}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"

    @mcp.resource("system://windows-event-logs/time/{duration}")
    async def get_windows_event_logs_by_time(duration: str) -> str:
        """
        Get Windows event logs from the last N minutes/hours/days.

        Args:
            duration: Time duration (e.g., "30m", "2h", "1d")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            start_time = parse_time_param(duration)
            if not start_time:
                return "Invalid duration format. Use format like '30m', '2h', or '1d'."

            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            result = [
                f"=== Windows Event Logs (Since {start_time.strftime('%Y-%m-%d %H:%M:%S')}) ===\n"
            ]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    event_count = 0
                    done_reading = False

                    while not done_reading:
                        events = win32evtlog.ReadEventLog(hand, flags, 0)
                        if not events:
                            break  # No more events to read

                        for event in events:
                            # Logs are read newest-first; once we cross the
                            # start of the window we can stop entirely.
                            if event.TimeGenerated < start_time:
                                done_reading = True
                                break
                            result.extend(_format_event(event, log_type))
                            event_count += 1

                    win32evtlog.CloseEventLog(hand)
                    result.append(f"\n{log_type}: {event_count} events found")

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"

    @mcp.resource("system://windows-event-logs/range/{start}/{end}")
    async def get_windows_event_logs_by_range(start: str, end: str) -> str:
        """
        Get Windows event logs within a specific time range.

        Args:
            start: Start time (e.g., "2025-01-07 13:00")
            end: End time (e.g., "2025-01-07 14:00")
        """
        if platform.system() != "Windows":
            return "This resource is only available on Windows systems."

        try:
            start_time = parse_time_param(start)
            end_time = parse_time_param(end)

            if not start_time or not end_time:
                return "Invalid time format. Use format like '2025-01-07 13:00'."

            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            result = [
                f"=== Windows Event Logs ({start_time.strftime('%Y-%m-%d %H:%M')} to {end_time.strftime('%Y-%m-%d %H:%M')}) ===\n"
            ]

            for log_type in ["System", "Application"]:
                result.append(f"\n--- {log_type} Log ---")

                try:
                    hand = win32evtlog.OpenEventLog(None, log_type)
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    event_count = 0
                    done_reading = False

                    while not done_reading:
                        events = win32evtlog.ReadEventLog(hand, flags, 0)
                        if not events:
                            break  # No more events to read

                        for event in events:
                            # Newest-first: past the range start means done.
                            if event.TimeGenerated < start_time:
                                done_reading = True
                                break

                            # Skip events newer than the requested end.
                            if event.TimeGenerated > end_time:
                                continue

                            result.extend(_format_event(event, log_type))
                            event_count += 1

                    win32evtlog.CloseEventLog(hand)
                    result.append(f"\n{log_type}: {event_count} events found")

                except Exception as e:
                    result.append(f"Error reading {log_type} log: {str(e)}")

            return "\n".join(result)

        except ImportError:
            return "Windows Event Log access requires pywin32 package."
        except ValueError as e:
            return f"Invalid time parameter: {str(e)}"
        except Exception as e:
            return f"Error accessing Windows Event Logs: {str(e)}"

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/etl_windows_parser.py:
--------------------------------------------------------------------------------

```python
"""Windows ETL parser using native Windows tools as fallback."""

import json
import os
import platform
import subprocess
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from uuid import uuid4

from ..core.models import LogRecord, LogSource, LogType
from .base import BaseParser


class EtlWindowsParser(BaseParser):
    """ETL parser using Windows native tools (tracerpt.exe)."""

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize Windows ETL parser.

        Args:
            config: Parser configuration.
        """
        super().__init__(config)
        self.use_tracerpt = self.config.get("use_tracerpt", True)
        self.tracerpt_path = self._find_tracerpt()

    def _find_tracerpt(self) -> Optional[str]:
        """Find tracerpt.exe on the system.

        Returns:
            Full path to tracerpt.exe, or None when not running on Windows
            or the tool cannot be located.
        """
        if platform.system() != "Windows":
            return None

        # Common locations for tracerpt.exe
        possible_paths = [
            r"C:\Windows\System32\tracerpt.exe",
            r"C:\Windows\SysWOW64\tracerpt.exe",
        ]

        for path in possible_paths:
            if os.path.exists(path):
                return path

        # Fall back to searching PATH via "where" (narrowed from a bare
        # except: only OS-level launch failures are expected here).
        try:
            result = subprocess.run(
                ["where", "tracerpt.exe"],
                capture_output=True,
                text=True,
                check=False,
            )
            if result.returncode == 0 and result.stdout.strip():
                return result.stdout.strip().split("\n")[0]
        except OSError:
            pass

        return None

    def is_available(self) -> bool:
        """Check if Windows ETL parsing is available."""
        return self.tracerpt_path is not None

    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse ETL log records using Windows tracerpt.

        Converts the binary ETL file to CSV in a temporary directory, then
        streams the CSV rows as LogRecord objects.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.

        Yields:
            LogRecord objects parsed from the ETL file.

        Raises:
            RuntimeError: If tracerpt is unavailable, fails, or times out.
            FileNotFoundError: If the ETL file does not exist.
            ValueError: If the file does not have an .etl extension.
        """
        import csv

        if not self.is_available():
            raise RuntimeError(
                "Windows ETL parsing is not available. tracerpt.exe not found."
            )

        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"ETL file not found: {file_path}")

        if not str(path).lower().endswith(".etl"):
            raise ValueError(f"File does not appear to be an ETL file: {file_path}")

        # Create temporary directory for output
        with tempfile.TemporaryDirectory() as temp_dir:
            output_file = os.path.join(temp_dir, "output.csv")

            try:
                # Run tracerpt to convert ETL to CSV
                cmd = [
                    self.tracerpt_path,
                    str(path),
                    "-o", output_file,
                    "-of", "CSV",
                    "-y",  # Overwrite without prompting
                ]

                result = subprocess.run(
                    cmd,
                    capture_output=True,
                    text=True,
                    timeout=300,  # 5 minute timeout
                )

                if result.returncode != 0:
                    raise RuntimeError(
                        f"tracerpt failed with code {result.returncode}: {result.stderr}"
                    )

                # Parse the CSV output
                if os.path.exists(output_file):
                    with open(output_file, "r", encoding="utf-8", errors="ignore") as f:
                        reader = csv.DictReader(f)

                        for row in reader:
                            log_record = self._convert_csv_row(source, row)
                            if log_record:
                                yield log_record

            except subprocess.TimeoutExpired as e:
                raise RuntimeError("tracerpt timed out after 5 minutes") from e
            except RuntimeError:
                # Re-raise our own errors untouched; the previous code
                # double-wrapped them as "Failed to parse ETL file: ...".
                raise
            except Exception as e:
                raise RuntimeError(f"Failed to parse ETL file: {e}") from e

    def _convert_csv_row(self, source: LogSource, row: Dict[str, str]) -> Optional[LogRecord]:
        """Convert a CSV row from tracerpt to a LogRecord.

        Args:
            source: The log source information.
            row: CSV row dictionary.

        Returns:
            LogRecord or None if conversion fails.
        """
        try:
            # Common tracerpt CSV columns
            record_data = {}

            # Map known columns
            field_mappings = {
                "Event Name": "event_name",
                "Type": "event_type",
                "Event ID": "event_id",
                "Version": "version",
                "Channel": "channel",
                "Level": "level",
                "Task": "task",
                "Opcode": "opcode",
                "Keyword": "keywords",
                "PID": "process_id",
                "TID": "thread_id",
                "Processor Number": "processor",
                "Instance ID": "instance_id",
                "Parent Instance ID": "parent_instance_id",
                "Activity ID": "activity_id",
                "Related Activity ID": "related_activity_id",
                "Provider Name": "provider_name",
                "Provider ID": "provider_id",
                "Message": "message",
                "Process Name": "process_name",
            }

            for csv_field, record_field in field_mappings.items():
                if csv_field in row and row[csv_field]:
                    record_data[record_field] = row[csv_field]

            # Try to parse timestamp; tracerpt emits other formats too, so a
            # parse failure simply leaves the record without a timestamp.
            timestamp = None
            if "Clock-Time" in row:
                try:
                    timestamp = datetime.strptime(
                        row["Clock-Time"],
                        "%Y-%m-%d %H:%M:%S.%f",
                    )
                except (ValueError, TypeError):
                    pass

            # Include any additional fields under normalized snake_case keys.
            for key, value in row.items():
                if key not in field_mappings and value:
                    clean_key = key.lower().replace(' ', '_').replace('-', '_')
                    record_data[clean_key] = value

            return LogRecord(
                source_id=source.id,
                timestamp=timestamp,
                data=record_data,
                raw_content=None,  # CSV rows are already processed
            )

        except Exception as e:
            if self.config.get("verbose", False):
                print(f"Failed to convert CSV row: {e}")
            return None

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse ETL log records from content string.

        Note: ETL files are binary and cannot be parsed from string content.

        Args:
            source: The log source information.
            content: String content (not supported for ETL).

        Raises:
            NotImplementedError: ETL files must be parsed from file.
        """
        raise NotImplementedError(
            "ETL files are binary and must be parsed from file, not string content"
        )

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Validate if the file can be parsed by this parser.

        Args:
            file_path: Path to validate.

        Returns:
            True if file appears to be an ETL file and tracerpt is present.
        """
        path = Path(file_path)

        # Check file extension
        if not str(path).lower().endswith(".etl"):
            return False

        # Check if file exists and is readable
        if not path.exists() or not path.is_file():
            return False

        # Check if we have tracerpt available
        if not self.is_available():
            return False

        return True

    def parse(
        self, path: str, filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime] = None, end_time: Optional[datetime] = None,
        limit: int = 1000, offset: int = 0
    ) -> List[LogRecord]:
        """Parse ETL file with filtering and pagination.

        Args:
            path: Path to the ETL file.
            filters: Optional filters to apply.
            start_time: Optional start time filter.
            end_time: Optional end time filter.
            limit: Maximum number of records to return.
            offset: Number of records to skip.

        Returns:
            List of LogRecord objects.
        """
        # Create a temporary log source for parsing
        temp_source = LogSource(
            name="temp_etl",
            type=LogType.ETL,
            path=path,
            metadata={},
        )

        records = []
        skipped = 0

        for record in self.parse_file(temp_source, path):
            # Apply time filters
            if start_time and record.timestamp and record.timestamp < start_time:
                continue
            if end_time and record.timestamp and record.timestamp > end_time:
                continue

            # Apply custom filters
            if filters and not self._match_filters(record, filters):
                continue

            # Handle pagination: skip the first `offset` matching records.
            if skipped < offset:
                skipped += 1
                continue

            records.append(record)

            if len(records) >= limit:
                break

        return records

    def _match_filters(self, record: LogRecord, filters: Dict[str, Any]) -> bool:
        """Check if a record matches the provided filters.

        A list filter value means "record value must be one of these";
        any other value requires exact equality.

        Args:
            record: The log record to check.
            filters: Dictionary of filters to apply.

        Returns:
            True if record matches all filters.
        """
        for key, value in filters.items():
            record_value = record.data.get(key)

            if isinstance(value, list):
                if record_value not in value:
                    return False
            elif record_value != value:
                return False

        return True
```
Page 1/3FirstPrevNextLast