#
tokens: 45192/50000 13/69 files (page 2/3)
lines: off (toggle) GitHub
raw markdown copy
This is page 2 of 3. Use http://codebase.md/sedwardstx/demomcp?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .gitignore
├── .mcp.json
├── check_server.py
├── CLAUDE.md
├── config
│   └── default.yml
├── docs
│   ├── api_reference.md
│   ├── demo-recording
│   │   └── MCPDemo.gif
│   ├── example-context-docs
│   │   ├── mcp-ai-agent-architecture.md
│   │   ├── mcp-ai-agent-dev-task.md
│   │   └── mcp-ai-agent-prd.md
│   └── getting_started.md
├── LICENSE
├── main_tcp.py
├── main.py
├── mcp_tcp_client.py
├── pyproject.toml
├── QUICK_START.md
├── README.md
├── scripts
│   └── test_server.py
├── setup.py
├── src
│   └── mcp_log_analyzer
│       ├── __init__.py
│       ├── api
│       │   ├── __init__.py
│       │   └── server.py
│       ├── config
│       │   ├── __init__.py
│       │   └── settings.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── config.py
│       │   ├── models.py
│       │   └── state_manager.py
│       ├── mcp_server
│       │   ├── __init__.py
│       │   ├── models
│       │   │   ├── __init__.py
│       │   │   └── schemas.py
│       │   ├── prompts
│       │   │   ├── __init__.py
│       │   │   ├── linux_testing_prompt.py
│       │   │   ├── log_management_prompt.py
│       │   │   ├── mcp_assets_overview_prompt.py
│       │   │   ├── network_testing_prompt.py
│       │   │   ├── process_monitoring_prompt.py
│       │   │   └── windows_testing_prompt.py
│       │   ├── resources
│       │   │   ├── __init__.py
│       │   │   ├── linux_resources.py
│       │   │   ├── logs_resources.py
│       │   │   ├── network_resources.py
│       │   │   ├── process_resources.py
│       │   │   └── windows_resources.py
│       │   ├── server.py
│       │   └── tools
│       │       ├── __init__.py
│       │       ├── health_check_tools.py
│       │       ├── linux_test_tools.py
│       │       ├── log_management_tools.py
│       │       ├── network_test_tools.py
│       │       ├── process_test_tools.py
│       │       └── windows_test_tools.py
│       ├── parsers
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── csv_parser.py
│       │   ├── etl_cached_parser.py
│       │   ├── etl_large_file_parser.py
│       │   ├── etl_parser.py
│       │   ├── etl_windows_parser.py
│       │   └── evt_parser.py
│       └── tcp_proxy.py
├── TCP_PROXY_README.md
├── tcp_proxy.py
├── tcp_server.py
├── test_server.py
├── test_tcp_proxy.py
├── test_windows_setup.py
└── tests
    ├── test_base_parser.py
    ├── test_mcp_server.py
    ├── test_tool_utils.py
    └── test_utils.py
```

# Files

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/etl_parser.py:
--------------------------------------------------------------------------------

```python
"""Windows ETL (Event Trace Log) file parser implementation."""

import os
import platform
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from uuid import uuid4

from ..core.models import LogRecord, LogSource, LogType
from .base import BaseParser


class EtlParser(BaseParser):
    """Parser for Windows ETL (Event Trace Log) files.

    Tries multiple backends in order of preference: a cached parser, a
    streaming parser for files over 50 MB, the ``etl-parser`` library,
    and finally a Windows-native fallback (``tracerpt.exe``-based).
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize ETL parser.

        Args:
            config: Parser configuration.
        """
        super().__init__(config)
        # Backend handles; populated by _init_parser().
        self.etl_parser = None  # the etl-parser module, if importable
        self.windows_parser = None  # EtlWindowsParser fallback, if usable
        self._init_parser()

    def _init_parser(self) -> None:
        """Detect which ETL parsing backend is available."""
        try:
            # Preferred backend: the etl-parser library.
            import etl_parser

            self.etl_parser = etl_parser
        except ImportError:
            self.etl_parser = None
            # Fallback backend: Windows-native parsing.
            try:
                from .etl_windows_parser import EtlWindowsParser

                self.windows_parser = EtlWindowsParser()
                if not self.windows_parser.is_available():
                    self.windows_parser = None
            except Exception:
                # Narrowed from a bare `except:` — import or probe failure
                # simply means no Windows fallback is available.
                self.windows_parser = None

    def is_available(self) -> bool:
        """Check if any ETL parsing backend is available."""
        return self.etl_parser is not None or self.windows_parser is not None

    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse ETL log records from a file.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.

        Yields:
            LogRecord objects parsed from the ETL file.

        Raises:
            RuntimeError: If no backend is available or all backends fail.
            FileNotFoundError: If the file does not exist.
            ValueError: If the file does not have a ``.etl`` extension.
        """
        if not self.is_available():
            raise RuntimeError(
                "ETL parsing is not available. Please install etl-parser: pip install etl-parser "
                "or ensure tracerpt.exe is available on Windows."
            )

        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"ETL file not found: {file_path}")

        if not str(path).lower().endswith('.etl'):
            raise ValueError(f"File does not appear to be an ETL file: {file_path}")

        # Always try the cached parser first for better performance.
        # NOTE(review): if the cached parser raises after yielding some
        # records, the fallback below may re-emit them; this best-effort
        # behavior is inherited from the original design.
        try:
            from .etl_cached_parser import EtlCachedParser

            cached_parser = EtlCachedParser(self.config)
            if cached_parser.is_available():
                yield from cached_parser.parse_file(source, file_path)
                return
        except Exception:
            # Cached parser failed; for large files, try the streaming
            # parser before falling through to the regular backends.
            file_size_mb = path.stat().st_size / (1024 * 1024)
            if file_size_mb > 50:  # streaming threshold, in megabytes
                try:
                    from .etl_large_file_parser import EtlLargeFileParser

                    large_parser = EtlLargeFileParser(self.config)
                    if large_parser.is_available():
                        yield from large_parser.parse_file(source, file_path)
                        return
                except Exception:
                    # Streaming parser failed too; fall through to the
                    # regular backends below.
                    pass

        # Try the etl-parser library if it imported successfully.
        if self.etl_parser is not None:
            try:
                # Only ETLParser is needed (the original also imported
                # unused names ETL and build_from_stream).
                from etl_parser import ETLParser

                with open(path, 'rb') as etl_file:
                    parser = ETLParser(etl_file)

                    # Convert every record the library produces; skip
                    # records that fail conversion (None results).
                    for record in parser:
                        log_record = self._convert_etl_record(source, record)
                        if log_record:
                            yield log_record
                return  # Success, exit
            except Exception as e:
                # Only surface the error if no Windows fallback exists;
                # chain the cause so the original traceback is preserved.
                if self.windows_parser is None:
                    raise RuntimeError(f"Failed to parse ETL file: {e}") from e

        # Last resort: Windows-native parser.
        if self.windows_parser is not None:
            try:
                yield from self.windows_parser.parse_file(source, file_path)
            except Exception as e:
                raise RuntimeError(
                    f"Failed to parse ETL file with Windows parser: {e}"
                ) from e
        else:
            raise RuntimeError("No ETL parser available")

    def _convert_etl_record(self, source: LogSource, etl_record: Any) -> Optional[LogRecord]:
        """Convert an ETL record to a LogRecord.

        Args:
            source: The log source information.
            etl_record: The ETL record from etl-parser.

        Returns:
            LogRecord or None if conversion fails.
        """
        try:
            # Extract common ETW fields, defaulting when absent.
            record_data = {
                "provider_name": getattr(etl_record, "provider_name", "Unknown"),
                "event_id": getattr(etl_record, "event_id", 0),
                "level": getattr(etl_record, "level", 0),
                "task": getattr(etl_record, "task", 0),
                "opcode": getattr(etl_record, "opcode", 0),
                "keywords": getattr(etl_record, "keywords", 0),
                "process_id": getattr(etl_record, "process_id", 0),
                "thread_id": getattr(etl_record, "thread_id", 0),
            }

            # Timestamp attribute varies by backend record type.
            timestamp = None
            if hasattr(etl_record, "system_time"):
                timestamp = etl_record.system_time
            elif hasattr(etl_record, "timestamp"):
                timestamp = etl_record.timestamp

            # Payload attribute also varies; prefer user_data.
            if hasattr(etl_record, "user_data"):
                record_data["user_data"] = etl_record.user_data
            elif hasattr(etl_record, "event_data"):
                record_data["event_data"] = etl_record.event_data

            # Add any extended data if present.
            if hasattr(etl_record, "extended_data"):
                record_data["extended_data"] = etl_record.extended_data

            return LogRecord(
                source_id=source.id,
                timestamp=timestamp,
                data=record_data,
                raw_content=str(etl_record) if self.config.get("include_raw", False) else None
            )

        except Exception as e:
            # Log the error (when verbose) but keep processing the stream.
            if self.config.get("verbose", False):
                print(f"Failed to convert ETL record: {e}")
            return None

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse ETL log records from content string.

        Note: ETL files are binary and cannot be parsed from string content.

        Args:
            source: The log source information.
            content: String content (not supported for ETL).

        Raises:
            NotImplementedError: ETL files must be parsed from file.
        """
        raise NotImplementedError(
            "ETL files are binary and must be parsed from file, not string content"
        )

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Validate if the file can be parsed by this parser.

        Args:
            file_path: Path to validate.

        Returns:
            True if file appears to be an ETL file.
        """
        path = Path(file_path)

        # Check file extension first (cheapest test).
        if not str(path).lower().endswith('.etl'):
            return False

        # Check if file exists and is a regular file.
        if not path.exists() or not path.is_file():
            return False

        # Check that at least one parsing backend is available.
        if not self.is_available():
            return False

        # Could add a binary signature check here; ETL files typically
        # start with specific magic bytes.
        return True

    def parse(
        self, path: str, filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime] = None, end_time: Optional[datetime] = None,
        limit: int = 1000, offset: int = 0
    ) -> List[LogRecord]:
        """Parse ETL file with filtering and pagination.

        Args:
            path: Path to the ETL file.
            filters: Optional filters to apply.
            start_time: Optional start time filter.
            end_time: Optional end time filter.
            limit: Maximum number of records to return.
            offset: Number of records to skip (applied after filtering).

        Returns:
            List of LogRecord objects.
        """
        # Temporary source object required by the parse_file() interface.
        temp_source = LogSource(
            name="temp_etl",
            type=LogType.ETL,
            path=path,
            metadata={}
        )

        records: List[LogRecord] = []
        skipped = 0

        for record in self.parse_file(temp_source, path):
            # Time-window filters; records without timestamps pass through.
            if start_time and record.timestamp and record.timestamp < start_time:
                continue
            if end_time and record.timestamp and record.timestamp > end_time:
                continue

            # Field filters.
            if filters and not self._match_filters(record, filters):
                continue

            # Pagination: skip `offset` matching records, then collect.
            if skipped < offset:
                skipped += 1
                continue

            records.append(record)

            if len(records) >= limit:
                break

        return records

    def _match_filters(self, record: LogRecord, filters: Dict[str, Any]) -> bool:
        """Check if a record matches the provided filters.

        Args:
            record: The log record to check.
            filters: Dictionary of filters to apply. Values may be a
                scalar (exact match), a list (match any), or a dict of
                operators (see _match_complex_filter).

        Returns:
            True if record matches all filters.
        """
        for key, value in filters.items():
            record_value = record.data.get(key)

            if isinstance(value, list):
                # Match any value in the list.
                if record_value not in value:
                    return False
            elif isinstance(value, dict):
                # Complex operator filter (e.g. {"$gte": 4} for level >= 4).
                if not self._match_complex_filter(record_value, value):
                    return False
            else:
                # Exact match.
                if record_value != value:
                    return False

        return True

    def _match_complex_filter(self, value: Any, filter_spec: Dict[str, Any]) -> bool:
        """Match a value against a complex filter specification.

        Args:
            value: The value to check (None when the record lacks the field).
            filter_spec: Dictionary with Mongo-style filter operators.

        Returns:
            True if value matches every operator in the filter.
        """
        for op, filter_value in filter_spec.items():
            if op in ("$gte", "$gt", "$lte", "$lt"):
                # Fix: a missing field (None) cannot satisfy an ordering
                # comparison; previously this raised TypeError on Python 3.
                if value is None:
                    return False
                if op == "$gte" and not (value >= filter_value):
                    return False
                if op == "$gt" and not (value > filter_value):
                    return False
                if op == "$lte" and not (value <= filter_value):
                    return False
                if op == "$lt" and not (value < filter_value):
                    return False
            elif op == "$ne" and not (value != filter_value):
                return False
            elif op == "$in" and value not in filter_value:
                return False
            elif op == "$nin" and value in filter_value:
                return False

        return True
```

--------------------------------------------------------------------------------
/docs/example-context-docs/mcp-ai-agent-prd.md:
--------------------------------------------------------------------------------

```markdown
# MCP AI Agent Product Requirements Document (PRD)

## Document Information
- **Product Name**: MCP AI Agent
- **Version**: 1.0
- **Date**: January 2025
- **Status**: Draft
- **Stakeholders**: Development Team, DevOps, Product Management

## Table of Contents
1. [Executive Summary](#executive-summary)
2. [Product Overview](#product-overview)
3. [Business Objectives](#business-objectives)
4. [User Personas](#user-personas)
5. [Functional Requirements](#functional-requirements)
6. [Non-Functional Requirements](#non-functional-requirements)
7. [User Stories](#user-stories)
8. [Technical Requirements](#technical-requirements)
9. [Success Metrics](#success-metrics)
10. [Risks and Mitigations](#risks-and-mitigations)
11. [Timeline and Milestones](#timeline-and-milestones)
12. [Appendix](#appendix)

## Executive Summary

The MCP AI Agent is a command-line tool that enables users to interact with MCP (Model Context Protocol) servers through an intelligent conversational interface. By combining AI capabilities with MCP server functionality, the agent provides an intuitive way to access and utilize MCP tools without requiring deep technical knowledge of the underlying protocol.

### Key Value Propositions
- **Simplified Access**: Natural language interface to complex MCP operations
- **Intelligent Automation**: AI-driven tool selection and execution
- **Developer Productivity**: Reduce time spent on manual MCP interactions
- **Extensible Platform**: Plugin architecture for custom functionality

## Product Overview

### Problem Statement
Currently, interacting with MCP servers requires:
- Deep understanding of the MCP protocol
- Manual crafting of JSON-RPC messages
- Complex error handling and retry logic
- Separate tools for different MCP operations

This creates barriers for adoption and reduces productivity for developers and teams using MCP-based systems.

### Solution
The MCP AI Agent solves these problems by providing:
- Natural language interface for MCP interactions
- Automatic protocol handling and error recovery
- Intelligent tool selection based on user intent
- Unified interface for all MCP operations

### Target Market
- **Primary**: Software developers working with MCP servers
- **Secondary**: DevOps engineers managing MCP infrastructure
- **Tertiary**: Technical teams adopting MCP-based architectures

## Business Objectives

### Primary Objectives
1. **Increase MCP Adoption**: Lower barrier to entry for MCP usage
2. **Improve Developer Productivity**: Reduce time spent on MCP operations by 60%
3. **Enable Innovation**: Provide platform for building MCP-based solutions

### Success Criteria
- 1,000+ active users within 6 months
- 80% user satisfaction rating
- 50% reduction in MCP-related support tickets
- 10+ community-contributed plugins

## User Personas

### 1. Alex - Senior Developer
- **Background**: 8 years experience, works with microservices
- **Goals**: Quickly interact with MCP servers during development
- **Pain Points**: Context switching between code and MCP tools
- **Needs**: Fast, reliable CLI tool with good documentation

### 2. Jordan - DevOps Engineer
- **Background**: 5 years experience, manages production infrastructure
- **Goals**: Monitor and troubleshoot MCP server issues
- **Pain Points**: Lack of unified tooling for MCP operations
- **Needs**: Scriptable interface, logging, and monitoring capabilities

### 3. Sam - Junior Developer
- **Background**: 1 year experience, learning MCP architecture
- **Goals**: Understand and use MCP servers effectively
- **Pain Points**: Steep learning curve for MCP protocol
- **Needs**: Intuitive interface with helpful error messages

## Functional Requirements

### Core Features

#### FR1: Connection Management
- **FR1.1**: Connect to MCP servers via WebSocket or HTTP
- **FR1.2**: Support authenticated and unauthenticated connections
- **FR1.3**: Automatic reconnection with exponential backoff
- **FR1.4**: Connection health monitoring and status display

#### FR2: Conversational Interface
- **FR2.1**: Natural language input processing
- **FR2.2**: Context-aware responses
- **FR2.3**: Multi-turn conversation support
- **FR2.4**: Command history and session management

#### FR3: MCP Tool Integration
- **FR3.1**: Automatic tool discovery from MCP server
- **FR3.2**: Intelligent tool selection based on user queries
- **FR3.3**: Tool parameter validation and formatting
- **FR3.4**: Tool execution with progress tracking

#### FR4: CLI Commands
- **FR4.1**: Interactive chat mode (`mcp-agent start`)
- **FR4.2**: Tool listing (`mcp-agent list-tools`)
- **FR4.3**: Configuration display (`mcp-agent config`)
- **FR4.4**: Connection testing (`mcp-agent test-connection`)

#### FR5: Session Management
- **FR5.1**: Save and restore conversation history
- **FR5.2**: Export conversations to various formats
- **FR5.3**: Search through past conversations
- **FR5.4**: Session statistics and analytics

### Advanced Features

#### FR6: Plugin System
- **FR6.1**: Dynamic plugin loading
- **FR6.2**: Plugin marketplace/registry
- **FR6.3**: Plugin development SDK
- **FR6.4**: Plugin sandboxing for security

#### FR7: Monitoring and Logging
- **FR7.1**: Structured logging with configurable levels
- **FR7.2**: Performance metrics collection
- **FR7.3**: Error tracking and reporting
- **FR7.4**: Audit trail for tool executions

## Non-Functional Requirements

### Performance Requirements
- **NFR1**: Response time < 2 seconds for 95% of requests
- **NFR2**: Support 100 concurrent connections per instance
- **NFR3**: Memory usage < 500MB under normal operation
- **NFR4**: Startup time < 3 seconds

### Reliability Requirements
- **NFR5**: 99.9% uptime for core functionality
- **NFR6**: Graceful degradation when MCP server unavailable
- **NFR7**: Zero data loss for conversation history
- **NFR8**: Automatic recovery from transient failures

### Security Requirements
- **NFR9**: Secure storage of API keys and credentials
- **NFR10**: TLS/SSL for all network communications
- **NFR11**: Input sanitization to prevent injection attacks
- **NFR12**: Role-based access control for multi-user scenarios

### Usability Requirements
- **NFR13**: Intuitive CLI with helpful error messages
- **NFR14**: Comprehensive documentation and examples
- **NFR15**: Context-sensitive help system
- **NFR16**: Support for common terminal emulators

### Compatibility Requirements
- **NFR17**: Python 3.8+ compatibility
- **NFR18**: Cross-platform support (Windows, macOS, Linux)
- **NFR19**: MCP protocol version compatibility
- **NFR20**: AI model provider agnostic

## User Stories

### Epic 1: Basic MCP Interaction

**US1.1**: As a developer, I want to connect to my MCP server using simple commands so that I can start working quickly.
```
GIVEN I have the MCP server URL and credentials
WHEN I run `mcp-agent start`
THEN I should be connected and see a confirmation message
```

**US1.2**: As a developer, I want to ask questions in natural language so that I don't need to learn MCP protocol details.
```
GIVEN I am connected to an MCP server
WHEN I type "search for user documentation"
THEN the agent should use the appropriate search tool and display results
```

### Epic 2: Tool Management

**US2.1**: As a developer, I want to see all available tools so that I know what capabilities are available.
```
GIVEN I am connected to an MCP server
WHEN I run `/tools` or `mcp-agent list-tools`
THEN I should see a formatted list of all available tools with descriptions
```

**US2.2**: As a power user, I want to explicitly use specific tools so that I have fine-grained control.
```
GIVEN I know a specific tool exists
WHEN I type `/use search query="specific term"`
THEN the agent should execute that exact tool with the provided parameters
```

### Epic 3: Session Management

**US3.1**: As a user, I want my conversation history saved so that I can reference previous interactions.
```
GIVEN I have been using the agent
WHEN I restart the application
THEN I should be able to access my previous conversations
```

**US3.2**: As a team lead, I want to export conversations so that I can share knowledge with my team.
```
GIVEN I have a conversation with useful information
WHEN I run `/export markdown`
THEN I should get a markdown file with the formatted conversation
```

## Technical Requirements

### System Architecture
- **Microservices-compatible**: Designed for distributed systems
- **Event-driven**: Async operations throughout
- **Pluggable**: Extension points for customization
- **Observable**: Built-in monitoring and tracing

### Development Requirements
- **Language**: Python 3.8+
- **Framework**: Click for CLI, Rich for terminal UI
- **Testing**: Pytest with 80% coverage minimum
- **Documentation**: Sphinx-generated API docs

### Integration Requirements
- **MCP Protocol**: Full compliance with MCP specification
- **AI Models**: Support for OpenAI, Anthropic, and local models
- **Authentication**: OAuth2, API keys, and custom auth
- **Monitoring**: Prometheus metrics, OpenTelemetry traces

### Deployment Requirements
- **Containerization**: Docker images with multi-stage builds
- **Package Management**: PyPI distribution
- **Configuration**: Environment-based configuration
- **Platforms**: Windows, macOS, Linux support

## Success Metrics

### User Adoption Metrics
- **MAU** (Monthly Active Users): Target 1,000 within 6 months
- **User Retention**: 60% 30-day retention rate
- **Session Duration**: Average 15 minutes per session
- **Feature Adoption**: 80% of users using AI features

### Performance Metrics
- **Response Time**: P95 < 2 seconds
- **Error Rate**: < 1% of requests fail
- **Availability**: 99.9% uptime
- **Throughput**: 100 requests/second per instance

### Business Impact Metrics
- **Productivity Gain**: 60% reduction in MCP task time
- **Support Tickets**: 50% reduction in MCP-related issues
- **Community Growth**: 50+ GitHub stars per month
- **Plugin Ecosystem**: 10+ community plugins

## Risks and Mitigations

### Technical Risks

| Risk | Impact | Probability | Mitigation |
|------|--------|-------------|------------|
| MCP Protocol Changes | High | Medium | Version detection and compatibility layer |
| AI Model Unavailability | High | Low | Fallback to basic mode, local model support |
| Performance Degradation | Medium | Medium | Caching, connection pooling, monitoring |
| Security Vulnerabilities | High | Low | Security audits, dependency scanning |

### Business Risks

| Risk | Impact | Probability | Mitigation |
|------|--------|-------------|------------|
| Low User Adoption | High | Medium | User feedback loops, documentation |
| Competition | Medium | High | Unique features, plugin ecosystem |
| Maintenance Burden | Medium | Medium | Automated testing, CI/CD |

## Timeline and Milestones

### Phase 1: MVP (Weeks 1-4)
- Basic MCP connection
- Simple CLI interface
- Core chat functionality
- Basic error handling

### Phase 2: Core Features (Weeks 5-8)
- AI integration
- Tool discovery and execution
- Session management
- Comprehensive testing

### Phase 3: Advanced Features (Weeks 9-12)
- Plugin system
- Performance optimization
- Monitoring integration
- Documentation

### Phase 4: Beta Release (Weeks 13-16)
- Beta testing program
- Bug fixes and improvements
- Performance tuning
- Launch preparation

### Phase 5: GA Release (Week 17+)
- Public release
- Marketing campaign
- Community building
- Ongoing maintenance

## Appendix

### A. Glossary
- **MCP**: Model Context Protocol - Protocol for AI model communication
- **CLI**: Command Line Interface
- **WebSocket**: Protocol for bidirectional communication
- **JSON-RPC**: JSON Remote Procedure Call protocol

### B. References
- [MCP Protocol Specification](https://github.com/modelcontextprotocol/specification)
- [Click Documentation](https://click.palletsprojects.com/)
- [Rich Documentation](https://rich.readthedocs.io/)
- [Pydantic Documentation](https://docs.pydantic.dev/)

### C. Mockups and Wireframes

#### CLI Interface Example
```
$ mcp-agent start
🤖 MCP AI Agent v1.0
📡 Connecting to mcp://localhost:3000...
✅ Connected successfully!

You: What tools are available?
```

--------------------------------------------------------------------------------
/QUICK_START.md:
--------------------------------------------------------------------------------

```markdown
# MCP Log Analyzer - Quick Start Guide

**Version**: 1.0  
**Date**: July 16, 2025

## Table of Contents

1. [Overview](#overview)
2. [Installation](#installation)
3. [Local Usage (Single Machine)](#local-usage-single-machine)
4. [Network Usage (Multi-Machine)](#network-usage-multi-machine)
5. [AI Agent Client Integration](#ai-agent-client-integration)
6. [Security Considerations](#security-considerations)
7. [Production Deployment](#production-deployment)
8. [Troubleshooting](#troubleshooting)

## Overview

The MCP Log Analyzer Server provides comprehensive log analysis and system monitoring capabilities through the Model Context Protocol (MCP). It offers:

- **18 Tools** across 5 categories (log management, Windows, Linux, process monitoring, network diagnostics)
- **15+ Resources** for real-time system information
- **12 Prompts** for comprehensive user guidance
- **Cross-platform support** (Windows and Linux)
- **Multiple transport options** (stdio, TCP, HTTP, SSE)

## Installation

### Prerequisites

- **Python 3.12+**
- **Platform-specific dependencies**:
  - Windows: `pywin32>=300` for Event Log access
  - Linux: Standard system tools (journalctl, netstat, etc.)

### Install Package

```bash
# Navigate to project directory
cd /path/to/MCPsvr

# Install in development mode
pip install -e .

# Install with development dependencies
pip install -e ".[dev]"

# On Windows, ensure pywin32 is properly installed
pip install pywin32>=300
python -c "import win32api"  # Test Windows API access
```

### Verify Installation

```bash
# Test server import
PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('Server import successful')"

# Test server functionality
python check_server.py
```

## Local Usage (Single Machine)

### Standard MCP Mode (Recommended)

Use this mode for connecting with Claude Code or other MCP clients:

```bash
# Start MCP server (stdio mode)
python main.py

# Add to Claude Code
claude mcp add mcp-log-analyzer python main.py

# List MCP servers
claude mcp list

# Remove MCP server
claude mcp remove mcp-log-analyzer
```

**Important**: MCP servers don't show output when started - they communicate via stdin/stdout with MCP clients.

### TCP Mode (For Testing/Development)

```bash
# Start server in TCP mode for local testing
python main_tcp.py --tcp --host 127.0.0.1 --port 8080

# Start with verbose logging
python main_tcp.py --tcp --host 127.0.0.1 --port 8080 --verbose

# Start in stdio mode (default)
python main_tcp.py

# Add to Claude
claude mcp add remote-log-analyzer python3 /home/steve/git/MCPsvr/src/mcp_log_analyzer/tcp_proxy.py 192.168.2.202 8088
```

## Network Usage (Multi-Machine)

### Single Command for Network Access

```bash
# Start MCP server accessible across network
python main_tcp.py --tcp --host 0.0.0.0 --port 8080
```

### Multi-Machine Deployment

#### Windows Machines (Example: 5 servers)

```cmd
# Machine 1 (Windows Server 1)
cd C:\path\to\MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8080

# Machine 2 (Windows Server 2)
cd C:\path\to\MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8081

# Machine 3 (Windows Server 3)
cd C:\path\to\MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8082

# Machine 4 (Windows Server 4)
cd C:\path\to\MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8083

# Machine 5 (Windows Server 5)
cd C:\path\to\MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8084
```

#### Linux Machines (Example: 5 servers)

```bash
# Machine 1 (Linux Server 1)
cd /path/to/MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8080

# Machine 2 (Linux Server 2)
cd /path/to/MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8081

# Machine 3 (Linux Server 3)
cd /path/to/MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8082

# Machine 4 (Linux Server 4)
cd /path/to/MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8083

# Machine 5 (Linux Server 5)
cd /path/to/MCPsvr
python main_tcp.py --tcp --host 0.0.0.0 --port 8084
```

### Command Line Options

```bash
python main_tcp.py [OPTIONS]

OPTIONS:
  --tcp                 Enable TCP server mode (default: stdio)
  --host HOST          Host to bind to (default: 0.0.0.0)
  --port PORT          Port to bind to (default: 8080)
  --verbose            Enable verbose logging
  --help               Show help message
```

## AI Agent Client Integration

### Connection Configuration

Your AI Agent client can connect to multiple servers simultaneously:

```python
# Example server configuration for AI Agent
servers = [
    # Windows machines
    {"host": "192.168.1.100", "port": 8080, "name": "Windows-Server-1"},
    {"host": "192.168.1.101", "port": 8081, "name": "Windows-Server-2"},
    {"host": "192.168.1.102", "port": 8082, "name": "Windows-Server-3"},
    {"host": "192.168.1.103", "port": 8083, "name": "Windows-Server-4"},
    {"host": "192.168.1.104", "port": 8084, "name": "Windows-Server-5"},
    
    # Linux machines
    {"host": "192.168.1.200", "port": 8080, "name": "Linux-Server-1"},
    {"host": "192.168.1.201", "port": 8081, "name": "Linux-Server-2"},
    {"host": "192.168.1.202", "port": 8082, "name": "Linux-Server-3"},
    {"host": "192.168.1.203", "port": 8083, "name": "Linux-Server-4"},
    {"host": "192.168.1.204", "port": 8084, "name": "Linux-Server-5"},
]
```

### Available Capabilities

Each server provides:

#### Tools (18 total)
- **Log Management**: register_log_source, list_log_sources, query_logs, analyze_logs
- **Windows System**: test_windows_event_log_access, get_windows_event_log_info, query_windows_events_by_criteria, get_windows_system_health
- **Linux System**: test_linux_log_access, query_systemd_journal, analyze_linux_services, get_linux_system_overview
- **Process Monitoring**: analyze_system_performance, find_resource_intensive_processes, monitor_process_health, get_system_health_summary
- **Network Diagnostics**: test_network_connectivity, test_port_connectivity, analyze_network_connections, diagnose_network_issues

#### Resources (15+)
- **Log Resources**: `logs/sources`, `logs/types`, `logs/analysis-types`
- **Windows Resources**: `windows/system-events/{param}`, `windows/application-events/{param}`
- **Linux Resources**: `linux/systemd-logs/{param}`, `linux/system-logs/{param}`
- **Process Resources**: `processes/list`, `processes/summary`
- **Network Resources**: `network/listening-ports`, `network/established-connections`, `network/all-connections`

#### Prompts (12 total)
- Comprehensive guides for log management, Windows diagnostics, Linux diagnostics, process monitoring, and network troubleshooting

## Security Considerations

### Network Security

```bash
# Bind to specific interface instead of 0.0.0.0 for security
python main_tcp.py --tcp --host 192.168.1.100 --port 8080

# Use non-default port
python main_tcp.py --tcp --host 0.0.0.0 --port 9999
```

### Firewall Configuration

#### Windows
```cmd
# Allow through Windows Firewall
netsh advfirewall firewall add rule name="MCP Log Analyzer" dir=in action=allow protocol=TCP localport=8080
```

#### Linux
```bash
# UFW (Ubuntu/Debian)
sudo ufw allow 8080/tcp

# iptables (CentOS/RHEL)
sudo iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
```

### Access Control

- **Read-only operations**: Server performs only read operations on system data
- **No system modification**: No capability to modify system configuration
- **Input validation**: Comprehensive Pydantic model validation
- **Error sanitization**: Safe error messages without sensitive data exposure

## Production Deployment

### Windows Service

```cmd
# Install as Windows service (requires additional setup)
sc create MCPLogAnalyzer binPath= "python C:\path\to\MCPsvr\main_tcp.py --tcp --host 0.0.0.0 --port 8080"
sc start MCPLogAnalyzer
```

### Linux Systemd Service

```bash
# Create service file: /etc/systemd/system/mcp-log-analyzer.service
[Unit]
Description=MCP Log Analyzer Server
After=network.target

[Service]
Type=simple
User=your-user
WorkingDirectory=/path/to/MCPsvr
ExecStart=/usr/bin/python3 main_tcp.py --tcp --host 0.0.0.0 --port 8080
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target

# Enable and start service
sudo systemctl enable mcp-log-analyzer
sudo systemctl start mcp-log-analyzer
sudo systemctl status mcp-log-analyzer
```

### Docker Deployment

```bash
# Build Docker image
docker build -t mcp-log-analyzer .

# Run container
docker run -d -p 8080:8080 --name mcp-log-analyzer mcp-log-analyzer

# Run with a custom host port (maps host port 9999 to the container's 8080)
docker run -d -p 9999:8080 --name mcp-log-analyzer mcp-log-analyzer
```

### Kubernetes Deployment

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mcp-log-analyzer
spec:
  replicas: 3
  selector:
    matchLabels:
      app: mcp-log-analyzer
  template:
    metadata:
      labels:
        app: mcp-log-analyzer
    spec:
      containers:
      - name: mcp-log-analyzer
        image: mcp-log-analyzer:latest
        ports:
        - containerPort: 8080
        command: ["python", "main_tcp.py", "--tcp", "--host", "0.0.0.0", "--port", "8080"]
        resources:
          limits:
            memory: "1Gi"
            cpu: "500m"
          requests:
            memory: "512Mi"
            cpu: "250m"
---
apiVersion: v1
kind: Service
metadata:
  name: mcp-log-analyzer-service
spec:
  selector:
    app: mcp-log-analyzer
  ports:
  - port: 8080
    targetPort: 8080
  type: LoadBalancer
```

## Troubleshooting

### Common Issues

#### Server Not Starting
```bash
# Check Python version
python --version  # Should be 3.12+

# Test dependencies
pip install -e .

# Check server import
PYTHONPATH=src python3 -c "from mcp_log_analyzer.mcp_server.server import mcp; print('OK')"
```

#### Network Connection Issues
```bash
# Check if server is running
netstat -tuln | grep 8080

# Test connection
telnet your-server-ip 8080

# Check firewall
# Windows: Check Windows Firewall settings
# Linux: Check iptables/ufw rules
```

#### Windows Event Log Access
```bash
# Test Windows API access
python -c "import win32api; print('Windows API available')"

# Install pywin32 if missing
pip install "pywin32>=300"
```

#### Linux System Access
```bash
# Test systemd access
journalctl --version

# Test network tools
which netstat ss ping
```

### Debug Mode

```bash
# Start with verbose logging
python main_tcp.py --tcp --host 0.0.0.0 --port 8080 --verbose

# Check server functionality
python check_server.py

# Test specific tools
python -c "
from mcp_log_analyzer.mcp_server.server import mcp
# Test server capabilities
"
```

### Performance Monitoring

```bash
# Monitor server resource usage
htop  # Linux
taskmgr  # Windows

# Check network connections
netstat -an | grep 8080

# Monitor logs
tail -f /var/log/syslog  # Linux
# Check Event Viewer on Windows
```

## Development and Testing

### Code Quality

```bash
# Format code
black .
isort .

# Type checking
mypy src

# Linting
flake8

# Run all quality checks
black . && isort . && mypy src && flake8
```

### Testing

```bash
# Run all tests with proper PYTHONPATH
PYTHONPATH=src python3 -m pytest tests/ -v

# Run tests with coverage
PYTHONPATH=src python3 -m pytest --cov=mcp_log_analyzer tests/

# Run specific test file
PYTHONPATH=src python3 -m pytest tests/test_base_parser.py -v
```

### Build and Install

```bash
# Install the package in development mode
pip install -e .

# Install with development dependencies
pip install -e ".[dev]"

# Build distribution
python -m build
```

## Support and Documentation

- **Architecture Documentation**: `/docs/planning/Agent/MCP_Server_Architecture.md`
- **Development Guide**: `/CLAUDE.md`
- **API Documentation**: Auto-generated from code
- **Issues**: Report issues via your project's issue tracker

## Quick Reference

| Command | Purpose |
|---------|---------|
| `python main.py` | Start stdio MCP server (standard mode) |
| `python main_tcp.py --tcp --host 0.0.0.0 --port 8080` | Start network-accessible TCP server |
| `python check_server.py` | Test server functionality |
| `claude mcp add mcp-log-analyzer python main.py` | Add to Claude Code |
| `PYTHONPATH=src python3 -m pytest tests/ -v` | Run tests |
| `black . && isort . && mypy src && flake8` | Code quality checks |

This guide provides everything you need to get started with the MCP Log Analyzer Server in both local and network configurations.
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/server.py:
--------------------------------------------------------------------------------

```python
"""
MCP Log Analyzer Server using FastMCP framework.
"""

import re
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
from uuid import UUID

from mcp.server import FastMCP

from mcp_log_analyzer.core.config import Settings
from mcp_log_analyzer.core.models import AnalysisResult, LogRecord, LogSource
from mcp_log_analyzer.parsers.base import BaseParser

# Global application settings (environment/defaults via pydantic Settings).
settings = Settings()

# Initialize MCP server with custom error handler
import functools
import json

# In-memory trace of request/response pairs for debugging sessions.
# NOTE(review): nothing in this module appends to it — presumably reserved
# for external debugging hooks; confirm before removing.
request_response_log = []

# Custom error handler to log details
def log_mcp_errors(func):
    """Decorator for async handlers: on failure, log the full call context and re-raise.

    On success the wrapped coroutine's result is returned untouched.
    """
    @functools.wraps(func)
    async def _traced(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception as e:
            # Emit the complete call context before propagating the error.
            for detail in (
                f"Error in {func.__name__}: {e}",
                f"Error type: {type(e).__name__}",
                f"Function: {func.__name__}",
                f"Args: {args}",
                f"Kwargs: {kwargs}",
            ):
                logger.error(detail)
            raise
    return _traced

# Tool wrapper to log all tool calls
def debug_tool(tool_func):
    """Decorator for tool coroutines: log every invocation, its outcome, and any failure."""
    @functools.wraps(tool_func)
    async def _instrumented(*args, **kwargs):
        name = tool_func.__name__
        logger.info(f"Tool '{name}' called")
        logger.info(f"  Args: {args}")
        logger.info(f"  Kwargs: {kwargs}")
        try:
            outcome = await tool_func(*args, **kwargs)
            logger.info(f"Tool '{name}' returned successfully")
            return outcome
        except Exception as e:
            logger.error(f"Tool '{name}' failed: {e}")
            logger.error(f"  Exception type: {type(e).__name__}")
            logger.error(f"  Exception args: {e.args}")
            raise

    return _instrumented

# For now, we'll use the standard FastMCP and add logging via decorators
# The "Invalid request parameters" error is likely coming from the MCP protocol layer
# when tool arguments don't match expected signatures

# Initialize MCP server
# Declared runtime dependencies are advertised to MCP clients via FastMCP.
mcp = FastMCP(
    name="mcp-log-analyzer",
    version="0.1.0",
    dependencies=["pandas>=1.3.0", "psutil>=5.9.0"],
)

# Storage with persistence
from mcp_log_analyzer.core.state_manager import get_state_manager

state_manager = get_state_manager()
# Registry of log sources; None until first access (see get_log_sources).
_log_sources: Optional[Dict[str, LogSource]] = None  # Lazy loaded
# Parser instances keyed by source type ("evt", "csv", "etl", ...).
parsers: Dict[str, BaseParser] = {}

# Module logger used by the decorators and lazy-loading helpers below.
import logging
logger = logging.getLogger(__name__)

# Lazy loading wrapper for log sources
def get_log_sources() -> Dict[str, LogSource]:
    """Return the shared log-source registry, loading persisted state on first use."""
    global _log_sources
    if _log_sources is not None:
        return _log_sources
    logger.info("Lazy loading persisted log sources...")
    _log_sources = state_manager.load_log_sources()
    logger.info(f"Loaded {len(_log_sources)} persisted log sources")
    return _log_sources

# Create a proxy object that acts like a dict but lazy loads
class LazyLogSources:
    """Dict-like facade over the persisted log-source registry.

    Defers loading from the state manager until the first access, then
    forwards every mapping operation to the underlying dictionary.
    """

    @staticmethod
    def _store():
        # Resolve the backing dict (triggers the lazy load on first call).
        return get_log_sources()

    def __getitem__(self, key):
        return self._store()[key]

    def __setitem__(self, key, value):
        global _log_sources
        store = self._store()
        store[key] = value
        # Keep the module-level reference in sync.
        _log_sources = store

    def __delitem__(self, key):
        global _log_sources
        store = self._store()
        del store[key]
        # Keep the module-level reference in sync.
        _log_sources = store

    def __contains__(self, key):
        return key in self._store()

    def __len__(self):
        return len(self._store())

    def __iter__(self):
        return iter(self._store())

    def values(self):
        return self._store().values()

    def keys(self):
        return self._store().keys()

    def items(self):
        return self._store().items()

    def get(self, key, default=None):
        return self._store().get(key, default)

    def clear(self):
        self._store().clear()


# Use the lazy loader
log_sources = LazyLogSources()


# Add a mock parser for testing on non-Windows systems
class MockParser(BaseParser):
    """Stand-in parser used when a real platform parser is unavailable."""

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        """Yield a single canned record attributed to *source*."""
        yield LogRecord(source_id=source.id, data={"test": "data"})

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Yield a single canned record attributed to *source*."""
        yield LogRecord(source_id=source.id, data={"test": "data"})

    def parse(self, path: str, **kwargs) -> List[LogRecord]:
        """Return one canned record with the nil UUID as its source id."""
        nil_source = UUID("00000000-0000-0000-0000-000000000000")
        return [LogRecord(source_id=nil_source, data={"test": "data"})]

    def analyze(
        self, logs: List[LogRecord], analysis_type: str = "summary"
    ) -> AnalysisResult:
        """Return a trivial result that only counts the supplied records."""
        return AnalysisResult(analysis_type=analysis_type, summary={"total": len(logs)})


# Only initialize Windows Event Log parser on Windows
import platform

if platform.system() == "Windows":
    try:
        from mcp_log_analyzer.parsers.evt_parser import EventLogParser

        parsers["evt"] = EventLogParser()
    except ImportError:
        # pywin32 not available, use mock parser
        parsers["evt"] = MockParser()
else:
    # Use mock parser on non-Windows systems
    parsers["evt"] = MockParser()

# Add real CSV parser and mock parsers for others
try:
    from mcp_log_analyzer.parsers.csv_parser import CsvLogParser

    parsers["csv"] = CsvLogParser()
except ImportError:
    parsers["csv"] = MockParser()

# Add ETL parser for Windows Event Trace Logs
# The ETL parser has its own runtime availability check in addition to the
# import guard, so both failure paths fall back to the mock.
try:
    from mcp_log_analyzer.parsers.etl_parser import EtlParser

    etl_parser = EtlParser()
    if etl_parser.is_available():
        parsers["etl"] = etl_parser
    else:
        parsers["etl"] = MockParser()
except ImportError:
    parsers["etl"] = MockParser()

# Add mock parsers for other types not yet implemented
parsers["json"] = MockParser()
parsers["xml"] = MockParser()
parsers["text"] = MockParser()

# Add alias for backward compatibility
# Allow "event" to map to "evt" for users who were using the old name
parser_aliases = {
    "event": "evt"  # Map old name to new name
}


# Utility Functions
def parse_time_param(time_str: str) -> Optional[datetime]:
    """Parse a time parameter into a ``datetime``.

    Accepted forms:
    - ``None``-ish sentinels: empty/falsy string or "none" (case-insensitive)
      -> returns ``None``
    - Relative offsets from now: "<N>s", "<N>m", "<N>h", "<N>d", "<N>w"
      (seconds, minutes, hours, days, weeks), e.g. "30m" or "2d"
    - Absolute timestamps in several common formats, plus an ISO-8601
      fallback (handles the "T" separator, e.g. "2024-01-15T10:30:00")

    Raises:
        ValueError: if the string matches none of the supported formats.
    """
    if not time_str or time_str.lower() == "none":
        return None

    # Relative offsets, e.g. "30m", "1h", "2d".
    match = re.match(r"^(\d+)([smhdw])$", time_str)
    if match:
        amount = int(match.group(1))
        unit_name = {
            "s": "seconds",
            "m": "minutes",
            "h": "hours",
            "d": "days",
            "w": "weeks",
        }[match.group(2)]
        return datetime.now() - timedelta(**{unit_name: amount})

    # Absolute timestamps; tried in order, first successful parse wins.
    datetime_formats = [
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%d %H:%M",
        "%Y-%m-%d",
        "%d/%m/%Y %H:%M:%S",
        "%d/%m/%Y %H:%M",
        "%d/%m/%Y",
    ]
    for fmt in datetime_formats:
        try:
            return datetime.strptime(time_str, fmt)
        except ValueError:
            continue

    # Last resort: ISO-8601 (covers the "T" separator and fractional seconds).
    try:
        return datetime.fromisoformat(time_str)
    except ValueError:
        pass

    raise ValueError(f"Cannot parse time: {time_str}")


# Prompts
@mcp.prompt()
async def log_analysis_quickstart() -> str:
    """
    A guide to get started with log analysis.

    This prompt provides step-by-step instructions for
    beginning log analysis with the MCP Log Analyzer.

    Returns a static markdown-formatted guide string; takes no arguments
    and has no side effects.
    """
    return """Welcome to MCP Log Analyzer! Here's how to get started:

1. **Register a Log Source**
   First, register the log file or directory you want to analyze:
   - Use the `register_log_source` tool
   - Specify a unique name, source type, and path
   - Example: Register Windows System logs as "system-logs"

2. **Query Logs**
   Retrieve logs from your registered source:
   - Use the `query_logs` tool
   - Apply filters, time ranges, and pagination
   - Start with a small limit to preview the data

3. **Analyze Logs**
   Perform deeper analysis on your logs:
   - Use the `analyze_logs` tool
   - Choose from summary, pattern, or anomaly analysis
   - Review the results to gain insights

4. **Test System Resources**
   Use diagnostic tools to test system health:
   - `test_system_resources_access` - Check system monitoring capabilities
   - `test_windows_event_log_access` - Test Windows Event Log access
   - `test_linux_log_access` - Test Linux log file access
   - `test_network_tools_availability` - Check network diagnostic tools

5. **Explore Resources**
   Check available resources for more information:
   - logs://sources - View registered sources
   - logs://types - See supported log formats
   - logs://analysis-types - Learn about analysis options

Need help with a specific log type? Just ask!"""


@mcp.prompt()
async def troubleshooting_guide() -> str:
    """
    A guide for troubleshooting common log analysis issues.

    This prompt helps users resolve common problems when
    working with log files.

    Returns a static markdown-formatted guide string; takes no arguments
    and has no side effects.
    """
    return """Log Analysis Troubleshooting Guide:

**Common Issues and Solutions:**

1. **"Access Denied" Errors**
   - Ensure you have read permissions for the log files
   - For Windows Event Logs, run with appropriate privileges
   - Check file paths are correct and accessible
   - Use `test_windows_event_log_access` or `test_linux_log_access` tools

2. **"Parser Not Found" Errors**
   - Verify the source_type matches supported types
   - Use logs://types resource to see available parsers
   - Ensure the log format matches the selected parser

3. **Empty Results**
   - Check your filters aren't too restrictive
   - Verify the time range includes log entries
   - Ensure the log file isn't empty or corrupted

4. **Performance Issues**
   - Use pagination (limit/offset) for large log files
   - Apply filters to reduce data volume
   - Consider analyzing smaller time ranges
   - Use `analyze_system_performance` tool to check system resources

5. **Network Issues**
   - Use `test_network_connectivity` to check internet access
   - Use `test_port_connectivity` to check specific ports
   - Use `diagnose_network_issues` for comprehensive network diagnosis

6. **System Health**
   - Use `get_system_health_summary` for overall system status
   - Use `monitor_process_health` to check specific processes
   - Use `find_resource_intensive_processes` to identify performance issues

Still having issues? Provide the error message and log source details for specific help."""


@mcp.prompt()
async def windows_event_log_guide() -> str:
    """
    A comprehensive guide for analyzing Windows Event Logs.

    This prompt provides detailed information about working
    with Windows Event Logs specifically.

    Returns a static markdown-formatted guide string; takes no arguments
    and has no side effects.
    """
    return """Windows Event Log Analysis Guide:

**Getting Started with Windows Event Logs:**

1. **Common Log Types**
   - System: Hardware, drivers, system components
   - Application: Software events and errors
   - Security: Audit and security events
   - Setup: Installation and update logs

2. **Testing Access**
   Use `test_windows_event_log_access` tool to check if you can access Event Logs
   Use `get_windows_system_health` tool for a quick health overview

3. **Registering Event Logs**
   ```
   register_log_source(
     name="system-events",
     source_type="evt",
     path="System"  # Use log name, not file path
   )
   ```

4. **Diagnostic Tools**
   - `get_windows_event_log_info` - Get detailed info about specific logs
   - `query_windows_events_by_criteria` - Filter events by ID, level, or time
   - `get_windows_system_health` - Overall system health from Event Logs

5. **Useful Filters**
   - Event ID: Filter specific event types
   - Level: Error, Warning, Information
   - Source: Filter by event source
   - Time range: Focus on specific periods

6. **Common Event IDs**
   - 6005/6006: System startup/shutdown
   - 7001/7002: User logon/logoff
   - 41: Unexpected shutdown
   - 1074: System restart/shutdown reason

7. **Analysis Tips**
   - Start with summary analysis for overview
   - Use pattern analysis to find recurring issues
   - Apply anomaly detection for unusual events
   - Correlate events across different logs

**Example Workflow:**
1. Test access with `test_windows_event_log_access`
2. Get system health with `get_windows_system_health`
3. Register System and Application logs
4. Query recent errors and warnings with `query_windows_events_by_criteria`
5. Analyze patterns in error events

Need help with specific event IDs or analysis scenarios? Just ask!"""


from .prompts import register_all_prompts
from .resources import register_all_resources

# Register all tools, resources, and prompts
# NOTE: registration is a module-import side effect — importing this module
# fully configures the `mcp` server instance defined above.
from .tools import register_all_tools

register_all_tools(mcp)
register_all_resources(mcp)
register_all_prompts(mcp)

```

--------------------------------------------------------------------------------
/mcp_tcp_client.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
TCP client for connecting to remote MCP servers.
Bridges stdio to TCP connection with automatic reconnection and heartbeat support.
"""

import asyncio
import sys
import json
import argparse
import logging
import time
from typing import Optional, Tuple, Dict, Any
from collections import deque

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


class TCPClient:
    """TCP client with automatic reconnection and heartbeat support.

    Bridges the local stdio streams to a remote MCP TCP server: stdin
    lines are forwarded to the socket and socket lines to stdout.  The
    client tracks MCP initialization so requests issued before the
    handshake completes are buffered and replayed, and exchanges
    application-level heartbeats with servers that support them.
    """
    
    def __init__(self, host: str, port: int, reconnect_delay: float = 2.0, 
                 max_reconnect_attempts: int = 50, heartbeat_interval: float = 30.0):
        self.host = host
        self.port = port
        self.reconnect_delay = reconnect_delay
        self.max_reconnect_attempts = max_reconnect_attempts
        self.heartbeat_interval = heartbeat_interval
        # A server is considered silent after two missed heartbeat intervals.
        self.heartbeat_timeout = heartbeat_interval * 2
        self.connection_state = {
            'connected': False,  # TCP link currently up
            'last_heartbeat_sent': 0,  # epoch seconds of last heartbeat we sent
            'last_heartbeat_received': 0,  # epoch seconds of last response seen
            'server_supports_heartbeat': False,  # set once a heartbeat_response arrives
            'mcp_initialized': False,  # MCP handshake fully completed
            'initialization_in_progress': False,  # initialize sent, awaiting response
            'pending_initialize_id': None  # JSON-RPC id of the in-flight initialize
        }
        self.reader: Optional[asyncio.StreamReader] = None
        self.writer: Optional[asyncio.StreamWriter] = None
        self.reconnect_attempts = 0
        self.should_reconnect = True
        # Requests received before MCP initialization finishes; replayed later.
        self.buffered_requests = deque()
    
    async def connect(self) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
        """Connect to TCP server with retry logic.

        Retries up to ``max_reconnect_attempts`` times with
        ``reconnect_delay`` seconds between attempts; on success resets
        the per-connection MCP state and sends the client handshake.

        Raises:
            Exception: when every attempt fails.
        """
        while self.reconnect_attempts < self.max_reconnect_attempts:
            try:
                logger.info(f"Connecting to {self.host}:{self.port} (attempt {self.reconnect_attempts + 1}/{self.max_reconnect_attempts})")
                reader, writer = await asyncio.open_connection(self.host, self.port)
                logger.info(f"Connected to {self.host}:{self.port}")
                
                self.reader = reader
                self.writer = writer
                # Fresh connection: reset initialization tracking and buffers.
                self.connection_state['connected'] = True
                self.connection_state['mcp_initialized'] = False
                self.connection_state['initialization_in_progress'] = False
                self.connection_state['pending_initialize_id'] = None
                self.buffered_requests.clear()
                self.reconnect_attempts = 0
                
                # Send handshake to indicate we support heartbeat
                await self.send_handshake()
                
                return reader, writer
                
            except Exception as e:
                self.reconnect_attempts += 1
                logger.error(f"Connection failed: {e}")
                
                if self.reconnect_attempts < self.max_reconnect_attempts:
                    logger.info(f"Retrying in {self.reconnect_delay} seconds...")
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    logger.error("Max reconnection attempts reached")
                    raise
        
        raise Exception("Failed to connect after maximum attempts")
    
    async def send_handshake(self):
        """Send handshake message to indicate heartbeat support.

        Emitted once per connection, immediately after connecting.
        """
        if self.writer:
            handshake = {
                'type': 'handshake',
                'client_version': '1.0',
                'supports_heartbeat': True
            }
            # Newline-delimited JSON framing, like every other message.
            message_str = json.dumps(handshake) + '\n'
            self.writer.write(message_str.encode('utf-8'))
            await self.writer.drain()
    
    async def send_heartbeat(self):
        """Send heartbeat message and record when it was sent."""
        if self.writer and not self.writer.is_closing():
            heartbeat = {
                'type': 'heartbeat',
                'timestamp': time.time()
            }
            message_str = json.dumps(heartbeat) + '\n'
            self.writer.write(message_str.encode('utf-8'))
            await self.writer.drain()
            self.connection_state['last_heartbeat_sent'] = time.time()
    
    async def handle_heartbeat_response(self, message: dict):
        """Handle heartbeat response from server.

        Records the response time and marks the server as heartbeat-capable.
        """
        self.connection_state['last_heartbeat_received'] = time.time()
        self.connection_state['server_supports_heartbeat'] = True
        
        # Server piggybacks the health of its MCP subprocess on the response.
        if not message.get('mcp_alive', True):
            logger.warning("Server reports MCP process is not alive")
    
    async def disconnect(self):
        """Disconnect from server and mark the connection state down."""
        self.connection_state['connected'] = False
        if self.writer:
            self.writer.close()
            await self.writer.wait_closed()
    
    async def run(self):
        """Main client loop with automatic reconnection.

        Runs the stdin→TCP, TCP→stdout, and heartbeat tasks concurrently;
        when any of them finishes (EOF, socket error, cancellation) the
        others are cancelled and, if allowed, the client reconnects.
        """
        while self.should_reconnect:
            try:
                # Connect to server
                reader, writer = await self.connect()
                
                # Create tasks for communication and heartbeat
                stdin_to_tcp = asyncio.create_task(self.forward_stdin_to_tcp())
                tcp_to_stdout = asyncio.create_task(self.forward_tcp_to_stdout())
                heartbeat_task = asyncio.create_task(self.heartbeat_loop())
                
                # Wait for any task to complete
                done, pending = await asyncio.wait(
                    [stdin_to_tcp, tcp_to_stdout, heartbeat_task],
                    return_when=asyncio.FIRST_COMPLETED
                )
                
                # Cancel remaining tasks
                for task in pending:
                    task.cancel()
                
                # Wait for cancellation
                await asyncio.gather(*pending, return_exceptions=True)
                
                # Disconnect
                await self.disconnect()
                
                # Check if we should reconnect
                if self.should_reconnect and self.reconnect_attempts < self.max_reconnect_attempts:
                    logger.info(f"Connection lost, reconnecting in {self.reconnect_delay} seconds...")
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    break
                    
            except KeyboardInterrupt:
                logger.info("Interrupted by user")
                self.should_reconnect = False
                break
            except Exception as e:
                logger.error(f"Unexpected error: {e}")
                if self.should_reconnect:
                    await asyncio.sleep(self.reconnect_delay)
                else:
                    break
    
    async def forward_stdin_to_tcp(self):
        """Forward stdin to TCP connection with MCP initialization tracking.

        Watches the JSON-RPC stream for the 'initialize' request and the
        'notifications/initialized' notification; any other request that
        arrives before initialization completes is buffered (and answered
        with a temporary -32002 error), then replayed once initialized.
        """
        loop = asyncio.get_event_loop()
        stdin = sys.stdin
        
        while self.connection_state['connected']:
            try:
                # Read line from stdin (blocking in executor)
                line = await loop.run_in_executor(None, stdin.readline)
                if not line:
                    logger.info("Stdin closed")
                    # Parent process is gone; do not try to reconnect.
                    self.should_reconnect = False
                    break
                
                # Parse and check if it's an MCP message
                try:
                    message = json.loads(line.strip())
                    
                    # Handle initialize request
                    if message.get('method') == 'initialize':
                        logger.debug("Detected initialize request")
                        self.connection_state['initialization_in_progress'] = True
                        self.connection_state['pending_initialize_id'] = message.get('id')
                    
                    # Handle initialized notification
                    elif message.get('method') == 'notifications/initialized':
                        logger.debug("Detected initialized notification")
                        self.connection_state['mcp_initialized'] = True
                        self.connection_state['initialization_in_progress'] = False
                        
                        # Process any buffered requests after initialization
                        while self.buffered_requests:
                            buffered_line = self.buffered_requests.popleft()
                            if self.writer and not self.writer.is_closing():
                                self.writer.write(buffered_line.encode('utf-8'))
                                await self.writer.drain()
                                # Small pacing delay between replayed requests.
                                await asyncio.sleep(0.01)
                    
                    # Buffer non-init requests if not initialized
                    elif (not self.connection_state['mcp_initialized'] and 
                          'method' in message and 
                          message['method'] not in ['initialize', 'notifications/initialized']):
                        logger.warning(f"Buffering request '{message.get('method')}' until initialization complete")
                        self.buffered_requests.append(line)
                        
                        # Send temporary error response
                        # NOTE(review): the buffered request both receives this
                        # -32002 error AND is replayed after initialization, so the
                        # caller may see two responses for the same id — confirm
                        # downstream clients tolerate this.
                        if 'id' in message:
                            error_response = {
                                'jsonrpc': '2.0',
                                'id': message['id'],
                                'error': {
                                    'code': -32002,
                                    'message': 'Server initialization pending',
                                    'data': 'Waiting for MCP initialization to complete'
                                }
                            }
                            sys.stdout.write(json.dumps(error_response) + '\n')
                            sys.stdout.flush()
                        continue
                        
                except json.JSONDecodeError:
                    # If not JSON, still forward it
                    pass
                
                # Send to TCP
                if self.writer and not self.writer.is_closing():
                    self.writer.write(line.encode('utf-8'))
                    await self.writer.drain()
                else:
                    logger.warning("Connection closed, unable to send")
                    break
                    
            except Exception as e:
                logger.error(f"Error forwarding stdin: {e}")
                break
    
    async def forward_tcp_to_stdout(self):
        """Forward TCP to stdout, handling heartbeats and tracking initialization.

        Heartbeat responses are consumed here and never written to stdout;
        everything else (including non-JSON lines) is passed through verbatim.
        """
        while self.connection_state['connected']:
            try:
                # Read from TCP
                if not self.reader:
                    break
                    
                line = await self.reader.readline()
                if not line:
                    logger.info("TCP connection closed")
                    break
                
                # Try to parse as JSON to check for heartbeat or initialization response
                try:
                    message_str = line.decode('utf-8').strip()
                    message = json.loads(message_str)
                    
                    # Handle heartbeat responses
                    if message.get('type') == 'heartbeat_response':
                        await self.handle_heartbeat_response(message)
                        # Don't forward heartbeats to stdout
                        continue
                    
                    # Check if this is a response to initialize request
                    if (self.connection_state['initialization_in_progress'] and
                        'id' in message and 
                        message['id'] == self.connection_state['pending_initialize_id']):
                        logger.debug("Received initialize response")
                        # Initialization response received, waiting for initialized notification
                        
                except json.JSONDecodeError:
                    # Not JSON, just forward as-is
                    pass
                
                # Write to stdout
                sys.stdout.write(line.decode('utf-8'))
                sys.stdout.flush()
                
            except Exception as e:
                logger.error(f"Error forwarding TCP: {e}")
                break
    
    async def heartbeat_loop(self):
        """Send periodic heartbeats to server.

        A heartbeat timeout is only logged, not acted on — a genuinely
        dead connection is left for the TCP layer to detect.
        """
        await asyncio.sleep(5)  # Initial delay
        
        while self.connection_state['connected']:
            try:
                # Send heartbeat
                await self.send_heartbeat()
                
                # Wait for next interval
                await asyncio.sleep(self.heartbeat_interval)
                
                # Check if server supports heartbeat and we haven't received one recently
                if self.connection_state['server_supports_heartbeat']:
                    time_since_last = time.time() - self.connection_state['last_heartbeat_received']
                    if time_since_last > self.heartbeat_timeout:
                        logger.warning(f"Server heartbeat timeout ({time_since_last:.1f}s since last response)")
                        # Connection might be dead, let TCP detect it
                        
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error in heartbeat loop: {e}")
                await asyncio.sleep(self.heartbeat_interval)


async def bridge_stdio_to_tcp(host: str, port: int, **kwargs):
    """Bridge stdio to TCP connection with automatic reconnection.

    Thin wrapper that builds a TCPClient with the given options and runs it
    until completion.
    """
    await TCPClient(host, port, **kwargs).run()




def main():
    """CLI entry point: parse arguments and run the stdio<->TCP bridge."""
    parser = argparse.ArgumentParser(
        description='TCP client for MCP servers with reconnection support'
    )
    parser.add_argument('host', help='Remote host to connect to')
    parser.add_argument('port', type=int, help='Remote port to connect to')
    parser.add_argument(
        '--reconnect-delay', type=float, default=2.0,
        help='Delay between reconnection attempts in seconds (default: 2)')
    parser.add_argument(
        '--max-reconnect-attempts', type=int, default=50,
        help='Maximum number of reconnection attempts (default: 50)')
    parser.add_argument(
        '--heartbeat-interval', type=float, default=30.0,
        help='Heartbeat interval in seconds (default: 30)')
    parser.add_argument(
        '--no-reconnect', action='store_true',
        help='Disable automatic reconnection')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')

    args = parser.parse_args()

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # A single attempt effectively disables reconnection.
    attempts = 1 if args.no_reconnect else args.max_reconnect_attempts

    asyncio.run(
        bridge_stdio_to_tcp(
            args.host,
            args.port,
            reconnect_delay=args.reconnect_delay,
            max_reconnect_attempts=attempts,
            heartbeat_interval=args.heartbeat_interval,
        )
    )
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/resources/network_resources.py:
--------------------------------------------------------------------------------

```python
"""
Network monitoring MCP resources (netstat functionality).
"""

import platform
import subprocess

from mcp.server import FastMCP


def register_network_resources(mcp: FastMCP):
    """Register all network-related resources with the MCP server.

    Each resource shells out to platform-appropriate tooling (netstat on
    Windows; ss with a netstat fallback on Linux) and returns plain text
    suitable for display. All commands run with timeouts so a hung tool
    cannot block the server.
    """

    @mcp.resource("system://netstat")
    async def get_netstat() -> str:
        """
        Get network connections and statistics with default options.

        Use parameterized versions for specific troubleshooting:
        - system://netstat/listening - Show only listening ports
        - system://netstat/established - Show only established connections
        - system://netstat/all - Show all connections with process info
        - system://netstat/stats - Show network statistics
        - system://netstat/routing - Show routing table
        - system://netstat/port/80 - Show connections on specific port
        """
        # Default to listening ports for quick overview
        return await get_netstat_listening()

    @mcp.resource("system://netstat/listening")
    async def get_netstat_listening() -> str:
        """
        Show all listening ports and services.

        Useful for checking which services are running and what ports are open.
        """
        result = []
        result.append("=== Listening Ports ===\n")

        try:
            if platform.system() == "Windows":
                # Windows netstat command
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "TCP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append("Protocol  Local Address          State")
                    result.append("-" * 50)
                    for line in lines:
                        if "LISTENING" in line:
                            result.append(line.strip())

                # Also show UDP listening
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "UDP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    result.append("\nUDP Listening:")
                    result.append("Protocol  Local Address")
                    result.append("-" * 30)
                    lines = cmd_output.stdout.split("\n")
                    for line in lines:
                        if "UDP" in line and "*:*" in line:
                            result.append(line.strip())

            else:
                # Linux - try ss first (modern), then netstat (legacy)
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tlnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("TCP Listening Ports (ss):")
                        result.append(cmd_output.stdout)

                    # UDP listening
                    cmd_output = subprocess.run(
                        ["ss", "-ulnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nUDP Listening Ports (ss):")
                        result.append(cmd_output.stdout)

                except FileNotFoundError:
                    # Fall back to netstat if ss not available
                    cmd_output = subprocess.run(
                        ["netstat", "-tlnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("TCP Listening Ports (netstat):")
                        result.append(cmd_output.stdout)

                    cmd_output = subprocess.run(
                        ["netstat", "-ulnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nUDP Listening Ports (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting listening ports: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/established")
    async def get_netstat_established() -> str:
        """
        Show all established network connections.

        Useful for seeing active connections and identifying communication patterns.
        """
        result = []
        result.append("=== Established Connections ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-an", "-p", "TCP"],
                    capture_output=True,
                    text=True,
                    timeout=10,
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append(
                        "Protocol  Local Address          Foreign Address        State"
                    )
                    result.append("-" * 70)
                    for line in lines:
                        if "ESTABLISHED" in line:
                            result.append(line.strip())
            else:
                # Linux
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tnp", "state", "established"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append("Established TCP Connections (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tnp"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        lines = cmd_output.stdout.split("\n")
                        result.append("Established TCP Connections (netstat):")
                        result.append(
                            "Proto  Local Address          Foreign Address        State       PID/Program"
                        )
                        result.append("-" * 90)
                        for line in lines:
                            if "ESTABLISHED" in line:
                                result.append(line.strip())

        except Exception as e:
            result.append(f"Error getting established connections: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/all")
    async def get_netstat_all() -> str:
        """
        Show all network connections with process information.

        Comprehensive view including listening, established, and other states.
        """
        result = []
        result.append("=== All Network Connections ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-ano"], capture_output=True, text=True, timeout=15
                )
                if cmd_output.returncode == 0:
                    result.append("All TCP/UDP Connections:")
                    result.append(cmd_output.stdout)
            else:
                try:
                    # Use ss for comprehensive output
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp"], capture_output=True, text=True, timeout=15
                    )
                    if cmd_output.returncode == 0:
                        result.append("All Network Connections (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tulanp"],
                        capture_output=True,
                        text=True,
                        timeout=15,
                    )
                    if cmd_output.returncode == 0:
                        result.append("All Network Connections (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting all connections: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/stats")
    async def get_netstat_stats() -> str:
        """
        Show network interface statistics and protocol statistics.

        Useful for identifying network performance issues and packet loss.
        """
        result = []
        result.append("=== Network Statistics ===\n")

        try:
            if platform.system() == "Windows":
                # Network statistics
                cmd_output = subprocess.run(
                    ["netstat", "-s"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("Protocol Statistics:")
                    result.append(cmd_output.stdout)

                # Interface statistics
                cmd_output = subprocess.run(
                    ["netstat", "-e"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("\nInterface Statistics:")
                    result.append(cmd_output.stdout)
            else:
                # Linux interface statistics
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-i"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Interface Statistics (ss):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-i"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Interface Statistics (netstat):")
                        result.append(cmd_output.stdout)

                # Protocol statistics if available
                try:
                    cmd_output = subprocess.run(
                        ["netstat", "-s"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("\nProtocol Statistics:")
                        result.append(cmd_output.stdout)
                except (FileNotFoundError, subprocess.SubprocessError):
                    # netstat may be missing on modern distros; protocol stats
                    # are best-effort, so skip rather than fail the resource.
                    pass

        except Exception as e:
            result.append(f"Error getting network statistics: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/routing")
    async def get_netstat_routing() -> str:
        """
        Show routing table information.

        Useful for diagnosing routing issues and network connectivity problems.
        """
        result = []
        result.append("=== Routing Table ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-r"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    result.append("IPv4 Routing Table:")
                    result.append(cmd_output.stdout)
            else:
                # Linux routing table
                try:
                    cmd_output = subprocess.run(
                        ["ip", "route", "show"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append("Routing Table (ip route):")
                        result.append(cmd_output.stdout)
                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-r"], capture_output=True, text=True, timeout=10
                    )
                    if cmd_output.returncode == 0:
                        result.append("Routing Table (netstat):")
                        result.append(cmd_output.stdout)

        except Exception as e:
            result.append(f"Error getting routing table: {str(e)}")

        return "\n".join(result)

    @mcp.resource("system://netstat/port/{port}")
    async def get_netstat_port(port: str) -> str:
        """
        Show connections on a specific port.

        Args:
            port: Port number to check (e.g., "80", "443", "22")

        Useful for checking if a service is running on a specific port.
        """
        # Validate the URI parameter before shelling out.
        try:
            port_num = int(port)
        except ValueError:
            return f"Invalid port number: {port}"

        result = []
        result.append(f"=== Connections on Port {port} ===\n")

        try:
            if platform.system() == "Windows":
                cmd_output = subprocess.run(
                    ["netstat", "-ano"], capture_output=True, text=True, timeout=10
                )
                if cmd_output.returncode == 0:
                    lines = cmd_output.stdout.split("\n")
                    result.append(
                        "Protocol  Local Address          Foreign Address        State       PID"
                    )
                    result.append("-" * 80)
                    for line in lines:
                        if f":{port}" in line:
                            result.append(line.strip())
            else:
                # Linux
                try:
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp", f"sport = :{port}"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append(f"Connections on port {port} (ss):")
                        result.append(cmd_output.stdout)

                    # Also check for connections TO this port
                    cmd_output = subprocess.run(
                        ["ss", "-tulanp", f"dport = :{port}"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        result.append(f"\nConnections TO port {port} (ss):")
                        result.append(cmd_output.stdout)

                except FileNotFoundError:
                    cmd_output = subprocess.run(
                        ["netstat", "-tulanp"],
                        capture_output=True,
                        text=True,
                        timeout=10,
                    )
                    if cmd_output.returncode == 0:
                        lines = cmd_output.stdout.split("\n")
                        result.append(f"Connections involving port {port} (netstat):")
                        result.append(
                            "Proto  Local Address          Foreign Address        State       PID/Program"
                        )
                        result.append("-" * 90)
                        for line in lines:
                            if f":{port}" in line:
                                result.append(line.strip())

        except Exception as e:
            result.append(f"Error checking port {port}: {str(e)}")

        return "\n".join(result)

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/evt_parser.py:
--------------------------------------------------------------------------------

```python
"""Parser for Windows Event Logs."""

import datetime
import platform
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from uuid import UUID

from ..core.models import AnalysisResult, LogRecord, LogSource
from .base import BaseParser

# Only import Windows-specific modules on Windows
if platform.system() == "Windows":
    import win32evtlog
    import win32evtlogutil
    import win32api
    import win32con
    from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ
    import pywintypes
    import xml.etree.ElementTree as ET
else:
    # Mock objects for non-Windows platforms
    win32evtlog = None
    win32evtlogutil = None
    win32api = None
    win32con = None
    pywintypes = None
    ET = None
    EVENTLOG_BACKWARDS_READ = None
    EVENTLOG_SEQUENTIAL_READ = None


class EventLogParser(BaseParser):
    """Parser for Windows Event Logs.

    Standard logs (e.g. "System", "Application") are read via the legacy
    win32evtlog ReadEventLog API; custom Application and Services logs
    (names containing '/' or '\\') use the newer EvtQuery API.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the Windows Event Log parser.

        Args:
            config: Optional configuration dictionary

        Raises:
            RuntimeError: If instantiated on a non-Windows platform.
        """
        super().__init__(config)
        if platform.system() != "Windows":
            raise RuntimeError("Windows Event Log parser is only available on Windows")

    def parse_file(self, source: LogSource, file_path: Path) -> Iterator[LogRecord]:
        """Parse a Windows Event Log file.

        Args:
            source: The log source
            file_path: Path to the event log file

        Yields:
            LogRecord: Parsed log records
        """
        # Windows Event Logs are typically accessed by name, not file path
        # This method would need special handling for .evt/.evtx files
        raise NotImplementedError(
            "Direct file parsing not implemented for Windows Event Logs"
        )

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Parse Windows Event Log content.

        Args:
            source: The log source
            content: The log content (not used for Windows Event Logs)

        Yields:
            LogRecord: Parsed log records
        """
        # Windows Event Logs are binary and accessed via API, not text content
        raise NotImplementedError(
            "Content parsing not applicable for Windows Event Logs"
        )

    def parse(
        self,
        path: str,
        filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime.datetime] = None,
        end_time: Optional[datetime.datetime] = None,
        limit: int = 100,
        offset: int = 0,
    ) -> List[LogRecord]:
        """Parse Windows Event Logs.

        Args:
            path: Event log name (e.g., "System", "Application")
            filters: Optional filters
            start_time: Start time filter
            end_time: End time filter
            limit: Maximum number of records
            offset: Number of records to skip

        Returns:
            List of parsed log records (empty on non-Windows platforms or
            on API errors; failures are reported but not raised).
        """
        if platform.system() != "Windows":
            return []

        records = []

        # Handle both standard logs (System, Application) and custom logs
        # Custom logs like "Microsoft-Service Fabric/Admin" need the newer API
        if "/" in path or "\\" in path:
            # This is a custom Application and Services log - use newer EvtQuery API
            records = self._parse_custom_event_log(
                path, filters, start_time, end_time, limit, offset
            )
        else:
            # Standard log - use legacy API
            try:
                hand = win32evtlog.OpenEventLog(None, path)
                try:
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    # Continue reading until we have enough records
                    events_read = 0
                    while len(records) < limit:
                        events = win32evtlog.ReadEventLog(hand, flags, 0)
                        if not events:
                            break  # No more events to read

                        for event in events:
                            if events_read >= offset:
                                record = self._parse_event(event, path)
                                if self._matches_filters(
                                    record, filters, start_time, end_time
                                ):
                                    records.append(record)
                                    if len(records) >= limit:
                                        break
                            events_read += 1
                finally:
                    # Always release the log handle, even on read errors.
                    win32evtlog.CloseEventLog(hand)

            except Exception as e:
                # Best-effort: report the failure and return what we have.
                print(f"Error parsing Windows Event Log '{path}': {str(e)}")

        return records

    def analyze(
        self, logs: List[LogRecord], analysis_type: str = "summary"
    ) -> AnalysisResult:
        """Analyze Windows Event Logs.

        Args:
            logs: List of log records to analyze
            analysis_type: Type of analysis to perform
                ("summary", "pattern", or "anomaly")

        Returns:
            Analysis result

        Raises:
            ValueError: If analysis_type is not recognized.
        """
        if analysis_type == "summary":
            return self._summary_analysis(logs)
        elif analysis_type == "pattern":
            return self._pattern_analysis(logs)
        elif analysis_type == "anomaly":
            return self._anomaly_analysis(logs)
        else:
            raise ValueError(f"Unknown analysis type: {analysis_type}")

    @staticmethod
    def _to_system_time(dt: datetime.datetime) -> str:
        """Format a datetime as the UTC ISO-8601 string used by event-log XPath.

        Event Log XPath queries compare TimeCreated/@SystemTime against
        strings like '2023-01-01T00:00:00.000Z'. Naive datetimes are
        assumed to be local time.
        """
        if dt.tzinfo is None:
            dt = dt.astimezone()  # interpret naive values as local time
        dt = dt.astimezone(datetime.timezone.utc)
        return dt.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

    def _parse_custom_event_log(
        self,
        path: str,
        filters: Optional[Dict[str, Any]],
        start_time: Optional[datetime.datetime],
        end_time: Optional[datetime.datetime],
        limit: int,
        offset: int,
    ) -> List[LogRecord]:
        """Parse custom Application and Services logs using the newer Windows Event Log API.

        Builds an XPath pre-filter where possible; records are re-checked
        in Python via _matches_filters, so the XPath is an optimization.
        """
        records = []

        try:
            # Build query for the custom log
            query_flags = (
                win32evtlog.EvtQueryChannelPath | win32evtlog.EvtQueryReverseDirection
            )

            # Build XPath query if we have filters
            xpath_query = "*"
            if filters or start_time or end_time:
                conditions = []

                if start_time:
                    # @SystemTime comparisons use ISO-8601 UTC strings,
                    # not epoch milliseconds.
                    conditions.append(
                        f"TimeCreated[@SystemTime >= '{self._to_system_time(start_time)}']"
                    )

                if end_time:
                    conditions.append(
                        f"TimeCreated[@SystemTime <= '{self._to_system_time(end_time)}']"
                    )

                if filters and "EventID" in filters:
                    conditions.append(f"EventID={filters['EventID']}")

                if conditions:
                    xpath_query = f"*[System[{' and '.join(conditions)}]]"

            # Query the event log
            query_handle = win32evtlog.EvtQuery(path, query_flags, xpath_query)
            try:
                # Read events
                events_read = 0
                while len(records) < limit:
                    # Get batch of events
                    events = win32evtlog.EvtNext(query_handle, 10)  # Read 10 at a time
                    if not events:
                        break

                    for event in events:
                        try:
                            if events_read >= offset:
                                # Render event as XML to extract data
                                xml_content = win32evtlog.EvtRender(
                                    event, win32evtlog.EvtRenderEventXml
                                )
                                record = self._parse_event_xml(xml_content, path)

                                if self._matches_filters(
                                    record, filters, start_time, end_time
                                ):
                                    records.append(record)
                                    if len(records) >= limit:
                                        break

                            events_read += 1
                        finally:
                            win32evtlog.EvtClose(event)
            finally:
                win32evtlog.EvtClose(query_handle)

        except Exception as e:
            # Fall back to whatever was collected if the API is unavailable
            # or the query fails.
            print(f"Error parsing custom event log '{path}': {str(e)}")

        return records

    def _parse_event_xml(self, xml_content: str, log_name: str) -> LogRecord:
        """Parse event data from XML format.

        Returns a LogRecord with legacy-style fields; on any parse error a
        placeholder record describing the failure is returned instead of
        raising.
        """
        try:
            root = ET.fromstring(xml_content)

            # Extract system data
            system = root.find(".//System")
            event_id = (
                int(system.find("EventID").text)
                if system.find("EventID") is not None
                else 0
            )

            # Handle unsigned conversion
            event_id = event_id & 0xFFFFFFFF

            provider = system.find("Provider")
            provider_name = (
                provider.get("Name", "Unknown") if provider is not None else "Unknown"
            )

            computer = system.find("Computer")
            computer_name = computer.text if computer is not None else "Unknown"

            time_created = system.find("TimeCreated")
            if time_created is not None:
                system_time = time_created.get("SystemTime", "")
                # Parse ISO format timestamp ('Z' suffix -> UTC offset)
                try:
                    timestamp = datetime.datetime.fromisoformat(
                        system_time.replace("Z", "+00:00")
                    )
                except ValueError:
                    timestamp = datetime.datetime.now()
            else:
                timestamp = datetime.datetime.now()

            level = system.find("Level")
            event_type = (
                int(level.text) if level is not None else 4
            )  # Default to Information

            # Map XML levels (Critical=1, Error=2, Warning=3, Info=4,
            # Verbose=5) to the legacy event-type constants used elsewhere
            # in this parser (1=Error, 2=Warning, 4=Information) so that
            # analysis code sees a single scheme.
            level_map = {
                1: 1,  # Critical -> Error
                2: 1,  # Error -> Error
                3: 2,  # Warning -> Warning
                4: 4,  # Information -> Information
                5: 4,  # Verbose -> Information
            }
            event_type = level_map.get(event_type, 4)

            # Extract event data
            event_data = {}
            data_elem = root.find(".//EventData")
            if data_elem is not None:
                for data in data_elem:
                    name = data.get("Name", "")
                    if name:
                        event_data[name] = data.text or ""

            # Try to get rendered message
            message = ""
            rendering = root.find(".//RenderingInfo/Message")
            if rendering is not None:
                message = rendering.text or ""
            else:
                # Build message from event data
                if event_data:
                    message = "; ".join(f"{k}: {v}" for k, v in event_data.items())

            return LogRecord(
                source_id=UUID("00000000-0000-0000-0000-000000000000"),
                timestamp=timestamp,
                data={
                    "EventID": event_id,
                    "EventType": event_type,
                    "EventCategory": 0,  # Not available in new API
                    "SourceName": provider_name,
                    "ComputerName": computer_name,
                    "Message": message,
                    "EventData": event_data,
                    "LogName": log_name,
                },
            )

        except Exception as e:
            print(f"Error parsing event XML: {str(e)}")
            # Return a basic record on error
            return LogRecord(
                source_id=UUID("00000000-0000-0000-0000-000000000000"),
                timestamp=datetime.datetime.now(),
                data={
                    "EventID": 0,
                    "EventType": 4,
                    "EventCategory": 0,
                    "SourceName": "Unknown",
                    "ComputerName": "Unknown",
                    "Message": f"Error parsing event: {str(e)}",
                    "LogName": log_name,
                },
            )

    def _parse_event(self, event, log_name: Optional[str] = None) -> LogRecord:
        """Parse a single Windows event from the legacy ReadEventLog API."""
        try:
            message = win32evtlogutil.SafeFormatMessage(event, log_name)
        except Exception:
            # Message DLL may be missing or unregistered for this source.
            message = "(Unable to format message)"

        return LogRecord(
            source_id=UUID("00000000-0000-0000-0000-000000000000"),  # Placeholder
            timestamp=event.TimeGenerated,
            data={
                "EventID": event.EventID & 0xFFFFFFFF,  # Convert to unsigned
                "EventType": event.EventType,
                "EventCategory": event.EventCategory,
                "SourceName": event.SourceName,
                "ComputerName": event.ComputerName,
                "Message": message,
            },
        )

    def _matches_filters(
        self,
        record: LogRecord,
        filters: Optional[Dict[str, Any]],
        start_time: Optional[datetime.datetime],
        end_time: Optional[datetime.datetime],
    ) -> bool:
        """Check if a record matches the given time window and field filters."""
        if start_time and record.timestamp and record.timestamp < start_time:
            return False
        if end_time and record.timestamp and record.timestamp > end_time:
            return False

        if filters:
            # Each filter key must match exactly when present in the record.
            for key, value in filters.items():
                if key in record.data and record.data[key] != value:
                    return False

        return True

    def _summary_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform summary analysis: counts by event type and source."""
        event_types: Dict[Any, int] = {}
        sources: Dict[Any, int] = {}

        for log in logs:
            event_type = log.data.get("EventType", "Unknown")
            event_types[event_type] = event_types.get(event_type, 0) + 1

            source = log.data.get("SourceName", "Unknown")
            sources[source] = sources.get(source, 0) + 1

        return AnalysisResult(
            analysis_type="summary",
            summary={
                "total_events": len(logs),
                "event_types": event_types,
                "sources": sources,
            },
        )

    def _pattern_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform pattern analysis: repeated EventIDs and their frequency."""
        patterns = []

        # Group by EventID
        event_groups: Dict[Any, List[LogRecord]] = {}
        for log in logs:
            event_id = log.data.get("EventID", "Unknown")
            if event_id not in event_groups:
                event_groups[event_id] = []
            event_groups[event_id].append(log)

        # Only IDs occurring more than once count as a pattern.
        for event_id, events in event_groups.items():
            if len(events) > 1:
                patterns.append(
                    {
                        "pattern": f"EventID {event_id}",
                        "count": len(events),
                        "frequency": len(events) / len(logs),
                    }
                )

        return AnalysisResult(
            analysis_type="pattern",
            summary={"total_patterns": len(patterns)},
            patterns=patterns,
        )

    def _anomaly_analysis(self, logs: List[LogRecord]) -> AnalysisResult:
        """Perform anomaly analysis: flag error-type events."""
        anomalies = []

        # Look for error events (legacy EventType 1 = Error)
        for log in logs:
            if log.data.get("EventType") == 1:  # Error
                anomalies.append(
                    {
                        "type": "error_event",
                        "event_id": log.data.get("EventID"),
                        "source": log.data.get("SourceName"),
                        "message": log.data.get("Message", "")[:100],
                    }
                )

        return AnalysisResult(
            analysis_type="anomaly",
            summary={"total_anomalies": len(anomalies)},
            anomalies=anomalies,
        )


# For backward compatibility
EvtParser = EventLogParser

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/linux_test_tools.py:
--------------------------------------------------------------------------------

```python
"""
Linux system monitoring and log testing MCP tools.
"""

import platform
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Optional

from mcp.server import FastMCP
from pydantic import BaseModel, Field


class LinuxLogTestRequest(BaseModel):
    """Request model for testing Linux log access.

    Defaults cover both Debian-style (/var/log/syslog) and
    RHEL-style (/var/log/messages) log layouts.
    """

    # Log files to probe for existence and read permission.
    log_paths: List[str] = Field(
        default_factory=lambda: ["/var/log/syslog", "/var/log/messages"],
        description="List of log file paths to test",
    )
    # When True, journalctl availability is probed as well.
    check_journalctl: bool = Field(
        True, description="Whether to test systemd journal access"
    )


class LinuxLogQueryRequest(BaseModel):
    """Request model for querying Linux logs.

    Optional filters default to None; the annotations must therefore be
    Optional[str] — a plain `str` annotation with a None default is
    rejected by pydantic validation.
    """

    service_name: Optional[str] = Field(
        None, description="Specific service to query logs for"
    )
    priority: Optional[str] = Field(
        None,
        description="Log priority (emerg, alert, crit, err, warning, notice, info, debug)",
    )
    # How far back to look; parsed by parse_time_param (e.g. '30m', '2h', '1d').
    time_duration: str = Field(
        "1h", description="Time duration (e.g., '30m', '2h', '1d')"
    )
    max_lines: int = Field(100, description="Maximum number of log lines to return")


class LinuxServiceAnalysisRequest(BaseModel):
    """Request model for analyzing Linux services.

    service_pattern defaults to None; the annotation must therefore be
    Optional[str] — a plain `str` annotation with a None default is
    rejected by pydantic validation.
    """

    service_pattern: Optional[str] = Field(
        None, description="Service name pattern to analyze (e.g., 'nginx', 'apache')"
    )
    include_failed: bool = Field(
        True, description="Include failed services in analysis"
    )


def register_linux_test_tools(mcp: FastMCP):
    """Register all Linux testing tools with the MCP server."""

    @mcp.tool()
    async def test_linux_log_access() -> Dict[str, Any]:
        """
        Test Linux log file and systemd journal access.

        This tool checks if the system can access various Linux log sources
        and provides diagnostic information about available logs.

        Returns:
            A dict with "status" plus, on Linux, per-file access results,
            systemd journal availability/accessibility, and availability
            of common diagnostic commands.
        """
        # All probes below are Linux-specific; short-circuit elsewhere.
        if platform.system() != "Linux":
            return {
                "status": "unavailable",
                "message": "Linux log tools are only available on Linux systems",
                "platform": platform.system(),
            }

        test_results = {
            "log_files": {},
            "systemd_journal": {"available": False, "accessible": False},
            "commands": {},
        }

        # Test common log file access
        common_logs = {
            "/var/log/syslog": "System log (Debian/Ubuntu)",
            "/var/log/messages": "System log (RHEL/CentOS)",
            "/var/log/auth.log": "Authentication log",
            "/var/log/kern.log": "Kernel log",
            "/var/log/dmesg": "Boot messages",
        }

        for log_path, description in common_logs.items():
            path_obj = Path(log_path)
            if path_obj.exists():
                try:
                    # Test read access
                    with open(log_path, "r") as f:
                        f.read(100)  # Read first 100 chars
                    test_results["log_files"][log_path] = {
                        "exists": True,
                        "readable": True,
                        "description": description,
                        "size_mb": round(path_obj.stat().st_size / (1024 * 1024), 2),
                    }
                except PermissionError:
                    # File exists but needs elevated privileges (or group
                    # membership, e.g. `adm`) to read.
                    test_results["log_files"][log_path] = {
                        "exists": True,
                        "readable": False,
                        "description": description,
                        "error": "Permission denied",
                    }
                except Exception as e:
                    test_results["log_files"][log_path] = {
                        "exists": True,
                        "readable": False,
                        "description": description,
                        "error": str(e),
                    }
            else:
                test_results["log_files"][log_path] = {
                    "exists": False,
                    "readable": False,
                    "description": description,
                }

        # Test systemd journal access
        try:
            result = subprocess.run(
                ["journalctl", "--version"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if result.returncode == 0:
                test_results["systemd_journal"]["available"] = True

                # Test actual journal access: journalctl can exist yet be
                # unreadable without systemd-journal group membership.
                try:
                    result = subprocess.run(
                        ["journalctl", "-n", "1", "--no-pager"],
                        capture_output=True,
                        text=True,
                        timeout=5,
                    )
                    test_results["systemd_journal"]["accessible"] = (
                        result.returncode == 0
                    )
                    if result.returncode != 0:
                        test_results["systemd_journal"]["error"] = result.stderr
                except Exception as e:
                    test_results["systemd_journal"]["error"] = str(e)
        except FileNotFoundError:
            # journalctl binary not installed (non-systemd distribution).
            test_results["systemd_journal"]["available"] = False
        except Exception as e:
            test_results["systemd_journal"]["error"] = str(e)

        # Test common system commands
        commands_to_test = ["ss", "netstat", "ps", "top", "systemctl"]
        for cmd in commands_to_test:
            try:
                result = subprocess.run(
                    [cmd, "--version"],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                test_results["commands"][cmd] = {"available": True}
            except FileNotFoundError:
                test_results["commands"][cmd] = {"available": False}
            except Exception:
                # Some commands might not support --version but still exist
                test_results["commands"][cmd] = {
                    "available": True,
                    "version_check_failed": True,
                }

        return {
            "status": "completed",
            "platform": platform.system(),
            "distribution": platform.platform(),
            "test_results": test_results,
        }

    @mcp.tool()
    async def query_systemd_journal(request: LinuxLogQueryRequest) -> Dict[str, Any]:
        """
        Query systemd journal with specific criteria.

        This tool allows filtering systemd journal entries by service,
        priority, and time range for targeted analysis.

        Returns:
            On success: query criteria, log entries, entry count, and the
            journalctl command used.  On failure: a dict with an "error" key.
        """
        if platform.system() != "Linux":
            return {"error": "This tool is only available on Linux systems"}

        try:
            from ..server import parse_time_param

            # Build journalctl command
            cmd = ["journalctl", "--no-pager", "-o", "short"]

            # Add time filter
            if request.time_duration:
                try:
                    start_time = parse_time_param(request.time_duration)
                    since_arg = f"--since={start_time.strftime('%Y-%m-%d %H:%M:%S')}"
                    cmd.append(since_arg)
                except Exception as e:
                    return {"error": f"Invalid time duration: {str(e)}"}

            # Add service filter
            if request.service_name:
                cmd.extend(["-u", request.service_name])

            # Add priority filter
            if request.priority:
                cmd.extend(["-p", request.priority])

            # Add line limit
            cmd.extend(["-n", str(request.max_lines)])

            # Execute command
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)

            if result.returncode == 0:
                # Fix: "".split("\n") yields [""], which previously made an
                # empty journal report one blank entry. Normalize to [].
                output = result.stdout.strip()
                lines = output.split("\n") if output else []
                return {
                    "query_criteria": {
                        "service_name": request.service_name,
                        "priority": request.priority,
                        "time_duration": request.time_duration,
                        "max_lines": request.max_lines,
                    },
                    "log_entries": lines,
                    "total_entries": len(lines),
                    "command_used": " ".join(cmd),
                }
            else:
                return {
                    "error": f"journalctl command failed: {result.stderr}",
                    "command_used": " ".join(cmd),
                }

        except Exception as e:
            return {"error": f"Error querying systemd journal: {str(e)}"}

    @mcp.tool()
    async def analyze_linux_services(
        request: LinuxServiceAnalysisRequest,
    ) -> Dict[str, Any]:
        """
        Analyze Linux services status and recent activity.

        This tool provides an overview of systemd services, their status,
        and recent log activity for system health assessment.

        Returns:
            A dict with an overall "health_status", the parsed service
            lists, the analysis criteria, and a timestamp; or an "error"
            dict on failure.
        """
        if platform.system() != "Linux":
            return {"error": "This tool is only available on Linux systems"}

        try:
            # `systemctl list-units` already includes failed units alongside
            # active ones. The previous code appended `--failed` when
            # include_failed=True, which restricted output to ONLY failed
            # units (so the "active" list was always empty). Instead, always
            # list everything and drop failed units in Python when the caller
            # asked to exclude them.
            cmd = ["systemctl", "list-units", "--type=service", "--no-pager"]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)

            services_info = {"active": [], "failed": [], "summary": {}}

            if result.returncode == 0:
                lines = result.stdout.strip().split("\n")

                for line in lines[1:]:  # Skip header
                    if not line.strip() or "LOAD" in line:
                        continue

                    parts = line.split()
                    if len(parts) >= 4:
                        service_name = parts[0]
                        load_state = parts[1]
                        active_state = parts[2]
                        sub_state = parts[3]

                        # Filter by pattern if specified
                        if (
                            request.service_pattern
                            and request.service_pattern.lower()
                            not in service_name.lower()
                        ):
                            continue

                        service_info = {
                            "name": service_name,
                            "load": load_state,
                            "active": active_state,
                            "sub": sub_state,
                        }

                        if active_state == "active":
                            services_info["active"].append(service_info)
                        elif active_state == "failed" and request.include_failed:
                            services_info["failed"].append(service_info)

            # Get recent journal entries for failed services
            if services_info["failed"]:
                for service in services_info["failed"][
                    :5
                ]:  # Limit to first 5 failed services
                    try:
                        journal_cmd = [
                            "journalctl",
                            "-u",
                            service["name"],
                            "-n",
                            "10",
                            "--no-pager",
                            "--since",
                            "1 hour ago",
                        ]
                        journal_result = subprocess.run(
                            journal_cmd, capture_output=True, text=True, timeout=5
                        )
                        if journal_result.returncode == 0:
                            # Keep only the last 5 lines per service.
                            service["recent_logs"] = (
                                journal_result.stdout.strip().split("\n")[-5:]
                            )
                    except Exception:
                        service["recent_logs"] = ["Unable to fetch recent logs"]

            services_info["summary"] = {
                "total_active": len(services_info["active"]),
                "total_failed": len(services_info["failed"]),
                "pattern_filter": request.service_pattern,
            }

            # Overall system health assessment based on failed-service count.
            if len(services_info["failed"]) == 0:
                health_status = "healthy"
            elif len(services_info["failed"]) < 3:
                health_status = "fair"
            else:
                health_status = "concerning"

            return {
                "health_status": health_status,
                "services": services_info,
                "analysis_criteria": {
                    "service_pattern": request.service_pattern,
                    "include_failed": request.include_failed,
                },
                "timestamp": subprocess.run(
                    ["date", "+%Y-%m-%d %H:%M:%S"], capture_output=True, text=True
                ).stdout.strip(),
            }

        except Exception as e:
            return {"error": f"Error analyzing Linux services: {str(e)}"}

    @mcp.tool()
    async def get_linux_system_overview() -> Dict[str, Any]:
        """
        Get comprehensive Linux system overview.

        This tool provides system information, resource usage,
        and health indicators for Linux systems.

        Returns:
            A dict with "status", a "system_overview" section (system,
            memory, cpu, disk, recent_errors), and a "timestamp".
        """
        if platform.system() != "Linux":
            return {"error": "This tool is only available on Linux systems"}

        try:
            system_info = {}

            # Basic system information
            system_info["system"] = {
                "hostname": subprocess.run(
                    ["hostname"], capture_output=True, text=True
                ).stdout.strip(),
                "uptime": subprocess.run(
                    ["uptime"], capture_output=True, text=True
                ).stdout.strip(),
                "kernel": subprocess.run(
                    ["uname", "-r"], capture_output=True, text=True
                ).stdout.strip(),
                "distribution": platform.platform(),
            }

            # Memory information (/proc/meminfo reports values in kB)
            try:
                with open("/proc/meminfo", "r") as f:
                    meminfo = f.read()
                    mem_lines = meminfo.split("\n")
                    mem_total = next(
                        (line for line in mem_lines if line.startswith("MemTotal:")), ""
                    )
                    mem_available = next(
                        (
                            line
                            for line in mem_lines
                            if line.startswith("MemAvailable:")
                        ),
                        "",
                    )

                system_info["memory"] = {
                    "total": mem_total.split()[1] + " kB" if mem_total else "Unknown",
                    "available": (
                        mem_available.split()[1] + " kB" if mem_available else "Unknown"
                    ),
                }
            except Exception:
                system_info["memory"] = {"error": "Unable to read memory information"}

            # CPU information (1/5/15-minute load averages from /proc/loadavg)
            try:
                with open("/proc/loadavg", "r") as f:
                    loadavg = f.read().strip()
                system_info["cpu"] = {"load_average": loadavg}
            except Exception:
                system_info["cpu"] = {"error": "Unable to read CPU information"}

            # Disk usage for root filesystem
            try:
                df_result = subprocess.run(
                    ["df", "-h", "/"], capture_output=True, text=True
                )
                if df_result.returncode == 0:
                    df_lines = df_result.stdout.strip().split("\n")
                    if len(df_lines) > 1:
                        # Second line of `df -h /` output describes "/" itself.
                        root_disk = df_lines[1].split()
                        system_info["disk"] = {
                            "filesystem": root_disk[0],
                            "size": root_disk[1],
                            "used": root_disk[2],
                            "available": root_disk[3],
                            "use_percent": root_disk[4],
                        }
            except Exception:
                system_info["disk"] = {"error": "Unable to read disk information"}

            # Recent critical logs (priority err and above, last hour)
            try:
                journal_result = subprocess.run(
                    [
                        "journalctl",
                        "-p",
                        "err",
                        "-n",
                        "5",
                        "--no-pager",
                        "--since",
                        "1 hour ago",
                    ],
                    capture_output=True,
                    text=True,
                    timeout=5,
                )
                if journal_result.returncode == 0:
                    recent_errors = journal_result.stdout.strip().split("\n")
                    # "".split("\n") yields [""]; normalize to an empty list.
                    system_info["recent_errors"] = (
                        recent_errors if recent_errors != [""] else []
                    )
                else:
                    system_info["recent_errors"] = ["Unable to fetch recent error logs"]
            except Exception:
                system_info["recent_errors"] = ["Error accessing systemd journal"]

            return {
                "status": "success",
                "system_overview": system_info,
                "timestamp": subprocess.run(
                    ["date", "+%Y-%m-%d %H:%M:%S"], capture_output=True, text=True
                ).stdout.strip(),
            }

        except Exception as e:
            return {"error": f"Error getting Linux system overview: {str(e)}"}

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/parsers/etl_large_file_parser.py:
--------------------------------------------------------------------------------

```python
"""Enhanced ETL parser for large files with streaming support."""

import asyncio
import os
import platform
import subprocess
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Union
from uuid import uuid4
import csv
import logging

from ..core.models import LogRecord, LogSource, LogType
from .base import BaseParser

logger = logging.getLogger(__name__)


class EtlLargeFileParser(BaseParser):
    """Enhanced ETL parser with support for large files using streaming."""

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize ETL large file parser.

        Args:
            config: Parser configuration. Recognized keys:
                chunk_size: records per processing chunk (default 1000).
                verbose: log CSV-row conversion failures when True.
        """
        super().__init__(config)
        self.chunk_size = self.config.get("chunk_size", 1000)  # Records per chunk
        # Full path to tracerpt.exe, or None when unavailable (e.g. non-Windows).
        self.tracerpt_path = self._find_tracerpt()
        # Lazily-created scratch directory for tracerpt CSV output.
        self.temp_dir = None

    def _find_tracerpt(self) -> Optional[str]:
        """Locate tracerpt.exe on the system.

        Returns:
            Absolute path to tracerpt.exe, or None when not running on
            Windows or the executable cannot be found.
        """
        if platform.system() != "Windows":
            return None

        # Common locations for tracerpt.exe
        possible_paths = [
            r"C:\Windows\System32\tracerpt.exe",
            r"C:\Windows\SysWOW64\tracerpt.exe",
        ]

        for path in possible_paths:
            if os.path.exists(path):
                return path

        # Fall back to searching PATH via `where`.
        try:
            result = subprocess.run(
                ["where", "tracerpt.exe"],
                capture_output=True,
                text=True,
                check=False,
            )
            if result.returncode == 0 and result.stdout.strip():
                # `where` may print multiple matches; take the first.
                return result.stdout.strip().split('\n')[0]
        except (OSError, subprocess.SubprocessError):
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only OS/subprocess failures are expected here.
            pass

        return None

    def is_available(self) -> bool:
        """Report whether tracerpt.exe was located and ETL parsing can run."""
        found = self.tracerpt_path
        return found is not None

    def parse_file_streaming(
        self, source: LogSource, file_path: Union[str, Path], 
        limit: int = 1000, offset: int = 0
    ) -> Iterator[LogRecord]:
        """Parse ETL file with streaming to handle large files.

        Converts the binary ETL file to CSV via tracerpt.exe and reads the
        CSV while tracerpt is still writing it, so the first records are
        yielded before the full conversion completes.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.
            limit: Maximum number of records to return.
            offset: Number of records to skip.

        Yields:
            LogRecord objects parsed from the ETL file.

        Raises:
            RuntimeError: If tracerpt.exe is unavailable, fails, or times out.
            FileNotFoundError: If the ETL file does not exist.
        """
        if not self.is_available():
            raise RuntimeError(
                "Windows ETL parsing is not available. tracerpt.exe not found."
            )

        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"ETL file not found: {file_path}")

        # Get file size for logging
        file_size_mb = path.stat().st_size / (1024 * 1024)
        logger.info(f"Processing ETL file: {file_size_mb:.1f} MB")

        # Create a persistent temp directory if not exists
        if self.temp_dir is None:
            self.temp_dir = tempfile.mkdtemp(prefix="etl_parser_")

        output_file = os.path.join(self.temp_dir, f"etl_{uuid4()}.csv")

        try:
            # Use tracerpt with specific parameters for large files
            cmd = [
                self.tracerpt_path,
                str(path),
                "-o", output_file,
                "-of", "CSV",
                "-y",  # Overwrite without prompting
                "-lr",  # Less restrictive; attempt to process badly-formed events
            ]

            # For very large files, we might want to limit the time range
            if file_size_mb > 500:  # If file is over 500MB
                logger.warning(f"Large ETL file ({file_size_mb:.1f} MB), processing may take time")

            # Run tracerpt as a subprocess
            logger.info("Starting tracerpt conversion...")
            process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )

            # Monitor tracerpt process
            import time
            start_time = time.time()
            max_wait_time = 600  # 10 minutes maximum
            check_interval = 5   # Check every 5 seconds

            logger.info(f"Waiting for tracerpt.exe to process {file_size_mb:.1f} MB file...")

            # Give tracerpt a moment to start before the first poll.
            time.sleep(2)

            # Check if process failed immediately
            if process.poll() is not None:
                stdout, stderr = process.communicate()
                if process.returncode != 0:
                    raise RuntimeError(
                        f"tracerpt failed immediately with code {process.returncode}: {stderr}"
                    )

            # Start reading the CSV file as it's being written
            records_yielded = 0
            records_skipped = 0

            # Wait for CSV file to be created with progress monitoring.
            # (Dead locals last_pos/wait_time/file_size/last_size_check removed.)
            last_log_time = start_time

            while not os.path.exists(output_file):
                current_time = time.time()
                elapsed = current_time - start_time

                # Log progress every 30 seconds
                if current_time - last_log_time >= 30:
                    logger.info(f"tracerpt.exe still running... ({elapsed:.0f}s elapsed)")
                    last_log_time = current_time

                # Check if we've exceeded max wait time
                if elapsed > max_wait_time:
                    process.terminate()
                    raise RuntimeError(f"tracerpt timed out after {max_wait_time} seconds")

                # Check if process ended
                if process.poll() is not None:
                    stdout, stderr = process.communicate()
                    if process.returncode != 0:
                        raise RuntimeError(f"tracerpt failed with code {process.returncode}: {stderr}")
                    # Process completed but no output file
                    if not os.path.exists(output_file):
                        raise RuntimeError("tracerpt completed but produced no output file")
                    break

                time.sleep(check_interval)

            if os.path.exists(output_file):
                logger.info("CSV file created, starting to read records...")

                # Wait for file to have some content
                while os.path.getsize(output_file) == 0 and process.poll() is None:
                    time.sleep(0.5)

                # Stream the CSV file as it's being written
                with open(output_file, 'r', encoding='utf-8', errors='ignore') as f:
                    # Try to read header
                    header_line = f.readline()
                    if not header_line:
                        # Wait a bit for header to be written
                        time.sleep(1)
                        f.seek(0)
                        header_line = f.readline()

                    if header_line:
                        # Read file incrementally instead of all at once
                        csv_reader = csv.DictReader(f, fieldnames=None)
                        csv_reader.fieldnames = next(csv.reader([header_line]))

                        for row_num, row in enumerate(csv_reader):
                            # Log progress periodically
                            if row_num > 0 and row_num % 1000 == 0:
                                logger.info(f"Processed {row_num} records from CSV...")

                            # Handle offset
                            if records_skipped < offset:
                                records_skipped += 1
                                continue

                            # Convert and yield record
                            log_record = self._convert_csv_row(source, row)
                            if log_record:
                                yield log_record
                                records_yielded += 1

                                # Check limit
                                if records_yielded >= limit:
                                    logger.info(f"Reached limit of {limit} records")
                                    # Terminate tracerpt if still running
                                    if process.poll() is None:
                                        logger.info("Terminating tracerpt as we have enough records")
                                        process.terminate()
                                    break

                            # Check if process is still running periodically
                            if row_num % 100 == 0 and process.poll() is not None:
                                # Process ended, check if there was an error
                                if process.returncode != 0:
                                    logger.warning(f"tracerpt ended with code {process.returncode}")

            # Wait for process to complete if still running
            if process.poll() is None:
                remaining_time = max_wait_time - (time.time() - start_time)
                if remaining_time > 0:
                    logger.info(f"Waiting for tracerpt to complete (up to {remaining_time:.0f}s remaining)...")
                    try:
                        process.wait(timeout=remaining_time)
                        logger.info(f"tracerpt completed successfully after {time.time() - start_time:.0f}s")
                    except subprocess.TimeoutExpired:
                        logger.warning(f"tracerpt timed out after {max_wait_time}s, terminating...")
                        process.terminate()
                        process.wait(timeout=5)  # Give it 5 seconds to terminate
                else:
                    logger.warning("Maximum wait time exceeded, terminating tracerpt...")
                    process.terminate()
                    process.wait(timeout=5)

        finally:
            # Clean up temp file. Only OSError is expected here (e.g. the file
            # still locked on Windows); a bare `except:` previously also hid
            # SystemExit/KeyboardInterrupt.
            if os.path.exists(output_file):
                try:
                    os.remove(output_file)
                except OSError:
                    pass

    def _convert_csv_row(self, source: LogSource, row: Dict[str, str]) -> Optional[LogRecord]:
        """Convert a CSV row from tracerpt to a LogRecord.

        Args:
            source: The log source information.
            row: CSV row dictionary.

        Returns:
            LogRecord or None if conversion fails.
        """
        try:
            # Common tracerpt CSV columns
            record_data = {}

            # Map known columns
            field_mappings = {
                "Event Name": "event_name",
                "Type": "event_type",
                "Event ID": "event_id",
                "Version": "version",
                "Channel": "channel",
                "Level": "level",
                "Task": "task",
                "Opcode": "opcode",
                "Keyword": "keywords",
                "PID": "process_id",
                "TID": "thread_id",
                "Processor Number": "processor",
                "Provider Name": "provider_name",
                "Provider ID": "provider_id",
                "Message": "message",
                "Process Name": "process_name",
            }

            for csv_field, record_field in field_mappings.items():
                if csv_field in row and row[csv_field]:
                    record_data[record_field] = row[csv_field]

            # Try to parse timestamp. tracerpt emits several layouts, so try
            # each known format; strptime raises ValueError on mismatch
            # (previously caught with bare excepts).
            timestamp = None
            clock_time = row.get("Clock-Time")
            if clock_time:
                for fmt in (
                    "%Y-%m-%d %H:%M:%S.%f",
                    "%Y-%m-%d %H:%M:%S",
                    "%m/%d/%Y %H:%M:%S.%f",
                ):
                    try:
                        timestamp = datetime.strptime(clock_time, fmt)
                        break
                    except ValueError:
                        continue

            # Include any additional fields
            for key, value in row.items():
                if key not in field_mappings and value:
                    # Clean up field name
                    clean_key = key.lower().replace(' ', '_').replace('-', '_')
                    record_data[clean_key] = value

            return LogRecord(
                source_id=source.id,
                timestamp=timestamp,
                data=record_data,
                raw_content=None
            )

        except Exception as e:
            if self.config.get("verbose", False):
                logger.error(f"Failed to convert CSV row: {e}")
            return None

    def parse_file(
        self, source: LogSource, file_path: Union[str, Path]
    ) -> Iterator[LogRecord]:
        """Parse ETL log records from a file.

        Thin wrapper that delegates to the streaming parser with a fixed
        cap of 10,000 records.

        Args:
            source: The log source information.
            file_path: Path to the ETL file.

        Yields:
            LogRecord objects parsed from the ETL file.
        """
        stream = self.parse_file_streaming(source, file_path, limit=10000)
        yield from stream

    def parse(
        self, path: str, filters: Optional[Dict[str, Any]] = None,
        start_time: Optional[datetime] = None, end_time: Optional[datetime] = None,
        limit: int = 1000, offset: int = 0
    ) -> List[LogRecord]:
        """Parse an ETL file and return filtered, paginated records.

        Args:
            path: Path to the ETL file.
            filters: Optional filters to apply.
            start_time: Optional start time filter.
            end_time: Optional end time filter.
            limit: Maximum number of records to return.
            offset: Number of records to skip.

        Returns:
            List of LogRecord objects.
        """
        # Throwaway source descriptor so the streaming parser can tag records.
        temp_source = LogSource(
            name="temp_etl",
            type=LogType.ETL,
            path=path,
            metadata={}
        )

        # Collect up to offset+limit matching records, then slice.
        wanted = limit + offset
        collected: List[LogRecord] = []

        for record in self.parse_file_streaming(temp_source, path, limit=wanted):
            ts = record.timestamp
            # Time-window filters only apply when the record has a timestamp.
            if start_time and ts and ts < start_time:
                continue
            if end_time and ts and ts > end_time:
                continue

            if filters and not self._match_filters(record, filters):
                continue

            collected.append(record)
            if len(collected) >= wanted:
                break

        if offset > 0:
            return collected[offset:offset + limit]
        return collected[:limit]

    def _match_filters(self, record: LogRecord, filters: Dict[str, Any]) -> bool:
        """Return True when the record satisfies every filter.

        List-valued filters are membership tests; scalar filters are
        equality tests against the record's data fields.

        Args:
            record: The log record to check.
            filters: Dictionary of filters to apply.

        Returns:
            True if record matches all filters.
        """
        def field_ok(key: str, expected: Any) -> bool:
            actual = record.data.get(key)
            if isinstance(expected, list):
                return actual in expected
            return actual == expected

        return all(field_ok(key, value) for key, value in filters.items())

    def parse_content(self, source: LogSource, content: str) -> Iterator[LogRecord]:
        """Unsupported for ETL: the format is binary, so there is no
        meaningful string representation to parse.

        Args:
            source: The log source information.
            content: String content (not supported for ETL).

        Raises:
            NotImplementedError: Always; parse from a file instead.
        """
        raise NotImplementedError(
            "ETL files are binary and must be parsed from file, not string content"
        )

    def validate_file(self, file_path: Union[str, Path]) -> bool:
        """Return True when *file_path* looks parseable by this parser.

        Checks, in order: the .etl extension, that the path is an existing
        regular file, and that tracerpt is available on this host.

        Args:
            file_path: Path to validate.

        Returns:
            True if file appears to be an ETL file.
        """
        candidate = Path(file_path)
        has_etl_suffix = str(candidate).lower().endswith('.etl')
        return (
            has_etl_suffix
            and candidate.exists()
            and candidate.is_file()
            and self.is_available()
        )

    def __del__(self):
        """Best-effort removal of the tracerpt scratch directory at GC time."""
        if self.temp_dir and os.path.exists(self.temp_dir):
            try:
                import shutil
                # ignore_errors covers files still held open (e.g. on Windows).
                shutil.rmtree(self.temp_dir, ignore_errors=True)
            except Exception:
                # Never raise from __del__; previously a bare `except:` that
                # also trapped SystemExit/KeyboardInterrupt.
                pass
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/prompts/linux_testing_prompt.py:
--------------------------------------------------------------------------------

```python
"""
Linux system testing and diagnostics prompts for the MCP Log Analyzer server.
"""

from typing import Optional
from mcp.server import FastMCP


def register_linux_testing_prompts(mcp: FastMCP):
    """Register all Linux testing prompts."""

    # Static prompt: usage walkthrough for the test_linux_log_access tool.
    @mcp.prompt(
        title="Linux Log Access Test",
        description="Guide for testing access to Linux log files and systemd journal"
    )
    async def linux_log_access_test() -> str:
        """
        Test access to common Linux log files and systemd journal.
        """
        # The markdown body below is returned verbatim to the prompt consumer.
        return """
# 🔍 Linux Log Access Test

## Tool: test_linux_log_access

### Purpose
Tests access to common log files in /var/log/ and systemd journal accessibility.

### What It Tests
- **Traditional log files**: /var/log/syslog, messages, auth.log, kern.log
- **systemd journal**: Journal accessibility and permissions
- **System commands**: ss, netstat, systemctl availability
- **File permissions**: Read access and file sizes

### Usage
```
Tool: test_linux_log_access
```

### Interpreting Results
- **Accessible files**: Can be queried and analyzed
- **Permission denied**: Need elevated privileges or group membership
- **File not found**: May indicate different distribution or log rotation
- **Journal access**: Check systemd-journal group membership

### Next Steps
- Add user to systemd-journal group for journal access
- Use sudo for system log access if needed
- Check distribution-specific log locations
"""

    # Parameterized prompt: appends priority-specific advice when requested.
    @mcp.prompt(
        title="systemd Journal Query Guide",
        description="How to query and filter systemd journal logs"
    )
    async def systemd_journal_query(
        priority: Optional[str] = None
    ) -> str:
        """
        Guide for querying systemd journal.

        Args:
            priority: Optional priority level filter (err, warning, info, etc.)
        """

        base_guide = """
# 📊 systemd Journal Query Guide

## Tool: query_systemd_journal

### Purpose
Query systemd journal with specific filters for targeted log analysis.

### Parameters
- **service_name**: Specific service to query (e.g., "nginx", "sshd")
- **priority**: Log level (emerg, alert, crit, err, warning, notice, info, debug)
- **time_duration**: Time range ("30m", "2h", "1d", etc.)
- **max_lines**: Result limit (default: 100)

### Usage Examples
```
# Service errors
Tool: query_systemd_journal
Parameters: service_name="nginx", priority="err", time_duration="1h"

# Recent critical events
Tool: query_systemd_journal
Parameters: priority="crit", time_duration="24h"

# SSH authentication logs
Tool: query_systemd_journal
Parameters: service_name="sshd", time_duration="6h", max_lines=200
```
"""

        # Extra guidance keyed by normalized priority; only the three most
        # commonly filtered levels have dedicated sections.
        priority_guide = {
            "err": """
### Error Priority Focus
- **Use for**: Service failures, application errors
- **Common patterns**: "Failed to start", "Main process exited"
- **Time range**: Start with 1-6 hours, expand if needed
""",
            "warning": """
### Warning Priority Focus
- **Use for**: Performance issues, non-critical failures
- **Common patterns**: Resource warnings, configuration issues
- **Time range**: 6-24 hours typically sufficient
""",
            "info": """
### Info Priority Focus
- **Use for**: Normal operations, service status
- **Common patterns**: "Started", "Stopped", "Reloading"
- **Time range**: Keep short (1-2 hours) due to volume
"""
        }

        # Case-insensitive match; unknown priorities silently get no extra section.
        if priority and priority.lower() in priority_guide:
            base_guide += priority_guide[priority.lower()]

        # The reference table is always appended, regardless of the filter.
        base_guide += """
### Priority Levels Reference
- **emerg (0)**: System unusable
- **alert (1)**: Immediate action required
- **crit (2)**: Critical conditions
- **err (3)**: Error conditions
- **warning (4)**: Warning conditions
- **notice (5)**: Normal but significant
- **info (6)**: Informational messages
- **debug (7)**: Debug-level messages
"""

        return base_guide

    # Static prompt: usage walkthrough for the analyze_linux_services tool.
    @mcp.prompt(
        title="Linux Service Analysis",
        description="Guide for analyzing Linux service status and health"
    )
    async def linux_service_analysis() -> str:
        """
        Analyze Linux services using systemd tools.
        """
        return """
# 🛠️ Linux Service Analysis Guide

## Tool: analyze_linux_services

### Purpose
Lists and analyzes systemd services, identifying failed services and recent issues.

### Parameters
- **service_pattern**: Filter services by name pattern (optional)
- **include_failed**: Include failed services in analysis (default: true)

### Usage Examples
```
# Check all failed services
Tool: analyze_linux_services
Parameters: include_failed=true

# Analyze web services
Tool: analyze_linux_services
Parameters: service_pattern="nginx|apache"

# Check database services
Tool: analyze_linux_services
Parameters: service_pattern="mysql|postgres|mongodb"
```

### What It Provides
✅ Active and failed service lists
✅ Recent logs for failed services
✅ Service health assessment
✅ Service dependencies and issues

### Common Service States
- **active (running)**: Service operating normally
- **active (exited)**: One-shot service completed
- **inactive (dead)**: Service not running
- **failed**: Service failed to start or crashed

### Troubleshooting Failed Services
1. Check service logs for error details
2. Verify configuration files
3. Check service dependencies
4. Review system resources
5. Test service restart capability
"""

    # Static prompt: usage walkthrough for the get_linux_system_overview tool.
    @mcp.prompt(
        title="Linux System Overview",
        description="Get comprehensive Linux system health information"
    )
    async def linux_system_overview() -> str:
        """
        Guide for getting Linux system overview.
        """
        return """
# 🖥️ Linux System Overview

## Tool: get_linux_system_overview

### Purpose
Provides comprehensive system information and health status for Linux systems.

### Usage
```
Tool: get_linux_system_overview
```

### Information Provided
- **System Info**: Hostname, uptime, kernel version
- **Distribution**: OS name and version
- **Resources**: CPU, memory, disk usage
- **Critical Errors**: Recent error logs
- **Service Status**: Failed service count

### Health Indicators
✅ **Healthy**: No failed services, low resource usage
⚠️ **Warning**: 1-2 failed services, moderate resource usage
❌ **Critical**: Multiple failures, high resource usage

### Follow-up Actions
Based on the overview:
1. Investigate failed services with analyze_linux_services
2. Check specific errors with query_systemd_journal
3. Monitor resource usage trends
4. Plan maintenance if needed
"""

    # Static prompt: step-by-step boot diagnostics using the journal tools.
    @mcp.prompt(
        title="Linux Boot Troubleshooting",
        description="Diagnose Linux boot and startup issues"
    )
    async def linux_boot_troubleshooting() -> str:
        """
        Guide for troubleshooting Linux boot issues.
        """
        return """
# 🚀 Linux Boot Troubleshooting

## Diagnosing Boot Issues

### Step 1: Check Boot Messages
```
Tool: query_systemd_journal
Parameters: time_duration="2h", max_lines=500
```
Look for systemd and kernel messages during boot.

### Step 2: Identify Failed Services
```
Tool: analyze_linux_services
Parameters: include_failed=true
```
Services that fail during boot often indicate issues.

### Step 3: Check Critical Errors
```
Tool: query_systemd_journal
Parameters: priority="err", time_duration="1h"
```
Focus on error messages from boot time.

### Common Boot Problems

#### Service Dependency Failures
- **Symptom**: "Dependency failed for..."
- **Check**: Service order and requirements
- **Fix**: Resolve dependent service issues first

#### Hardware Initialization
- **Symptom**: Kernel errors, driver failures
- **Check**: dmesg output, kernel logs
- **Fix**: Update drivers, check hardware

#### Filesystem Issues
- **Symptom**: Mount failures, read-only root
- **Check**: fstab entries, disk errors
- **Fix**: fsck, correct mount options

### Boot Performance
- Use `systemd-analyze` for boot timing
- Check for slow services
- Optimize service startup order
- Disable unnecessary boot services
"""

    # Static prompt: security-event monitoring recipes built on journal queries.
    @mcp.prompt(
        title="Linux Security Monitoring",
        description="Monitor authentication and security events on Linux"
    )
    async def linux_security_monitoring() -> str:
        """
        Guide for monitoring Linux security events.
        """
        return """
# 🔒 Linux Security Monitoring

## Authentication Monitoring

### Step 1: SSH Login Activity
```
Tool: query_systemd_journal
Parameters: service_name="sshd", time_duration="24h"
```

### Step 2: Failed Authentication
```
Tool: query_systemd_journal
Parameters: priority="warning", time_duration="6h"
```
Look for "Failed password" or "authentication failure".

### Step 3: Sudo Usage
```
Tool: query_systemd_journal
Parameters: service_name="sudo", time_duration="24h"
```

## Security Patterns to Watch

### Failed Login Attempts
- Multiple failures from same IP
- Attempts on non-existent users
- Rapid retry patterns
- Unusual login times

### Privilege Escalation
- sudo usage by new users
- Unexpected root processes
- Service account activities
- Permission changes

### System Modifications
- Package installations
- Configuration changes
- New user accounts
- Service modifications

## Security Event Examples
- **"Failed password for"**: SSH authentication failure
- **"Accepted publickey"**: Successful SSH key auth
- **"session opened for user root"**: Root access
- **"COMMAND="**: Sudo command execution
"""

    # Static prompt: performance triage workflow using journal/service tools.
    @mcp.prompt(
        title="Linux Performance Issues",
        description="Diagnose Linux system performance problems"
    )
    async def linux_performance_issues() -> str:
        """
        Guide for diagnosing Linux performance issues.
        """
        return """
# 📊 Linux Performance Issues

## Investigating Performance Problems

### Step 1: Check System Warnings
```
Tool: query_systemd_journal
Parameters: priority="warning", time_duration="6h"
```
Look for resource-related warnings.

### Step 2: Memory Issues
Search for OOM (Out of Memory) events:
```
Tool: query_systemd_journal
Parameters: time_duration="24h", max_lines=200
```
Look for "Out of memory" or "killed process".

### Step 3: Disk Space Problems
```
Tool: query_systemd_journal
Parameters: priority="err", time_duration="12h"
```
Search for "No space left on device".

### Step 4: Service Performance
```
Tool: analyze_linux_services
Parameters: include_failed=true
```
Check for services with performance issues.

## Common Performance Issues

### High Memory Usage
- **Symptoms**: OOM killer activations
- **Investigation**: Check process memory usage
- **Solutions**: Add swap, optimize applications

### Disk I/O Bottlenecks
- **Symptoms**: Slow response, high wait times
- **Investigation**: iostat, iotop results
- **Solutions**: Optimize I/O patterns, upgrade storage

### CPU Saturation
- **Symptoms**: High load average, slow processing
- **Investigation**: Check CPU-intensive processes
- **Solutions**: Optimize code, add CPU resources

### Network Issues
- **Symptoms**: Connection timeouts, packet loss
- **Investigation**: Network service logs
- **Solutions**: Check bandwidth, optimize network
"""

    # Parameterized prompt: appends a service-category section when requested.
    @mcp.prompt(
        title="Linux Service Management",
        description="Managing and troubleshooting specific Linux services"
    )
    async def linux_service_management(
        service_type: Optional[str] = None
    ) -> str:
        """
        Guide for managing specific Linux services.

        Args:
            service_type: Type of service (web, database, system, etc.)
        """

        base_guide = """
# 🛠️ Linux Service Management

## Managing systemd Services

### Check Service Status
```
Tool: analyze_linux_services
Parameters: service_pattern="service-name"
```

### View Service Logs
```
Tool: query_systemd_journal
Parameters: service_name="service-name", time_duration="1h"
```

### Common Service Operations
- **Start**: systemctl start service-name
- **Stop**: systemctl stop service-name
- **Restart**: systemctl restart service-name
- **Enable**: systemctl enable service-name
- **Status**: systemctl status service-name
"""

        # Category-specific sections; only these three categories are known.
        service_guides = {
            "web": """
## Web Server Services

### Nginx
```
Tool: query_systemd_journal
Parameters: service_name="nginx", priority="err"
```
Common issues: Port conflicts, configuration errors

### Apache
```
Tool: query_systemd_journal
Parameters: service_name="apache2", priority="err"
```
Common issues: Module conflicts, .htaccess errors
""",
            "database": """
## Database Services

### MySQL/MariaDB
```
Tool: query_systemd_journal
Parameters: service_name="mysql", priority="err"
```
Common issues: Connection limits, disk space

### PostgreSQL
```
Tool: query_systemd_journal
Parameters: service_name="postgresql", priority="err"
```
Common issues: Shared memory, connection pooling
""",
            "system": """
## System Services

### SSH
```
Tool: query_systemd_journal
Parameters: service_name="sshd", time_duration="6h"
```
Monitor: Authentication attempts, configuration

### Cron
```
Tool: query_systemd_journal
Parameters: service_name="cron", time_duration="24h"
```
Monitor: Job execution, failures
"""
        }

        # Case-insensitive match; unknown categories silently get no extra section.
        if service_type and service_type.lower() in service_guides:
            base_guide += service_guides[service_type.lower()]

        # Generic troubleshooting checklist is always appended.
        base_guide += """
## Service Troubleshooting Steps
1. Check service status and recent logs
2. Verify configuration files
3. Check service dependencies
4. Review resource availability
5. Test service functionality
6. Monitor after restart
"""

        return base_guide

    # Static prompt: reference card of literal log substrings and meanings.
    @mcp.prompt(
        title="Linux Log Patterns Reference",
        description="Common Linux log patterns and their meanings"
    )
    async def linux_log_patterns() -> str:
        """
        Reference guide for common Linux log patterns.
        """
        return """
# 📖 Linux Log Patterns Reference

## Service Management Patterns

### Successful Operations
- **"Started [Service]"**: Service startup success
- **"Reloaded [Service]"**: Configuration reload
- **"Listening on"**: Service accepting connections
- **"Reached target"**: systemd target achieved

### Service Failures
- **"Failed to start"**: Startup failure
- **"Main process exited"**: Service crash
- **"Dependency failed"**: Required service unavailable
- **"Timed out"**: Service startup timeout
- **"code=exited, status=1"**: Exit with error

## Security Patterns

### Authentication
- **"Failed password for"**: Login failure
- **"Accepted publickey"**: SSH key success
- **"session opened"**: User session start
- **"session closed"**: User session end
- **"COMMAND="**: Sudo command execution

### Security Events
- **"authentication failure"**: PAM auth fail
- **"Connection closed by"**: Dropped connection
- **"Invalid user"**: Non-existent user login
- **"Connection reset"**: Network interruption

## System Events

### Boot/Shutdown
- **"Booting Linux"**: Kernel boot start
- **"Started Session"**: User session start
- **"Reached target Multi-User"**: Boot complete
- **"Stopped target"**: Shutdown initiated

### Resource Issues
- **"Out of memory"**: OOM killer activated
- **"No space left"**: Disk full
- **"Too many open files"**: File descriptor limit
- **"Cannot allocate memory"**: Memory exhaustion

## Network Patterns

### Connection Events
- **"link is up"**: Network interface active
- **"link is down"**: Network interface inactive
- **"DHCPREQUEST"**: IP address request
- **"DHCPACK"**: IP address assigned

### Network Errors
- **"Name or service not known"**: DNS failure
- **"Connection refused"**: Service not listening
- **"Network is unreachable"**: Routing issue
- **"Connection timed out"**: No response

## Performance Indicators

### Warning Signs
- **"took too long"**: Slow operation
- **"degraded"**: Performance issue
- **"high load"**: System overload
- **"throttling"**: Rate limiting active

### Critical Issues
- **"segfault"**: Memory violation
- **"core dumped"**: Process crash
- **"kernel panic"**: System crash
- **"hung task"**: Process stuck
"""

    # Static prompt: distro-specific log path and systemd adoption reference.
    @mcp.prompt(
        title="Linux Distribution Differences",
        description="Guide for log locations across different Linux distributions"
    )
    async def linux_distribution_guide() -> str:
        """
        Guide for handling distribution-specific differences.
        """
        return """
# 🐧 Linux Distribution Differences

## Log File Locations

### Debian/Ubuntu
- **System logs**: /var/log/syslog
- **Auth logs**: /var/log/auth.log
- **Kernel**: /var/log/kern.log
- **Package manager**: /var/log/dpkg.log

### RHEL/CentOS/Fedora
- **System logs**: /var/log/messages
- **Auth logs**: /var/log/secure
- **Kernel**: /var/log/messages
- **Package manager**: /var/log/yum.log

### Arch Linux
- **Primary logging**: systemd journal only
- **Persistent logs**: /var/log/journal/
- **Package manager**: /var/log/pacman.log

### SUSE
- **System logs**: /var/log/messages
- **Auth logs**: /var/log/messages
- **Package manager**: /var/log/zypper.log

## systemd Adoption

### Full systemd
- Ubuntu 16.04+
- Debian 8+
- RHEL/CentOS 7+
- Fedora 15+
- Arch Linux
- openSUSE

### SysV Init or Other
- Older distributions
- Some embedded systems
- Specialized distributions

## Best Practices
1. Check for systemd first (systemctl available)
2. Fall back to traditional logs if needed
3. Use distribution detection for paths
4. Handle both logging systems when possible
"""

    # Static prompt: time-boxed emergency triage playbook for critical outages.
    @mcp.prompt(
        title="Linux Emergency Diagnostics",
        description="Quick diagnostics for Linux system emergencies"
    )
    async def linux_emergency_diagnostics() -> str:
        """
        Emergency diagnostic procedures for critical Linux issues.
        """
        return """
# 🚨 Linux Emergency Diagnostics

## Critical System Failure

### Phase 1: Initial Assessment (< 2 minutes)
```
Tool: get_linux_system_overview
```
Get immediate system status.

### Phase 2: Service Status (2-5 minutes)
```
Tool: analyze_linux_services
Parameters: include_failed=true
```
Identify all failed services.

### Phase 3: Recent Errors (5-10 minutes)
```
Tool: query_systemd_journal
Parameters: priority="err", time_duration="1h"
```
Find recent critical errors.

## Emergency Scenarios

### System Won't Boot
1. Check journal from rescue mode
2. Look for kernel panic messages
3. Verify filesystem integrity
4. Check hardware initialization

### All Services Failing
1. Check system resources (disk, memory)
2. Verify systemd functionality
3. Check for dependency loops
4. Review recent system changes

### Performance Crisis
1. Check for OOM killer activity
2. Look for disk full errors
3. Monitor CPU/memory usage
4. Identify resource hogs

### Security Breach
1. Check authentication logs immediately
2. Look for privilege escalations
3. Monitor network connections
4. Review system modifications

## Recovery Actions

### Service Recovery
- Restart failed services systematically
- Check service dependencies first
- Monitor logs during restart
- Verify functionality after start

### Resource Recovery
- Free disk space (logs, temp files)
- Kill memory-intensive processes
- Clear system caches if needed
- Add swap space temporarily

### Access Recovery
- Reset service configurations
- Restore from known-good backups
- Check file permissions
- Verify network connectivity

## Critical Commands Reference
- **Journal since boot**: journalctl -b
- **Follow live logs**: journalctl -f
- **System status**: systemctl status
- **Failed services**: systemctl --failed
- **Resource usage**: top, htop, free, df
"""
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/tools/windows_test_tools.py:
--------------------------------------------------------------------------------

```python
"""
Windows Event Log testing and diagnostic MCP tools.
"""

from typing import Any, Dict, Optional

from mcp.server import FastMCP
from pydantic import BaseModel, Field


class WindowsEventLogTestRequest(BaseModel):
    """Request model for testing Windows Event Log access.

    Used by get_windows_event_log_info to select a log channel and bound
    the number of entries read.
    """

    # Well-known channels are "System", "Application", and "Security".
    log_name: str = Field(
        "System", description="Event log name to test (System, Application, Security)"
    )
    # Upper bound on how many recent entries the tool will read.
    max_entries: int = Field(10, description="Maximum number of entries to fetch")


class WindowsEventLogQueryRequest(BaseModel):
    """Request model for querying Windows Event Logs.

    Optional filters (``event_id``, ``level``) default to ``None``, which
    means "do not filter on that criterion".
    """

    log_name: str = Field("System", description="Event log name to query")
    # Annotated as Optional so an explicit None (no filter) validates cleanly;
    # the previous bare int/str annotations contradicted the None defaults and
    # are rejected by Pydantic v2 strict validation and by type checkers.
    event_id: Optional[int] = Field(None, description="Specific Event ID to filter by")
    level: Optional[str] = Field(
        None, description="Event level (Error, Warning, Information)"
    )
    time_duration: str = Field(
        "1h", description="Time duration (e.g., '30m', '2h', '1d')"
    )
    max_entries: int = Field(50, description="Maximum number of entries to return")


def register_windows_test_tools(mcp: FastMCP):
    """Register all Windows testing tools with the MCP server."""

    @mcp.tool()
    async def test_windows_event_log_access() -> Dict[str, Any]:
        """
        Test Windows Event Log access and permissions.

        This tool checks if the system can access Windows Event Logs
        and provides diagnostic information about available logs.
        """
        import platform

        # Event Log APIs exist only on Windows; report and bail out elsewhere.
        if platform.system() != "Windows":
            return {
                "status": "unavailable",
                "message": "Windows Event Logs are only available on Windows systems",
                "platform": platform.system(),
            }

        try:
            import win32evtlog

            # Probe each well-known channel by opening and closing a handle.
            test_results = {}
            for channel in ("System", "Application", "Security"):
                try:
                    handle = win32evtlog.OpenEventLog(None, channel)
                    win32evtlog.CloseEventLog(handle)
                except Exception as exc:
                    test_results[channel] = {"accessible": False, "error": str(exc)}
                else:
                    test_results[channel] = {"accessible": True, "error": None}

            return {
                "status": "available",
                "message": "Windows Event Log access test completed",
                "log_access": test_results,
                "pywin32_available": True,
            }

        except ImportError:
            # pywin32 is an optional dependency on Windows hosts.
            return {
                "status": "missing_dependencies",
                "message": "pywin32 package is required for Windows Event Log access",
                "pywin32_available": False,
            }
        except Exception as e:
            return {
                "status": "error",
                "message": f"Error testing Windows Event Log access: {str(e)}",
            }

    @mcp.tool()
    async def get_windows_event_log_info(
        request: WindowsEventLogTestRequest,
    ) -> Dict[str, Any]:
        """
        Get detailed information about a specific Windows Event Log.

        This tool provides metadata and recent entries from the specified
        Windows Event Log for diagnostic purposes.
        """
        import platform

        # Event Log APIs exist only on Windows; bail out early elsewhere.
        if platform.system() != "Windows":
            return {"error": "This tool is only available on Windows systems"}

        try:
            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            hand = win32evtlog.OpenEventLog(None, request.log_name)
            try:
                # Log metadata (record counts); best-effort, some providers
                # refuse these calls even when reading works.
                try:
                    num_records = win32evtlog.GetNumberOfEventLogRecords(hand)
                    oldest_record = win32evtlog.GetOldestEventLogRecord(hand)
                    info = (oldest_record, num_records)
                except Exception:  # narrowed from bare except
                    info = None

                # Read newest-first in sequential batches.
                flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                entries = []
                count = 0

                while count < request.max_entries:
                    events = win32evtlog.ReadEventLog(hand, flags, 0)
                    if not events:
                        break  # No more events to read

                    for event in events:
                        if count >= request.max_entries:
                            break

                        # Message formatting can fail for unregistered sources.
                        try:
                            message = win32evtlogutil.SafeFormatMessage(
                                event, request.log_name
                            )
                        except Exception:  # narrowed from bare except
                            message = "(Unable to format message)"

                        entries.append(
                            {
                                "event_id": event.EventID
                                & 0xFFFFFFFF,  # Convert to unsigned
                                "time_generated": str(event.TimeGenerated),
                                "source_name": event.SourceName,
                                "event_type": event.EventType,
                                "message_preview": message[:200] if message else "",
                            }
                        )
                        count += 1
            finally:
                # Always release the OS handle, even if reading raised
                # (previously the handle leaked on any read error).
                win32evtlog.CloseEventLog(hand)

            return {
                "log_name": request.log_name,
                "log_info": {
                    "oldest_record_number": info[0] if info else "Unknown",
                    "total_records": info[1] if info else "Unknown",
                },
                "recent_entries": entries,
                "entries_retrieved": len(entries),
                "max_requested": request.max_entries,
            }

        except ImportError:
            return {"error": "pywin32 package is required for Windows Event Log access"}
        except Exception as e:
            return {"error": f"Error accessing Windows Event Log: {str(e)}"}

    @mcp.tool()
    async def query_windows_events_by_criteria(
        request: WindowsEventLogQueryRequest,
    ) -> Dict[str, Any]:
        """
        Query Windows Event Logs with specific criteria.

        This tool allows filtering Windows Event Logs by Event ID,
        level, and time range for targeted analysis.
        """
        import platform

        if platform.system() != "Windows":
            return {"error": "This tool is only available on Windows systems"}

        try:
            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ
            import xml.etree.ElementTree as ET
            from datetime import datetime

            from ..server import parse_time_param

            # Parse time duration
            if request.time_duration:
                start_time = parse_time_param(request.time_duration)
            else:
                start_time = None

            matching_events = []
            count = 0
            total_checked = 0
            level_map = {1: "Error", 2: "Warning", 4: "Information"}

            # Check if this is a custom Application and Services log
            if "/" in request.log_name or "\\" in request.log_name:
                # Use newer EvtQuery API for custom logs
                try:
                    query_flags = (
                        win32evtlog.EvtQueryChannelPath
                        | win32evtlog.EvtQueryReverseDirection
                    )

                    # Build XPath query
                    conditions = []
                    if start_time:
                        start_ms = int(start_time.timestamp() * 1000)
                        conditions.append(f"TimeCreated[@SystemTime >= '{start_ms}']")
                    if request.event_id:
                        conditions.append(f"EventID={request.event_id}")
                    if request.level:
                        level_num = {"error": 2, "warning": 3, "information": 4}.get(
                            request.level.lower(), 0
                        )
                        if level_num:
                            conditions.append(f"Level={level_num}")

                    xpath_query = "*"
                    if conditions:
                        xpath_query = f"*[System[{' and '.join(conditions)}]]"

                    query_handle = win32evtlog.EvtQuery(
                        request.log_name, query_flags, xpath_query
                    )

                    while count < request.max_entries:
                        events = win32evtlog.EvtNext(query_handle, 10)
                        if not events:
                            break

                        for event in events:
                            total_checked += 1

                            # Render event as XML
                            xml_content = win32evtlog.EvtRender(
                                event, win32evtlog.EvtRenderEventXml
                            )

                            # Parse XML to extract event data
                            root = ET.fromstring(xml_content)
                            system = root.find(".//System")

                            event_id = (
                                int(system.find("EventID").text)
                                if system.find("EventID") is not None
                                else 0
                            )
                            event_id = event_id & 0xFFFFFFFF

                            provider = system.find("Provider")
                            source_name = (
                                provider.get("Name", "Unknown")
                                if provider is not None
                                else "Unknown"
                            )

                            time_created = system.find("TimeCreated")
                            if time_created is not None:
                                time_str = time_created.get(
                                    "SystemTime", str(datetime.now())
                                )
                            else:
                                time_str = str(datetime.now())

                            level = system.find("Level")
                            event_type = int(level.text) if level is not None else 4

                            # Extract message
                            message = ""
                            event_data = root.find(".//EventData")
                            if event_data is not None:
                                data_items = []
                                for data in event_data:
                                    name = data.get("Name", "")
                                    value = data.text or ""
                                    if name:
                                        data_items.append(f"{name}: {value}")
                                message = "; ".join(data_items)

                            matching_events.append(
                                {
                                    "event_id": event_id,
                                    "time_generated": time_str,
                                    "source_name": source_name,
                                    "event_type": event_type,
                                    "level": level_map.get(event_type, "Unknown"),
                                    "message": message[:500] if message else "",
                                }
                            )

                            count += 1
                            win32evtlog.EvtClose(event)

                            if count >= request.max_entries:
                                break

                    win32evtlog.EvtClose(query_handle)

                except Exception as e:
                    return {"error": f"Error querying custom event log: {str(e)}"}
            else:
                # Use legacy API for standard logs
                hand = win32evtlog.OpenEventLog(None, request.log_name)
                flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                # Continue reading until we have enough matching events or no more events
                while count < request.max_entries:
                    events = win32evtlog.ReadEventLog(hand, flags, 0)
                    if not events:
                        break  # No more events to read

                    for event in events:
                        total_checked += 1

                        # Check time filter
                        if start_time and event.TimeGenerated < start_time:
                            continue

                        # Check Event ID filter
                        # Handle both signed and unsigned Event ID comparisons
                        if request.event_id:
                            # Convert to unsigned 32-bit for comparison
                            event_id_unsigned = event.EventID & 0xFFFFFFFF
                            if (
                                event_id_unsigned != request.event_id
                                and event.EventID != request.event_id
                            ):
                                continue

                        # Check level filter (simplified mapping)
                        if request.level:
                            event_level = level_map.get(event.EventType, "Unknown")
                            if event_level.lower() != request.level.lower():
                                continue

                        # Event matches all criteria
                        try:
                            message = win32evtlogutil.SafeFormatMessage(
                                event, request.log_name
                            )
                        except:
                            message = "(Unable to format message)"

                        matching_events.append(
                            {
                                "event_id": event.EventID
                                & 0xFFFFFFFF,  # Convert to unsigned
                                "time_generated": str(event.TimeGenerated),
                                "source_name": event.SourceName,
                                "event_type": event.EventType,
                                "level": level_map.get(event.EventType, "Unknown"),
                                "message": message[:500] if message else "",
                            }
                        )

                        count += 1
                        if count >= request.max_entries:
                            break

                win32evtlog.CloseEventLog(hand)

            return {
                "log_name": request.log_name,
                "query_criteria": {
                    "event_id": request.event_id,
                    "level": request.level,
                    "time_duration": request.time_duration,
                    "start_time": str(start_time) if start_time else None,
                },
                "matching_events": matching_events,
                "total_matches": len(matching_events),
                "total_events_checked": total_checked,
                "max_requested": request.max_entries,
            }

        except ImportError:
            return {"error": "pywin32 package is required for Windows Event Log access"}
        except Exception as e:
            return {"error": f"Error querying Windows Event Logs: {str(e)}"}

    @mcp.tool()
    async def get_windows_system_health() -> Dict[str, Any]:
        """
        Get Windows system health overview from Event Logs.

        Analyzes the last 24 hours of the System and Application event logs
        to provide a quick health assessment of the Windows system.

        Returns:
            Dict with "overall_status" ("healthy"/"fair"/"concerning"),
            total error/warning counts, up to 5 sample error events per log,
            a timestamp, and — when a log could not be read — a
            "log_access_errors" mapping. On non-Windows systems or on
            failure, a dict containing only an "error" message.
        """
        import platform

        if platform.system() != "Windows":
            return {"error": "This tool is only available on Windows systems"}

        try:
            from datetime import datetime, timedelta

            import win32evtlog
            import win32evtlogutil
            from win32con import EVENTLOG_BACKWARDS_READ, EVENTLOG_SEQUENTIAL_READ

            # Only consider events from the last 24 hours.
            start_time = datetime.now() - timedelta(hours=24)

            health_summary: Dict[str, Any] = {
                "errors": 0,
                "warnings": 0,
                "critical_events": [],
            }

            for log_name in ["System", "Application"]:
                try:
                    hand = win32evtlog.OpenEventLog(None, log_name)
                    flags = EVENTLOG_BACKWARDS_READ | EVENTLOG_SEQUENTIAL_READ

                    log_errors = 0
                    log_warnings = 0
                    done_reading = False

                    try:
                        while not done_reading:
                            events = win32evtlog.ReadEventLog(hand, flags, 0)
                            if not events:
                                break  # No more events to read

                            for event in events:
                                # Reading backwards (newest first): once an event
                                # is older than the cutoff, all remaining ones are
                                # too, so stop reading this log entirely.
                                if event.TimeGenerated < start_time:
                                    done_reading = True
                                    break

                                if event.EventType == 1:  # EVENTLOG_ERROR_TYPE
                                    log_errors += 1
                                    if log_errors <= 5:  # Capture first 5 errors
                                        try:
                                            message = win32evtlogutil.SafeFormatMessage(
                                                event, log_name
                                            )
                                        except Exception:
                                            # Message DLL may be missing/unregistered.
                                            message = "Unable to format message"

                                        health_summary["critical_events"].append(
                                            {
                                                "log": log_name,
                                                "type": "Error",
                                                "event_id": event.EventID
                                                & 0xFFFFFFFF,  # Convert to unsigned
                                                "source": event.SourceName,
                                                "time": str(event.TimeGenerated),
                                                "message": message[:200],
                                            }
                                        )

                                elif event.EventType == 2:  # EVENTLOG_WARNING_TYPE
                                    log_warnings += 1

                        health_summary["errors"] += log_errors
                        health_summary["warnings"] += log_warnings
                    finally:
                        # Always release the log handle, even if reading failed.
                        win32evtlog.CloseEventLog(hand)

                except Exception as e:
                    health_summary[f"{log_name}_error"] = str(e)

            # Determine overall health status from simple count thresholds.
            if health_summary["errors"] == 0 and health_summary["warnings"] < 5:
                status = "healthy"
            elif health_summary["errors"] < 3 and health_summary["warnings"] < 20:
                status = "fair"
            else:
                status = "concerning"

            result: Dict[str, Any] = {
                "time_period": "Last 24 hours",
                "overall_status": status,
                "summary": {
                    "total_errors": health_summary["errors"],
                    "total_warnings": health_summary["warnings"],
                },
                "critical_events": health_summary["critical_events"],
                "timestamp": str(datetime.now()),
            }

            # Surface per-log access failures instead of silently dropping them.
            access_errors = {
                key: value
                for key, value in health_summary.items()
                if key.endswith("_error")
            }
            if access_errors:
                result["log_access_errors"] = access_errors

            return result

        except ImportError:
            return {"error": "pywin32 package is required for Windows Event Log access"}
        except Exception as e:
            return {"error": f"Error analyzing Windows system health: {str(e)}"}

```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/prompts/process_monitoring_prompt.py:
--------------------------------------------------------------------------------

```python
"""
Process monitoring and system resource prompts for the MCP Log Analyzer server.
"""

from typing import Optional
from mcp.server import FastMCP


def register_process_monitoring_prompts(mcp: FastMCP):
    """Register all process monitoring prompts."""

    @mcp.prompt(
        title="Test System Resources",
        description="Guide for testing system resource monitoring capabilities"
    )
    async def test_system_resources() -> str:
        """Return a markdown guide for the test_system_resources_access tool.

        The guide covers what the tool tests (CPU, memory, disk, network,
        processes), how to invoke it, and how to interpret its results.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 🔍 Test System Resources

## Tool: test_system_resources_access

### Purpose
Tests CPU, memory, disk, network, and process monitoring capabilities.

### What It Tests
- **CPU**: Core count, usage, frequency
- **Memory**: Total, available, usage percentage
- **Disk**: Space usage, I/O counters
- **Network**: Traffic statistics, connections
- **Processes**: Enumeration and basic info

### Usage
```
Tool: test_system_resources_access
```

### Interpreting Results
- **psutil version**: Library version for compatibility
- **Accessible resources**: What can be monitored
- **Current values**: Baseline measurements
- **Platform notes**: OS-specific capabilities

### Next Steps
- Use specialized monitoring tools for details
- Set up regular monitoring if needed
- Note any access limitations
- Plan resource tracking strategy
"""

    @mcp.prompt(
        title="System Performance Analysis",
        description="Guide for analyzing current system performance"
    )
    async def system_performance_guide() -> str:
        """Return a markdown guide for the analyze_system_performance tool.

        Documents the tool's parameters, example invocations, the metric
        groups it reports (CPU, memory, disk, network), and how to read
        the resulting health status.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 📊 System Performance Analysis

## Tool: analyze_system_performance

### Purpose
Provides comprehensive analysis of system performance metrics.

### Parameters
- **include_network**: Include network statistics (default: true)
- **include_disk**: Include disk I/O statistics (default: true)
- **sample_interval**: Sampling interval in seconds (default: 1.0)

### Usage Examples
```
# Full system analysis
Tool: analyze_system_performance

# CPU and memory only (faster)
Tool: analyze_system_performance
Parameters: include_network=false, include_disk=false

# Extended sampling
Tool: analyze_system_performance
Parameters: sample_interval=5.0
```

### Performance Metrics

#### CPU Metrics
- **Usage percent**: Current utilization
- **Core count**: Physical and logical cores
- **Frequency**: Current/min/max MHz
- **Load average**: 1/5/15 minute (Unix)

#### Memory Metrics
- **Virtual memory**: Physical RAM usage
- **Swap memory**: Virtual memory usage
- **Available**: Memory for new processes
- **Percent used**: Overall utilization

#### Disk Metrics
- **Usage**: Total/used/free space
- **I/O counters**: Read/write operations
- **Throughput**: Bytes read/written

#### Network Metrics
- **Traffic**: Bytes sent/received
- **Packets**: Count sent/received
- **Connections**: Active connection count

### Health Status
- **Good**: Normal resource usage
- **Fair**: Elevated but manageable
- **Concerning**: Action needed
"""

    @mcp.prompt(
        title="Find Resource-Intensive Processes",
        description="Guide for identifying high CPU and memory consumers"
    )
    async def resource_intensive_processes(
        sort_by: Optional[str] = None
    ) -> str:
        """
        Find processes consuming significant resources.
        
        Args:
            sort_by: Sort criteria (cpu, memory, pid)
        """
        
        base_guide = """
# 🔍 Find Resource-Intensive Processes

## Tool: find_resource_intensive_processes

### Purpose
Identifies processes with high CPU or memory usage for troubleshooting.

### Parameters
- **process_name**: Filter by name (optional)
- **min_cpu_percent**: CPU threshold (default: 0.0)
- **min_memory_percent**: Memory threshold (default: 0.0)
- **max_results**: Result limit (default: 20)
- **sort_by**: Sort order (cpu/memory/pid)

### Usage Examples
```
# Top CPU consumers
Tool: find_resource_intensive_processes
Parameters: min_cpu_percent=10, sort_by="cpu"

# Memory hogs (>5% memory)
Tool: find_resource_intensive_processes
Parameters: min_memory_percent=5, sort_by="memory"

# Specific process analysis
Tool: find_resource_intensive_processes
Parameters: process_name="python", sort_by="memory"
```
"""

        sort_guides = {
            "cpu": """
### Sorting by CPU
Best for identifying:
- Runaway processes
- CPU-intensive tasks
- Performance bottlenecks
- Hung applications

Look for:
- Consistently high CPU usage
- Unexpected CPU consumers
- Multiple instances of same process
""",
            "memory": """
### Sorting by Memory
Best for identifying:
- Memory leaks
- Resource-heavy applications
- Cache/buffer usage
- Potential OOM risks

Look for:
- Growing memory usage
- Disproportionate memory use
- Zombie processes
""",
            "pid": """
### Sorting by PID
Best for:
- Chronological process view
- Parent-child relationships
- System vs user processes
- Process lifecycle analysis

Lower PIDs are typically system processes.
"""
        }

        if sort_by and sort_by.lower() in sort_guides:
            base_guide += sort_guides[sort_by.lower()]

        base_guide += """
### Process Information Provided
- **PID**: Process identifier
- **Name**: Process name
- **CPU %**: Current CPU usage
- **Memory %**: Memory usage percentage
- **Memory MB**: Actual memory in MB
- **Status**: Running/sleeping/zombie
- **Command**: Full command line
- **Create time**: Process start time
"""
        
        return base_guide

    @mcp.prompt(
        title="Monitor Process Health",
        description="Guide for monitoring specific process health and status"
    )
    async def monitor_process_health() -> str:
        """Return a markdown guide for the monitor_process_health tool.

        Documents the required process_name parameter, example calls,
        healthy/warning/critical indicators, multi-instance behavior,
        and suggested follow-up actions.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 🎯 Monitor Process Health

## Tool: monitor_process_health

### Purpose
Monitors specific process health, resource usage, and potential issues.

### Parameters
- **process_name**: Name of process to monitor (required)

### Usage Examples
```
# Monitor web server
Tool: monitor_process_health
Parameters: process_name="nginx"

# Monitor database
Tool: monitor_process_health
Parameters: process_name="postgres"

# Monitor custom application
Tool: monitor_process_health
Parameters: process_name="myapp"
```

### Health Indicators

#### Healthy Process
✅ Stable CPU usage
✅ Consistent memory usage
✅ Normal connection count
✅ Appropriate age for service
✅ Status: running

#### Warning Signs
⚠️ High CPU spikes
⚠️ Growing memory usage
⚠️ Many connections
⚠️ Recent restarts
⚠️ Status: sleeping (if unexpected)

#### Critical Issues
❌ Excessive CPU usage
❌ Memory leak indicators
❌ Connection exhaustion
❌ Very recent start (crash?)
❌ Status: zombie

### Multiple Instances
Tool handles multiple processes with same name:
- Reports each instance separately
- Shows total resource usage
- Identifies newest/oldest instances

### Follow-up Actions
1. Check process logs for errors
2. Monitor trends over time
3. Compare with baseline values
4. Investigate recent restarts
5. Check system resources
"""

    @mcp.prompt(
        title="System Health Summary",
        description="Get comprehensive system health overview"
    )
    async def system_health_summary() -> str:
        """Return a markdown guide for the get_system_health_summary tool.

        Explains the health-score ranges, the information the tool
        reports (resource summary, top consumers, assessment), common
        issues it detects, and typical uses of the summary.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 📊 System Health Summary

## Tool: get_system_health_summary

### Purpose
Provides overall system health assessment with resource usage and top consumers.

### Usage
```
Tool: get_system_health_summary
```

### Health Score Interpretation
- **80-100**: Excellent - System running smoothly
- **60-79**: Good - Minor resource usage
- **40-59**: Fair - Moderate load, monitor closely
- **20-39**: Poor - High resource usage
- **0-19**: Critical - Immediate action needed

### Information Provided

#### Resource Summary
- CPU usage percentage
- Memory usage percentage
- Disk usage percentage
- Total process count

#### Top Consumers
- Top 5 CPU processes
- Top 5 memory processes
- Resource usage details
- Process command lines

#### Health Assessment
- Overall status (excellent/good/fair/poor/critical)
- Identified issues
- Recommended actions
- Timestamp

### Common Issues Detected
- High CPU usage (>80%)
- High memory usage (>90%)
- Excessive disk usage (>95%)
- Too many processes (>1000)
- Resource exhaustion risks

### Using the Summary
1. Quick daily health checks
2. Baseline establishment
3. Trend monitoring
4. Capacity planning
5. Problem identification
"""

    @mcp.prompt(
        title="CPU Troubleshooting",
        description="Diagnose and resolve high CPU usage issues"
    )
    async def cpu_troubleshooting() -> str:
        """Return a markdown playbook for diagnosing high CPU usage.

        Walks through a three-step diagnosis using the process tools,
        describes common CPU issue patterns with causes and solutions,
        and ends with an investigation checklist.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 🔥 CPU Troubleshooting Guide

## Diagnosing High CPU Usage

### Step 1: Identify CPU Consumers
```
Tool: find_resource_intensive_processes
Parameters: min_cpu_percent=20, sort_by="cpu"
```

### Step 2: Analyze System Performance
```
Tool: analyze_system_performance
Parameters: include_disk=false, include_network=false
```

### Step 3: Monitor Specific Process
```
Tool: monitor_process_health
Parameters: process_name="high-cpu-process"
```

## Common CPU Issues

### Runaway Process
**Symptoms**: Single process at 100% CPU
**Causes**:
- Infinite loops
- Busy waiting
- Algorithm issues

**Solutions**:
- Restart the process
- Debug application code
- Apply CPU limits

### System Overload
**Symptoms**: Multiple processes high CPU
**Causes**:
- Too many concurrent tasks
- Insufficient CPU cores
- Background jobs

**Solutions**:
- Reduce concurrent load
- Schedule tasks off-peak
- Upgrade CPU resources

### CPU Thrashing
**Symptoms**: Rapid CPU spikes
**Causes**:
- Context switching
- Memory pressure
- I/O wait

**Solutions**:
- Reduce process count
- Increase memory
- Optimize I/O operations

## Investigation Checklist
- [ ] Check top CPU consumers
- [ ] Review process command lines
- [ ] Monitor CPU trends
- [ ] Check load average
- [ ] Verify cooling/throttling
- [ ] Review recent changes
"""

    @mcp.prompt(
        title="Memory Troubleshooting",
        description="Diagnose and resolve memory usage issues"
    )
    async def memory_troubleshooting() -> str:
        """Return a markdown playbook for diagnosing memory issues.

        Covers a stepwise diagnosis with the process tools, common
        memory problem patterns (leaks, exhaustion, cache usage), and
        a general memory-analysis procedure.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 💾 Memory Troubleshooting Guide

## Diagnosing High Memory Usage

### Step 1: Find Memory Consumers
```
Tool: find_resource_intensive_processes
Parameters: min_memory_percent=5, sort_by="memory"
```

### Step 2: Check System Memory
```
Tool: analyze_system_performance
Parameters: include_disk=false, include_network=false
```

### Step 3: Monitor for Leaks
```
Tool: monitor_process_health
Parameters: process_name="suspected-process"
```
Watch for growing memory over time.

## Common Memory Issues

### Memory Leaks
**Symptoms**: 
- Gradual memory increase
- Never releases memory
- Eventually crashes

**Detection**:
- Monitor process over hours/days
- Check memory vs process age
- Look for linear growth

**Solutions**:
- Restart process periodically
- Fix application code
- Implement memory limits

### Memory Exhaustion
**Symptoms**:
- System using 95%+ memory
- Heavy swap usage
- System slowdown

**Solutions**:
- Kill unnecessary processes
- Add more RAM
- Configure swap space
- Optimize applications

### Cache/Buffer Usage
**Symptoms**:
- High memory usage
- But available memory exists
- System performs well

**Note**: This is normal Linux behavior.
Cache/buffers are released when needed.

## Memory Analysis Steps
1. Identify top consumers
2. Check for growth patterns
3. Monitor swap usage
4. Review OOM killer logs
5. Calculate actual free memory
6. Plan capacity upgrades
"""

    @mcp.prompt(
        title="Process Monitoring Best Practices",
        description="Best practices for effective process monitoring"
    )
    async def process_monitoring_practices() -> str:
        """Return a markdown best-practices guide for process monitoring.

        Covers baseline establishment, monitoring strategy and
        thresholds, recurring monitoring tasks, key metrics to track,
        and record-keeping recommendations.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 📋 Process Monitoring Best Practices

## Establishing Baselines

### Initial Baseline
1. Run system health summary
```
Tool: get_system_health_summary
```
2. Document normal values:
   - Typical CPU usage
   - Average memory usage
   - Normal process count
   - Standard disk usage

### Regular Monitoring
- **Daily**: Quick health check
- **Weekly**: Trend analysis
- **Monthly**: Capacity review

## Monitoring Strategy

### Proactive Monitoring
Set thresholds for alerts:
- CPU > 80% for 5 minutes
- Memory > 90%
- Disk > 85%
- Critical process not running

### Resource Tracking
```
# Track specific application
Tool: monitor_process_health
Parameters: process_name="critical-app"

# Find resource spikes
Tool: find_resource_intensive_processes
Parameters: min_cpu_percent=50
```

### Trend Analysis
1. Collect metrics over time
2. Identify patterns:
   - Peak usage hours
   - Growth trends
   - Recurring issues
3. Plan capacity accordingly

## Common Monitoring Tasks

### Daily Health Check
```
1. Tool: get_system_health_summary
2. Review any issues flagged
3. Check critical processes
4. Note unusual patterns
```

### Performance Investigation
```
1. Tool: analyze_system_performance
2. Tool: find_resource_intensive_processes
3. Deep dive on problem processes
4. Check system logs
```

### Capacity Planning
```
1. Track resource trends
2. Project growth rates
3. Identify bottlenecks
4. Plan upgrades
```

## Key Metrics to Track
- **CPU**: Usage %, load average
- **Memory**: Used %, swap usage
- **Disk**: Space %, I/O rates
- **Network**: Bandwidth, connections
- **Processes**: Count, top consumers

## Documentation
Maintain records of:
- Normal baselines
- Known issues
- Growth trends
- Remediation steps
- Capacity plans
"""

    @mcp.prompt(
        title="Emergency Performance Response",
        description="Quick response guide for performance emergencies"
    )
    async def emergency_performance() -> str:
        """Return a markdown emergency-response guide for performance crises.

        Provides time-boxed actions for unresponsive systems and quick
        fixes for CPU, memory, disk, and network emergencies, plus a
        recovery checklist.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 🚨 Emergency Performance Response

## System Unresponsive

### Immediate Actions (< 2 minutes)
```
Tool: get_system_health_summary
```
Quick assessment of system state.

### Find Culprits (2-5 minutes)
```
Tool: find_resource_intensive_processes
Parameters: min_cpu_percent=50, min_memory_percent=20
```

### Kill Problem Processes
1. Identify non-critical high users
2. Terminate gracefully if possible
3. Force kill if necessary
4. Monitor system response

## High CPU Emergency

### Quick Fix
```
# Find top CPU users
Tool: find_resource_intensive_processes
Parameters: min_cpu_percent=30, sort_by="cpu"
```

Actions:
1. Kill non-essential processes
2. Nice/renice CPU hogs
3. Disable background tasks
4. Check for runaway processes

## Memory Emergency

### Quick Fix
```
# Find memory hogs
Tool: find_resource_intensive_processes
Parameters: min_memory_percent=10, sort_by="memory"
```

Actions:
1. Kill largest non-critical process
2. Clear caches if possible
3. Add emergency swap
4. Restart memory-leaking services

## Disk Full Emergency

### Quick Actions
1. Find large files/directories
2. Clear logs and temp files
3. Remove old backups
4. Empty trash/recycle bin
5. Compress large files

## Network Saturation

### Quick Fix
```
Tool: analyze_system_performance
Parameters: include_network=true
```

Actions:
1. Identify bandwidth hogs
2. Rate limit if possible
3. Block non-essential traffic
4. Check for DDoS/attacks

## Recovery Checklist
- [ ] System responsive again?
- [ ] Critical services running?
- [ ] Resources below thresholds?
- [ ] Root cause identified?
- [ ] Temporary fixes documented?
- [ ] Permanent fix planned?
- [ ] Monitoring increased?
"""

    @mcp.prompt(
        title="Process Lifecycle Management",
        description="Understanding and managing process lifecycles"
    )
    async def process_lifecycle() -> str:
        """Return a markdown guide to process states and lifecycle management.

        Explains process states (running/sleeping/zombie/stopped),
        process-age interpretation, parent-child relationships, and
        lifecycle-management practices such as graceful restarts and
        resource limits.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 🔄 Process Lifecycle Management

## Process States

### Running
- Actively executing on CPU
- Normal state for active processes
- Should match expected workload

### Sleeping
- Waiting for event/resource
- Normal for idle processes
- Check if unexpectedly sleeping

### Zombie
- Process terminated
- Parent hasn't collected status
- Indicates parent process issue
- Can't be killed directly

### Stopped
- Suspended (SIGSTOP)
- Debugging or job control
- Can be resumed

## Monitoring Process Age

### Check Process Start Time
```
Tool: monitor_process_health
Parameters: process_name="service-name"
```

### Age Indicators
- **Very new** (< 1 min): Just started or restarted
- **Recent** (< 1 hour): May indicate crash/restart
- **Stable** (> 1 day): Normal for services
- **Very old**: Check for memory leaks

## Process Relationships

### Parent-Child
- Parent spawns children
- Children inherit resources
- Orphans adopted by init
- Zombies need parent action

### Process Groups
- Related processes
- Share signals
- Common for services
- Monitor as group

## Lifecycle Management

### Graceful Restart
1. Monitor current state
2. Send termination signal
3. Wait for cleanup
4. Start new instance
5. Verify functionality

### Resource Limits
- CPU time limits
- Memory limits
- File descriptor limits
- Process count limits

### Automatic Management
- Systemd restart policies
- Process supervisors
- Health check scripts
- Resource governors

## Common Issues

### Frequent Restarts
- Check logs for crashes
- Review resource limits
- Verify dependencies
- Check configuration

### Long-Running Processes
- Monitor for memory leaks
- Check file descriptor leaks
- Verify log rotation
- Plan periodic restarts
"""

    @mcp.prompt(
        title="System Resource Thresholds",
        description="Guidelines for setting resource monitoring thresholds"
    )
    async def resource_thresholds() -> str:
        """Return a markdown guide to resource-monitoring thresholds.

        Lists suggested threshold bands for CPU, memory, disk, process
        count, and network usage, and factors to consider when tuning
        custom thresholds.
        """
        # Static guide text; served verbatim to the MCP client.
        return """
# 📏 System Resource Thresholds

## CPU Thresholds

### Usage Levels
- **0-40%**: Low usage, optimal
- **40-60%**: Moderate, normal
- **60-80%**: High, monitor closely
- **80-95%**: Very high, investigate
- **95-100%**: Critical, take action

### Load Average (Unix/Linux)
- **< 1.0 per core**: Good
- **1-2 per core**: Busy
- **> 2 per core**: Overloaded

Example: 4-core system
- Good: < 4.0
- Busy: 4-8
- Overloaded: > 8

## Memory Thresholds

### RAM Usage
- **0-60%**: Healthy
- **60-75%**: Normal
- **75-85%**: Monitor
- **85-95%**: Warning
- **95-100%**: Critical

### Swap Usage
- **0-20%**: Normal
- **20-50%**: Monitor
- **50-80%**: Performance impact
- **> 80%**: Critical

## Disk Thresholds

### Space Usage
- **0-70%**: Safe
- **70-80%**: Plan cleanup
- **80-90%**: Warning
- **90-95%**: Critical
- **> 95%**: Emergency

### I/O Metrics
- Response time > 20ms: Investigate
- Queue depth > 10: Bottleneck
- Utilization > 80%: Saturated

## Process Thresholds

### Process Count
- **< 200**: Light load
- **200-500**: Normal
- **500-1000**: Heavy
- **> 1000**: Very heavy

### Per-Process Limits
- CPU > 50%: Investigate
- Memory > 10%: Monitor
- Connections > 1000: Check
- Threads > 500: Review

## Network Thresholds

### Bandwidth
- **< 50%**: Good
- **50-70%**: Normal
- **70-85%**: High
- **> 85%**: Saturated

### Connections
- Depends on service type
- Web server: 1000s normal
- Database: 100s typical
- Monitor for growth

## Setting Custom Thresholds

Consider:
1. Baseline measurements
2. Application requirements
3. Peak vs average load
4. Business criticality
5. Hardware capabilities

Adjust based on:
- Historical data
- Growth projections
- SLA requirements
- User experience
"""
```

--------------------------------------------------------------------------------
/src/mcp_log_analyzer/mcp_server/prompts/windows_testing_prompt.py:
--------------------------------------------------------------------------------

```python
"""
Windows testing and diagnostics prompts for the MCP Log Analyzer server.
"""

from typing import Optional
from mcp.server import FastMCP


def register_windows_testing_prompts(mcp: FastMCP) -> None:
    """Register all Windows testing prompts.

    Attaches a suite of static Markdown guide prompts to the given
    FastMCP server instance, covering Windows Event Log access testing,
    event querying, system health, service/application/security
    troubleshooting, and Event ID reference material. Each prompt
    returns a pre-formatted Markdown string; two of them
    (``query_windows_events`` and ``event_id_reference``) accept an
    optional argument that selects an extra guide section.

    Args:
        mcp: The FastMCP server to register the prompts on.
    """

    @mcp.prompt(
        title="Test Windows Event Log Access",
        description="Guide for testing Windows Event Log access and permissions"
    )
    async def test_event_log_access() -> str:
        """Return a static guide for testing Windows Event Log access capabilities."""
        return """
# 🔍 Test Windows Event Log Access

## Tool: test_windows_event_log_access

### Purpose
Tests access to Windows Event Logs and verifies permissions.

### What It Tests
- **System Log**: Hardware, drivers, system services
- **Application Log**: Application crashes and errors
- **Security Log**: Authentication and audit events
- **pywin32**: Package availability check

### Usage
```
Tool: test_windows_event_log_access
```

### Interpreting Results
- **Accessible**: Can read and query the log
- **Access Denied**: Need administrator privileges
- **pywin32 available**: Required for Event Log access

### Common Issues
- Security log requires admin rights
- pywin32 not installed: `pip install pywin32`
- Need to run as administrator

### Next Steps
- Install pywin32 if missing
- Run with admin rights for Security log
- Register accessible logs as sources
"""

    @mcp.prompt(
        title="Windows Event Log Information",
        description="Get detailed information about specific Windows Event Logs"
    )
    async def event_log_info_guide() -> str:
        """Return a static guide for retrieving Windows Event Log information."""
        return """
# 📊 Windows Event Log Information

## Tool: get_windows_event_log_info

### Purpose
Retrieves metadata and recent entries from Windows Event Logs.

### Parameters
- **log_name**: "System", "Application", or "Security"
- **max_entries**: Number of recent entries (default: 10)

### Usage Examples
```
# Get System log info
Tool: get_windows_event_log_info
Parameters: log_name="System", max_entries=20

# Check Application events
Tool: get_windows_event_log_info
Parameters: log_name="Application", max_entries=50

# Security events (requires admin)
Tool: get_windows_event_log_info
Parameters: log_name="Security", max_entries=10
```

### Information Returned
- **Log metadata**: Size, record count, timestamps
- **Recent entries**: Event ID, source, type, message
- **Event details**: Formatted for readability

### Event Types
- 1 = Error (Red)
- 2 = Warning (Yellow)
- 4 = Information (Blue)
- 8 = Success Audit
- 16 = Failure Audit
"""

    @mcp.prompt(
        title="Query Windows Events",
        description="Query Windows Event Logs with specific criteria"
    )
    async def query_windows_events(
        filter_type: Optional[str] = None
    ) -> str:
        """
        Guide for querying Windows events by criteria.

        Args:
            filter_type: Type of filter (event_id, level, time). When it
                matches a known filter (case-insensitive), an extra guide
                section for that filter is appended to the base guide;
                unknown or missing values yield the base guide only.
        """

        base_guide = """
# 🔍 Query Windows Events by Criteria

## Tool: query_windows_events_by_criteria

### Purpose
Query Windows Event Logs with powerful filtering options.

### Parameters
- **log_name**: Target log (default: "System")
- **event_id**: Specific Event ID to find
- **level**: "Error", "Warning", or "Information"
- **time_duration**: Time range (e.g., "30m", "2h", "1d")
- **max_entries**: Result limit (default: 50)

### Usage Examples
```
# Find all errors in last hour
Tool: query_windows_events_by_criteria
Parameters: level="Error", time_duration="1h"

# Find specific Event ID
Tool: query_windows_events_by_criteria
Parameters: event_id=7001, time_duration="24h"

# Application warnings
Tool: query_windows_events_by_criteria
Parameters: log_name="Application", level="Warning"
```
"""

        # Optional per-filter addenda, keyed by lowercase filter name.
        filter_guides = {
            "event_id": """
### Filtering by Event ID
Common Event IDs to search:
- **1074**: System shutdown/restart
- **6005/6006**: Event Log start/stop
- **7000-7034**: Service control events
- **1000**: Application crashes
- **4624/4625**: Logon success/failure

Example:
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7001, time_duration="6h"
```
""",
            "level": """
### Filtering by Level
Event severity levels:
- **Error**: Critical failures
- **Warning**: Potential issues
- **Information**: Normal operations

Example:
```
Tool: query_windows_events_by_criteria
Parameters: level="Error", time_duration="2h"
```
""",
            "time": """
### Time-based Filtering
Duration formats:
- **Minutes**: "30m", "45m"
- **Hours**: "1h", "6h", "12h"
- **Days**: "1d", "7d", "30d"

Example:
```
Tool: query_windows_events_by_criteria
Parameters: time_duration="4h", level="Error"
```
"""
        }

        # Append the matching filter section (case-insensitive lookup);
        # silently skip unrecognized filter types.
        if filter_type and filter_type.lower() in filter_guides:
            base_guide += filter_guides[filter_type.lower()]

        base_guide += """
### Query Strategy
1. Start with recent time ranges
2. Use specific Event IDs when known
3. Combine filters for precision
4. Expand search if needed
"""

        return base_guide

    @mcp.prompt(
        title="Windows System Health",
        description="Get Windows system health overview from Event Logs"
    )
    async def windows_system_health() -> str:
        """Return a static guide for Windows system health assessment."""
        return """
# 🎯 Windows System Health Overview

## Tool: get_windows_system_health

### Purpose
Analyzes System and Application logs to assess Windows health.

### Usage
```
Tool: get_windows_system_health
```

### Analysis Period
- Last 24 hours of events
- System and Application logs
- Error and warning counts
- Critical event identification

### Health Status Levels
- **Healthy**: 0 errors, minimal warnings
- **Fair**: <3 errors, <20 warnings
- **Concerning**: 3+ errors or 20+ warnings

### Critical Events Shown
- Service failures
- Application crashes
- System errors
- Hardware issues
- Driver problems

### Follow-up Actions
Based on health status:
1. **Healthy**: Continue monitoring
2. **Fair**: Investigate warnings
3. **Concerning**: Address errors immediately

### Common Issues Found
- Windows Update failures
- Service startup problems
- Application crashes
- Driver errors
- Hardware warnings
"""

    @mcp.prompt(
        title="Windows Service Troubleshooting",
        description="Diagnose Windows service failures and issues"
    )
    async def service_troubleshooting() -> str:
        """Return a static guide for troubleshooting Windows services."""
        return """
# 🛠️ Windows Service Troubleshooting

## Common Service Event IDs

### Service Failures
- **7000**: Service failed to start (logon failure)
- **7001**: Service depends on failed service
- **7023**: Service terminated with error
- **7024**: Service-specific error
- **7031**: Service crashed unexpectedly

### Service Timeouts
- **7009**: Connection timeout
- **7011**: Response timeout
- **7022**: Service hung on starting

## Diagnostic Steps

### Step 1: Find Service Errors
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7000, time_duration="6h"
```

### Step 2: Check Dependencies
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7001, time_duration="6h"
```

### Step 3: Review Service Crashes
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7031, time_duration="24h"
```

## Common Causes

### Logon Failures (7000)
- Incorrect service account password
- Account locked or disabled
- Insufficient permissions

### Dependency Issues (7001)
- Required service not started
- Circular dependencies
- Network service unavailable

### Service Crashes (7031)
- Application bugs
- Resource exhaustion
- Configuration errors

## Resolution Steps
1. Check service account credentials
2. Verify service dependencies
3. Review service configuration
4. Check system resources
5. Examine application logs
"""

    @mcp.prompt(
        title="Windows Application Crashes",
        description="Analyze application crashes and errors"
    )
    async def application_crashes() -> str:
        """Return a static guide for investigating application crashes."""
        return """
# 💥 Windows Application Crash Analysis

## Key Event IDs

### Application Errors
- **1000**: Application crash/fault
- **1001**: Windows Error Reporting
- **1002**: Application hang
- **1026**: .NET runtime error

## Investigation Steps

### Step 1: Find Recent Crashes
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Application", event_id=1000, time_duration="24h"
```

### Step 2: Check Application Hangs
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Application", event_id=1002, time_duration="24h"
```

### Step 3: Review Error Details
```
Tool: get_windows_event_log_info
Parameters: log_name="Application", max_entries=50
```

## Crash Information

### Event 1000 Details
- Faulting application name
- Faulting module (DLL/EXE)
- Exception code
- Fault offset
- Process ID and path

### Common Exception Codes
- **0xc0000005**: Access violation
- **0xc0000409**: Stack buffer overrun
- **0xc00000fd**: Stack overflow
- **0x80000003**: Breakpoint

## Troubleshooting Steps
1. Identify crashing application
2. Check for patterns (time, frequency)
3. Review exception codes
4. Look for module conflicts
5. Check for updates/patches
6. Test in safe mode
"""

    @mcp.prompt(
        title="Windows Security Monitoring",
        description="Monitor Windows security events and authentication"
    )
    async def security_monitoring() -> str:
        """Return a static guide for Windows security event monitoring."""
        return """
# 🔒 Windows Security Monitoring

## Important Security Event IDs

### Logon Events
- **4624**: Successful logon
- **4625**: Failed logon attempt
- **4634**: Account logoff
- **4647**: User initiated logoff

### Account Management
- **4720**: User account created
- **4722**: User account enabled
- **4725**: User account disabled
- **4726**: User account deleted
- **4740**: Account locked out

### Privilege Use
- **4672**: Special privileges assigned
- **4673**: Privileged service called
- **4674**: Operation attempted on object

## Security Queries

### Failed Login Attempts
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Security", event_id=4625, time_duration="2h"
```

### Account Lockouts
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Security", event_id=4740, time_duration="24h"
```

### Successful Logins
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Security", event_id=4624, time_duration="1h"
```

## Security Analysis

### Brute Force Detection
- Multiple 4625 events
- Same username, different IPs
- Rapid attempts

### Suspicious Activity
- Logins at unusual times
- New user accounts created
- Privilege escalations
- Service account usage

### Monitoring Best Practices
1. Regular failed login reviews
2. Track account changes
3. Monitor privileged access
4. Check for patterns
5. Set up alerts

**Note**: Security log requires administrator privileges.
"""

    @mcp.prompt(
        title="Windows Boot and Startup",
        description="Diagnose Windows boot and startup issues"
    )
    async def boot_startup_issues() -> str:
        """Return a static guide for Windows boot and startup diagnostics."""
        return """
# 🚀 Windows Boot and Startup Diagnostics

## Boot-Related Event IDs

### System Start/Stop
- **6005**: Event Log service started (boot)
- **6006**: Event Log service stopped (shutdown)
- **6008**: Unexpected shutdown detected
- **6009**: Processor information at boot

### Shutdown/Restart
- **1074**: System shutdown by user/process
- **1076**: Reason for shutdown

### Driver/Service Issues
- **7026**: Boot-start driver failed
- **7000**: Service failed at startup

## Diagnostic Queries

### Check Last Boot
```
Tool: query_windows_events_by_criteria
Parameters: event_id=6005, time_duration="24h"
```

### Unexpected Shutdowns
```
Tool: query_windows_events_by_criteria
Parameters: event_id=6008, time_duration="7d"
```

### Boot Driver Failures
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7026, time_duration="24h"
```

### Service Startup Issues
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7000, time_duration="2h"
```

## Boot Problem Analysis

### Slow Boot
1. Check service startup times
2. Look for driver failures
3. Review dependency chains
4. Check for timeout events

### Boot Loops
1. Check Event ID 6008 frequency
2. Look for critical errors before shutdown
3. Review hardware events
4. Check for driver conflicts

### Service Failures
1. Identify failing services
2. Check dependencies
3. Review service accounts
4. Verify system resources

## Boot Optimization
- Disable unnecessary startup services
- Update drivers
- Check disk health
- Review startup programs
"""

    @mcp.prompt(
        title="Windows Event ID Reference",
        description="Quick reference for common Windows Event IDs"
    )
    async def event_id_reference(
        category: Optional[str] = None
    ) -> str:
        """
        Windows Event ID reference guide.

        Args:
            category: Event category (system, service, security, etc.).
                A known category (case-insensitive) selects just that
                section; unknown or missing values dump all categories.
        """

        # Category sections, keyed by lowercase category name.
        all_categories = {
            "system": """
## System Events
- **1074**: System shutdown/restart initiated
- **6005**: Event Log service started
- **6006**: Event Log service stopped  
- **6008**: Unexpected system shutdown
- **6009**: Processor information
- **6013**: System uptime
""",
            "service": """
## Service Control Manager
- **7000**: Service failed to start
- **7001**: Service dependency failure
- **7009**: Connection timeout
- **7011**: Service timeout
- **7023**: Service terminated with error
- **7024**: Service-specific error
- **7026**: Boot driver failed
- **7031**: Service crash
- **7034**: Service crashed (no recovery)
""",
            "application": """
## Application Events
- **1000**: Application error/crash
- **1001**: Windows Error Reporting
- **1002**: Application hang
- **1004**: Application recovery
- **1026**: .NET runtime error
""",
            "security": """
## Security Events (Admin Required)
- **4624**: Successful logon
- **4625**: Failed logon
- **4634**: Logoff
- **4672**: Special privileges
- **4720**: User created
- **4726**: User deleted
- **4740**: Account locked
""",
            "hardware": """
## Hardware Events
- **7**: Disk bad block
- **11**: Disk controller error
- **15**: Disk not ready
- **51**: Paging error
- **129**: Disk reset
"""
        }

        result = "# 📖 Windows Event ID Reference\n\n"

        # Single section for a recognized category; otherwise concatenate
        # every category as a full reference dump.
        if category and category.lower() in all_categories:
            result += all_categories[category.lower()]
        else:
            result += "## Common Windows Event IDs by Category\n\n"
            for cat_content in all_categories.values():
                result += cat_content + "\n"

        result += """
## Using Event IDs
1. Note the Event ID from logs
2. Query for specific IDs
3. Check patterns and frequency
4. Cross-reference with time
5. Correlate related events

## Event Levels
- **Error**: Critical failures
- **Warning**: Potential issues  
- **Information**: Normal operations
- **Success Audit**: Security success
- **Failure Audit**: Security failure
"""

        return result

    @mcp.prompt(
        title="Windows Performance Issues",
        description="Diagnose Windows performance problems using Event Logs"
    )
    async def performance_issues() -> str:
        """Return a static guide for Windows performance diagnostics."""
        return """
# 📊 Windows Performance Diagnostics

## Performance-Related Events

### Resource Issues
- **2004**: Resource exhaustion
- **1001**: Performance counter issues
- **100**: Component timeout

### Application Performance
- **1002**: Application hang
- **1530**: Application slow response

## Investigation Steps

### Step 1: Check System Health
```
Tool: get_windows_system_health
```

### Step 2: Find Application Hangs
```
Tool: query_windows_events_by_criteria
Parameters: log_name="Application", event_id=1002, time_duration="6h"
```

### Step 3: Look for Timeouts
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7011, time_duration="6h"
```

### Step 4: Resource Warnings
```
Tool: query_windows_events_by_criteria
Parameters: level="Warning", time_duration="2h"
```

## Common Performance Issues

### High CPU Usage
- Check for crashed services
- Look for restart loops
- Review application errors

### Memory Issues
- Application crashes (1000)
- Out of memory errors
- Page file warnings

### Disk Problems
- Event ID 7, 11, 51
- Slow response warnings
- I/O timeouts

### Network Issues
- Connection timeouts
- Service availability
- DNS failures

## Performance Optimization
1. Identify problematic services
2. Check for memory leaks
3. Review disk errors
4. Monitor service restarts
5. Update drivers
6. Check for malware
"""

    @mcp.prompt(
        title="Windows Event Log Best Practices",
        description="Best practices for Windows Event Log monitoring"
    )
    async def event_log_practices() -> str:
        """Return static best practices for Event Log management."""
        return """
# 📋 Windows Event Log Best Practices

## Regular Monitoring

### Daily Checks
```
Tool: get_windows_system_health
```
- Review health status
- Check critical errors
- Note new warnings

### Weekly Analysis
- Service failure patterns
- Application crash trends
- Security audit review
- Performance issues

## Effective Queries

### Start Specific
```
# Known issue
Tool: query_windows_events_by_criteria
Parameters: event_id=7001, time_duration="24h"
```

### Then Broaden
```
# General errors
Tool: query_windows_events_by_criteria
Parameters: level="Error", time_duration="6h"
```

## Key Event IDs to Monitor

### Critical System
- 6008: Unexpected shutdown
- 1074: System restart
- 7031: Service crash
- 41: Kernel power

### Security (if accessible)
- 4625: Failed logins
- 4740: Account lockouts
- 4720: User creation

### Application Health
- 1000: App crashes
- 1002: App hangs
- 1026: .NET errors

## Log Management

### Retention
- System: 30-90 days
- Application: 30 days
- Security: 90-365 days

### Size Limits
- Prevent logs from filling
- Archive old events
- Regular cleanup

## Automation Ideas
1. Schedule daily health checks
2. Alert on critical Event IDs
3. Weekly summary reports
4. Trend analysis
5. Correlation rules

## Documentation
Track:
- Recurring issues
- Resolution steps
- Event patterns
- System changes
"""

    @mcp.prompt(
        title="Windows Emergency Diagnostics",
        description="Quick diagnostics for Windows emergencies"
    )
    async def emergency_diagnostics() -> str:
        """Return static emergency Windows diagnostic procedures."""
        return """
# 🚨 Windows Emergency Diagnostics

## System Won't Boot

### Quick Checks
1. Boot to Safe Mode
2. Check Event Viewer for:
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7026, time_duration="1h"
```

3. Service failures:
```
Tool: query_windows_events_by_criteria
Parameters: event_id=7000, time_duration="1h"
```

## Blue Screen (BSOD)

### After Reboot
```
Tool: query_windows_events_by_criteria
Parameters: event_id=41, time_duration="1h"
```

Check for:
- Kernel-Power events
- Driver failures
- Hardware errors

## Service Failures

### Critical Service Down
```
# Find specific service
Tool: query_windows_events_by_criteria
Parameters: level="Error", time_duration="30m"
```

Quick fixes:
1. Restart service
2. Check dependencies
3. Verify credentials
4. Review resources

## Performance Crisis

### System Slow
```
Tool: get_windows_system_health
```

Then check:
- Application hangs (1002)
- Service timeouts (7011)
- Resource warnings

## Security Incident

### Suspected Breach
```
# Failed logins
Tool: query_windows_events_by_criteria
Parameters: log_name="Security", event_id=4625

# New accounts
Parameters: log_name="Security", event_id=4720
```

## Recovery Checklist
- [ ] System accessible?
- [ ] Critical services running?
- [ ] Recent errors identified?
- [ ] Security verified?
- [ ] Performance acceptable?
- [ ] Root cause found?
- [ ] Preventive measures?
"""
```
Page 2/3FirstPrevNextLast