This is page 2 of 3. Use http://codebase.md/djm81/log_analyzer_mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursor
│   └── rules
│       ├── markdown-rules.mdc
│       ├── python-github-rules.mdc
│       └── testing-and-build-guide.mdc
├── .cursorrules
├── .env.template
├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   ├── pull_request_template.md
│   └── workflows
│       └── tests.yml
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── docs
│   ├── api_reference.md
│   ├── developer_guide.md
│   ├── getting_started.md
│   ├── LICENSE.md
│   ├── README.md
│   ├── refactoring
│   │   ├── log_analyzer_refactoring_v1.md
│   │   ├── log_analyzer_refactoring_v2.md
│   │   └── README.md
│   ├── rules
│   │   ├── markdown-rules.md
│   │   ├── python-github-rules.md
│   │   ├── README.md
│   │   └── testing-and-build-guide.md
│   └── testing
│       └── README.md
├── LICENSE.md
├── pyproject.toml
├── pyrightconfig.json
├── README.md
├── scripts
│   ├── build.sh
│   ├── cleanup.sh
│   ├── publish.sh
│   ├── release.sh
│   ├── run_log_analyzer_mcp_dev.sh
│   └── test_uvx_install.sh
├── SECURITY.md
├── setup.py
├── src
│   ├── __init__.py
│   ├── log_analyzer_client
│   │   ├── __init__.py
│   │   ├── cli.py
│   │   └── py.typed
│   └── log_analyzer_mcp
│       ├── __init__.py
│       ├── common
│       │   ├── __init__.py
│       │   ├── config_loader.py
│       │   ├── logger_setup.py
│       │   └── utils.py
│       ├── core
│       │   ├── __init__.py
│       │   └── analysis_engine.py
│       ├── log_analyzer_mcp_server.py
│       ├── py.typed
│       └── test_log_parser.py
└── tests
    ├── __init__.py
    ├── log_analyzer_client
    │   ├── __init__.py
    │   └── test_cli.py
    └── log_analyzer_mcp
        ├── __init__.py
        ├── common
        │   └── test_logger_setup.py
        ├── test_analysis_engine.py
        ├── test_log_analyzer_mcp_server.py
        └── test_test_log_parser.py
```

# Files

--------------------------------------------------------------------------------
/tests/log_analyzer_mcp/test_test_log_parser.py:
--------------------------------------------------------------------------------

```python
import pytest

from log_analyzer_mcp.test_log_parser import (
    analyze_pytest_log_content,
    extract_failed_tests,
    extract_overall_summary,
)

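# Note: the assertions below assume extract_overall_summary() returns a dict with
# the keys: passed, failed, skipped, xfailed, xpassed, errors, status,
# duration_seconds, and summary_line.
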
# Sample Log Snippets for Testing

LOG_NO_FAILURES = """
============================= test session starts ==============================
platform linux -- Python 3.9.5, pytest-6.2.4, py-1.10.0, pluggy-0.13.1
rootdir: /project
plugins: asyncio-0.15.1
collected 10 items

tests/test_module_alpha.py ..........                                    [100%]

============================== 10 passed in 0.05s ==============================
"""

LOG_WITH_MODULE_FAILURES = """
Unit tests output:
tests/test_module_beta.py::test_beta_one FAILED
tests/test_module_beta.py::test_beta_two PASSED
tests/test_module_gamma.py::test_gamma_one FAILED

Failed tests by module:
Module: test_module_beta - 1 failed tests
- tests/test_module_beta.py
Module: test_module_gamma - 1 failed tests
- tests/test_module_gamma.py

================= 2 failed, 1 passed in 0.12s =================
"""

LOG_WITH_DIRECT_FAILURES = """
============================= test session starts ==============================
collected 3 items

tests/test_data_processing.py::test_process_normal_data PASSED           [ 33%]
tests/test_data_processing.py::test_process_edge_case FAILED             [ 66%]
tests/test_another_feature.py::test_main_feature PASSED                  [100%]

=================================== FAILURES ===================================
___________________________ test_process_edge_case ___________________________

    def test_process_edge_case():
>       assert 1 == 0
E       assert 1 == 0

tests/test_data_processing.py:15: AssertionError
=========================== short test summary info ============================
FAILED tests/test_data_processing.py::test_process_edge_case - assert 1 == 0
!!!!!!!!!!!!!!!!!!!!!!!!!! Interrupted by signal !!!!!!!!!!!!!!!!!!!!!!!!!!!
========================= 1 failed, 2 passed in 0.02s =========================
"""

LOG_WITH_MIXED_FAILURES_AND_XFAIL_XPASS = """
collected 5 items
tests/test_ops.py::test_op_add PASSED
tests/test_ops.py::test_op_subtract FAILED
tests/test_advanced.py::test_complex_logic_xfail XFAIL
tests/test_advanced.py::test_another_one_xpass XPASS
tests/test_misc.py::test_simple PASSED

=================================== FAILURES ===================================
____________________________ test_op_subtract _____________________________
    def test_op_subtract():
>       assert 5 - 3 == 1
E       assert 2 == 1
tests/test_ops.py:10: AssertionError
=============== 1 failed, 2 passed, 1 xfailed, 1 xpassed in 0.03s ==============
"""

LOG_ONLY_SKIPPED = """
============================= test session starts ==============================
collected 2 items
tests/test_module_delta.py ..                                            [100%]
============================== 2 skipped in 0.01s ==============================
"""

LOG_WITH_ERRORS = """
============================= test session starts ==============================
collected 1 item
tests/test_setup_issue.py E                                              [100%]
==================================== ERRORS ====================================
_____________________ ERROR at setup of test_setup_issue _____________________
Setup failed
=========================== short test summary info ============================
ERROR tests/test_setup_issue.py::test_setup_issue
========================= 1 error in 0.01s =========================
"""

LOG_SHORT_SUMMARY_ONLY = """
session duration: 0.11s
tests/test_api.py::TestAPI::test_get_users PASSED (fixtures used: 'db_session', 'user_factory')
tests/test_api.py::TestAPI::test_create_user FAILED (fixtures used: 'db_session', 'user_payload')
1 failed, 1 passed, 2 skipped in 0.04s
"""  # This doesn't have the ===== border, tests fallback

LOG_NO_SUMMARY_LINE = """
Some random output without a clear pytest summary.
Maybe a crash before summary.
"""


class TestExtractFailedTests:
    def test_no_failures(self) -> None:
        assert extract_failed_tests(LOG_NO_FAILURES) == []

    def test_module_failures(self) -> None:
        expected = [
            {"module": "test_module_beta", "test_file": "tests/test_module_beta.py"},
            {"module": "test_module_gamma", "test_file": "tests/test_module_gamma.py"},
        ]
        assert extract_failed_tests(LOG_WITH_MODULE_FAILURES) == expected

    def test_direct_failures(self):
        expected = [
            {
                "module": "test_data_processing",
                "test_file": "tests/test_data_processing.py",
                "test_name": "test_process_edge_case",
            }
        ]
        assert extract_failed_tests(LOG_WITH_DIRECT_FAILURES) == expected

    def test_mixed_failures(self):
        # LOG_WITH_MIXED_FAILURES_AND_XFAIL_XPASS uses the third pattern (direct FAILED)
        expected = [
            {
                "module": "test_ops",
                "test_file": "tests/test_ops.py",
                "test_name": "test_op_subtract",
            }
        ]
        assert extract_failed_tests(LOG_WITH_MIXED_FAILURES_AND_XFAIL_XPASS) == expected


class TestExtractOverallSummary:
    def test_no_failures_summary(self) -> None:
        summary = extract_overall_summary(LOG_NO_FAILURES)
        assert summary["passed"] == 10
        assert summary["failed"] == 0
        assert summary["skipped"] == 0
        assert summary["xfailed"] == 0
        assert summary["xpassed"] == 0
        assert summary["errors"] == 0
        assert summary["status"] == "PASSED"
        assert summary["duration_seconds"] == 0.05
        assert (
            summary["summary_line"]
            == "============================== 10 passed in 0.05s =============================="
        )

    def test_module_failures_summary(self):
        summary = extract_overall_summary(LOG_WITH_MODULE_FAILURES)
        assert summary["passed"] == 1
        assert summary["failed"] == 2
        assert summary["status"] == "FAILED"
        assert summary["duration_seconds"] == 0.12
        assert summary["summary_line"] == "================= 2 failed, 1 passed in 0.12s ================="

    def test_direct_failures_summary(self):
        summary = extract_overall_summary(LOG_WITH_DIRECT_FAILURES)
        assert summary["passed"] == 2
        assert summary["failed"] == 1
        assert summary["status"] == "FAILED"
        assert summary["duration_seconds"] == 0.02
        assert (
            summary["summary_line"] == "========================= 1 failed, 2 passed in 0.02s ========================="
        )

    def test_mixed_failures_xpass_xfail_summary(self):
        summary = extract_overall_summary(LOG_WITH_MIXED_FAILURES_AND_XFAIL_XPASS)
        assert summary["passed"] == 2
        assert summary["failed"] == 1
        assert summary["skipped"] == 0
        assert summary["xfailed"] == 1
        assert summary["xpassed"] == 1
        assert summary["errors"] == 0
        assert summary["status"] == "FAILED"
        assert summary["duration_seconds"] == 0.03
        assert (
            summary["summary_line"]
            == "=============== 1 failed, 2 passed, 1 xfailed, 1 xpassed in 0.03s =============="
        )

    def test_only_skipped_summary(self):
        summary = extract_overall_summary(LOG_ONLY_SKIPPED)
        assert summary["passed"] == 0
        assert summary["failed"] == 0
        assert summary["skipped"] == 2
        assert summary["status"] == "SKIPPED"
        assert summary["duration_seconds"] == 0.01
        assert (
            summary["summary_line"]
            == "============================== 2 skipped in 0.01s =============================="
        )

    def test_errors_summary(self):
        summary = extract_overall_summary(LOG_WITH_ERRORS)
        assert summary["passed"] == 0
        assert summary["failed"] == 0  # Errors are not counted as failed tests by this parser for 'failed' key
        assert summary["skipped"] == 0
        assert summary["errors"] == 1
        assert summary["status"] == "FAILED"  # Status is FAILED due to errors
        assert summary["duration_seconds"] == 0.01
        assert summary["summary_line"] == "========================= 1 error in 0.01s ========================="

    def test_short_summary_fallback(self):
        summary = extract_overall_summary(LOG_SHORT_SUMMARY_ONLY)
        assert summary["passed"] == 1
        assert summary["failed"] == 1
        assert summary["skipped"] == 2
        assert summary["xfailed"] == 0  # Not in this short summary example
        assert summary["xpassed"] == 0  # Not in this short summary example
        assert summary["errors"] == 0
        assert summary["status"] == "FAILED"
        assert summary["duration_seconds"] == 0.04
        assert summary["summary_line"] == ""  # No main bordered summary line matched

    def test_no_summary_line(self):
        summary = extract_overall_summary(LOG_NO_SUMMARY_LINE)
        assert summary["passed"] == 0
        assert summary["failed"] == 0
        assert summary["skipped"] == 0
        assert summary["status"] == "UNKNOWN"
        assert summary["duration_seconds"] is None
        assert summary["summary_line"] == ""


class TestAnalyzePytestLogContent:
    def test_analyze_summary_only(self) -> None:
        result = analyze_pytest_log_content(LOG_WITH_MODULE_FAILURES, summary_only=True)
        assert "overall_summary" in result
        assert "failed_tests" not in result
        assert result["overall_summary"]["failed"] == 2
        assert result["overall_summary"]["passed"] == 1

    def test_analyze_full_report(self):
        result = analyze_pytest_log_content(LOG_WITH_DIRECT_FAILURES, summary_only=False)
        assert "overall_summary" in result
        assert "failed_tests" in result
        assert result["overall_summary"]["failed"] == 1
        assert result["overall_summary"]["passed"] == 2
        expected_failed_tests = [
            {
                "module": "test_data_processing",
                "test_file": "tests/test_data_processing.py",
                "test_name": "test_process_edge_case",
            }
        ]
        assert result["failed_tests"] == expected_failed_tests

    def test_analyze_no_failures(self) -> None:
        result = analyze_pytest_log_content(LOG_NO_FAILURES, summary_only=False)
        assert result["overall_summary"]["status"] == "PASSED"
        assert result["overall_summary"]["passed"] == 10
        assert result["failed_tests"] == []

    def test_analyze_with_errors(self) -> None:
        result = analyze_pytest_log_content(LOG_WITH_ERRORS, summary_only=False)
        assert result["overall_summary"]["status"] == "FAILED"
        assert result["overall_summary"]["errors"] == 1
        # extract_failed_tests might not pick up errors as 'failed tests' depending on format
        # For this specific log, it doesn't have typical 'FAILED' markers for extract_failed_tests
        assert result["failed_tests"] == []
```

--------------------------------------------------------------------------------
/tests/log_analyzer_client/test_cli.py:
--------------------------------------------------------------------------------

```python
import json
from unittest.mock import MagicMock, patch, ANY

import pytest
from click.testing import CliRunner

from log_analyzer_client.cli import cli

# FilterCriteria is not a class to be imported, it's a dict returned by build_filter_criteria
# from log_analyzer_mcp.common.utils import FilterCriteria # This import will be removed
from log_analyzer_mcp.core.analysis_engine import AnalysisEngine

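# For reference (as exercised by the assertions below), search_logs() receives a
# plain dict of filter criteria, e.g.:
#   {"scope": "default", "context_before": 2, "context_after": 2,
#    "log_dirs_override": None, "log_content_patterns_override": None}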

@pytest.fixture
def runner():
    return CliRunner()


@pytest.fixture
def mock_analysis_engine_instance():
    mock_engine = MagicMock()
    mock_engine.search_logs.return_value = {"results": [{"line": "mocked_log_line_1"}]}
    return mock_engine


@pytest.fixture
def mock_analysis_engine_class(mock_analysis_engine_instance):
    # Patching AnalysisEngine in the module where it's LOOKED UP (cli.py uses it)
    with patch("log_analyzer_client.cli.AnalysisEngine", return_value=mock_analysis_engine_instance) as mock_class:
        yield mock_class


def test_cli_invoked(runner):
    """Test that the main CLI group can be invoked."""
    result = runner.invoke(cli, ["--help"])
    assert result.exit_code == 0
    assert "Log Analyzer CLI" in result.output
    assert "Usage: cli [OPTIONS] COMMAND [ARGS]..." in result.output


def test_search_all_default_options(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test the 'search all' command with default options."""
    result = runner.invoke(cli, ["search", "all"])

    assert result.exit_code == 0
    mock_analysis_engine_instance.search_logs.assert_called_once()

    # Check the dictionary passed to search_logs
    args, _ = mock_analysis_engine_instance.search_logs.call_args
    called_filter_criteria_dict = args[0]

    assert isinstance(called_filter_criteria_dict, dict)
    assert called_filter_criteria_dict.get("scope") == "default"
    assert called_filter_criteria_dict.get("context_before") == 2
    assert called_filter_criteria_dict.get("context_after") == 2
    assert called_filter_criteria_dict.get("log_dirs_override") is None
    assert called_filter_criteria_dict.get("log_content_patterns_override") is None
    # The build_filter_criteria function doesn't explicitly add a "search_type" key based on its implementation.
    # We should check for the keys that are actually added.

    # Check output
    assert "Searching all records in scope: default, context: 2B/2A" in result.output
    assert "mocked_log_line_1" in result.output


def test_search_all_custom_options(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test the 'search all' command with custom options."""
    custom_scope = "custom_scope"
    custom_before = 5
    custom_after = 5
    custom_log_dirs = "/logs/a,/logs/b"
    custom_log_patterns = "ERROR,WARN"

    result = runner.invoke(
        cli,
        [
            "search",
            "all",
            "--scope",
            custom_scope,
            "--before",
            str(custom_before),
            "--after",
            str(custom_after),
            "--log-dirs",
            custom_log_dirs,
            "--log-patterns",
            custom_log_patterns,
        ],
    )

    assert result.exit_code == 0
    mock_analysis_engine_instance.search_logs.assert_called_once()

    args, _ = mock_analysis_engine_instance.search_logs.call_args
    called_filter_criteria_dict = args[0]

    assert called_filter_criteria_dict.get("scope") == custom_scope
    assert called_filter_criteria_dict.get("context_before") == custom_before
    assert called_filter_criteria_dict.get("context_after") == custom_after
    assert called_filter_criteria_dict.get("log_dirs_override") == ["/logs/a", "/logs/b"]
    assert called_filter_criteria_dict.get("log_content_patterns_override") == ["ERROR", "WARN"]

    assert f"Searching all records in scope: {custom_scope}, context: {custom_before}B/{custom_after}A" in result.output
    assert "mocked_log_line_1" in result.output


def test_search_all_engine_exception(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search all' when AnalysisEngine throws an exception."""
    error_message = "Engine exploded!"
    mock_analysis_engine_instance.search_logs.side_effect = Exception(error_message)

    result = runner.invoke(cli, ["search", "all"])

    assert result.exit_code == 0  # CLI itself doesn't exit with error, but prints error message
    assert f"Error during search: {error_message}" in result.output
    mock_analysis_engine_instance.search_logs.assert_called_once()


def test_cli_with_env_file(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test CLI initialization with a custom .env file."""
    # Create a dummy .env file for testing
    with runner.isolated_filesystem():
        with open(".env.test", "w") as f:
            f.write("TEST_VAR=test_value\n")

        result = runner.invoke(cli, ["--env-file", ".env.test", "search", "all"])

        assert result.exit_code == 0
        assert "Using custom .env file: .env.test" in result.output
        # Check that AnalysisEngine was initialized with the env_file_path and a logger
        mock_analysis_engine_class.assert_called_once_with(logger_instance=ANY, env_file_path=".env.test")
        mock_analysis_engine_instance.search_logs.assert_called_once()


# --- Tests for 'search time' ---


@pytest.mark.parametrize(
    "time_args, expected_criteria_updates",
    [
        (["--minutes", "30"], {"minutes": 30, "hours": 0, "days": 0}),
        (["--hours", "2"], {"minutes": 0, "hours": 2, "days": 0}),
        (["--days", "1"], {"minutes": 0, "hours": 0, "days": 1}),
        (["--days", "1", "--hours", "2"], {"minutes": 0, "hours": 2, "days": 1}),  # Engine prioritizes
    ],
)
def test_search_time_various_units(
    runner, mock_analysis_engine_class, mock_analysis_engine_instance, time_args, expected_criteria_updates
):
    """Test 'search time' with different time unit specifications."""
    base_command = ["search", "time"]
    full_command = base_command + time_args

    result = runner.invoke(cli, full_command)

    assert result.exit_code == 0
    mock_analysis_engine_instance.search_logs.assert_called_once()

    args, _ = mock_analysis_engine_instance.search_logs.call_args
    called_filter_criteria_dict = args[0]

    assert called_filter_criteria_dict.get("minutes") == expected_criteria_updates["minutes"]
    assert called_filter_criteria_dict.get("hours") == expected_criteria_updates["hours"]
    assert called_filter_criteria_dict.get("days") == expected_criteria_updates["days"]

    # Verify default scope, context etc.
    assert called_filter_criteria_dict.get("scope") == "default"
    assert called_filter_criteria_dict.get("context_before") == 2
    assert called_filter_criteria_dict.get("context_after") == 2

    assert "mocked_log_line_1" in result.output
    if len(time_args) > 2:  # Multiple time units
        assert "Warning: Multiple time units" in result.output


def test_search_time_no_time_units(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search time' when no time units are specified."""
    result = runner.invoke(cli, ["search", "time"])

    assert result.exit_code == 0  # The command itself completes
    assert "Error: Please specify at least one of --minutes, --hours, or --days greater than zero." in result.output
    mock_analysis_engine_instance.search_logs.assert_not_called()


def test_search_time_engine_exception(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search time' when AnalysisEngine throws an exception."""
    error_message = "Time engine exploded!"
    mock_analysis_engine_instance.search_logs.side_effect = Exception(error_message)

    result = runner.invoke(cli, ["search", "time", "--minutes", "10"])

    assert result.exit_code == 0
    assert f"Error during time-based search: {error_message}" in result.output
    mock_analysis_engine_instance.search_logs.assert_called_once()


# --- Tests for 'search first' ---


def test_search_first_valid_count(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search first' with a valid count."""
    count = 5
    result = runner.invoke(cli, ["search", "first", "--count", str(count)])

    assert result.exit_code == 0
    mock_analysis_engine_instance.search_logs.assert_called_once()

    args, _ = mock_analysis_engine_instance.search_logs.call_args
    called_filter_criteria_dict = args[0]

    assert called_filter_criteria_dict.get("first_n") == count
    assert called_filter_criteria_dict.get("scope") == "default"

    assert f"Searching first {count} records" in result.output
    assert "mocked_log_line_1" in result.output


@pytest.mark.parametrize("invalid_count", ["0", "-1", "abc"])
def test_search_first_invalid_count(runner, mock_analysis_engine_class, mock_analysis_engine_instance, invalid_count):
    """Test 'search first' with invalid counts."""
    result = runner.invoke(cli, ["search", "first", "--count", invalid_count])

    if invalid_count.lstrip("-").isdigit() and int(invalid_count) <= 0:  # Handle negative numbers too
        assert "Error: --count must be a positive integer." in result.output
    else:  # handles non-integer case like 'abc'
        assert "Error: Invalid value for '--count'" in result.output  # Click's default error for type mismatch

    mock_analysis_engine_instance.search_logs.assert_not_called()


def test_search_first_engine_exception(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search first' when AnalysisEngine throws an exception."""
    error_message = "First engine exploded!"
    mock_analysis_engine_instance.search_logs.side_effect = Exception(error_message)

    result = runner.invoke(cli, ["search", "first", "--count", "3"])

    assert result.exit_code == 0
    assert f"Error during search for first N records: {error_message}" in result.output
    mock_analysis_engine_instance.search_logs.assert_called_once()


# --- Tests for 'search last' ---


def test_search_last_valid_count(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search last' with a valid count."""
    count = 7
    result = runner.invoke(cli, ["search", "last", "--count", str(count)])

    assert result.exit_code == 0
    mock_analysis_engine_instance.search_logs.assert_called_once()

    args, _ = mock_analysis_engine_instance.search_logs.call_args
    called_filter_criteria_dict = args[0]

    assert called_filter_criteria_dict.get("last_n") == count
    assert called_filter_criteria_dict.get("scope") == "default"

    assert f"Searching last {count} records" in result.output
    assert "mocked_log_line_1" in result.output


@pytest.mark.parametrize("invalid_count", ["0", "-1", "xyz"])
def test_search_last_invalid_count(runner, mock_analysis_engine_class, mock_analysis_engine_instance, invalid_count):
    """Test 'search last' with invalid counts."""
    result = runner.invoke(cli, ["search", "last", "--count", invalid_count])

    if invalid_count.lstrip("-").isdigit() and int(invalid_count) <= 0:  # Handle negative numbers too
        assert "Error: --count must be a positive integer." in result.output
    else:
        assert "Error: Invalid value for '--count'" in result.output  # Click's default error

    mock_analysis_engine_instance.search_logs.assert_not_called()


def test_search_last_engine_exception(runner, mock_analysis_engine_class, mock_analysis_engine_instance):
    """Test 'search last' when AnalysisEngine throws an exception."""
    error_message = "Last engine exploded!"
    mock_analysis_engine_instance.search_logs.side_effect = Exception(error_message)

    result = runner.invoke(cli, ["search", "last", "--count", "4"])

    assert result.exit_code == 0
    assert f"Error during search for last N records: {error_message}" in result.output
    mock_analysis_engine_instance.search_logs.assert_called_once()
```

--------------------------------------------------------------------------------
/docs/refactoring/log_analyzer_refactoring_v2.md:
--------------------------------------------------------------------------------

```markdown
# Refactoring Plan for `log_analyzer_mcp` - v2

This document outlines the steps to refactor the `log_analyzer_mcp` repository, focusing on enhancing the log analysis capabilities and streamlining the project. This plan supersedes `log_analyzer_refactoring_v1.md`.

## Phase 1: Initial Setup and Dependency Resolution (Completed in v1)

This phase is considered complete as per `log_analyzer_refactoring_v1.md`. All initial setup, dependency resolution, internal import fixes, and missing file issues have been addressed.

- [x] **Project Structure Update:**
  - [x] Acknowledge new `src/log_analyzer_client` and `tests/log_analyzer_client` directories.
  - [x] Confirm `pyproject.toml` `[tool.hatch.build.targets.wheel].packages` includes both `src/log_analyzer_mcp` and `src/log_analyzer_client` to ensure they are packaged together. Example: `packages = ["src/log_analyzer_mcp", "src/log_analyzer_client"]`.
  - [x] Ensure `[tool.hatch.version].path` points to a shared root or primary module like `src/log_analyzer_mcp/__init__.py`.

## Phase 2: Core Log Analyzer Logic and Configuration

- [x] **Develop Core Analysis Module (in `src/log_analyzer_mcp/core` or similar):**
  - [x] Create a new module (e.g., `src/log_analyzer_mcp/core/analysis_engine.py`) to house the primary log parsing and filtering logic. This engine will be used by both the MCP server and the CLI client.
  - [x] Generalize log parsing to be highly flexible and configurable.
  - [x] Implement logic to search log file content based on filter criteria:
    - [x] **All records:** Include all matches based on defined patterns.
    - [x] **Time-based records:**
      - [x] Last N minutes.
      - [x] Last N hours.
      - [x] Last N days.
    - [x] **Positional records:**
      - [x] First N (oldest) records.
      - [x] Last N (newest) records.
  - [x] Implement log file filtering by:
    - [x] **Named logging scopes:** Allow users to define scopes in `.env` (e.g., `LOG_SCOPE_MODULE_A=logs/module_a/`, `LOG_SCOPE_SPECIFIC_FILE=logs/specific.log`) to focus search on specific directories or files.
  - [x] Implement flexible content filter match support, configurable via `.env`:
    - [x] **Log files directory/directories:** Define an array of directories to search within (e.g., `LOG_DIRECTORIES=["logs/", "another_log_dir/"]`). Default to searching all `*.log` files within the project root if not specified. **Ensure searches are always confined within the project directory.**
    - [x] **Specific search patterns per log level:** Allow an array of search patterns (strings or regex) for each log level (DEBUG, INFO, WARNING, ERROR) (e.g., `LOG_PATTERNS_ERROR=["Exception:.*", "Traceback (most recent call last):"]`).
    - [x] **Context lines:** Return N lines before and after a match (e.g., `LOG_CONTEXT_LINES_BEFORE=2`, `LOG_CONTEXT_LINES_AFTER=2`). Default to 2 lines before and 2 after if not specified.
  - [x] Ensure all configuration options read from `.env` can also be supplied via environment variables. This configuration loading should be part of the core module or a shared utility. (Handled by `ConfigLoader`)
- [x] **Refactor `log_analyzer.py` (in `src/log_analyzer_mcp`):**
  - [x] This file might become a wrapper or utility that leverages the new core analysis engine, or have its relevant logic moved into the core engine. Its previous role as a direct script for test log analysis will be superseded. (pytest-specific logic moved to `test_log_parser.py`)
  - [x] Identify any specific test log parsing logic from the old `log_analyzer.py` that is still relevant for the `analyze_tests` MCP tool and integrate it into the core engine or a specialized part of `src/log_analyzer_mcp`. (Moved to `test_log_parser.py` and used by `analyze_tests`)
- [x] **Create `.env.template`:**
  - [x] Provide example configurations for all new features (see the sample below):
    - [x] Logging scopes.
    - [x] Log directories.
    - [x] Search patterns per log level.
    - [x] Context lines.
  (Created as `dotenv.template`)
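
  An illustrative sample (variable names are the examples used in this plan; check `dotenv.template` for the authoritative list):

      # Named logging scopes
      LOG_SCOPE_MODULE_A=logs/module_a/
      LOG_SCOPE_SPECIFIC_FILE=logs/specific.log
      # Directories to search (defaults to *.log under the project root)
      LOG_DIRECTORIES=["logs/", "another_log_dir/"]
      # Per-level content patterns
      LOG_PATTERNS_ERROR=["Exception:.*", "Traceback (most recent call last):"]
      # Context lines around each match (defaults: 2 before, 2 after)
      LOG_CONTEXT_LINES_BEFORE=2
      LOG_CONTEXT_LINES_AFTER=2
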
- [x] **Refactor Type Hinting (Project-wide):**
  - [x] Throughout the project (both MCP and Client), especially in function signatures for MCP tools and CLI arguments, avoid using `Optional[Type]` or `Union[Type, None]`.
  - [x] Instead, provide default values for parameters to make them implicitly optional (e.g., `param: str = "default_value"` instead of `param: Optional[str] = None`). This is to ensure better compatibility with AI-driven IDEs and MCP client integrations.
  - [x] *Note: Refer to FastMCP documentation for best practices if further clarification is needed on MCP tool signature compatibility.*
- [x] **Shared Utilities (e.g., in `src/log_analyzer_mcp/common`):**
  - [x] Ensure `logger_setup.py` remains a shared utility.
  - [x] Consider if any other common logic (e.g., config loading, path resolution) should be placed in `common` for use by both `log_analyzer_mcp` and `log_analyzer_client`. (Created `ConfigLoader`, `utils.py`)

## Phase 3: MCP Server and CLI Implementation

- [x] **Update MCP Server (`src/log_analyzer_mcp/log_analyzer_mcp_server.py`):**
  - [x] **Remove `analyze_runtime_errors` tool and related logic. The new core analysis engine should cover general log searching.** (Mark as pending until the core engine is stable and functional). (Tool removed, function moved to `analyze_runtime_errors.py`)
  - [x] **Remove `parse_coverage` tool and its associated tests.** This functionality is confirmed to be no longer needed. (Removed)
  - [x] Implement new MCP server tools that utilize the core analysis engine from `src/log_analyzer_mcp/core/` (see the sketch below):
    - [x] `search_log_all_records`: Searches for all matching records.
      - [x] Parameters: `scope: str = "default"`, `context_before: int = 2`, `context_after: int = 2` (and other relevant global configs like patterns, directories if not scope-defined).
    - [x] `search_log_time_based`: Searches records within a time window.
      - [x] Parameters: `minutes: int = 0`, `hours: int = 0`, `days: int = 0`, `scope: str = "default"`, `context_before: int = 2`, `context_after: int = 2`. (Ensure only one time unit can be effectively non-zero).
    - [x] `search_log_first_n_records`: Searches for the first N matching records.
      - [x] Parameters: `count: int`, `scope: str = "default"`, `context_before: int = 2`, `context_after: int = 2`.
    - [x] `search_log_last_n_records`: Searches for the last N matching records.
      - [x] Parameters: `count: int`, `scope: str = "default"`, `context_before: int = 2`, `context_after: int = 2`.
  - [x] Keep the existing `analyze_tests` tool, but refactor it to use the core analysis engine or specialized test log parsing logic if retained from the old `log_analyzer.py`. (Refactored to use `test_log_parser.analyze_pytest_log_content`)
  - [x] Ensure all MCP tool parameters adhere to the non-Optional/Union type hinting rule.
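
  A minimal sketch of one such tool, assuming FastMCP's `@mcp.tool()` decorator API (the import path and the bare `AnalysisEngine()` construction are illustrative; the real definitions live in `log_analyzer_mcp_server.py`):

      from mcp.server.fastmcp import FastMCP

      from log_analyzer_mcp.core.analysis_engine import AnalysisEngine

      mcp = FastMCP("log_analyzer")

      @mcp.tool()
      def search_log_all_records(scope: str = "default", context_before: int = 2, context_after: int = 2) -> dict:
          """Search for all matching records (defaults instead of Optional[...], per the rule above)."""
          engine = AnalysisEngine()  # illustrative; the server may pass a logger or .env path
          criteria = {"scope": scope, "context_before": context_before, "context_after": context_after}
          return engine.search_logs(criteria)
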
- [x] **Implement CLI (`src/log_analyzer_client/cli.py`):**
  - [x] Use `click` or `argparse` for the CLI interface.
  - [x] This CLI will also utilize the core analysis engine from `src/log_analyzer_mcp/core/`.
  - [x] Create script aliases for CLI invocation (e.g., `log-analyzer`) via `pyproject.toml` `[project.scripts]`.
  - [x] Provide sub-commands that mirror the MCP server tools with feature parity (see the sketch below):
    - [x] `log-analyzer search all [--scope SCOPE] [--before LINES] [--after LINES]`
    - [x] `log-analyzer search time [--minutes M] [--hours H] [--days D] [--scope SCOPE] [--before LINES] [--after LINES]`
    - [x] `log-analyzer search first [--count N] [--scope SCOPE] [--before LINES] [--after LINES]`
    - [x] `log-analyzer search last [--count N] [--scope SCOPE] [--before LINES] [--after LINES]`
  - [x] Allow all configuration options (log directories, patterns, etc.) to be overridden via CLI arguments if not using scopes from `.env`.
  - [x] Ensure CLI parameters also adhere to the non-Optional/Union type hinting rule where applicable for internal consistency.
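
  A minimal `click` sketch of that command layout (illustrative only; the shipped implementation is `src/log_analyzer_client/cli.py`):

      import click

      @click.group()
      def cli():
          """Log Analyzer CLI"""

      @cli.group()
      def search():
          """Search log records."""

      @search.command("all")
      @click.option("--scope", default="default")
      @click.option("--before", "context_before", default=2, type=int)
      @click.option("--after", "context_after", default=2, type=int)
      def search_all(scope: str, context_before: int, context_after: int) -> None:
          click.echo(f"Searching all records in scope: {scope}, context: {context_before}B/{context_after}A")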

## Phase 4: Testing and Coverage

- [x] **Update/Create Tests:**
  - [x] **In `tests/log_analyzer_mcp/`:**
    - [x] Write comprehensive tests for the core analysis engine (`src/log_analyzer_mcp/core/analysis_engine.py`).
    - [x] Write tests for the new `.env` configuration loading and environment variable overrides (if handled by a shared module in `src/log_analyzer_mcp/common`). (Covered by `AnalysisEngine` tests with `ConfigLoader`)
    - [x] Write tests for the new/updated MCP server tools in `log_analyzer_mcp_server.py`. (Tests written; core functionality confirmed via direct MCP calls. `test_main_function_stdio_mode` successfully covers stdio startup via `main()`. `test_main_function_http_mode` is XFAIL. Other automated tests like `test_quick_subset` using the `server_session` fixture remain `xfail` due to fixture issues, though they currently `XPASS`.)
    - [x] **Remove tests related to `parse_coverage.py`.** (Done)
    - [x] **Adapt or remove tests for `analyze_runtime_errors.py` once the module is removed.** (Adapted, `test_analyze_runtime_errors.py` calls direct function)
    - [x] Update tests for `log_analyzer.py` if it's retained in any form, or remove them if its functionality is fully migrated. (Superseded by `test_log_parser.py` and `AnalysisEngine` tests)
  - [x] **In `tests/log_analyzer_client/`:**
    - [x] Write tests for the CLI functionality in `src/log_analyzer_client/cli.py`. (All 21 tests PASSING, achieving 100% coverage for `cli.py`)
- [ ] **Achieve and Maintain Test Coverage:**
  - [ ] Ensure overall project test coverage is >= 80%, covering both `log_analyzer_mcp` and `log_analyzer_client` modules. (Currently ~78% for `log_analyzer_mcp` and 100% for `log_analyzer_client`. `src/log_analyzer_client/cli.py` has 100% coverage. Key areas for improvement: `log_analyzer_mcp_server.py` (especially HTTP path if XFAIL resolved, and untested tools), and potentially `src/log_analyzer_mcp/test_log_parser.py`.)
  - [ ] Specifically target >= 80% coverage for the core analysis engine and the new MCP/CLI interfaces. (`AnalysisEngine` coverage is good; `src/log_analyzer_client/cli.py` is 100%. MCP server `main()` for HTTP mode (XFAIL) and other server tools need more test coverage.)

## Phase 5: Documentation and Finalization

- [ ] **Update/Create Documentation:**
  - [ ] Update `README.md` for the standalone project:
    - [ ] Installation instructions (using `hatch`), noting that it installs both MCP server components and the CLI client.
    - [ ] Detailed usage instructions for the MCP server tools.
    - [ ] Detailed usage instructions for the CLI (`log-analyzer`), including all commands and options.
    - [ ] Instructions on how to run the MCP server itself via its script entry point (e.g., `uvx log-analyzer-mcp` or `log-analyzer-mcp`), including the `--transport` option (`http` or `stdio`) and HTTP-specific options like `--host`, `--port`, and `--log-level`.
    - [ ] Clear explanation of how to configure logging scopes, directories, patterns, and context lines using `.env` files and environment variables (relevant for both MCP server and CLI).
    - [ ] Examples for `.env` configuration.
    - [ ] How to run tests (covering both `tests/log_analyzer_mcp` and `tests/log_analyzer_client`) and check coverage.
  - [ ] Update `docs/refactoring/README.md` to link to this v2 plan.
  - [x] Create or update other documents in `docs/` as needed (e.g., `docs/usage.md`, `docs/configuration.md`, `docs/architecture.md` briefly explaining the client/server structure).
- [x] **Linting and Formatting (Project-wide):**
  - [x] Run `black .` and `isort .` across `src/log_analyzer_mcp`, `src/log_analyzer_client`, `tests/log_analyzer_mcp`, `tests/log_analyzer_client`. (Done)
  - [ ] Run `pylint src tests` and address warnings/errors.
  - [ ] Run `mypy src tests` and address type errors, paying close attention to the new type hinting guidelines.
- [x] **Build and Distribution:**
  - [x] Verify `pyproject.toml` correctly defines `[project.scripts]` for the `log-analyzer` CLI. (Verified during CLI implementation)
  - [x] Test building a wheel: `hatch build`. Ensure both modules are included.
  - [x] If this package is intended for PyPI, ensure all metadata is correct.
- [ ] **Final Review:**
  - [ ] Review all changes and ensure the repository is clean, self-contained, and adheres to the new refactoring goals.
  - [ ] Ensure a consistent class hierarchy and code design is maintained, especially for shared components.
  - [x] Ensure all `.cursorrules` instructions are being followed.
  - [x] *Note on FastMCP: Consult the FastMCP documentation for any specific guidance on MCP server implementation details, especially regarding tool definitions and type handling, to ensure optimal compatibility. This can be fetched via the `mcp_FastMCP_Docs_fetch_fastmcp_documentation` tool if needed.* (Fetched)

## Deferred Tasks

- [x] **Remove `src/log_analyzer_mcp/analyze_runtime_errors.py` and its tests:** This will be done after the core analysis engine is complete and it's confirmed that no code from `analyze_runtime_errors.py` needs to be salvaged or migrated. (Function moved, module kept for now, tests adapted)

## Notes

- The primary goal is to create a highly flexible and configurable log analysis tool with a clear separation between the core logic (in `log_analyzer_mcp`), the MCP service interface (`log_analyzer_mcp`), and a command-line client (`log_analyzer_client`).
- Adherence to the specified type hinting style (no `Optional`/`Union` in favor of default values) is critical for broad compatibility.
```

--------------------------------------------------------------------------------
/scripts/release.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
# Wrapper script to automate the release process for log_analyzer_mcp

# Exit immediately if a command exits with a non-zero status.
set -e

# --- Configuration ---
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
PACKAGE_NAME="log-analyzer-mcp"
PYPI_URL="https://pypi.org/pypi"
TESTPYPI_URL="https://test.pypi.org/pypi"
PYPROJECT_PATH="${PROJECT_ROOT}/pyproject.toml"

# --- Dependency Check ---
if ! command -v curl &> /dev/null; then
    echo "❌ ERROR: curl is required but not found. Please install curl." >&2
    exit 1
fi
if ! command -v jq &> /dev/null; then
    echo "❌ ERROR: jq is required but not found. Please install jq (e.g., brew install jq)." >&2
    exit 1
fi
if ! command -v sed &> /dev/null; then
    echo "❌ ERROR: sed is required but not found." >&2
    exit 1
fi
if ! command -v grep &> /dev/null; then
    echo "❌ ERROR: grep is required but not found." >&2
    exit 1
fi

# --- Argument Parsing ---
VERSION=""
SKIP_TESTPYPI=false
TEST_ONLY=false
YES_FLAG=false
UPDATE_TARGET="prod" # Default

# Flags to track if options were set via command line
VERSION_SET=false
SKIP_TESTPYPI_SET=false
TEST_ONLY_SET=false
UPDATE_TARGET_SET=false

usage() {
    echo "Usage: $0 [-h] [-y] [--skip-testpypi] [--test-only] [--update-target <prod|test>] [--version VERSION]"
    echo ""
    echo "Automates the release process: TestPyPI build & test -> Prod PyPI build & publish -> Update Cursor Config."
    echo ""
    echo "Options:"
    echo "  -h, --help           Show this help message and exit."
    echo "  -y, --yes            Automatically answer yes to confirmation prompts (non-interactive)."
    echo "  --skip-testpypi      Skip the TestPyPI build and local install test steps."
    echo "  --test-only          Only perform the TestPyPI build and local install test, then exit."
    echo "  --update-target <prod|test>  Install version from prod (PyPI) or test (TestPyPI) for Cursor use (default: prod)."
    echo "  --version VERSION    Specify the version number to release directly."
    echo ""
    echo "If --version is not provided in interactive mode, you will be prompted."
    exit 0
}

# --- Helper Functions ---
check_if_version_exists() {
    local pkg_name="$1"
    local version_to_check="$2"
    local index_url="$3"
    local index_name="$4"

    echo "  Checking if version $version_to_check exists on $index_name..."
    # Query the version-specific JSON endpoint with curl; jq -e succeeds only if .info.version matches
    if curl -s "$index_url/$pkg_name/$version_to_check/json" | jq -e '(.info.version == "'"$version_to_check"'")' > /dev/null; then
        echo "  ❌ ERROR: Version $version_to_check already exists on $index_name!" >&2
        return 1 # Indicate failure (version exists)
    else
        # Check if the overall package fetch failed (e.g., 404), indicating version likely doesn't exist
        if ! curl -s -f "$index_url/$pkg_name/json" > /dev/null; then
             echo "  Package $pkg_name not found on $index_name (or network error). Assuming version $version_to_check does not exist."
             return 0 # Indicate success (version likely doesn't exist)
        fi
        echo "  Version $version_to_check does not appear to exist on $index_name. Proceeding."
        return 0 # Indicate success (version does not exist)
    fi
}

get_current_version() {
    if [ ! -f "$PYPROJECT_PATH" ]; then
        echo "❌ ERROR: pyproject.toml not found at $PYPROJECT_PATH" >&2
        return 1
    fi
    # Extract version using grep -E and the user's corrected sed -E command
    current_ver=$(grep -E '^version\s*=\s*\".+\"' "$PYPROJECT_PATH" | sed -E 's/^version[\ ]*=[\ ]*\"([0-9.]+)\"$/\1/')
    if [ -z "$current_ver" ]; then
        echo "❌ ERROR: Could not extract current version from $PYPROJECT_PATH using grep/sed." >&2
        return 1
    fi
    echo "$current_ver"
    return 0
}

suggest_next_patch_version() {
    local current_version="$1"
    # Basic suggestion: increments the last number after the last dot
    local prefix=$(echo "$current_version" | sed -E 's/\.[0-9]+$//')
    local patch=$(echo "$current_version" | sed -E 's/^.*\.(.*)/\1/')
    # Check if patch is purely numeric
    if [[ "$patch" =~ ^[0-9]+$ ]]; then
        local next_patch=$((patch + 1))
        echo "${prefix}.${next_patch}"
    else
        # Cannot auto-increment non-numeric patch (e.g., pre-releases)
        echo "$current_version" # Suggest current as fallback
    fi
}

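# Example: is_version_greater "1.2.10" "1.2.9" returns 0 (true), because sort -V
# orders "1.2.10" after "1.2.9", where a plain lexical sort would not.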
is_version_greater() {
    local ver1="$1" # New version
    local ver2="$2" # Old version
    # Returns 0 if ver1 > ver2, 1 otherwise
    if [ "$ver1" == "$ver2" ]; then
        return 1 # Not greater if equal
    fi
    # Use sort -V: If the sorted list's last element is ver1, then ver1 > ver2
    if [ "$(printf '%s\n' "$ver1" "$ver2" | sort -V | tail -n 1)" == "$ver1" ]; then
        return 0 # ver1 is greater
    else
        return 1 # ver1 is not greater
    fi
}

validate_version_format() {
    local ver="$1"
    # Basic X.Y.Z format check, allows for suffixes like -alpha, .rc1 etc.
    if ! [[ "$ver" =~ ^[0-9]+\.[0-9]+\.[0-9]+([a-zA-Z0-9.-]*)?$ ]]; then
        echo "  ⚠️ Warning: Version format '$ver' seems unusual. Expected X.Y.Z or similar."
        # Allow proceeding but warn
    fi
    return 0
}

# --- Main Script Logic ---

# Change to the project root directory to ensure paths are correct
cd "$PROJECT_ROOT"
echo "ℹ️ Changed working directory to project root: $PROJECT_ROOT"

# CHANGELOG.md Reminder
echo "🔔 REMINDER: Before proceeding with the release, ensure you have:"
echo "   1. Verified all tests are passing with adequate coverage"
echo "   2. Updated CHANGELOG.md with all notable changes in this version"
echo "   3. The version number in pyproject.toml will be updated automatically"
echo ""

# Argument parsing loop
while [[ $# -gt 0 ]]; do
    key="$1"
    case $key in
        -h|--help)
            usage
            ;;
        -y|--yes)
            YES_FLAG=true
            shift # past argument
            ;;
        --skip-testpypi)
            SKIP_TESTPYPI=true
            SKIP_TESTPYPI_SET=true # Track that it was set
            shift # past argument
            ;;
        --test-only)
            TEST_ONLY=true
            TEST_ONLY_SET=true # Track that it was set
            shift # past argument
            ;;
        --update-target)
            if [[ "$2" == "prod" || "$2" == "test" ]]; then
                UPDATE_TARGET="$2"
                UPDATE_TARGET_SET=true # Track that it was set
                shift # past argument
                shift # past value
            else
                echo "❌ ERROR: --update-target must be 'prod' or 'test'" >&2
                usage
            fi
            ;;
        --version)
            if [ -n "$2" ]; then
                VERSION="$2"
                VERSION_SET=true # Track that it was set
                shift # past argument
                shift # past value
            else
                echo "❌ ERROR: --version requires an argument." >&2
                usage
            fi
            ;;
        *)    # unknown option
            echo "❌ ERROR: Unknown option: $1" >&2
            usage
            ;;
    esac
done

# --- Interactive Prompts (if not -y and flags not set) ---
if [ "$YES_FLAG" = false ]; then
    echo "🔧 Entering interactive configuration mode (options not set via flags)..."

    # Prompt for Update Target
    if [ "$UPDATE_TARGET_SET" = false ]; then
        read -p "  Update target for local UVX installation? (prod/test) [prod]: " target_choice
        UPDATE_TARGET=${target_choice:-$UPDATE_TARGET} # Default to prod if empty
        if [[ "$UPDATE_TARGET" != "prod" && "$UPDATE_TARGET" != "test" ]]; then
            echo "  ❌ ERROR: Invalid target. Please enter 'prod' or 'test'." >&2
            exit 1
        fi
        echo "  Using update target: $UPDATE_TARGET"
    fi

    # Prompt for Test-Only (only if target is test)
    if [ "$TEST_ONLY_SET" = false ] && [ "$UPDATE_TARGET" = "test" ]; then
        read -p "  Run TestPyPI phase ONLY (--test-only)? (y/n) [n]: " test_only_choice
        if [[ ${test_only_choice:-n} =~ ^[Yy]$ ]]; then
            TEST_ONLY=true
        else
            TEST_ONLY=false
        fi
        echo "  Run TestPyPI only: $TEST_ONLY"
    fi

    # Prompt for Skip-TestPyPI (only if target is prod)
    if [ "$SKIP_TESTPYPI_SET" = false ] && [ "$UPDATE_TARGET" = "prod" ]; then
        read -p "  Skip TestPyPI build/test phase (--skip-testpypi)? (y/n) [n]: " skip_testpypi_choice
        if [[ ${skip_testpypi_choice:-n} =~ ^[Yy]$ ]]; then
234 |             SKIP_TESTPYPI=true
235 |         else
236 |             SKIP_TESTPYPI=false
237 |         fi
238 |         echo "  Skip TestPyPI phase: $SKIP_TESTPYPI"
239 |     fi
240 | 
241 |     # Prompt for Version (if not set via flag)
242 |     if [ "$VERSION_SET" = false ]; then
243 |         CURRENT_VERSION=$(get_current_version)
244 |         if [ $? -ne 0 ]; then exit 1; fi
245 |         SUGGESTED_VERSION=$(suggest_next_patch_version "$CURRENT_VERSION")
246 |         echo "  Current version in pyproject.toml: $CURRENT_VERSION"
247 | 
248 |         while true; do
249 |             read -p "  Enter the new version number (suggested: $SUGGESTED_VERSION): " entered_version
250 |             if [ -z "$entered_version" ]; then
251 |                 entered_version="$SUGGESTED_VERSION"
252 |                 echo "  Using suggested version: $entered_version"
253 |             fi
254 |             validate_version_format "$entered_version"
255 |             if is_version_greater "$entered_version" "$CURRENT_VERSION"; then
256 |                  read -p "  Confirm release version $entered_version? (y/n) " -n 1 -r
257 |                  echo
258 |                  if [[ $REPLY =~ ^[Yy]$ ]]; then
259 |                      VERSION="$entered_version"
260 |                      break
261 |                  else
262 |                      echo "  Version not confirmed. Please try again."
263 |                  fi
264 |             else
265 |                 echo "  ❌ ERROR: New version '$entered_version' must be greater than current version '$CURRENT_VERSION'." >&2
266 |             fi
267 |         done
268 |     fi
269 | fi
270 | 
271 | # --- Version Validation (if provided via flag) ---
272 | if [ "$VERSION_SET" = true ]; then
273 |     validate_version_format "$VERSION"
274 |     # Add check against current version if needed, though less critical if flag is used explicitly
275 |     # CURRENT_VERSION=$(get_current_version)
276 |     # if ! is_version_greater "$VERSION" "$CURRENT_VERSION"; then 
277 |     #    echo "❌ ERROR: Specified version $VERSION is not greater than current version $CURRENT_VERSION." >&2
278 |     #    exit 1
279 |     # fi
280 | fi
281 | 
282 | # Final check: Ensure VERSION is determined
283 | if [ -z "$VERSION" ]; then
284 |     echo "❌ ERROR: Release version could not be determined." >&2
285 |     exit 1
286 | fi
287 | 
288 | echo ""
289 | echo "🚀 Starting Release Process for Version: $VERSION"
290 | echo "--------------------------------------------------"
291 | echo "Configuration:"
292 | echo "  Skip TestPyPI Build/Test: $SKIP_TESTPYPI"
293 | echo "  TestPyPI Only:            $TEST_ONLY"
294 | echo "  Update Target for Cursor: $UPDATE_TARGET"
295 | echo "  Non-interactive:          $YES_FLAG"
296 | echo "--------------------------------------------------"
297 | 
298 | # Confirmation prompt
299 | if [ "$YES_FLAG" = false ]; then
300 |     read -p "Proceed with this release plan? (y/n) " -n 1 -r
301 |     echo
302 |     if [[ ! $REPLY =~ ^[Yy]$ ]]; then
303 |         echo "Release cancelled by user."
304 |         exit 1
305 |     fi
306 | fi
307 | 
308 | # --- TestPyPI Phase ---
309 | if [ "$SKIP_TESTPYPI" = false ]; then
310 |     echo ""
311 |     echo "📦 Phase 1: Building and Publishing to TestPyPI..."
312 |     
313 |     # Check if version already exists on TestPyPI BEFORE publishing
314 |     check_if_version_exists "$PACKAGE_NAME" "$VERSION" "$TESTPYPI_URL" "TestPyPI"
315 |     if [ $? -ne 0 ]; then exit 1; fi # Exit if version exists
316 | 
317 |     # Call publish script (which updates pyproject.toml)
318 |     echo "  Calling publish.sh to build and publish to TestPyPI..."
319 |     "${SCRIPT_DIR}/publish.sh" -t -y -v "$VERSION"
320 |     if [ $? -ne 0 ]; then echo "❌ ERROR: Failed to publish to TestPyPI."; exit 1; fi
321 |     echo "✅ Successfully published to TestPyPI."
322 | 
323 |     echo ""
324 |     echo "🔧 Phase 2: Testing Local Installation..."
325 |     "${SCRIPT_DIR}/test_uvx_install.sh"
326 |     if [ $? -ne 0 ]; then echo "❌ ERROR: Local installation test failed."; exit 1; fi
327 |     echo "✅ Local installation test successful."
328 | 
329 |     if [ "$TEST_ONLY" = true ]; then
330 |         echo ""
331 |         echo "✅ TestPyPI phase complete (--test-only specified). Exiting."
332 |         exit 0
333 |     fi
334 | else
335 |     echo "⏩ Skipping TestPyPI build and test phases."
336 | fi
337 | 
338 | # --- Production Phase ---
339 | echo ""
340 | echo "📦 Phase 3: Building and Publishing to Production PyPI..."
341 | 
342 | # Check if version already exists on Production PyPI BEFORE publishing
343 | check_if_version_exists "$PACKAGE_NAME" "$VERSION" "$PYPI_URL" "Production PyPI"
344 | if [ $? -ne 0 ]; then exit 1; fi # Exit if version exists
345 | 
346 | # Extra confirmation for production unless -y is set
347 | if [ "$YES_FLAG" = false ]; then
348 |     read -p "🚨 REALLY publish version $VERSION to Production PyPI? (y/n) " -n 1 -r
349 |     echo
350 |     if [[ ! $REPLY =~ ^[Yy]$ ]]; then
351 |         echo "Production release cancelled by user."
352 |         exit 1
353 |     fi
354 | fi
355 | 
356 | # Call publish script (which updates pyproject.toml again if needed, harmless)
357 | "${SCRIPT_DIR}/publish.sh" -p -y -v "$VERSION"
358 | if [ $? -ne 0 ]; then echo "❌ ERROR: Failed to publish to Production PyPI."; exit 1; fi
359 | echo "✅ Successfully published to Production PyPI."
360 | 
361 | # --- Update Phase ---
362 | echo ""
363 | echo "🔧 Phase 4: Installing Release Version for Local UVX (Target: $UPDATE_TARGET)..."
364 | 
365 | INSTALL_ARGS=""
366 | STRATEGY_ARG=""
367 | PACKAGE_SPEC="$PACKAGE_NAME@$VERSION"
368 | 
369 | if [ "$UPDATE_TARGET" == "test" ]; then
370 |     echo "  Installing version $VERSION from TestPyPI for local uvx command..."
371 |     INSTALL_ARGS="--default-index $TESTPYPI_URL --index $PYPI_URL"
372 |     STRATEGY_ARG="--index-strategy unsafe-best-match"
373 | else
374 |     echo "  Installing version $VERSION from PyPI for local uvx command..."
375 |     # INSTALL_ARGS="--refresh --default-index $PYPI_URL" # Explicitly use PyPI
376 |     INSTALL_ARGS="--refresh" # Avoid passing --default-index for PyPI, as it does not work
377 | fi
378 | 
379 | install_command="uvx ${INSTALL_ARGS} ${STRATEGY_ARG} ${PACKAGE_SPEC}"
380 | echo "  Running: $install_command"
381 | eval "$install_command" # Use eval so the assembled argument string is parsed correctly
382 | 
383 | if [ $? -ne 0 ]; then echo "❌ ERROR: Failed to install $UPDATE_TARGET version $VERSION locally via uvx."; exit 1; fi
384 | echo "  ✅ $UPDATE_TARGET version $VERSION installed for local uvx command."
385 | 
386 | echo "  Refreshing local UVX cache (may not be necessary with direct install)..."
387 | if command -v uvx &> /dev/null; then
388 |     # Refresh might still help ensure internal links are updated
389 |     uvx --refresh "$PACKAGE_NAME" --version
390 |     echo "  ✅ UVX cache refreshed (or attempted)."
391 | else
392 |      echo "  ⚠️ UVX command not found, skipping cache refresh."
393 | fi
394 | 
395 | echo ""
396 | echo "🎉 Release process for $VERSION completed successfully!"
397 | echo "ℹ️ Remember to commit and push the updated pyproject.toml and potentially tag the release in Git."
398 | echo "ℹ️ Restart Cursor or the MCP Server if needed to pick up the new version."
399 | 
400 | exit 0 
```

--------------------------------------------------------------------------------
/docs/api_reference.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Log Analyzer MCP API Reference
  2 | 
  3 | This document provides a detailed reference for the tools and endpoints exposed by the Log Analyzer MCP Server and the commands available through its CLI client.
  4 | 
  5 | ## Table of Contents
  6 | 
  7 | - [MCP Server Tools](#mcp-server-tools)
  8 |   - [Test Analysis and Execution](#test-analysis-and-execution)
  9 |   - [Log Searching](#log-searching)
 10 |   - [Server Utilities](#server-utilities)
 11 | - [CLI Client (`log-analyzer`)](#cli-client-log-analyzer)
 12 |   - [Global Options](#global-options)
 13 |   - [Search Commands (`log-analyzer search`)](#search-commands-log-analyzer-search)
 14 |     - [Common Search Options](#common-search-options)
 15 |     - [`log-analyzer search all`](#log-analyzer-search-all)
 16 |     - [`log-analyzer search time`](#log-analyzer-search-time)
 17 |     - [`log-analyzer search first`](#log-analyzer-search-first)
 18 |     - [`log-analyzer search last`](#log-analyzer-search-last)
 19 | - [Error Handling](#error-handling)
 20 | 
 21 | ---
 22 | 
 23 | ## MCP Server Tools
 24 | 
 25 | The Log Analyzer MCP Server provides tools for test analysis, log searching, and server introspection.
 26 | 
 27 | ### Test Analysis and Execution
 28 | 
 29 | Tools related to running tests, analyzing results, and managing coverage reports.
 30 | 
 31 | #### `analyze_tests`
 32 | 
 33 | Analyzes the most recent test run and provides detailed information about failures.
 34 | 
 35 | **Parameters:**
 36 | 
 37 | | Name           | Type    | Required | Default | Description                                        |
 38 | |----------------|---------|----------|---------|----------------------------------------------------|
 39 | | `summary_only` | boolean | No       | `False` | Whether to return only a summary of the test results |
 40 | 
 41 | **Returns:**
 42 | 
 43 | A JSON object containing the test analysis, including:
 44 | 
 45 | - `summary`: Overall summary (status, passed, failed, skipped).
 46 | - `error_details`: (If not `summary_only`) List of detailed error information.
 47 | - `log_file`: Path to the analyzed log file.
 48 | - `log_timestamp`: Timestamp of the log file.
 49 | - `log_age_minutes`: Age of the log file in minutes.
 50 | - `error`: (If an error occurred during analysis) Error message.
 51 | 
 52 | **Example Call:**
 53 | 
 54 | ```json
 55 | {
 56 |   "tool_name": "analyze_tests",
 57 |   "arguments": {
 58 |     "summary_only": true
 59 |   }
 60 | }
 61 | ```
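    | 
    | An illustrative `summary_only` response (all values here are hypothetical; the exact contents depend on the analyzed run):
    | 
    | ```json
    | {
    |   "summary": {
    |     "status": "FAILED",
    |     "passed": 42,
    |     "failed": 2,
    |     "skipped": 1
    |   },
    |   "log_file": "logs/run_all_tests.log",
    |   "log_timestamp": "2025-01-01T12:00:00",
    |   "log_age_minutes": 5
    | }
    | ```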
 62 | 
 63 | #### `run_tests_no_verbosity`
 64 | 
 65 | Runs all tests with minimal output (verbosity level 0). Excludes server integration tests to prevent recursion.
 66 | 
 67 | **Parameters:** None
 68 | 
 69 | **Returns:**
 70 | 
 71 | A JSON object with:
 72 | 
 73 | - `success`: Boolean indicating if the test execution command was successful.
 74 | - `return_code`: Exit code from the test runner.
 75 | - `test_output`: Combined stdout and stderr from the test run.
 76 | - `analysis_log_path`: Path to the log file where test output was saved.
 77 | - `error`: (If an error occurred) Error message.
 78 | 
 79 | **Example Call:**
 80 | 
 81 | ```json
 82 | {
 83 |   "tool_name": "run_tests_no_verbosity",
 84 |   "arguments": {}
 85 | }
 86 | ```
 87 | 
 88 | #### `run_tests_verbose`
 89 | 
 90 | Runs all tests with verbose output (verbosity level 1). Excludes server integration tests.
 91 | 
 92 | **Parameters:** None
 93 | 
 94 | **Returns:** (Same structure as `run_tests_no_verbosity`)
 95 | 
 96 | **Example Call:**
 97 | 
 98 | ```json
 99 | {
100 |   "tool_name": "run_tests_verbose",
101 |   "arguments": {}
102 | }
103 | ```
104 | 
105 | #### `run_tests_very_verbose`
106 | 
107 | Runs all tests with very verbose output (verbosity level 2) and enables coverage. Excludes server integration tests.
108 | 
109 | **Parameters:** None
110 | 
111 | **Returns:** (Same structure as `run_tests_no_verbosity`, coverage data is generated)
112 | 
113 | **Example Call:**
114 | 
115 | ```json
116 | {
117 |   "tool_name": "run_tests_very_verbose",
118 |   "arguments": {}
119 | }
120 | ```
121 | 
122 | #### `run_unit_test`
123 | 
124 | Runs tests for a specific agent only.
125 | 
126 | **Parameters:**
127 | 
128 | | Name        | Type    | Required | Default | Description                                                    |
129 | |-------------|---------|----------|---------|----------------------------------------------------------------|
130 | | `agent`     | string  | Yes      |         | The agent to run tests for (e.g., 'qa_agent', 'backlog_agent') |
131 | | `verbosity` | integer | No       | `1`     | Verbosity level (0=minimal, 1=normal, 2=detailed)              |
132 | 
133 | **Returns:** (Same structure as `run_tests_no_verbosity`)
134 | 
135 | **Example Call:**
136 | 
137 | ```json
138 | {
139 |   "tool_name": "run_unit_test",
140 |   "arguments": {
141 |     "agent": "my_agent",
142 |     "verbosity": 0
143 |   }
144 | }
145 | ```
146 | 
147 | #### `create_coverage_report`
148 | 
149 | Runs tests with coverage and generates HTML and XML reports using `hatch`.
150 | 
151 | **Parameters:**
152 | 
153 | | Name            | Type    | Required | Default | Description                                                           |
154 | |-----------------|---------|----------|---------|-----------------------------------------------------------------------|
155 | | `force_rebuild` | boolean | No       | `False` | Whether to force rebuilding the report even if it already exists      |
156 | 
157 | **Returns:**
158 | 
159 | A JSON object with:
160 | 
161 | - `success`: Boolean indicating overall success of report generation steps.
162 | - `message`: Summary message.
163 | - `overall_coverage_percent`: Parsed overall coverage percentage.
164 | - `coverage_xml_path`: Path to the generated XML coverage report.
165 | - `coverage_html_dir`: Path to the directory of the HTML coverage report.
166 | - `coverage_html_index`: Path to the main `index.html` of the HTML report.
167 | - `text_summary_output`: Text summary from the coverage tool.
168 | - `hatch_xml_output`: Output from the hatch XML generation command.
169 | - `hatch_html_output`: Output from the hatch HTML generation command.
170 | - `timestamp`: Timestamp of the report generation.
171 | 
172 | **Example Call:**
173 | 
174 | ```json
175 | {
176 |   "tool_name": "create_coverage_report",
177 |   "arguments": {
178 |     "force_rebuild": true
179 |   }
180 | }
181 | ```
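    | 
    | An illustrative response, with the verbose `*_output` fields omitted for brevity (all values are hypothetical):
    | 
    | ```json
    | {
    |   "success": true,
    |   "message": "Coverage report generated.",
    |   "overall_coverage_percent": 87.5,
    |   "coverage_xml_path": "coverage.xml",
    |   "coverage_html_dir": "htmlcov",
    |   "coverage_html_index": "htmlcov/index.html",
    |   "timestamp": "2025-01-01T12:05:00"
    | }
    | ```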
182 | 
183 | ### Log Searching
184 | 
185 | Tools for searching and filtering log files managed by the `AnalysisEngine`.
186 | 
187 | #### Common Parameters for Search Tools
188 | 
189 | These parameters are available for `search_log_all_records`, `search_log_time_based`, `search_log_first_n_records`, and `search_log_last_n_records`.
190 | 
191 | | Name                            | Type    | Required | Default   | Description                                                                                                |
192 | |---------------------------------|---------|----------|-----------|------------------------------------------------------------------------------------------------------------|
193 | | `scope`                         | string  | No       | "default" | Logging scope to search within (from `.env` scopes or default).                                            |
194 | | `context_before`                | integer | No       | `2`       | Number of lines before a match.                                                                            |
195 | | `context_after`                 | integer | No       | `2`       | Number of lines after a match.                                                                             |
196 | | `log_dirs_override`             | string  | No       | `""`      | Comma-separated list of log directories, files, or glob patterns (overrides `.env` for file locations).      |
197 | | `log_content_patterns_override` | string  | No       | `""`      | Comma-separated list of REGEX patterns for log messages (overrides `.env` content filters).                  |
198 | 
199 | #### `search_log_all_records`
200 | 
201 | Searches for all log records, optionally filtering by scope and content patterns, with context.
202 | 
203 | **Parameters:** (Includes Common Search Parameters)
204 | 
205 | **Returns:**
206 | 
207 | A list of JSON objects, where each object represents a found log entry and includes:
208 | 
209 | - `timestamp`: Parsed timestamp of the log entry.
210 | - `raw_line`: The original log line.
211 | - `file_path`: Path to the log file containing the entry.
212 | - `line_number`: Line number in the file.
213 | - `context_before_lines`: List of lines before the matched line.
214 | - `context_after_lines`: List of lines after the matched line.
215 | - (Other fields from `LogEntry` model)
216 | 
217 | **Example Call:**
218 | 
219 | ```json
220 | {
221 |   "tool_name": "search_log_all_records",
222 |   "arguments": {
223 |     "scope": "my_app_scope",
224 |     "log_content_patterns_override": "ERROR.*database"
225 |   }
226 | }
227 | ```
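    | 
    | An illustrative result list containing a single matching entry (values are hypothetical):
    | 
    | ```json
    | [
    |   {
    |     "timestamp": "2025-01-01 12:00:00,123",
    |     "raw_line": "2025-01-01 12:00:00,123 ERROR Database connection refused",
    |     "file_path": "logs/my_app.log",
    |     "line_number": 42,
    |     "context_before_lines": ["...two lines of preceding context..."],
    |     "context_after_lines": ["...two lines of following context..."]
    |   }
    | ]
    | ```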
228 | 
229 | #### `search_log_time_based`
230 | 
231 | Searches logs within a time window, optionally filtering, with context.
232 | 
233 | **Parameters:** (Includes Common Search Parameters plus)
234 | 
235 | | Name      | Type    | Required | Default | Description                                |
236 | |-----------|---------|----------|---------|--------------------------------------------|
237 | | `minutes` | integer | No       | `0`     | Search logs from the last N minutes.       |
238 | | `hours`   | integer | No       | `0`     | Search logs from the last N hours.         |
239 | | `days`    | integer | No       | `0`     | Search logs from the last N days.          |
240 | 
241 | **Returns:** (List of JSON objects, same structure as `search_log_all_records`)
242 | 
243 | **Example Call:**
244 | 
245 | ```json
246 | {
247 |   "tool_name": "search_log_time_based",
248 |   "arguments": {
249 |     "hours": 2,
250 |     "scope": "server_logs",
251 |     "context_after": 5
252 |   }
253 | }
254 | ```
255 | 
256 | #### `search_log_first_n_records`
257 | 
258 | Searches for the first N (oldest) records, optionally filtering, with context.
259 | 
260 | **Parameters:** (Includes Common Search Parameters plus)
261 | 
262 | | Name    | Type    | Required | Default | Description                                                   |
263 | |---------|---------|----------|---------|---------------------------------------------------------------|
264 | | `count` | integer | Yes      |         | Number of first (oldest) matching records to return (must be > 0). |
265 | 
266 | **Returns:** (List of JSON objects, same structure as `search_log_all_records`)
267 | 
268 | **Example Call:**
269 | 
270 | ```json
271 | {
272 |   "tool_name": "search_log_first_n_records",
273 |   "arguments": {
274 |     "count": 10,
275 |     "log_dirs_override": "/var/log/app_archive/*.log"
276 |   }
277 | }
278 | ```
279 | 
280 | #### `search_log_last_n_records`
281 | 
282 | Searches for the last N (newest) records, optionally filtering, with context.
283 | 
284 | **Parameters:** (Includes Common Search Parameters plus)
285 | 
286 | | Name    | Type    | Required | Default | Description                                                  |
287 | |---------|---------|----------|---------|--------------------------------------------------------------|
288 | | `count` | integer | Yes      |         | Number of last (newest) matching records to return (must be > 0). |
289 | 
290 | **Returns:** (List of JSON objects, same structure as `search_log_all_records`)
291 | 
292 | **Example Call:**
293 | 
294 | ```json
295 | {
296 |   "tool_name": "search_log_last_n_records",
297 |   "arguments": {
298 |     "count": 50,
299 |     "scope": "realtime_feed"
300 |   }
301 | }
302 | ```
303 | 
304 | ### Server Utilities
305 | 
306 | General utility tools for the MCP server.
307 | 
308 | #### `ping`
309 | 
310 | Checks if the MCP server is alive and returns status information.
311 | 
312 | **Parameters:** None
313 | 
314 | **Returns:**
315 | 
316 | A string with status, timestamp, and a message indicating the server is running.
317 | 
318 | **Example Call:**
319 | 
320 | ```json
321 | {
322 |   "tool_name": "ping",
323 |   "arguments": {}
324 | }
325 | ```
326 | 
327 | #### `get_server_env_details`
328 | 
329 | Returns `sys.path`, `sys.executable`, and other environment details from the running MCP server.
330 | 
331 | **Parameters:** None
332 | 
333 | **Returns:**
334 | 
335 | A JSON object with:
336 | 
337 | - `sys_executable`: Path to the Python interpreter running the server.
338 | - `sys_path`: List of paths in `sys.path`.
339 | - `cwd`: Current working directory of the server.
340 | - `environ_pythonpath`: Value of the `PYTHONPATH` environment variable, if set.
341 | 
342 | **Example Call:**
343 | 
344 | ```json
345 | {
346 |   "tool_name": "get_server_env_details",
347 |   "arguments": {}
348 | }
349 | ```
350 | 
351 | #### `request_server_shutdown`
352 | 
353 | Requests the MCP server to shut down gracefully.
354 | 
355 | **Parameters:** None
356 | 
357 | **Returns:**
358 | 
359 | A string confirming that the shutdown has been initiated.
360 | 
361 | **Example Call:**
362 | 
363 | ```json
364 | {
365 |   "tool_name": "request_server_shutdown",
366 |   "arguments": {}
367 | }
368 | ```
369 | 
370 | ---
371 | 
372 | ## CLI Client (`log-analyzer`)
373 | 
374 | The `log-analyzer` command-line interface provides access to log searching functionalities.
375 | 
376 | ### Global Options
377 | 
378 | These options apply to the main `log-analyzer` command and are available before specifying a sub-command.
379 | 
380 | | Option              | Argument Type | Description                                   |
381 | |---------------------|---------------|-----------------------------------------------|
382 | | `-h`, `--help`      |               | Show help message and exit.                   |
383 | | `--env-file`        | PATH          | Path to a custom `.env` file for configuration. |
384 | 
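    | For example, to load configuration from a non-default location before running a search (`./staging.env` is a placeholder path):
    | 
    | ```shell
    | log-analyzer --env-file ./staging.env search all --scope default
    | ```
    | 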
385 | ### Search Commands (`log-analyzer search`)
386 | 
387 | Base command: `log-analyzer search [OPTIONS] COMMAND [ARGS]...`
388 | 
389 | #### Common Search Options
390 | 
391 | These options can be used with `all`, `time`, `first`, and `last` search commands.
392 | 
393 | | Option                             | Alias    | Type    | Default   | Description                                                                                                |
394 | |------------------------------------|----------|---------|-----------|------------------------------------------------------------------------------------------------------------|
395 | | `--scope`                          |          | STRING  | "default" | Logging scope to search within (from .env or default).                                                     |
396 | | `--before`                         |          | INTEGER | `2`       | Number of context lines before a match.                                                                    |
397 | | `--after`                          |          | INTEGER | `2`       | Number of context lines after a match.                                                                     |
398 | | `--log-dirs`                       |          | STRING  | `None`    | Comma-separated list of log directories, files, or glob patterns to search (overrides .env for file locations).|
399 | | `--log-patterns`                   |          | STRING  | `None`    | Comma-separated list of REGEX patterns to filter log messages (overrides .env content filters).                |
400 | 
401 | #### `log-analyzer search all`
402 | 
403 | Searches for all log records matching configured patterns.
404 | Usage: `log-analyzer search all [COMMON_SEARCH_OPTIONS]`
405 | 
406 | **Example:**
407 | 
408 | ```shell
409 | log-analyzer search all --scope my_scope --log-patterns "CRITICAL" --before 1 --after 1
410 | ```
411 | 
412 | #### `log-analyzer search time`
413 | 
414 | Searches logs within a specified time window.
415 | Usage: `log-analyzer search time [TIME_OPTIONS] [COMMON_SEARCH_OPTIONS]`
416 | 
417 | **Time Options:**
418 | 
419 | | Option      | Type    | Default | Description                                |
420 | |-------------|---------|---------|--------------------------------------------|
421 | | `--minutes` | INTEGER | `0`     | Search logs from the last N minutes.       |
422 | | `--hours`   | INTEGER | `0`     | Search logs from the last N hours.         |
423 | | `--days`    | INTEGER | `0`     | Search logs from the last N days.          |
424 | 
425 | **Example:**
426 | 
427 | ```shell
428 | log-analyzer search time --hours 1 --log-dirs "/var/log/app.log"
429 | ```
430 | 
431 | #### `log-analyzer search first`
432 | 
433 | Searches for the first N (oldest) matching log records.
434 | Usage: `log-analyzer search first --count INTEGER [COMMON_SEARCH_OPTIONS]`
435 | 
436 | **Required Option:**
437 | 
438 | | Option    | Type    | Description                                                   |
439 | |-----------|---------|---------------------------------------------------------------|
440 | | `--count` | INTEGER | Number of first (oldest) matching records to return.          |
441 | 
442 | **Example:**
443 | 
444 | ```shell
445 | log-analyzer search first --count 5 --scope important_logs
446 | ```
447 | 
448 | #### `log-analyzer search last`
449 | 
450 | Searches for the last N (newest) matching log records.
451 | Usage: `log-analyzer search last --count INTEGER [COMMON_SEARCH_OPTIONS]`
452 | 
453 | **Required Option:**
454 | 
455 | | Option    | Type    | Description                                                  |
456 | |-----------|---------|--------------------------------------------------------------|
457 | | `--count` | INTEGER | Number of last (newest) matching records to return.          |
458 | 
459 | **Example:**
460 | 
461 | ```shell
462 | log-analyzer search last --count 20
463 | ```
464 | 
465 | ---
466 | 
467 | ## Error Handling
468 | 
469 | - **MCP Server:** Errors are returned as JSON objects with `code` and `message` fields, conforming to MCP error standards.
470 | - **CLI Client:** Errors are typically printed to stderr.
471 | 
472 | Common error types include invalid parameters, file not found, or issues with the underlying `AnalysisEngine` configuration or execution.
473 | 
```

--------------------------------------------------------------------------------
/src/log_analyzer_mcp/common/logger_setup.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Logging utility for standardized log setup across all agents
  3 | """
  4 | 
  5 | import logging
  6 | import os
  7 | import re
  8 | import sys
  9 | from logging.handlers import RotatingFileHandler
 10 | from typing import Any, Dict, Literal, Optional
 11 | 
 12 | # Explicitly attempt to initialize coverage for subprocesses
 13 | # if "COVERAGE_PROCESS_START" in os.environ:
 14 | #     try:
 15 | #         import coverage
 16 | #
 17 | #         coverage.process_startup()
 18 | #     except Exception:  # nosec B110 # pylint: disable=broad-exception-caught
 19 | #         pass  # Or handle error if coverage is mandatory
 20 | 
 21 | # Determine the project root directory from the location of this script
 22 | # Expected structure: /project_root/src/log_analyzer_mcp/common/logger_setup.py
 23 | # _common_dir = os.path.dirname(os.path.abspath(__file__))
 24 | # _log_analyzer_mcp_dir = os.path.dirname(_common_dir)
 25 | # _src_dir = os.path.dirname(_log_analyzer_mcp_dir)
 26 | # PROJECT_ROOT = os.path.dirname(_src_dir) # Old method
 27 | 
 28 | 
 29 | def find_project_root(start_path: str, marker_file: str = "pyproject.toml") -> str:
 30 |     """Searches upwards from start_path for a directory containing marker_file."""
 31 |     current_path = os.path.abspath(start_path)
 32 |     while True:
 33 |         if os.path.exists(os.path.join(current_path, marker_file)):
 34 |             return current_path
 35 |         parent_path = os.path.dirname(current_path)
 36 |         if parent_path == current_path:  # Reached filesystem root
 37 |             # If marker not found, CWD is the best guess for project root.
 38 |             cwd = os.getcwd()
 39 |             sys.stderr.write(f"Warning: '{marker_file}' not found from '{start_path}'. Falling back to CWD: {cwd}\n")
 40 |             return cwd
 41 |         current_path = parent_path
 42 | 
 43 | 
 44 | PROJECT_ROOT = find_project_root(os.getcwd())
 45 | 
 46 | # Define the base logs directory at the project root
 47 | LOGS_BASE_DIR = os.path.join(PROJECT_ROOT, "logs")
 48 | 
 49 | 
 50 | def get_logs_dir() -> str:
 51 |     """Returns the absolute path to the base logs directory for the project."""
 52 |     # Ensure the base logs directory exists
 53 |     if not os.path.exists(LOGS_BASE_DIR):
 54 |         try:
 55 |             os.makedirs(LOGS_BASE_DIR, exist_ok=True)
 56 |         except OSError as e:
 57 |             # Fallback or error if cannot create logs dir, though basic logging might still work to console
 58 |             sys.stderr.write(f"Warning: Could not create base logs directory {LOGS_BASE_DIR}: {e}\n")
 59 |             # As a last resort, can try to use a local logs dir if in a restricted env
 60 |             # For now, we assume it can be created or will be handled by calling code.
 61 |     return LOGS_BASE_DIR
 62 | 
 63 | 
 64 | class MessageFlowFormatter(logging.Formatter):
 65 |     """
 66 |     Custom formatter that recognizes message flow patterns and formats them accordingly
 67 |     """
 68 | 
 69 |     # Pattern to match "sender => receiver | message" format
 70 |     FLOW_PATTERN = re.compile(r"^(\w+) => (\w+) \| (.*)$")
 71 | 
 72 |     # Pattern to match already formatted messages (both standard and flow formats)
 73 |     # This includes timestamp pattern \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}
 74 |     # and agent | timestamp format
 75 |     ALREADY_FORMATTED_PATTERN = re.compile(
 76 |         r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}|^\w+ \| \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})"
 77 |     )
 78 | 
 79 |     def __init__(
 80 |         self,
 81 |         agent_name: str,
 82 |         fmt: Optional[str] = None,
 83 |         datefmt: Optional[str] = None,
 84 |         style: Literal["%", "{", "$"] = "%",
 85 |         session_id: Optional[str] = None,
 86 |         preserve_newlines: bool = True,
 87 |     ):
 88 |         """
 89 |         Initialize the formatter with the agent name
 90 | 
 91 |         Args:
 92 |             agent_name: Name of the agent (used when no flow information is in the message)
 93 |             fmt: Format string
 94 |             datefmt: Date format string
 95 |             style: Style of format string
 96 |             session_id: Optional unique session ID to include in log messages
 97 |             preserve_newlines: Whether to preserve newlines in the original message
 98 |         """
 99 |         super().__init__(fmt, datefmt, style)
100 |         self.agent_name = agent_name
101 |         self.session_id = session_id
102 |         self.preserve_newlines = preserve_newlines
103 | 
104 |     def format(self, record: logging.LogRecord) -> str:
105 |         """
106 |         Format the log record according to message flow patterns
107 | 
108 |         Args:
109 |             record: The log record to format
110 | 
111 |         Returns:
112 |             Formatted log string
113 |         """
114 |         # Extract the message
115 |         original_message = record.getMessage()
116 | 
117 |         # Special case for test summary format (always preserve exact format)
118 |         if "Test Summary:" in original_message or "===" in original_message:
119 |             # Special case for test analyzer compatibility - don't prepend anything
120 |             return original_message
121 | 
122 |         # Guard against already formatted messages to prevent recursive formatting
123 |         # Check for timestamp pattern to identify already formatted messages
124 |         if self.ALREADY_FORMATTED_PATTERN.search(original_message):
125 |             # Log message is already formatted, return as is
126 |             return original_message
127 | 
128 |         # Check if this is a message flow log
129 |         flow_match = self.FLOW_PATTERN.match(original_message)
130 |         if flow_match:
131 |             sender, receiver, message = flow_match.groups()
132 |             timestamp = self.formatTime(record, self.datefmt)
133 |             if self.session_id:
134 |                 formatted_message = f"{receiver} | {timestamp} | {self.session_id} | {sender} => {receiver} | {message}"
135 |             else:
136 |                 formatted_message = f"{receiver} | {timestamp} | {sender} => {receiver} | {message}"
137 |         else:
138 |             timestamp = self.formatTime(record, self.datefmt)
139 |             if self.preserve_newlines:
140 |                 # Preserve newlines: if newlines are present, split and format first line, append rest
141 |                 if "\n" in original_message:
142 |                     lines = original_message.split("\n")
143 |                     if self.session_id:
144 |                         first_line = f"{self.agent_name} | {timestamp} | {self.session_id} | {lines[0]}"
145 |                     else:
146 |                         first_line = f"{self.agent_name} | {timestamp} | {lines[0]}"
147 |                     formatted_message = first_line + "\n" + "\n".join(lines[1:])
148 |                 else:  # No newlines, format as single line
149 |                     if self.session_id:
150 |                         formatted_message = f"{self.agent_name} | {timestamp} | {self.session_id} | {original_message}"
151 |                     else:
152 |                         formatted_message = f"{self.agent_name} | {timestamp} | {original_message}"
153 |             else:  # Not preserving newlines (preserve_newlines is False)
154 |                 # Unconditionally replace newlines with spaces and format as a single line
155 |                 processed_message = original_message.replace("\n", " ")  # Replace actual newlines
156 |                 processed_message = processed_message.replace("\\n", " ")  # Also replace literal \\n, just in case
157 |                 if self.session_id:
158 |                     formatted_message = f"{self.agent_name} | {timestamp} | {self.session_id} | {processed_message}"
159 |                 else:
160 |                     formatted_message = f"{self.agent_name} | {timestamp} | {processed_message}"
161 | 
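    |         # Mutate the record in place so any downstream handlers reuse the final
    |         # string instead of re-rendering the original message and args.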
162 |         record.msg = formatted_message
163 |         record.args = ()
164 | 
165 |         # Return the formatted message
166 |         return formatted_message
167 | 
168 | 
169 | class LoggerSetup:
170 |     """
171 |     Utility class for standardized logging setup across all agents
172 |     """
173 | 
174 |     # Keep the old format for backward compatibility
175 |     LEGACY_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
176 |     DEFAULT_LOG_LEVEL = "INFO"
177 | 
178 |     # Store active loggers for management
179 |     _active_loggers: Dict[str, logging.Logger] = {}
180 | 
181 |     @classmethod
182 |     def _clear_and_close_handlers(cls, logger: logging.Logger) -> None:
183 |         """Helper to clear and close all handlers for a given logger."""
184 |         if logger.handlers:
185 |             for handler in list(logger.handlers):  # Iterate over a copy
186 |                 try:
187 |                     handler.flush()
188 |                     is_default_stream = False
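    |                     # Identify handlers bound to the process-wide stdout/stderr so
    |                     # we avoid closing those shared streams below.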
189 |                     if isinstance(handler, logging.StreamHandler):
190 |                         stream = getattr(handler, "stream", None)
191 |                         if stream is sys.stdout or stream is sys.stderr:
192 |                             is_default_stream = True
193 |                             # Check stream is not None and has fileno before comparing
194 |                             if stream is not None and hasattr(stream, "fileno"):
195 |                                 # Also check sys.__stdout__ and sys.__stderr__ for None and fileno for completeness
196 |                                 if (
197 |                                     sys.__stdout__ is not None
198 |                                     and hasattr(sys.__stdout__, "fileno")
199 |                                     and stream is sys.stdout
200 |                                 ):
201 |                                     if stream.fileno() != sys.__stdout__.fileno():
202 |                                         is_default_stream = False
203 |                                 if (
204 |                                     sys.__stderr__ is not None
205 |                                     and hasattr(sys.__stderr__, "fileno")
206 |                                     and stream is sys.stderr
207 |                                 ):
208 |                                     if stream.fileno() != sys.__stderr__.fileno():
209 |                                         is_default_stream = False
210 | 
211 |                     if hasattr(handler, "close"):
212 |                         if not (is_default_stream and not isinstance(handler, logging.FileHandler)):
213 |                             try:
214 |                                 handler.close()
215 |                             except Exception:  # Broad catch for mocks or unusual states during close
216 |                                 pass
217 |                 except ValueError:
218 |                     pass  # Handler already closed or removed
219 |                 except Exception as e:
220 |                     sys.stderr.write(f"Warning: Error during handler cleanup for {handler}: {e}\n")
221 |                 logger.removeHandler(handler)
222 | 
223 |     @classmethod
224 |     def get_logger(cls, name: str) -> Optional[logging.Logger]:
225 |         """Retrieve an existing logger by name if it has been created."""
226 |         return cls._active_loggers.get(name)
227 | 
228 |     @classmethod
229 |     def create_logger(
230 |         cls,
231 |         name: str,
232 |         log_file: Optional[str] = None,
233 |         agent_name: Optional[str] = None,
234 |         log_level: Optional[str] = None,
235 |         session_id: Optional[str] = None,
236 |         use_rotating_file: bool = True,
237 |         append_mode: bool = True,
238 |         preserve_test_format: bool = False,
239 |     ) -> logging.Logger:
240 |         """
241 |         Creates and configures a logger with the given name
242 | 
243 |         Args:
244 |             name: Name of the logger
245 |             log_file: Optional file path for file logging. If just a filename is provided, it will be created in the centralized logs directory
246 |             agent_name: Optional agent name for message flow formatting (defaults to name)
247 |             log_level: Optional log level (defaults to environment variable or INFO)
248 |             session_id: Optional unique session ID to include in all log messages
249 |             use_rotating_file: Whether to use RotatingFileHandler (True) or simple FileHandler (False)
250 |             append_mode: Whether to append to existing log file (True) or overwrite (False)
251 |             preserve_test_format: Whether to preserve exact format of test-related messages
252 | 
253 |         Returns:
254 |             Configured logger instance
255 |         """
256 |         # Get log level from parameter, environment, or use default
257 |         log_level_str = log_level or os.getenv("LOG_LEVEL", cls.DEFAULT_LOG_LEVEL)
258 |         assert isinstance(log_level_str, str)
259 |         log_level_str = log_level_str.upper()
260 |         log_level_num = getattr(logging, log_level_str, logging.INFO)
261 | 
262 |         # Use agent_name if provided, otherwise use the logger name
263 |         if agent_name:
264 |             actual_agent_name = agent_name
265 |         else:
266 |             base_name = name.lower()
267 |             if "logger" in base_name:
268 |                 base_name = base_name.replace("logger", "")
269 |             if "agent" in base_name:
270 |                 base_name = base_name.replace("agent", "")
271 |             base_name = base_name.strip("_")  # Clean up dangling underscores
272 |             if not base_name:  # if name was 'AgentLogger' or similar
273 |                 actual_agent_name = "default_agent"
274 |             else:
275 |                 actual_agent_name = f"{base_name}_agent"
276 | 
277 |         # Create or get existing logger
278 |         logger = logging.getLogger(name)
279 |         logger.setLevel(log_level_num)
280 | 
281 |         # Disable propagation to root logger to prevent duplicate logs
282 |         logger.propagate = False
283 | 
284 |         # Clear existing handlers to avoid duplicates. This is crucial for overwrite mode.
285 |         # This must happen BEFORE adding new handlers, especially file handler in 'w' mode.
286 |         if name in cls._active_loggers:
287 |             # If logger exists in our tracking, it might have handlers we set up.
288 |             # We use the same logger instance, so clear its handlers.
289 |             cls._clear_and_close_handlers(logger)  # logger is cls._active_loggers[name]
290 |         elif logger.hasHandlers():
291 |             # If not in _active_loggers but has handlers, it was configured elsewhere or is a pre-existing logger (e.g. root)
292 |             # Still important to clear to avoid duplication if we are taking it over.
293 |             cls._clear_and_close_handlers(logger)
294 | 
295 |         # Console Handler
296 |         console_formatter: logging.Formatter
297 |         if preserve_test_format:
298 |             # For test summaries, use standard formatter on console as well
299 |             # to avoid double formatting or MessageFlowFormatter specific handling
300 |             console_formatter = logging.Formatter(cls.LEGACY_LOG_FORMAT)
301 |         else:
302 |             console_formatter = MessageFlowFormatter(
303 |                 actual_agent_name,
304 |                 session_id=session_id,
305 |                 preserve_newlines=not preserve_test_format,  # If preserving test format, don't preserve newlines for flow
306 |             )
307 | 
308 |         console_handler = logging.StreamHandler(sys.stdout)  # Console output is written to stdout
309 |         console_handler.setLevel(log_level_num)
310 |         console_handler.setFormatter(console_formatter)
311 |         logger.addHandler(console_handler)
312 | 
313 |         # File Handler
314 |         if log_file:
315 |             # Determine the log file path
316 |             if os.path.isabs(log_file):
317 |                 log_file_path = log_file
318 |             else:
319 |                 log_file_path = os.path.join(get_logs_dir(), log_file)
320 | 
321 |             # Ensure log directory exists
322 |             log_dir = os.path.dirname(log_file_path)
323 |             if not os.path.exists(log_dir):
324 |                 try:
325 |                     os.makedirs(log_dir, exist_ok=True)
326 |                 except OSError as e:
327 |                     sys.stderr.write(f"ERROR: Could not create log directory {log_dir}: {e}. File logging disabled.\n")
328 |                     log_file = None  # Disable file logging
329 | 
330 |             if log_file:  # Check again, might have been disabled
331 |                 file_mode = "w" if not append_mode else "a"
332 |                 file_formatter: logging.Formatter
333 |                 if preserve_test_format:
334 |                     # Use a basic formatter for test log files to keep them clean
335 |                     file_formatter = logging.Formatter("%(message)s")
336 |                 else:
337 |                     file_formatter = MessageFlowFormatter(
338 |                         actual_agent_name,
339 |                         session_id=session_id,
340 |                         preserve_newlines=True,  # Always preserve newlines in file logs unless test format
341 |                     )
342 | 
343 |                 if use_rotating_file:
344 |                     file_handler: logging.Handler = RotatingFileHandler(
345 |                         log_file_path, maxBytes=10 * 1024 * 1024, backupCount=5, mode=file_mode, encoding="utf-8"
346 |                     )
347 |                 else:
348 |                     file_handler = logging.FileHandler(log_file_path, mode=file_mode, encoding="utf-8")
349 | 
350 |                 file_handler.setFormatter(file_formatter)
351 |                 file_handler.setLevel(log_level_num)
352 |                 logger.addHandler(file_handler)
353 |                 # Log file configuration message to the logger itself
354 |                 logger.info(f"File logging configured for: {log_file_path}")
355 | 
356 |         cls._active_loggers[name] = logger
357 |         return logger
358 | 
359 |     @classmethod
360 |     def flush_all_loggers(cls) -> None:
361 |         """Flushes all registered active loggers."""
362 |         for logger_instance in cls._active_loggers.values():
363 |             for handler in logger_instance.handlers:
364 |                 handler.flush()
365 | 
366 |     @classmethod
367 |     def flush_logger(cls, name: str) -> bool:
368 |         """
369 |         Flush a specific logger by name
370 | 
371 |         Args:
372 |             name: Name of the logger to flush
373 | 
374 |         Returns:
375 |             True if logger was found and flushed, False otherwise
376 |         """
377 |         if name in cls._active_loggers:
378 |             logger = cls._active_loggers[name]
379 |             for handler in logger.handlers:
380 |                 handler.flush()
381 |             return True
382 |         return False
383 | 
384 |     @classmethod
385 |     def write_test_summary(cls, logger: logging.Logger, summary: Dict[str, Any]) -> None:
386 |         """
387 |         Write test summary in a format that log_analyzer.py can understand
388 | 
389 |         Args:
390 |             logger: The logger to use
391 |             summary: Dictionary with test summary information
392 |         """
393 |         # Flush any pending logs
394 |         for handler in logger.handlers:
395 |             handler.flush()
396 | 
397 |         # Log summary in a format compatible with log_analyzer.py
398 |         logger.info("=" * 15 + " test session starts " + "=" * 15)
399 | 
400 |         # Log test result counts
401 |         passed = summary.get("passed", 0)
402 |         failed = summary.get("failed", 0)
403 |         skipped = summary.get("skipped", 0)
404 |         duration = summary.get("duration", 0)
405 | 
406 |         logger.info(f"{passed} passed, {failed} failed, {skipped} skipped in {duration:.2f}s")
407 |         logger.info(f"Test Summary: {passed} passed, {failed} failed, {skipped} skipped")
408 |         logger.info(f"Status: {'PASSED' if failed == 0 else 'FAILED'}")
409 |         logger.info(f"Duration: {duration:.2f} seconds")
410 | 
411 |         # Log failed tests if any
412 |         if "failed_tests" in summary and summary["failed_tests"]:
413 |             logger.info("Failed tests by module:")
414 |             for module, tests in summary.get("failed_modules", {}).items():
415 |                 logger.info(f"Module: {module} - {len(tests)} failed tests")
416 |                 for test in tests:
417 |                     logger.info(f"- {test}")
418 | 
419 |         logger.info("=" * 50)
420 | 
421 |         # Ensure everything is written
422 |         for handler in logger.handlers:
423 |             handler.flush()
424 | 
425 |     @classmethod
426 |     def reset_loggers_for_testing(cls) -> None:
427 |         """Resets all known loggers by clearing their handlers. Useful for testing."""
428 |         for logger_name in list(cls._active_loggers.keys()):
429 |             logger = cls._active_loggers.pop(logger_name)
430 |             cls._clear_and_close_handlers(logger)
431 |         # Also clear the root logger's handlers if any were added inadvertently by tests
432 |         root_logger = logging.getLogger()
433 |         cls._clear_and_close_handlers(root_logger)
434 | 
435 | 
436 | def setup_logger(
437 |     agent_name: str,
438 |     log_level: str = "INFO",
439 |     session_id: Optional[str] = None,
440 |     log_file: Optional[str] = None,
441 |     use_rotating_file: bool = True,
442 | ) -> logging.Logger:
443 |     """
444 |     Set up a logger with the given name and log level
445 | 
446 |     Args:
447 |         agent_name: Name of the agent
448 |         log_level: Log level (default: INFO)
449 |         session_id: Optional unique session ID to include in all log messages
450 |         log_file: Optional file path for logging
451 |         use_rotating_file: Whether to use rotating file handler (default: True)
452 | 
453 |     Returns:
454 |         Configured logger
455 |     """
456 |     # Use the LoggerSetup class for consistent logging setup
457 |     return LoggerSetup.create_logger(
458 |         agent_name,
459 |         log_file=log_file,
460 |         agent_name=agent_name,
461 |         log_level=log_level,
462 |         session_id=session_id,
463 |         use_rotating_file=use_rotating_file,
464 |     )
465 | 
```

--------------------------------------------------------------------------------
/tests/log_analyzer_mcp/common/test_logger_setup.py:
--------------------------------------------------------------------------------

```python
  1 | import logging
  2 | import os
  3 | import shutil
  4 | import sys
  5 | import tempfile
  6 | from unittest.mock import MagicMock, mock_open, patch
  7 | 
  8 | import pytest
  9 | 
 10 | from log_analyzer_mcp.common.logger_setup import (
 11 |     LOGS_BASE_DIR,
 12 |     PROJECT_ROOT,
 13 |     LoggerSetup,
 14 |     MessageFlowFormatter,
 15 |     find_project_root,
 16 |     get_logs_dir,
 17 |     setup_logger,
 18 | )
 19 | from logging import handlers
 20 | 
 21 | 
 22 | # Helper to reset LoggerSetup._active_loggers for test isolation
 23 | @pytest.fixture(autouse=True)
 24 | def reset_active_loggers():
 25 |     LoggerSetup.reset_loggers_for_testing()  # Use the new robust reset method
 26 |     yield
 27 |     LoggerSetup.reset_loggers_for_testing()  # Ensure clean state after each test
 28 | 
 29 | 
 30 | # --- Tests for find_project_root ---
 31 | def test_find_project_root_fallback(tmp_path):
 32 |     """Test find_project_root fallback when marker_file is not found."""
 33 |     # Ensure no pyproject.toml is found upwards from tmp_path
 34 |     # We can't easily mock the entire filesystem structure up to the real root,
 35 |     # so instead we patch os.path.exists so that the upward search never finds
 36 |     # the marker, which forces the function into its fallback path, and then we
 37 |     # assert that the fallback value is returned.
 38 | 
 39 |     # The new fallback is os.getcwd(), so we can check against that.
 40 |     expected_fallback_root = os.getcwd()
 41 | 
 42 |     # To simulate not finding it, we pass a non-existent marker and start path far from project
 43 |     # This forces it to go up to the filesystem root and trigger the fallback.
 44 |     # We need to be careful as the real PROJECT_ROOT might interfere.
 45 |     # Let's patch os.path.exists to simulate marker not being found
 46 |     with patch("os.path.exists", return_value=False) as mock_exists:
 47 |         # And patch abspath for the __file__ to be consistent if needed, though usually not.
 48 |         # Call from a deep, unrelated path
 49 |         unrelated_deep_path = tmp_path / "a" / "b" / "c" / "d" / "e"
 50 |         unrelated_deep_path.mkdir(parents=True, exist_ok=True)
 51 | 
 52 |         # Use a marker that definitely won't exist to force fallback
 53 |         calculated_root = find_project_root(str(unrelated_deep_path), "THIS_MARKER_DOES_NOT_EXIST.txt")
 54 | 
 55 |         # The fallback is now os.getcwd()
 56 |         assert calculated_root == expected_fallback_root
 57 |         # Ensure os.path.exists was called multiple times during the upward search
 58 |         assert mock_exists.call_count > 1
 59 | 
 60 | 
 61 | # --- Tests for get_logs_dir ---
 62 | def test_get_logs_dir_exists(tmp_path):
 63 |     """Test get_logs_dir when the directory already exists."""
 64 |     # Use a temporary logs base dir for this test
 65 |     temp_logs_base = tmp_path / "test_logs"
 66 |     temp_logs_base.mkdir()
 67 |     with patch("log_analyzer_mcp.common.logger_setup.LOGS_BASE_DIR", str(temp_logs_base)):
 68 |         assert get_logs_dir() == str(temp_logs_base)
 69 |         assert temp_logs_base.exists()
 70 | 
 71 | 
 72 | def test_get_logs_dir_creates_if_not_exists(tmp_path):
 73 |     """Test get_logs_dir creates the directory if it doesn't exist."""
 74 |     temp_logs_base = tmp_path / "test_logs_new"
 75 |     with patch("log_analyzer_mcp.common.logger_setup.LOGS_BASE_DIR", str(temp_logs_base)):
 76 |         assert not temp_logs_base.exists()
 77 |         assert get_logs_dir() == str(temp_logs_base)
 78 |         assert temp_logs_base.exists()
 79 | 
 80 | 
 81 | @patch("os.makedirs")
 82 | def test_get_logs_dir_os_error_on_create(mock_makedirs, tmp_path, capsys):
 83 |     """Test get_logs_dir when os.makedirs raises an OSError."""
 84 |     mock_makedirs.side_effect = OSError("Test OS error")
 85 |     temp_logs_base = tmp_path / "test_logs_error"
 86 |     with patch("log_analyzer_mcp.common.logger_setup.LOGS_BASE_DIR", str(temp_logs_base)):
 87 |         assert get_logs_dir() == str(temp_logs_base)  # Should still return the path
 88 |         # Check stderr for the warning
 89 |         captured = capsys.readouterr()
 90 |         assert f"Warning: Could not create base logs directory {str(temp_logs_base)}" in captured.err
 91 | 
 92 | 
 93 | # --- Tests for MessageFlowFormatter ---
 94 | @pytest.fixture
 95 | def mock_log_record():
 96 |     record = MagicMock(spec=logging.LogRecord)
 97 |     record.getMessage = MagicMock(return_value="A normal log message")
 98 |     record.levelno = logging.INFO
 99 |     record.levelname = "INFO"
100 |     record.created = 1678886400  # A fixed time
101 |     record.msecs = 123
102 |     record.name = "TestLogger"
103 |     record.args = ()  # Ensure args is an empty tuple
104 |     record.exc_info = None  # Add exc_info attribute
105 |     record.exc_text = None  # Add exc_text attribute
106 |     record.stack_info = None  # Add stack_info attribute
107 |     return record
108 | 
109 | 
110 | def test_message_flow_formatter_standard_message(mock_log_record):
111 |     formatter = MessageFlowFormatter("TestAgent")
112 |     formatted = formatter.format(mock_log_record)
113 |     assert "TestAgent |" in formatted
114 |     assert "| A normal log message" in formatted
115 | 
116 | 
117 | def test_message_flow_formatter_with_session_id(mock_log_record):
118 |     formatter = MessageFlowFormatter("TestAgent", session_id="sess123")
119 |     mock_log_record.getMessage.return_value = "Another message"
120 |     formatted = formatter.format(mock_log_record)
121 |     assert "TestAgent |" in formatted
122 |     assert "| sess123 |" in formatted
123 |     assert "| Another message" in formatted
124 | 
125 | 
126 | def test_message_flow_formatter_flow_pattern(mock_log_record):
127 |     formatter = MessageFlowFormatter("ReceiverAgent", session_id="s456")
128 |     mock_log_record.getMessage.return_value = "SenderAgent => ReceiverAgent | Flow details here"
129 |     formatted = formatter.format(mock_log_record)
130 |     # Receiver | Timestamp | SessionID | Sender => Receiver | Message
131 |     assert "ReceiverAgent |" in formatted  # Receiver is the first part
132 |     assert "| s456 |" in formatted  # Session ID
133 |     assert "| SenderAgent => ReceiverAgent | Flow details here" in formatted  # The original flow part
134 |     assert "ReceiverAgent => ReceiverAgent" not in formatted  # Ensure agent_name not misused
135 | 
136 | 
137 | def test_message_flow_formatter_already_formatted(mock_log_record):
138 |     formatter = MessageFlowFormatter("TestAgent")
139 |     # Simulate an already formatted message (e.g., from a different handler)
140 |     already_formatted_msg = "2023-03-15 10:00:00,123 - TestAgent - INFO - Already done"
141 |     mock_log_record.getMessage.return_value = already_formatted_msg
142 |     formatted = formatter.format(mock_log_record)
143 |     assert formatted == already_formatted_msg
144 | 
145 |     already_formatted_flow_msg = "OtherAgent | 2023-03-15 10:00:00,123 | SomeSender => OtherAgent | Done this way"
146 |     mock_log_record.getMessage.return_value = already_formatted_flow_msg
147 |     formatted = formatter.format(mock_log_record)
148 |     assert formatted == already_formatted_flow_msg
149 | 
150 | 
151 | def test_message_flow_formatter_test_summary(mock_log_record):
152 |     formatter = MessageFlowFormatter("TestAgent")
153 |     test_summary_msg = "Test Summary: 5 passed, 0 failed"
154 |     mock_log_record.getMessage.return_value = test_summary_msg
155 |     formatted = formatter.format(mock_log_record)
156 |     assert formatted == test_summary_msg  # Should be returned as-is
157 | 
158 |     pytest_output_msg = "============================= test session starts =============================="
159 |     mock_log_record.getMessage.return_value = pytest_output_msg
160 |     formatted = formatter.format(mock_log_record)
161 |     assert formatted == pytest_output_msg  # Should also be returned as-is
162 | 
163 | 
164 | def test_message_flow_formatter_multiline(mock_log_record):
165 |     formatter = MessageFlowFormatter("TestAgent", session_id="multi789")
166 |     multiline_msg = "First line\nSecond line\nThird line"
167 |     mock_log_record.getMessage.return_value = multiline_msg
168 |     formatted = formatter.format(mock_log_record)
169 |     lines = formatted.split("\n")
170 |     assert len(lines) == 3
171 |     assert "TestAgent |" in lines[0]
172 |     assert "| multi789 |" in lines[0]
173 |     assert "| First line" in lines[0]
174 |     assert lines[1] == "Second line"
175 |     assert lines[2] == "Third line"
176 | 
177 | 
178 | def test_message_flow_formatter_no_preserve_newlines(mock_log_record):
179 |     formatter = MessageFlowFormatter("TestAgent", preserve_newlines=False)
180 |     # Use an actual newline character in the message, not a literal '\\n' string
181 |     multiline_msg = "First line\nSecond line"
182 |     mock_log_record.getMessage.return_value = multiline_msg
183 |     formatted = formatter.format(mock_log_record)
184 |     # When not preserving newlines, the formatter should emit everything on a
185 |     # single line, with newline characters replaced by spaces. format() builds
186 |     # the final string itself and returns it directly (it never delegates to
187 |     # super().format(record)), so the single-line collapsing happens entirely
188 |     # inside our implementation.
189 |     assert "\n" not in formatted  # The formatted output should not contain newline characters
190 |     assert "TestAgent |" in formatted
191 |     # The expected behavior now is that newlines are removed and the message is on a single line.
192 |     assert "| First line Second line" in formatted  # Adjusted expectation: newlines replaced by space
193 | 
194 | 
195 | # --- Tests for LoggerSetup ---
196 | @pytest.fixture
197 | def temp_log_file(tmp_path):
198 |     log_file = tmp_path / "test.log"
199 |     yield str(log_file)
200 |     if log_file.exists():
201 |         log_file.unlink()
202 | 
203 | 
204 | def test_logger_setup_create_logger_basic(temp_log_file):
205 |     logger = LoggerSetup.create_logger("MyLogger", log_file=temp_log_file, agent_name="MyAgent")
206 |     assert logger.name == "MyLogger"
207 |     assert logger.level == logging.INFO  # Default
208 |     assert len(logger.handlers) == 2  # Console and File
209 |     assert isinstance(logger.handlers[0], logging.StreamHandler)  # Console
210 |     assert isinstance(logger.handlers[1], handlers.RotatingFileHandler)  # File
211 | 
212 |     # Check if formatter is MessageFlowFormatter
213 |     for handler in logger.handlers:
214 |         assert isinstance(handler.formatter, MessageFlowFormatter)
215 |         if isinstance(handler, logging.FileHandler):  # Check path of file handler
216 |             assert handler.baseFilename == temp_log_file
217 | 
218 |     # Check if it's stored
219 |     assert LoggerSetup.get_logger("MyLogger") is logger
220 | 
221 | 
222 | def test_logger_setup_create_logger_levels(temp_log_file):
223 |     logger_debug = LoggerSetup.create_logger("DebugLogger", log_file=temp_log_file, log_level="DEBUG")
224 |     assert logger_debug.level == logging.DEBUG
225 |     for handler in logger_debug.handlers:
226 |         assert handler.level == logging.DEBUG
227 | 
228 |     logger_warning = LoggerSetup.create_logger("WarnLogger", log_file=temp_log_file, log_level="WARNING")
229 |     assert logger_warning.level == logging.WARNING
230 | 
231 | 
232 | def test_logger_setup_create_logger_no_file():
233 |     logger = LoggerSetup.create_logger("NoFileLogger")
234 |     assert len(logger.handlers) == 1  # Only console
235 |     assert isinstance(logger.handlers[0], logging.StreamHandler)
236 | 
237 | 
238 | def test_logger_setup_create_logger_agent_name(temp_log_file):
239 |     logger = LoggerSetup.create_logger("AgentLoggerTest", log_file=temp_log_file, agent_name="SpecificAgent")
240 |     console_formatter = logger.handlers[0].formatter
241 |     assert isinstance(console_formatter, MessageFlowFormatter)
242 |     assert console_formatter.agent_name == "SpecificAgent"
243 | 
244 |     # Test default agent_name derivation
245 |     logger_default_agent = LoggerSetup.create_logger("MyAgentLogger", log_file=temp_log_file)
246 |     default_agent_formatter = logger_default_agent.handlers[0].formatter
247 |     assert isinstance(default_agent_formatter, MessageFlowFormatter)
248 |     assert default_agent_formatter.agent_name == "my_agent"  # MyAgentLogger -> my_agent
249 | 
250 |     logger_simple_name = LoggerSetup.create_logger("MyLogger", log_file=temp_log_file)
251 |     simple_name_formatter = logger_simple_name.handlers[0].formatter
252 |     assert isinstance(simple_name_formatter, MessageFlowFormatter)
253 |     assert simple_name_formatter.agent_name == "my_agent"  # MyLogger -> my_agent
254 | 
255 |     logger_just_agent = LoggerSetup.create_logger("Agent", log_file=temp_log_file)
256 |     just_agent_formatter = logger_just_agent.handlers[0].formatter
257 |     assert isinstance(just_agent_formatter, MessageFlowFormatter)
258 |     assert just_agent_formatter.agent_name == "default_agent"  # Agent -> default_agent
259 | 
260 |     logger_empty_derivation = LoggerSetup.create_logger("AgentLogger", log_file=temp_log_file)
261 |     empty_deriv_formatter = logger_empty_derivation.handlers[0].formatter
262 |     assert isinstance(empty_deriv_formatter, MessageFlowFormatter)
263 |     assert empty_deriv_formatter.agent_name == "default_agent"  # AgentLogger -> default_agent
264 | 
265 | 
266 | def test_logger_setup_create_logger_session_id(temp_log_file):
267 |     logger = LoggerSetup.create_logger("SessionLogger", log_file=temp_log_file, session_id="sessABC")
268 |     formatter = logger.handlers[0].formatter
269 |     assert isinstance(formatter, MessageFlowFormatter)
270 |     assert formatter.session_id == "sessABC"
271 | 
272 | 
273 | def test_logger_setup_create_logger_no_rotating_file(temp_log_file):
274 |     logger = LoggerSetup.create_logger("SimpleFileLogger", log_file=temp_log_file, use_rotating_file=False)
275 |     assert isinstance(logger.handlers[1], logging.FileHandler)
276 |     assert not isinstance(logger.handlers[1], handlers.RotatingFileHandler)
277 | 
278 | 
279 | def test_logger_setup_create_logger_overwrite_mode(tmp_path):
280 |     log_file_overwrite = tmp_path / "overwrite.log"
281 |     log_file_overwrite.write_text("Previous content\n")
282 | 
283 |     # Create logger in append mode (default)
284 |     logger_append = LoggerSetup.create_logger("AppendLogger", log_file=str(log_file_overwrite), use_rotating_file=False)
285 |     logger_append.warning("Append test")
286 |     LoggerSetup.flush_logger("AppendLogger")  # Ensure written
287 | 
288 |     # Create logger in overwrite mode; use a plain FileHandler to test 'w' mode directly.
289 |     logger_overwrite = LoggerSetup.create_logger(
290 |         "OverwriteLogger",
291 |         log_file=str(log_file_overwrite),
292 |         append_mode=False,
293 |         use_rotating_file=False,  # Use simple FileHandler to test 'w' mode directly
294 |     )
295 |     logger_overwrite.error("Overwrite test")
296 |     LoggerSetup.flush_logger("OverwriteLogger")  # Ensure written
297 | 
298 |     content = log_file_overwrite.read_text()
299 |     assert "Previous content" not in content
300 |     assert "Append test" not in content
301 |     assert "Overwrite test" in content
302 |     assert "overwrite_agent |" in content  # agent name will be derived
303 | 
304 | 
305 | def test_logger_setup_create_logger_preserve_test_format(temp_log_file, mock_log_record):
306 |     logger = LoggerSetup.create_logger("TestFormatLogger", log_file=temp_log_file, preserve_test_format=True)
307 | 
308 |     file_handler = logger.handlers[1]  # File handler
309 |     assert isinstance(file_handler.formatter, logging.Formatter)  # Plain Formatter
310 |     assert not isinstance(file_handler.formatter, MessageFlowFormatter)
311 | 
312 |     # Console handler should use standard Formatter when preserve_test_format is True
313 |     console_handler = logger.handlers[0]
314 |     assert isinstance(console_handler.formatter, logging.Formatter)
315 |     assert not isinstance(
316 |         console_handler.formatter, MessageFlowFormatter
317 |     )  # Explicitly check it's NOT MessageFlowFormatter
318 | 
319 |     # Test logging a test summary line
320 |     test_summary_msg = "Test Summary: 1 passed"
321 |     mock_log_record.getMessage.return_value = test_summary_msg
322 | 
323 |     # File handler with simple formatter should just output the message
324 |     formatted_file = file_handler.formatter.format(mock_log_record)
325 |     assert formatted_file == test_summary_msg
326 | 
327 |     # Console handler (standard Formatter) when preserve_test_format=True
328 |     # should output using LEGACY_LOG_FORMAT.
329 |     formatted_console = console_handler.formatter.format(mock_log_record)
330 |     # The legacy format wraps the message with the record's metadata, so the
331 |     # console output is more than the bare summary line.
332 |     assert formatted_console != test_summary_msg
333 |     assert "Test Summary: 1 passed" in formatted_console  # Check if the message is part of it
334 |     assert mock_log_record.name in formatted_console  # e.g. TestLogger
335 |     assert mock_log_record.levelname in formatted_console  # e.g. INFO
336 | 
337 | 
338 | @patch("os.makedirs")
339 | def test_logger_setup_create_logger_log_dir_creation_failure(mock_makedirs, tmp_path, capsys):
340 |     mock_makedirs.side_effect = OSError("Cannot create dir")
341 |     # Use a log file path that would require directory creation
342 |     log_file_in_new_dir = tmp_path / "new_log_subdir" / "error.log"
343 | 
344 |     logger = LoggerSetup.create_logger("ErrorDirLogger", log_file=str(log_file_in_new_dir))
345 | 
346 |     # Should have only console handler if file dir creation failed
347 |     assert len(logger.handlers) == 1
348 |     assert isinstance(logger.handlers[0], logging.StreamHandler)
349 | 
350 |     captured = capsys.readouterr()
351 |     expected_dir = str(tmp_path / "new_log_subdir")
352 |     assert f"ERROR: Could not create log directory {expected_dir}" in captured.err
353 |     assert mock_makedirs.call_count == 1  # Should have attempted to create it
354 | 
355 | 
356 | def test_logger_setup_clear_handlers_on_recreate(temp_log_file):
357 |     logger1 = LoggerSetup.create_logger("RecreateTest", log_file=temp_log_file)
358 |     assert len(logger1.handlers) == 2
359 | 
360 |     # Get the actual underlying logger instance
361 |     underlying_logger = logging.getLogger("RecreateTest")
362 |     assert len(underlying_logger.handlers) == 2
363 | 
364 |     logger2 = LoggerSetup.create_logger("RecreateTest", log_file=temp_log_file, log_level="DEBUG")
365 |     assert logger2 is logger1  # Should be the same logger object
366 |     assert len(logger2.handlers) == 2  # Handlers should be replaced, not added
367 |     assert len(underlying_logger.handlers) == 2
368 | 
369 | 
370 | def test_logger_setup_flush_logger(temp_log_file):
371 |     logger = LoggerSetup.create_logger("FlushTest", log_file=temp_log_file)
372 |     mock_handler = MagicMock(spec=logging.Handler)
373 |     mock_handler.flush = MagicMock()
374 | 
375 |     # Replace handlers for testing flush
376 |     original_handlers = list(logger.handlers)  # Keep a copy
377 |     logger.handlers = [mock_handler]
378 | 
379 |     assert LoggerSetup.flush_logger("FlushTest") is True
380 |     mock_handler.flush.assert_called_once()
381 | 
382 |     logger.handlers = original_handlers  # Restore original handlers
383 |     # Ensure original handlers are closed if they were file handlers, to avoid ResourceWarning
384 |     for handler in original_handlers:
385 |         if isinstance(handler, logging.FileHandler):
386 |             handler.close()
387 | 
388 |     assert LoggerSetup.flush_logger("NonExistentLogger") is False
389 | 
390 | 
391 | def test_logger_setup_flush_all_loggers(temp_log_file):
392 |     logger_a = LoggerSetup.create_logger("FlushAllA", log_file=temp_log_file)
393 |     logger_b = LoggerSetup.create_logger("FlushAllB", log_file=None)  # Console only
394 | 
395 |     # Before replacing logger_a's handlers with mocks, clear its existing (real) handlers
396 |     # to ensure its file handler is properly closed.
397 |     LoggerSetup._clear_and_close_handlers(logger_a)
398 | 
399 |     mock_handler_a_file = MagicMock(spec=logging.FileHandler)
400 |     mock_handler_a_file.flush = MagicMock()
401 |     mock_handler_a_console = MagicMock(spec=logging.StreamHandler)
402 |     mock_handler_a_console.flush = MagicMock()
403 |     # No stream attribute is needed on the StreamHandler mocks:
404 |     # _clear_and_close_handlers reads it via getattr(handler, 'stream', None),
405 |     # so a missing attribute is handled gracefully.
406 |     # mock_handler_a_console.stream = sys.stdout  # only for stream-specific tests
407 |     # Ensure logger_a uses these mocked handlers
408 |     logger_a.handlers = [mock_handler_a_console, mock_handler_a_file]
409 | 
410 |     mock_handler_b_console = MagicMock(spec=logging.StreamHandler)
411 |     mock_handler_b_console.flush = MagicMock()
412 |     # mock_handler_b_console.stream = sys.stdout
413 |     logger_b.handlers = [mock_handler_b_console]
414 | 
415 |     LoggerSetup.flush_all_loggers()
416 | 
417 |     mock_handler_a_file.flush.assert_called_once()
418 |     mock_handler_a_console.flush.assert_called_once()
419 |     mock_handler_b_console.flush.assert_called_once()
420 | 
421 |     # Clean up: handlers should be closed to avoid ResourceWarning.
422 |     # flush_all_loggers does not return the loggers it touched, so this test
423 |     # cannot close their handlers directly without reaching into
424 |     # LoggerSetup._active_loggers, which is an internal detail.
425 |     # Instead we rely on two mechanisms:
426 |     #   1. create_logger closes stale handlers when a logger is re-created.
427 |     #   2. The autouse fixture calls reset_loggers_for_testing, which clears
428 |     #      _active_loggers and closes their handlers after each test.
429 |     # Hence no explicit LoggerSetup._active_loggers.clear() is needed here.
430 | 
431 | 
432 | def test_logger_setup_write_test_summary(temp_log_file):
433 |     logger = LoggerSetup.create_logger("TestSummaryLogger", log_file=temp_log_file, preserve_test_format=True)
434 | 
435 |     # Output is verified by reading the log file back after the summary is
436 |     # written and flushed, rather than by mocking the file handler.
437 | 
438 |     # Find the file handler so its baseFilename can be read back afterwards
439 |     original_file_handler = None
440 |     for handler in logger.handlers:
441 |         if isinstance(handler, logging.FileHandler):
442 |             original_file_handler = handler
443 |             break
444 | 
445 |     if original_file_handler:
446 |         # To capture output from file handler, we can check the file content
447 |         # or mock its stream's write method. Checking file content is more robust.
448 |         log_file_path = original_file_handler.baseFilename
449 |     else:
450 |         pytest.fail("File handler not found on TestSummaryLogger")
451 | 
452 |     summary_data = {
453 |         "passed": 5,
454 |         "failed": 2,
455 |         "skipped": 1,
456 |         "duration": 1.234,
457 |         "failed_tests": ["test_one", "test_two"],  # This structure might differ from actual use
458 |         "failed_modules": {"moduleA": ["test_one_a"], "moduleB": ["test_two_b"]},
459 |     }
460 |     LoggerSetup.write_test_summary(logger, summary_data)
461 | 
462 |     LoggerSetup.flush_logger("TestSummaryLogger")  # Ensure all written to file
463 | 
464 |     # Read the log file content
465 |     with open(log_file_path, "r") as f:
466 |         log_content = f.read()
467 | 
468 |     assert "=============== test session starts ===============" in log_content
469 |     assert "5 passed, 2 failed, 1 skipped in 1.23s" in log_content
470 |     assert "Test Summary: 5 passed, 2 failed, 1 skipped" in log_content
471 |     assert "Status: FAILED" in log_content
472 |     assert "Duration: 1.23 seconds" in log_content
473 |     assert "Failed tests by module:" in log_content
474 |     assert "Module: moduleA - 1 failed tests" in log_content
475 |     assert "- test_one_a" in log_content
476 |     assert "Module: moduleB - 1 failed tests" in log_content
477 |     assert "- test_two_b" in log_content
478 |     assert "==================================================" in log_content
479 | 
480 | 
481 | # --- Tests for setup_logger (convenience function) ---
482 | def test_setup_logger_convenience_function(temp_log_file):
483 |     with patch.object(LoggerSetup, "create_logger", wraps=LoggerSetup.create_logger) as mock_create_logger:
484 |         logger = setup_logger(
485 |             "ConvenienceAgent", log_level="DEBUG", session_id="conv123", log_file=temp_log_file, use_rotating_file=False
486 |         )
487 | 
488 |         mock_create_logger.assert_called_once_with(
489 |             "ConvenienceAgent",  # name
490 |             log_file=temp_log_file,
491 |             agent_name="ConvenienceAgent",  # agent_name also from first arg
492 |             log_level="DEBUG",
493 |             session_id="conv123",
494 |             use_rotating_file=False,
495 |             # append_mode and preserve_test_format use defaults from create_logger
496 |         )
497 |         assert logger.name == "ConvenienceAgent"
498 |         assert logger.level == logging.DEBUG
499 | 
500 | 
501 | # Test PROJECT_ROOT and LOGS_BASE_DIR for basic correctness
502 | def test_project_root_and_logs_base_dir_paths():
503 |     # PROJECT_ROOT should be a valid directory
504 |     assert os.path.isdir(PROJECT_ROOT), f"PROJECT_ROOT is not a valid directory: {PROJECT_ROOT}"
505 |     # pyproject.toml should exist in PROJECT_ROOT
506 |     assert os.path.exists(os.path.join(PROJECT_ROOT, "pyproject.toml")), "pyproject.toml not found in PROJECT_ROOT"
507 | 
508 |     # LOGS_BASE_DIR should also be valid or creatable
509 |     assert os.path.isdir(LOGS_BASE_DIR) or not os.path.exists(
510 |         LOGS_BASE_DIR
511 |     ), f"LOGS_BASE_DIR is not valid or creatable: {LOGS_BASE_DIR}"
512 |     # LOGS_BASE_DIR should be under PROJECT_ROOT
513 |     assert LOGS_BASE_DIR.startswith(PROJECT_ROOT), "LOGS_BASE_DIR is not under PROJECT_ROOT"
514 | 
515 |     # Test get_logs_dir() directly too
516 |     retrieved_logs_dir = get_logs_dir()
517 |     assert os.path.isdir(retrieved_logs_dir)
518 |     assert retrieved_logs_dir == LOGS_BASE_DIR
519 | 
520 | 
521 | # --- Tests for find_project_root ---
522 | def test_find_project_root_finds_marker(tmp_path):
523 |     """Test find_project_root when pyproject.toml exists."""
524 |     marker_file = "pyproject.toml"
525 |     # Create
526 | 
```
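
The tests above pin down the observable contract of `LoggerSetup` and `MessageFlowFormatter`. As a companion, here is a minimal usage sketch, not part of the repository: it assumes only the names the test module imports (`LoggerSetup` and `MessageFlowFormatter` from `log_analyzer_mcp.common.logger_setup`) and the `agent | timestamp | ... | message` line shape the assertions check for; the exact field order is an assumption.

```python
# Hypothetical demo, not from the repo: drives LoggerSetup/MessageFlowFormatter
# the same way the unit tests above do.
import logging

from log_analyzer_mcp.common.logger_setup import LoggerSetup, MessageFlowFormatter

# Console-only logger (no log_file), with explicit agent name and session id.
logger = LoggerSetup.create_logger("DemoLogger", agent_name="demo_agent", session_id="sess42")
logger.info("Hello from the demo")  # rendered by MessageFlowFormatter

# Formatting a record directly, as the tests do with a mocked record.
record = logging.LogRecord(
    name="DemoLogger",
    level=logging.INFO,
    pathname=__file__,
    lineno=1,
    msg="SomeSender => demo_agent | Work finished",
    args=None,
    exc_info=None,
)
print(MessageFlowFormatter("demo_agent", session_id="sess42").format(record))
```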

--------------------------------------------------------------------------------
/src/log_analyzer_mcp/core/analysis_engine.py:
--------------------------------------------------------------------------------

```python
  1 | # src/log_analyzer_mcp/core/analysis_engine.py
  2 | 
  3 | import datetime as dt  # Import datetime module as dt
  4 | import glob
  5 | import os
  6 | import re  # For basic parsing
  7 | from datetime import datetime as DateTimeClassForCheck  # Specific import for isinstance check
  8 | from typing import Any, Dict, List, Optional  # Added Any for filter_criteria flexibility
  9 | import logging  # Add logging import
 10 | 
 11 | from ..common.config_loader import ConfigLoader
 12 | 
 13 | # Define a structure for a parsed log entry
 14 | # Using a simple dict for now, could be a Pydantic model later for stricter validation
 15 | ParsedLogEntry = Dict[str, Any]  # Keys: 'timestamp', 'level', 'message', 'raw_line', 'file_path', 'line_number'
 16 | # Adding 'context_before_lines', 'context_after_lines' to store context directly in the entry
 17 | # And 'full_context_log' which would be the original line plus its context
 18 | 
 19 | 
 20 | class AnalysisEngine:
 21 |     def __init__(
 22 |         self,
 23 |         logger_instance: logging.Logger,
 24 |         env_file_path: Optional[str] = None,
 25 |         project_root_for_config: Optional[str] = None,
 26 |     ):
 27 |         self.logger = logger_instance
 28 |         self.config_loader = ConfigLoader(env_file_path=env_file_path, project_root_for_config=project_root_for_config)
 29 | 
 30 |         # Load configurations using the correct ConfigLoader methods
 31 |         self.log_directories: List[str] = self.config_loader.get_log_directories()
 32 |         self.log_content_patterns: Dict[str, List[str]] = self.config_loader.get_log_patterns()
 33 |         self.default_context_lines_before: int = self.config_loader.get_context_lines_before()
 34 |         self.default_context_lines_after: int = self.config_loader.get_context_lines_after()
 35 |         self.logging_scopes: Dict[str, str] = self.config_loader.get_logging_scopes()
 36 | 
 37 |         # TODO: Potentially add more sophisticated validation or processing of loaded configs
 38 | 
 39 |     def _get_target_log_files(
 40 |         self, scope: Optional[str] = None, log_dirs_override: Optional[List[str]] = None
 41 |     ) -> List[str]:
 42 |         """
 43 |         Determines the list of log files to search.
 44 |         Uses log_dirs_override if provided, otherwise falls back to scope or general config.
 45 |         log_dirs_override can contain direct file paths, directory paths, or glob patterns.
 46 |         If a directory path is provided, it searches for '*.log' files recursively.
 47 |         """
 48 |         self.logger.info(f"[_get_target_log_files] Called with scope: {scope}, override: {log_dirs_override}")
 49 |         target_paths_or_patterns: List[str] = []
 50 |         project_root = self.config_loader.project_root
 51 |         self.logger.info(f"[_get_target_log_files] Project root: {project_root}")
 52 | 
 53 |         using_override_dirs = False
 54 |         if log_dirs_override:
 55 |             self.logger.info(f"[_get_target_log_files] Using log_dirs_override: {log_dirs_override}")
 56 |             target_paths_or_patterns.extend(log_dirs_override)
 57 |             using_override_dirs = True
 58 |         elif scope and scope.lower() in self.logging_scopes:
 59 |             path_or_pattern = self.logging_scopes[scope.lower()]
 60 |             self.logger.info(f"[_get_target_log_files] Using scope '{scope}', path_or_pattern: {path_or_pattern}")
 61 |             abs_scope_path = os.path.abspath(os.path.join(project_root, path_or_pattern))
 62 |             if not abs_scope_path.startswith(project_root):
 63 |                 self.logger.warning(
 64 |                     f"Scope '{scope}' path '{path_or_pattern}' resolves outside project root. Skipping."
 65 |                 )
 66 |                 return []
 67 |             target_paths_or_patterns.append(abs_scope_path)
 68 |         elif scope:  # Scope was provided but not found in self.logging_scopes
 69 |             self.logger.info(
 70 |                 f"[AnalysisEngine] Scope '{scope}' not found in configuration. Returning no files for this scope."
 71 |             )
 72 |             return []
 73 |         else:
 74 |             self.logger.info(
 75 |                 f"[_get_target_log_files] Using default log_directories from config: {self.log_directories}"
 76 |             )
 77 |             for log_dir_pattern in self.log_directories:
 78 |                 abs_log_dir_pattern = os.path.abspath(os.path.join(project_root, log_dir_pattern))
 79 |                 if not abs_log_dir_pattern.startswith(project_root):
 80 |                     self.logger.warning(
 81 |                         f"Log directory pattern '{log_dir_pattern}' resolves outside project root. Skipping."
 82 |                     )
 83 |                     continue
 84 |                 target_paths_or_patterns.append(abs_log_dir_pattern)
 85 | 
 86 |         self.logger.info(f"[_get_target_log_files] Effective target_paths_or_patterns: {target_paths_or_patterns}")
 87 | 
 88 |         resolved_files: List[str] = []
 89 |         for path_or_pattern_input in target_paths_or_patterns:
 90 |             self.logger.info(f"[_get_target_log_files] Processing input: {path_or_pattern_input}")
 91 |             if not os.path.isabs(path_or_pattern_input):
 92 |                 current_search_item = os.path.abspath(os.path.join(project_root, path_or_pattern_input))
 93 |                 self.logger.info(
 94 |                     f"[_get_target_log_files] Relative input '{path_or_pattern_input}' made absolute: {current_search_item}"
 95 |                 )
 96 |             else:
 97 |                 current_search_item = os.path.abspath(path_or_pattern_input)
 98 |                 self.logger.info(
 99 |                     f"[_get_target_log_files] Absolute input '{path_or_pattern_input}' normalized to: {current_search_item}"
100 |                 )
101 | 
102 |             if not current_search_item.startswith(project_root):
103 |                 self.logger.warning(
104 |                     f"[_get_target_log_files] Item '{current_search_item}' is outside project root '{project_root}'. Skipping."
105 |                 )
106 |                 continue
107 | 
108 |             self.logger.info(f"[_get_target_log_files] Checking item: {current_search_item}")
109 |             if os.path.isfile(current_search_item):
110 |                 self.logger.info(f"[_get_target_log_files] Item '{current_search_item}' is a file.")
111 |                 # If current_search_item came from a scope that resolved to a direct file,
112 |                 # or from an override that was a direct file, include it.
113 |                 # The `using_override_dirs` flag helps distinguish.
114 |                 # If it came from a scope, `using_override_dirs` is False.
115 |                 is_from_scope_direct_file = not using_override_dirs and any(
116 |                     current_search_item == os.path.abspath(os.path.join(project_root, self.logging_scopes[s_key]))
117 |                     for s_key in self.logging_scopes
118 |                     if not glob.has_magic(self.logging_scopes[s_key])
119 |                     and not os.path.isdir(os.path.join(project_root, self.logging_scopes[s_key]))
120 |                 )
121 | 
122 |                 if using_override_dirs or is_from_scope_direct_file:
123 |                     resolved_files.append(current_search_item)
124 |                 elif current_search_item.endswith(".log"):  # Default behavior for non-override, non-direct-scope-file
125 |                     resolved_files.append(current_search_item)
126 |             elif os.path.isdir(current_search_item):
127 |                 # Search for *.log files recursively in the directory
128 |                 for filepath in glob.glob(
129 |                     os.path.join(glob.escape(current_search_item), "**", "*.log"), recursive=True
130 |                 ):
131 |                     if os.path.isfile(filepath) and os.path.abspath(filepath).startswith(
132 |                         project_root
133 |                     ):  # Double check resolved path
134 |                         resolved_files.append(os.path.abspath(filepath))
135 |             else:  # Assumed to be a glob pattern
136 |                 # Glob patterns must be rooted at the project root: an override like
137 |                 # "specific_module/logs/*.log" should resolve within the project, while
138 |                 # a bare "*.log" would otherwise glob from the CWD.
139 |                 # The normalization above makes relative patterns absolute from project_root.
140 | 
141 |                 # The glob pattern itself (current_search_item) is already an absolute path or made absolute starting from project_root
142 |                 is_recursive_glob = "**" in path_or_pattern_input  # Check original input for "**"
143 | 
144 |                 for filepath in glob.glob(current_search_item, recursive=is_recursive_glob):
145 |                     abs_filepath = os.path.abspath(filepath)
146 |                     if (
147 |                         os.path.isfile(abs_filepath)
148 |                         and abs_filepath.endswith(".log")
149 |                         and abs_filepath.startswith(project_root)
150 |                     ):
151 |                         resolved_files.append(abs_filepath)
152 |                     elif (
153 |                         os.path.isfile(abs_filepath)
154 |                         and not abs_filepath.endswith(".log")
155 |                         and using_override_dirs
156 |                         and not os.path.isdir(path_or_pattern_input)  # Ensure original input wasn't a directory
157 |                         and (
158 |                             os.path.splitext(abs_filepath)[1]
159 |                             in os.path.splitext(current_search_item)[1]  # Check if glob was for specific ext
160 |                             if not glob.has_magic(
161 |                                 current_search_item
162 |                             )  # If current_search_item was specific file (not a glob)
163 |                             else True  # If current_search_item itself was a glob (e.g. *.txt)
164 |                         )
165 |                     ):
166 |                         # Direct file overrides that do not end with .log are already
167 |                         # handled above (os.path.isfile(current_search_item) with
168 |                         # using_override_dirs). This elif covers non-.log files that a
169 |                         # glob from log_dirs_override expands to.
170 |                         # current_search_item is the absolute form of
171 |                         # path_or_pattern_input; abs_filepath is the concrete file
172 |                         # that glob.glob() found.
173 |                         # A non-.log file found this way should be included only when
174 |                         # the user explicitly asked for it via a pattern: an override
175 |                         # of "dir/*.txt" should return .txt files, whereas a generic
176 |                         # glob that merely happens to match a non-.log file should
177 |                         # not widen the result set.
178 |                         # So include the file exactly when the original override (or
179 |                         # its absolute form) contains glob magic characters, meaning
180 |                         # the user supplied an explicit pattern.
181 |                         if glob.has_magic(path_or_pattern_input) or glob.has_magic(current_search_item):
182 |                             # If original input or its absolute form was a glob, include what it finds.
183 |                             resolved_files.append(abs_filepath)
184 |                         # No 'else' needed: files that are neither .log nor matched via an override glob are skipped.
185 | 
186 |         return sorted(list(set(resolved_files)))  # Unique sorted list
187 | 
188 |     def _parse_log_line(self, line: str, file_path: str, line_number: int) -> Optional[ParsedLogEntry]:
189 |         """Parses a single log line. Attempts to match a common log format and falls back gracefully."""
190 |         # Regex for "YYYY-MM-DD HH:MM:SS[,ms] LEVEL MESSAGE"
191 |         # It captures timestamp, level, and message. Milliseconds are optional.
192 |         log_pattern = re.compile(
193 |             r"^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}(?:,\d{3})?)\s+"
194 |             r"(?P<level>[A-Z]+(?:\s+[A-Z]+)*)\s+"  # Allow multi-word levels like 'INFO EXAMPLE'
195 |             r"(?P<message>.*)$"
196 |         )
197 |         match = log_pattern.match(line)
198 | 
199 |         if match:
200 |             groups = match.groupdict()
201 |             timestamp_str = groups.get("timestamp")
202 |             level_str = groups.get("level", "UNKNOWN").upper()
203 |             message_str = groups.get("message", "").strip()
204 | 
205 |             parsed_timestamp: Optional[dt.datetime] = None
206 |             if timestamp_str:
207 |                 # self.logger.debug(f"Attempting to parse timestamp string: '{timestamp_str}' from line: '{line.strip()}'") # DEBUG
208 |                 try:
209 |                     # Handle optional milliseconds by splitting at comma
210 |                     timestamp_to_parse = timestamp_str.split(",")[0]
211 |                     parsed_timestamp = dt.datetime.strptime(timestamp_to_parse, "%Y-%m-%d %H:%M:%S")
212 |                 except ValueError as e:
213 |                     self.logger.debug(
214 |                         f"ValueError parsing timestamp string: '{timestamp_str}' (tried '{timestamp_to_parse}'). Error: {e}. Line {line_number} in {file_path}: {line.strip()}"
215 |                     )
216 |                     # Fall through to return with None timestamp but other parsed fields
217 | 
218 |             return {
219 |                 "timestamp": parsed_timestamp,
220 |                 "level": level_str,
221 |                 "message": message_str,
222 |                 "raw_line": line.strip(),
223 |                 "file_path": file_path,
224 |                 "line_number": line_number,
225 |             }
226 | 
227 |         # Fallback for lines that don't match the primary pattern
228 |         # (e.g., stack traces, multi-line messages not handled by a continuation pattern)
229 |         self.logger.debug(f"Line did not match primary log pattern. Line {line_number} in {file_path}: {line.strip()}")
230 |         return {
231 |             "timestamp": None,
232 |             "level": "UNKNOWN",
233 |             "message": line.strip(),
234 |             "raw_line": line.strip(),
235 |             "file_path": file_path,
236 |             "line_number": line_number,
237 |         }
238 | 
239 |     def _apply_content_filters(
240 |         self, entries: List[ParsedLogEntry], filter_criteria: Dict[str, Any]
241 |     ) -> List[ParsedLogEntry]:
242 |         """
243 |         Filters entries based on content patterns.
244 |         Uses 'log_content_patterns_override' from filter_criteria if available (as a list of general regexes).
245 |         Otherwise, uses level-specific regexes from self.log_content_patterns (config) IF a level_filter is also provided.
246 |         """
247 |         override_patterns: Optional[List[str]] = filter_criteria.get("log_content_patterns_override")
248 | 
249 |         if override_patterns is not None:  # Check if the key exists, even if list is empty
250 |             # Apply general override patterns
251 |             if not override_patterns:  # Empty list provided (e.g. override_patterns == [])
252 |                 self.logger.info(
253 |                     "[_apply_content_filters] log_content_patterns_override is empty list. Returning all entries."
254 |                 )
255 |                 return entries
256 | 
257 |             filtered_entries: List[ParsedLogEntry] = []
258 |             for entry in entries:
259 |                 message = entry.get("message", "")
260 |                 # level = entry.get("level", "UNKNOWN").upper() # Not used in override path
261 | 
262 |                 # entry_added = False # Not strictly needed with break
263 |                 for pattern_str in override_patterns:
264 |                     try:
265 |                         if re.search(pattern_str, message, re.IGNORECASE):
266 |                             filtered_entries.append(entry)
267 |                             # entry_added = True
268 |                             break  # Matched one pattern, include entry and move to next entry
269 |                     except re.error as e:
270 |                         self.logger.warning(
271 |                             f"Invalid regex in override_patterns: '{pattern_str}'. Error: {e}. Skipping this pattern."
272 |                         )
273 |             return filtered_entries
274 |         else:
275 |             # No override_patterns. Use configured level-specific patterns only if a level_filter is present.
276 |             level_filter_str = filter_criteria.get("level_filter", "").upper()
277 | 
278 |             if not level_filter_str:
279 |                 # No specific level_filter provided in criteria, and no override patterns.
280 |                 # Content filtering should not apply by default from env/config in this case.
281 |                 self.logger.info(
282 |                     "[_apply_content_filters] No override patterns and no level_filter in criteria. Returning all entries."
283 |                 )
284 |                 return entries
285 | 
286 |             # A specific level_filter_str IS provided. Use patterns for that level from self.log_content_patterns.
287 |             # self.log_content_patterns is Dict[str (lowercase level), List[str_patterns]]
288 |             # Ensure level_filter_str matches the key format (e.g. "error" not "ERROR")
289 |             relevant_patterns = self.log_content_patterns.get(level_filter_str.lower(), [])
290 | 
291 |             self.logger.info(
292 |                 f"[_apply_content_filters] Using config patterns for level_filter: '{level_filter_str}'. Relevant patterns: {relevant_patterns}"
293 |             )
294 | 
295 |             # Filter by the specified level first.
296 |             # Then, if there are patterns for that level, apply them.
297 |             # If no patterns for that level, all entries of that level pass.
298 | 
299 |             filtered_entries = []
300 |             for entry in entries:
301 |                 entry_level = entry.get("level", "UNKNOWN").upper()
302 |                 message = entry.get("message", "")
303 | 
304 |                 if entry_level == level_filter_str:  # Entry must match the specified level
305 |                     if not relevant_patterns:
306 |                         # No patterns for this level, so include if level matches
307 |                         filtered_entries.append(entry)
308 |                     else:
309 |                         # Patterns exist for this level, try to match them
310 |                         for pattern_str in relevant_patterns:
311 |                             try:
312 |                                 if re.search(pattern_str, message, re.IGNORECASE):
313 |                                     filtered_entries.append(entry)
314 |                                     break  # Matched one pattern for this level, include entry
315 |                             except re.error as e:
316 |                                 self.logger.warning(
317 |                                     f"Invalid regex in configured patterns for level {level_filter_str}: '{pattern_str}'. Error: {e}. Skipping pattern."
318 |                                 )
319 |             return filtered_entries
320 | 
321 |     def _apply_time_filters(
322 |         self, entries: List[ParsedLogEntry], filter_criteria: Dict[str, Any]
323 |     ) -> List[ParsedLogEntry]:
324 |         """Filters entries based on time window from filter_criteria."""
325 |         now = dt.datetime.now()  # Use dt.datetime.now()
326 |         time_window_applied = False
327 |         earliest_time: Optional[dt.datetime] = None  # Use dt.datetime for type hint
328 | 
329 |         if filter_criteria.get("minutes", 0) > 0:
330 |             earliest_time = now - dt.timedelta(minutes=filter_criteria["minutes"])
331 |             time_window_applied = True
332 |         elif filter_criteria.get("hours", 0) > 0:
333 |             earliest_time = now - dt.timedelta(hours=filter_criteria["hours"])
334 |             time_window_applied = True
335 |         elif filter_criteria.get("days", 0) > 0:
336 |             earliest_time = now - dt.timedelta(days=filter_criteria["days"])
337 |             time_window_applied = True
338 | 
339 |         if not time_window_applied or earliest_time is None:
340 |             return entries  # No time filter to apply or invalid criteria
341 | 
342 |         filtered_entries: List[ParsedLogEntry] = []
343 |         for entry in entries:
344 |             entry_timestamp = entry.get("timestamp")
345 |             # Ensure entry_timestamp is a datetime.datetime object before comparison
346 |             if (
347 |                 isinstance(entry_timestamp, DateTimeClassForCheck) and entry_timestamp >= earliest_time
348 |             ):  # Use DateTimeClassForCheck for isinstance
349 |                 filtered_entries.append(entry)
350 | 
351 |         return filtered_entries
352 | 
353 |     def _apply_positional_filters(
354 |         self, entries: List[ParsedLogEntry], filter_criteria: Dict[str, Any]
355 |     ) -> List[ParsedLogEntry]:
356 |         """Filters entries based on positional criteria (first_n, last_n)."""
357 |         first_n = filter_criteria.get("first_n")
358 |         last_n = filter_criteria.get("last_n")
359 | 
360 |         # Only filter by timestamp and sort if a positional filter is active
361 |         if (first_n is not None and isinstance(first_n, int) and first_n > 0) or (
362 |             last_n is not None and isinstance(last_n, int) and last_n > 0
363 |         ):
364 | 
365 |             # Filter out entries with no timestamp before sorting for positional filters
366 |             entries_with_timestamp = [e for e in entries if e.get("timestamp") is not None]
367 | 
368 |             # Ensure entries are sorted by timestamp before applying positional filters
369 |             # ParsedLogEntry includes 'timestamp', which is a datetime object
370 |             # Using e["timestamp"] as we've filtered for its existence and non-None value.
371 |             sorted_entries = sorted(entries_with_timestamp, key=lambda e: e["timestamp"])
372 | 
373 |             if first_n is not None and isinstance(first_n, int) and first_n > 0:
374 |                 return sorted_entries[:first_n]
375 |             elif last_n is not None and isinstance(last_n, int) and last_n > 0:
376 |                 return sorted_entries[-last_n:]
377 |             else:
378 |                 # Should not be reached if the outer if condition is met correctly
379 |                 return sorted_entries
380 | 
381 |         # If no positional filter is active, return the original entries
382 |         # Order might be important, so don't sort unless a positional filter needs it.
383 |         return entries
384 | 
385 |     def _extract_context_lines(
386 |         self,
387 |         entries: List[ParsedLogEntry],
388 |         all_lines_by_file: Dict[str, List[str]],
389 |         context_before: int,
390 |         context_after: int,
391 |     ) -> List[ParsedLogEntry]:
392 |         """Extracts context lines for each entry."""
393 |         if context_before == 0 and context_after == 0:
394 |             # Add empty context if no context lines are requested, to maintain structure
395 |             for entry in entries:
396 |                 entry["context_before_lines"] = []
397 |                 entry["context_after_lines"] = []
398 |                 entry["full_context_log"] = entry["raw_line"]
399 |             return entries
400 | 
401 |         entries_with_context: List[ParsedLogEntry] = []
402 |         for entry in entries:
403 |             file_path = entry["file_path"]
404 |             line_number = entry["line_number"]  # 1-indexed from original file
405 | 
406 |             if file_path not in all_lines_by_file:
407 |                 # This shouldn't happen if all_lines_by_file is populated correctly
408 |                 entry["context_before_lines"] = []
409 |                 entry["context_after_lines"] = []
410 |                 entry["full_context_log"] = entry["raw_line"]
411 |                 entries_with_context.append(entry)
412 |                 self.logger.warning(f"Warning: File {file_path} not found in all_lines_by_file for context extraction.")
413 |                 continue
414 | 
415 |             file_lines = all_lines_by_file[file_path]
416 |             actual_line_index = line_number - 1  # Convert to 0-indexed for list access
417 | 
418 |             start_index = max(0, actual_line_index - context_before)
419 |             end_index = min(len(file_lines), actual_line_index + context_after + 1)
420 | 
421 |             entry_copy = entry.copy()  # Avoid modifying the original entry directly in the list
422 |             entry_copy["context_before_lines"] = [line.strip() for line in file_lines[start_index:actual_line_index]]
423 |             entry_copy["context_after_lines"] = [line.strip() for line in file_lines[actual_line_index + 1 : end_index]]
424 | 
425 |             # Construct full_context_log
426 |             full_context_list = (
427 |                 entry_copy["context_before_lines"] + [entry_copy["raw_line"]] + entry_copy["context_after_lines"]
428 |             )
429 |             entry_copy["full_context_log"] = "\n".join(full_context_list)
430 | 
431 |             entries_with_context.append(entry_copy)
432 | 
433 |         return entries_with_context
434 | 
435 |     def search_logs(self, filter_criteria: Dict[str, Any]) -> List[Dict[str, Any]]:
436 |         """
437 |         Main method to search logs based on various criteria.
438 |         filter_criteria is a dictionary that can contain:
439 |         - log_dirs_override: List[str] (paths/globs to search instead of config)
440 |         - scope: str (e.g., "mcp", "runtime" to use predefined paths from config)
441 |         - log_content_patterns_override: List[str] (regexes for log message content)
442 |         - level_filter: str (e.g., "ERROR", "WARNING")
443 |         - time_filter_type: str ("minutes", "hours", "days") - maps to minutes, hours, days keys
444 |         - time_filter_value: int (e.g., 30 for 30 minutes) - maps to minutes, hours, days values
445 |         - positional_filter_type: str ("first_n", "last_n") - maps to first_n, last_n keys
446 |         - positional_filter_value: int (e.g., 10 for first 10 records) - maps to first_n, last_n values
447 |         - context_before: int (lines of context before match)
448 |         - context_after: int (lines of context after match)
449 |         """
450 |         self.logger.info(f"[AnalysisEngine.search_logs] Called with filter_criteria: {filter_criteria}")
451 | 
452 |         all_raw_lines_by_file: Dict[str, List[str]] = {}
453 |         parsed_entries: List[ParsedLogEntry] = []
454 | 
455 |         # 1. Determine target log files
456 |         target_files = self._get_target_log_files(
457 |             scope=filter_criteria.get("scope"),
458 |             log_dirs_override=filter_criteria.get("log_dirs_override"),
459 |         )
460 | 
461 |         if not target_files:
462 |             self.logger.info(
463 |                 "[AnalysisEngine.search_logs] No log files found by _get_target_log_files. Returning pathway OK message."
464 |             )
465 |             # Return a specific message indicating pathway is okay but no files found
466 |             return [{"message": "No target files found, but pathway OK."}]
467 | 
468 |         self.logger.info(f"[AnalysisEngine.search_logs] Target files found: {target_files}")
469 | 
470 |         # 2. Parse all lines from target files
471 |         for file_path in target_files:
472 |             try:
473 |                 with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
474 |                     lines = f.readlines()
475 |                     # Store all lines for context extraction later
476 |                     all_raw_lines_by_file[file_path] = [
477 |                         line.rstrip("\n") for line in lines
478 |                     ]  # Store lines with only their trailing newlines stripped
479 |                     for i, line_content in enumerate(lines):
480 |                         entry = self._parse_log_line(line_content.strip(), file_path, i + 1)  # line_number is 1-indexed
481 |                         if entry:
482 |                             parsed_entries.append(entry)
483 |             except Exception as e:  # pylint: disable=broad-exception-caught
484 |                 self.logger.error(f"Error reading or parsing file {file_path}: {e}", exc_info=True)
485 |                 continue  # Continue with other files
486 | 
487 |         self.logger.info(f"[AnalysisEngine.search_logs] Parsed {len(parsed_entries)} entries from all target files.")
488 |         if not parsed_entries:
489 |             self.logger.info("[AnalysisEngine.search_logs] No entries parsed from target files.")
490 |             return []
491 | 
492 |         # 3. Apply content filters (level and regex)
493 |         filtered_entries = self._apply_content_filters(parsed_entries, filter_criteria)
494 |         if not filtered_entries:
495 |             self.logger.info("[AnalysisEngine.search_logs] No entries left after content filters.")
496 |             return []
497 | 
498 |         # 4. Apply time filters
499 |         filtered_entries = self._apply_time_filters(filtered_entries, filter_criteria)
500 |         if not filtered_entries:
501 |             self.logger.info("[AnalysisEngine.search_logs] No entries left after time filters.")
502 |             return []
503 | 
504 |         # 5. Apply positional filters (first_n, last_n)
505 |         # Note: _apply_positional_filters sorts by timestamp and handles entries without timestamps
506 |         filtered_entries = self._apply_positional_filters(filtered_entries, filter_criteria)
507 |         if not filtered_entries:
508 |             self.logger.info("[AnalysisEngine.search_logs] No entries left after positional filters.")
509 |             return []
510 | 
511 |         # 6. Extract context lines for the final set of entries
512 |         # Use context_before and context_after from filter_criteria, or defaults from config
513 |         context_before = filter_criteria.get("context_before", self.default_context_lines_before)
514 |         context_after = filter_criteria.get("context_after", self.default_context_lines_after)
515 | 
516 |         final_entries_with_context = self._extract_context_lines(
517 |             filtered_entries, all_raw_lines_by_file, context_before, context_after
518 |         )
519 | 
520 |         self.logger.info(f"[AnalysisEngine.search_logs] Returning {len(final_entries_with_context)} processed entries.")
521 |         # The tool expects a list of dicts, and ParsedLogEntry is already a Dict[str, Any]
522 |         return final_entries_with_context
523 | 
524 | 
525 | # TODO: Add helper functions for parsing, filtering, file handling etc. as needed.
526 | 
```
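
Since `search_logs` drives the whole pipeline above (file resolution, parsing, content/time/positional filtering, context extraction), a minimal driver sketch may help; it is not part of the repository. It assumes the package is importable and that the `ConfigLoader` defaults resolve when run from the project root; the `"mcp"` scope is an assumption about the configured logging scopes, and the filter keys are exactly those read by the `_apply_*_filters` helpers.

```python
# Hypothetical driver, not from the repo: runs one search with the documented
# filter_criteria keys from the search_logs docstring.
import logging

from log_analyzer_mcp.core.analysis_engine import AnalysisEngine

logging.basicConfig(level=logging.INFO)
engine = AnalysisEngine(logger_instance=logging.getLogger("engine-demo"))

entries = engine.search_logs(
    {
        "scope": "mcp",            # or "log_dirs_override": ["logs/**/*.log"]
        "level_filter": "ERROR",   # apply configured ERROR content patterns
        "minutes": 30,             # only entries from the last 30 minutes
        "last_n": 10,              # newest 10 matches, sorted by timestamp
        "context_before": 2,       # context lines around each match
        "context_after": 2,
    }
)
for entry in entries:
    print(entry.get("full_context_log", entry.get("raw_line")))
```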

--------------------------------------------------------------------------------
/src/log_analyzer_mcp/log_analyzer_mcp_server.py:
--------------------------------------------------------------------------------

```python
  1 | #!/usr/bin/env python3
  2 | """
  3 | Log Analyzer MCP Server
  4 | 
  5 | Implements the Model Context Protocol (MCP) for Cursor to analyze logs and test results.
  6 | """
  7 | 
  8 | import asyncio
  9 | import anyio
 10 | import os
 11 | import re
 12 | import subprocess
 13 | import sys
 14 | import functools
 15 | from datetime import datetime
 16 | from typing import Any, Callable
 17 | 
 18 | from mcp.server.fastmcp import FastMCP
 19 | 
 20 | # MCP and Pydantic related imports
 21 | from mcp.shared.exceptions import McpError
 22 | from mcp.types import (
 23 |     ErrorData,
 24 | )
 25 | from pydantic import BaseModel, Field
 26 | 
 27 | # Project-specific imports
 28 | from log_analyzer_mcp.common.logger_setup import LoggerSetup, get_logs_dir
 29 | from log_analyzer_mcp.common.utils import build_filter_criteria
 30 | from log_analyzer_mcp.core.analysis_engine import AnalysisEngine
 31 | from log_analyzer_mcp.test_log_parser import analyze_pytest_log_content
 32 | 
 33 | # Explicitly attempt to initialize coverage for subprocesses
 34 | if "COVERAGE_PROCESS_START" in os.environ:
 35 |     try:
 36 |         import coverage
 37 | 
 38 |         coverage.process_startup()
 39 |         # If your logger is configured very early, you could add a log here:
 40 |         # print("DEBUG: coverage.process_startup() called in subprocess.", flush=True)
 41 |     except ImportError:
 42 |         # print("DEBUG: COVERAGE_PROCESS_START set, but coverage module not found.", flush=True)
 43 |         pass  # Or handle error if coverage is mandatory for the subprocess
 44 |     except Exception:  # pylint: disable=broad-exception-caught
 45 |         # print(f"DEBUG: Error calling coverage.process_startup(): {e}", flush=True)
 46 |         pass
 47 | 
 48 | # Define script_dir here as it is used for path definitions
 49 | script_dir = os.path.dirname(os.path.abspath(__file__))
 50 | # project_root is resolved by logger_setup, so no local calculation is needed.
 51 | 
 52 | # Set up logging using centralized configuration
 53 | logs_base_dir = get_logs_dir()
 54 | mcp_log_dir = os.path.join(logs_base_dir, "mcp")
 55 | # The project root and logs directory are resolved centrally by logger_setup,
 56 | # based on a known directory layout relative to this file:
 57 | #   __file__   is src/log_analyzer_mcp/log_analyzer_mcp_server.py
 58 | #   script_dir is src/log_analyzer_mcp/
 59 | #   the parent of script_dir is src/
 60 | #   the parent of src/ is the project root
 61 | # Earlier direct calculations of actual_project_root and mcp_log_dir here were
 62 | # removed in favor of get_logs_dir() from logger_setup.
 63 | 
 64 | os.makedirs(mcp_log_dir, exist_ok=True)  # mcp_log_dir comes from get_logs_dir()
 65 | 
 66 | # Determine the log file path, prioritizing MCP_LOG_FILE env var
 67 | env_log_file = os.getenv("MCP_LOG_FILE")
 68 | if env_log_file:
 69 |     log_file_path = os.path.abspath(env_log_file)
 70 |     # Ensure the directory for the environment-specified log file exists
 71 |     env_log_file_dir = os.path.dirname(log_file_path)
 72 |     if not os.path.exists(env_log_file_dir):
 73 |         try:
 74 |             os.makedirs(env_log_file_dir, exist_ok=True)
 75 |             # Temporary print to confirm this path is taken
 76 |             print(
 77 |                 f"DEBUG_MCP_SERVER: Ensured directory exists for MCP_LOG_FILE: {env_log_file_dir}",
 78 |                 file=sys.stderr,
 79 |                 flush=True,
 80 |             )
 81 |         except OSError as e:
 82 |             print(
 83 |                 f"Warning: Could not create directory for MCP_LOG_FILE {env_log_file_dir}: {e}",
 84 |                 file=sys.stderr,
 85 |                 flush=True,
 86 |             )
 87 |             # Fallback to default if directory creation fails for env var path
 88 |             log_file_path = os.path.join(mcp_log_dir, "log_analyzer_mcp_server.log")
 89 |     print(
 90 |         f"DEBUG_MCP_SERVER: Using MCP_LOG_FILE from environment: {log_file_path}", file=sys.stderr, flush=True
 91 |     )
 92 | else:
 93 |     log_file_path = os.path.join(mcp_log_dir, "log_analyzer_mcp_server.log")
 94 |     print(f"DEBUG_MCP_SERVER: Using default log_file_path: {log_file_path}", file=sys.stderr, flush=True)
 95 | 
 96 | logger = LoggerSetup.create_logger("LogAnalyzerMCP", log_file_path, agent_name="LogAnalyzerMCP")
 97 | logger.setLevel("DEBUG")  # Set to debug level for MCP server
 98 | 
 99 | # CRITICAL DEBUG: Print to stderr immediately after logger setup
100 | print(f"DEBUG_MCP_SERVER: Logger initialized. Attempting to log to: {log_file_path}", file=sys.stderr, flush=True)
101 | 
102 | logger.info("Log Analyzer MCP Server starting. Logging to %s", log_file_path)
103 | 
104 | # Initialize AnalysisEngine instance (can be done once)
105 | # It will load .env settings by default upon instantiation.
106 | analysis_engine = AnalysisEngine(logger_instance=logger)
107 | 
108 | # Paths to legacy helper scripts were removed; their functionality now comes
109 | # from hatch commands and in-package modules:
110 | #   log_analyzer.py, analyze_runtime_errors.py, run_all_tests.py (hatch test),
111 | #   create_coverage_report.sh (hatch run hatch-test:*)
112 | 
113 | test_log_file = os.path.join(
114 |     logs_base_dir, "run_all_tests.log"
115 | )  # Main test log, populated by hatch test output
116 | # coverage_xml_path = os.path.join(logs_base_dir, 'tests', 'coverage', 'coverage.xml')
117 | 
118 | # Initialize FastMCP server
119 | # Add lifespan support for startup/shutdown with strong typing
120 | from collections.abc import AsyncIterator
121 | from contextlib import asynccontextmanager
122 | 
123 | 
124 | @asynccontextmanager
125 | async def server_lifespan(_server: FastMCP) -> AsyncIterator[None]:  # Simple lifespan, no app context needed
126 |     logger.info("MCP Server Lifespan: Startup phase entered.")
127 |     try:
128 |         yield
129 |     finally:
130 |         logger.info("MCP Server Lifespan: Shutdown phase entered (finally block).")
131 | 
132 | 
133 | mcp = FastMCP("log_analyzer", lifespan=server_lifespan)
134 | 
135 | 
136 | # Define input models for tool validation
137 | class AnalyzeTestsInput(BaseModel):
138 |     """Parameters for analyzing tests."""
139 | 
140 |     summary_only: bool = Field(default=False, description="Whether to return only a summary of the test results")
141 | 
142 | 
143 | class RunTestsInput(BaseModel):
144 |     """Parameters for running tests."""
145 | 
146 |     verbosity: int = Field(default=1, description="Verbosity level for the test runner (0-2)", ge=0, le=2)
147 | 
148 | 
149 | class CreateCoverageReportInput(BaseModel):
150 |     """Parameters for creating coverage report."""
151 | 
152 |     force_rebuild: bool = Field(
153 |         default=False, description="Whether to force rebuilding the coverage report even if it already exists"
154 |     )
155 | 
156 | 
157 | class RunUnitTestInput(BaseModel):
158 |     """Parameters for running specific unit tests."""
159 | 
160 |     agent: str = Field(description="The agent to run tests for (e.g., 'qa_agent', 'backlog_agent')")
161 |     verbosity: int = Field(default=1, description="Verbosity level (0=minimal, 1=normal, 2=detailed)", ge=0, le=2)
162 | 
163 | 
164 | # Define default runtime logs directory
165 | DEFAULT_RUNTIME_LOGS_DIR = os.path.join(logs_base_dir, "runtime")  # RESTORED logs_base_dir
166 | 
167 | 
168 | # async def analyze_test_log(log_file_path: str, summary_only: bool = False) -> Dict[str, Any]: # REMOVED: Functionality moved to test_log_parser
169 | #     """
170 | #     Analyze a test log file and return structured results.
171 | #     ...
172 | #     """
173 | #     ...
174 | 
175 | 
176 | @mcp.tool()
177 | async def analyze_tests(summary_only: bool = False) -> dict[str, Any]:
178 |     """Analyze the most recent test run and provide detailed information about failures.
179 | 
180 |     Args:
181 |         summary_only: Whether to return only a summary of the test results
182 |     """
183 |     logger.info("Analyzing test results (summary_only=%s)...", summary_only)
184 | 
185 |     log_file = test_log_file
186 | 
187 |     if not os.path.exists(log_file):
188 |         error_msg = f"Test log file not found at: {log_file}. Please run tests first."
189 |         logger.error(error_msg)
190 |         return {"error": error_msg, "summary": {"status": "ERROR", "passed": 0, "failed": 0, "skipped": 0}}
191 | 
192 |     try:
193 |         with open(log_file, encoding="utf-8", errors="ignore") as f:
194 |             log_contents = f.read()
195 | 
196 |         if not log_contents.strip():
197 |             error_msg = f"Test log file is empty: {log_file}"
198 |             logger.warning(error_msg)
199 |             return {"error": error_msg, "summary": {"status": "EMPTY", "passed": 0, "failed": 0, "skipped": 0}}
200 | 
201 |         analysis = analyze_pytest_log_content(log_contents, summary_only=summary_only)
202 | 
203 |         # Add metadata similar to the old analyze_test_log function
204 |         log_time = datetime.fromtimestamp(os.path.getmtime(log_file))
205 |         time_elapsed = (datetime.now() - log_time).total_seconds() / 60  # minutes
206 |         analysis["log_file"] = log_file
207 |         analysis["log_timestamp"] = log_time.isoformat()
208 |         analysis["log_age_minutes"] = round(time_elapsed, 1)
209 | 
210 |         # The analyze_pytest_log_content already returns a structure including 'overall_summary'.
211 |         # If summary_only is true, it returns only that. Otherwise, it returns more details.
212 |         # We can directly return this analysis dictionary.
213 | 
214 |         # Ensure there's always a summary structure for consistent access, even if minimal
215 |         if "overall_summary" not in analysis:
216 |             analysis["overall_summary"] = {"status": "UNKNOWN", "passed": 0, "failed": 0, "skipped": 0}
217 |         if "summary" not in analysis:  # for backward compatibility or general access
218 |             analysis["summary"] = analysis["overall_summary"]
219 | 
220 |         logger.info(
221 |             "Test log analysis completed using test_log_parser. Summary status: %s",
222 |             analysis.get("summary", {}).get("status"),
223 |         )
224 |         return analysis
225 | 
226 |     except Exception as e:  # pylint: disable=broad-exception-caught
227 |         error_msg = f"Error analyzing test log file with test_log_parser: {e}"
228 |         logger.error(error_msg, exc_info=True)
229 |         return {"error": error_msg, "summary": {"status": "ERROR", "passed": 0, "failed": 0, "skipped": 0}}
230 | 
231 | 
232 | async def _run_tests(
233 |     verbosity: Any | None = None,
234 |     agent: str | None = None,
235 |     pattern: str | None = None,
236 |     run_with_coverage: bool = False,
237 | ) -> dict[str, Any]:
238 |     """Internal helper function to run tests using hatch.
239 | 
240 |     Args:
241 |         verbosity: Optional verbosity level (0=minimal, 1=normal, 2=detailed for pytest)
242 |         agent: Optional agent name to run only tests for that agent (e.g., 'qa_agent')
243 |         pattern: Optional pattern to filter test files (e.g., 'test_qa_*.py')
244 |         run_with_coverage: Whether to run tests with coverage enabled via 'hatch test --cover'.
245 |     """
246 |     logger.info(
247 |         "Preparing to run tests via hatch (verbosity=%s, agent=%s, pattern=%s, coverage=%s)...",
248 |         verbosity,
249 |         agent,
250 |         pattern,
251 |         run_with_coverage,
252 |     )
253 | 
254 |     hatch_base_cmd = ["hatch", "test"]
255 |     pytest_args = []
256 | 
257 |     # ALWAYS add arguments to ignore the server integration tests to prevent recursion
258 |     # when tests are run *by this tool*.
259 |     pytest_args.extend(
260 |         [
261 |             "--ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py",
262 |             "--ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py",
263 |         ]
264 |     )
265 |     logger.debug("Added ignore patterns for server integration tests (tool-invoked run).")
266 | 
267 |     if run_with_coverage:
268 |         hatch_base_cmd.append("--cover")
269 |         logger.debug("Coverage enabled for hatch test run.")
270 |         # Tell pytest not to activate its own coverage plugin, as 'coverage run' is handling it.
271 |         pytest_args.append("-p")
272 |         pytest_args.append("no:cov")
273 |         logger.debug("Added '-p no:cov' to pytest arguments for coverage run.")
274 | 
275 |     # Verbosity for pytest: -q (0), (1), -v (2), -vv (3+)
276 |     if verbosity is not None:
277 |         try:
278 |             v_int = int(verbosity)
279 |             if v_int == 0:
280 |                 pytest_args.append("-q")
281 |             elif v_int == 2:
282 |                 pytest_args.append("-v")
283 |             elif v_int >= 3:
284 |                 pytest_args.append("-vv")
285 |             # Default (verbosity=1) means no specific pytest verbosity arg, relies on hatch default
286 |         except ValueError:
287 |             logger.warning("Invalid verbosity value '%s', using default.", verbosity)
288 | 
289 |     # Construct pytest -k argument if agent or pattern is specified
290 |     k_expressions = []
291 |     if agent:
292 |         # Assuming agent name can be part of test names like test_agent_... or ..._agent_...
293 |         k_expressions.append(agent)  # Plain string; pytest -k treats it as a keyword/substring match.
294 |         logger.debug("Added agent '%s' to -k filter expressions.", agent)
295 |     if pattern:
296 |         k_expressions.append(pattern)
297 |         logger.debug("Added pattern '%s' to -k filter expressions.", pattern)
298 | 
299 |     if k_expressions:
300 |         pytest_args.extend(["-k", " or ".join(k_expressions)])  # pytest -k "expr1 or expr2"
301 | 
302 |     hatch_cmd = hatch_base_cmd
303 |     if pytest_args:  # Pass pytest arguments after --
304 |         hatch_cmd.extend(["--"] + pytest_args)
305 | 
306 |     logger.info("Constructed hatch command: %s", " ".join(hatch_cmd))
307 | 
308 |     # The output log below is opened in "w" mode, so it is overwritten on each run
309 |     # and analysis always reflects the latest test execution.
310 |     test_log_output_path = os.path.join(logs_base_dir, "run_all_tests.log")  # RESTORED logs_base_dir
311 |     logger.debug("Expected test output log path for analysis: %s", test_log_output_path)
312 | 
313 |     try:
314 |         # Run the command using anyio.to_thread to avoid blocking asyncio event loop
315 |         # Ensure text=True for automatic decoding of stdout/stderr to string
316 |         process = await anyio.to_thread.run_sync(  # type: ignore[attr-defined]
317 |             functools.partial(
318 |                 subprocess.run,
319 |                 hatch_cmd,
320 |                 capture_output=True,
321 |                 text=True,  # Decode stdout/stderr as text (usually UTF-8)
322 |                 check=False,  # Don't raise exception for non-zero exit, handle manually
323 |                 timeout=120,  # Seconds; keep in sync with the TimeoutExpired message below
324 |             )
325 |         )
326 |         stdout_output: str = process.stdout
327 |         stderr_output: str = process.stderr
328 |         rc = process.returncode
329 | 
330 |         if rc not in [0, 1, 5]:
331 |             logger.error("Hatch test command failed with unexpected pytest return code: %s", rc)
332 |             logger.error("STDOUT:\n%s", stdout_output)
333 |             logger.error("STDERR:\n%s", stderr_output)
334 |             return {
335 |                 "success": False,
336 |                 "error": f"Test execution failed with code {rc}",
337 |                 "test_output": stdout_output + "\n" + stderr_output,
338 |                 "analysis_log_path": None,
339 |             }
340 | 
341 |         logger.debug("Saving combined stdout/stderr from hatch test to %s", test_log_output_path)
342 |         with open(test_log_output_path, "w", encoding="utf-8") as f:
343 |             f.write(stdout_output)
344 |             f.write("\n")
345 |             f.write(stderr_output)
346 |         logger.debug("Content saved to %s", test_log_output_path)
347 | 
348 |         # _run_tests now only runs tests and saves the log.
349 |         # Analysis is done by the analyze_tests tool or by the caller if needed.
350 | 
351 |         # The old log_analyzer.main() call is removed.
352 |         # If an agent was specified it is already reflected in the pytest -k filter,
353 |         # so no extra per-agent bookkeeping is added to the result.
354 |         if agent:
355 |             # Intentional no-op: callers needing per-agent detail can analyze the
356 |             # saved log (analysis_log_path below), e.g. via the analyze_tests tool.
357 |             pass
358 | 
359 |         return {
360 |             "success": True,
361 |             "return_code": rc,
362 |             "test_output": stdout_output + "\n" + stderr_output,
363 |             "analysis_log_path": test_log_output_path,  # Provide path to the log for analysis
364 |             # "analysis" field is removed from here as _run_tests no longer parses.
365 |         }
366 | 
367 |     except subprocess.TimeoutExpired as e:
368 |         stdout_output = e.stdout if isinstance(e.stdout, str) else (e.stdout or b"").decode("utf-8", errors="replace")
369 |         stderr_output = e.stderr if isinstance(e.stderr, str) else (e.stderr or b"").decode("utf-8", errors="replace")
370 |         stderr_output += "\nError: Test execution timed out after 120 seconds."
371 |         rc = 1  # Indicate failure
372 |         logger.error("Test execution in _run_tests timed out: %s", e)
373 |         return {
374 |             "success": False,
375 |             "error": stderr_output,
376 |             "test_output": stdout_output + "\n" + stderr_output,
377 |             "analysis_log_path": None,
378 |         }
379 |     except Exception as e:  # pylint: disable=broad-exception-caught
380 |         logger.error("An unexpected error occurred in _run_tests: %s", e, exc_info=True)
381 |         # Capture output if process started
382 |         final_stdout = ""
383 |         final_stderr = ""
384 |         if "stdout_output" in locals() and "stderr_output" in locals():  # Check if subprocess.run completed
385 |             final_stdout = stdout_output
386 |             final_stderr = stderr_output
387 |         # else: subprocess.run did not complete (e.g. it failed to spawn), so no
388 |         # captured output is available at this point.
389 | 
390 |         return {
391 |             "success": False,
392 |             "error": f"Unexpected error: {e}",
393 |             "test_output": final_stdout + "\n" + final_stderr,
394 |             "analysis_log_path": None,
395 |         }
396 | 
397 | 
398 | @mcp.tool()
399 | async def run_tests_no_verbosity() -> dict[str, Any]:
400 |     """Run all tests with minimal output (verbosity level 0)."""
401 |     return await _run_tests("0")
402 | 
403 | 
404 | @mcp.tool()
405 | async def run_tests_verbose() -> dict[str, Any]:
406 |     """Run all tests with verbose output (verbosity level 1)."""
407 |     return await _run_tests("1")
408 | 
409 | 
410 | @mcp.tool()
411 | async def run_tests_very_verbose() -> dict[str, Any]:
412 |     """Run all tests with very verbose output (verbosity level 2) and coverage enabled."""
413 |     logger.info("Running tests with verbosity 2...")
414 |     return await _run_tests(verbosity=2, run_with_coverage=True)
415 | 
416 | 
417 | @mcp.tool()
418 | async def ping() -> str:
419 |     """Check if the MCP server is alive."""
420 |     logger.debug("ping called")
421 |     return f"Status: ok\nTimestamp: {datetime.now().isoformat()}\nMessage: Log Analyzer MCP Server is running"
422 | 
423 | 
424 | async def run_coverage_script(force_rebuild: bool = False) -> dict[str, Any]:
425 |     """
426 |     Run the coverage steps via hatch scripts and generate HTML and XML reports.
427 |     Note: force_rebuild is accepted for API compatibility but is not currently used.
428 |     """
429 |     logger.info("Running coverage script...")
430 |     # PROJECT_ROOT is defined once in the shared logger_setup module.
431 |     from log_analyzer_mcp.common import logger_setup as common_logger_setup
432 | 
433 |     current_project_root = common_logger_setup.PROJECT_ROOT
434 |     # Define different timeouts for different steps
435 |     timeout_run_cov = 300  # Longer timeout for running tests with coverage
436 |     timeout_cov_report = 120  # Shorter timeout for generating the report
437 | 
438 |     # Command parts for running the coverage script via hatch
439 |     # This assumes 'run-cov' and 'cov-report' are defined in hatch envs.
440 |     # Step 1: Run tests with coverage enabled
441 |     cmd_parts_run_cov = ["hatch", "run", "hatch-test.py3.12:run-cov"]  # Example: Target specific py version
442 |     # Step 2: Generate combined report (HTML and XML)
443 |     cmd_parts_report = ["hatch", "run", "hatch-test.py3.12:cov-report"]  # Example
444 | 
445 |     outputs = []
446 |     errors_encountered = []
447 | 
448 |     steps_with_timeouts = [
449 |         ("run-cov", cmd_parts_run_cov, timeout_run_cov),
450 |         ("cov-report", cmd_parts_report, timeout_cov_report),
451 |     ]
452 | 
453 |     for step_name, cmd_parts, current_timeout_seconds in steps_with_timeouts:
454 |         logger.info(
455 |             "Executing coverage step '%s': %s (timeout: %ss)", step_name, " ".join(cmd_parts), current_timeout_seconds
456 |         )
457 |         try:
458 |             # Use functools.partial for subprocess.run
459 |             configured_subprocess_run_step = functools.partial(
460 |                 subprocess.run,
461 |                 cmd_parts,
462 |                 cwd=current_project_root,
463 |                 capture_output=True,
464 |                 text=True,  # Decode stdout/stderr as text
465 |                 check=False,  # Handle non-zero exit manually
466 |                 timeout=current_timeout_seconds,  # Use current step's timeout
467 |             )
468 |             process = await anyio.to_thread.run_sync(configured_subprocess_run_step)  # type: ignore[attr-defined]
469 |             stdout_output: str = process.stdout
470 |             stderr_output: str = process.stderr
471 |             rc = process.returncode
472 | 
473 |             outputs.append(f"--- {step_name} STDOUT ---\n{stdout_output}")
474 |             if stderr_output:
475 |                 outputs.append(f"--- {step_name} STDERR ---\n{stderr_output}")
476 | 
477 |             if rc != 0:
478 |                 error_msg = f"Coverage step '{step_name}' failed with return code {rc}."
479 |                 logger.error("%s\nSTDERR:\n%s", error_msg, stderr_output)
480 |                 errors_encountered.append(error_msg)
481 |                 # Optionally break if a step fails, or collect all errors
482 |                 # break
483 | 
484 |         except subprocess.TimeoutExpired as e:
485 |             stdout_output = e.stdout if isinstance(e.stdout, str) else (e.stdout or b"").decode("utf-8", errors="replace")
486 |             stderr_output = e.stderr if isinstance(e.stderr, str) else (e.stderr or b"").decode("utf-8", errors="replace")
487 |             error_msg = f"Coverage step '{step_name}' timed out after {current_timeout_seconds} seconds."
488 |             logger.error("%s: %s", error_msg, e)
489 |             errors_encountered.append(error_msg)
490 |             outputs.append(f"--- {step_name} TIMEOUT STDOUT ---\n{stdout_output}")
491 |             outputs.append(f"--- {step_name} TIMEOUT STDERR ---\n{stderr_output}")
492 |             # break
493 |         except Exception as e:  # pylint: disable=broad-exception-caught
494 |             error_msg = f"Error during coverage step '{step_name}': {e}"
495 |             logger.error(error_msg, exc_info=True)
496 |             errors_encountered.append(error_msg)
497 |             # break
498 | 
499 |     # Ensure a dictionary is always returned, even if errors occurred.
500 |     final_success = not errors_encountered
501 |     overall_message = (
502 |         "Coverage script steps completed." if final_success else "Errors encountered during coverage script execution."
503 |     )
504 |     # Placeholder for actual report paths, adapt as needed
505 |     coverage_xml_report_path = os.path.join(logs_base_dir, "tests", "coverage", "coverage.xml")
506 |     coverage_html_index_path = os.path.join(logs_base_dir, "tests", "coverage", "html", "index.html")
507 | 
508 |     return {
509 |         "success": final_success,
510 |         "message": overall_message,
511 |         "details": "\n".join(outputs),
512 |         "errors": errors_encountered,
513 |         "coverage_xml_path": coverage_xml_report_path if final_success else None,  # Example path
514 |         "coverage_html_index": coverage_html_index_path if final_success else None,  # Example path
515 |         "timestamp": datetime.now().isoformat(),
516 |     }
517 | 
518 | 
519 | @mcp.tool()
520 | async def create_coverage_report(force_rebuild: bool = False) -> dict[str, Any]:
521 |     """
522 |     Run the coverage report script and generate HTML and XML reports.
523 | 
524 |     Args:
525 |         force_rebuild: Whether to force rebuilding the report even if it exists
526 | 
527 |     Returns:
528 |         Dictionary containing execution results and report paths
529 |     """
530 |     return await run_coverage_script(force_rebuild)
531 | 
532 | 
533 | @mcp.tool()
534 | async def run_unit_test(agent: str, verbosity: int = 1) -> dict[str, Any]:
535 |     """
536 |     Run tests for a specific agent only.
537 | 
538 |     This tool runs tests that match the agent's patterns including both main agent tests
539 |     and healthcheck tests, significantly reducing test execution time compared to running all tests.
540 |     Use this tool when you need to focus on testing a specific agent component.
541 | 
542 |     Args:
543 |         agent: The agent to run tests for (e.g., 'qa_agent', 'backlog_agent')
544 |         verbosity: Verbosity level (0=minimal, 1=normal, 2=detailed), default is 1
545 | 
546 |     Returns:
547 |         Dictionary containing test results and analysis
548 |     """
549 |     logger.info("Running unit tests for agent: %s with verbosity %s", agent, verbosity)
550 | 
551 |     # _run_tests derives the test filter from the agent name: the agent becomes part
552 |     # of a pytest -k expression, so a single call covers both main agent tests and
553 |     # healthcheck tests; no separate calls are needed unless _run_tests changes.
554 | 
555 |     # For verbosity, _run_tests expects 0, 1, or 2 as string or int.
556 |     # The pattern is derived by _run_tests from the agent name.
557 |     results = await _run_tests(agent=agent, verbosity=verbosity, run_with_coverage=False)
558 | 
559 |     # The response structure from _run_tests is already suitable for run_unit_test.
560 |     # It includes success, return_code, test_output, and analysis_log_path.
561 |     # No manual combination of results is needed; _run_tests applies the agent filter.
562 | 
563 |     return results
564 | 
565 | 
566 | # --- Pydantic Models for Search Tools ---
567 | class BaseSearchInput(BaseModel):
568 |     """Base model for common search parameters."""
569 | 
570 |     scope: str = Field(default="default", description="Logging scope to search within (from .env scopes or default).")
571 |     context_before: int = Field(default=2, description="Number of lines before a match.", ge=0)
572 |     context_after: int = Field(default=2, description="Number of lines after a match.", ge=0)
573 |     log_dirs_override: str = Field(
574 |         default="",
575 |         description="Comma-separated list of log directories, files, or glob patterns (overrides .env for file locations).",
576 |     )
577 |     log_content_patterns_override: str = Field(
578 |         default="",
579 |         description="Comma-separated list of REGEX patterns for log messages (overrides .env content filters).",
580 |     )
581 | 
582 | 
583 | class SearchLogAllInput(BaseSearchInput):
584 |     """Input for search_log_all_records."""
585 | 
586 | 
587 | @mcp.tool()
588 | async def search_log_all_records(
589 |     scope: str = "default",
590 |     context_before: int = 2,
591 |     context_after: int = 2,
592 |     log_dirs_override: str = "",
593 |     log_content_patterns_override: str = "",
594 | ) -> list[dict[str, Any]]:
595 |     """Search for all log records, optionally filtering by scope and content patterns, with context."""
596 |     # Forcing re-initialization of analysis_engine for debugging module caching.
597 |     # Pass project_root_for_config=None to allow AnalysisEngine to determine it.
598 |     current_analysis_engine = AnalysisEngine(logger_instance=logger, project_root_for_config=None)
599 |     print(
600 |         f"DEBUG_MCP_TOOL_SEARCH_ALL: Entered search_log_all_records with log_dirs_override='{log_dirs_override}'",
601 |         file=sys.stderr,
602 |         flush=True,
603 |     )
604 |     logger.info(
605 |         "MCP search_log_all_records called with scope='%s', context=%sB/%sA, "
606 |         "log_dirs_override='%s', log_content_patterns_override='%s'",
607 |         scope,
608 |         context_before,
609 |         context_after,
610 |         log_dirs_override,
611 |         log_content_patterns_override,
612 |     )
613 |     log_dirs_list = log_dirs_override.split(",") if log_dirs_override else None
614 |     log_content_patterns_list = log_content_patterns_override.split(",") if log_content_patterns_override else None
615 | 
616 |     filter_criteria = build_filter_criteria(
617 |         scope=scope,
618 |         context_before=context_before,
619 |         context_after=context_after,
620 |         log_dirs_override=log_dirs_list,
621 |         log_content_patterns_override=log_content_patterns_list,
622 |     )
623 |     try:
624 |         results = await asyncio.to_thread(current_analysis_engine.search_logs, filter_criteria)
625 |         logger.info("search_log_all_records returning %s records.", len(results))
626 |         return results
627 |     except Exception as e:  # pylint: disable=broad-exception-caught
628 |         logger.error("Error in search_log_all_records: %s", e, exc_info=True)
629 |         custom_message = f"Failed to search all logs: {e!s}"
630 |         raise McpError(ErrorData(code=-32603, message=custom_message)) from e
631 | 
632 | 
633 | class SearchLogTimeBasedInput(BaseSearchInput):
634 |     """Input for search_log_time_based."""
635 | 
636 |     minutes: int = Field(default=0, description="Search logs from the last N minutes.", ge=0)
637 |     hours: int = Field(default=0, description="Search logs from the last N hours.", ge=0)
638 |     days: int = Field(default=0, description="Search logs from the last N days.", ge=0)
639 | 
640 |     # Custom validation to ensure at least one time field is set if others are default (0)
641 |     # Pydantic v2: @model_validator(mode='after')
642 |     # Pydantic v1: @root_validator(pre=False)
643 |     # For simplicity here, relying on tool logic to handle it, or can add validator if needed.
644 | 
645 | 
646 | @mcp.tool()
647 | async def search_log_time_based(
648 |     minutes: int = 0,
649 |     hours: int = 0,
650 |     days: int = 0,
651 |     scope: str = "default",
652 |     context_before: int = 2,
653 |     context_after: int = 2,
654 |     log_dirs_override: str = "",
655 |     log_content_patterns_override: str = "",
656 | ) -> list[dict[str, Any]]:
657 |     """Search logs within a time window, optionally filtering, with context."""
658 |     logger.info(
659 |         "MCP search_log_time_based called with time=%sd/%sh/%sm, scope='%s', "
660 |         "context=%sB/%sA, log_dirs_override='%s', "
661 |         "log_content_patterns_override='%s'",
662 |         days,
663 |         hours,
664 |         minutes,
665 |         scope,
666 |         context_before,
667 |         context_after,
668 |         log_dirs_override,
669 |         log_content_patterns_override,
670 |     )
671 | 
672 |     if minutes == 0 and hours == 0 and days == 0:
673 |         logger.warning("search_log_time_based called without a time window (all minutes/hours/days are 0).")
674 | 
675 |     log_dirs_list = log_dirs_override.split(",") if log_dirs_override else None
676 |     log_content_patterns_list = log_content_patterns_override.split(",") if log_content_patterns_override else None
677 | 
678 |     filter_criteria = build_filter_criteria(
679 |         minutes=minutes,
680 |         hours=hours,
681 |         days=days,
682 |         scope=scope,
683 |         context_before=context_before,
684 |         context_after=context_after,
685 |         log_dirs_override=log_dirs_list,
686 |         log_content_patterns_override=log_content_patterns_list,
687 |     )
688 |     try:
689 |         results = await asyncio.to_thread(analysis_engine.search_logs, filter_criteria)
690 |         logger.info("search_log_time_based returning %s records.", len(results))
691 |         return results
692 |     except Exception as e:  # pylint: disable=broad-exception-caught
693 |         logger.error("Error in search_log_time_based: %s", e, exc_info=True)
694 |         custom_message = f"Failed to search time-based logs: {e!s}"
695 |         raise McpError(ErrorData(code=-32603, message=custom_message)) from e
696 | 
697 | 
698 | class SearchLogFirstNInput(BaseSearchInput):
699 |     """Input for search_log_first_n_records."""
700 | 
701 |     count: int = Field(description="Number of first (oldest) matching records to return.", gt=0)
702 | 
703 | 
704 | @mcp.tool()
705 | async def search_log_first_n_records(
706 |     count: int,
707 |     scope: str = "default",
708 |     context_before: int = 2,
709 |     context_after: int = 2,
710 |     log_dirs_override: str = "",
711 |     log_content_patterns_override: str = "",
712 | ) -> list[dict[str, Any]]:
713 |     """Search for the first N (oldest) records, optionally filtering, with context."""
714 |     logger.info(
715 |         "MCP search_log_first_n_records called with count=%s, scope='%s', "
716 |         "context=%sB/%sA, log_dirs_override='%s', "
717 |         "log_content_patterns_override='%s'",
718 |         count,
719 |         scope,
720 |         context_before,
721 |         context_after,
722 |         log_dirs_override,
723 |         log_content_patterns_override,
724 |     )
725 |     if count <= 0:
726 |         logger.error("Invalid count for search_log_first_n_records: %s. Must be > 0.", count)
727 |         raise McpError(ErrorData(code=-32602, message="Count must be a positive integer."))
728 | 
729 |     log_dirs_list = log_dirs_override.split(",") if log_dirs_override else None
730 |     log_content_patterns_list = log_content_patterns_override.split(",") if log_content_patterns_override else None
731 | 
732 |     filter_criteria = build_filter_criteria(
733 |         first_n=count,
734 |         scope=scope,
735 |         context_before=context_before,
736 |         context_after=context_after,
737 |         log_dirs_override=log_dirs_list,
738 |         log_content_patterns_override=log_content_patterns_list,
739 |     )
740 |     try:
741 |         results = await asyncio.to_thread(analysis_engine.search_logs, filter_criteria)
742 |         logger.info("search_log_first_n_records returning %s records.", len(results))
743 |         return results
744 |     except Exception as e:  # pylint: disable=broad-exception-caught
745 |         logger.error("Error in search_log_first_n_records: %s", e, exc_info=True)
746 |         custom_message = f"Failed to search first N logs: {e!s}"
747 |         raise McpError(ErrorData(code=-32603, message=custom_message)) from e
748 | 
749 | 
750 | class SearchLogLastNInput(BaseSearchInput):
751 |     """Input for search_log_last_n_records."""
752 | 
753 |     count: int = Field(description="Number of last (newest) matching records to return.", gt=0)
754 | 
755 | 
756 | @mcp.tool()
757 | async def search_log_last_n_records(
758 |     count: int,
759 |     scope: str = "default",
760 |     context_before: int = 2,
761 |     context_after: int = 2,
762 |     log_dirs_override: str = "",
763 |     log_content_patterns_override: str = "",
764 | ) -> list[dict[str, Any]]:
765 |     """Search for the last N (newest) records, optionally filtering, with context."""
766 |     logger.info(
767 |         "MCP search_log_last_n_records called with count=%s, scope='%s', "
768 |         "context=%sB/%sA, log_dirs_override='%s', "
769 |         "log_content_patterns_override='%s'",
770 |         count,
771 |         scope,
772 |         context_before,
773 |         context_after,
774 |         log_dirs_override,
775 |         log_content_patterns_override,
776 |     )
777 |     if count <= 0:
778 |         logger.error("Invalid count for search_log_last_n_records: %s. Must be > 0.", count)
779 |         raise McpError(ErrorData(code=-32602, message="Count must be a positive integer."))
780 | 
781 |     log_dirs_list = log_dirs_override.split(",") if log_dirs_override else None
782 |     log_content_patterns_list = log_content_patterns_override.split(",") if log_content_patterns_override else None
783 | 
784 |     filter_criteria = build_filter_criteria(
785 |         last_n=count,
786 |         scope=scope,
787 |         context_before=context_before,
788 |         context_after=context_after,
789 |         log_dirs_override=log_dirs_list,
790 |         log_content_patterns_override=log_content_patterns_list,
791 |     )
792 |     try:
793 |         results = await asyncio.to_thread(analysis_engine.search_logs, filter_criteria)
794 |         logger.info("search_log_last_n_records returning %s records.", len(results))
795 |         return results
796 |     except Exception as e:  # pylint: disable=broad-exception-caught
797 |         logger.error("Error in search_log_last_n_records: %s", e, exc_info=True)
798 |         custom_message = f"Failed to search last N logs: {e!s}"
799 |         raise McpError(ErrorData(code=-32603, message=custom_message)) from e
800 | 
801 | 
802 | @mcp.tool()
803 | async def get_server_env_details() -> dict[str, Any]:
804 |     """Returns sys.path and sys.executable from the running MCP server."""
805 |     logger.info("get_server_env_details called.")
806 |     details = {
807 |         "sys_executable": sys.executable,
808 |         "sys_path": sys.path,
809 |         "cwd": os.getcwd(),
810 |         "environ_pythonpath": os.environ.get("PYTHONPATH"),
811 |     }
812 |     logger.info("Server env details: %s", details)
813 |     return details
814 | 
815 | 
816 | # Main entry point for Uvicorn or direct stdio run via script
817 | # Ref: https://fastmcp.numaru.com/usage/server-integration/#uvicorn-integration
818 | # Ref: https://fastmcp.numaru.com/usage/server-integration/#stdio-transport
819 | 
820 | 
821 | def main() -> None:
822 |     """Runs the MCP server, choosing transport based on arguments."""
823 |     import argparse
824 | 
825 |     # Argument parsing should be done first
826 |     parser = argparse.ArgumentParser(description="Log Analyzer MCP Server")
827 |     parser.add_argument(
828 |         "--transport",
829 |         type=str,
830 |         choices=["stdio", "http"],
831 |         default=os.getenv("MCP_DEFAULT_TRANSPORT", "stdio"),  # Default to stdio
832 |         help="Transport protocol to use: 'stdio' or 'http' (default: stdio or MCP_DEFAULT_TRANSPORT env var)",
833 |     )
834 |     parser.add_argument(
835 |         "--host",
836 |         type=str,
837 |         default=os.getenv("MCP_HTTP_HOST", "127.0.0.1"),
838 |         help="Host for HTTP transport (default: 127.0.0.1 or MCP_HTTP_HOST env var)",
839 |     )
840 |     parser.add_argument(
841 |         "--port",
842 |         type=int,
843 |         default=int(os.getenv("MCP_HTTP_PORT", "8000")),
844 |         help="Port for HTTP transport (default: 8000 or MCP_HTTP_PORT env var)",
845 |     )
846 |     parser.add_argument(
847 |         "--log-level",
848 |         type=str,
849 |         default=os.getenv("MCP_LOG_LEVEL", "info"),
850 |         choices=["debug", "info", "warning", "error", "critical"],
851 |         help="Logging level for Uvicorn (default: info or MCP_LOG_LEVEL env var)",
852 |     )
853 |     args = parser.parse_args()
854 | 
855 |     # Uses the global mcp instance and logger already configured at module level.
856 |     # logger.info("Logger for main() using global instance.") # Optional: confirm logger usage
857 | 
858 |     if args.transport == "stdio":
859 |         logger.info("Starting Log Analyzer MCP server in stdio mode via main().")
860 |         mcp.run(transport="stdio")  # FastMCP handles stdio internally
861 |     elif args.transport == "http":
862 |         # Only import uvicorn and ASGIApplication if http transport is selected
863 |         try:
864 |             import uvicorn
865 |             from asgiref.typing import ASGIApplication  # For type hinting
866 |             from typing import cast
867 |         except ImportError as e:
868 |             logger.error("Required packages for HTTP transport (uvicorn, asgiref) are not installed. %s", e)
869 |             sys.exit(1)
870 | 
871 |         logger.info(
872 |             "Starting Log Analyzer MCP server with Uvicorn on %s:%s (log_level: %s)",
873 |             args.host,
874 |             args.port,
875 |             args.log_level,
876 |         )
877 |         uvicorn.run(cast(ASGIApplication, mcp), host=args.host, port=args.port, log_level=args.log_level)
878 |     else:
879 |         # Should not happen due to choices in argparse, but as a fallback:
880 |         logger.error("Unsupported transport type: %s. Exiting.", args.transport)
881 |         sys.exit(1)
882 | 
883 | 
884 | if __name__ == "__main__":
885 |     # This block now directly calls main() to handle argument parsing and server start.
886 |     # This ensures consistency whether run as a script or via the entry point.
887 |     logger.info("Log Analyzer MCP Server script execution (__name__ == '__main__'). Calling main().")
888 |     main()
889 | 
```
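
For readers tracing `_run_tests` above, this standalone sketch reproduces the command assembly with hypothetical inputs (the agent name, verbosity, and coverage flag are illustrative, not values from this repo):

```python
# Illustrative trace of _run_tests' command assembly (hypothetical inputs).
hatch_cmd = ["hatch", "test", "--cover"]  # run_with_coverage=True appends --cover
pytest_args = [
    "--ignore=tests/log_analyzer_mcp/test_log_analyzer_mcp_server.py",
    "--ignore=tests/log_analyzer_mcp/test_analyze_runtime_errors.py",
    "-p", "no:cov",    # hatch drives coverage, so pytest's own plugin is disabled
    "-q",              # verbosity=0
    "-k", "qa_agent",  # agent="qa_agent" becomes a pytest keyword filter
]
print(" ".join(hatch_cmd + ["--"] + pytest_args))
```

The `--` separator is what hands the trailing arguments to pytest rather than to hatch itself.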
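
The per-step timeout pattern in `run_coverage_script` can also be exercised in isolation; here is a minimal sketch of the same loop, assuming only the hatch script names and timeouts already shown above:

```python
# Run a sequence of subprocess steps, each with its own timeout, collecting
# output and errors instead of aborting on the first failure (mirrors
# run_coverage_script; commands and timeouts are the ones shown above).
import subprocess

steps = [
    ("run-cov", ["hatch", "run", "hatch-test.py3.12:run-cov"], 300),
    ("cov-report", ["hatch", "run", "hatch-test.py3.12:cov-report"], 120),
]
outputs: list[str] = []
errors: list[str] = []
for name, cmd, timeout_s in steps:
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, check=False, timeout=timeout_s)
        outputs.append(f"--- {name} ---\n{proc.stdout}")
        if proc.returncode != 0:
            errors.append(f"{name} failed with rc={proc.returncode}")
    except subprocess.TimeoutExpired:
        errors.append(f"{name} timed out after {timeout_s}s")
print("\n".join(errors) if errors else "all steps completed")
```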
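
A minimal client sketch for exercising the tools over stdio. It assumes the official `mcp` Python SDK client API and that the server module is importable as `log_analyzer_mcp.log_analyzer_mcp_server` (per this repo's `src/` layout); adjust `command`/`args` to your environment:

```python
# Minimal MCP stdio client sketch (assumes the official `mcp` Python SDK).
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def demo() -> None:
    # Launch the server over stdio, the default transport in main().
    params = StdioServerParameters(
        command="python", args=["-m", "log_analyzer_mcp.log_analyzer_mcp_server"]
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            print(await session.call_tool("ping", {}))
            print(await session.call_tool("analyze_tests", {"summary_only": True}))


asyncio.run(demo())
```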
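
The search tools take flat string parameters, so the `.env` overrides are passed as comma-separated strings rather than lists. A sketch under the same client assumptions; the directory and pattern values are hypothetical:

```python
# Calling search_log_last_n_records over stdio (same SDK assumptions as above).
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def search_demo() -> None:
    params = StdioServerParameters(
        command="python", args=["-m", "log_analyzer_mcp.log_analyzer_mcp_server"]
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "search_log_last_n_records",
                {
                    "count": 50,
                    "scope": "default",
                    "context_before": 2,
                    "context_after": 2,
                    "log_dirs_override": "logs/runtime",       # hypothetical location
                    "log_content_patterns_override": "ERROR",  # hypothetical regex
                },
            )
            print(result)


asyncio.run(search_demo())
```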
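
`main()` falls back to `MCP_DEFAULT_TRANSPORT`, `MCP_HTTP_HOST`, and `MCP_HTTP_PORT` when the corresponding flags are omitted, so the transport can be selected purely from the environment. A launch sketch (module path assumed from the `src/` layout; the port value is hypothetical):

```python
# Start the server over HTTP by configuring the environment instead of flags.
import os
import subprocess
import time

env = {**os.environ, "MCP_DEFAULT_TRANSPORT": "http", "MCP_HTTP_PORT": "8080"}
proc = subprocess.Popen(
    ["python", "-m", "log_analyzer_mcp.log_analyzer_mcp_server", "--log-level", "debug"],
    env=env,
)
time.sleep(5)  # give Uvicorn a moment to bind before probing http://127.0.0.1:8080
proc.terminate()
proc.wait()
```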